Lines matching refs:gfp_mask (identifier cross-reference over mm/page_alloc.c). The bare number at the start of each line is the source line in the indexed kernel tree; a trailing "argument" marks lines where gfp_mask is a parameter of the named function.

3757 static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)  in __should_fail_alloc_page()  argument
3761 if (gfp_mask & __GFP_NOFAIL) in __should_fail_alloc_page()
3763 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM)) in __should_fail_alloc_page()
3766 (gfp_mask & __GFP_DIRECT_RECLAIM)) in __should_fail_alloc_page()
3797 static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in __should_fail_alloc_page() argument
3804 noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in should_fail_alloc_page() argument
3806 return __should_fail_alloc_page(gfp_mask, order); in should_fail_alloc_page()
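
Fault injection (3757-3806): these matches are the CONFIG_FAIL_PAGE_ALLOC hook that decides whether to artificially fail an allocation. Read back together, the fragments reconstruct roughly the predicate below (a sketch assembled from the matched lines; the min_order check and the debugfs plumbing are elided):

	static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
	{
		if (gfp_mask & __GFP_NOFAIL)
			return false;	/* never inject into must-succeed allocations */
		if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
			return false;
		if (fail_page_alloc.ignore_gfp_reclaim &&
		    (gfp_mask & __GFP_DIRECT_RECLAIM))
			return false;
		return should_fail(&fail_page_alloc.attr, 1 << order);
	}
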
3912 unsigned int alloc_flags, gfp_t gfp_mask) in zone_watermark_fast() argument
3940 if (unlikely(!order && (gfp_mask & __GFP_ATOMIC) && z->watermark_boost in zone_watermark_fast()
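
The single match inside zone_watermark_fast() (3912-3940) is the only place that function consults gfp_mask: an order-0 __GFP_ATOMIC request that just failed the boosted min watermark is retried against the unboosted value, so watermark boosting cannot starve atomic allocations. Roughly:

	if (unlikely(!order && (gfp_mask & __GFP_ATOMIC) && z->watermark_boost &&
		     ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) {
		mark = z->_watermark[WMARK_MIN];	/* ignore the boost */
		return __zone_watermark_ok(z, order, mark, highest_zoneidx,
					   alloc_flags, free_pages);
	}
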
3986 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) in alloc_flags_nofragment() argument
3994 alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM); in alloc_flags_nofragment()
4018 static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask, in gfp_to_alloc_flags_cma() argument
4022 if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE) in gfp_to_alloc_flags_cma()
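
Two small gfp-to-alloc_flags translators. alloc_flags_nofragment() (3986-3994) copies __GFP_KSWAPD_RECLAIM straight into the alloc_flags word; the bare cast works because ALLOC_KSWAPD is deliberately defined with the same bit value. gfp_to_alloc_flags_cma() (4018-4022) lets movable allocations dip into CMA pageblocks; a sketch:

	static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask,
							  unsigned int alloc_flags)
	{
	#ifdef CONFIG_CMA
		if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
			alloc_flags |= ALLOC_CMA;
	#endif
		return alloc_flags;
	}
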
4033 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, in get_page_from_freelist() argument
4055 !__cpuset_zone_allowed(zone, gfp_mask)) in get_page_from_freelist()
4105 gfp_mask)) { in get_page_from_freelist()
4127 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); in get_page_from_freelist()
4147 gfp_mask, alloc_flags, ac->migratetype); in get_page_from_freelist()
4149 prep_new_page(page, order, gfp_mask, alloc_flags); in get_page_from_freelist()
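
get_page_from_freelist() (4033-4149) is the fast path proper. A heavily simplified paraphrase of its zonelist walk, keeping only the steps the matches touch (the real function also handles dirty-page spreading, fragmentation avoidance, and a node_reclaim result switch):

	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
					ac->highest_zoneidx, ac->nodemask) {
		unsigned long mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);

		if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) &&
		    !__cpuset_zone_allowed(zone, gfp_mask))
			continue;
		if (!zone_watermark_fast(zone, order, mark,
					 ac->highest_zoneidx, alloc_flags,
					 gfp_mask) &&
		    node_reclaim(zone->zone_pgdat, gfp_mask, order) <= 0)
			continue;	/* simplification of the reclaim-result switch */
		page = rmqueue(ac->preferred_zoneref->zone, zone, order,
			       gfp_mask, alloc_flags, ac->migratetype);
		if (page) {
			prep_new_page(page, order, gfp_mask, alloc_flags);
			return page;
		}
	}
	return NULL;
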
4182 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask) in warn_alloc_show_mem() argument
4191 if (!(gfp_mask & __GFP_NOMEMALLOC)) in warn_alloc_show_mem()
4195 if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM)) in warn_alloc_show_mem()
4201 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...) in warn_alloc() argument
4207 if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs)) in warn_alloc()
4214 current->comm, &vaf, gfp_mask, &gfp_mask, in warn_alloc()
4221 warn_alloc_show_mem(gfp_mask, nodemask); in warn_alloc()
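
warn_alloc() (4182-4221) is the allocation-failure report. Callers opt out with __GFP_NOWARN, a static ratelimit keeps allocation storms out of the log, and the printk decodes the mask twice: %#x for the raw value and %pGg for symbolic flag names. The matched fragments correspond roughly to:

	if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs))
		return;

	pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl",
		current->comm, &vaf, gfp_mask, &gfp_mask,
		nodemask_pr_args(nodemask));

	warn_alloc_show_mem(gfp_mask, nodemask);
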
4225 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order, in __alloc_pages_cpuset_fallback() argument
4231 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_cpuset_fallback()
4238 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_cpuset_fallback()
4245 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, in __alloc_pages_may_oom() argument
4252 .gfp_mask = gfp_mask, in __alloc_pages_may_oom()
4276 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) & in __alloc_pages_may_oom()
4296 if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE)) in __alloc_pages_may_oom()
4314 if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) { in __alloc_pages_may_oom()
4321 if (gfp_mask & __GFP_NOFAIL) in __alloc_pages_may_oom()
4322 page = __alloc_pages_cpuset_fallback(gfp_mask, order, in __alloc_pages_may_oom()
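
__alloc_pages_may_oom() (4245-4322) is the last resort around the OOM killer, with __alloc_pages_cpuset_fallback() (4225-4238) as its reserve-raiding helper. Condensed flow per the matches, with locking and several bail-outs omitted:

	/* one more try under the high watermark, hardwalled, no reclaim */
	page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
				      ~__GFP_DIRECT_RECLAIM, order,
				      ALLOC_WMARK_HIGH | ALLOC_CPUSET, ac);
	if (page)
		goto out;
	/* callers that can cope with failure don't get to kill anything */
	if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE))
		goto out;
	if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
		*did_some_progress = 1;
		/* must-not-fail allocations may raid the reserves */
		if (gfp_mask & __GFP_NOFAIL)
			page = __alloc_pages_cpuset_fallback(gfp_mask, order,
							     ALLOC_NO_WATERMARKS, ac);
	}
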
4339 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_compact() argument
4353 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac, in __alloc_pages_direct_compact()
4369 prep_new_page(page, order, gfp_mask, alloc_flags); in __alloc_pages_direct_compact()
4373 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_direct_compact()
4477 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_compact() argument
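
__alloc_pages_direct_compact() (4339-4373) runs the compactor and then tries to take the newly assembled high-order page; the match at 4477 is the empty stub compiled when CONFIG_COMPACTION is off. The core, roughly:

	*compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
					       prio, &page);
	if (page)		/* compaction captured a page directly */
		prep_new_page(page, order, gfp_mask, alloc_flags);
	else			/* otherwise look on the freelists again */
		page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
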
4517 static bool __need_reclaim(gfp_t gfp_mask) in __need_reclaim() argument
4520 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) in __need_reclaim()
4527 if (gfp_mask & __GFP_NOLOCKDEP) in __need_reclaim()
4543 void fs_reclaim_acquire(gfp_t gfp_mask) in fs_reclaim_acquire() argument
4545 gfp_mask = current_gfp_context(gfp_mask); in fs_reclaim_acquire()
4547 if (__need_reclaim(gfp_mask)) { in fs_reclaim_acquire()
4548 if (gfp_mask & __GFP_FS) in fs_reclaim_acquire()
4560 void fs_reclaim_release(gfp_t gfp_mask) in fs_reclaim_release() argument
4562 gfp_mask = current_gfp_context(gfp_mask); in fs_reclaim_release()
4564 if (__need_reclaim(gfp_mask)) { in fs_reclaim_release()
4565 if (gfp_mask & __GFP_FS) in fs_reclaim_release()
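
__need_reclaim() and the fs_reclaim pair (4517-4565) are lockdep annotation, not real locking: a direct-reclaim-capable allocation "acquires" a pseudo-lock so lockdep can flag reclaim-vs-filesystem deadlocks, and __GFP_NOLOCKDEP opts a call site out. Sketch (fs_reclaim_release() is symmetric; MMU-notifier bookkeeping omitted):

	static bool __need_reclaim(gfp_t gfp_mask)
	{
		if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
			return false;	/* this allocation can't reclaim */
		if (current->flags & PF_MEMALLOC)
			return false;	/* already inside reclaim */
		if (gfp_mask & __GFP_NOLOCKDEP)
			return false;	/* caller opted out */
		return true;
	}

	void fs_reclaim_acquire(gfp_t gfp_mask)
	{
		gfp_mask = current_gfp_context(gfp_mask); /* fold in PF_MEMALLOC_* */
		if (__need_reclaim(gfp_mask) && (gfp_mask & __GFP_FS))
			__fs_reclaim_acquire(_RET_IP_);
	}
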
4574 __perform_reclaim(gfp_t gfp_mask, unsigned int order, in __perform_reclaim() argument
4585 fs_reclaim_acquire(gfp_mask); in __perform_reclaim()
4588 progress = try_to_free_pages(ac->zonelist, order, gfp_mask, in __perform_reclaim()
4592 fs_reclaim_release(gfp_mask); in __perform_reclaim()
4602 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_reclaim() argument
4609 *did_some_progress = __perform_reclaim(gfp_mask, order, ac); in __alloc_pages_direct_reclaim()
4614 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_direct_reclaim()
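
__perform_reclaim() (4574-4592) brackets try_to_free_pages() with the fs_reclaim annotations above; __alloc_pages_direct_reclaim() (4602-4614) then rechecks the freelists. Condensed (the drain-and-retry path for per-cpu lists is omitted):

	*did_some_progress = __perform_reclaim(gfp_mask, order, ac);
	if (unlikely(!(*did_some_progress)))
		return NULL;	/* nothing freed, nothing to retry for */

	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
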
4631 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask, in wake_all_kswapds() argument
4642 wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx); in wake_all_kswapds()
4648 gfp_to_alloc_flags(gfp_t gfp_mask) in gfp_to_alloc_flags() argument
4667 (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM)); in gfp_to_alloc_flags()
4669 if (gfp_mask & __GFP_ATOMIC) { in gfp_to_alloc_flags()
4674 if (!(gfp_mask & __GFP_NOMEMALLOC)) in gfp_to_alloc_flags()
4684 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags); in gfp_to_alloc_flags()
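
gfp_to_alloc_flags() (4648-4684) is the main mask-to-policy translation; wake_all_kswapds() (4631-4642) passes the mask along when it kicks background reclaim. A reduced sketch (the realtime-task case is elided; as above, __GFP_HIGH and __GFP_KSWAPD_RECLAIM map bit-for-bit onto ALLOC_HIGH and ALLOC_KSWAPD):

	unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;

	alloc_flags |= (__force int)
		(gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM));

	if (gfp_mask & __GFP_ATOMIC) {
		if (!(gfp_mask & __GFP_NOMEMALLOC))
			alloc_flags |= ALLOC_HARDER;	/* dig deeper into reserves */
		alloc_flags &= ~ALLOC_CPUSET;		/* atomics ignore cpusets */
	}

	return gfp_to_alloc_flags_cma(gfp_mask, alloc_flags);
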
4708 static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask) in __gfp_pfmemalloc_flags() argument
4710 if (unlikely(gfp_mask & __GFP_NOMEMALLOC)) in __gfp_pfmemalloc_flags()
4712 if (gfp_mask & __GFP_MEMALLOC) in __gfp_pfmemalloc_flags()
4726 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask) in gfp_pfmemalloc_allowed() argument
4728 return !!__gfp_pfmemalloc_flags(gfp_mask); in gfp_pfmemalloc_allowed()
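
The pfmemalloc helpers (4708-4728) decide who may ignore watermarks entirely. __GFP_NOMEMALLOC always wins over __GFP_MEMALLOC; the task-context cases (PF_MEMALLOC, OOM victims) are elided here:

	static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask)
	{
		if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
			return 0;			/* explicitly forbidden */
		if (gfp_mask & __GFP_MEMALLOC)
			return ALLOC_NO_WATERMARKS;	/* full reserve access */
		return 0;
	}

	bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
	{
		return !!__gfp_pfmemalloc_flags(gfp_mask);
	}
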
4742 should_reclaim_retry(gfp_t gfp_mask, unsigned order, in should_reclaim_retry() argument
4847 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, in __alloc_pages_slowpath() argument
4850 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM; in __alloc_pages_slowpath()
4866 if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) == in __alloc_pages_slowpath()
4868 gfp_mask &= ~__GFP_ATOMIC; in __alloc_pages_slowpath()
4881 alloc_flags = gfp_to_alloc_flags(gfp_mask); in __alloc_pages_slowpath()
4899 if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) { in __alloc_pages_slowpath()
4908 wake_all_kswapds(order, gfp_mask, ac); in __alloc_pages_slowpath()
4914 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_slowpath()
4930 && !gfp_pfmemalloc_allowed(gfp_mask)) { in __alloc_pages_slowpath()
4931 page = __alloc_pages_direct_compact(gfp_mask, order, in __alloc_pages_slowpath()
4942 if (costly_order && (gfp_mask & __GFP_NORETRY)) { in __alloc_pages_slowpath()
4976 wake_all_kswapds(order, gfp_mask, ac); in __alloc_pages_slowpath()
4978 reserve_flags = __gfp_pfmemalloc_flags(gfp_mask); in __alloc_pages_slowpath()
4980 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags); in __alloc_pages_slowpath()
4994 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_slowpath()
5007 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac, in __alloc_pages_slowpath()
5013 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, in __alloc_pages_slowpath()
5019 if (gfp_mask & __GFP_NORETRY) in __alloc_pages_slowpath()
5026 if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL)) in __alloc_pages_slowpath()
5029 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags, in __alloc_pages_slowpath()
5051 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); in __alloc_pages_slowpath()
5058 (gfp_mask & __GFP_NOMEMALLOC))) in __alloc_pages_slowpath()
5076 if (gfp_mask & __GFP_NOFAIL) { in __alloc_pages_slowpath()
5105 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac); in __alloc_pages_slowpath()
5113 warn_alloc(gfp_mask, ac->nodemask, in __alloc_pages_slowpath()
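
__alloc_pages_slowpath() (4847-5113) ties all of the above together, with should_reclaim_retry() (4742) as its retry oracle. A heavily condensed outline in the order the matches appear; this is a map of the function, not compilable code (retry bookkeeping, cpuset rechecks, and most exit paths are dropped):

	alloc_flags = gfp_to_alloc_flags(gfp_mask);
	wake_all_kswapds(order, gfp_mask, ac);
	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
	if (!page && can_direct_reclaim && !gfp_pfmemalloc_allowed(gfp_mask))
		page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags,
						    ac, INIT_COMPACT_PRIORITY,
						    &compact_result);
retry:
	reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
	if (reserve_flags)
		alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags);
	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
	if (!page)
		page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags,
						    ac, &did_some_progress);
	if (!page)
		page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags,
						    ac, compact_priority,
						    &compact_result);
	if (!page && !(gfp_mask & __GFP_NORETRY) &&
	    should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
				 did_some_progress > 0, &no_progress_loops))
		goto retry;
	if (!page)
		page = __alloc_pages_may_oom(gfp_mask, order, ac,
					     &did_some_progress);
	if (!page && (gfp_mask & __GFP_NOFAIL))
		page = __alloc_pages_cpuset_fallback(gfp_mask, order,
						     ALLOC_HARDER, ac);
	if (!page)
		warn_alloc(gfp_mask, ac->nodemask,
			   "page allocation failure: order:%u", order);
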
5119 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, in prepare_alloc_pages() argument
5124 ac->highest_zoneidx = gfp_zone(gfp_mask); in prepare_alloc_pages()
5125 ac->zonelist = node_zonelist(preferred_nid, gfp_mask); in prepare_alloc_pages()
5127 ac->migratetype = gfp_migratetype(gfp_mask); in prepare_alloc_pages()
5141 fs_reclaim_acquire(gfp_mask); in prepare_alloc_pages()
5142 fs_reclaim_release(gfp_mask); in prepare_alloc_pages()
5144 might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM); in prepare_alloc_pages()
5146 if (should_fail_alloc_page(gfp_mask, order)) in prepare_alloc_pages()
5149 *alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags); in prepare_alloc_pages()
5152 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE); in prepare_alloc_pages()
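
prepare_alloc_pages() (5119-5152) derives the whole allocation context from gfp_mask before the fast path runs. Note the back-to-back fs_reclaim acquire/release at 5141-5142, which exists purely to teach lockdep about the potential reclaim. Condensed (cpuset and preferred-zone setup omitted):

	ac->highest_zoneidx = gfp_zone(gfp_mask);
	ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
	ac->migratetype = gfp_migratetype(gfp_mask);

	fs_reclaim_acquire(gfp_mask);	/* lockdep-only: no real lock taken */
	fs_reclaim_release(gfp_mask);
	might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);

	if (should_fail_alloc_page(gfp_mask, order))
		return false;

	*alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags);
	ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
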
5414 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) in __get_free_pages() argument
5418 page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order); in __get_free_pages()
5425 unsigned long get_zeroed_page(gfp_t gfp_mask) in get_zeroed_page() argument
5427 return __get_free_pages(gfp_mask | __GFP_ZERO, 0); in get_zeroed_page()
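
__get_free_pages() (5414-5418) masks out __GFP_HIGHMEM because it must return a kernel virtual address, and get_zeroed_page() (5425-5427) is just __get_free_pages() with __GFP_ZERO added. A hypothetical caller (function name and sizes are illustrative only):

	static int example_init(void)
	{
		unsigned long buf = __get_free_pages(GFP_KERNEL, 2);	/* 2^2 = 4 pages */
		unsigned long zp  = get_zeroed_page(GFP_KERNEL);

		if (!buf || !zp) {
			free_pages(buf, 2);	/* both helpers ignore a 0 address */
			free_page(zp);
			return -ENOMEM;
		}
		/* ... use buf and zp ... */
		free_page(zp);
		free_pages(buf, 2);
		return 0;
	}
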
5483 gfp_t gfp_mask) in __page_frag_cache_refill() argument
5486 gfp_t gfp = gfp_mask; in __page_frag_cache_refill()
5489 gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY | in __page_frag_cache_refill()
5491 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, in __page_frag_cache_refill()
5513 unsigned int fragsz, gfp_t gfp_mask, in page_frag_alloc_align() argument
5522 page = __page_frag_cache_refill(nc, gfp_mask); in page_frag_alloc_align()
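
The page-fragment allocator (5483-5522) refills its cache with flags chosen so the large attempt fails fast and quietly. Note how the original mask is saved in gfp first (5486) so the order-0 fallback is not polluted by the added flags:

	gfp_t gfp = gfp_mask;		/* keep the caller's mask for the fallback */

	gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
		    __GFP_NOMEMALLOC;
	page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
				PAGE_FRAG_CACHE_MAX_ORDER);
	if (unlikely(!page))
		page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
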
5616 void *alloc_pages_exact(size_t size, gfp_t gfp_mask) in alloc_pages_exact() argument
5621 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM))) in alloc_pages_exact()
5622 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM); in alloc_pages_exact()
5624 addr = __get_free_pages(gfp_mask, order); in alloc_pages_exact()
5641 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) in alloc_pages_exact_nid() argument
5646 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM))) in alloc_pages_exact_nid()
5647 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM); in alloc_pages_exact_nid()
5649 p = alloc_pages_node(nid, gfp_mask, order); in alloc_pages_exact_nid()
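
alloc_pages_exact() and its NUMA variant (5616-5649) round the request up to a page order, then hand back the unused tail pages one by one; that is why both warn about and strip __GFP_COMP (the pages must not be compound) and __GFP_HIGHMEM (the helpers deal in kernel virtual addresses). A hypothetical caller:

	static void *make_table(void)
	{
		/* 17 KiB: deliberately not a power-of-two number of pages */
		void *p = alloc_pages_exact(17 * 1024, GFP_KERNEL | __GFP_ZERO);

		if (!p)
			return NULL;
		/* ... later: free_pages_exact(p, 17 * 1024); */
		return p;
	}
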
8967 .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL, in __alloc_contig_migrate_range()
9037 unsigned migratetype, gfp_t gfp_mask) in alloc_contig_range() argument
9050 .gfp_mask = current_gfp_context(gfp_mask), in alloc_contig_range()
9168 unsigned long nr_pages, gfp_t gfp_mask) in __alloc_contig_pages() argument
9173 gfp_mask); in __alloc_contig_pages()
9225 struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask, in alloc_contig_pages() argument
9233 zonelist = node_zonelist(nid, gfp_mask); in alloc_contig_pages()
9235 gfp_zone(gfp_mask), nodemask) { in alloc_contig_pages()
9250 gfp_mask); in alloc_contig_pages()
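
Finally, the contiguous-range allocator (8967-9250): alloc_contig_pages() walks the zonelist implied by gfp_mask (9233-9235) looking for a pfn range it can empty, and alloc_contig_range() performs the migration, folding in current_gfp_context() at 9050 so memalloc_no*_save() scopes are honored. A hedged usage sketch (requires CONFIG_CONTIG_ALLOC; the function name is illustrative):

	static struct page *grab_big_buffer(unsigned long nr_pages, int nid)
	{
		struct page *page = alloc_contig_pages(nr_pages, GFP_KERNEL,
						       nid, NULL);
		if (!page)
			return NULL;
		/* ... later: free_contig_range(page_to_pfn(page), nr_pages); */
		return page;
	}
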