Lines matching refs: gfp_mask (kernel page allocator, mm/page_alloc.c). The leading number on each entry is the line number of the match in that file.
3548 static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3552 if (gfp_mask & __GFP_NOFAIL)
3554 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
3557 (gfp_mask & __GFP_DIRECT_RECLAIM))
3588 static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3595 noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3597 return __should_fail_alloc_page(gfp_mask, order);
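The block at 3548-3597 is the fail_page_alloc fault-injection hook: __GFP_NOFAIL allocations are never failed, and the ignore_gfp_highmem/ignore_gfp_reclaim knobs exempt highmem and reclaim-capable allocations. A condensed sketch of that gating, assuming the surrounding code matches the mainline fail_page_alloc implementation (the min_order check and the should_fail() call come from that assumption, not from the matches above):

/* Sketch reconstructed around the gfp_mask checks listed above;
 * not copied verbatim from this file. */
static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
	if (order < fail_page_alloc.min_order)
		return false;
	if (gfp_mask & __GFP_NOFAIL)
		return false;
	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
		return false;
	if (fail_page_alloc.ignore_gfp_reclaim &&
	    (gfp_mask & __GFP_DIRECT_RECLAIM))
		return false;

	return should_fail(&fail_page_alloc.attr, 1 << order);
}
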
3703 unsigned int alloc_flags, gfp_t gfp_mask)
3735 if (unlikely(!order && (gfp_mask & __GFP_ATOMIC) && z->watermark_boost
3779 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
3787 alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM);
3810 static inline unsigned int current_alloc_flags(gfp_t gfp_mask,
3817 gfp_migratetype(gfp_mask) == get_cma_migratetype())
3829 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
3851 !__cpuset_zone_allowed(zone, gfp_mask))
3901 gfp_mask)) {
3923 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
3943 gfp_mask, alloc_flags, ac->migratetype);
3945 prep_new_page(page, order, gfp_mask, alloc_flags);
3978 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
3987 if (!(gfp_mask & __GFP_NOMEMALLOC))
3991 if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
3997 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
4003 if ((gfp_mask & __GFP_NOWARN) ||
4005 ((gfp_mask & __GFP_DMA) && !has_managed_dma()))
4012 current->comm, &vaf, gfp_mask, &gfp_mask,
4019 warn_alloc_show_mem(gfp_mask, nodemask);
4023 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
4029 page = get_page_from_freelist(gfp_mask, order,
4036 page = get_page_from_freelist(gfp_mask, order,
4043 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
4050 .gfp_mask = gfp_mask,
4074 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
4094 if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE))
4112 if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
4119 if (gfp_mask & __GFP_NOFAIL)
4120 page = __alloc_pages_cpuset_fallback(gfp_mask, order,
4137 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
4151 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
4165 prep_new_page(page, order, gfp_mask, alloc_flags);
4169 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4270 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
4310 static bool __need_fs_reclaim(gfp_t gfp_mask)
4312 gfp_mask = current_gfp_context(gfp_mask);
4315 if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
4323 if (!(gfp_mask & __GFP_FS))
4326 if (gfp_mask & __GFP_NOLOCKDEP)
4342 void fs_reclaim_acquire(gfp_t gfp_mask)
4344 if (__need_fs_reclaim(gfp_mask))
4349 void fs_reclaim_release(gfp_t gfp_mask)
4351 if (__need_fs_reclaim(gfp_mask))
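fs_reclaim_acquire()/fs_reclaim_release() (4310-4351) only take the lockdep map when the gfp_mask permits direct reclaim that may recurse into the filesystem: __GFP_DIRECT_RECLAIM and __GFP_FS set, __GFP_NOLOCKDEP clear, after current_gfp_context() has applied the task's scoped NOIO/NOFS state. A hedged caller-side sketch of the usual way to satisfy that check while holding filesystem locks; the helper name is made up for illustration:

#include <linux/gfp.h>
#include <linux/sched/mm.h>

/* Hypothetical helper: allocate while a filesystem transaction is held.
 * memalloc_nofs_save() makes current_gfp_context() strip __GFP_FS, so
 * __need_fs_reclaim() above sees a NOFS allocation and stays quiet. */
static struct page *fs_safe_alloc_page(void)
{
	unsigned int nofs_flags = memalloc_nofs_save();
	struct page *page = alloc_page(GFP_KERNEL);

	memalloc_nofs_restore(nofs_flags);
	return page;
}
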
4383 __perform_reclaim(gfp_t gfp_mask, unsigned int order,
4394 fs_reclaim_acquire(gfp_mask);
4397 progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
4401 fs_reclaim_release(gfp_mask);
4411 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
4418 *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
4423 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4446 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
4457 wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx);
4463 gfp_to_alloc_flags(gfp_t gfp_mask)
4482 (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM));
4484 if (gfp_mask & __GFP_ATOMIC) {
4489 if (!(gfp_mask & __GFP_NOMEMALLOC))
4499 alloc_flags = current_alloc_flags(gfp_mask, alloc_flags);
4523 static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask)
4525 if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
4527 if (gfp_mask & __GFP_MEMALLOC)
4541 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
4543 return !!__gfp_pfmemalloc_flags(gfp_mask);
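__gfp_pfmemalloc_flags()/gfp_pfmemalloc_allowed() (4523-4543) decide whether an allocation may ignore the watermarks and dip into the emergency reserves. A condensed sketch of the precedence, assuming the body matches the usual mainline logic (ALLOC_NO_WATERMARKS, ALLOC_OOM and oom_reserves_allowed() come from that assumption):

/* Sketch only: __GFP_NOMEMALLOC always wins, then __GFP_MEMALLOC,
 * then the task state (PF_MEMALLOC, OOM victim) is consulted. */
static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask)
{
	if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
		return 0;
	if (gfp_mask & __GFP_MEMALLOC)
		return ALLOC_NO_WATERMARKS;
	if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
		return ALLOC_NO_WATERMARKS;
	if (!in_interrupt()) {
		if (current->flags & PF_MEMALLOC)
			return ALLOC_NO_WATERMARKS;
		else if (oom_reserves_allowed(current))
			return ALLOC_OOM;
	}

	return 0;
}
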
4557 should_reclaim_retry(gfp_t gfp_mask, unsigned order,
4681 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
4684 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
4704 if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
4706 gfp_mask &= ~__GFP_ATOMIC;
4720 alloc_flags = gfp_to_alloc_flags(gfp_mask);
4734 wake_all_kswapds(order, gfp_mask, ac);
4740 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4756 && !gfp_pfmemalloc_allowed(gfp_mask)) {
4757 page = __alloc_pages_direct_compact(gfp_mask, order,
4768 if (costly_order && (gfp_mask & __GFP_NORETRY)) {
4802 wake_all_kswapds(order, gfp_mask, ac);
4804 reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
4806 alloc_flags = current_alloc_flags(gfp_mask, reserve_flags);
4820 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4836 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
4845 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
4851 if (gfp_mask & __GFP_NORETRY)
4858 if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL))
4861 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
4887 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
4894 (gfp_mask & __GFP_NOMEMALLOC)))
4916 if (gfp_mask & __GFP_NOFAIL) {
4945 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac);
4953 warn_alloc(gfp_mask, ac->nodemask,
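__alloc_pages_slowpath() (4681-4953) keys its retry policy almost entirely off gfp_mask: __GFP_NORETRY bails out after one light attempt, __GFP_RETRY_MAYFAIL keeps retrying but never invokes the OOM killer, and __GFP_NOFAIL loops until it succeeds. A hedged caller-side sketch of how the first two are commonly combined; the helper and the order chosen here are purely illustrative:

#include <linux/gfp.h>

/* Illustrative only: prefer a quick failure over stalling in reclaim,
 * then retry harder while still allowing the allocation to fail. */
static void *alloc_scratch_buffer(void)
{
	void *buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_NORETRY, 2);

	if (buf)
		return buf;
	return (void *)__get_free_pages(GFP_KERNEL | __GFP_RETRY_MAYFAIL, 2);
	/* caller releases with free_pages((unsigned long)buf, 2) */
}
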
4959 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
4964 ac->highest_zoneidx = gfp_zone(gfp_mask);
4965 ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
4967 ac->migratetype = gfp_migratetype(gfp_mask);
4981 fs_reclaim_acquire(gfp_mask);
4982 fs_reclaim_release(gfp_mask);
4984 might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
4987 if (gfp_mask & __GFP_KSWAPD_RECLAIM)
4991 if (should_fail_alloc_page(gfp_mask, order))
4994 *alloc_flags = current_alloc_flags(gfp_mask, *alloc_flags);
4997 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
5014 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
5027 WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
5031 gfp_mask &= gfp_allowed_mask;
5032 alloc_mask = gfp_mask;
5033 if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
5040 alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp_mask);
5053 alloc_mask = current_gfp_context(gfp_mask);
5065 if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
5066 unlikely(__memcg_kmem_charge_page(page, gfp_mask, order) != 0)) {
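__alloc_pages_nodemask() (5014-5066) is the exported entry point: the caller's gfp_mask is clamped by gfp_allowed_mask, re-evaluated with current_gfp_context() before the slowpath, and, when __GFP_ACCOUNT is set, the resulting page is charged to the current memory cgroup via __memcg_kmem_charge_page(). A minimal usage sketch of the accounting flag:

#include <linux/gfp.h>

/* GFP_KERNEL_ACCOUNT is GFP_KERNEL | __GFP_ACCOUNT: the page is charged
 * to, and limited by, the caller's memory cgroup. */
static struct page *alloc_accounted_page(void)
{
	return alloc_pages(GFP_KERNEL_ACCOUNT, 0);
}
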
5082 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
5086 page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order);
5093 unsigned long get_zeroed_page(gfp_t gfp_mask)
5095 return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
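__get_free_pages() and get_zeroed_page() (5082-5095) return a kernel virtual address rather than a struct page, which is why __GFP_HIGHMEM is masked off at 5086. Minimal usage sketch:

#include <linux/gfp.h>

/* Illustrative only: one zeroed lowmem page, addressed directly. */
static int demo_zeroed_page(void)
{
	unsigned long addr = get_zeroed_page(GFP_KERNEL);

	if (!addr)
		return -ENOMEM;
	/* ... use the memory at (void *)addr ... */
	free_page(addr);
	return 0;
}
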
5142 gfp_t gfp_mask)
5145 gfp_t gfp = gfp_mask;
5148 gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
5150 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
5197 unsigned int fragsz, gfp_t gfp_mask)
5205 page = __page_frag_cache_refill(nc, gfp_mask);
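Lines 5142-5205 belong to the page-fragment allocator: __page_frag_cache_refill() grabs a high-order backing page with a deliberately relaxed gfp_mask (the matches above show __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY being OR-ed in), and page_frag_alloc() then carves small fragments out of it. A hedged usage sketch; real users such as the network stack keep one cache per CPU, and the file-scope cache here is only for illustration:

#include <linux/gfp.h>
#include <linux/mm.h>

static struct page_frag_cache demo_frag_cache;

/* Illustrative only: small fragments, typically requested from
 * atomic context, hence GFP_ATOMIC. */
static void *demo_frag_alloc(unsigned int size)
{
	return page_frag_alloc(&demo_frag_cache, size, GFP_ATOMIC);
	/* later: page_frag_free(ptr); */
}
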
5306 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
5318 void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
5323 if (WARN_ON_ONCE(gfp_mask & __GFP_COMP))
5324 gfp_mask &= ~__GFP_COMP;
5326 addr = __get_free_pages(gfp_mask, order);
5336 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
5343 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
5348 if (WARN_ON_ONCE(gfp_mask & __GFP_COMP))
5349 gfp_mask &= ~__GFP_COMP;
5351 p = alloc_pages_node(nid, gfp_mask, order);
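alloc_pages_exact() and alloc_pages_exact_nid() (5306-5351) round the request up to a power-of-two order internally and free the unused tail pages, which is why a __GFP_COMP in gfp_mask triggers a warning and is stripped. Minimal usage sketch:

#include <linux/gfp.h>

/* Illustrative only: an exact-size, physically contiguous buffer;
 * release it later with free_pages_exact(buf, bytes). */
static void *demo_exact_alloc(size_t bytes)
{
	return alloc_pages_exact(bytes, GFP_KERNEL);
}
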
8599 .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
8645 * @gfp_mask: GFP mask to use during compaction
8659 unsigned migratetype, gfp_t gfp_mask)
8672 .gfp_mask = current_gfp_context(gfp_mask),
8792 unsigned long nr_pages, gfp_t gfp_mask)
8797 gfp_mask);
8837 * @gfp_mask: GFP mask to limit search and used during compaction
8855 struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
8863 zonelist = node_zonelist(nid, gfp_mask);
8865 gfp_zone(gfp_mask), nodemask) {
8880 gfp_mask);
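alloc_contig_pages() (8837-8880) walks the zonelist derived from gfp_mask looking for a pfn range it can claim and hands it to the contiguous-allocation machinery above, whose migration step (8599, 8672) uses current_gfp_context(gfp_mask) plus a GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL mask for moving pages out of the way. A hedged usage sketch, assuming a kernel built with CONFIG_CONTIG_ALLOC (the function is not necessarily exported to modules):

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/numa.h>

/* Illustrative only: a physically contiguous run of pages that need
 * not be a power-of-two in size. */
static int demo_contig_alloc(unsigned long nr_pages)
{
	struct page *page = alloc_contig_pages(nr_pages, GFP_KERNEL,
					       NUMA_NO_NODE, NULL);

	if (!page)
		return -ENOMEM;
	/* ... use the range starting at page ... */
	free_contig_range(page_to_pfn(page), nr_pages);
	return 0;
}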