Lines matching refs: gfp_mask (mm/page_alloc.c)
2832 noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
2834 return __should_fail_alloc_page(gfp_mask, order);
2956 unsigned int alloc_flags, gfp_t gfp_mask)
3035 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
3043 alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM);
3066 /* Must be called after current_gfp_context() which can change gfp_mask */
3067 static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask,
3071 if (gfp_migratetype(gfp_mask) == get_cma_migratetype())
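The comment at line 3066 says gfp_to_alloc_flags_cma() must only see a mask that has already been through current_gfp_context(), because the task's memalloc_noio/nofs/pin scopes can strip bits (for example __GFP_MOVABLE under PF_MEMALLOC_PIN) and change the resulting migratetype. A minimal sketch of what current_gfp_context() folds in, using the scope helpers from <linux/sched/mm.h>; the function and variable names are illustrative, and alloc_pages() applies the same filtering internally on its slow path anyway:

#include <linux/gfp.h>
#include <linux/sched/mm.h>

static struct page *scoped_alloc_sketch(gfp_t gfp_mask, unsigned int order)
{
	unsigned int noio_flags;
	struct page *page;

	noio_flags = memalloc_noio_save();	/* enter a no-I/O reclaim scope */

	/*
	 * current_gfp_context() folds the task's NOIO/NOFS/PIN scope into the
	 * caller's mask, so __GFP_IO and __GFP_FS are cleared from the
	 * effective mask here even if gfp_mask still carries them.
	 */
	page = alloc_pages(current_gfp_context(gfp_mask), order);

	memalloc_noio_restore(noio_flags);
	return page;
}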
3082 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
3105 !__cpuset_zone_allowed(zone, gfp_mask))
3155 gfp_mask)) {
3182 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
3202 gfp_mask, alloc_flags, ac->migratetype);
3204 prep_new_page(page, order, gfp_mask, alloc_flags);
3242 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
3251 if (!(gfp_mask & __GFP_NOMEMALLOC))
3255 if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
3258 __show_mem(filter, nodemask, gfp_zone(gfp_mask));
3261 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
3267 if ((gfp_mask & __GFP_NOWARN) ||
3269 ((gfp_mask & __GFP_DMA) && !has_managed_dma()))
3276 current->comm, &vaf, gfp_mask, &gfp_mask,
3283 warn_alloc_show_mem(gfp_mask, nodemask);
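warn_alloc() (line 3261) is the allocation-failure report path, and the first thing it checks is __GFP_NOWARN. A small caller-side sketch (function name illustrative) of the common pattern that relies on this: try an opportunistic high-order allocation quietly, then fall back to order-0:

#include <linux/gfp.h>

/*
 * __GFP_NOWARN keeps warn_alloc() quiet if the order-4 attempt fails and
 * __GFP_NORETRY keeps the attempt cheap; the caller then falls back.
 */
static struct page *try_big_then_small(void)
{
	struct page *page;

	page = alloc_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY, 4);
	if (!page)
		page = alloc_pages(GFP_KERNEL, 0);
	return page;
}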
3287 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
3293 page = get_page_from_freelist(gfp_mask, order,
3300 page = get_page_from_freelist(gfp_mask, order,
3307 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
3314 .gfp_mask = gfp_mask,
3338 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
3358 if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE))
3377 WARN_ON_ONCE_GFP(gfp_mask & __GFP_NOFAIL, gfp_mask)) {
3384 if (gfp_mask & __GFP_NOFAIL)
3385 page = __alloc_pages_cpuset_fallback(gfp_mask, order,
3402 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3417 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
3434 prep_new_page(page, order, gfp_mask, alloc_flags);
3438 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3527 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3567 static bool __need_reclaim(gfp_t gfp_mask)
3570 if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
3577 if (gfp_mask & __GFP_NOLOCKDEP)
3593 void fs_reclaim_acquire(gfp_t gfp_mask)
3595 gfp_mask = current_gfp_context(gfp_mask);
3597 if (__need_reclaim(gfp_mask)) {
3598 if (gfp_mask & __GFP_FS)
3610 void fs_reclaim_release(gfp_t gfp_mask)
3612 gfp_mask = current_gfp_context(gfp_mask);
3614 if (__need_reclaim(gfp_mask)) {
3615 if (gfp_mask & __GFP_FS)
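fs_reclaim_acquire()/fs_reclaim_release() (lines 3593/3610) are exported lockdep annotations that mark a region as "may enter reclaim" for masks that pass __need_reclaim(). Outside the allocator they are typically used to prime lockdep so a bad lock ordering against reclaim is reported immediately rather than under memory pressure. A hedged sketch with a hypothetical pool lock:

#include <linux/gfp.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>

static DEFINE_MUTEX(example_pool_lock);	/* hypothetical lock */

static void example_prime_reclaim_lockdep(void)
{
	/*
	 * Tell lockdep "reclaim is in progress" and take the lock once, so a
	 * later GFP_KERNEL allocation made while holding example_pool_lock
	 * (which could recurse into reclaim) is flagged now instead of only
	 * when the system is actually under memory pressure.
	 */
	fs_reclaim_acquire(GFP_KERNEL);
	mutex_lock(&example_pool_lock);
	mutex_unlock(&example_pool_lock);
	fs_reclaim_release(GFP_KERNEL);
}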
3648 __perform_reclaim(gfp_t gfp_mask, unsigned int order,
3658 fs_reclaim_acquire(gfp_mask);
3661 progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
3665 fs_reclaim_release(gfp_mask);
3674 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
3683 *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
3688 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3707 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
3720 wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx);
3727 gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order)
3746 (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM));
3748 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
3753 if (!(gfp_mask & __GFP_NOMEMALLOC)) {
3770 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags);
3794 static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask)
3796 if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
3798 if (gfp_mask & __GFP_MEMALLOC)
3812 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
3814 return !!__gfp_pfmemalloc_flags(gfp_mask);
3828 should_reclaim_retry(gfp_t gfp_mask, unsigned order,
3933 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
3936 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
3961 alloc_flags = gfp_to_alloc_flags(gfp_mask, order);
3979 if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) {
3988 wake_all_kswapds(order, gfp_mask, ac);
3994 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4010 && !gfp_pfmemalloc_allowed(gfp_mask)) {
4011 page = __alloc_pages_direct_compact(gfp_mask, order,
4022 if (costly_order && (gfp_mask & __GFP_NORETRY)) {
4056 wake_all_kswapds(order, gfp_mask, ac);
4058 reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
4060 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags) |
4075 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4088 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
4094 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
4100 if (gfp_mask & __GFP_NORETRY)
4107 if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL))
4110 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
4136 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
4143 (gfp_mask & __GFP_NOMEMALLOC)))
4165 if (gfp_mask & __GFP_NOFAIL) {
4170 if (WARN_ON_ONCE_GFP(!can_direct_reclaim, gfp_mask))
4178 WARN_ON_ONCE_GFP(current->flags & PF_MEMALLOC, gfp_mask);
4186 WARN_ON_ONCE_GFP(costly_order, gfp_mask);
4195 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_MIN_RESERVE, ac);
4203 warn_alloc(gfp_mask, ac->nodemask,
4209 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
4214 ac->highest_zoneidx = gfp_zone(gfp_mask);
4215 ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
4217 ac->migratetype = gfp_migratetype(gfp_mask);
4231 might_alloc(gfp_mask);
4234 if (gfp_mask & __GFP_KSWAPD_RECLAIM)
4238 if (should_fail_alloc_page(gfp_mask, order))
4241 *alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags);
4244 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
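prepare_alloc_pages() (line 4209) is where the caller's bits become allocation-context state: gfp_zone() picks highest_zoneidx, node_zonelist() the zonelist, gfp_migratetype() the free-list type, and __GFP_WRITE turns on dirty-page spreading (line 4244). A caller-side sketch (function name illustrative) of the kind of mask those lines decode:

#include <linux/gfp.h>
#include <linux/pagemap.h>

/*
 * mapping_gfp_mask() is usually GFP_HIGHUSER_MOVABLE, which gfp_zone()
 * resolves to the highest allowed zone and gfp_migratetype() to
 * MIGRATE_MOVABLE; adding __GFP_WRITE makes prepare_alloc_pages() set
 * ac->spread_dirty_pages so dirty page-cache pages are spread across nodes.
 */
static struct page *example_pagecache_page(struct address_space *mapping)
{
	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_WRITE;

	return alloc_pages(gfp, 0);
}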
4512 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
4516 page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order);
4523 unsigned long get_zeroed_page(gfp_t gfp_mask)
4525 return __get_free_page(gfp_mask | __GFP_ZERO);
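__get_free_pages() (line 4512) masks off __GFP_HIGHMEM because it returns a kernel virtual address rather than a struct page, and get_zeroed_page() is simply __get_free_page() with __GFP_ZERO added. A minimal usage sketch (names illustrative), paired with free_page():

#include <linux/errno.h>
#include <linux/gfp.h>

static int example_scratch_buffer(void)
{
	unsigned long buf;

	/* One zeroed, directly addressable page. */
	buf = get_zeroed_page(GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* ... use the buffer ... */

	free_page(buf);
	return 0;
}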
4584 gfp_t gfp_mask)
4587 gfp_t gfp = gfp_mask;
4590 gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
4592 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
4614 unsigned int fragsz, gfp_t gfp_mask,
4623 page = __page_frag_cache_refill(nc, gfp_mask);
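Lines 4584-4623 are the page-fragment cache: __page_frag_cache_refill() adds __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY to the caller's mask when it tries a higher-order backing page. A hedged sketch of the consumer side using the exported page_frag_alloc()/page_frag_free() API; the cache variable and function names are illustrative:

#include <linux/gfp.h>
#include <linux/mm.h>

static struct page_frag_cache example_frag_cache;

static void *example_alloc_frag(unsigned int size)
{
	/*
	 * The gfp_mask passed here only matters when the cache must be
	 * refilled; __page_frag_cache_refill() (line 4584) then augments it.
	 */
	return page_frag_alloc(&example_frag_cache, size, GFP_ATOMIC);
}

static void example_free_frag(void *data)
{
	page_frag_free(data);
}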
4721 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
4733 void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
4738 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM)))
4739 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM);
4741 addr = __get_free_pages(gfp_mask, order);
4751 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
4758 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
4763 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM)))
4764 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM);
4766 p = alloc_pages_node(nid, gfp_mask, order);
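The kernel-doc fragments at lines 4721 and 4751 state the one hard rule for these helpers: the mask must not contain __GFP_COMP (the excerpts above show it being warned about and stripped). A minimal usage sketch (names illustrative), paired with free_pages_exact():

#include <linux/gfp.h>

static void *example_alloc_table(size_t nbytes)
{
	/*
	 * alloc_pages_exact() rounds up to whole pages internally but frees
	 * the unused tail, so only about nbytes stays allocated.
	 */
	return alloc_pages_exact(nbytes, GFP_KERNEL | __GFP_ZERO);
}

static void example_free_table(void *table, size_t nbytes)
{
	free_pages_exact(table, nbytes);
}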
6079 .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
6119 if (!(cc->gfp_mask & __GFP_NOWARN) && ret == -EBUSY)
6135 * @gfp_mask: GFP mask to use during compaction
6149 unsigned migratetype, gfp_t gfp_mask)
6162 .gfp_mask = current_gfp_context(gfp_mask),
6188 ret = start_isolate_page_range(start, end, migratetype, 0, gfp_mask);
6275 unsigned long nr_pages, gfp_t gfp_mask)
6280 gfp_mask);
6317 * @gfp_mask: GFP mask to limit search and used during compaction
6335 struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
6343 zonelist = node_zonelist(nid, gfp_mask);
6345 gfp_zone(gfp_mask), nodemask) {
6360 gfp_mask);
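alloc_contig_pages() (line 6335, available under CONFIG_CONTIG_ALLOC) scans the zonelist derived from gfp_mask for a physically contiguous range and hands it to alloc_contig_range(); the mask both limits the zone/node search (gfp_zone(), line 6345) and is used during the migration/compaction that empties the range. A hedged usage sketch (names illustrative), paired with free_contig_range():

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/nodemask.h>

static struct page *example_grab_contig(unsigned long nr_pages)
{
	/*
	 * GFP_KERNEL bounds which zones are searched and is the mask used
	 * while migrating pages out of the chosen range.
	 */
	return alloc_contig_pages(nr_pages, GFP_KERNEL, first_online_node,
				  NULL);
}

static void example_release_contig(struct page *page, unsigned long nr_pages)
{
	free_contig_range(page_to_pfn(page), nr_pages);
}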