/kernel/linux/linux-5.10/include/linux/
gfp.h:
  489    * We get the zone list from the current node and the gfp_mask.
  516   __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
  520   __alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid)
  522           return __alloc_pages_nodemask(gfp_mask, order, preferred_nid, NULL);
  530   __alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
  533           VM_WARN_ON((gfp_mask & __GFP_THISNODE) && !node_online(nid));
  535           return __alloc_pages(gfp_mask, order, nid);
  543   static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
  549           return __alloc_pages_node(nid, gfp_mask, order);
  553   extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);
  556   alloc_pages(gfp_t gfp_mask, unsigned int order)
  566   alloc_pages(gfp_t gfp_mask, unsigned int order)   (CONFIG_NUMA and !CONFIG_NUMA variants)
  [all...]
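The matches above are the NUMA-aware page-allocation entry points. A minimal sketch of how a caller might use them (hypothetical module code, not taken from the listing; error handling trimmed):

    #include <linux/gfp.h>
    #include <linux/topology.h>

    /* Allocate a 4-page (order-2) block near the current CPU's node;
     * without __GFP_THISNODE the allocator may fall back to other nodes. */
    static struct page *grab_pages(void)
    {
            int nid = numa_node_id();

            return alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO, 2);
    }

    static void drop_pages(struct page *page)
    {
            if (page)
                    __free_pages(page, 2); /* order must match the allocation */
    }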
cpuset.h:
   69   extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask);
   71   static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
   74           return __cpuset_node_allowed(node, gfp_mask);
   78   static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
   80           return __cpuset_node_allowed(zone_to_nid(z), gfp_mask);
   83   static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
   86           return __cpuset_zone_allowed(z, gfp_mask);
  211   static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
  216   static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
  221   static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)   (!CONFIG_CPUSETS stubs)
  [all...]
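These helpers let allocator code skip zones the current task's cpuset forbids. A hedged sketch of the pattern (internal MM helpers; this mirrors what get_page_from_freelist() does, it is not a public API for drivers):

    #include <linux/cpuset.h>
    #include <linux/gfp.h>
    #include <linux/mmzone.h>

    /* Return the first zone in a zonelist that the current task's
     * cpuset permits for this allocation, or NULL. */
    static struct zone *first_allowed_zone(struct zonelist *zl, gfp_t gfp_mask)
    {
            struct zoneref *z;
            struct zone *zone;

            for_each_zone_zonelist(zone, z, zl, gfp_zone(gfp_mask))
                    if (cpuset_zone_allowed(zone, gfp_mask))
                            return zone;
            return NULL;
    }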
mempool.h:
   13   typedef void * (mempool_alloc_t)(gfp_t gfp_mask, void *pool_data);
   36                           gfp_t gfp_mask, int node_id);
   44                           gfp_t gfp_mask, int nid);
   48   extern void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask) __malloc;
   56   void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data);
   77   void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data);
   96   void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data);
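mempool_alloc_slab()/mempool_free_slab() are the stock element constructors declared above. A minimal sketch of building a pool on top of a slab cache (hypothetical names, not from the listing):

    #include <linux/mempool.h>
    #include <linux/slab.h>

    struct io_unit { int data; };        /* hypothetical element type */

    static struct kmem_cache *io_cache;
    static mempool_t *io_pool;

    static int io_pool_init(void)
    {
            io_cache = kmem_cache_create("io_unit", sizeof(struct io_unit),
                                         0, 0, NULL);
            if (!io_cache)
                    return -ENOMEM;

            /* Keep at least 4 elements reserved for forward progress. */
            io_pool = mempool_create(4, mempool_alloc_slab, mempool_free_slab,
                                     io_cache);
            if (!io_pool) {
                    kmem_cache_destroy(io_cache);
                    return -ENOMEM;
            }
            return 0;
    }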
/kernel/linux/linux-6.6/include/linux/
gfp.h:
  163    * We get the zone list from the current node and the gfp_mask.
  219   static inline void warn_if_node_offline(int this_node, gfp_t gfp_mask)
  221           gfp_t warn_gfp = gfp_mask & (__GFP_THISNODE|__GFP_NOWARN);
  229           pr_warn("%pGg allocation from offline node %d\n", &gfp_mask, this_node);
  238   __alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
  241           warn_if_node_offline(nid, gfp_mask);
  243           return __alloc_pages(gfp_mask, order, nid, NULL);
  260   static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
  266           return __alloc_pages_node(nid, gfp_mask, order);
  275   static inline struct page *alloc_pages(gfp_t gfp_mask, unsigned int order)
  317   page_frag_alloc(struct page_frag_cache *nc, unsigned int fragsz, gfp_t gfp_mask)
  [all...]
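page_frag_alloc() (line 317) carves small fragments out of cached compound pages; network drivers use it for receive buffers. A sketch of the usage pattern (hypothetical cache and helpers; assumes fragments are released with page_frag_free()):

    #include <linux/gfp.h>

    static struct page_frag_cache frag_cache;   /* zero-initialized */

    static void *alloc_rx_buf(unsigned int len)
    {
            /* GFP_ATOMIC: this path may run from softirq context. */
            return page_frag_alloc(&frag_cache, len, GFP_ATOMIC);
    }

    static void free_rx_buf(void *buf)
    {
            page_frag_free(buf); /* drops the page reference the fragment holds */
    }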
cpuset.h:
   85   extern bool cpuset_node_allowed(int node, gfp_t gfp_mask);
   87   static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
   89           return cpuset_node_allowed(zone_to_nid(z), gfp_mask);
   92   static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
   95           return __cpuset_zone_allowed(z, gfp_mask);
  223   static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
  228   static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)   (!CONFIG_CPUSETS stubs)
mempool.h:
   13   typedef void * (mempool_alloc_t)(gfp_t gfp_mask, void *pool_data);
   41                           gfp_t gfp_mask, int node_id);
   49                           gfp_t gfp_mask, int nid);
   53   extern void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask) __malloc;
   61   void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data);
   82   void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data);
  101   void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data);
/kernel/linux/linux-5.10/block/
blk-lib.c:
   26           sector_t nr_sects, gfp_t gfp_mask, int flags,
   97           bio = blk_next_bio(bio, 0, gfp_mask);
  125    * @gfp_mask:   memory allocation flags (for bio_alloc)
  132           sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
  139           ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
  158    * @gfp_mask:   memory allocation flags (for bio_alloc)
  166           sector_t nr_sects, gfp_t gfp_mask, struct page *page,
  191           bio = blk_next_bio(bio, 1, gfp_mask);
  220    * @gfp_mask:   memory allocation flags (for bio_alloc)
  227           sector_t nr_sects, gfp_t gfp_mask,
  Further gfp_mask parameters: __blkdev_issue_discard (25), blkdev_issue_discard (131),
  __blkdev_issue_write_same (165), blkdev_issue_write_same (226),
  __blkdev_issue_write_zeroes (246), __blkdev_issue_zero_pages (302),
  __blkdev_issue_zeroout (358), blkdev_issue_zeroout (392).
  [all...]
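blkdev_issue_discard() and friends take the caller's gfp_mask for their internal bio allocations. A sketch of discarding a range from process context (hypothetical caller; note the 5.10 helper still takes a flags argument, which the 6.6 version below dropped):

    #include <linux/blkdev.h>

    /* Discard 1 MiB starting at LBA 2048.  GFP_KERNEL is fine here
     * because this runs from process context and may sleep. */
    static int discard_range(struct block_device *bdev)
    {
            sector_t start = 2048;
            sector_t nr = 1024 * 1024 >> 9; /* bytes to 512-byte sectors */

            return blkdev_issue_discard(bdev, start, nr, GFP_KERNEL, 0);
    }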
blk-map.c:
   22           gfp_t gfp_mask)
   29           bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);
  131           struct iov_iter *iter, gfp_t gfp_mask)
  141           bmd = bio_alloc_map_data(iter, gfp_mask);
  158           bio = bio_kmalloc(gfp_mask, nr_pages);
  186           page = alloc_page(rq->q->bounce_gfp | gfp_mask);
  244           gfp_t gfp_mask)
  254           bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_PAGES));
  376    * @gfp_mask:   allocation flags for bio allocation
  382           unsigned int len, gfp_t gfp_mask)
  Further gfp_mask parameters: bio_alloc_map_data (21), bio_copy_user_iov (130),
  bio_map_user_iov (243), bio_map_kern (381), bio_copy_kern (463),
  blk_rq_map_user_iov (578), blk_rq_map_user (620), blk_rq_map_kern (684).
  [all...]
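blk_rq_map_kern() (line 684) maps a kernel buffer into a request, allocating the bio with the given gfp_mask. A sketch of the common driver pattern on 5.10-era APIs (hypothetical command; request allocation details vary by kernel version):

    #include <linux/blkdev.h>

    /* Issue a driver-private read command carrying a kernel buffer. */
    static int send_cmd(struct request_queue *q, void *buf, unsigned int len)
    {
            struct request *rq;
            int ret;

            rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
            if (IS_ERR(rq))
                    return PTR_ERR(rq);

            /* GFP_NOIO: avoid recursing into block I/O while building I/O. */
            ret = blk_rq_map_kern(q, rq, buf, len, GFP_NOIO);
            if (!ret)
                    blk_execute_rq(q, NULL, rq, 0);

            blk_put_request(rq);
            return ret;
    }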
/kernel/linux/common_modules/tzdriver/
ko_adapt.h:
   38   struct page *koadpt_alloc_pages(gfp_t gfp_mask, unsigned int order);
   39   struct workqueue_attrs *koadpt_alloc_workqueue_attrs(gfp_t gfp_mask);
   55   static inline struct page *koadpt_alloc_pages(gfp_t gfp_mask, unsigned int order)
   57           return alloc_pages(gfp_mask, order);
   61           gfp_t gfp_mask)   (koadpt_alloc_workqueue_attrs, declared at 60)
   64           return alloc_workqueue_attrs(gfp_mask);
   66           (void)gfp_mask;
ko_adapt.c:
   33   typedef struct page *(alloc_pages_func)(gfp_t gfp_mask, unsigned int order);
   34   typedef struct workqueue_attrs *(alloc_workqueue_attrs_func)(gfp_t gfp_mask);
   82   struct page *koadpt_alloc_pages(gfp_t gfp_mask, unsigned int order)
   86           return alloc_pages(gfp_mask, order);
   98           return alloc_pages_pt(gfp_mask, order);
  101           return alloc_pages(gfp_mask, order);
  105   struct workqueue_attrs *koadpt_alloc_workqueue_attrs(gfp_t gfp_mask)
  109           (void)gfp_mask;
  137           return alloc_workqueue_attrs_pt(gfp_mask);
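The *_pt function pointers suggest the tzdriver resolves allocator symbols at run time and falls back to the exported API otherwise. A sketch of that adapter pattern (the lookup mechanism is hypothetical; the module's actual resolution code is not shown in these matches):

    #include <linux/gfp.h>

    typedef struct page *(alloc_pages_func)(gfp_t gfp_mask, unsigned int order);

    static alloc_pages_func *alloc_pages_pt; /* resolved at init, may stay NULL */

    struct page *koadpt_alloc_pages(gfp_t gfp_mask, unsigned int order)
    {
            /* Prefer the dynamically resolved symbol; fall back to the
             * normally exported allocator when resolution failed. */
            if (alloc_pages_pt)
                    return alloc_pages_pt(gfp_mask, order);
            return alloc_pages(gfp_mask, order);
    }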
/kernel/linux/linux-5.10/mm/
mempool.c:
  180           gfp_t gfp_mask, int node_id)
  190                                gfp_mask, node_id);
  200           element = pool->alloc(gfp_mask, pool->pool_data);
  261           gfp_t gfp_mask, int node_id)
  265           pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
  270                            gfp_mask, node_id)) {
  363    * @gfp_mask:   the usual allocation bitmask.
  373   void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
  380           VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
  381           might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
  Further gfp_mask parameters: mempool_init_node (178), mempool_create_node (259),
  mempool_alloc_slab (507), mempool_kmalloc (526), mempool_alloc_pages (543).
  [all...]
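mempool_alloc() rejects __GFP_ZERO (line 380) because recycled elements are returned as-is, and it only sleeps when the mask allows direct reclaim. A usage sketch (hypothetical pool, as in the mempool.h example above):

    #include <linux/mempool.h>

    /* GFP_NOIO allows sleeping but forbids recursing into I/O.  Because
     * the mask includes __GFP_DIRECT_RECLAIM, mempool_alloc() waits for
     * a freed element rather than failing, so the return is non-NULL. */
    static void *get_io_unit(mempool_t *pool)
    {
            return mempool_alloc(pool, GFP_NOIO);
    }

    static void put_io_unit(mempool_t *pool, void *elem)
    {
            mempool_free(elem, pool);
    }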
page_owner.c:
   26           gfp_t gfp_mask;   (struct page_owner member)
  170                           unsigned int order, gfp_t gfp_mask)
  179           page_owner->gfp_mask = gfp_mask;
  191                           gfp_t gfp_mask)
  199           handle = save_stack(gfp_mask);
  200           __set_page_owner_handle(page, page_ext, handle, order, gfp_mask);
  243           new_page_owner->gfp_mask = old_page_owner->gfp_mask;
  326           page_mt = gfp_migratetype(page_owner->gfp_mask);
  Also: __set_page_owner_handle (168), __set_page_owner (190), and a local
  gfp_mask in __dump_page_owner() (420).
  [all...]
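page_owner records the allocating stack and gfp_mask for every page; gfp_migratetype() (line 326) later folds the recorded mask down to a mobility class. A minimal sketch of that helper (hypothetical caller):

    #include <linux/gfp.h>

    /* gfp_migratetype() reduces the mobility bits of a mask to a
     * migratetype index; __GFP_MOVABLE selects MIGRATE_MOVABLE. */
    static int mask_mt(void)
    {
            return gfp_migratetype(GFP_HIGHUSER_MOVABLE); /* MIGRATE_MOVABLE */
    }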
page_alloc.c:
  3548  static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
  3552          if (gfp_mask & __GFP_NOFAIL)
  3554          if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
  3557                  (gfp_mask & __GFP_DIRECT_RECLAIM))
  3588  static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
  3595  noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
  3597          return __should_fail_alloc_page(gfp_mask, order);
  3703          unsigned int alloc_flags, gfp_t gfp_mask)
  3735          if (unlikely(!order && (gfp_mask & __GFP_ATOMIC) && z->watermark_boost
  3779  alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
  Further gfp_mask parameters: zone_watermark_fast (3701), current_alloc_flags (3810),
  get_page_from_freelist (3829), warn_alloc_show_mem (3978), warn_alloc (3997),
  __alloc_pages_cpuset_fallback (4023), __alloc_pages_may_oom (4043),
  __alloc_pages_direct_compact (4137, 4270), __need_fs_reclaim (4310),
  fs_reclaim_acquire (4342), fs_reclaim_release (4349), __perform_reclaim (4383),
  __alloc_pages_direct_reclaim (4411), wake_all_kswapds (4446),
  gfp_to_alloc_flags (4463), __gfp_pfmemalloc_flags (4523),
  gfp_pfmemalloc_allowed (4541), should_reclaim_retry (4557),
  __alloc_pages_slowpath (4681), prepare_alloc_pages (4959),
  __alloc_pages_nodemask (5014), __get_free_pages (5082), get_zeroed_page (5093),
  __page_frag_cache_refill (5141), page_frag_alloc (5196), alloc_pages_exact (5318),
  alloc_pages_exact_nid (5343), alloc_contig_range (8658),
  __alloc_contig_pages (8791), alloc_contig_pages (8855).
  [all...]
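Several of the entry points above form the public allocator API. A sketch of the exact-size variant, which frees the unused tail of the rounded-up order (hypothetical caller):

    #include <linux/gfp.h>

    /* Allocate 10 KiB of physically contiguous memory.  Internally this
     * rounds up to an order-2 block (16 KiB) and frees the tail pages
     * beyond PAGE_ALIGN(10 KiB). */
    static void *grab_exact(void)
    {
            return alloc_pages_exact(10 * 1024, GFP_KERNEL | __GFP_ZERO);
    }

    static void drop_exact(void *p)
    {
            free_pages_exact(p, 10 * 1024); /* size must match the allocation */
    }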
/kernel/linux/linux-6.6/mm/
mempool.c:
  187           gfp_t gfp_mask, int node_id)
  197                                gfp_mask, node_id);
  207           element = pool->alloc(gfp_mask, pool->pool_data);
  268           gfp_t gfp_mask, int node_id)
  272           pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
  277                            gfp_mask, node_id)) {
  370    * @gfp_mask:   the usual allocation bitmask.
  380   void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
  387           VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
  388           might_alloc(gfp_mask);
  Further gfp_mask parameters: mempool_init_node (185), mempool_create_node (266),
  mempool_alloc_slab (514), mempool_kmalloc (533), mempool_alloc_pages (550).
  [all...]
fail_page_alloc.c:
   24   bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
   30           if (gfp_mask & __GFP_NOFAIL)
   32           if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
   35                   (gfp_mask & __GFP_DIRECT_RECLAIM))
   39           if (gfp_mask & __GFP_NOWARN)
page_alloc.c:
  2832  noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
  2834          return __should_fail_alloc_page(gfp_mask, order);
  2956          unsigned int alloc_flags, gfp_t gfp_mask)
  3035  alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
  3043          alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM);
  3066  /* Must be called after current_gfp_context() which can change gfp_mask */
  3067  static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask,
  3071          if (gfp_migratetype(gfp_mask) == get_cma_migratetype())
  3082  get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
  3105                  !__cpuset_zone_allowed(zone, gfp_mask))
  Further gfp_mask parameters: zone_watermark_fast (2954), warn_alloc_show_mem (3242),
  warn_alloc (3261), __alloc_pages_cpuset_fallback (3287), __alloc_pages_may_oom (3307),
  __alloc_pages_direct_compact (3402, 3527), __need_reclaim (3567),
  fs_reclaim_acquire (3593), fs_reclaim_release (3610), __perform_reclaim (3648),
  __alloc_pages_direct_reclaim (3674), wake_all_kswapds (3707),
  gfp_to_alloc_flags (3727), __gfp_pfmemalloc_flags (3794),
  gfp_pfmemalloc_allowed (3812), should_reclaim_retry (3828),
  __alloc_pages_slowpath (3933), prepare_alloc_pages (4209),
  __get_free_pages (4512), get_zeroed_page (4523), __page_frag_cache_refill (4583),
  page_frag_alloc_align (4613), alloc_pages_exact (4733), alloc_pages_exact_nid (4758),
  alloc_contig_range (6148), __alloc_contig_pages (6274), alloc_contig_pages (6335).
  [all...]
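alloc_contig_pages() (line 6335) hunts for a movable range and migrates it out to satisfy large physically contiguous requests. A hedged sketch (assumes CONFIG_CONTIG_ALLOC; the call can fail for transient reasons, so real callers retry or fall back):

    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/numa.h>

    /* Try to obtain 512 physically contiguous pages (2 MiB with 4 KiB
     * pages) on any node.  Returns NULL when no movable range could be
     * claimed; callers need a fallback path. */
    static struct page *grab_contig(void)
    {
            return alloc_contig_pages(512, GFP_KERNEL, NUMA_NO_NODE, NULL);
    }

    static void drop_contig(struct page *page)
    {
            if (page)
                    free_contig_range(page_to_pfn(page), 512);
    }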
page_owner.c:
   27           gfp_t gfp_mask;   (struct page_owner member)
  162                           unsigned short order, gfp_t gfp_mask)
  172           page_owner->gfp_mask = gfp_mask;
  187                           gfp_t gfp_mask)
  192           handle = save_stack(gfp_mask);
  197           __set_page_owner_handle(page_ext, handle, order, gfp_mask);
  250           new_page_owner->gfp_mask = old_page_owner->gfp_mask;
  334           page_mt = gfp_migratetype(page_owner->gfp_mask);
  Also: __set_page_owner_handle (160), __set_page_owner (186), and a local
  gfp_mask in __dump_page_owner() (460).
  [all...]
/kernel/linux/linux-6.6/block/
blk-map.c:
   22           gfp_t gfp_mask)
   29           bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);
  132           struct iov_iter *iter, gfp_t gfp_mask)
  142           bmd = bio_alloc_map_data(iter, gfp_mask);
  157           bio = bio_kmalloc(nr_pages, gfp_mask);
  185           page = alloc_page(GFP_NOIO | gfp_mask);
  254           unsigned int nr_vecs, gfp_t gfp_mask)
  259           bio = bio_alloc_bioset(NULL, nr_vecs, rq->cmd_flags, gfp_mask,
  264           bio = bio_kmalloc(nr_vecs, gfp_mask);
  273           gfp_t gfp_mask)
  Further gfp_mask parameters: bio_alloc_map_data (21), bio_copy_user_iov (131),
  blk_rq_map_bio_alloc (253), bio_map_user_iov (272), bio_map_kern (389),
  bio_copy_kern (474), blk_rq_map_user_iov (632), blk_rq_map_user (687),
  blk_rq_map_user_io (701), blk_rq_map_kern (782).
  [all...]
blk-lib.c:
   39           sector_t nr_sects, gfp_t gfp_mask, struct bio **biop)
   67           bio = blk_next_bio(bio, bdev, 0, REQ_OP_DISCARD, gfp_mask);
   92    * @gfp_mask:   memory allocation flags (for bio_alloc)
   98           sector_t nr_sects, gfp_t gfp_mask)
  105           ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, &bio);
  119           sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
  135           bio = blk_next_bio(bio, bdev, 0, REQ_OP_WRITE_ZEROES, gfp_mask);
  169           sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
  181                           REQ_OP_WRITE, gfp_mask);
  204    * @gfp_mask …
  Further gfp_mask parameters: __blkdev_issue_discard (38), blkdev_issue_discard (97),
  __blkdev_issue_write_zeroes (118), __blkdev_issue_zero_pages (168),
  __blkdev_issue_zeroout (218), blkdev_issue_zeroout (252).
  [all...]
/kernel/linux/linux-5.10/fs/nfs/blocklayout/
dev.c:
  231           struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask);
  236           struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
  242           dev = bl_resolve_deviceid(server, v, gfp_mask);
  353           struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
  402           struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
  407           ret = bl_parse_deviceid(server, d, volumes, v->slice.volume, gfp_mask);
  418           struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
  425                           sizeof(struct pnfs_block_dev), gfp_mask);
  431                           volumes, v->concat.volumes[i], gfp_mask);
  447           struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
  Functions: bl_parse_simple (235), bl_parse_scsi (352), bl_parse_slice (401),
  bl_parse_concat (417), bl_parse_stripe (446), bl_parse_deviceid (475),
  bl_alloc_deviceid_node (496).
  [all...]
/kernel/linux/linux-6.6/fs/nfs/blocklayout/
dev.c:
  231           struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask);
  236           struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
  242           dev = bl_resolve_deviceid(server, v, gfp_mask);
  329           struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
  384           struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
  389           ret = bl_parse_deviceid(server, d, volumes, v->slice.volume, gfp_mask);
  400           struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
  407                           sizeof(struct pnfs_block_dev), gfp_mask);
  413                           volumes, v->concat.volumes[i], gfp_mask);
  429           struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)
  Functions: bl_parse_simple (235), bl_parse_scsi (328), bl_parse_slice (383),
  bl_parse_concat (399), bl_parse_stripe (428), bl_parse_deviceid (457),
  bl_alloc_deviceid_node (478).
  [all...]
/kernel/linux/linux-5.10/fs/btrfs/
ulist.h:
   48   struct ulist *ulist_alloc(gfp_t gfp_mask);
   50   int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask);
   52                       u64 *old_aux, gfp_t gfp_mask);
   57                       void **old_aux, gfp_t gfp_mask)   (ulist_add_merge_ptr, declared at 56)
   61           int ret = ulist_add_merge(ulist, val, (uintptr_t)aux, &old64, gfp_mask);
   65           return ulist_add_merge(ulist, val, (u64)aux, (u64 *)old_aux, gfp_mask);
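ulist is btrfs's small set-of-u64 helper; every mutating call threads a gfp_mask through to its internal node allocations. A usage sketch (hypothetical values; iteration follows the ULIST_ITER_INIT/ulist_next pattern from ulist.h):

    #include <linux/printk.h>
    #include "ulist.h"  /* btrfs-internal header */

    static int collect_vals(void)
    {
            struct ulist *set;
            struct ulist_iterator uiter;
            struct ulist_node *node;
            int ret;

            set = ulist_alloc(GFP_NOFS); /* GFP_NOFS: typically under fs locks */
            if (!set)
                    return -ENOMEM;

            ret = ulist_add(set, 256, 0, GFP_NOFS); /* val, aux, mask */
            if (ret < 0)
                    goto out;

            ULIST_ITER_INIT(&uiter);
            while ((node = ulist_next(set, &uiter)))
                    pr_debug("val %llu aux %llu\n", node->val, node->aux);
    out:
            ulist_free(set);
            return ret < 0 ? ret : 0;
    }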
/kernel/linux/linux-6.6/fs/btrfs/
ulist.h:
   48   struct ulist *ulist_alloc(gfp_t gfp_mask);
   50   int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask);
   52                       u64 *old_aux, gfp_t gfp_mask);
   57                       void **old_aux, gfp_t gfp_mask)   (ulist_add_merge_ptr, declared at 56)
   61           int ret = ulist_add_merge(ulist, val, (uintptr_t)aux, &old64, gfp_mask);
   65           return ulist_add_merge(ulist, val, (u64)aux, (u64 *)old_aux, gfp_mask);
/kernel/linux/linux-5.10/net/sunrpc/auth_gss/
gss_krb5_mech.c:
   32   static int gss_krb5_import_ctx_des(struct krb5_ctx *ctx, gfp_t gfp_mask);
   33   static int gss_krb5_import_ctx_v1(struct krb5_ctx *ctx, gfp_t gfp_mask);
   34   static int gss_krb5_import_ctx_v2(struct krb5_ctx *ctx, gfp_t gfp_mask);
  320   gss_krb5_import_ctx_des(struct krb5_ctx *ctx, gfp_t gfp_mask)
  326   gss_krb5_import_ctx_v1(struct krb5_ctx *ctx, gfp_t gfp_mask)
  353           err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);
  371   gss_krb5_import_ctx_v2(struct krb5_ctx *ctx, gfp_t gfp_mask)
  387           err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);
  402           err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);
  417           err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask);
  Further gfp_mask parameters: gss_import_v2_context (482),
  gss_import_sec_context_kerberos (559).
  [all...]
/kernel/linux/linux-5.10/drivers/staging/android/ion/
ion_page_pool.c:
   19           return alloc_pages(pool->gfp_mask, pool->order);
  100   int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
  109           high = !!(gfp_mask & __GFP_HIGHMEM);
  134   struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
  144           pool->gfp_mask = gfp_mask | __GFP_COMP;
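ion_page_pool_create() stores the caller's mask (forcing __GFP_COMP so high-order pages are compound) and reuses it for every refill at line 19. A sketch of how an ION heap might set up its per-order pools (hypothetical helper; loosely modeled on the staging system heap's low/high-order flag split):

    #include <linux/gfp.h>
    #include "ion.h"  /* staging driver header */

    /* High orders add __GFP_NORETRY | __GFP_NOWARN and drop direct
     * reclaim, so a failed large attempt falls back to smaller orders
     * instead of triggering heavy reclaim. */
    static struct ion_page_pool *create_order_pool(unsigned int order)
    {
            gfp_t gfp = GFP_HIGHUSER | __GFP_ZERO;

            if (order > 0)
                    gfp = (gfp | __GFP_NORETRY | __GFP_NOWARN) & ~__GFP_RECLAIM;

            return ion_page_pool_create(gfp, order);
    }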