/kernel/linux/linux-6.6/mm/

page_alloc.c
    1548  unsigned int alloc_flags)  in prep_new_page()
    1561  if (alloc_flags & ALLOC_NO_WATERMARKS)  in prep_new_page()
    1775  unsigned int alloc_flags, int start_type, bool whole_block)  in steal_suitable_fallback()
    1801  if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD))  in steal_suitable_fallback()
    2012  unsigned int alloc_flags)  in __rmqueue_fallback()
    2026  if (order < pageblock_order && alloc_flags & ALLOC_NOFRAGMENT)  in __rmqueue_fallback()
    2078  steal_suitable_fallback(zone, page, alloc_flags, start_migratetype,  in __rmqueue_fallback()
    2090  int migratetype, unsigned int alloc_flags)  in __rmqueue_with_cma_reuse()
    2098  alloc_flags &= ~ALLOC_CMA;  in __rmqueue_with_cma_reuse()
    2103  __rmqueue_fallback(zone, order, migratetype, alloc_flags))  in __rmqueue_with_cma_reuse()
    1547  prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, unsigned int alloc_flags)  prep_new_page() argument
    1774  steal_suitable_fallback(struct zone *zone, struct page *page, unsigned int alloc_flags, int start_type, bool whole_block)  steal_suitable_fallback() argument
    2011  __rmqueue_fallback(struct zone *zone, int order, int start_migratetype, unsigned int alloc_flags)  __rmqueue_fallback() argument
    2089  __rmqueue_with_cma_reuse(struct zone *zone, unsigned int order, int migratetype, unsigned int alloc_flags)  __rmqueue_with_cma_reuse() argument
    2114  __rmqueue(struct zone *zone, unsigned int order, int migratetype, unsigned int alloc_flags)  __rmqueue() argument
    2157  rmqueue_bulk(struct zone *zone, unsigned int order, unsigned long count, struct list_head *list, int migratetype, unsigned int alloc_flags)  rmqueue_bulk() argument
    2668  rmqueue_buddy(struct zone *preferred_zone, struct zone *zone, unsigned int order, unsigned int alloc_flags, int migratetype)  rmqueue_buddy() argument
    2710  __rmqueue_pcplist(struct zone *zone, unsigned int order, int migratetype, unsigned int alloc_flags, struct per_cpu_pages *pcp, struct list_head *list)  __rmqueue_pcplist() argument
    2750  rmqueue_pcplist(struct zone *preferred_zone, struct zone *zone, unsigned int order, int migratetype, unsigned int alloc_flags)  rmqueue_pcplist() argument
    2797  rmqueue(struct zone *preferred_zone, struct zone *zone, unsigned int order, gfp_t gfp_flags, unsigned int alloc_flags, int migratetype)  rmqueue() argument
    2838  __zone_watermark_unusable_free(struct zone *z, unsigned int order, unsigned int alloc_flags)  __zone_watermark_unusable_free() argument
    2869  __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, int highest_zoneidx, unsigned int alloc_flags, long free_pages)  __zone_watermark_ok() argument
    2947  zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, int highest_zoneidx, unsigned int alloc_flags)  zone_watermark_ok() argument
    2954  zone_watermark_fast(struct zone *z, unsigned int order, unsigned long mark, int highest_zoneidx, unsigned int alloc_flags, gfp_t gfp_mask)  zone_watermark_fast() argument
    3037  unsigned int alloc_flags;  alloc_flags_nofragment() local
    3067  gfp_to_alloc_flags_cma(gfp_t gfp_mask, unsigned int alloc_flags)  gfp_to_alloc_flags_cma() argument
    3082  get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, const struct alloc_context *ac)  get_page_from_freelist() argument
    3287  __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order, unsigned int alloc_flags, const struct alloc_context *ac)  __alloc_pages_cpuset_fallback() argument
    3402  __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, unsigned int alloc_flags, const struct alloc_context *ac, enum compact_priority prio, enum compact_result *compact_result)  __alloc_pages_direct_compact() argument
    3461  should_compact_retry(struct alloc_context *ac, int order, int alloc_flags, enum compact_result compact_result, enum compact_priority *compact_priority, int *compaction_retries)  should_compact_retry() argument
    3527  __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, unsigned int alloc_flags, const struct alloc_context *ac, enum compact_priority prio, enum compact_result *compact_result)  __alloc_pages_direct_compact() argument
    3536  should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags, enum compact_result compact_result, enum compact_priority *compact_priority, int *compaction_retries)  should_compact_retry() argument
    3674  __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, unsigned int alloc_flags, const struct alloc_context *ac, unsigned long *did_some_progress)  __alloc_pages_direct_reclaim() argument
    3729  unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;  gfp_to_alloc_flags() local
    3828  should_reclaim_retry(gfp_t gfp_mask, unsigned order, struct alloc_context *ac, int alloc_flags, bool did_some_progress, int *no_progress_loops)  should_reclaim_retry() argument
    3939  unsigned int alloc_flags;  __alloc_pages_slowpath() local
    4209  prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid, nodemask_t *nodemask, struct alloc_context *ac, gfp_t *alloc_gfp, unsigned int *alloc_flags)  prepare_alloc_pages() argument
    4290  unsigned int alloc_flags = ALLOC_WMARK_LOW;  __alloc_pages_bulk() local
    4433  unsigned int alloc_flags = ALLOC_WMARK_LOW;  __alloc_pages() local
    [all...]
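Taken together, the page_alloc.c hits trace a single pattern: alloc_flags is an unsigned bitmask seeded near the entry points (ALLOC_WMARK_LOW on the fast path, ALLOC_WMARK_MIN | ALLOC_CPUSET in gfp_to_alloc_flags()) and then tested or stripped bit by bit as the request descends to the freelists. A minimal userspace sketch of that pattern follows; the ALLOC_* values and the watermark_ok() body are illustrative stand-ins, not the kernel's definitions (the real bits live in mm/internal.h):

    #include <stdio.h>

    /* Illustrative stand-ins for the kernel's internal ALLOC_* bits;
     * the real definitions differ in value and number. */
    #define ALLOC_WMARK_MIN      0x00u
    #define ALLOC_WMARK_LOW      0x01u
    #define ALLOC_WMARK_MASK     0x03u
    #define ALLOC_NO_WATERMARKS  0x04u
    #define ALLOC_CPUSET         0x08u
    #define ALLOC_CMA            0x10u
    #define ALLOC_NOFRAGMENT     0x20u
    #define ALLOC_KSWAPD         0x40u

    /* Toy version of the __zone_watermark_ok() decision: the flag bits
     * either bypass the check or change which free pages count. */
    static int watermark_ok(long free_pages, long mark, long cma_free,
                            unsigned int alloc_flags)
    {
            if (alloc_flags & ALLOC_NO_WATERMARKS)
                    return 1;
            if (!(alloc_flags & ALLOC_CMA))
                    free_pages -= cma_free; /* CMA pages are off limits */
            return free_pages > mark;
    }

    int main(void)
    {
            unsigned int alloc_flags = ALLOC_WMARK_LOW | ALLOC_CPUSET | ALLOC_CMA;

            /* __rmqueue_with_cma_reuse() strips ALLOC_CMA before falling
             * back to other migratetypes (hit at line 2098 above). */
            alloc_flags &= ~ALLOC_CMA;

            printf("allocation allowed: %d\n",
                   watermark_ok(900, 512, 600, alloc_flags));
            return 0;
    }

The mask-and-test style lets one integer carry the whole allocation policy through a deep call chain, from rmqueue() down to __rmqueue_fallback(), without widening every signature.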
compaction.c
    2286  * watermark and alloc_flags have to match, or be more pessimistic than  in __compaction_suitable()
    2288  * compactor's alloc_flags, as they are not relevant for freepage  in __compaction_suitable()
    2351  int alloc_flags)  in compaction_zonelist_suitable()
    2409  cc->alloc_flags & ALLOC_WMARK_MASK);  in compact_zone()
    2411  cc->highest_zoneidx, cc->alloc_flags))  in compact_zone()
    2620  unsigned int alloc_flags, int highest_zoneidx,  in compact_zone_order()
    2631  .alloc_flags = alloc_flags,  in compact_zone_order()
    2676  * @alloc_flags: The allocation flags of the current allocation
    2684  unsigned int alloc_flags, const ...  in try_to_compact_pages()
    2350  compaction_zonelist_suitable(struct alloc_context *ac, int order, int alloc_flags)  compaction_zonelist_suitable() argument
    2618  compact_zone_order(struct zone *zone, int order, gfp_t gfp_mask, enum compact_priority prio, unsigned int alloc_flags, int highest_zoneidx, struct page **capture)  compact_zone_order() argument
    2683  try_to_compact_pages(gfp_t gfp_mask, unsigned int order, unsigned int alloc_flags, const struct alloc_context *ac, enum compact_priority prio, struct page **capture)  try_to_compact_pages() argument
    [all...]
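The compaction.c hits are the companion half of that design: compact_zone_order() copies the allocator's alloc_flags into its control struct, so compact_zone() can re-check exactly the watermark the failed allocation used. A hedged sketch, with an invented compact_control_sketch standing in for the kernel's struct compact_control:

    /* Illustrative value, matching the page allocator sketch above. */
    #define ALLOC_WMARK_MASK 0x03u

    /* Invented mirror of struct compact_control, reduced to the fields
     * the hits above mention. */
    struct compact_control_sketch {
            int order;
            unsigned int alloc_flags;   /* copied from the failed allocation */
            int highest_zoneidx;
    };

    /* compact_zone() masks cc->alloc_flags with ALLOC_WMARK_MASK before
     * calling wmark_pages(); this helper isolates that one step. */
    unsigned int compaction_wmark_index(const struct compact_control_sketch *cc)
    {
            return cc->alloc_flags & ALLOC_WMARK_MASK;
    }

Carrying the flags along is what the comment at lines 2286-2288 warns about: the suitability check must be no more optimistic than the allocator's own watermark check, or compaction would report progress the allocator still cannot use.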
/kernel/linux/linux-5.10/mm/

page_alloc.c
    2290  unsigned int alloc_flags)  in prep_new_page()
    2306  if (alloc_flags & ALLOC_NO_WATERMARKS)  in prep_new_page()
    2526  unsigned int alloc_flags, int start_type, bool whole_block)  in steal_suitable_fallback()
    2552  if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD))  in steal_suitable_fallback()
    2768  unsigned int alloc_flags)  in __rmqueue_fallback()
    2782  if (alloc_flags & ALLOC_NOFRAGMENT)  in __rmqueue_fallback()
    2834  steal_suitable_fallback(zone, page, alloc_flags, start_migratetype,  in __rmqueue_fallback()
    2846  int migratetype, unsigned int alloc_flags)  in __rmqueue_with_cma_reuse()
    2854  alloc_flags &= ~ALLOC_CMA;  in __rmqueue_with_cma_reuse()
    2859  __rmqueue_fallback(zone, order, migratetype, alloc_flags))  in __rmqueue_with_cma_reuse()
    2289  prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, unsigned int alloc_flags)  prep_new_page() argument
    2525  steal_suitable_fallback(struct zone *zone, struct page *page, unsigned int alloc_flags, int start_type, bool whole_block)  steal_suitable_fallback() argument
    2767  __rmqueue_fallback(struct zone *zone, int order, int start_migratetype, unsigned int alloc_flags)  __rmqueue_fallback() argument
    2845  __rmqueue_with_cma_reuse(struct zone *zone, unsigned int order, int migratetype, unsigned int alloc_flags)  __rmqueue_with_cma_reuse() argument
    2870  __rmqueue(struct zone *zone, unsigned int order, int migratetype, unsigned int alloc_flags)  __rmqueue() argument
    2915  rmqueue_bulk(struct zone *zone, unsigned int order, unsigned long count, struct list_head *list, int migratetype, unsigned int alloc_flags)  rmqueue_bulk() argument
    3407  __rmqueue_pcplist(struct zone *zone, int migratetype, unsigned int alloc_flags, struct per_cpu_pages *pcp, struct list_head *list)  __rmqueue_pcplist() argument
    3432  rmqueue_pcplist(struct zone *preferred_zone, struct zone *zone, gfp_t gfp_flags, int migratetype, unsigned int alloc_flags)  rmqueue_pcplist() argument
    3457  rmqueue(struct zone *preferred_zone, struct zone *zone, unsigned int order, gfp_t gfp_flags, unsigned int alloc_flags, int migratetype)  rmqueue() argument
    3601  __zone_watermark_unusable_free(struct zone *z, unsigned int order, unsigned int alloc_flags)  __zone_watermark_unusable_free() argument
    3630  __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, int highest_zoneidx, unsigned int alloc_flags, long free_pages)  __zone_watermark_ok() argument
    3694  zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, int highest_zoneidx, unsigned int alloc_flags)  zone_watermark_ok() argument
    3701  zone_watermark_fast(struct zone *z, unsigned int order, unsigned long mark, int highest_zoneidx, unsigned int alloc_flags, gfp_t gfp_mask)  zone_watermark_fast() argument
    3781  unsigned int alloc_flags;  alloc_flags_nofragment() local
    3810  current_alloc_flags(gfp_t gfp_mask, unsigned int alloc_flags)  current_alloc_flags() argument
    3829  get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, const struct alloc_context *ac)  get_page_from_freelist() argument
    4023  __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order, unsigned int alloc_flags, const struct alloc_context *ac)  __alloc_pages_cpuset_fallback() argument
    4137  __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, unsigned int alloc_flags, const struct alloc_context *ac, enum compact_priority prio, enum compact_result *compact_result)  __alloc_pages_direct_compact() argument
    4192  should_compact_retry(struct alloc_context *ac, int order, int alloc_flags, enum compact_result compact_result, enum compact_priority *compact_priority, int *compaction_retries)  should_compact_retry() argument
    4270  __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, unsigned int alloc_flags, const struct alloc_context *ac, enum compact_priority prio, enum compact_result *compact_result)  __alloc_pages_direct_compact() argument
    4279  should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags, enum compact_result compact_result, enum compact_priority *compact_priority, int *compaction_retries)  should_compact_retry() argument
    4411  __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, unsigned int alloc_flags, const struct alloc_context *ac, unsigned long *did_some_progress)  __alloc_pages_direct_reclaim() argument
    4465  unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;  gfp_to_alloc_flags() local
    4557  should_reclaim_retry(gfp_t gfp_mask, unsigned order, struct alloc_context *ac, int alloc_flags, bool did_some_progress, int *no_progress_loops)  should_reclaim_retry() argument
    4687  unsigned int alloc_flags;  __alloc_pages_slowpath() local
    4959  prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid, nodemask_t *nodemask, struct alloc_context *ac, gfp_t *alloc_mask, unsigned int *alloc_flags)  prepare_alloc_pages() argument
    5018  unsigned int alloc_flags = ALLOC_WMARK_LOW;  __alloc_pages_nodemask() local
    [all...]
compaction.c
    2079  unsigned int alloc_flags,  in __compaction_suitable()
    2088  watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);  in __compaction_suitable()
    2094  alloc_flags))  in __compaction_suitable()
    2100  * watermark and alloc_flags have to match, or be more pessimistic than  in __compaction_suitable()
    2102  * compactor's alloc_flags, as they are not relevant for freepage  in __compaction_suitable()
    2122  unsigned int alloc_flags,  in compaction_suitable()
    2128  ret = __compaction_suitable(zone, order, alloc_flags, highest_zoneidx,  in compaction_suitable()
    2160  int alloc_flags)  in compaction_zonelist_suitable()
    2182  compact_result = __compaction_suitable(zone, order, alloc_flags,  in compaction_zonelist_suitable()
    2213  ret = compaction_suitable(cc->zone, cc->order, cc->alloc_flags,  in compact_zone()
    2078  __compaction_suitable(struct zone *zone, int order, unsigned int alloc_flags, int highest_zoneidx, unsigned long wmark_target)  __compaction_suitable() argument
    2121  compaction_suitable(struct zone *zone, int order, unsigned int alloc_flags, int highest_zoneidx)  compaction_suitable() argument
    2159  compaction_zonelist_suitable(struct alloc_context *ac, int order, int alloc_flags)  compaction_zonelist_suitable() argument
    2403  compact_zone_order(struct zone *zone, int order, gfp_t gfp_mask, enum compact_priority prio, unsigned int alloc_flags, int highest_zoneidx, struct page **capture)  compact_zone_order() argument
    2465  try_to_compact_pages(gfp_t gfp_mask, unsigned int order, unsigned int alloc_flags, const struct alloc_context *ac, enum compact_priority prio, struct page **capture)  try_to_compact_pages() argument
    [all...]
/kernel/linux/linux-6.6/drivers/base/regmap/

regcache-maple.c
    77   map->alloc_flags);  in regcache_maple_write()
    95   ret = mas_store_gfp(&mas, entry, map->alloc_flags);  in regcache_maple_write()
    137  map->alloc_flags);  in regcache_maple_drop()
    151  map->alloc_flags);  in regcache_maple_drop()
    165  ret = mas_store_gfp(&mas, lower, map->alloc_flags);  in regcache_maple_drop()
    173  ret = mas_store_gfp(&mas, upper, map->alloc_flags);  in regcache_maple_drop()
    207  buf = kmalloc(val_bytes * (max - min), map->alloc_flags);  in regcache_maple_sync_block()
    323  entry = kcalloc(last - first + 1, sizeof(unsigned long), map->alloc_flags);  in regcache_maple_insert_block()
    334  ret = mas_store_gfp(&mas, entry, map->alloc_flags);  in regcache_maple_insert_block()

regcache-rbtree.c
    280  map->alloc_flags);  in regcache_rbtree_insert_to_block()
    289  map->alloc_flags);  in regcache_rbtree_insert_to_block()
    323  rbnode = kzalloc(sizeof(*rbnode), map->alloc_flags);  in regcache_rbtree_node_alloc()
    349  map->alloc_flags);  in regcache_rbtree_node_alloc()
    355  map->alloc_flags);  in regcache_rbtree_node_alloc()

internal.h
    65   gfp_t alloc_flags;  member
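Every regcache hit passes the same map->alloc_flags, the gfp_t member declared in internal.h, instead of a hard-coded GFP_KERNEL. The sketch below shows the reason for that indirection; the spinlock rule is an assumption about how the core picks the value (it is not visible in these hits), and the gfp_t/GFP_* definitions are stand-ins:

    /* Stand-in type and values; the kernel's gfp_t and GFP_* differ. */
    typedef unsigned int gfp_t;
    #define GFP_KERNEL 0x01u   /* allocation may sleep */
    #define GFP_ATOMIC 0x02u   /* allocation must not sleep */

    /* One gfp_t stored per map means every backend allocation, whether
     * rbtree kzalloc() or maple-tree mas_store_gfp(), inherits the
     * map's locking context automatically. */
    gfp_t pick_alloc_flags(int map_used_under_spinlock)
    {
            return map_used_under_spinlock ? GFP_ATOMIC : GFP_KERNEL;
    }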
/kernel/linux/linux-5.10/lib/

stackdepot.c
    105  u32 hash, void **prealloc, gfp_t alloc_flags)  in depot_alloc_stack()
    231  * @alloc_flags: Allocation gfp flags
    237  gfp_t alloc_flags)  in stack_depot_save()
    276  alloc_flags &= ~GFP_ZONEMASK;  in stack_depot_save()
    277  alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);  in stack_depot_save()
    278  alloc_flags |= __GFP_NOWARN;  in stack_depot_save()
    279  page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER);  in stack_depot_save()
    290  hash, &prealloc, alloc_flags);  in stack_depot_save()
    104  depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc, gfp_t alloc_flags)  depot_alloc_stack() argument
    235  stack_depot_save(unsigned long *entries, unsigned int nr_entries, gfp_t alloc_flags)  stack_depot_save() argument
/kernel/linux/linux-5.10/include/linux/

compaction.h
    94   unsigned int order, unsigned int alloc_flags,
    99   unsigned int alloc_flags, int highest_zoneidx);
    180  int alloc_flags);
    192  int alloc_flags, int highest_zoneidx)  in compaction_suitable()
    191  compaction_suitable(struct zone *zone, int order, int alloc_flags, int highest_zoneidx)  compaction_suitable() argument
/kernel/linux/linux-6.6/lib/

stackdepot.c
    360  gfp_t alloc_flags, bool can_alloc)  in __stack_depot_save()
    407  alloc_flags &= ~GFP_ZONEMASK;  in __stack_depot_save()
    408  alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);  in __stack_depot_save()
    409  alloc_flags |= __GFP_NOWARN;  in __stack_depot_save()
    410  page = alloc_pages(alloc_flags, DEPOT_POOL_ORDER);  in __stack_depot_save()
    454  gfp_t alloc_flags)  in stack_depot_save()
    456  return __stack_depot_save(entries, nr_entries, alloc_flags, true);  in stack_depot_save()
    358  __stack_depot_save(unsigned long *entries, unsigned int nr_entries, gfp_t alloc_flags, bool can_alloc)  __stack_depot_save() argument
    452  stack_depot_save(unsigned long *entries, unsigned int nr_entries, gfp_t alloc_flags)  stack_depot_save() argument
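Both the 5.10 and 6.6 stackdepot versions run the identical three-step gfp sanitisation before alloc_pages(). Lifted into a standalone helper (the GFP_* values here are illustrative stand-ins):

    typedef unsigned int gfp_t;    /* stand-ins; the kernel's values differ */
    #define GFP_ZONEMASK   0x0fu
    #define GFP_ATOMIC     0x10u
    #define GFP_KERNEL     0x20u
    #define __GFP_NOWARN   0x40u

    /* Zone modifiers make no sense for a depot pool, only the
     * atomic-versus-sleeping bits are kept, and __GFP_NOWARN keeps a
     * failed pool grow quiet since the depot can fall back to an
     * earlier preallocation. */
    gfp_t depot_gfp(gfp_t alloc_flags)
    {
            alloc_flags &= ~GFP_ZONEMASK;
            alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
            alloc_flags |= __GFP_NOWARN;
            return alloc_flags;
    }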
/kernel/linux/linux-6.6/include/linux/

compaction.h
    88   unsigned int order, unsigned int alloc_flags,
    99   int alloc_flags);
/kernel/linux/linux-6.6/fs/xfs/libxfs/

xfs_alloc.c
    1540  uint32_t alloc_flags)  in xfs_alloc_ag_vextent_near()
    1560  alloc_flags |= XFS_ALLOC_FLAG_TRYFLUSH;  in xfs_alloc_ag_vextent_near()
    1625  acur.busy_gen, alloc_flags);  in xfs_alloc_ag_vextent_near()
    1629  alloc_flags &= ~XFS_ALLOC_FLAG_TRYFLUSH;  in xfs_alloc_ag_vextent_near()
    1655  uint32_t alloc_flags)  in xfs_alloc_ag_vextent_size()
    1670  alloc_flags |= XFS_ALLOC_FLAG_TRYFLUSH;  in xfs_alloc_ag_vextent_size()
    1740  busy_gen, alloc_flags);  in xfs_alloc_ag_vextent_size()
    1744  alloc_flags &= ~XFS_ALLOC_FLAG_TRYFLUSH;  in xfs_alloc_ag_vextent_size()
    1835  busy_gen, alloc_flags);  in xfs_alloc_ag_vextent_size()
    1839  alloc_flags ...  in xfs_alloc_ag_vextent_size()
    1538  xfs_alloc_ag_vextent_near(struct xfs_alloc_arg *args, uint32_t alloc_flags)  xfs_alloc_ag_vextent_near() argument
    1653  xfs_alloc_ag_vextent_size(struct xfs_alloc_arg *args, uint32_t alloc_flags)  xfs_alloc_ag_vextent_size() argument
    2630  xfs_alloc_fix_freelist(struct xfs_alloc_arg *args, uint32_t alloc_flags)  xfs_alloc_fix_freelist() argument
    3331  xfs_alloc_vextent_prepare_ag(struct xfs_alloc_arg *args, uint32_t alloc_flags)  xfs_alloc_vextent_prepare_ag() argument
    3464  uint32_t alloc_flags = 0;  xfs_alloc_vextent_this_ag() local
    3509  xfs_alloc_vextent_iterate_ags(struct xfs_alloc_arg *args, xfs_agnumber_t minimum_agno, xfs_agnumber_t start_agno, xfs_agblock_t target_agbno, uint32_t alloc_flags)  xfs_alloc_vextent_iterate_ags() argument
    3590  uint32_t alloc_flags = XFS_ALLOC_FLAG_TRYLOCK;  xfs_alloc_vextent_start_ag() local
    3644  uint32_t alloc_flags = XFS_ALLOC_FLAG_TRYLOCK;  xfs_alloc_vextent_first_ag() local
    3716  uint32_t alloc_flags = 0;  xfs_alloc_vextent_near_bno() local
    [all...]

xfs_alloc.h
    199  int xfs_alloc_fix_freelist(struct xfs_alloc_arg *args, uint32_t alloc_flags);
/kernel/linux/linux-6.6/drivers/gpu/drm/amd/amdgpu/

amdgpu_amdkfd_gpuvm.c
    287   u32 alloc_flags = bo->kfd_bo->alloc_flags;  in amdgpu_amdkfd_release_notify()  local
    290   amdgpu_amdkfd_unreserve_mem_limit(adev, size, alloc_flags,  in amdgpu_amdkfd_release_notify()
    317   if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR)  in create_dmamap_sg_bo()
    469   if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)  in get_pte_flags()
    471   if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)  in get_pte_flags()
    514   mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?  in kfd_mem_dmamap_userptr()
    616   mmio = (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP);  in kfd_mem_dmamap_sg_bo()
    622   dir = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?  in kfd_mem_dmamap_sg_bo()
    681   mem->alloc_flags ...  in kfd_mem_dmaunmap_userptr()
    1646  u64 alloc_flags;  amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu() local
    [all...]
/kernel/linux/linux-5.10/drivers/base/regmap/

regcache-rbtree.c
    280  map->alloc_flags);  in regcache_rbtree_insert_to_block()
    289  map->alloc_flags);  in regcache_rbtree_insert_to_block()
    323  rbnode = kzalloc(sizeof(*rbnode), map->alloc_flags);  in regcache_rbtree_node_alloc()
    349  map->alloc_flags);  in regcache_rbtree_node_alloc()
    355  map->alloc_flags);  in regcache_rbtree_node_alloc()

internal.h
    60   gfp_t alloc_flags;  member
/kernel/linux/linux-5.10/drivers/md/

dm-zoned-reclaim.c
    286  int alloc_flags = DMZ_ALLOC_SEQ;  in dmz_reclaim_rnd_data()  local
    292  alloc_flags | DMZ_ALLOC_RECLAIM);  in dmz_reclaim_rnd_data()
    293  if (!szone && alloc_flags == DMZ_ALLOC_SEQ && dmz_nr_cache_zones(zmd)) {  in dmz_reclaim_rnd_data()
    294  alloc_flags = DMZ_ALLOC_RND;  in dmz_reclaim_rnd_data()
/kernel/linux/linux-6.6/drivers/md/

dm-zoned-reclaim.c
    286  int alloc_flags = DMZ_ALLOC_SEQ;  in dmz_reclaim_rnd_data()  local
    292  alloc_flags | DMZ_ALLOC_RECLAIM);  in dmz_reclaim_rnd_data()
    293  if (!szone && alloc_flags == DMZ_ALLOC_SEQ && dmz_nr_cache_zones(zmd)) {  in dmz_reclaim_rnd_data()
    294  alloc_flags = DMZ_ALLOC_RND;  in dmz_reclaim_rnd_data()
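Both kernel versions carry the same fallback in dmz_reclaim_rnd_data(): try to claim a sequential zone as the reclaim target, and downgrade to a random zone only when none is free but cache zones remain. A self-contained sketch of that control flow; alloc_zone() and nr_cache_zones() are hypothetical stubs standing in for dmz_alloc_zone() and dmz_nr_cache_zones():

    #include <stddef.h>

    #define DMZ_ALLOC_RND      0x01   /* illustrative values */
    #define DMZ_ALLOC_SEQ      0x02
    #define DMZ_ALLOC_RECLAIM  0x10

    struct dmz_zone_sketch { int id; };

    /* Hypothetical stubs: a real implementation would search the
     * metadata's free lists and count the cache zones. */
    static struct dmz_zone_sketch *alloc_zone(int flags) { (void)flags; return NULL; }
    static int nr_cache_zones(void) { return 1; }

    struct dmz_zone_sketch *pick_reclaim_zone(void)
    {
            int alloc_flags = DMZ_ALLOC_SEQ;
            struct dmz_zone_sketch *szone;

            szone = alloc_zone(alloc_flags | DMZ_ALLOC_RECLAIM);
            if (!szone && alloc_flags == DMZ_ALLOC_SEQ && nr_cache_zones()) {
                    alloc_flags = DMZ_ALLOC_RND;
                    szone = alloc_zone(alloc_flags | DMZ_ALLOC_RECLAIM);
            }
            return szone;
    }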
/kernel/linux/linux-5.10/drivers/gpu/drm/amd/amdgpu/

amdgpu_amdkfd_gpuvm.c
    393   bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT;  in get_pte_flags()
    397   if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)  in get_pte_flags()
    399   if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)  in get_pte_flags()
    404   if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {  in get_pte_flags()
    1149  u64 alloc_flags;  in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()  local
    1157  alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;  in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
    1158  alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?  in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
    1163  alloc_flags = 0;  in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
    1167  alloc_flags = 0;  in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
    1176  alloc_flags ...  in amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu()
    [all...]

amdgpu_amdkfd.h
    61   uint32_t alloc_flags;  member
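In both trees the KFD ioctl flags stored in mem->alloc_flags are consumed twice: get_pte_flags() turns them into GPU page-table permissions, and amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu() turns them into GEM creation flags. A sketch of the first translation; the bit values below are illustrative, not the real uapi/amdgpu constants:

    #include <stdint.h>

    #define KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE    (1u << 0)   /* stand-in values */
    #define KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE  (1u << 1)
    #define AMDGPU_VM_PAGE_READABLE             (1ull << 0)
    #define AMDGPU_VM_PAGE_WRITEABLE            (1ull << 1)
    #define AMDGPU_VM_PAGE_EXECUTABLE           (1ull << 2)

    /* Shape of get_pte_flags(): every mapping is readable, and the
     * permission flags the user passed at allocation time toggle the
     * matching page-table bits. */
    uint64_t pte_flags_from(uint32_t alloc_flags)
    {
            uint64_t mapping_flags = AMDGPU_VM_PAGE_READABLE;

            if (alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
                    mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
            if (alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
                    mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
            return mapping_flags;
    }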
/kernel/linux/linux-6.6/fs/xfs/

xfs_extent_busy.h
    74   unsigned busy_gen, uint32_t alloc_flags);

xfs_extent_busy.c
    609  uint32_t alloc_flags)  in xfs_extent_busy_flush()
    620  if (alloc_flags & XFS_ALLOC_FLAG_TRYFLUSH)  in xfs_extent_busy_flush()
    626  if (alloc_flags & XFS_ALLOC_FLAG_FREEING)  in xfs_extent_busy_flush()
    605  xfs_extent_busy_flush(struct xfs_trans *tp, struct xfs_perag *pag, unsigned busy_gen, uint32_t alloc_flags)  xfs_extent_busy_flush() argument
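Read together with the xfs_alloc.c hits above, these show a two-pass handshake: the allocator first calls xfs_extent_busy_flush() with XFS_ALLOC_FLAG_TRYFLUSH set, so the flush backs off instead of blocking, then clears the bit and retries once the AG really has no unbusy space. A sketch under those assumptions; the flag values and the -EAGAIN convention for FREEING callers are inferred from the hit list, not copied from the source:

    #include <errno.h>
    #include <stdint.h>

    #define XFS_ALLOC_FLAG_FREEING   (1u << 1)   /* illustrative values */
    #define XFS_ALLOC_FLAG_TRYFLUSH  (1u << 2)

    static int busy_flush(uint32_t alloc_flags)
    {
            if (alloc_flags & XFS_ALLOC_FLAG_TRYFLUSH)
                    return 0;          /* opportunistic pass: do not block */
            if (alloc_flags & XFS_ALLOC_FLAG_FREEING)
                    return -EAGAIN;    /* assumed: freeing callers back out */
            /* a real implementation would force the log and wait here */
            return 0;
    }

    int alloc_retry_sketch(void)
    {
            uint32_t alloc_flags = XFS_ALLOC_FLAG_TRYFLUSH;

            busy_flush(alloc_flags);                  /* first, non-blocking pass */
            alloc_flags &= ~XFS_ALLOC_FLAG_TRYFLUSH;  /* as at lines 1629 and 1744 */
            return busy_flush(alloc_flags);           /* the retry may block */
    }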
/kernel/linux/linux-6.6/drivers/gpu/drm/amd/amdkfd/

kfd_chardev.c
    1932  bo_bucket->alloc_flags = (uint32_t)kgd_mem->alloc_flags;  in criu_checkpoint_bos()
    1935  if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {  in criu_checkpoint_bos()
    1943  if (bo_bucket->alloc_flags ...  in criu_checkpoint_bos()
    1946  bo_bucket->alloc_flags & ...  in criu_checkpoint_bos()
    1955  if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL)  in criu_checkpoint_bos()
    1958  else if (bo_bucket->alloc_flags & ...  in criu_checkpoint_bos()
    1971  "gpu_id = 0x%x alloc_flags = 0x%x idr_handle = 0x%x",  in criu_checkpoint_bos()
    1976  bo_bucket->alloc_flags,  in criu_checkpoint_bos()
    2000  if (bo_buckets[bo_index].alloc_flags ...  in criu_checkpoint_bos()
    [all...]
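The CRIU checkpoint path snapshots each buffer object's alloc_flags and lets the set bits decide which extra state (user VA, doorbell offset) must be saved for restore. A toy version of that dispatch; the struct layout and flag values are invented for illustration:

    #include <stdint.h>

    #define KFD_IOC_ALLOC_MEM_FLAGS_USERPTR   (1u << 0)   /* stand-in values */
    #define KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL  (1u << 1)

    struct bo_bucket_sketch {
            uint32_t alloc_flags;   /* snapshot of kgd_mem->alloc_flags */
            uint64_t user_addr;     /* filled for userptr BOs */
            uint64_t offset;        /* filled for doorbell BOs */
    };

    /* Shape of the criu_checkpoint_bos() dispatch: the saved flags tell
     * the restore side which of these fields are meaningful. */
    void checkpoint_bo(struct bo_bucket_sketch *b, uint64_t cpu_addr,
                       uint64_t doorbell_off)
    {
            if (b->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR)
                    b->user_addr = cpu_addr;
            if (b->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL)
                    b->offset = doorbell_off;
    }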
/kernel/linux/linux-5.10/fs/btrfs/

block-group.c
    2242  u64 alloc_flags;  in btrfs_inc_block_group_ro()  local
    2273  alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags);  in btrfs_inc_block_group_ro()
    2274  if (alloc_flags != cache->flags) {  in btrfs_inc_block_group_ro()
    2275  ret = btrfs_chunk_alloc(trans, alloc_flags,  in btrfs_inc_block_group_ro()
    2303  alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags);  in btrfs_inc_block_group_ro()
    2304  ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);  in btrfs_inc_block_group_ro()
    2312  alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags);  in btrfs_inc_block_group_ro()
    2314  check_system_chunk(trans, alloc_flags);  in btrfs_inc_block_group_ro()
    3092  u64 alloc_flags = btrfs_get_alloc_profile(trans->fs_info, type);  in btrfs_force_chunk_alloc()  local
    3094  return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);  in btrfs_force_chunk_alloc()
    [all...]
/kernel/linux/linux-6.6/fs/btrfs/

block-group.c
    2889  u64 alloc_flags;  in btrfs_inc_block_group_ro()  local
    2937  alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags);  in btrfs_inc_block_group_ro()
    2938  if (alloc_flags != cache->flags) {  in btrfs_inc_block_group_ro()
    2939  ret = btrfs_chunk_alloc(trans, alloc_flags,  in btrfs_inc_block_group_ro()
    2967  alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags);  in btrfs_inc_block_group_ro()
    2968  ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);  in btrfs_inc_block_group_ro()
    2984  alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags);  in btrfs_inc_block_group_ro()
    2986  check_system_chunk(trans, alloc_flags);  in btrfs_inc_block_group_ro()
    3828  u64 alloc_flags = btrfs_get_alloc_profile(trans->fs_info, type);  in btrfs_force_chunk_alloc()  local
    3830  return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);  in btrfs_force_chunk_alloc()
    [all...]
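In both btrfs versions the pattern is the same: re-derive the target profile bits with btrfs_get_alloc_profile(), allocate a new chunk only when they differ from the block group's cached flags, and pass CHUNK_ALLOC_FORCE where a chunk must exist regardless. A minimal sketch with stubs standing in for the two btrfs calls:

    #include <stdint.h>

    #define PROFILE_MASK_SKETCH 0xffull   /* invented "profile bits" mask */
    enum { CHUNK_ALLOC_NO_FORCE, CHUNK_ALLOC_LIMITED, CHUNK_ALLOC_FORCE };

    /* Hypothetical stubs for btrfs_get_alloc_profile() and
     * btrfs_chunk_alloc(); only the control flow mirrors the hits. */
    static uint64_t get_alloc_profile(uint64_t flags)
    {
            return flags & PROFILE_MASK_SKETCH;
    }

    static int chunk_alloc(uint64_t alloc_flags, int force)
    {
            (void)alloc_flags;
            (void)force;
            return 0;
    }

    /* Shape of btrfs_inc_block_group_ro(): allocate a fresh chunk only
     * if the cached flags went stale; btrfs_force_chunk_alloc() instead
     * always passes CHUNK_ALLOC_FORCE. */
    int inc_block_group_ro_sketch(uint64_t cache_flags)
    {
            uint64_t alloc_flags = get_alloc_profile(cache_flags);

            if (alloc_flags != cache_flags)
                    return chunk_alloc(alloc_flags, CHUNK_ALLOC_NO_FORCE);
            return 0;
    }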