/kernel/linux/linux-6.6/drivers/iommu/amd/

io_pgtable.c
     77  static void free_pt_page(u64 *pt, struct list_head *freelist)  in free_pt_page() (argument)
     81  list_add_tail(&p->lru, freelist);  in free_pt_page()
     84  static void free_pt_lvl(u64 *pt, struct list_head *freelist, int lvl)  in free_pt_lvl() (argument)
    105  free_pt_lvl(p, freelist, lvl - 1);  in free_pt_lvl()
    107  free_pt_page(p, freelist);  in free_pt_lvl()
    110  free_pt_page(pt, freelist);  in free_pt_lvl()
    113  static void free_sub_pt(u64 *root, int mode, struct list_head *freelist)  in free_sub_pt() (argument)
    120  free_pt_page(root, freelist);  in free_sub_pt()
    127  free_pt_lvl(root, freelist, mode);  in free_sub_pt()
    339  static void free_clear_pte(u64 *pte, u64 pteval, struct list_head *freelist)  in free_clear_pte() (argument)
    [remaining matches elided]
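
These matches are the 6.6 AMD IOMMU page-table teardown: free_pt_lvl() descends the table levels recursively while free_pt_page() parks every table page on a caller-supplied freelist, so the pages can be freed in one batch only after the IOTLB has been flushed. A minimal userspace sketch of the same defer-and-batch pattern (the types and the external link nodes are illustrative, not the driver's):

#include <stdlib.h>

struct pt_page {
    struct pt_page *next;   /* stand-in for the list_head linkage */
    void *table;            /* the page-table page being retired */
};

/* Park one table page on the freelist instead of freeing it now. */
static void free_pt_page(void *table, struct pt_page **freelist)
{
    struct pt_page *p = malloc(sizeof(*p));
    p->table = table;
    p->next = *freelist;
    *freelist = p;
}

/* Recursively collect a table and every lower-level table it points to. */
static void free_pt_lvl(void **table, int entries, int lvl,
                        struct pt_page **freelist)
{
    for (int i = 0; i < entries; i++) {
        if (!table[i])
            continue;
        if (lvl > 1)
            free_pt_lvl((void **)table[i], entries, lvl - 1, freelist);
        else
            free_pt_page(table[i], freelist);
    }
    free_pt_page(table, freelist);   /* finally retire this table itself */
}

/* After the (hypothetical) TLB flush, drain the whole batch at once. */
static void put_pages(struct pt_page *freelist)
{
    while (freelist) {
        struct pt_page *p = freelist;
        freelist = p->next;
        free(p);
    }
}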

/kernel/linux/linux-5.10/kernel/bpf/

percpu_freelist.c
     10  s->freelist = alloc_percpu(struct pcpu_freelist_head);  in pcpu_freelist_init()
     11  if (!s->freelist)  in pcpu_freelist_init()
     15  struct pcpu_freelist_head *head = per_cpu_ptr(s->freelist, cpu);  in pcpu_freelist_init()
     27  free_percpu(s->freelist);  in pcpu_freelist_destroy()
     65  head = per_cpu_ptr(s->freelist, cpu);  in ___pcpu_freelist_push_nmi()
     88  ___pcpu_freelist_push(this_cpu_ptr(s->freelist), node);  in __pcpu_freelist_push()
    112  head = per_cpu_ptr(s->freelist, cpu);  in pcpu_freelist_populate()
    131  head = per_cpu_ptr(s->freelist, cpu);  in ___pcpu_freelist_pop()
    165  head = per_cpu_ptr(s->freelist, cpu);  in ___pcpu_freelist_pop_nmi()

stackmap.c
     30  struct pcpu_freelist freelist;  (member)
     76  err = pcpu_freelist_init(&smap->freelist);  in prealloc_elems_and_freelist()
     80  pcpu_freelist_populate(&smap->freelist, smap->elems, elem_size, …  in prealloc_elems_and_freelist()
    430  pcpu_freelist_pop(&smap->freelist);  in __bpf_get_stackid()
    440  pcpu_freelist_push(&smap->freelist, &new_bucket->fnode);  in __bpf_get_stackid()
    444  pcpu_freelist_push(&smap->freelist, &new_bucket->fnode);  in __bpf_get_stackid()
    455  pcpu_freelist_pop(&smap->freelist);  in __bpf_get_stackid()
    466  pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);  in __bpf_get_stackid()
    786  pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);  in bpf_stackmap_copy()
    837  pcpu_freelist_push(&smap->freelist, …  in stack_map_delete_elem()
    [remaining matches elided]
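
Together these two files implement BPF's preallocated stack-map storage: every bucket is allocated up front and seeded into a per-CPU freelist (pcpu_freelist_populate()), and __bpf_get_stackid() later pops a bucket and pushes evicted ones back. Pop prefers the caller's own CPU and only falls back to scanning the other CPUs, which keeps the common path contention-free. A toy pthread rendition of that push/pop shape (the spinlock and all names here are illustrative; this is not the kernel API):

#include <pthread.h>

#define NR_CPUS 4

struct node { struct node *next; };

/* One freelist head per "CPU", each with its own lock, mirroring
 * struct pcpu_freelist_head. */
struct freelist_head {
    struct node *first;
    pthread_spinlock_t lock;
};

static struct freelist_head heads[NR_CPUS];

static void fl_init(void)
{
    for (int i = 0; i < NR_CPUS; i++)
        pthread_spin_init(&heads[i].lock, PTHREAD_PROCESS_PRIVATE);
}

/* Push onto the caller's own list, like __pcpu_freelist_push(). */
static void fl_push(int cpu, struct node *n)
{
    struct freelist_head *h = &heads[cpu];

    pthread_spin_lock(&h->lock);
    n->next = h->first;
    h->first = n;
    pthread_spin_unlock(&h->lock);
}

/* Pop from the local list first; on failure scan the other CPUs,
 * mirroring ___pcpu_freelist_pop()'s fallback loop. */
static struct node *fl_pop(int cpu)
{
    for (int i = 0; i < NR_CPUS; i++) {
        struct freelist_head *h = &heads[(cpu + i) % NR_CPUS];
        struct node *n;

        pthread_spin_lock(&h->lock);
        n = h->first;
        if (n)
            h->first = n->next;
        pthread_spin_unlock(&h->lock);
        if (n)
            return n;
    }
    return NULL;
}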

/kernel/linux/linux-6.6/kernel/bpf/

percpu_freelist.c
     10  s->freelist = alloc_percpu(struct pcpu_freelist_head);  in pcpu_freelist_init()
     11  if (!s->freelist)  in pcpu_freelist_init()
     15  struct pcpu_freelist_head *head = per_cpu_ptr(s->freelist, cpu);  in pcpu_freelist_init()
     27  free_percpu(s->freelist);  in pcpu_freelist_destroy()
     66  head = per_cpu_ptr(s->freelist, cpu);  in ___pcpu_freelist_push_nmi()
     86  ___pcpu_freelist_push(this_cpu_ptr(s->freelist), node);  in __pcpu_freelist_push()
    110  head = per_cpu_ptr(s->freelist, cpu);  in pcpu_freelist_populate()
    128  head = per_cpu_ptr(s->freelist, cpu);  in ___pcpu_freelist_pop()
    160  head = per_cpu_ptr(s->freelist, cpu);  in ___pcpu_freelist_pop_nmi()

stackmap.c
     29  struct pcpu_freelist freelist;  (member)
     56  err = pcpu_freelist_init(&smap->freelist);  in prealloc_elems_and_freelist()
     60  pcpu_freelist_populate(&smap->freelist, smap->elems, elem_size, …  in prealloc_elems_and_freelist()
    243  pcpu_freelist_pop(&smap->freelist);  in __bpf_get_stackid()
    253  pcpu_freelist_push(&smap->freelist, &new_bucket->fnode);  in __bpf_get_stackid()
    257  pcpu_freelist_push(&smap->freelist, &new_bucket->fnode);  in __bpf_get_stackid()
    268  pcpu_freelist_pop(&smap->freelist);  in __bpf_get_stackid()
    279  pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);  in __bpf_get_stackid()
    597  pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);  in bpf_stackmap_copy()
    648  pcpu_freelist_push(&smap->freelist, …  in stack_map_delete_elem()
    [remaining matches elided]

/kernel/linux/linux-6.6/mm/

slub.c
     72  * A. slab->freelist -> List of free objects in a slab
     82  * processors may put objects onto the freelist but the processor that
     84  * slab's freelist.
    113  * taken but it still utilizes the freelist for the common operations.
    160  * freelist that allows lockless access to
    161  * free objects in addition to the regular freelist
    364  * freeptr_t represents a SLUB freelist pointer, which might be encoded
    370  * Returns freelist pointer (ptr). With hardening, this is obfuscated
    421  * memory chunk in the freelist. In that case this_cpu_cmpxchg_double() …
    534  freelist_aba_t old = { .freelist …  in __update_freelist_fast()
    995  freelist_corrupted(struct kmem_cache *s, struct slab *slab, void **freelist, void *nextfree)  freelist_corrupted() (argument)
   1754  freelist_corrupted(struct kmem_cache *s, struct slab *slab, void **freelist, void *nextfree)  freelist_corrupted() (argument)
   2226  void *freelist;  acquire_slab() (local)
   2492  deactivate_slab(struct kmem_cache *s, struct slab *slab, void *freelist)  deactivate_slab() (argument)
   2748  void *freelist;  flush_slab() (local)
   2770  void *freelist = c->freelist;  __flush_cpu_slab() (local)
   3054  void *freelist;  get_freelist() (local)
   3098  void *freelist;  ___slab_alloc() (local)
   3741  void **freelist;  do_slab_free() (local)
   3838  void *freelist;  (struct member)
    [remaining matches elided]
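
The comments at lines 364-370 refer to SLUB's hardened freelist pointers: the "next free" pointer stored inside each free object is XORed with a per-cache random value and the byte-swapped address of the storage slot, so a heap over-read cannot leak usable freelist pointers. A standalone sketch of that encode/decode (the constant and helper names are illustrative; the scheme itself is the documented freelist_ptr() hardening):

#include <stdint.h>
#include <stdio.h>

/* Byte-swap helper standing in for the kernel's swab64(). */
static uint64_t swab64(uint64_t x) { return __builtin_bswap64(x); }

/* Per-cache secret; the kernel draws this at cache-creation time. */
static const uint64_t cache_random = 0x9e3779b97f4a7c15ull;

/* Encode the freelist pointer before storing it inside the free object. */
static uint64_t freelist_ptr_encode(uint64_t ptr, uint64_t ptr_addr)
{
    return ptr ^ cache_random ^ swab64(ptr_addr);
}

/* Decoding applies the same XOR mask again. */
static uint64_t freelist_ptr_decode(uint64_t stored, uint64_t ptr_addr)
{
    return stored ^ cache_random ^ swab64(ptr_addr);
}

int main(void)
{
    uint64_t obj_next  = 0xffff888012345678ull;  /* next free object */
    uint64_t slot_addr = 0xffff888012340000ull;  /* where the pointer lives */

    uint64_t stored = freelist_ptr_encode(obj_next, slot_addr);
    printf("stored   %#llx\n", (unsigned long long)stored);
    printf("decoded  %#llx\n",
           (unsigned long long)freelist_ptr_decode(stored, slot_addr));
    return 0;
}

Mixing in the swabbed storage address means two identical pointers stored at different addresses encode differently, which defeats freelist-pointer spraying as well as plain leaks.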

compaction.c
     69  static unsigned long release_freepages(struct list_head *freelist)  in release_freepages() (argument)
     74  list_for_each_entry_safe(page, next, freelist, lru) {  in release_freepages()
    580  * Isolate free pages onto a private freelist. If @strict is true, will abort
    587  struct list_head *freelist,  in isolate_freepages_block()
    660  list_add_tail(&page->lru, freelist);  in isolate_freepages_block()
    726  LIST_HEAD(freelist);  in isolate_freepages_range()
    757  block_end_pfn, &freelist, 0, true);  in isolate_freepages_range()
    775  split_map_pages(&freelist);  in isolate_freepages_range()
    779  release_freepages(&freelist);  in isolate_freepages_range()
   1394  move_freelist_head(struct list_head *freelist, struc…  (argument)
    584  isolate_freepages_block(struct compact_control *cc, unsigned long *start_pfn, unsigned long end_pfn, struct list_head *freelist, unsigned int stride, bool strict)  isolate_freepages_block() (argument)
   1411  move_freelist_tail(struct list_head *freelist, struct page *freepage)  move_freelist_tail() (argument)
   1513  struct list_head *freelist;  fast_isolate_freepages() (local)
   1646  struct list_head *freelist = &cc->freepages;  isolate_freepages() (local)
   1906  struct list_head *freelist;  fast_find_migrateblock() (local)
    [remaining matches elided]
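
During compaction, isolate_freepages_block() detaches buddy pages onto a private list_head (a LIST_HEAD on the caller's stack, or cc->freepages), and if the walk aborts, release_freepages() safely traverses that private list and returns every page. A self-contained sketch of the safe-walk-and-unlink idiom with a minimal <linux/list.h>-style list (the types are stand-ins, not the kernel's):

#include <stddef.h>
#include <stdlib.h>

/* Minimal circular doubly linked list in the style of <linux/list.h>. */
struct list_head { struct list_head *prev, *next; };

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
    n->prev = h->prev;
    n->next = h;
    h->prev->next = n;
    h->prev = n;
}

static void list_del(struct list_head *n)
{
    n->prev->next = n->next;
    n->next->prev = n->prev;
}

struct page { struct list_head lru; /* ... */ };

/* Mirror of release_freepages(): grab the successor before unlinking
 * so the walk survives the deletion, then hand each page back. */
static unsigned long release_freepages(struct list_head *freelist)
{
    unsigned long count = 0;
    struct list_head *pos = freelist->next;

    while (pos != freelist) {
        struct list_head *next = pos->next;
        struct page *page = (struct page *)((char *)pos - offsetof(struct page, lru));

        list_del(&page->lru);
        free(page);              /* stand-in for __free_page() */
        count++;
        pos = next;
    }
    return count;
}

Isolation itself is just list_add_tail(&page->lru, freelist) onto a list the caller owns, which is what makes the whole batch trivially revocable.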

/kernel/linux/linux-5.10/mm/

slub.c
     56  * A. page->freelist -> List of object free in a page
     64  * processors may put objects onto the freelist but the processor that
     66  * page's freelist.
    109  * freelist that allows lockless access to
    110  * free objects in addition to the regular freelist
    244  * Returns freelist pointer (ptr). With hardening, this is obfuscated
    269  /* Returns the freelist pointer recorded at location ptr_addr. */
    367  if (cmpxchg_double(&page->freelist, &page->counters, …  in __cmpxchg_double_slab()
    375  if (page->freelist == freelist_old &&  in __cmpxchg_double_slab()
    377  page->freelist …  in __cmpxchg_double_slab()
    675  freelist_corrupted(struct kmem_cache *s, struct page *page, void **freelist, void *nextfree)  freelist_corrupted() (argument)
   1493  freelist_corrupted(struct kmem_cache *s, struct page *page, void **freelist, void *nextfree)  freelist_corrupted() (argument)
   1914  void *freelist;  acquire_slab() (local)
   2157  deactivate_slab(struct kmem_cache *s, struct page *page, void *freelist, struct kmem_cache_cpu *c)  deactivate_slab() (argument)
   2574  void *freelist;  new_slab_objects() (local)
   2628  void *freelist;  get_freelist() (local)
   2670  void *freelist;  ___slab_alloc() (local)
   3126  void **freelist = READ_ONCE(c->freelist);  do_slab_free() (local)
   3176  void *freelist;  (struct member)
    [remaining matches elided]

slob.c
     53  * Node aware pages are still inserted in to the global freelist, and
     56  * the freelist will only be done so on pages residing on the same node,
    234  * freelist, in this case @page_removed_from_list will be set to …
    244  for (prev = NULL, cur = sp->freelist; ; prev = cur, cur = slob_next(cur)) {  in slob_page_alloc()
    277  sp->freelist = next;  in slob_page_alloc()
    282  sp->freelist = cur + units;  in slob_page_alloc()
    324  * page with a matching node id in the freelist.  in slob_alloc()
    366  sp->freelist = b;  in slob_alloc()
    413  sp->freelist = b;  in slob_free()
    433  if (b < (slob_t *)sp->freelist) {  in slob_free()
    [remaining matches elided]
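
SLOB keeps a freelist of variable-sized blocks inside each page: slob_page_alloc() walks it first-fit, and an oversized block is split, with the tail (cur + units) staying on the list. A toy first-fit allocator showing that walk-and-split (the block layout and unit size are illustrative, not SLOB's slob_t encoding):

#include <stddef.h>

/* Toy first-fit allocator in the spirit of slob_page_alloc(): free
 * blocks form a singly linked list; an oversized block is split and
 * the tail stays on the list. One "unit" is sizeof(struct block). */
struct block {
    size_t units;          /* size of this free block, in units */
    struct block *next;    /* next free block (cf. slob_next()) */
};

static struct block arena[256];
static struct block *freelist;

static void toy_init(void)
{
    freelist = arena;
    freelist->units = 256;
    freelist->next = NULL;
}

static void *toy_alloc(size_t units)
{
    struct block **prev = &freelist;

    for (struct block *cur = freelist; cur; prev = &cur->next, cur = cur->next) {
        if (cur->units < units)
            continue;
        if (cur->units == units) {
            *prev = cur->next;                 /* exact fit: unlink */
        } else {
            struct block *rest = cur + units;  /* split: keep the tail free */
            rest->units = cur->units - units;
            rest->next = cur->next;
            *prev = rest;
        }
        return cur;
    }
    return NULL;   /* no fit here; SLOB would move on to another page */
}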

compaction.c
     71  static unsigned long release_freepages(struct list_head *freelist)  in release_freepages() (argument)
     76  list_for_each_entry_safe(page, next, freelist, lru) {  in release_freepages()
    549  * Isolate free pages onto a private freelist. If @strict is true, will abort
    556  struct list_head *freelist,  in isolate_freepages_block()
    636  list_add_tail(&page->lru, freelist);  in isolate_freepages_block()
    704  LIST_HEAD(freelist);  in isolate_freepages_range()
    736  block_end_pfn, &freelist, 0, true);  in isolate_freepages_range()
    754  split_map_pages(&freelist);  in isolate_freepages_range()
    758  release_freepages(&freelist);  in isolate_freepages_range()
   1218  move_freelist_head(struct list_head *freelist, struc…  (argument)
    553  isolate_freepages_block(struct compact_control *cc, unsigned long *start_pfn, unsigned long end_pfn, struct list_head *freelist, unsigned int stride, bool strict)  isolate_freepages_block() (argument)
   1236  move_freelist_tail(struct list_head *freelist, struct page *freepage)  move_freelist_tail() (argument)
   1342  struct list_head *freelist;  fast_isolate_freepages() (local)
   1467  struct list_head *freelist = &cc->freepages;  isolate_freepages() (local)
   1709  struct list_head *freelist;  fast_find_migrateblock() (local)
    [remaining matches elided]

slab.c
    414  * We don't need to consider alignment of freelist because  in cache_estimate()
    415  * freelist will be at the end of slab page. The objects will be  in cache_estimate()
   1099  * Drains freelist for a node on each slab cache, used for memory hot-remove.
   1581  poison_obj(cachep, page->freelist - obj_offset(cachep), …  in slab_destroy_debugcheck()
   1618  void *freelist;  in slab_destroy() (local)
   1620  freelist = page->freelist;  in slab_destroy()
   1628  * From now on, we don't use freelist  in slab_destroy()
   1632  kmem_cache_free(cachep->freelist_cache, freelist);  in slab_destroy()
   1824  * If slab auto-initialization on free is enabled, store the freelist …  in set_objfreelist_slab_cache()
   2295  void *freelist;  alloc_slabmgmt() (local)
   2552  slab_map_pages(struct kmem_cache *cache, struct page *page, void *freelist)  slab_map_pages() (argument)
   2566  void *freelist;  cache_grow_begin() (local)
    [remaining matches elided]
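
SLAB (since removed from the kernel) tracks free objects per slab with an index array, its "freelist", stored either at the end of the slab page or in a separate freelist_cache allocation; that is why slab_destroy() saves the pointer and frees the management object last. A sketch of the index-array bookkeeping (layout and sizes are illustrative, not the kernel's exact structures):

#include <stdint.h>

#define OBJS_PER_SLAB 8
#define OBJ_SIZE 64

/* Index-array freelist in the style of SLAB management data:
 * freelist[active..] holds the indices of still-free objects. */
struct toy_slab {
    unsigned int active;                 /* objects currently in use */
    uint8_t freelist[OBJS_PER_SLAB];     /* freelist_idx_t in SLAB */
    char objs[OBJS_PER_SLAB][OBJ_SIZE];
};

static void slab_init(struct toy_slab *s)
{
    s->active = 0;
    for (unsigned int i = 0; i < OBJS_PER_SLAB; i++)
        s->freelist[i] = (uint8_t)i;
}

/* Mirrors slab_get_obj(): take the index at position "active". */
static void *slab_get_obj(struct toy_slab *s)
{
    if (s->active == OBJS_PER_SLAB)
        return NULL;
    return s->objs[s->freelist[s->active++]];
}

/* Mirrors slab_put_obj(): record the freed index below "active". */
static void slab_put_obj(struct toy_slab *s, void *obj)
{
    unsigned int idx = (unsigned int)(((char *)obj - (char *)s->objs) / OBJ_SIZE);
    s->freelist[--s->active] = (uint8_t)idx;
}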

/kernel/linux/linux-6.6/drivers/scsi/elx/efct/

efct_io.c
     16  struct list_head freelist;  (member)
     33  INIT_LIST_HEAD(&io_pool->freelist);  in efct_io_pool_create()
     69  list_add_tail(&io->list_entry, &io_pool->freelist);  in efct_io_pool_create()
    115  if (!list_empty(&io_pool->freelist)) {  in efct_io_pool_io_alloc()
    116  io = list_first_entry(&io_pool->freelist, struct efct_io, …  in efct_io_pool_io_alloc()
    160  list_add(&io->list_entry, &io_pool->freelist);  in efct_io_pool_io_free()
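
This is a textbook object pool: efct preallocates every IO context at pool-create time and parks them all on a freelist, after which allocation and free are constant-time list operations under the pool lock. The same shape in portable C (a singly linked stack with a pthread mutex standing in for the driver's lock; names are illustrative):

#include <pthread.h>
#include <stdlib.h>

/* Object pool in the style of efct_io_pool: everything is allocated
 * up front; alloc/free just unlink/relink under a lock. */
struct io {
    struct io *next;
    /* ... per-IO state ... */
};

struct io_pool {
    pthread_mutex_t lock;
    struct io *freelist;
};

static struct io_pool *io_pool_create(unsigned int num_ios)
{
    struct io_pool *p = calloc(1, sizeof(*p));

    pthread_mutex_init(&p->lock, NULL);
    for (unsigned int i = 0; i < num_ios; i++) {
        struct io *io = calloc(1, sizeof(*io));
        io->next = p->freelist;
        p->freelist = io;
    }
    return p;
}

static struct io *io_alloc(struct io_pool *p)
{
    pthread_mutex_lock(&p->lock);
    struct io *io = p->freelist;
    if (io)
        p->freelist = io->next;
    pthread_mutex_unlock(&p->lock);
    return io;   /* NULL means the pool is exhausted */
}

static void io_free(struct io_pool *p, struct io *io)
{
    pthread_mutex_lock(&p->lock);
    io->next = p->freelist;
    p->freelist = io;
    pthread_mutex_unlock(&p->lock);
}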

/kernel/linux/linux-6.6/drivers/net/ethernet/mellanox/mlx5/core/en/

ptp.h
    113  struct mlx5e_ptp_metadata_fifo *freelist;  in mlx5e_ptpsq_metadata_freelist_empty() (local)
    118  freelist = &ptpsq->metadata_freelist;  in mlx5e_ptpsq_metadata_freelist_empty()
    120  return freelist->pc == freelist->cc;  in mlx5e_ptpsq_metadata_freelist_empty()
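
Here the freelist is a counter-based ring: pc and cc are free-running producer and consumer counters, so the FIFO is empty exactly when pc == cc and the fill level is pc - cc, with unsigned wraparound doing the right thing. A generic sketch of that idiom (sizes and field widths are illustrative, not mlx5's actual struct):

#include <stdint.h>

#define FIFO_SIZE 256   /* must be a power of two for the mask trick */

/* Free-running counters: pc counts pushes, cc counts pops. They are
 * never reset, so (uint16_t)(pc - cc) is the fill level even across
 * wraparound of the 16-bit counters. */
struct metadata_fifo {
    uint16_t pc, cc;
    uint8_t data[FIFO_SIZE];
};

static int fifo_empty(const struct metadata_fifo *f)
{
    return f->pc == f->cc;
}

static void fifo_push(struct metadata_fifo *f, uint8_t v)
{
    f->data[f->pc++ & (FIFO_SIZE - 1)] = v;
}

static uint8_t fifo_pop(struct metadata_fifo *f)
{
    return f->data[f->cc++ & (FIFO_SIZE - 1)];
}

The mask works because FIFO_SIZE divides the counter period, so the slot index stays consistent across counter wraparound.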

/kernel/linux/linux-5.10/drivers/md/bcache/

util.h
    245  * freelist as a stack - allocating and freeing push and pop off the freelist.
    250  type *freelist; \
    256  typeof((array)->freelist) _ret = (array)->freelist; \
    259  (array)->freelist = *((typeof((array)->freelist) *) _ret);\
    266  typeof((array)->freelist) _ptr = ptr; \
    268  *((typeof((array)->freelist) *) _ptr) = (array)->freelist; \
    [remaining matches elided]

/kernel/linux/linux-6.6/drivers/md/bcache/

util.h
    243  * freelist as a stack - allocating and freeing push and pop off the freelist.
    248  type *freelist; \
    254  typeof((array)->freelist) _ret = (array)->freelist; \
    257  (array)->freelist = *((typeof((array)->freelist) *) _ret);\
    264  typeof((array)->freelist) _ptr = ptr; \
    266  *((typeof((array)->freelist) *) _ptr) = (array)->freelist; \
    [remaining matches elided]
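
Both copies show bcache's array-allocator macro, which treats the freelist as a stack and threads the "next free" pointer through the free element itself: freeing writes the current head into the element's first bytes, and allocating reads it back out. The macro expanded into plain C (element type and array size are illustrative):

#include <stddef.h>

/* bcache's DECLARE_ARRAY_ALLOCATOR idea without the macro: a fixed
 * array whose free elements double as freelist links. The first
 * sizeof(void *) bytes of a *free* element hold the next pointer,
 * so the freelist costs no extra memory. */
struct elem { char payload[64]; };   /* must be >= sizeof(void *) */

struct array_alloc {
    struct elem *freelist;
    struct elem data[16];
};

static void array_free(struct array_alloc *a, struct elem *e)
{
    /* Stash the old head inside the freed element, then push. */
    *(struct elem **)e = a->freelist;
    a->freelist = e;
}

static struct elem *array_alloc_elem(struct array_alloc *a)
{
    struct elem *e = a->freelist;

    if (e)   /* the next pointer was stashed in the element itself */
        a->freelist = *(struct elem **)e;
    return e;
}

static void array_init(struct array_alloc *a)
{
    a->freelist = NULL;
    for (size_t i = 0; i < 16; i++)
        array_free(a, &a->data[i]);
}

The only constraint this imposes is that an element must be at least pointer-sized, since its storage is reused for the link while it sits on the freelist.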

/kernel/linux/linux-6.6/kernel/trace/

rethook.c
     45  rhn = container_of(node, struct rethook_node, freelist);  in rethook_free_rcu()
    129  freelist_add(&node->freelist, &rh->pool);  in rethook_add_node()
    155  freelist_add(&node->freelist, &node->rethook->pool);  in rethook_recycle()
    190  return container_of(fn, struct rethook_node, freelist);  in rethook_try_get()

/kernel/linux/linux-6.6/include/linux/

rethook.h
      9  #include <linux/freelist.h>
     45  * @freelist: The freelist, linked to struct rethook::pool.
     57  struct freelist_node freelist;  (member)
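
rethook embeds a freelist_node in each rethook_node and recycles nodes through the lock-free freelist from <linux/freelist.h>, recovering the outer structure with container_of(); being lock-free is what makes it usable from the return-probe context. A C11 sketch of the push/pop shape — note this is a plain Treiber stack, whereas the kernel's freelist additionally carries a per-node refcount to defend against ABA reuse:

#include <stdatomic.h>
#include <stddef.h>

struct freelist_node {
    struct freelist_node *next;
};

struct freelist_head {
    _Atomic(struct freelist_node *) head;
};

/* Push: retry the CAS until our node becomes the new head. */
static void freelist_add(struct freelist_node *n, struct freelist_head *h)
{
    struct freelist_node *old = atomic_load(&h->head);

    do {
        n->next = old;
    } while (!atomic_compare_exchange_weak(&h->head, &old, n));
}

/* Pop: swing head to head->next; a failed CAS refreshes n for us. */
static struct freelist_node *freelist_try_get(struct freelist_head *h)
{
    struct freelist_node *n = atomic_load(&h->head);

    while (n && !atomic_compare_exchange_weak(&h->head, &n, n->next))
        ;
    return n;   /* NULL if the pool is empty */
}

The caller then does container_of() on the returned node to get back to its rethook_node, exactly as lines 45 and 190 of rethook.c show.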

/kernel/linux/linux-5.10/drivers/iommu/amd/

iommu.c
   1400  static void free_page_list(struct page *freelist)  in free_page_list() (argument)
   1402  while (freelist != NULL) {  in free_page_list()
   1403  unsigned long p = (unsigned long)page_address(freelist);  in free_page_list()
   1404  freelist = freelist->freelist;  in free_page_list()
   1409  static struct page *free_pt_page(unsigned long pt, struct page *freelist)  in free_pt_page() (argument)
   1413  p->freelist = freelist;  in free_pt_page()
   1419  static struct page *free_pt_##LVL (unsigned long __pt, struct page *freelist) \
   1450  free_sub_pt(unsigned long root, int mode, struct page *freelist)  free_sub_pt() (argument)
   1484  struct page *freelist = NULL;  free_pagetable() (local)
   1704  free_clear_pte(u64 *pte, u64 pteval, struct page *freelist)  free_clear_pte() (argument)
   1737  struct page *freelist = NULL;  iommu_map_page() (local)
    [remaining matches elided]
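
Unlike the 6.6 code earlier, the 5.10 driver chains deferred-free pages through the pages themselves: struct page carries a freelist member, so free_pt_page() makes the page the new list head and free_page_list() later walks freelist = freelist->freelist. The link costs nothing because a page being torn down has no other use for that field. A sketch (the struct is a stand-in for struct page):

/* The deferred-free chain threads through the pages themselves. */
struct fake_page {
    struct fake_page *freelist;   /* next page awaiting free */
};

static struct fake_page *free_pt_page(struct fake_page *p,
                                      struct fake_page *freelist)
{
    p->freelist = freelist;   /* push: this page is the new head */
    return p;
}

static void free_page_list(struct fake_page *freelist)
{
    while (freelist != NULL) {
        struct fake_page *p = freelist;
        freelist = freelist->freelist;   /* follow the embedded link */
        (void)p;   /* __free_page(p) in the driver */
    }
}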

/kernel/linux/linux-5.10/drivers/nvdimm/

btt.c
    358  * It does _not_ prepare the freelist entry for the next write …
    359  * btt_flog_write is the wrapper for updating the freelist elements
    392  arena->freelist[lane].sub = 1 - arena->freelist[lane].sub;  in btt_flog_write()
    393  if (++(arena->freelist[lane].seq) == 4)  in btt_flog_write()
    394  arena->freelist[lane].seq = 1;  in btt_flog_write()
    396  arena->freelist[lane].has_err = 1;  in btt_flog_write()
    397  arena->freelist[lane].block = ent_lba(le32_to_cpu(ent->old_map));  in btt_flog_write()
    509  if (arena->freelist[lane].has_err) {  in arena_clear_freelist_error()
    511  u32 lba = arena->freelist[lane]…  in arena_clear_freelist_error()
    [remaining matches elided]
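
btt_flog_write() shows the free-list bookkeeping of the BTT log: each lane alternates between the two slots of its log pair (sub = 1 - sub) and steps a 2-bit sequence number through 1 -> 2 -> 3 -> 1, since 0 on media means "never written" and is skipped. A standalone demonstration of that update:

#include <stdio.h>

/* "sub" alternates between the two slots of the lane's log pair;
 * "seq" is a 2-bit cyclic sequence number stepping 1 -> 2 -> 3 -> 1
 * (0 is reserved to mean uninitialized on media). */
struct free_entry {
    unsigned char sub;   /* which log slot to write next: 0 or 1 */
    unsigned char seq;   /* sequence number for that future write */
};

static void flog_advance(struct free_entry *e)
{
    e->sub = 1 - e->sub;
    if (++e->seq == 4)
        e->seq = 1;
}

int main(void)
{
    struct free_entry e = { .sub = 0, .seq = 1 };

    for (int i = 0; i < 6; i++) {
        flog_advance(&e);
        printf("sub=%u seq=%u\n", e.sub, e.seq);
    }
    return 0;
}

On recovery, comparing the two slots' sequence numbers under this cyclic ordering identifies which log entry of the pair was written last.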

/kernel/linux/linux-6.6/drivers/nvdimm/

btt.c
    358  * It does _not_ prepare the freelist entry for the next write …
    359  * btt_flog_write is the wrapper for updating the freelist elements
    392  arena->freelist[lane].sub = 1 - arena->freelist[lane].sub;  in btt_flog_write()
    393  if (++(arena->freelist[lane].seq) == 4)  in btt_flog_write()
    394  arena->freelist[lane].seq = 1;  in btt_flog_write()
    396  arena->freelist[lane].has_err = 1;  in btt_flog_write()
    397  arena->freelist[lane].block = ent_lba(le32_to_cpu(ent->old_map));  in btt_flog_write()
    509  if (arena->freelist[lane].has_err) {  in arena_clear_freelist_error()
    511  u32 lba = arena->freelist[lane]…  in arena_clear_freelist_error()
    [remaining matches elided]

/kernel/linux/linux-5.10/drivers/iommu/intel/

iommu.c
   1214  struct page *freelist)  in dma_pte_list_pagetables()
   1219  pg->freelist = freelist;  in dma_pte_list_pagetables()
   1220  freelist = pg;  in dma_pte_list_pagetables()
   1223  return freelist;  in dma_pte_list_pagetables()
   1228  freelist = dma_pte_list_pagetables(domain, level - 1, …  in dma_pte_list_pagetables()
   1229  pte, freelist);  in dma_pte_list_pagetables()
   1233  return freelist;  in dma_pte_list_pagetables()
   1240  struct page *freelist)  in dma_pte_clear_level()
   1261  freelist …  in dma_pte_clear_level()
   1212  dma_pte_list_pagetables(struct dmar_domain *domain, int level, struct dma_pte *pte, struct page *freelist)  dma_pte_list_pagetables() (argument)
   1236  dma_pte_clear_level(struct dmar_domain *domain, int level, struct dma_pte *pte, unsigned long pfn, unsigned long start_pfn, unsigned long last_pfn, struct page *freelist)  dma_pte_clear_level() (argument)
   1292  struct page *freelist;  domain_unmap() (local)
   1314  dma_free_pagelist(struct page *freelist)  dma_free_pagelist() (argument)
   1326  struct page *freelist = (struct page *)data;  iova_entry_free() (local)
   2082  struct page *freelist;  domain_exit() (local)
   3652  struct page *freelist;  intel_unmap() (local)
   4725  struct page *freelist;  intel_iommu_memory_notifier() (local)
   5703  struct page *freelist = NULL;  intel_iommu_unmap() (local)
    [remaining matches elided]

/kernel/linux/linux-5.10/fs/jfs/

jfs_dtree.h
    139  s8 freelist; /* 1: freelist header */  (member)
    178  * end of entry slot list or freelist is marked with -1.
    188  s8 freelist; /* 1: slot index of head of freelist */  (member)

jfs_dtree.c
     12  * directory entry slots initialized as a freelist
     15  * from the freelist as required to store variable length data
     17  * are returned to freelist.
   1421  /* init freelist */  in dtSplitPage()
   1423  rp->header.freelist = fsi;  in dtSplitPage()
   1447  * initialize freelist of new right page  in dtSplitPage()
   1563  * finalize freelist of new right page  in dtSplitPage()
   1565  fsi = rp->header.freelist;  in dtSplitPage()
   1774  * add old stbl region at head of freelist  in dtExtendPage()
   1778  last = sp->header.freelist;  in dtExtendPage()
    [remaining matches elided]
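
In a JFS directory page, the fixed-size slots double as a freelist: the header's s8 freelist field holds the index of the first free slot, each free slot stores the index of the next, and -1 terminates the chain (the s8 type is also why a page holds at most 128 slots). A sketch of index-linked slot allocation (the struct layout is illustrative, not JFS's on-disk format):

#include <stdio.h>

#define NSLOTS 32

/* Slots are addressed by small indices; a free slot's "next" field
 * holds the index of the next free slot, and -1 ends the list. */
struct slot {
    signed char next;    /* s8 in JFS */
    char data[15];
};

struct dtpage {
    signed char freelist;   /* index of first free slot, -1 if full */
    signed char freecnt;
    struct slot slots[NSLOTS];
};

static void dtpage_init(struct dtpage *p)
{
    for (int i = 0; i < NSLOTS; i++)
        p->slots[i].next = (i == NSLOTS - 1) ? -1 : (signed char)(i + 1);
    p->freelist = 0;
    p->freecnt = NSLOTS;
}

static int slot_alloc(struct dtpage *p)
{
    int fsi = p->freelist;

    if (fsi >= 0) {
        p->freelist = p->slots[fsi].next;
        p->freecnt--;
    }
    return fsi;   /* -1 means the page is full */
}

static void slot_free(struct dtpage *p, int fsi)
{
    p->slots[fsi].next = p->freelist;
    p->freelist = (signed char)fsi;
    p->freecnt++;
}

Because the links are indices rather than pointers, the freelist survives being written to disk and read back unchanged, which is exactly what an on-disk directory page needs.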

/kernel/linux/linux-6.6/fs/jfs/

jfs_dtree.h
    139  s8 freelist; /* 1: freelist header */  (member)
    178  * end of entry slot list or freelist is marked with -1.
    188  s8 freelist; /* 1: slot index of head of freelist */  (member)

jfs_dtree.c
     12  * directory entry slots initialized as a freelist
     15  * from the freelist as required to store variable length data
     17  * are returned to freelist.
   1421  /* init freelist */  in dtSplitPage()
   1423  rp->header.freelist = fsi;  in dtSplitPage()
   1447  * initialize freelist of new right page  in dtSplitPage()
   1563  * finalize freelist of new right page  in dtSplitPage()
   1565  fsi = rp->header.freelist;  in dtSplitPage()
   1774  * add old stbl region at head of freelist  in dtExtendPage()
   1778  last = sp->header.freelist;  in dtExtendPage()
    [remaining matches elided]