// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/percpu-vm.c - vmalloc area based chunk allocation
 *
 * Copyright (C) 2010 SUSE Linux Products GmbH
 * Copyright (C) 2010 Tejun Heo <tj@kernel.org>
 *
 * Chunks are mapped into vmalloc areas and populated page by page.
 * This is the default chunk allocator.
 */
#include "internal.h"

static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
				    unsigned int cpu, int page_idx)
{
	/* must not be used on pre-mapped chunk */
	WARN_ON(chunk->immutable);

	return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx));
}

/**
 * pcpu_get_pages - get temp pages array
 *
 * Returns pointer to array of pointers to struct page which can be indexed
 * with pcpu_page_idx().  Note that there is only one array and accesses
 * should be serialized by pcpu_alloc_mutex.
 *
 * RETURNS:
 * Pointer to temp pages array on success.
 */
static struct page **pcpu_get_pages(void)
{
	static struct page **pages;
	size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]);

	lockdep_assert_held(&pcpu_alloc_mutex);

	if (!pages)
		pages = pcpu_mem_zalloc(pages_size, GFP_KERNEL);
	return pages;
}

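/*
 * Illustration, assuming pcpu_page_idx() maps (cpu, i) to
 * pcpu_unit_map[cpu] * pcpu_unit_pages + i as defined in mm/percpu.c:
 * with four units of eight pages each, the array above holds 32 slots,
 * and page 5 of the cpu mapped to unit 2 lands in slot 2 * 8 + 5 = 21.
 * Every helper below indexes this one flat array the same way.
 */
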
/**
 * pcpu_free_pages - free pages which were allocated for @chunk
 * @chunk: chunk pages were allocated for
 * @pages: array of pages to be freed, indexed by pcpu_page_idx()
 * @page_start: page index of the first page to be freed
 * @page_end: page index of the last page to be freed + 1
 *
 * Free pages [@page_start, @page_end) in @pages for all units.
 * The pages were allocated for @chunk.
 */
static void pcpu_free_pages(struct pcpu_chunk *chunk,
			    struct page **pages, int page_start, int page_end)
{
	unsigned int cpu;
	int i;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page *page = pages[pcpu_page_idx(cpu, i)];

			if (page)
				__free_page(page);
		}
	}
}

/**
 * pcpu_alloc_pages - allocates pages for @chunk
 * @chunk: target chunk
 * @pages: array to put the allocated pages into, indexed by pcpu_page_idx()
 * @page_start: page index of the first page to be allocated
 * @page_end: page index of the last page to be allocated + 1
 * @gfp: allocation flags passed to the underlying allocator
 *
 * Allocate pages [@page_start,@page_end) into @pages for all units.
 * The allocation is for @chunk.  Percpu core doesn't care about the
 * content of @pages and will pass it verbatim to pcpu_map_pages().
 */
static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
			    struct page **pages, int page_start, int page_end,
			    gfp_t gfp)
{
	unsigned int cpu, tcpu;
	int i;

	gfp |= __GFP_HIGHMEM;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page **pagep = &pages[pcpu_page_idx(cpu, i)];

			*pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
			if (!*pagep)
				goto err;
		}
	}
	return 0;

err:
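	/*
	 * Unwind in two steps: first free the pages already allocated
	 * for the current @cpu (indices page_start..i-1), then free the
	 * fully allocated ranges of every cpu visited before it.
	 */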
	while (--i >= page_start)
		__free_page(pages[pcpu_page_idx(cpu, i)]);

	for_each_possible_cpu(tcpu) {
		if (tcpu == cpu)
			break;
		for (i = page_start; i < page_end; i++)
			__free_page(pages[pcpu_page_idx(tcpu, i)]);
	}
	return -ENOMEM;
}

/**
 * pcpu_pre_unmap_flush - flush cache prior to unmapping
 * @chunk: chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages in [@page_start,@page_end) of @chunk are about to be
 * unmapped.  Flush cache.  As each flushing trial can be very
 * expensive, issue flush on the whole region at once rather than
 * doing it for each cpu.  This may be overkill but is more
 * scalable.
 */
static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk,
				 int page_start, int page_end)
{
	flush_cache_vunmap(
		pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
}

static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)
{
	vunmap_range_noflush(addr, addr + (nr_pages << PAGE_SHIFT));
}

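/*
 * The _noflush unmap above leaves all flushing to the callers so the
 * expensive operations stay batched: pcpu_pre_unmap_flush() flushes
 * the cache once for the whole region beforehand and
 * pcpu_post_unmap_tlb_flush() flushes the TLB once afterwards.
 * flush_cache_vunmap() is a no-op on many architectures (e.g. x86).
 */
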
/**
 * pcpu_unmap_pages - unmap pages out of a pcpu_chunk
 * @chunk: chunk of interest
 * @pages: pages array which can be used to pass information to free
 * @page_start: page index of the first page to unmap
 * @page_end: page index of the last page to unmap + 1
 *
 * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
 * Corresponding elements in @pages were cleared by the caller and can
 * be used to carry information to pcpu_free_pages() which will be
 * called after all unmaps are finished.  The caller should call
 * proper pre/post flush functions.
 */
static void pcpu_unmap_pages(struct pcpu_chunk *chunk,
			     struct page **pages, int page_start, int page_end)
{
	unsigned int cpu;
	int i;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page *page;

			page = pcpu_chunk_page(chunk, cpu, i);
			WARN_ON(!page);
			pages[pcpu_page_idx(cpu, i)] = page;
		}
		__pcpu_unmap_pages(pcpu_chunk_addr(chunk, cpu, page_start),
				   page_end - page_start);
	}
}

/**
 * pcpu_post_unmap_tlb_flush - flush TLB after unmapping
 * @chunk: pcpu_chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages [@page_start,@page_end) of @chunk have been unmapped.  Flush
 * TLB for the regions.  This can be skipped if the area is to be
 * returned to vmalloc as vmalloc will handle TLB flushing lazily.
 *
 * As with pcpu_pre_unmap_flush(), TLB flushing is also done at once
 * for the whole region.
 */
static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
				      int page_start, int page_end)
{
	flush_tlb_kernel_range(
		pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
}

static int __pcpu_map_pages(unsigned long addr, struct page **pages,
			    int nr_pages)
{
	return vmap_pages_range_noflush(addr, addr + (nr_pages << PAGE_SHIFT),
					PAGE_KERNEL, pages, PAGE_SHIFT);
}

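/*
 * Note: passing PAGE_SHIFT as the page_shift argument maps the range
 * with base-size (order-0) pages only; huge mappings are never used
 * here.  Like __pcpu_unmap_pages(), this is a _noflush variant, so
 * callers must follow up with pcpu_post_map_flush().
 */
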
/**
 * pcpu_map_pages - map pages into a pcpu_chunk
 * @chunk: chunk of interest
 * @pages: pages array containing pages to be mapped
 * @page_start: page index of the first page to map
 * @page_end: page index of the last page to map + 1
 *
 * For each cpu, map pages [@page_start,@page_end) into @chunk.  The
 * caller is responsible for calling pcpu_post_map_flush() after all
 * mappings are complete.
 *
 * This function is responsible for setting up whatever is necessary for
 * reverse lookup (addr -> chunk).
 */
static int pcpu_map_pages(struct pcpu_chunk *chunk,
			  struct page **pages, int page_start, int page_end)
{
	unsigned int cpu, tcpu;
	int i, err;

	for_each_possible_cpu(cpu) {
		err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start),
				       &pages[pcpu_page_idx(cpu, page_start)],
				       page_end - page_start);
		if (err < 0)
			goto err;

		for (i = page_start; i < page_end; i++)
			pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)],
					    chunk);
	}
	return 0;
err:
	for_each_possible_cpu(tcpu) {
		if (tcpu == cpu)
			break;
		__pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
				   page_end - page_start);
	}
	pcpu_post_unmap_tlb_flush(chunk, page_start, page_end);
	return err;
}

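/*
 * Reverse-lookup note, assuming pcpu_set_page_chunk() stashes the
 * chunk pointer in the page's index field as done in mm/percpu.c:
 * the back-pointers set above are what allow a percpu address to be
 * resolved back to its chunk via pcpu_addr_to_page() followed by
 * pcpu_get_page_chunk().
 */
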
/**
 * pcpu_post_map_flush - flush cache after mapping
 * @chunk: pcpu_chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages [@page_start,@page_end) of @chunk have been mapped.  Flush
 * cache.
 *
 * As with pcpu_pre_unmap_flush(), the cache flush is done at once
 * for the whole region.
 */
static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
				int page_start, int page_end)
{
	flush_cache_vmap(
		pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
}

/**
 * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
 * @chunk: chunk of interest
 * @page_start: the start page
 * @page_end: the end page
 * @gfp: allocation flags passed to the underlying memory allocator
 *
 * For each cpu, populate and map pages [@page_start,@page_end) into
 * @chunk.
 *
 * CONTEXT:
 * pcpu_alloc_mutex, does GFP_KERNEL allocation.
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
			       int page_start, int page_end, gfp_t gfp)
{
	struct page **pages;

	pages = pcpu_get_pages();
	if (!pages)
		return -ENOMEM;

	if (pcpu_alloc_pages(chunk, pages, page_start, page_end, gfp))
		return -ENOMEM;

	if (pcpu_map_pages(chunk, pages, page_start, page_end)) {
		pcpu_free_pages(chunk, pages, page_start, page_end);
		return -ENOMEM;
	}
	pcpu_post_map_flush(chunk, page_start, page_end);

	return 0;
}

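/*
 * Populate and depopulate are mirror sequences.  Populate: allocate
 * pages -> map them -> flush the cache for the new mappings.
 * Depopulate (below): flush the cache -> unmap -> free the pages,
 * with the TLB flush left to the caller or to vmalloc's lazy
 * flushing when the area is returned.
 */
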
/**
 * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
 * @chunk: chunk to depopulate
 * @page_start: the start page
 * @page_end: the end page
 *
 * For each cpu, depopulate and unmap pages [@page_start,@page_end)
 * from @chunk.
 *
 * Caller is required to call pcpu_post_unmap_tlb_flush() if not returning the
 * region to vmalloc() which will lazily flush the tlb.
 *
 * CONTEXT:
 * pcpu_alloc_mutex.
 */
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
				  int page_start, int page_end)
{
	struct page **pages;

	/*
	 * If control reaches here, there must have been at least one
	 * successful population attempt so the temp pages array must
	 * be available now.
	 */
	pages = pcpu_get_pages();
	BUG_ON(!pages);

	/* unmap and free */
	pcpu_pre_unmap_flush(chunk, page_start, page_end);

	pcpu_unmap_pages(chunk, pages, page_start, page_end);

	pcpu_free_pages(chunk, pages, page_start, page_end);
}

static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp)
{
	struct pcpu_chunk *chunk;
	struct vm_struct **vms;

	chunk = pcpu_alloc_chunk(gfp);
	if (!chunk)
		return NULL;

	vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes,
				pcpu_nr_groups, pcpu_atom_size);
	if (!vms) {
		pcpu_free_chunk(chunk);
		return NULL;
	}

	chunk->data = vms;
	chunk->base_addr = vms[0]->addr - pcpu_group_offsets[0];

	pcpu_stats_chunk_alloc();
	trace_percpu_create_chunk(chunk->base_addr);

	return chunk;
}

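/*
 * Note: group 0's vm area starts at base_addr + pcpu_group_offsets[0],
 * so subtracting the offset from vms[0]->addr above recovers the
 * chunk-wide base address that pcpu_chunk_addr() arithmetic is
 * anchored to.
 */
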
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk)
{
	if (!chunk)
		return;

	pcpu_stats_chunk_dealloc();
	trace_percpu_destroy_chunk(chunk->base_addr);

	if (chunk->data)
		pcpu_free_vm_areas(chunk->data, pcpu_nr_groups);
	pcpu_free_chunk(chunk);
}

static struct page *pcpu_addr_to_page(void *addr)
{
	return vmalloc_to_page(addr);
}

static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai)
{
	/* no extra restriction */
	return 0;
}

/**
 * pcpu_should_reclaim_chunk - determine if a chunk should go into reclaim
 * @chunk: chunk of interest
 *
 * This is the entry point for percpu reclaim.  If a chunk qualifies, it is then
 * isolated and managed in separate lists at the back of pcpu_slot: sidelined
 * and to_depopulate respectively.  The to_depopulate list holds chunks slated
 * for depopulation.  They no longer contribute to pcpu_nr_empty_pop_pages once
 * they are on this list.  Once depopulated, they are moved onto the sidelined
 * list which enables them to be pulled back in for allocation if no other chunk
 * can satisfy the allocation.
 */
static bool pcpu_should_reclaim_chunk(struct pcpu_chunk *chunk)
{
	/* do not reclaim either the first chunk or reserved chunk */
	if (chunk == pcpu_first_chunk || chunk == pcpu_reserved_chunk)
		return false;

	/*
	 * If it is isolated, it may be on the sidelined list so move it back to
	 * the to_depopulate list.  If at least 1/4 of its pages are empty AND
	 * there is no system-wide shortage of empty pages aside from this
	 * chunk, move it to the to_depopulate list.
	 */
	return ((chunk->isolated && chunk->nr_empty_pop_pages) ||
		(pcpu_nr_empty_pop_pages >
		 (PCPU_EMPTY_POP_PAGES_HIGH + chunk->nr_empty_pop_pages) &&
		 chunk->nr_empty_pop_pages >= chunk->nr_pages / 4));
}
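
/*
 * Worked example for the check above, assuming
 * PCPU_EMPTY_POP_PAGES_HIGH is 4 as in mm/percpu.c: a 16-page chunk
 * with 4 empty populated pages meets the 1/4 threshold and is
 * reclaimed only if the rest of the system still has more than
 * 4 + 4 = 8 empty populated pages, i.e. pcpu_nr_empty_pop_pages > 8.
 */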