// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/percpu-vm.c - vmalloc area based chunk allocation
 *
 * Copyright (C) 2010		SUSE Linux Products GmbH
 * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
 *
 * Chunks are mapped into vmalloc areas and populated page by page.
 * This is the default chunk allocator.
 */

static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
				    unsigned int cpu, int page_idx)
{
	/* must not be used on pre-mapped chunk */
	WARN_ON(chunk->immutable);

	return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx));
}

/**
 * pcpu_get_pages - get temp pages array
 *
 * Returns pointer to array of pointers to struct page which can be indexed
 * with pcpu_page_idx().  Note that there is only one array and accesses
 * should be serialized by pcpu_alloc_mutex.
 *
 * RETURNS:
 * Pointer to temp pages array on success.
 */
static struct page **pcpu_get_pages(void)
{
	static struct page **pages;
	size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]);

	lockdep_assert_held(&pcpu_alloc_mutex);

	if (!pages)
		pages = pcpu_mem_zalloc(pages_size, GFP_KERNEL);
	return pages;
}
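
/*
 * Illustrative aside, not used by the code below: with pcpu_page_idx()
 * as defined in mm/percpu.c (pcpu_unit_map[cpu] * pcpu_unit_pages +
 * page_idx), the temp array is a flat [unit][page] matrix, so e.g. the
 * page backing page 3 of cpu 2's unit would be found at:
 *
 *	struct page *pg = pcpu_get_pages()[pcpu_page_idx(2, 3)];
 */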

/**
 * pcpu_free_pages - free pages which were allocated for @chunk
 * @chunk: chunk pages were allocated for
 * @pages: array of pages to be freed, indexed by pcpu_page_idx()
 * @page_start: page index of the first page to be freed
 * @page_end: page index of the last page to be freed + 1
 *
 * Free pages [@page_start,@page_end) in @pages for all units.
 * The pages were allocated for @chunk.
 */
static void pcpu_free_pages(struct pcpu_chunk *chunk,
			    struct page **pages, int page_start, int page_end)
{
	unsigned int cpu;
	int i;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page *page = pages[pcpu_page_idx(cpu, i)];

			if (page)
				__free_page(page);
		}
	}
}

/**
 * pcpu_alloc_pages - allocates pages for @chunk
 * @chunk: target chunk
 * @pages: array to put the allocated pages into, indexed by pcpu_page_idx()
 * @page_start: page index of the first page to be allocated
 * @page_end: page index of the last page to be allocated + 1
 * @gfp: allocation flags passed to the underlying allocator
 *
 * Allocate pages [@page_start,@page_end) into @pages for all units.
 * The allocation is for @chunk.  Percpu core doesn't care about the
 * content of @pages and will pass it verbatim to pcpu_map_pages().
 */
static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
			    struct page **pages, int page_start, int page_end,
			    gfp_t gfp)
{
	unsigned int cpu, tcpu;
	int i;

	gfp |= __GFP_HIGHMEM;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page **pagep = &pages[pcpu_page_idx(cpu, i)];

			*pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
			if (!*pagep)
				goto err;
		}
	}
	return 0;

err:
	while (--i >= page_start)
		__free_page(pages[pcpu_page_idx(cpu, i)]);

	for_each_possible_cpu(tcpu) {
		if (tcpu == cpu)
			break;
		for (i = page_start; i < page_end; i++)
			__free_page(pages[pcpu_page_idx(tcpu, i)]);
	}
	return -ENOMEM;
}
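
/*
 * Sketch of the span the flush helpers below operate on, assuming
 * pcpu_chunk_addr() resolves to base_addr + the unit's offset +
 * (page_idx << PAGE_SHIFT): one call covers from the lowest unit's
 * @page_start to one past the highest unit's last page, taking in
 * every unit's copy of [@page_start,@page_end) as a single range.
 */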

/**
 * pcpu_pre_unmap_flush - flush cache prior to unmapping
 * @chunk: chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages in [@page_start,@page_end) of @chunk are about to be
 * unmapped.  Flush cache.  As each flush can be very expensive, issue
 * flush on the whole region at once rather than doing it for each cpu.
 * This could be overkill but is more scalable.
 */
static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk,
				 int page_start, int page_end)
{
	flush_cache_vunmap(
		pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
}

static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)
{
	unmap_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT);
}

/**
 * pcpu_unmap_pages - unmap pages out of a pcpu_chunk
 * @chunk: chunk of interest
 * @pages: pages array which can be used to pass information to free
 * @page_start: page index of the first page to unmap
 * @page_end: page index of the last page to unmap + 1
 *
 * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
 * Corresponding elements in @pages were cleared by the caller and can
 * be used to carry information to pcpu_free_pages() which will be
 * called after all unmaps are finished.  The caller should call
 * proper pre/post flush functions.
 */
static void pcpu_unmap_pages(struct pcpu_chunk *chunk,
			     struct page **pages, int page_start, int page_end)
{
	unsigned int cpu;
	int i;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page *page;

			page = pcpu_chunk_page(chunk, cpu, i);
			WARN_ON(!page);
			pages[pcpu_page_idx(cpu, i)] = page;
		}
		__pcpu_unmap_pages(pcpu_chunk_addr(chunk, cpu, page_start),
				   page_end - page_start);
	}
}
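
/*
 * Hypothetical caller sequence, for illustration only; the real user of
 * these helpers is pcpu_depopulate_chunk() below:
 *
 *	pcpu_pre_unmap_flush(chunk, s, e);
 *	pcpu_unmap_pages(chunk, pages, s, e);
 *	pcpu_post_unmap_tlb_flush(chunk, s, e);	<- skipped when the area
 *						   goes back to vmalloc
 *	pcpu_free_pages(chunk, pages, s, e);
 */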

/**
 * pcpu_post_unmap_tlb_flush - flush TLB after unmapping
 * @chunk: pcpu_chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages [@page_start,@page_end) of @chunk have been unmapped.  Flush
 * TLB for the regions.  This can be skipped if the area is to be
 * returned to vmalloc as vmalloc will handle TLB flushing lazily.
 *
 * As with pcpu_pre_unmap_flush(), TLB flushing is also done at once
 * for the whole region.
 */
static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
				      int page_start, int page_end)
{
	flush_tlb_kernel_range(
		pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
}

static int __pcpu_map_pages(unsigned long addr, struct page **pages,
			    int nr_pages)
{
	return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT,
					PAGE_KERNEL, pages);
}

/**
 * pcpu_map_pages - map pages into a pcpu_chunk
 * @chunk: chunk of interest
 * @pages: pages array containing pages to be mapped
 * @page_start: page index of the first page to map
 * @page_end: page index of the last page to map + 1
 *
 * For each cpu, map pages [@page_start,@page_end) into @chunk.  The
 * caller is responsible for calling pcpu_post_map_flush() after all
 * mappings are complete.
 *
 * This function is responsible for setting up whatever is necessary for
 * reverse lookup (addr -> chunk).
 */
static int pcpu_map_pages(struct pcpu_chunk *chunk,
			  struct page **pages, int page_start, int page_end)
{
	unsigned int cpu, tcpu;
	int i, err;

	for_each_possible_cpu(cpu) {
		err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start),
				       &pages[pcpu_page_idx(cpu, page_start)],
				       page_end - page_start);
		if (err < 0)
			goto err;

		for (i = page_start; i < page_end; i++)
			pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)],
					    chunk);
	}
	return 0;
err:
	for_each_possible_cpu(tcpu) {
		if (tcpu == cpu)
			break;
		__pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
				   page_end - page_start);
	}
	pcpu_post_unmap_tlb_flush(chunk, page_start, page_end);
	return err;
}
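
/*
 * Aside on the reverse lookup set up by pcpu_map_pages() above:
 * pcpu_set_page_chunk() (mm/percpu.c) stashes the chunk pointer in the
 * page's ->index field and pcpu_get_page_chunk() reads it back, so,
 * schematically:
 *
 *	chunk == pcpu_get_page_chunk(pcpu_addr_to_page(addr));
 */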

/**
 * pcpu_post_map_flush - flush cache after mapping
 * @chunk: pcpu_chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages [@page_start,@page_end) of @chunk have been mapped.  Flush
 * cache.
 *
 * As with pcpu_pre_unmap_flush(), cache flushing is also done at once
 * for the whole region.
 */
static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
				int page_start, int page_end)
{
	flush_cache_vmap(
		pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
}

/**
 * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
 * @chunk: chunk of interest
 * @page_start: the start page
 * @page_end: the end page
 * @gfp: allocation flags passed to the underlying memory allocator
 *
 * For each cpu, populate and map pages [@page_start,@page_end) into
 * @chunk.
 *
 * CONTEXT:
 * pcpu_alloc_mutex, does GFP_KERNEL allocation.
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
			       int page_start, int page_end, gfp_t gfp)
{
	struct page **pages;

	pages = pcpu_get_pages();
	if (!pages)
		return -ENOMEM;

	if (pcpu_alloc_pages(chunk, pages, page_start, page_end, gfp))
		return -ENOMEM;

	if (pcpu_map_pages(chunk, pages, page_start, page_end)) {
		pcpu_free_pages(chunk, pages, page_start, page_end);
		return -ENOMEM;
	}
	pcpu_post_map_flush(chunk, page_start, page_end);

	return 0;
}
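
/*
 * Summary of the two symmetric paths in this file (no new mechanism,
 * just the call order):
 *
 *	populate:   pcpu_alloc_pages() -> pcpu_map_pages()
 *		    -> pcpu_post_map_flush()
 *	depopulate: pcpu_pre_unmap_flush() -> pcpu_unmap_pages()
 *		    -> pcpu_free_pages()  (TLB flush left to vmalloc)
 */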

/**
 * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
 * @chunk: chunk to depopulate
 * @page_start: the start page
 * @page_end: the end page
 *
 * For each cpu, depopulate and unmap pages [@page_start,@page_end)
 * from @chunk.
 *
 * CONTEXT:
 * pcpu_alloc_mutex.
 */
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
				  int page_start, int page_end)
{
	struct page **pages;

	/*
	 * If control reaches here, there must have been at least one
	 * successful population attempt so the temp pages array must
	 * be available now.
	 */
	pages = pcpu_get_pages();
	BUG_ON(!pages);

	/* unmap and free */
	pcpu_pre_unmap_flush(chunk, page_start, page_end);

	pcpu_unmap_pages(chunk, pages, page_start, page_end);

	/* no need to flush tlb, vmalloc will handle it lazily */

	pcpu_free_pages(chunk, pages, page_start, page_end);
}

static struct pcpu_chunk *pcpu_create_chunk(enum pcpu_chunk_type type,
					    gfp_t gfp)
{
	struct pcpu_chunk *chunk;
	struct vm_struct **vms;

	chunk = pcpu_alloc_chunk(type, gfp);
	if (!chunk)
		return NULL;

	vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes,
				pcpu_nr_groups, pcpu_atom_size);
	if (!vms) {
		pcpu_free_chunk(chunk);
		return NULL;
	}

	chunk->data = vms;
	chunk->base_addr = vms[0]->addr - pcpu_group_offsets[0];

	pcpu_stats_chunk_alloc();
	trace_percpu_create_chunk(chunk->base_addr);

	return chunk;
}

static void pcpu_destroy_chunk(struct pcpu_chunk *chunk)
{
	if (!chunk)
		return;

	pcpu_stats_chunk_dealloc();
	trace_percpu_destroy_chunk(chunk->base_addr);

	if (chunk->data)
		pcpu_free_vm_areas(chunk->data, pcpu_nr_groups);
	pcpu_free_chunk(chunk);
}

static struct page *pcpu_addr_to_page(void *addr)
{
	return vmalloc_to_page(addr);
}

static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai)
{
	/* no extra restriction */
	return 0;
}
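
/*
 * Note: this file is not built standalone; mm/percpu.c #includes either
 * this vmalloc-based allocator or percpu-km.c, selected by
 * CONFIG_NEED_PER_CPU_KM.
 */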