Lines Matching refs:chunk

3 * mm/percpu-vm.c - vmalloc area based chunk allocation
9 * This is the default chunk allocator.
12 static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
15 /* must not be used on pre-mapped chunk */
16 WARN_ON(chunk->immutable);
18 return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx));
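The listing clips the parameter list; the full definition in mainline mm/percpu-vm.c is short enough to reproduce here (modulo version drift):

        static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
                                            unsigned int cpu, int page_idx)
        {
                /* must not be used on pre-mapped chunk */
                WARN_ON(chunk->immutable);

                return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx));
        }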
44 * pcpu_free_pages - free pages which were allocated for @chunk
45 * @chunk: chunk pages were allocated for
51 * The pages were allocated for @chunk.
53 static void pcpu_free_pages(struct pcpu_chunk *chunk,
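For context, a sketch of the whole function as it appears in mainline; it assumes the pcpu_page_idx() helper and the shared temp pages array handed in by the caller. Null slots are skipped, so a partially populated array (e.g. after a failed allocation) can be passed straight to it:

        static void pcpu_free_pages(struct pcpu_chunk *chunk,
                                    struct page **pages, int page_start, int page_end)
        {
                unsigned int cpu;
                int i;

                for_each_possible_cpu(cpu) {
                        for (i = page_start; i < page_end; i++) {
                                struct page *page = pages[pcpu_page_idx(cpu, i)];

                                if (page)
                                        __free_page(page);
                        }
                }
        }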
70 * pcpu_alloc_pages - allocates pages for @chunk
71 * @chunk: target chunk
78 * The allocation is for @chunk. Percpu core doesn't care about the
81 static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
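A reconstruction of the mainline body (pcpu_page_idx() and the rollback structure are as in current mainline; details vary by version). Note the two-stage rollback on failure: first the partial run for the failing cpu, then the complete runs for every earlier cpu:

        static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
                                    struct page **pages, int page_start, int page_end,
                                    gfp_t gfp)
        {
                unsigned int cpu, tcpu;
                int i;

                gfp |= __GFP_HIGHMEM;

                for_each_possible_cpu(cpu) {
                        for (i = page_start; i < page_end; i++) {
                                struct page **pagep = &pages[pcpu_page_idx(cpu, i)];

                                /* allocate on the cpu's own node when possible */
                                *pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
                                if (!*pagep)
                                        goto err;
                        }
                }
                return 0;

        err:
                /* free the partial run for @cpu, then all earlier cpus' runs */
                while (--i >= page_start)
                        __free_page(pages[pcpu_page_idx(cpu, i)]);

                for_each_possible_cpu(tcpu) {
                        if (tcpu == cpu)
                                break;
                        for (i = page_start; i < page_end; i++)
                                __free_page(pages[pcpu_page_idx(tcpu, i)]);
                }
                return -ENOMEM;
        }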
116 * @chunk: chunk the regions to be flushed belong to
120 * Pages in [@page_start,@page_end) of @chunk are about to be
126 static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk,
130 pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
131 pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
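The matched lines show only the arguments; in mainline the callee is a single flush_cache_vunmap() spanning from the lowest to the highest unit, so the potentially expensive flush is issued once for the whole region rather than per cpu:

        static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk,
                                         int page_start, int page_end)
        {
                flush_cache_vunmap(
                        pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
                        pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
        }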
141 * @chunk: chunk of interest
146 * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
152 static void pcpu_unmap_pages(struct pcpu_chunk *chunk,
162 page = pcpu_chunk_page(chunk, cpu, i);
166 __pcpu_unmap_pages(pcpu_chunk_addr(chunk, cpu, page_start),
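A sketch of the mainline body. Each page is looked up via pcpu_chunk_page() and stashed in @pages before unmapping, so pcpu_free_pages() can free it afterwards; the caller is expected to bracket this with the pre/post flush helpers:

        static void pcpu_unmap_pages(struct pcpu_chunk *chunk,
                                     struct page **pages, int page_start, int page_end)
        {
                unsigned int cpu;
                int i;

                for_each_possible_cpu(cpu) {
                        for (i = page_start; i < page_end; i++) {
                                struct page *page;

                                /* save the page for the later pcpu_free_pages() */
                                page = pcpu_chunk_page(chunk, cpu, i);
                                WARN_ON(!page);
                                pages[pcpu_page_idx(cpu, i)] = page;
                        }
                        __pcpu_unmap_pages(pcpu_chunk_addr(chunk, cpu, page_start),
                                           page_end - page_start);
                }
        }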
173 * @chunk: pcpu_chunk the regions to be flushed belong to
177 * Pages [@page_start,@page_end) of @chunk have been unmapped. Flush
184 static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
188 pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
189 pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
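As with the pre-unmap flush, the matched lines elide the callee; in mainline it is flush_tlb_kernel_range() over the whole low-to-high unit span:

        static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
                                              int page_start, int page_end)
        {
                flush_tlb_kernel_range(
                        pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
                        pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
        }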
201 * @chunk: chunk of interest
206 * For each cpu, map pages [@page_start,@page_end) into @chunk. The
211 * reverse lookup (addr -> chunk).
213 static int pcpu_map_pages(struct pcpu_chunk *chunk,
220 err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start),
228 chunk);
235 __pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
238 pcpu_post_unmap_tlb_flush(chunk, page_start, page_end);
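Putting the matched fragments together, a mainline-based sketch (pcpu_set_page_chunk() is the mainline helper that records the addr -> chunk reverse mapping in struct page; exact details vary by version):

        static int pcpu_map_pages(struct pcpu_chunk *chunk,
                                  struct page **pages, int page_start, int page_end)
        {
                unsigned int cpu, tcpu;
                int i, err;

                for_each_possible_cpu(cpu) {
                        err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start),
                                               &pages[pcpu_page_idx(cpu, page_start)],
                                               page_end - page_start);
                        if (err < 0)
                                goto err;

                        /* set up the addr -> chunk reverse lookup */
                        for (i = page_start; i < page_end; i++)
                                pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)],
                                                    chunk);
                }
                return 0;
        err:
                /* unmap whatever was mapped for cpus before the failing one */
                for_each_possible_cpu(tcpu) {
                        if (tcpu == cpu)
                                break;
                        __pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
                                           page_end - page_start);
                }
                pcpu_post_unmap_tlb_flush(chunk, page_start, page_end);
                return err;
        }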
244 * @chunk: pcpu_chunk the regions to be flushed belong to
248 * Pages [@page_start,@page_end) of @chunk have been mapped. Flush
254 static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
258 pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
259 pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
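Again only the arguments match; the mainline callee is flush_cache_vmap() over the whole span:

        static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
                                        int page_start, int page_end)
        {
                flush_cache_vmap(
                        pcpu_chunk_addr(chunk, pcpu_low_unit_cpu, page_start),
                        pcpu_chunk_addr(chunk, pcpu_high_unit_cpu, page_end));
        }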
264 * @chunk: chunk of interest
270 * @chunk.
275 static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
284 if (pcpu_alloc_pages(chunk, pages, page_start, page_end, gfp))
287 if (pcpu_map_pages(chunk, pages, page_start, page_end)) {
288 pcpu_free_pages(chunk, pages, page_start, page_end);
291 pcpu_post_map_flush(chunk, page_start, page_end);
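The matched lines are most of the function already; a mainline-based sketch of the whole sequence, assuming the shared temp array from the mainline pcpu_get_pages() helper:

        static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
                                       int page_start, int page_end, gfp_t gfp)
        {
                struct page **pages;

                pages = pcpu_get_pages();
                if (!pages)
                        return -ENOMEM;

                if (pcpu_alloc_pages(chunk, pages, page_start, page_end, gfp))
                        return -ENOMEM;

                if (pcpu_map_pages(chunk, pages, page_start, page_end)) {
                        pcpu_free_pages(chunk, pages, page_start, page_end);
                        return -ENOMEM;
                }
                pcpu_post_map_flush(chunk, page_start, page_end);

                return 0;
        }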
298 * @chunk: chunk to depopulate
303 * from @chunk.
308 static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
322 pcpu_pre_unmap_flush(chunk, page_start, page_end);
324 pcpu_unmap_pages(chunk, pages, page_start, page_end);
328 pcpu_free_pages(chunk, pages, page_start, page_end);
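A sketch of the mainline body. The post-unmap TLB flush is deliberately skipped here because the vmalloc layer flushes lazily before reusing the area:

        static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
                                          int page_start, int page_end)
        {
                struct page **pages;

                /*
                 * Population must have succeeded at least once, so the
                 * temp pages array is already allocated.
                 */
                pages = pcpu_get_pages();
                BUG_ON(!pages);

                /* flush cache, unmap, then hand the pages back */
                pcpu_pre_unmap_flush(chunk, page_start, page_end);
                pcpu_unmap_pages(chunk, pages, page_start, page_end);
                /* no TLB flush: vmalloc handles it lazily */
                pcpu_free_pages(chunk, pages, page_start, page_end);
        }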
334 struct pcpu_chunk *chunk;
337 chunk = pcpu_alloc_chunk(type, gfp);
338 if (!chunk)
344 pcpu_free_chunk(chunk);
348 chunk->data = vms;
349 chunk->base_addr = vms[0]->addr - pcpu_group_offsets[0];
352 trace_percpu_create_chunk(chunk->base_addr);
354 return chunk;
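A mainline-based sketch of the whole constructor. The enum pcpu_chunk_type parameter matches the kernel version this listing was taken from; other versions take only gfp, so treat the signature as version specific:

        static struct pcpu_chunk *pcpu_create_chunk(enum pcpu_chunk_type type,
                                                    gfp_t gfp)
        {
                struct pcpu_chunk *chunk;
                struct vm_struct **vms;

                chunk = pcpu_alloc_chunk(type, gfp);
                if (!chunk)
                        return NULL;

                /* reserve one vmalloc area per cpu group for the chunk */
                vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes,
                                        pcpu_nr_groups, pcpu_atom_size);
                if (!vms) {
                        pcpu_free_chunk(chunk);
                        return NULL;
                }

                chunk->data = vms;
                chunk->base_addr = vms[0]->addr - pcpu_group_offsets[0];

                pcpu_stats_chunk_alloc();
                trace_percpu_create_chunk(chunk->base_addr);

                return chunk;
        }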
357 static void pcpu_destroy_chunk(struct pcpu_chunk *chunk)
359 if (!chunk)
363 trace_percpu_destroy_chunk(chunk->base_addr);
365 if (chunk->data)
366 pcpu_free_vm_areas(chunk->data, pcpu_nr_groups);
367 pcpu_free_chunk(chunk);
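And the matching destructor, reconstructed for completeness (the stats call is the mainline pcpu_stats_chunk_dealloc(); placement may differ by version):

        static void pcpu_destroy_chunk(struct pcpu_chunk *chunk)
        {
                if (!chunk)
                        return;

                pcpu_stats_chunk_dealloc();
                trace_percpu_destroy_chunk(chunk->base_addr);

                /* release the vmalloc areas, then the chunk itself */
                if (chunk->data)
                        pcpu_free_vm_areas(chunk->data, pcpu_nr_groups);
                pcpu_free_chunk(chunk);
        }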