Lines matching refs:ai (mm/percpu.c)

1542 static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);
2240 * Allocate ai which is large enough for @nr_groups groups containing
2241 * @nr_units units. The returned ai's groups[0].cpu_map points to the
2253 struct pcpu_alloc_info *ai;
2258 base_size = ALIGN(struct_size(ai, groups, nr_groups),
2259 __alignof__(ai->groups[0].cpu_map[0]));
2260 ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
2265 ai = ptr;
2268 ai->groups[0].cpu_map = ptr;
2271 ai->groups[0].cpu_map[unit] = NR_CPUS;
2273 ai->nr_groups = nr_groups;
2274 ai->__ai_size = PFN_ALIGN(ai_size);
2276 return ai;
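
The sizing arithmetic above (lines 2258-2260) puts one flat cpu_map array for all units directly behind the struct and its flexible groups[] array, padded so the cpu_map entries are naturally aligned. Below is a minimal userspace sketch of the same layout; ALIGN_UP, NR_CPUS_STUB and the trimmed-down struct shapes are local stand-ins, not the kernel declarations (those live in include/linux/percpu.h, and the kernel allocates the block from memblock rather than the heap).

#include <stdlib.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))
#define NR_CPUS_STUB	64	/* stand-in for NR_CPUS: marks unused map slots */

struct group_info {
	int		nr_units;
	unsigned long	base_offset;
	unsigned int	*cpu_map;
};

struct alloc_info {
	size_t		static_size, reserved_size, dyn_size;
	size_t		unit_size, atom_size, alloc_size, __ai_size;
	int		nr_groups;
	struct group_info groups[];
};

static struct alloc_info *alloc_info_sketch(int nr_groups, int nr_units)
{
	size_t base_size, ai_size;
	struct alloc_info *ai;
	char *p;
	int unit;

	/* struct + group array, padded for the cpu_map entries that follow */
	base_size = ALIGN_UP(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
			     __alignof__(unsigned int));
	ai_size = base_size + nr_units * sizeof(unsigned int);

	p = calloc(1, ai_size);			/* kernel: memblock_alloc() */
	if (!p)
		return NULL;

	ai = (struct alloc_info *)p;
	/* group 0 initially owns the whole map; builders carve it up later */
	ai->groups[0].cpu_map = (unsigned int *)(p + base_size);
	for (unit = 0; unit < nr_units; unit++)
		ai->groups[0].cpu_map[unit] = NR_CPUS_STUB;

	ai->nr_groups = nr_groups;
	ai->__ai_size = ai_size;		/* kernel rounds this with PFN_ALIGN() */
	return ai;
}

pcpu_build_alloc_info() later slides per-group cpu_map pointers through this single array, as the fragments at lines 2765-2768 show.
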
2281 * @ai: pcpu_alloc_info to free
2283 * Free @ai which was allocated by pcpu_alloc_alloc_info().
2285 void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
2287 memblock_free_early(__pa(ai), ai->__ai_size);
2293 * @ai: allocation info to dump
2295 * Print out information about @ai using loglevel @lvl.
2298 const struct pcpu_alloc_info *ai)
2306 v = ai->nr_groups;
2315 upa = ai->alloc_size / ai->unit_size;
2320 lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
2321 ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
2323 for (group = 0; group < ai->nr_groups; group++) {
2324 const struct pcpu_group_info *gi = &ai->groups[group];
2349 * @ai: pcpu_alloc_info describing how the percpu area is shaped
2356 * @ai contains all information necessary to initialize the first
2359 * @ai->static_size is the size of static percpu area.
2361 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
2369 * @ai->dyn_size determines the number of bytes available for dynamic
2370 * allocation in the first chunk. The area between @ai->static_size +
2371 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
2373 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
2374 * and equal to or larger than @ai->static_size + @ai->reserved_size +
2375 * @ai->dyn_size.
2377 * @ai->atom_size is the allocation atom size and used as alignment
2380 * @ai->alloc_size is the allocation size and always multiple of
2381 * @ai->atom_size. This is larger than @ai->atom_size if
2382 * @ai->unit_size is larger than @ai->atom_size.
2384 * @ai->nr_groups and @ai->groups describe virtual memory layout of
2387 * groupings. If @ai->nr_groups is zero, a single group containing
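
Collecting the field descriptions above, the descriptor handed to pcpu_setup_first_chunk() looks roughly as follows. This is a commented sketch modeled on struct pcpu_alloc_info and struct pcpu_group_info from include/linux/percpu.h; the field names match the lines above, but the exact declaration may differ between kernel versions.

#include <stddef.h>

struct pcpu_group_info_sk {
	int		nr_units;	/* number of units in this group */
	unsigned long	base_offset;	/* offset of the group's area from the percpu base */
	unsigned int	*cpu_map;	/* unit -> cpu map; NR_CPUS marks an unused unit */
};

struct pcpu_alloc_info_sk {
	size_t	static_size;	/* size of the static percpu area */
	size_t	reserved_size;	/* if non-zero, bytes reserved right after the static area */
	size_t	dyn_size;	/* bytes available for dynamic allocation in the first chunk */
	size_t	unit_size;	/* per-CPU span; PAGE_SIZE aligned and at least
				 * static_size + reserved_size + dyn_size */
	size_t	atom_size;	/* allocation atom size, used as alignment */
	size_t	alloc_size;	/* allocation size, always a multiple of atom_size;
				 * larger than atom_size if unit_size > atom_size */
	size_t	__ai_size;	/* size of this descriptor, used when freeing it */
	int	nr_groups;	/* number of VM layout groups; 0 implies a single group */
	struct pcpu_group_info_sk groups[];
};

The gap between static_size + reserved_size + dyn_size and unit_size inside each unit is simply left unused (line 2371).
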
2401 void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
2404 size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
2423 pcpu_dump_alloc_info(KERN_EMERG, ai); \
2429 PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
2431 PCPU_SETUP_BUG_ON(!ai->static_size);
2436 PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
2437 PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size));
2438 PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
2439 PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->unit_size, PCPU_BITMAP_BLOCK_SIZE));
2440 PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
2441 PCPU_SETUP_BUG_ON(!ai->dyn_size);
2442 PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->reserved_size, PCPU_MIN_ALLOC_SIZE));
2445 PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
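
The PCPU_SETUP_BUG_ON() checks at lines 2429-2445 mostly enforce the unit layout described in the comment: each unit is [static | reserved | dynamic | unused], and unit_size must cover the first three. A standalone restatement of those invariants follows; the *_SK limits are placeholders, not the kernel's actual PCPU_*/PERCPU_* values.

#include <stdbool.h>
#include <stddef.h>

#define PAGE_SIZE_SK		4096UL		/* placeholder page size */
#define MIN_UNIT_SIZE_SK	(32UL << 10)	/* placeholder for PCPU_MIN_UNIT_SIZE */
#define MIN_ALLOC_SIZE_SK	4UL		/* placeholder for PCPU_MIN_ALLOC_SIZE */
#define DYN_EARLY_MIN_SK	(12UL << 10)	/* placeholder for PERCPU_DYNAMIC_EARLY_SIZE */

struct first_chunk_params {
	size_t static_size, reserved_size, dyn_size, unit_size;
	int nr_groups;
};

/* mirrors the PCPU_SETUP_BUG_ON() conditions listed above */
static bool first_chunk_params_ok(const struct first_chunk_params *p)
{
	size_t size_sum = p->static_size + p->reserved_size + p->dyn_size;

	return p->nr_groups > 0 &&
	       p->static_size != 0 &&			/* a static area must exist */
	       p->dyn_size >= DYN_EARLY_MIN_SK &&	/* room for early dynamic allocs */
	       p->unit_size >= size_sum &&		/* unit covers static+reserved+dyn */
	       p->unit_size % PAGE_SIZE_SK == 0 &&	/* page aligned */
	       p->unit_size >= MIN_UNIT_SIZE_SK &&
	       p->reserved_size % MIN_ALLOC_SIZE_SK == 0;
}
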
2448 alloc_size = ai->nr_groups * sizeof(group_offsets[0]);
2454 alloc_size = ai->nr_groups * sizeof(group_sizes[0]);
2478 for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
2479 const struct pcpu_group_info *gi = &ai->groups[group];
2482 group_sizes[group] = gi->nr_units * ai->unit_size;
2494 unit_off[cpu] = gi->base_offset + i * ai->unit_size;
2512 pcpu_dump_alloc_info(KERN_DEBUG, ai);
2514 pcpu_nr_groups = ai->nr_groups;
2521 pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
2523 pcpu_atom_size = ai->atom_size;
2527 pcpu_stats_save_ai(ai);
2555 static_size = ALIGN(ai->static_size, PCPU_MIN_ALLOC_SIZE);
2556 dyn_size = ai->dyn_size - (static_size - ai->static_size);
2567 map_size = ai->reserved_size ?: dyn_size;
2571 if (ai->reserved_size) {
2575 ai->reserved_size;
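
Lines 2555-2575 carve the first chunk(s) out of one unit: the static area is rounded up to PCPU_MIN_ALLOC_SIZE (the rounding slack is taken out of dyn_size), a reserved chunk is created right after it when reserved_size is non-zero, and the dynamic chunk follows. A small sketch of that offset arithmetic, using hypothetical sizes rather than values from any real configuration:

#include <stdio.h>
#include <stddef.h>

#define MIN_ALLOC_SIZE_SK 4UL	/* placeholder for PCPU_MIN_ALLOC_SIZE */
#define ALIGN_UP(x, a)	  (((x) + (a) - 1) & ~((size_t)(a) - 1))

int main(void)
{
	/* hypothetical ai fields */
	size_t static_size_in = 45003, reserved_size = 8192, dyn_size_in = 28672;

	size_t static_size = ALIGN_UP(static_size_in, MIN_ALLOC_SIZE_SK);
	size_t dyn_size    = dyn_size_in - (static_size - static_size_in);

	/* the first chunk starts right after the (aligned) static area ... */
	size_t first_size  = reserved_size ? reserved_size : dyn_size;

	printf("reserved/dynamic chunk: offset %zu, size %zu\n", static_size, first_size);

	/* ... and if a reserved chunk exists, the dynamic chunk follows it */
	if (reserved_size)
		printf("dynamic chunk: offset %zu, size %zu\n",
		       static_size + reserved_size, dyn_size);
	return 0;
}
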
2678 struct pcpu_alloc_info *ai;
2762 ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
2763 if (!ai)
2765 cpu_map = ai->groups[0].cpu_map;
2768 ai->groups[group].cpu_map = cpu_map;
2772 ai->static_size = static_size;
2773 ai->reserved_size = reserved_size;
2774 ai->dyn_size = dyn_size;
2775 ai->unit_size = alloc_size / upa;
2776 ai->atom_size = atom_size;
2777 ai->alloc_size = alloc_size;
2780 struct pcpu_group_info *gi = &ai->groups[group];
2787 gi->base_offset = unit * ai->unit_size;
2797 return ai;
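
Lines 2762-2797 fill in the descriptor: the single cpu_map allocated by pcpu_alloc_alloc_info() is carved up so each group gets a window into it, and each group's base_offset is the index of its first unit times unit_size. A sketch of that carving step, reusing struct alloc_info / struct group_info from the earlier sketch and assuming the caller has already decided how many units each group gets:

/* builds on the alloc_info_sketch() types defined above */
static void assign_groups_sketch(struct alloc_info *ai,
				 const int *units_per_group, size_t unit_size)
{
	unsigned int *cpu_map = ai->groups[0].cpu_map;
	int group, unit = 0;

	ai->unit_size = unit_size;

	for (group = 0; group < ai->nr_groups; group++) {
		struct group_info *gi = &ai->groups[group];

		gi->cpu_map = cpu_map;			/* window into the shared map */
		gi->nr_units = units_per_group[group];
		gi->base_offset = unit * unit_size;	/* after all earlier groups' units */

		cpu_map += gi->nr_units;
		unit += gi->nr_units;
	}
}

The real pcpu_build_alloc_info() additionally records the actual CPU numbers in each window and rounds group unit counts to the units-per-allocation granularity (upa).
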
2842 struct pcpu_alloc_info *ai;
2847 ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
2849 if (IS_ERR(ai))
2850 return PTR_ERR(ai);
2852 size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
2853 areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
2863 for (group = 0; group < ai->nr_groups; group++) {
2864 struct pcpu_group_info *gi = &ai->groups[group];
2873 ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
2887 max_distance += ai->unit_size * ai->groups[highest_group].nr_units;
2905 for (group = 0; group < ai->nr_groups; group++) {
2906 struct pcpu_group_info *gi = &ai->groups[group];
2909 for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
2912 free_fn(ptr, ai->unit_size);
2916 memcpy(ptr, __per_cpu_load, ai->static_size);
2917 free_fn(ptr + size_sum, ai->unit_size - size_sum);
2922 for (group = 0; group < ai->nr_groups; group++) {
2923 ai->groups[group].base_offset = areas[group] - base;
2927 PFN_DOWN(size_sum), ai->static_size, ai->reserved_size,
2928 ai->dyn_size, ai->unit_size);
2930 pcpu_setup_first_chunk(ai, base);
2934 for (group = 0; group < ai->nr_groups; group++)
2937 ai->groups[group].nr_units * ai->unit_size);
2939 pcpu_free_alloc_info(ai);
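
The embed path (lines 2842-2939) allocates one contiguous area per group via an arch-supplied allocator, then walks the units in each area: a unit whose cpu_map slot is unused is returned entirely, while a live unit gets the static image copied in from __per_cpu_load and everything past static + reserved + dyn trimmed off. A sketch of that per-unit walk; the group areas, free callback and static image are hypothetical parameters, and the struct types come from the earlier sketch.

#include <string.h>
#include <stddef.h>

/* reuses struct alloc_info / struct group_info and NR_CPUS_STUB from above */
static void embed_trim_sketch(struct alloc_info *ai, char **group_areas,
			      const void *static_image,	/* stands in for __per_cpu_load */
			      void (*free_cb)(void *ptr, size_t size))
{
	size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
	int group, i;

	for (group = 0; group < ai->nr_groups; group++) {
		struct group_info *gi = &ai->groups[group];
		char *ptr = group_areas[group];

		for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
			if (gi->cpu_map[i] == NR_CPUS_STUB) {
				/* whole unit unused: give the memory back */
				free_cb(ptr, ai->unit_size);
				continue;
			}
			/* copy in the static percpu image ... */
			memcpy(ptr, static_image, ai->static_size);
			/* ... and return the tail beyond static+reserved+dyn */
			free_cb(ptr + size_sum, ai->unit_size - size_sum);
		}
	}
}
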
2969 struct pcpu_alloc_info *ai;
2980 ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
2981 if (IS_ERR(ai))
2982 return PTR_ERR(ai);
2983 BUG_ON(ai->nr_groups != 1);
2984 upa = ai->alloc_size/ai->unit_size;
2986 if (WARN_ON(ai->groups[0].nr_units != nr_g0_units)) {
2987 pcpu_free_alloc_info(ai);
2991 unit_pages = ai->unit_size >> PAGE_SHIFT;
3004 unsigned int cpu = ai->groups[0].cpu_map[unit];
3022 vm.size = num_possible_cpus() * ai->unit_size;
3027 (unsigned long)vm.addr + unit * ai->unit_size;
3047 memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
3052 unit_pages, psize_str, ai->static_size,
3053 ai->reserved_size, ai->dyn_size);
3055 pcpu_setup_first_chunk(ai, vm.addr);
3064 pcpu_free_alloc_info(ai);
3132 struct pcpu_alloc_info *ai;
3135 ai = pcpu_alloc_alloc_info(1, 1);
3137 if (!ai || !fc)
3142 ai->dyn_size = unit_size;
3143 ai->unit_size = unit_size;
3144 ai->atom_size = unit_size;
3145 ai->alloc_size = unit_size;
3146 ai->groups[0].nr_units = 1;
3147 ai->groups[0].cpu_map[0] = 0;
3149 pcpu_setup_first_chunk(ai, fc);
3150 pcpu_free_alloc_info(ai);