Lines Matching refs:ai
1583 static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);
2420 * Allocate ai which is large enough for @nr_groups groups containing
2421 * @nr_units units. The returned ai's groups[0].cpu_map points to the
2433 struct pcpu_alloc_info *ai;
2438 base_size = ALIGN(struct_size(ai, groups, nr_groups),
2439 __alignof__(ai->groups[0].cpu_map[0]));
2440 ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
2445 ai = ptr;
2448 ai->groups[0].cpu_map = ptr;
2451 ai->groups[0].cpu_map[unit] = NR_CPUS;
2453 ai->nr_groups = nr_groups;
2454 ai->__ai_size = PFN_ALIGN(ai_size);
2456 return ai;
2461 * @ai: pcpu_alloc_info to free
2463 * Free @ai which was allocated by pcpu_alloc_alloc_info().
2465 void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
2467 memblock_free(ai, ai->__ai_size);
2473 * @ai: allocation info to dump
2475 * Print out information about @ai using loglevel @lvl.
2478 const struct pcpu_alloc_info *ai)
2486 v = ai->nr_groups;
2495 upa = ai->alloc_size / ai->unit_size;
2500 lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
2501 ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
2503 for (group = 0; group < ai->nr_groups; group++) {
2504 const struct pcpu_group_info *gi = &ai->groups[group];
2529 * @ai: pcpu_alloc_info describing how the percpu area is shaped
2536 * @ai contains all information necessary to initialize the first
2539 * @ai->static_size is the size of static percpu area.
2541 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
2549 * @ai->dyn_size determines the number of bytes available for dynamic
2550 * allocation in the first chunk. The area between @ai->static_size +
2551 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
2553 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
2554 * and equal to or larger than @ai->static_size + @ai->reserved_size +
2555 * @ai->dyn_size.
2557 * @ai->atom_size is the allocation atom size and used as alignment
2560 * @ai->alloc_size is the allocation size and always multiple of
2561 * @ai->atom_size. This is larger than @ai->atom_size if
2562 * @ai->unit_size is larger than @ai->atom_size.
2564 * @ai->nr_groups and @ai->groups describe virtual memory layout of
2567 * groupings. If @ai->nr_groups is zero, a single group containing
2581 void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
2584 size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
2600 pcpu_dump_alloc_info(KERN_EMERG, ai); \
2606 PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
2608 PCPU_SETUP_BUG_ON(!ai->static_size);
2613 PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
2614 PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size));
2615 PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
2616 PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->unit_size, PCPU_BITMAP_BLOCK_SIZE));
2617 PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
2618 PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->reserved_size, PCPU_MIN_ALLOC_SIZE));
2621 PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
2624 alloc_size = ai->nr_groups * sizeof(group_offsets[0]);
2630 alloc_size = ai->nr_groups * sizeof(group_sizes[0]);
2654 for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
2655 const struct pcpu_group_info *gi = &ai->groups[group];
2658 group_sizes[group] = gi->nr_units * ai->unit_size;
2670 unit_off[cpu] = gi->base_offset + i * ai->unit_size;
2688 pcpu_dump_alloc_info(KERN_DEBUG, ai);
2690 pcpu_nr_groups = ai->nr_groups;
2697 pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
2699 pcpu_atom_size = ai->atom_size;
2703 pcpu_stats_save_ai(ai);
2733 static_size = ALIGN(ai->static_size, PCPU_MIN_ALLOC_SIZE);
2734 dyn_size = ai->dyn_size - (static_size - ai->static_size);
2748 if (ai->reserved_size)
2750 ai->reserved_size);
2751 tmp_addr = (unsigned long)base_addr + static_size + ai->reserved_size;
2851 struct pcpu_alloc_info *ai;
2941 ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
2942 if (!ai)
2944 cpu_map = ai->groups[0].cpu_map;
2947 ai->groups[group].cpu_map = cpu_map;
2951 ai->static_size = static_size;
2952 ai->reserved_size = reserved_size;
2953 ai->dyn_size = dyn_size;
2954 ai->unit_size = alloc_size / upa;
2955 ai->atom_size = atom_size;
2956 ai->alloc_size = alloc_size;
2959 struct pcpu_group_info *gi = &ai->groups[group];
2966 gi->base_offset = unit * ai->unit_size;
2976 return ai;
3055 struct pcpu_alloc_info *ai;
3060 ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
3062 if (IS_ERR(ai))
3063 return PTR_ERR(ai);
3065 size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
3066 areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
3076 for (group = 0; group < ai->nr_groups; group++) {
3077 struct pcpu_group_info *gi = &ai->groups[group];
3086 ptr = pcpu_fc_alloc(cpu, gi->nr_units * ai->unit_size, atom_size, cpu_to_nd_fn);
3100 max_distance += ai->unit_size * ai->groups[highest_group].nr_units;
3118 for (group = 0; group < ai->nr_groups; group++) {
3119 struct pcpu_group_info *gi = &ai->groups[group];
3122 for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
3125 pcpu_fc_free(ptr, ai->unit_size);
3129 memcpy(ptr, __per_cpu_load, ai->static_size);
3130 pcpu_fc_free(ptr + size_sum, ai->unit_size - size_sum);
3135 for (group = 0; group < ai->nr_groups; group++) {
3136 ai->groups[group].base_offset = areas[group] - base;
3140 PFN_DOWN(size_sum), ai->static_size, ai->reserved_size,
3141 ai->dyn_size, ai->unit_size);
3143 pcpu_setup_first_chunk(ai, base);
3147 for (group = 0; group < ai->nr_groups; group++)
3150 ai->groups[group].nr_units * ai->unit_size);
3152 pcpu_free_alloc_info(ai);
3240 struct pcpu_alloc_info *ai;
3251 ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
3252 if (IS_ERR(ai))
3253 return PTR_ERR(ai);
3254 BUG_ON(ai->nr_groups != 1);
3255 upa = ai->alloc_size/ai->unit_size;
3257 if (WARN_ON(ai->groups[0].nr_units != nr_g0_units)) {
3258 pcpu_free_alloc_info(ai);
3262 unit_pages = ai->unit_size >> PAGE_SHIFT;
3275 unsigned int cpu = ai->groups[0].cpu_map[unit];
3293 vm.size = num_possible_cpus() * ai->unit_size;
3298 (unsigned long)vm.addr + unit * ai->unit_size;
3309 flush_cache_vmap_early(unit_addr, unit_addr + ai->unit_size);
3312 memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
3317 unit_pages, psize_str, ai->static_size,
3318 ai->reserved_size, ai->dyn_size);
3320 pcpu_setup_first_chunk(ai, vm.addr);
3329 pcpu_free_alloc_info(ai);
3385 struct pcpu_alloc_info *ai;
3388 ai = pcpu_alloc_alloc_info(1, 1);
3390 if (!ai || !fc)
3395 ai->dyn_size = unit_size;
3396 ai->unit_size = unit_size;
3397 ai->atom_size = unit_size;
3398 ai->alloc_size = unit_size;
3399 ai->groups[0].nr_units = 1;
3400 ai->groups[0].cpu_map[0] = 0;
3402 pcpu_setup_first_chunk(ai, fc);
3403 pcpu_free_alloc_info(ai);