Lines Matching refs: cpu

42 static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, struct cpumask *groupmask)
53 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
54 printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu);
56 if (group && !cpumask_test_cpu(cpu, sched_group_span(group))) {
57 printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu);
130 static void sched_domain_debug(struct sched_domain *sd, int cpu)
139 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
143 printk(KERN_DEBUG "CPU%d attaching sched-domain(s):\n", cpu);
146 if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask)) {
159 #define sched_domain_debug(sd, cpu) \
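sched_domain_debug() (lines 130-159 above) announces which CPU is being attached, then walks up the parent chain calling sched_domain_debug_one() for each level and stops as soon as a level fails validation; with scheduler debugging disabled the call collapses into the empty macro at line 159. Below is a minimal userspace sketch of that per-level walk, not kernel code: the struct, the bitmask span, and the helpers are invented for illustration.

    #include <stdio.h>

    struct dbg_sd {
        const char *name;
        unsigned long span;              /* one bit per CPU */
        struct dbg_sd *parent;
    };

    /* Validate one level, in the spirit of sched_domain_debug_one():
     * the CPU being attached must sit inside the domain's span. */
    static int dbg_one(struct dbg_sd *sd, int cpu, int level)
    {
        printf(" domain-%d: %s span=0x%lx\n", level, sd->name, sd->span);
        if (!(sd->span & (1UL << cpu))) {
            printf(" ERROR: domain->span does not contain CPU%d\n", cpu);
            return -1;
        }
        return 0;
    }

    static void dbg_domain_debug(struct dbg_sd *sd, int cpu)
    {
        int level = 0;

        if (!sd) {
            printf("CPU%d attaching NULL sched-domain.\n", cpu);
            return;
        }
        printf("CPU%d attaching sched-domain(s):\n", cpu);
        for (; sd; sd = sd->parent, level++)
            if (dbg_one(sd, cpu, level))
                break;                   /* stop at the first broken level */
    }

    int main(void)
    {
        struct dbg_sd pkg = { "PKG", 0x0f, NULL };
        struct dbg_sd smt = { "SMT", 0x03, &pkg };

        dbg_domain_debug(&smt, 1);
        return 0;
    }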
260 static struct perf_domain *find_pd(struct perf_domain *pd, int cpu)
263 if (cpumask_test_cpu(cpu, perf_domain_span(pd))) {
272 static struct perf_domain *pd_init(int cpu)
274 struct em_perf_domain *obj = em_cpu_get(cpu);
279 pr_info("%s: no EM found for CPU%d\n", __func__, cpu);
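find_pd() and pd_init() (lines 260-279) deal with energy-model perf domains: find_pd() walks a singly linked list of perf domains and returns the one whose span contains the CPU, while pd_init() bails out when em_cpu_get() reports no energy model for the CPU. A minimal sketch of the list-walk-plus-span-test pattern, assuming a toy structure with a plain bitmask instead of the kernel's cpumask API (all names below are illustrative):

    #include <stddef.h>
    #include <stdio.h>

    /* Simplified stand-in for struct perf_domain: a span bitmap plus a next link. */
    struct toy_pd {
        unsigned long span;              /* one bit per CPU, CPUs 0..63 only */
        struct toy_pd *next;
    };

    /* Walk the list and return the first domain whose span contains @cpu. */
    static struct toy_pd *toy_find_pd(struct toy_pd *pd, int cpu)
    {
        while (pd) {
            if (pd->span & (1UL << cpu))
                return pd;
            pd = pd->next;
        }
        return NULL;
    }

    int main(void)
    {
        struct toy_pd pd1 = { .span = 0x0fUL, .next = NULL };  /* CPUs 0-3 */
        struct toy_pd pd0 = { .span = 0xf0UL, .next = &pd1 };  /* CPUs 4-7 */

        struct toy_pd *hit = toy_find_pd(&pd0, 2);
        printf("CPU2 is in the domain spanning 0x%lx\n", hit ? hit->span : 0UL);
        return 0;
    }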
363 int cpu = cpumask_first(cpu_map);
364 struct root_domain *rd = cpu_rq(cpu)->rd;
373 if (!per_cpu(sd_asym_cpucapacity, cpu)) {
480 if (cpumask_test_cpu(rq->cpu, old_rd->online)) {
484 cpumask_clear_cpu(rq->cpu, old_rd->span);
499 cpumask_set_cpu(rq->cpu, rd->span);
500 if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) {
678 static void update_top_cache_domain(int cpu)
682 int id = cpu;
685 sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
692 rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
693 per_cpu(sd_llc_size, cpu) = size;
694 per_cpu(sd_llc_id, cpu) = id;
695 rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds);
697 sd = lowest_flag_domain(cpu, SD_NUMA);
698 rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);
700 sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
701 rcu_assign_pointer(per_cpu(sd_asym_packing, cpu), sd);
703 sd = lowest_flag_domain(cpu, SD_ASYM_CPUCAPACITY);
704 rcu_assign_pointer(per_cpu(sd_asym_cpucapacity, cpu), sd);
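update_top_cache_domain() (lines 678-704) caches, per CPU, RCU-protected pointers to particular levels of the domain hierarchy: the last-level-cache domain via highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES), plus the NUMA, asym-packing and asym-capacity levels. A hedged sketch of the highest/lowest flag selection over a parent-linked chain; the struct and the flag values are invented stand-ins, and the "highest" walk assumes the flag is set contiguously from the bottom of the hierarchy, as the kernel helper does:

    #include <stdio.h>

    #define TOY_SHARE_PKG 0x1   /* illustrative stand-in for SD_SHARE_PKG_RESOURCES */
    #define TOY_NUMA      0x2   /* illustrative stand-in for SD_NUMA */

    struct toy_sd {
        int flags;
        struct toy_sd *parent;
    };

    /* Highest domain still carrying @flag; stops once a level lacks it. */
    static struct toy_sd *toy_highest_flag_domain(struct toy_sd *sd, int flag)
    {
        struct toy_sd *hit = NULL;

        for (; sd; sd = sd->parent) {
            if (!(sd->flags & flag))
                break;
            hit = sd;
        }
        return hit;
    }

    /* Lowest domain carrying @flag, or NULL when no level has it. */
    static struct toy_sd *toy_lowest_flag_domain(struct toy_sd *sd, int flag)
    {
        for (; sd; sd = sd->parent)
            if (sd->flags & flag)
                return sd;
        return NULL;
    }

    int main(void)
    {
        struct toy_sd numa = { .flags = TOY_NUMA,      .parent = NULL  };
        struct toy_sd llc  = { .flags = TOY_SHARE_PKG, .parent = &numa };
        struct toy_sd smt  = { .flags = TOY_SHARE_PKG, .parent = &llc  };

        printf("LLC level is 'llc': %s\n",
               toy_highest_flag_domain(&smt, TOY_SHARE_PKG) == &llc ? "yes" : "no");
        printf("NUMA level is 'numa': %s\n",
               toy_lowest_flag_domain(&smt, TOY_NUMA) == &numa ? "yes" : "no");
        return 0;
    }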
708 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
711 static void cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
713 struct rq *rq = cpu_rq(cpu);
756 sched_domain_debug(sd, cpu);
761 dirty_sched_domain_sysctl(cpu);
764 update_top_cache_domain(cpu);
935 static struct sched_group *build_group_from_child_sched_domain(struct sched_domain *sd, int cpu)
940 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), GFP_KERNEL, cpu_to_node(cpu));
961 int cpu;
964 cpu = cpumask_first_and(sched_group_span(sg), mask);
966 sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
1006 static int build_overlap_sched_groups(struct sched_domain *sd, int cpu)
1017 for_each_cpu_wrap(i, span, cpu)
1075 sg = build_group_from_child_sched_domain(sibling, cpu);
1175 static struct sched_group *get_group(int cpu, struct sd_data *sdd)
1177 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
1183 cpu = cpumask_first(sched_domain_span(child));
1186 sg = *per_cpu_ptr(sdd->sg, cpu);
1187 sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
1203 cpumask_set_cpu(cpu, sched_group_span(sg));
1204 cpumask_set_cpu(cpu, group_balance_mask(sg));
1221 static int build_sched_groups(struct sched_domain *sd, int cpu)
1234 for_each_cpu_wrap(i, span, cpu)
1270 void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
1280 int cpu, max_cpu = -1;
1293 for_each_cpu(cpu, sched_group_span(sg))
1296 max_cpu = cpu;
1297 } else if (sched_asym_prefer(cpu, max_cpu)) {
1298 max_cpu = cpu;
1307 if (cpu != group_balance_cpu(sg)) {
1311 update_group_capacity(sd, cpu);
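The loop at lines 1293-1298 chooses the group's preferred CPU for asymmetric packing: the first CPU seeds max_cpu, and each later CPU replaces it only when sched_asym_prefer() ranks it strictly higher. A simplified standalone version of that max-by-priority scan; the per-CPU priority table is invented for the example:

    #include <stdio.h>

    /* Invented per-CPU priority table; higher value = preferred for packing. */
    static const int toy_prio[] = { 1, 3, 3, 0 };

    /* Rough analogue of sched_asym_prefer(): does @a beat @b? */
    static int toy_asym_prefer(int a, int b)
    {
        return toy_prio[a] > toy_prio[b];
    }

    int main(void)
    {
        int cpu, max_cpu = -1;
        int ncpus = sizeof(toy_prio) / sizeof(toy_prio[0]);

        for (cpu = 0; cpu < ncpus; cpu++) {
            if (max_cpu < 0)
                max_cpu = cpu;              /* first CPU in the group */
            else if (toy_asym_prefer(cpu, max_cpu))
                max_cpu = cpu;              /* strictly better priority wins */
        }
        printf("preferred CPU: %d\n", max_cpu);  /* prints 1: ties keep the earlier CPU */
        return 0;
    }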
1397 static void claim_allocations(int cpu, struct sched_domain *sd)
1401 WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
1402 *per_cpu_ptr(sdd->sd, cpu) = NULL;
1404 if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref)) {
1405 *per_cpu_ptr(sdd->sds, cpu) = NULL;
1408 if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref)) {
1409 *per_cpu_ptr(sdd->sg, cpu) = NULL;
1412 if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref)) {
1413 *per_cpu_ptr(sdd->sgc, cpu) = NULL;
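claim_allocations() (lines 1397-1413) keeps the freshly built domain tree's shared objects alive by clearing the per-CPU staging pointers in sd_data, so a later teardown pass only frees what nothing took a reference on; the atomic_read() checks gate that claim for sds/sg/sgc. A loose userspace analogue of the claim-then-free discipline, with a plain integer reference count and invented names:

    #include <stdio.h>
    #include <stdlib.h>

    struct toy_obj {
        int ref;                             /* how many users still reference this object */
    };

    /* "Claim" an object: drop the staging pointer so cleanup won't free it. */
    static void toy_claim(struct toy_obj **slot)
    {
        if ((*slot)->ref)
            *slot = NULL;
    }

    /* Cleanup pass: free only what nobody claimed. */
    static void toy_cleanup(struct toy_obj **slot)
    {
        free(*slot);
        *slot = NULL;
    }

    int main(void)
    {
        struct toy_obj *used = calloc(1, sizeof(*used));
        struct toy_obj *unused = calloc(1, sizeof(*unused));
        struct toy_obj *slot_a = used, *slot_b = unused;

        used->ref = 1;                       /* something took a reference */

        toy_claim(&slot_a);                  /* referenced: pointer cleared, object kept */
        toy_claim(&slot_b);                  /* ref == 0: left in the staging slot */
        toy_cleanup(&slot_a);                /* frees NULL: no-op */
        toy_cleanup(&slot_b);                /* frees the unreferenced object */

        printf("claimed object still alive: %s\n", used->ref ? "yes" : "no");
        free(used);
        return 0;
    }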
1448 struct sched_domain *child, int dflags, int cpu)
1451 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
1461 sd_weight = cpumask_weight(tl->mask(cpu));
1495 cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
1574 static const struct cpumask *sd_numa_mask(int cpu)
1576 return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
1822 void sched_domains_numa_masks_set(unsigned int cpu)
1824 int node = cpu_to_node(cpu);
1830 cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
1836 void sched_domains_numa_masks_clear(unsigned int cpu)
1842 cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]);
1848 * sched_numa_find_closest() - given the NUMA topology, find the CPU
1849 * closest to @cpu from @cpus.
1850 * @cpus: cpumask to find a CPU from
1851 * @cpu: CPU to be close to
1853 * returns: CPU, or nr_cpu_ids when nothing found.
1855 int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
1857 int i, j = cpu_to_node(cpu);
1860 cpu = cpumask_any_and(cpus, sched_domains_numa_masks[i][j]);
1861 if (cpu < nr_cpu_ids) {
1862 return cpu;
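sched_numa_find_closest() (lines 1855-1862) starts at @cpu's home node and scans the per-distance-level masks outward, returning the first CPU that is both in @cpus and reachable within the current level, or nr_cpu_ids when nothing matches. A self-contained simplification over small bitmaps; the two-node layout, level masks and sizes are made up for the example, and "any" is replaced by "first":

    #include <stdio.h>

    #define TOY_NR_CPUS   8
    #define TOY_NR_LEVELS 2

    /*
     * masks[level][node]: CPUs reachable from @node within the level-th
     * NUMA distance. Two nodes, CPUs 0-3 on node 0 and 4-7 on node 1.
     */
    static const unsigned long toy_masks[TOY_NR_LEVELS][2] = {
        { 0x0f, 0xf0 },     /* level 0: the node itself */
        { 0xff, 0xff },     /* level 1: the whole machine */
    };

    static int toy_node_of(int cpu) { return cpu < 4 ? 0 : 1; }

    /* First CPU set in @mask, or TOY_NR_CPUS when the mask is empty. */
    static int toy_first_cpu(unsigned long mask)
    {
        for (int cpu = 0; cpu < TOY_NR_CPUS; cpu++)
            if (mask & (1UL << cpu))
                return cpu;
        return TOY_NR_CPUS;
    }

    /* Closest CPU to @cpu out of @cpus, scanning distance levels outward. */
    static int toy_find_closest(unsigned long cpus, int cpu)
    {
        int node = toy_node_of(cpu);

        for (int level = 0; level < TOY_NR_LEVELS; level++) {
            int found = toy_first_cpu(cpus & toy_masks[level][node]);

            if (found < TOY_NR_CPUS)
                return found;
        }
        return TOY_NR_CPUS;   /* nothing found, like returning nr_cpu_ids */
    }

    int main(void)
    {
        /* Candidates are CPUs 5 and 6, both on node 1. */
        printf("closest to CPU1: %d\n", toy_find_closest(0x60, 1));  /* prints 5 */
        return 0;
    }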
1986 int cpu)
1988 struct sched_domain *sd = sd_init(tl, cpu_map, child, dflags, cpu);
2013 static bool topology_span_sane(struct sched_domain_topology_level *tl, const struct cpumask *cpu_map, int cpu)
2030 if (i == cpu) {
2039 if (!cpumask_equal(tl->mask(cpu), tl->mask(i)) && cpumask_intersects(tl->mask(cpu), tl->mask(i))) {
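topology_span_sane() (lines 2013-2039) requires that, at a given topology level, any two CPUs report either exactly the same mask or completely disjoint masks; a partial overlap (line 2039) means the architecture's topology callbacks are inconsistent. A small standalone check in the same spirit, again over plain bitmaps with an invented per-CPU mask table:

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * Per-CPU level masks for a 4-CPU example. CPU3's mask partially
     * overlaps CPU2's, which is exactly the case the check rejects.
     */
    static const unsigned long toy_tl_mask[] = { 0x3, 0x3, 0xc, 0xe };

    static bool toy_span_sane(int nr_cpus)
    {
        for (int a = 0; a < nr_cpus; a++) {
            for (int b = 0; b < nr_cpus; b++) {
                if (a == b)
                    continue;
                /* Masks must be equal or non-intersecting. */
                if (toy_tl_mask[a] != toy_tl_mask[b] &&
                    (toy_tl_mask[a] & toy_tl_mask[b]))
                    return false;
            }
        }
        return true;
    }

    int main(void)
    {
        printf("topology sane: %s\n", toy_span_sane(4) ? "yes" : "no");  /* prints "no" */
        return 0;
    }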
2319 unsigned int cpu = cpumask_any(cpu_map);
2322 if (rcu_access_pointer(per_cpu(sd_asym_cpucapacity, cpu))) {