Lines Matching defs:mask
99 printk(KERN_CONT " mask=%*pbl", cpumask_pr_args(group_balance_mask(group)));
168 /* Generate a mask of SD flags with the SDF_NEEDS_GROUPS metaflag */
781 * of this group that's also in the balance mask.
783 * The balance mask contains all those CPUs that could actually end up at this
814 * construct the mask of all nodes reachable in @level hops.
846 * To correct this we have to introduce the group balance mask. This mask
889 * Build the balance mask; it contains only those CPUs that can arrive at this
897 static void build_balance_mask(struct sched_domain *sd, struct sched_group *sg, struct cpumask *mask)
904 cpumask_clear(mask);
911 * unused. The mask will not be empty because those CPUs that
923 cpumask_set_cpu(i, mask);
927 WARN_ON_ONCE(cpumask_empty(mask));
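The fragments at 897-927 above all belong to one helper. A reconstruction of how they fit together, sketched after mainline's build_balance_mask(); the sibling lookup through the per-CPU sd_data pointers is inferred from context rather than shown in the matches:

    static void
    build_balance_mask(struct sched_domain *sd, struct sched_group *sg,
                       struct cpumask *mask)
    {
            const struct cpumask *sg_span = sched_group_span(sg);
            struct sd_data *sdd = sd->private;
            struct sched_domain *sibling;
            int i;

            cpumask_clear(mask);

            for_each_cpu(i, sg_span) {
                    sibling = *per_cpu_ptr(sdd->sd, i);

                    /*
                     * Asymmetric setups can leave these siblings unused.
                     * The mask will not be empty because the CPUs that do
                     * have the top domain should span the domain.
                     */
                    if (!sibling->child)
                            continue;

                    /* If we would not end up here, we cannot continue from here. */
                    if (!cpumask_equal(sg_span, sched_domain_span(sibling->child)))
                            continue;

                    cpumask_set_cpu(i, mask);
            }

            /* The balance mask must never end up empty (line 927). */
            WARN_ON_ONCE(cpumask_empty(mask));
    }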
958 struct cpumask *mask = sched_domains_tmpmask2;
963 build_balance_mask(sd, sg, mask);
964 cpu = cpumask_first_and(sched_group_span(sg), mask);
968 cpumask_copy(group_balance_mask(sg), mask);
970 WARN_ON_ONCE(!cpumask_equal(group_balance_mask(sg), mask));
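Lines 958-970 show where the mask computed above is consumed. The shape below follows mainline's init_overlap_sched_group(); the sched_group_capacity reference counting is an assumption based on the copy-once/verify-later pattern visible in the matches:

    static void
    init_overlap_sched_group(struct sched_domain *sd, struct sched_group *sg)
    {
            struct cpumask *mask = sched_domains_tmpmask2;  /* scratch space */
            struct sd_data *sdd = sd->private;
            int cpu;

            build_balance_mask(sd, sg, mask);
            /* Canonical balance CPU: first CPU in both the span and the mask. */
            cpu = cpumask_first_and(sched_group_span(sg), mask);

            sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
            if (atomic_inc_return(&sg->sgc->ref) == 1)
                    cpumask_copy(group_balance_mask(sg), mask);
            else
                    /* Later visitors must compute the identical mask. */
                    WARN_ON_ONCE(!cpumask_equal(group_balance_mask(sg), mask));

            /* (Default group capacity initialization follows in the full function.) */
    }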
1123 * denote the ever-growing mask of CPUs belonging to that level of topology.
1168 * mask is redundant and set to all 1s; all CPUs in a group will end up at _that_
1461 sd_weight = cpumask_weight(tl->mask(cpu));
1495 cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
1561 #define for_each_sd_topology(tl) for (tl = sched_domain_topology; (tl)->mask; (tl)++)
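The iterator on line 1561 stops at the first level whose ->mask callback is NULL, which is how the topology table is terminated. A minimal usage sketch, assuming CONFIG_SCHED_DEBUG so that SD_INIT_NAME() has populated ->name:

    struct sched_domain_topology_level *tl;

    for_each_sd_topology(tl) {
            /* Visits e.g. SMT, MC, PKG, then any NUMA levels, in order. */
            pr_info("level %s: CPU0 mask %*pbl\n",
                    tl->name, cpumask_pr_args(tl->mask(0)));
    }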
1746 * Now for each level, construct a mask per node which contains all
1756 struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
1759 if (!mask) {
1763 sched_domains_numa_masks[i][j] = mask;
1775 cpumask_or(mask, mask, cpumask_of_node(k));
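Lines 1746-1775 allocate and fill, for every (level, node) pair, the mask of all CPUs within that level's NUMA distance. A sketch of the nested loops, assuming the nr_levels / sched_domains_numa_distance[] names used by mainline:

    for (i = 0; i < nr_levels; i++) {
            sched_domains_numa_masks[i] =
                    kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
            if (!sched_domains_numa_masks[i])
                    return;

            for (j = 0; j < nr_node_ids; j++) {
                    struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);

                    if (!mask) {
                            /* allocation-failure unwinding omitted */
                            return;
                    }

                    sched_domains_numa_masks[i][j] = mask;

                    for_each_node(k) {
                            /* Fold in every node within this level's distance. */
                            if (node_distance(j, k) > sched_domains_numa_distance[i])
                                    continue;

                            cpumask_or(mask, mask, cpumask_of_node(k));
                    }
            }
    }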
1781 for (i = 0; sched_domain_topology[i].mask; i++) {
1793 for (i = 0; sched_domain_topology[i].mask; i++) {
1800 tl[i++] = (struct sched_domain_topology_level) {.mask = sd_numa_mask, .numa_level = 0, SD_INIT_NAME(NODE)};
1807 .mask = sd_numa_mask,
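The matches at 1781-1807 assemble the final topology table: copy the default levels, append the NUMA identity level (NODE), then add one overlapping level per remaining NUMA distance. A sketch, with nr_levels assumed to be the number of distinct distances:

    /* Copy the default (SMT/MC/...) levels. */
    for (i = 0; sched_domain_topology[i].mask; i++)
            tl[i] = sched_domain_topology[i];

    /* Add the NUMA identity distance, aka a single NODE. */
    tl[i++] = (struct sched_domain_topology_level){
            .mask = sd_numa_mask,
            .numa_level = 0,
            SD_INIT_NAME(NODE)
    };

    /* ... and one overlapping level per additional distance. */
    for (j = 1; j < nr_levels; i++, j++) {
            tl[i] = (struct sched_domain_topology_level){
                    .mask = sd_numa_mask,
                    .sd_flags = cpu_numa_flags,
                    .flags = SDTL_OVERLAP,
                    .numa_level = j,
                    SD_INIT_NAME(NUMA)
            };
    }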
1875 for (tl = sched_domain_topology; (tl)->mask; (tl)++) {
1949 for (tl = sched_domain_topology; (tl)->mask; (tl)++) {
2039 if (!cpumask_equal(tl->mask(cpu), tl->mask(i)) && cpumask_intersects(tl->mask(cpu), tl->mask(i))) {
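The check on line 2039 enforces that non-overlapping levels partition the CPUs: any two per-CPU masks must be either equal or disjoint. Sketched after mainline's topology_span_sane(); NUMA levels are exempt because SDTL_OVERLAP makes partial intersection legitimate:

    static bool topology_span_sane(struct sched_domain_topology_level *tl,
                                   const struct cpumask *cpu_map, int cpu)
    {
            int i = cpu + 1;

            /* NUMA levels are allowed to overlap. */
            if (tl->flags & SDTL_OVERLAP)
                    return true;

            /*
             * Non-NUMA levels must not partially overlap, or later group
             * construction would corrupt the sched_group lists. Each pair
             * of CPUs is checked once.
             */
            for_each_cpu_from(i, cpu_map) {
                    if (!cpumask_equal(tl->mask(cpu), tl->mask(i)) &&
                        cpumask_intersects(tl->mask(cpu), tl->mask(i)))
                            return false;
            }

            return true;
    }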
2083 for (tl = sched_domain_topology; (tl)->mask; (tl)++) {
2088 for_each_cpu_and(j, tl->mask(i), cpu_map) {
2140 for (tl = sched_domain_topology; (tl)->mask; (tl)++) {
2354 * sched domain for each mask. CPUs not in any of the cpumasks will
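Line 2354 is from the partition_sched_domains() blurb: callers pass an array of cpumasks, one root domain is built per mask, and CPUs in none of the masks end up with no sched domain and are not load balanced. A hypothetical caller sketch using the allocator exported by this file; the node-based masks are illustrative only:

    cpumask_var_t *doms = alloc_sched_domains(2);

    if (doms) {
            /* Two isolated balancing partitions, one per NUMA node. */
            cpumask_copy(doms[0], cpumask_of_node(0));
            cpumask_copy(doms[1], cpumask_of_node(1));

            /* Hotplug lock must be held by the caller. */
            partition_sched_domains(2, doms, NULL);
    }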