/kernel/linux/linux-6.6/arch/riscv/kernel/
  sbi.c
      22  static int (*__sbi_rfence)(int fid, const struct cpumask *cpu_mask,
      72  static unsigned long __sbi_v01_cpumask_to_hartmask(const struct cpumask *cpu_mask)   [in __sbi_v01_cpumask_to_hartmask(), argument]
      84  for_each_cpu(cpuid, cpu_mask) {   [in __sbi_v01_cpumask_to_hartmask()]
     158  static int __sbi_rfence_v01(int fid, const struct cpumask *cpu_mask,   [in __sbi_rfence_v01(), argument]
     165  if (!cpu_mask || cpumask_empty(cpu_mask))   [in __sbi_rfence_v01()]
     166  cpu_mask = cpu_online_mask;   [in __sbi_rfence_v01()]
     167  hart_mask = __sbi_v01_cpumask_to_hartmask(cpu_mask);   [in __sbi_rfence_v01()]
     210  static int __sbi_rfence_v01(int fid, const struct cpumask *cpu_mask,   [in __sbi_rfence_v01(), argument]
     301  static int __sbi_rfence_v02(int fid, const struct cpumask *cpu_mask,   [in __sbi_rfence_v02(), argument]
     373  sbi_remote_fence_i(const struct cpumask *cpu_mask)   [sbi_remote_fence_i(), argument]
     392  sbi_remote_sfence_vma_asid(const struct cpumask *cpu_mask, unsigned long start, unsigned long size, unsigned long asid)   [sbi_remote_sfence_vma_asid(), argument]
     415  sbi_remote_hfence_gvma(const struct cpumask *cpu_mask, unsigned long start, unsigned long size)   [sbi_remote_hfence_gvma(), argument]
     435  sbi_remote_hfence_gvma_vmid(const struct cpumask *cpu_mask, unsigned long start, unsigned long size, unsigned long vmid)   [sbi_remote_hfence_gvma_vmid(), argument]
     454  sbi_remote_hfence_vvma(const struct cpumask *cpu_mask, unsigned long start, unsigned long size)   [sbi_remote_hfence_vvma(), argument]
     475  sbi_remote_hfence_vvma_asid(const struct cpumask *cpu_mask, unsigned long start, unsigned long size, unsigned long asid)   [sbi_remote_hfence_vvma_asid(), argument]
     [more matches not shown]

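The sbi.c hits above are the SBI v0.1 compatibility path, where a kernel cpumask has to be squeezed into a single unsigned long of hart ids before the legacy firmware call. A minimal, hedged sketch of that conversion (illustrative names, not the file's exact code), assuming the usual cpuid_to_hartid_map() helper from asm/smp.h:

    /* Sketch: fold a cpumask into the one-long hart mask used by SBI v0.1. */
    #include <linux/bits.h>
    #include <linux/cpumask.h>
    #include <asm/smp.h>

    static unsigned long cpumask_to_v01_hartmask(const struct cpumask *cpu_mask)
    {
        unsigned long hart_mask = 0;
        int cpuid;

        /* A NULL or empty mask conventionally means "all online CPUs". */
        if (!cpu_mask || cpumask_empty(cpu_mask))
            cpu_mask = cpu_online_mask;

        for_each_cpu(cpuid, cpu_mask) {
            unsigned long hartid = cpuid_to_hartid_map(cpuid);

            if (hartid >= BITS_PER_LONG)    /* v0.1 cannot address this hart */
                continue;
            hart_mask |= 1UL << hartid;
        }
        return hart_mask;
    }
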
/kernel/linux/linux-5.10/scripts/gdb/linux/
  cpus.py
      53  cpu_mask = {}   [variable]
      57  global cpu_mask
      58  cpu_mask = {}
      65  global cpu_mask
      67  if mask_name in cpu_mask:
      68  mask = cpu_mask[mask_name]
      72  cpu_mask[mask_name] = mask

/kernel/linux/linux-6.6/arch/powerpc/platforms/pseries/
  hotplug-cpu.c
     149  * @cpu_mask: the returned CPU mask.
     154  cpumask_var_t *cpu_mask)   [in find_cpu_id_range()]
     163  cpumask_clear(*cpu_mask);   [in find_cpu_id_range()]
     165  cpumask_set_cpu(cpu, *cpu_mask);   [in find_cpu_id_range()]
     189  while (!cpumask_empty(*cpu_mask)) {   [in find_cpu_id_range()]
     190  if (cpumask_subset(*cpu_mask, candidate_mask))   [in find_cpu_id_range()]
     193  cpumask_shift_left(*cpu_mask, *cpu_mask, nthreads);   [in find_cpu_id_range()]
     196  if (!cpumask_empty(*cpu_mask))   [in find_cpu_id_range()]
     215  cpumask_var_t cpu_mask;   [in pseries_add_processor(), local]
     153  find_cpu_id_range(unsigned int nthreads, int assigned_node, cpumask_var_t *cpu_mask)   [find_cpu_id_range(), argument]
     [more matches not shown]

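The find_cpu_id_range() hits show the window-scan pattern: a block of nthreads consecutive CPU ids is slid across the id space until it fits entirely inside a candidate mask. A hedged sketch of just that loop (the candidate mask and the return convention are assumptions, not the file's full logic):

    #include <linux/cpumask.h>
    #include <linux/errno.h>

    /* Slide a window of 'nthreads' consecutive ids until it fits in 'candidate'. */
    static int find_free_id_window(unsigned int nthreads,
                                   const struct cpumask *candidate,
                                   cpumask_var_t *cpu_mask)
    {
        unsigned int cpu;

        cpumask_clear(*cpu_mask);
        for (cpu = 0; cpu < nthreads; cpu++)           /* window = ids 0..nthreads-1 */
            cpumask_set_cpu(cpu, *cpu_mask);

        while (!cpumask_empty(*cpu_mask)) {
            if (cpumask_subset(*cpu_mask, candidate))  /* whole window is free */
                return 0;
            /* move the window up by one core's worth of thread ids */
            cpumask_shift_left(*cpu_mask, *cpu_mask, nthreads);
        }
        return -ENOSPC;    /* the shifted window fell off the end of nr_cpu_ids */
    }
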
/kernel/linux/linux-6.6/arch/riscv/include/asm/
  sbi.h
     275  int sbi_remote_fence_i(const struct cpumask *cpu_mask);
     277  int sbi_remote_sfence_vma_asid(const struct cpumask *cpu_mask,
     281  int sbi_remote_hfence_gvma(const struct cpumask *cpu_mask,
     284  int sbi_remote_hfence_gvma_vmid(const struct cpumask *cpu_mask,
     288  int sbi_remote_hfence_vvma(const struct cpumask *cpu_mask,
     291  int sbi_remote_hfence_vvma_asid(const struct cpumask *cpu_mask,
     326  static inline int sbi_remote_fence_i(const struct cpumask *cpu_mask) { return -1; }   [in sbi_remote_fence_i(), argument]

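These are the caller-facing declarations of the remote-fence API; as the sbi.c excerpt above shows, an empty or NULL mask is treated as "all online harts". A minimal, hedged usage sketch (not taken from the kernel):

    #include <linux/cpumask.h>
    #include <linux/printk.h>
    #include <asm/sbi.h>

    /* Ask the SBI firmware to execute FENCE.I on every online hart. */
    static void remote_icache_flush_all(void)
    {
        int ret = sbi_remote_fence_i(cpu_online_mask);

        if (ret)
            pr_warn("sbi_remote_fence_i() failed: %d\n", ret);
    }
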
/kernel/linux/linux-6.6/scripts/gdb/linux/
  cpus.py
      56  cpu_mask = {}   [variable]
      60  global cpu_mask
      61  cpu_mask = {}
      68  global cpu_mask
      70  if mask_name in cpu_mask:
      71  mask = cpu_mask[mask_name]
      75  cpu_mask[mask_name] = mask

/kernel/linux/linux-6.6/tools/power/x86/amd_pstate_tracer/
  amd_pstate_trace.py
     156  def store_csv(cpu_int, time_pre_dec, time_post_dec, min_perf, des_perf, max_perf, freq_ghz, mperf, aperf, tsc, common_comm, load, duration_ms, sample_num, elapsed_time, cpu_mask):
     161  if cpu_mask[cpu_int] == 0:
     186  def read_trace_data(file_name, cpu_mask):
     235  store_csv(cpu_int, time_pre_dec, time_post_dec, min_perf, des_perf, max_perf, freq_ghz, mperf, aperf, tsc, common_comm, load, duration_ms, sample_num, elapsed_time, cpu_mask)
     240  ipt.split_csv(current_max_cpu, cpu_mask)
     264  cpu_mask = zeros((MAX_CPUS,), dtype=int)   [variable]
     298  cpu_mask[int(p)] = 1
     301  cpu_mask[i] = 1
     331  read_trace_data(file_name, cpu_mask)

/kernel/linux/linux-6.6/arch/x86/kernel/cpu/resctrl/
  rdtgroup.c
     293  mask = &rdtgrp->plr->d->cpu_mask;   [in rdtgroup_cpus_show()]
     300  cpumask_pr_args(&rdtgrp->cpu_mask));   [in rdtgroup_cpus_show()]
     334  * Update the PGR_ASSOC MSR on all cpus in @cpu_mask,
     339  update_closid_rmid(const struct cpumask *cpu_mask, struct rdtgroup *r)   [in update_closid_rmid(), argument]
     341  on_each_cpu_mask(cpu_mask, update_cpu_closid_rmid, r, 1);   [in update_closid_rmid()]
     351  cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask);   [in cpus_mon_write()]
     358  cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);   [in cpus_mon_write()]
     361  cpumask_or(&prgrp->cpu_mask, &prgrp->cpu_mask, tmpmask);   [in cpus_mon_write()]
     369  cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);   [in cpus_mon_write()]
    2128  cpumask_var_t cpu_mask;   [in set_cache_qos_cfg(), local]
    2638  cpumask_var_t cpu_mask;   [in reset_all_ctrls(), local]
     [more matches not shown]

  ctrlmondata.c
     270  cpumask_var_t cpu_mask)   [in apply_config()]
     275  cpumask_set_cpu(cpumask_any(&dom->cpu_mask), cpu_mask);   [in apply_config()]
     292  if (!cpumask_test_cpu(smp_processor_id(), &d->cpu_mask))   [in resctrl_arch_update_one()]
     311  cpumask_var_t cpu_mask;   [in resctrl_arch_update_domains(), local]
     315  if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))   [in resctrl_arch_update_domains()]
     327  if (!apply_config(hw_dom, cfg, idx, cpu_mask))   [in resctrl_arch_update_domains()]
     341  if (cpumask_empty(cpu_mask))   [in resctrl_arch_update_domains()]
     345  on_each_cpu_mask(cpu_mask, rdt_ctrl_update, &msr_param, 1);   [in resctrl_arch_update_domains()]
     348  free_cpumask_var(cpu_mask);   [in resctrl_arch_update_domains()]
     268  apply_config(struct rdt_hw_domain *hw_dom, struct resctrl_staged_config *cfg, u32 idx, cpumask_var_t cpu_mask)   [apply_config(), argument]
     [more matches not shown]

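resctrl_arch_update_domains() builds a temporary cpumask holding one representative CPU per domain, then runs the MSR update on exactly those CPUs; on_each_cpu_mask() also runs the callback locally if the calling CPU is in the mask. A hedged sketch of that shape, with the rdt structures replaced by an illustrative array of per-domain cpumasks:

    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/smp.h>

    static void write_ctrl_msr(void *info)      /* runs on each selected CPU */
    {
        /* the wrmsr for this domain's control register would go here */
    }

    static int update_domains_sketch(const struct cpumask *domain_cpus,
                                     unsigned int num_domains)
    {
        cpumask_var_t cpu_mask;
        unsigned int i;

        if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
            return -ENOMEM;

        /* One CPU per domain is enough: the control MSR is shared domain-wide. */
        for (i = 0; i < num_domains; i++)
            cpumask_set_cpu(cpumask_any(&domain_cpus[i]), cpu_mask);

        if (!cpumask_empty(cpu_mask))
            on_each_cpu_mask(cpu_mask, write_ctrl_msr, NULL, 1);

        free_cpumask_var(cpu_mask);
        return 0;
    }
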
/kernel/linux/linux-5.10/arch/sparc/mm/
  srmmu.c
    1660  cpumask_t cpu_mask;   [in smp_flush_cache_mm(), local]
    1661  cpumask_copy(&cpu_mask, mm_cpumask(mm));   [in smp_flush_cache_mm()]
    1662  cpumask_clear_cpu(smp_processor_id(), &cpu_mask);   [in smp_flush_cache_mm(), local]
    1663  if (!cpumask_empty(&cpu_mask))   [in smp_flush_cache_mm()]
    1672  cpumask_t cpu_mask;   [in smp_flush_tlb_mm(), local]
    1673  cpumask_copy(&cpu_mask, mm_cpumask(mm));   [in smp_flush_tlb_mm()]
    1674  cpumask_clear_cpu(smp_processor_id(), &cpu_mask);   [in smp_flush_tlb_mm(), local]
    1675  if (!cpumask_empty(&cpu_mask)) {   [in smp_flush_tlb_mm()]
    1692  cpumask_t cpu_mask;   [in smp_flush_cache_range(), local]
    1693  cpumask_copy(&cpu_mask, mm_cpumas   [in smp_flush_cache_range()]
    1694  cpumask_clear_cpu(smp_processor_id(), &cpu_mask);   [in smp_flush_cache_range(), local]
    1709  cpumask_t cpu_mask;   [in smp_flush_tlb_range(), local]
    1711  cpumask_clear_cpu(smp_processor_id(), &cpu_mask);   [in smp_flush_tlb_range(), local]
    1724  cpumask_t cpu_mask;   [in smp_flush_cache_page(), local]
    1726  cpumask_clear_cpu(smp_processor_id(), &cpu_mask);   [in smp_flush_cache_page(), local]
    1739  cpumask_t cpu_mask;   [in smp_flush_tlb_page(), local]
    1741  cpumask_clear_cpu(smp_processor_id(), &cpu_mask);   [in smp_flush_tlb_page(), local]
    1765  cpumask_t cpu_mask;   [in smp_flush_sig_insns(), local]
    1767  cpumask_clear_cpu(smp_processor_id(), &cpu_mask);   [in smp_flush_sig_insns(), local]
     [more matches not shown]

/kernel/linux/linux-6.6/arch/sparc/mm/
  srmmu.c
    1659  cpumask_t cpu_mask;   [in smp_flush_cache_mm(), local]
    1660  cpumask_copy(&cpu_mask, mm_cpumask(mm));   [in smp_flush_cache_mm()]
    1661  cpumask_clear_cpu(smp_processor_id(), &cpu_mask);   [in smp_flush_cache_mm(), local]
    1662  if (!cpumask_empty(&cpu_mask))   [in smp_flush_cache_mm()]
    1671  cpumask_t cpu_mask;   [in smp_flush_tlb_mm(), local]
    1672  cpumask_copy(&cpu_mask, mm_cpumask(mm));   [in smp_flush_tlb_mm()]
    1673  cpumask_clear_cpu(smp_processor_id(), &cpu_mask);   [in smp_flush_tlb_mm(), local]
    1674  if (!cpumask_empty(&cpu_mask)) {   [in smp_flush_tlb_mm()]
    1691  cpumask_t cpu_mask;   [in smp_flush_cache_range(), local]
    1692  cpumask_copy(&cpu_mask, mm_cpumas   [in smp_flush_cache_range()]
    1693  cpumask_clear_cpu(smp_processor_id(), &cpu_mask);   [in smp_flush_cache_range(), local]
    1708  cpumask_t cpu_mask;   [in smp_flush_tlb_range(), local]
    1710  cpumask_clear_cpu(smp_processor_id(), &cpu_mask);   [in smp_flush_tlb_range(), local]
    1723  cpumask_t cpu_mask;   [in smp_flush_cache_page(), local]
    1725  cpumask_clear_cpu(smp_processor_id(), &cpu_mask);   [in smp_flush_cache_page(), local]
    1737  cpumask_t cpu_mask;   [in smp_flush_tlb_page(), local]
    1739  cpumask_clear_cpu(smp_processor_id(), &cpu_mask);   [in smp_flush_tlb_page(), local]
    1762  cpumask_t cpu_mask;   [in smp_flush_sig_insns(), local]
    1764  cpumask_clear_cpu(smp_processor_id(), &cpu_mask);   [in smp_flush_sig_insns(), local]
     [more matches not shown]

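Both srmmu.c versions repeat the same guard before every cross-CPU flush: copy the mm's CPU mask, drop the local CPU, and only involve other CPUs if any are left. A hedged sketch of that idiom (the cross-call itself is elided):

    #include <linux/cpumask.h>
    #include <linux/mm_types.h>
    #include <linux/smp.h>

    static void flush_mm_on_others_sketch(struct mm_struct *mm)
    {
        cpumask_t cpu_mask;
        int cpu = get_cpu();                 /* pin ourselves while inspecting */

        cpumask_copy(&cpu_mask, mm_cpumask(mm));
        cpumask_clear_cpu(cpu, &cpu_mask);
        if (!cpumask_empty(&cpu_mask)) {
            /* the real code issues an xc*() cross-call to cpu_mask here */
        }
        put_cpu();
    }
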
/kernel/linux/linux-5.10/arch/x86/kernel/cpu/resctrl/
  rdtgroup.c
     277  mask = &rdtgrp->plr->d->cpu_mask;   [in rdtgroup_cpus_show()]
     284  cpumask_pr_args(&rdtgrp->cpu_mask));   [in rdtgroup_cpus_show()]
     318  * Update the PGR_ASSOC MSR on all cpus in @cpu_mask,
     323  update_closid_rmid(const struct cpumask *cpu_mask, struct rdtgroup *r)   [in update_closid_rmid(), argument]
     327  if (cpumask_test_cpu(cpu, cpu_mask))   [in update_closid_rmid()]
     329  smp_call_function_many(cpu_mask, update_cpu_closid_rmid, r, 1);   [in update_closid_rmid()]
     340  cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask);   [in cpus_mon_write()]
     347  cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);   [in cpus_mon_write()]
     350  cpumask_or(&prgrp->cpu_mask, &prgrp->cpu_mask, tmpmas   [in cpus_mon_write()]
    1883  cpumask_var_t cpu_mask;   [in set_cache_qos_cfg(), local]
    2267  cpumask_var_t cpu_mask;   [in reset_all_ctrls(), local]
     [more matches not shown]

  ctrlmondata.c
     242  cpumask_var_t cpu_mask;   [in update_domains(), local]
     248  if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))   [in update_domains()]
     259  cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);   [in update_domains()]
     268  if (cpumask_empty(cpu_mask) || mba_sc)   [in update_domains()]
     271  /* Update resource control msr on this CPU if it's in cpu_mask. */   [in update_domains()]
     272  if (cpumask_test_cpu(cpu, cpu_mask))   [in update_domains()]
     275  smp_call_function_many(cpu_mask, rdt_ctrl_update, &msr_param, 1);   [in update_domains()]
     279  free_cpumask_var(cpu_mask);   [in update_domains()]
     446  smp_call_function_any(&d->cpu_mask, mon_event_coun   [in mon_event_read()]
     [more matches not shown]

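The v5.10 variant differs from the v6.6 code above in one detail worth noting: smp_call_function_many() never runs the callback on the calling CPU, so the local CPU must be tested and handled with a direct call. A hedged sketch of that split (ctrl_update() stands in for rdt_ctrl_update()):

    #include <linux/cpumask.h>
    #include <linux/smp.h>

    static void ctrl_update(void *info)
    {
        /* the per-CPU MSR update would go here */
    }

    /* Older pattern: IPI the remote CPUs, handle the local CPU by hand. */
    static void run_update_sketch(const struct cpumask *cpu_mask, void *info)
    {
        int cpu = get_cpu();                  /* also disables preemption */

        if (cpumask_test_cpu(cpu, cpu_mask))
            ctrl_update(info);                /* local CPU: call directly */
        smp_call_function_many(cpu_mask, ctrl_update, info, 1);
        put_cpu();
    }
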
/kernel/linux/linux-5.10/kernel/irq/
  affinity.c
     125  * Active CPUs means the CPUs in '@cpu_mask AND @node_to_cpumask[]'
     130  const struct cpumask *cpu_mask,   [in alloc_nodes_vectors()]
     145  cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);   [in alloc_nodes_vectors()]
     251  const struct cpumask *cpu_mask,   [in __irq_build_affinity_masks()]
     261  if (!cpumask_weight(cpu_mask))   [in __irq_build_affinity_masks()]
     264  nodes = get_nodes_in_cpumask(node_to_cpumask, cpu_mask, &nodemsk);   [in __irq_build_affinity_masks()]
     273  cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);   [in __irq_build_affinity_masks()]
     288  alloc_nodes_vectors(numvecs, node_to_cpumask, cpu_mask,   [in __irq_build_affinity_masks()]
     299  cpumask_and(nmsk, cpu_mask, node_to_cpumask[nv->id]);   [in __irq_build_affinity_masks()]
     128  alloc_nodes_vectors(unsigned int numvecs, cpumask_var_t *node_to_cpumask, const struct cpumask *cpu_mask, const nodemask_t nodemsk, struct cpumask *nmsk, struct node_vectors *node_vectors)   [alloc_nodes_vectors(), argument]
     247  __irq_build_affinity_masks(unsigned int startvec, unsigned int numvecs, unsigned int firstvec, cpumask_var_t *node_to_cpumask, const struct cpumask *cpu_mask, struct cpumask *nmsk, struct irq_affinity_desc *masks)   [__irq_build_affinity_masks(), argument]

/kernel/linux/linux-6.6/lib/
  group_cpus.c
     127  * Active CPUs means the CPUs in '@cpu_mask AND @node_to_cpumask[]'
     132  const struct cpumask *cpu_mask,   [in alloc_nodes_groups()]
     147  cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);   [in alloc_nodes_groups()]
     251  const struct cpumask *cpu_mask,   [in __group_cpus_evenly()]
     260  if (cpumask_empty(cpu_mask))   [in __group_cpus_evenly()]
     263  nodes = get_nodes_in_cpumask(node_to_cpumask, cpu_mask, &nodemsk);   [in __group_cpus_evenly()]
     272  cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);   [in __group_cpus_evenly()]
     287  alloc_nodes_groups(numgrps, node_to_cpumask, cpu_mask,   [in __group_cpus_evenly()]
     297  cpumask_and(nmsk, cpu_mask, node_to_cpumask[nv->id]);   [in __group_cpus_evenly()]
     130  alloc_nodes_groups(unsigned int numgrps, cpumask_var_t *node_to_cpumask, const struct cpumask *cpu_mask, const nodemask_t nodemsk, struct cpumask *nmsk, struct node_groups *node_groups)   [alloc_nodes_groups(), argument]
     249  __group_cpus_evenly(unsigned int startgrp, unsigned int numgrps, cpumask_var_t *node_to_cpumask, const struct cpumask *cpu_mask, struct cpumask *nmsk, struct cpumask *masks)   [__group_cpus_evenly(), argument]

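group_cpus.c (and the older kernel/irq/affinity.c above) distributes a CPU mask across NUMA nodes by intersecting it with each node's own CPUs. A hedged sketch of that per-node walk, using the generic cpumask_of_node() helper instead of the file's precomputed node_to_cpumask[] table:

    #include <linux/cpumask.h>
    #include <linux/nodemask.h>
    #include <linux/topology.h>

    /* Count how many CPUs from 'cpu_mask' sit on each online NUMA node. */
    static void count_cpus_per_node_sketch(const struct cpumask *cpu_mask,
                                           struct cpumask *nmsk,
                                           unsigned int *per_node)
    {
        int n;

        for_each_online_node(n) {
            cpumask_and(nmsk, cpu_mask, cpumask_of_node(n));
            per_node[n] = cpumask_weight(nmsk);
        }
    }
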
/kernel/linux/linux-6.6/tools/power/x86/intel_pstate_tracer/
  intel_pstate_tracer.py
     326  def store_csv(cpu_int, time_pre_dec, time_post_dec, core_busy, scaled, _from, _to, mperf, aperf, tsc, freq_ghz, io_boost, common_comm, load, duration_ms, sample_num, elapsed_time, tsc_ghz, cpu_mask):
     331  if cpu_mask[cpu_int] == 0:
     345  def split_csv(current_max_cpu, cpu_mask):
     350  if cpu_mask[int(index)] != 0:
     420  def read_trace_data(filename, cpu_mask):
     480  store_csv(cpu_int, time_pre_dec, time_post_dec, core_busy, scaled, _from, _to, mperf, aperf, tsc, freq_ghz, io_boost, common_comm, load, duration_ms, sample_num, elapsed_time, tsc_ghz, cpu_mask)
     486  split_csv(current_max_cpu, cpu_mask)
     511  cpu_mask = zeros((MAX_CPUS,), dtype=int)
     544  cpu_mask[int(p)] = 1
     547  cpu_mask[
     [more matches not shown]

/kernel/linux/linux-6.6/arch/arm/mach-omap2/
  clkt2xxx_virt_prcm_set.c
      42  static u16 cpu_mask;   [variable]
      82  if (!(ptr->flags & cpu_mask))   [in omap2_round_to_table_rate()]
     106  if (!(prcm->flags & cpu_mask))   [in omap2_select_table_rate()]
     179  if (!(prcm->flags & cpu_mask))   [in omap2xxx_clkt_vps_check_bootloader_rates()]

/kernel/linux/linux-5.10/arch/x86/events/amd/
  power.c
      38  * MSR_F15H_CU_PWR_ACCUMULATOR. cpu_mask represents CPU bit map of all cores
      41  static cpumask_t cpu_mask;   [variable]
     149  return cpumap_print_to_pagebuf(true, buf, &cpu_mask);   [in get_attr_cpumask()]
     223  if (!cpumask_test_and_clear_cpu(cpu, &cpu_mask))   [in power_cpu_exit()]
     233  cpumask_set_cpu(target, &cpu_mask);   [in power_cpu_exit()]
     244  * 1) If any CPU is set at cpu_mask in the same compute unit, do   [in power_cpu_init()]
     246  * 2) If no CPU is set at cpu_mask in the same compute unit,   [in power_cpu_init()]
     250  * sibling mask, then it is also in cpu_mask.   [in power_cpu_init()]
     254  cpumask_set_cpu(cpu, &cpu_mask);   [in power_cpu_init()]

/kernel/linux/linux-6.6/arch/x86/events/amd/
  power.c
      38  * MSR_F15H_CU_PWR_ACCUMULATOR. cpu_mask represents CPU bit map of all cores
      41  static cpumask_t cpu_mask;   [variable]
     149  return cpumap_print_to_pagebuf(true, buf, &cpu_mask);   [in get_attr_cpumask()]
     223  if (!cpumask_test_and_clear_cpu(cpu, &cpu_mask))   [in power_cpu_exit()]
     233  cpumask_set_cpu(target, &cpu_mask);   [in power_cpu_exit()]
     244  * 1) If any CPU is set at cpu_mask in the same compute unit, do   [in power_cpu_init()]
     246  * 2) If no CPU is set at cpu_mask in the same compute unit,   [in power_cpu_init()]
     250  * sibling mask, then it is also in cpu_mask.   [in power_cpu_init()]
     254  cpumask_set_cpu(cpu, &cpu_mask);   [in power_cpu_init()]

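power.c keeps one online CPU per compute unit in a static cpu_mask and exposes that mask through sysfs so perf knows where to open the event. A hedged sketch of such a cpumask "show" attribute (the attribute wiring here is illustrative, not the driver's exact code):

    #include <linux/cpumask.h>
    #include <linux/device.h>

    static cpumask_t pmu_cpu_mask;   /* one representative CPU per compute unit */

    static ssize_t cpumask_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
    {
        /* 'true' selects list format, e.g. "0,2,4,6\n" */
        return cpumap_print_to_pagebuf(true, buf, &pmu_cpu_mask);
    }
    static DEVICE_ATTR_RO(cpumask);
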
/kernel/linux/linux-6.6/kernel/trace/
  preemptirq_delay_test.c
     122  struct cpumask cpu_mask;   [in preemptirq_delay_run(), local]
     125  cpumask_clear(&cpu_mask);   [in preemptirq_delay_run()]
     126  cpumask_set_cpu(cpu_affinity, &cpu_mask);   [in preemptirq_delay_run()]
     127  if (set_cpus_allowed_ptr(current, &cpu_mask))   [in preemptirq_delay_run()]

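preemptirq_delay_run() pins the test thread to one CPU before generating latencies. A minimal, hedged sketch of that affinity pin:

    #include <linux/cpumask.h>
    #include <linux/sched.h>

    /* Restrict the current task to a single CPU; returns 0 on success. */
    static int pin_current_to_cpu_sketch(int cpu)
    {
        struct cpumask cpu_mask;

        cpumask_clear(&cpu_mask);
        cpumask_set_cpu(cpu, &cpu_mask);
        return set_cpus_allowed_ptr(current, &cpu_mask);
    }
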
/kernel/linux/linux-5.10/arch/arm/mach-omap2/
  clkt2xxx_virt_prcm_set.c
      80  if (!(ptr->flags & cpu_mask))   [in omap2_round_to_table_rate()]
     104  if (!(prcm->flags & cpu_mask))   [in omap2_select_table_rate()]
     177  if (!(prcm->flags & cpu_mask))   [in omap2xxx_clkt_vps_check_bootloader_rates()]

/kernel/linux/linux-5.10/drivers/irqchip/
  irq-vf610-mscm-ir.c
      43  u16 cpu_mask;   [member]
      95  WARN_ON(irsprc & ~chip_data->cpu_mask);   [in vf610_mscm_ir_enable()]
      97  writew_relaxed(chip_data->cpu_mask,   [in vf610_mscm_ir_enable()]
     210  mscm_ir_data->cpu_mask = 0x1 << cpuid;   [in vf610_mscm_ir_of_init()]

/kernel/linux/linux-5.10/tools/perf/tests/
  perf-record.c
      54  cpu_set_t cpu_mask;   [in test__PERF_RECORD(), local]
      55  size_t cpu_mask_size = sizeof(cpu_mask);   [in test__PERF_RECORD()]
     114  err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask);   [in test__PERF_RECORD()]
     126  if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) {   [in test__PERF_RECORD()]

/kernel/linux/linux-6.6/drivers/irqchip/
  irq-vf610-mscm-ir.c
      43  u16 cpu_mask;   [member]
      95  WARN_ON(irsprc & ~chip_data->cpu_mask);   [in vf610_mscm_ir_enable()]
      97  writew_relaxed(chip_data->cpu_mask,   [in vf610_mscm_ir_enable()]
     210  mscm_ir_data->cpu_mask = 0x1 << cpuid;   [in vf610_mscm_ir_of_init()]

/kernel/linux/linux-6.6/tools/perf/tests/
  perf-record.c
      54  cpu_set_t cpu_mask;   [in test__PERF_RECORD(), local]
      55  size_t cpu_mask_size = sizeof(cpu_mask);   [in test__PERF_RECORD()]
     114  err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask);   [in test__PERF_RECORD()]
     126  if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) {   [in test__PERF_RECORD()]

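The perf test is the user-space counterpart of the kernel cpumask code above: build a cpu_set_t holding the wanted CPU and bind the workload with sched_setaffinity(2). A minimal, self-contained sketch (CPU 0 is an arbitrary choice):

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
        cpu_set_t cpu_mask;

        CPU_ZERO(&cpu_mask);
        CPU_SET(0, &cpu_mask);                    /* pin to CPU 0 */

        /* pid 0 means "the calling thread" */
        if (sched_setaffinity(0, sizeof(cpu_mask), &cpu_mask) < 0) {
            perror("sched_setaffinity");
            return 1;
        }
        printf("now running on CPU %d\n", sched_getcpu());
        return 0;
    }
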
/kernel/linux/linux-5.10/tools/testing/selftests/bpf/prog_tests/
  cpu_mask.c
      33  const char *cpu_mask;   [member]
      65  err = parse_cpu_mask_str(test_cases[i].cpu_mask, &mask, &n);   [in test_cpu_mask()]

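This selftest feeds CPU-list strings (comma-separated ids and ranges, e.g. a "0-2,7" style mask) to libbpf's parse_cpu_mask_str() and checks the resulting per-CPU boolean array. The sketch below is an illustrative stand-alone parser for the same string format, not libbpf's implementation:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Parse "0-2,7"-style CPU lists into a bool-per-CPU array of size n. */
    static int parse_cpu_list_sketch(const char *s, bool *mask, int n)
    {
        while (*s) {
            char *end;
            long lo = strtol(s, &end, 10), hi = lo;

            if (end == s)
                return -1;                 /* not a number */
            if (*end == '-') {             /* range, e.g. "0-2" */
                s = end + 1;
                hi = strtol(s, &end, 10);
                if (end == s)
                    return -1;
            }
            if (lo < 0 || hi >= n || lo > hi)
                return -1;                 /* out of range */
            for (long c = lo; c <= hi; c++)
                mask[c] = true;
            if (*end == ',')
                end++;
            else if (*end)
                return -1;                 /* unexpected character */
            s = end;
        }
        return 0;
    }

    int main(void)
    {
        bool mask[8] = { false };

        if (parse_cpu_list_sketch("0-2,7", mask, 8) == 0)
            for (int i = 0; i < 8; i++)
                printf("cpu%d: %d\n", i, mask[i]);
        return 0;
    }
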