
Searched refs: per_cpu (Results 1 - 25 of 816), sorted by relevance
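
Every result on this page matches the same accessor: per_cpu(var, cpu) names the instance of a DEFINE_PER_CPU variable that belongs to a given CPU, and it is an lvalue, so it can be read or assigned from any CPU. A minimal sketch of the pattern; demo_count and demo_total() are hypothetical, not symbols from the files below:

    #include <linux/percpu.h>
    #include <linux/cpumask.h>

    /* Hypothetical per-CPU counter, one instance per possible CPU. */
    static DEFINE_PER_CPU(unsigned long, demo_count);

    static unsigned long demo_total(void)
    {
        unsigned long sum = 0;
        int cpu;

        /* per_cpu(var, cpu) is an lvalue for cpu's instance of var. */
        for_each_possible_cpu(cpu)
            sum += per_cpu(demo_count, cpu);
        return sum;
    }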


/kernel/linux/linux-5.10/kernel/sched/
sched_avg.c
56 spin_lock_irqsave(&per_cpu(nr_lock, cpu), flags); in sched_get_nr_running_avg()
58 diff = curr_time - per_cpu(last_time, cpu); in sched_get_nr_running_avg()
61 tmp_nr = per_cpu(nr_prod_sum, cpu); in sched_get_nr_running_avg()
62 tmp_nr += per_cpu(nr, cpu) * diff; in sched_get_nr_running_avg()
65 tmp_misfit = per_cpu(nr_big_prod_sum, cpu); in sched_get_nr_running_avg()
77 stats[cpu].nr_max = per_cpu(nr_max, cpu); in sched_get_nr_running_avg()
82 per_cpu(last_time, cpu) = curr_time; in sched_get_nr_running_avg()
83 per_cpu(nr_prod_sum, cpu) = 0; in sched_get_nr_running_avg()
84 per_cpu(nr_big_prod_sum, cpu) = 0; in sched_get_nr_running_avg()
85 per_cpu(iowait_prod_su in sched_get_nr_running_avg()
[all...]
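
The sched_avg.c hits accumulate a time-weighted run-queue product per CPU under that CPU's own spinlock, then reset the window after a read. A hedged sketch of the same shape; all demo_* names are hypothetical, and the per-CPU lock is assumed to be initialized elsewhere:

    static DEFINE_PER_CPU(spinlock_t, demo_lock);
    static DEFINE_PER_CPU(u64, demo_last_time);
    static DEFINE_PER_CPU(u64, demo_prod_sum);
    static DEFINE_PER_CPU(unsigned int, demo_nr);

    static u64 demo_read_and_reset(int cpu, u64 curr_time)
    {
        unsigned long flags;
        u64 diff, tmp;

        /* Take the CPU's own lock, fold nr * elapsed-time into the
           product sum, then reset the window, mirroring
           sched_get_nr_running_avg(). */
        spin_lock_irqsave(&per_cpu(demo_lock, cpu), flags);
        diff = curr_time - per_cpu(demo_last_time, cpu);
        tmp = per_cpu(demo_prod_sum, cpu) + per_cpu(demo_nr, cpu) * diff;
        per_cpu(demo_last_time, cpu) = curr_time;
        per_cpu(demo_prod_sum, cpu) = 0;
        spin_unlock_irqrestore(&per_cpu(demo_lock, cpu), flags);
        return tmp;
    }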
/kernel/linux/linux-6.6/kernel/sched/
sched_avg.c
56 spin_lock_irqsave(&per_cpu(nr_lock, cpu), flags); in sched_get_nr_running_avg()
58 diff = curr_time - per_cpu(last_time, cpu); in sched_get_nr_running_avg()
61 tmp_nr = per_cpu(nr_prod_sum, cpu); in sched_get_nr_running_avg()
62 tmp_nr += per_cpu(nr, cpu) * diff; in sched_get_nr_running_avg()
65 tmp_misfit = per_cpu(nr_big_prod_sum, cpu); in sched_get_nr_running_avg()
77 stats[cpu].nr_max = per_cpu(nr_max, cpu); in sched_get_nr_running_avg()
82 per_cpu(last_time, cpu) = curr_time; in sched_get_nr_running_avg()
83 per_cpu(nr_prod_sum, cpu) = 0; in sched_get_nr_running_avg()
84 per_cpu(nr_big_prod_sum, cpu) = 0; in sched_get_nr_running_avg()
85 per_cpu(iowait_prod_su in sched_get_nr_running_avg()
[all...]
/kernel/linux/linux-5.10/arch/x86/xen/
smp.c
35 kfree(per_cpu(xen_resched_irq, cpu).name); in xen_smp_intr_free()
36 per_cpu(xen_resched_irq, cpu).name = NULL; in xen_smp_intr_free()
37 if (per_cpu(xen_resched_irq, cpu).irq >= 0) { in xen_smp_intr_free()
38 unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL); in xen_smp_intr_free()
39 per_cpu(xen_resched_irq, cpu).irq = -1; in xen_smp_intr_free()
41 kfree(per_cpu(xen_callfunc_irq, cpu).name); in xen_smp_intr_free()
42 per_cpu(xen_callfunc_irq, cpu).name = NULL; in xen_smp_intr_free()
43 if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) { in xen_smp_intr_free()
44 unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL); in xen_smp_intr_free()
45 per_cpu(xen_callfunc_ir in xen_smp_intr_free()
[all...]
smp_pv.c
103 kfree(per_cpu(xen_irq_work, cpu).name); in xen_smp_intr_free_pv()
104 per_cpu(xen_irq_work, cpu).name = NULL; in xen_smp_intr_free_pv()
105 if (per_cpu(xen_irq_work, cpu).irq >= 0) { in xen_smp_intr_free_pv()
106 unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL); in xen_smp_intr_free_pv()
107 per_cpu(xen_irq_work, cpu).irq = -1; in xen_smp_intr_free_pv()
110 kfree(per_cpu(xen_pmu_irq, cpu).name); in xen_smp_intr_free_pv()
111 per_cpu(xen_pmu_irq, cpu).name = NULL; in xen_smp_intr_free_pv()
112 if (per_cpu(xen_pmu_irq, cpu).irq >= 0) { in xen_smp_intr_free_pv()
113 unbind_from_irqhandler(per_cpu(xen_pmu_irq, cpu).irq, NULL); in xen_smp_intr_free_pv()
114 per_cpu(xen_pmu_ir in xen_smp_intr_free_pv()
[all...]
spinlock.c
25 int irq = per_cpu(lock_kicker_irq, cpu); in xen_qlock_kick()
74 WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n", in xen_init_lock_cpu()
75 cpu, per_cpu(lock_kicker_irq, cpu)); in xen_init_lock_cpu()
78 per_cpu(irq_name, cpu) = name; in xen_init_lock_cpu()
88 per_cpu(lock_kicker_irq, cpu) = irq; in xen_init_lock_cpu()
101 kfree(per_cpu(irq_name, cpu)); in xen_uninit_lock_cpu()
102 per_cpu(irq_name, cpu) = NULL; in xen_uninit_lock_cpu()
107 irq = per_cpu(lock_kicker_irq, cpu); in xen_uninit_lock_cpu()
112 per_cpu(lock_kicker_irq, cpu) = -1; in xen_uninit_lock_cpu()
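
The three Xen files above share one teardown idiom: a per-CPU record pairs an IRQ number with its allocated name; teardown frees the name unconditionally, unbinds only when the irq field holds a valid (>= 0) number, and parks the field at -1 so a repeated call is a no-op. A sketch under those assumptions; demo_irq_info and demo_irq_free() are hypothetical, while unbind_from_irqhandler() is the real Xen helper:

    struct demo_irq_info {
        int irq;
        char *name;
    };
    static DEFINE_PER_CPU(struct demo_irq_info, demo_irq) = { .irq = -1 };

    static void demo_irq_free(unsigned int cpu)
    {
        /* Free the name first, unbind only if an IRQ was bound,
           then mark the slot empty for idempotence. */
        kfree(per_cpu(demo_irq, cpu).name);
        per_cpu(demo_irq, cpu).name = NULL;
        if (per_cpu(demo_irq, cpu).irq >= 0) {
            unbind_from_irqhandler(per_cpu(demo_irq, cpu).irq, NULL);
            per_cpu(demo_irq, cpu).irq = -1;
        }
    }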
/kernel/linux/linux-6.6/arch/x86/xen/
smp.c
35 kfree(per_cpu(xen_resched_irq, cpu).name); in xen_smp_intr_free()
36 per_cpu(xen_resched_irq, cpu).name = NULL; in xen_smp_intr_free()
37 if (per_cpu(xen_resched_irq, cpu).irq >= 0) { in xen_smp_intr_free()
38 unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL); in xen_smp_intr_free()
39 per_cpu(xen_resched_irq, cpu).irq = -1; in xen_smp_intr_free()
41 kfree(per_cpu(xen_callfunc_irq, cpu).name); in xen_smp_intr_free()
42 per_cpu(xen_callfunc_irq, cpu).name = NULL; in xen_smp_intr_free()
43 if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) { in xen_smp_intr_free()
44 unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL); in xen_smp_intr_free()
45 per_cpu(xen_callfunc_ir in xen_smp_intr_free()
[all...]
smp_pv.c
101 kfree(per_cpu(xen_irq_work, cpu).name); in xen_smp_intr_free_pv()
102 per_cpu(xen_irq_work, cpu).name = NULL; in xen_smp_intr_free_pv()
103 if (per_cpu(xen_irq_work, cpu).irq >= 0) { in xen_smp_intr_free_pv()
104 unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL); in xen_smp_intr_free_pv()
105 per_cpu(xen_irq_work, cpu).irq = -1; in xen_smp_intr_free_pv()
108 kfree(per_cpu(xen_pmu_irq, cpu).name); in xen_smp_intr_free_pv()
109 per_cpu(xen_pmu_irq, cpu).name = NULL; in xen_smp_intr_free_pv()
110 if (per_cpu(xen_pmu_irq, cpu).irq >= 0) { in xen_smp_intr_free_pv()
111 unbind_from_irqhandler(per_cpu(xen_pmu_irq, cpu).irq, NULL); in xen_smp_intr_free_pv()
112 per_cpu(xen_pmu_ir in xen_smp_intr_free_pv()
[all...]
spinlock.c
25 int irq = per_cpu(lock_kicker_irq, cpu); in xen_qlock_kick()
74 WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n", in xen_init_lock_cpu()
75 cpu, per_cpu(lock_kicker_irq, cpu)); in xen_init_lock_cpu()
78 per_cpu(irq_name, cpu) = name; in xen_init_lock_cpu()
88 per_cpu(lock_kicker_irq, cpu) = irq; in xen_init_lock_cpu()
101 kfree(per_cpu(irq_name, cpu)); in xen_uninit_lock_cpu()
102 per_cpu(irq_name, cpu) = NULL; in xen_uninit_lock_cpu()
107 irq = per_cpu(lock_kicker_irq, cpu); in xen_uninit_lock_cpu()
112 per_cpu(lock_kicker_irq, cpu) = -1; in xen_uninit_lock_cpu()
/kernel/linux/linux-5.10/arch/x86/oprofile/
nmi_int.c
156 kfree(per_cpu(cpu_msrs, i).multiplex); in nmi_shutdown_mux()
157 per_cpu(cpu_msrs, i).multiplex = NULL; in nmi_shutdown_mux()
158 per_cpu(switch_index, i) = 0; in nmi_shutdown_mux()
172 per_cpu(cpu_msrs, i).multiplex = in nmi_setup_mux()
174 if (!per_cpu(cpu_msrs, i).multiplex) in nmi_setup_mux()
197 per_cpu(switch_index, cpu) = 0; in nmi_cpu_setup_mux()
229 int si = per_cpu(switch_index, cpu); in nmi_cpu_switch()
230 struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu); in nmi_cpu_switch()
238 per_cpu(switch_index, cpu) = 0; in nmi_cpu_switch()
240 per_cpu(switch_inde in nmi_cpu_switch()
[all...]
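
nmi_int.c shows per-CPU slots that hold dynamically allocated buffers: kzalloc into each CPU's slot on setup, kfree and clear on shutdown. A hedged sketch of that alloc/free pairing; demo_buf and both functions are hypothetical:

    static DEFINE_PER_CPU(void *, demo_buf);

    static int demo_setup_bufs(size_t size)
    {
        int i;

        for_each_possible_cpu(i) {
            per_cpu(demo_buf, i) = kzalloc(size, GFP_KERNEL);
            if (!per_cpu(demo_buf, i))
                return -ENOMEM; /* caller unwinds via demo_free_bufs() */
        }
        return 0;
    }

    static void demo_free_bufs(void)
    {
        int i;

        for_each_possible_cpu(i) {
            kfree(per_cpu(demo_buf, i));
            per_cpu(demo_buf, i) = NULL;
        }
    }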
/kernel/linux/linux-6.6/arch/powerpc/kernel/
irq.c
101 seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_event); in arch_show_interrupts()
106 seq_printf(p, "%10u ", per_cpu(irq_stat, j).broadcast_irqs_event); in arch_show_interrupts()
111 seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_others); in arch_show_interrupts()
116 seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs); in arch_show_interrupts()
121 seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs); in arch_show_interrupts()
126 seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions); in arch_show_interrupts()
140 seq_printf(p, "%10u ", per_cpu(irq_stat, j).sreset_irqs); in arch_show_interrupts()
146 seq_printf(p, "%10u ", per_cpu(irq_stat, j).soft_nmi_irqs); in arch_show_interrupts()
154 seq_printf(p, "%10u ", per_cpu(irq_stat, j).doorbell_irqs); in arch_show_interrupts()
167 u64 sum = per_cpu(irq_sta in arch_irq_stat_cpu()
[all...]
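
Both powerpc irq.c versions (this one and the 5.10 copy further down) render /proc/interrupts by reading one per-CPU counter per column. The shape, with a hypothetical demo_timer_irqs counter:

    static DEFINE_PER_CPU(unsigned int, demo_timer_irqs);

    static void demo_show_row(struct seq_file *p)
    {
        int j;

        /* One fixed-width column per online CPU, each read from that
           CPU's instance of the counter. */
        seq_printf(p, "%*s: ", 3, "LOC");
        for_each_online_cpu(j)
            seq_printf(p, "%10u ", per_cpu(demo_timer_irqs, j));
        seq_putc(p, '\n');
    }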
/kernel/linux/linux-5.10/drivers/cpufreq/
speedstep-centrino.c
261 per_cpu(centrino_model, policy->cpu) = model; in centrino_cpu_init_table()
296 if ((per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_BANIAS]) || in extract_clock()
297 (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_A1]) || in extract_clock()
298 (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_B0])) { in extract_clock()
303 if ((!per_cpu(centrino_model, cpu)) || in extract_clock()
304 (!per_cpu(centrino_model, cpu)->op_points)) in extract_clock()
309 per_cpu(centrino_model, cpu)->op_points[i].frequency in extract_clock()
312 if (msr == per_cpu(centrino_model, cpu)->op_points[i].driver_data) in extract_clock()
313 return per_cpu(centrino_model, cpu)-> in extract_clock()
317 return per_cpu(centrino_mode in extract_clock()
[all...]
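
speedstep-centrino keeps a per-CPU pointer to the active frequency table and guards every dereference against a slot that was never filled. A sketch of that guard; the demo_* types and names are hypothetical:

    struct demo_op_point { unsigned int frequency; unsigned int driver_data; };
    struct demo_model { struct demo_op_point *op_points; };

    static DEFINE_PER_CPU(struct demo_model *, demo_model_ptr);

    static unsigned int demo_freq(unsigned int cpu, int i)
    {
        /* NULL slot or missing table means this CPU was never set
           up; bail out before indexing, as extract_clock() does. */
        if (!per_cpu(demo_model_ptr, cpu) ||
            !per_cpu(demo_model_ptr, cpu)->op_points)
            return 0;
        return per_cpu(demo_model_ptr, cpu)->op_points[i].frequency;
    }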
/kernel/linux/linux-6.6/drivers/cpufreq/
speedstep-centrino.c
261 per_cpu(centrino_model, policy->cpu) = model; in centrino_cpu_init_table()
296 if ((per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_BANIAS]) || in extract_clock()
297 (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_A1]) || in extract_clock()
298 (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_B0])) { in extract_clock()
303 if ((!per_cpu(centrino_model, cpu)) || in extract_clock()
304 (!per_cpu(centrino_model, cpu)->op_points)) in extract_clock()
309 per_cpu(centrino_model, cpu)->op_points[i].frequency in extract_clock()
312 if (msr == per_cpu(centrino_model, cpu)->op_points[i].driver_data) in extract_clock()
313 return per_cpu(centrino_model, cpu)-> in extract_clock()
317 return per_cpu(centrino_mode in extract_clock()
[all...]
/kernel/linux/linux-5.10/kernel/
smpboot.c
32 struct task_struct *tsk = per_cpu(idle_threads, cpu); in idle_thread_get()
41 per_cpu(idle_threads, smp_processor_id()) = current; in idle_thread_set_boot_cpu()
52 struct task_struct *tsk = per_cpu(idle_threads, cpu); in idle_init()
59 per_cpu(idle_threads, cpu) = tsk; in idle_init()
283 * smpboot_register_percpu_thread - Register a per_cpu thread related
313 * smpboot_unregister_percpu_thread - Unregister a per_cpu thread related to hotplug
337 return atomic_read(&per_cpu(cpu_hotplug_state, cpu)); in cpu_report_state()
355 atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_UP_PREPARE); in cpu_check_up_prepare()
359 switch (atomic_read(&per_cpu(cpu_hotplug_state, cpu))) { in cpu_check_up_prepare()
364 atomic_set(&per_cpu(cpu_hotplug_stat in cpu_check_up_prepare()
[all...]
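
smpboot.c uses per_cpu() for both a task pointer (the idle thread) and an atomic_t hotplug state; the address of a per-CPU atomic_t goes to the usual atomic helpers. A sketch of the atomic variant, with a hypothetical demo_hp_state:

    static DEFINE_PER_CPU(atomic_t, demo_hp_state);

    static int demo_report_state(int cpu)
    {
        /* atomic_read()/atomic_set() take &per_cpu(...) like any
           other atomic_t, as cpu_report_state() shows. */
        return atomic_read(&per_cpu(demo_hp_state, cpu));
    }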
softirq.c
630 per_cpu(tasklet_vec, cpu).tail = in softirq_init()
631 &per_cpu(tasklet_vec, cpu).head; in softirq_init()
632 per_cpu(tasklet_hi_vec, cpu).tail = in softirq_init()
633 &per_cpu(tasklet_hi_vec, cpu).head; in softirq_init()
682 for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) { in tasklet_kill_immediate()
687 per_cpu(tasklet_vec, cpu).tail = i; in tasklet_kill_immediate()
700 if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) { in takeover_tasklets()
701 *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head; in takeover_tasklets()
702 __this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_ve in takeover_tasklets()
[all...]
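
softirq.c threads a singly linked tasklet list per CPU, where tail points at the last next-pointer; an empty list is head == NULL with tail == &head, which is what softirq_init() establishes. A sketch of that invariant with hypothetical demo_* types:

    struct demo_tasklet { struct demo_tasklet *next; };
    struct demo_vec {
        struct demo_tasklet *head;
        struct demo_tasklet **tail;
    };
    static DEFINE_PER_CPU(struct demo_vec, demo_list);

    static void demo_list_init(void)
    {
        int cpu;

        /* Prime each CPU's list to empty: tail points back at head. */
        for_each_possible_cpu(cpu)
            per_cpu(demo_list, cpu).tail = &per_cpu(demo_list, cpu).head;
    }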
/kernel/linux/linux-5.10/drivers/perf/
arm_pmu_acpi.c
161 per_cpu(pmu_irqs, cpu) = irq; in arm_pmu_acpi_parse_irqs()
169 irq = per_cpu(pmu_irqs, cpu); in arm_pmu_acpi_parse_irqs()
180 if (per_cpu(pmu_irqs, irq_cpu) == irq) in arm_pmu_acpi_parse_irqs()
181 per_cpu(pmu_irqs, irq_cpu) = 0; in arm_pmu_acpi_parse_irqs()
195 pmu = per_cpu(probed_pmus, cpu); in arm_pmu_acpi_find_alloc_pmu()
227 int other_irq = per_cpu(hw_events->irq, cpu); in pmu_irq_matches()
259 if (per_cpu(probed_pmus, cpu)) in arm_pmu_acpi_cpu_starting()
262 irq = per_cpu(pmu_irqs, cpu); in arm_pmu_acpi_cpu_starting()
268 per_cpu(probed_pmus, cpu) = pmu; in arm_pmu_acpi_cpu_starting()
272 per_cpu(hw_event in arm_pmu_acpi_cpu_starting()
[all...]
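
arm_pmu_acpi.c records one IRQ number per CPU in a plain per-CPU int and, when an IRQ turns out to be unusable, clears every slot that recorded it. A sketch of that sweep; demo_pmu_irq and demo_drop_irq() are hypothetical:

    static DEFINE_PER_CPU(int, demo_pmu_irq);

    static void demo_drop_irq(int irq)
    {
        int cpu;

        /* Clear every CPU slot holding this IRQ, as
           arm_pmu_acpi_parse_irqs() does for pmu_irqs. */
        for_each_possible_cpu(cpu)
            if (per_cpu(demo_pmu_irq, cpu) == irq)
                per_cpu(demo_pmu_irq, cpu) = 0;
    }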
/kernel/linux/linux-5.10/arch/arm/mm/
context.c
67 asid = per_cpu(active_asids, cpu).counter; in a15_erratum_get_cpumask()
69 asid = per_cpu(reserved_asids, cpu); in a15_erratum_get_cpumask()
144 asid = atomic64_xchg(&per_cpu(active_asids, i), 0); in flush_context()
153 asid = per_cpu(reserved_asids, i); in flush_context()
155 per_cpu(reserved_asids, i) = asid; in flush_context()
180 if (per_cpu(reserved_asids, cpu) == asid) { in check_update_reserved_asid()
182 per_cpu(reserved_asids, cpu) = newasid; in check_update_reserved_asid()
255 && atomic64_xchg(&per_cpu(active_asids, cpu), asid)) in check_and_switch_context()
271 atomic64_set(&per_cpu(active_asids, cpu), asid); in check_and_switch_context()
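
context.c mixes two per-CPU types: an atomic64_t for the CPU's active ASID and a plain u64 for the reserved one. atomic64_xchg() on &per_cpu(...) reads and clears a remote CPU's value in one step during generation rollover. A sketch with hypothetical demo_* names:

    static DEFINE_PER_CPU(atomic64_t, demo_active_asids);
    static DEFINE_PER_CPU(u64, demo_reserved_asids);

    static void demo_park_asid(int cpu)
    {
        /* Read-and-clear the active ASID atomically, then park a
           live value in the reserved slot, as flush_context() does. */
        u64 asid = atomic64_xchg(&per_cpu(demo_active_asids, cpu), 0);

        if (asid)
            per_cpu(demo_reserved_asids, cpu) = asid;
    }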
/kernel/linux/linux-5.10/arch/x86/kernel/apic/
x2apic_cluster.c
30 u32 dest = per_cpu(x86_cpu_to_logical_apicid, cpu); in x2apic_send_IPI()
57 struct cluster_mask *cmsk = per_cpu(cluster_masks, cpu); in __x2apic_send_IPI_mask()
61 dest |= per_cpu(x86_cpu_to_logical_apicid, clustercpu); in __x2apic_send_IPI_mask()
97 return per_cpu(x86_cpu_to_logical_apicid, cpu); in x2apic_calc_apicid()
113 cmsk = per_cpu(cluster_masks, cpu); in init_x2apic_ldr()
128 if (per_cpu(cluster_masks, cpu)) in alloc_clustermask()
152 if (!zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL)) in x2apic_prepare_cpu()
159 struct cluster_mask *cmsk = per_cpu(cluster_masks, dead_cpu); in x2apic_dead_cpu()
163 free_cpumask_var(per_cpu(ipi_mask, dead_cpu)); in x2apic_dead_cpu()
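
x2apic_cluster.c ties per-CPU cpumask allocation to CPU hotplug: zalloc_cpumask_var() into the CPU's slot in the prepare callback, free_cpumask_var() in the dead callback. A sketch of that pairing; demo_mask and both callbacks are hypothetical:

    static DEFINE_PER_CPU(cpumask_var_t, demo_mask);

    static int demo_prepare_cpu(unsigned int cpu)
    {
        /* Allocate (zeroed) before the CPU comes online... */
        if (!zalloc_cpumask_var(&per_cpu(demo_mask, cpu), GFP_KERNEL))
            return -ENOMEM;
        return 0;
    }

    static int demo_dead_cpu(unsigned int cpu)
    {
        /* ...and release once it is gone, as x2apic_dead_cpu() does
           for ipi_mask. */
        free_cpumask_var(per_cpu(demo_mask, cpu));
        return 0;
    }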
/kernel/linux/linux-6.6/arch/arm/mm/
context.c
67 asid = per_cpu(active_asids, cpu).counter; in a15_erratum_get_cpumask()
69 asid = per_cpu(reserved_asids, cpu); in a15_erratum_get_cpumask()
144 asid = atomic64_xchg(&per_cpu(active_asids, i), 0); in flush_context()
153 asid = per_cpu(reserved_asids, i); in flush_context()
155 per_cpu(reserved_asids, i) = asid; in flush_context()
180 if (per_cpu(reserved_asids, cpu) == asid) { in check_update_reserved_asid()
182 per_cpu(reserved_asids, cpu) = newasid; in check_update_reserved_asid()
254 && atomic64_xchg(&per_cpu(active_asids, cpu), asid)) in check_and_switch_context()
270 atomic64_set(&per_cpu(active_asids, cpu), asid); in check_and_switch_context()
/kernel/linux/linux-6.6/drivers/perf/
arm_pmu_acpi.c
220 per_cpu(pmu_irqs, cpu) = irq; in arm_pmu_acpi_parse_irqs()
230 irq = per_cpu(pmu_irqs, cpu); in arm_pmu_acpi_parse_irqs()
241 if (per_cpu(pmu_irqs, irq_cpu) == irq) in arm_pmu_acpi_parse_irqs()
242 per_cpu(pmu_irqs, irq_cpu) = 0; in arm_pmu_acpi_parse_irqs()
256 pmu = per_cpu(probed_pmus, cpu); in arm_pmu_acpi_find_pmu()
279 int other_irq = per_cpu(hw_events->irq, cpu); in pmu_irq_matches()
298 int irq = per_cpu(pmu_irqs, cpu); in arm_pmu_acpi_associate_pmu_cpu()
300 per_cpu(probed_pmus, cpu) = pmu; in arm_pmu_acpi_associate_pmu_cpu()
305 per_cpu(hw_events->irq, cpu) = irq; in arm_pmu_acpi_associate_pmu_cpu()
325 if (per_cpu(probed_pmu in arm_pmu_acpi_cpu_starting()
[all...]
/kernel/linux/linux-5.10/arch/powerpc/kernel/
irq.c
565 seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_event); in arch_show_interrupts()
570 seq_printf(p, "%10u ", per_cpu(irq_stat, j).broadcast_irqs_event); in arch_show_interrupts()
575 seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_others); in arch_show_interrupts()
580 seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs); in arch_show_interrupts()
585 seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs); in arch_show_interrupts()
590 seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions); in arch_show_interrupts()
604 seq_printf(p, "%10u ", per_cpu(irq_stat, j).sreset_irqs); in arch_show_interrupts()
610 seq_printf(p, "%10u ", per_cpu(irq_stat, j).soft_nmi_irqs); in arch_show_interrupts()
618 seq_printf(p, "%10u ", per_cpu(irq_stat, j).doorbell_irqs); in arch_show_interrupts()
631 u64 sum = per_cpu(irq_sta in arch_irq_stat_cpu()
[all...]
/kernel/linux/linux-6.6/kernel/
softirq.c
898 per_cpu(tasklet_vec, cpu).tail = in softirq_init()
899 &per_cpu(tasklet_vec, cpu).head; in softirq_init()
900 per_cpu(tasklet_hi_vec, cpu).tail = in softirq_init()
901 &per_cpu(tasklet_hi_vec, cpu).head; in softirq_init()
936 if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) { in takeover_tasklets()
937 *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head; in takeover_tasklets()
938 __this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail); in takeover_tasklets()
939 per_cpu(tasklet_vec, cpu).head = NULL; in takeover_tasklets()
940 per_cpu(tasklet_ve in takeover_tasklets()
[all...]
/kernel/linux/linux-5.10/arch/x86/kvm/vmx/
posted_intr.c
124 raw_spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); in __pi_post_block()
126 raw_spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); in __pi_post_block()
157 raw_spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); in pi_pre_block()
159 &per_cpu(blocked_vcpu_on_cpu, in pi_pre_block()
161 raw_spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu)); in pi_pre_block()
218 raw_spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); in pi_wakeup_handler()
219 list_for_each_entry(vcpu, &per_cpu(blocked_vcpu_on_cpu, cpu), in pi_wakeup_handler()
226 raw_spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu)); in pi_wakeup_handler()
231 INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu)); in pi_init_cpu()
232 raw_spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_loc in pi_init_cpu()
[all...]
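
posted_intr.c keeps one blocked-vCPU list and one raw spinlock per CPU, initialized once per CPU at setup; the wakeup handler then walks its own CPU's list under that lock. A sketch of the per-CPU init; demo_* names are hypothetical:

    static DEFINE_PER_CPU(struct list_head, demo_blocked_list);
    static DEFINE_PER_CPU(raw_spinlock_t, demo_blocked_lock);

    static void demo_init_cpu(int cpu)
    {
        /* Same shape as pi_init_cpu(): one list head plus the raw
           lock that guards it, per CPU. */
        INIT_LIST_HEAD(&per_cpu(demo_blocked_list, cpu));
        raw_spin_lock_init(&per_cpu(demo_blocked_lock, cpu));
    }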
/kernel/linux/linux-5.10/drivers/oprofile/
oprofile_perf.c
43 if (per_cpu(perf_events, cpu)[id] == event) in op_overflow_handler()
79 if (!counter_config[event].enabled || per_cpu(perf_events, cpu)[event]) in op_create_counter()
96 per_cpu(perf_events, cpu)[event] = pevent; in op_create_counter()
103 struct perf_event *pevent = per_cpu(perf_events, cpu)[event]; in op_destroy_counter()
107 per_cpu(perf_events, cpu)[event] = NULL; in op_destroy_counter()
262 event = per_cpu(perf_events, cpu)[id]; in oprofile_perf_exit()
267 kfree(per_cpu(perf_events, cpu)); in oprofile_perf_exit()
301 per_cpu(perf_events, cpu) = kcalloc(num_counters, in oprofile_perf_init()
303 if (!per_cpu(perf_events, cpu)) { in oprofile_perf_init()
/kernel/linux/linux-6.6/arch/x86/kernel/apic/
x2apic_cluster.c
58 struct cpumask *cmsk = per_cpu(cluster_masks, cpu); in __x2apic_send_IPI_mask()
110 struct cpumask **cpu_cmsk = &per_cpu(cluster_masks, cpu_i); in prefill_clustermask()
135 if (per_cpu(cluster_masks, cpu)) in alloc_clustermask()
150 cmsk = per_cpu(cluster_masks, cpu_i); in alloc_clustermask()
156 per_cpu(cluster_masks, cpu) = cmsk; in alloc_clustermask()
170 per_cpu(cluster_masks, cpu) = cmsk; in alloc_clustermask()
186 if (!zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL)) in x2apic_prepare_cpu()
193 struct cpumask *cmsk = per_cpu(cluster_masks, dead_cpu); in x2apic_dead_cpu()
197 free_cpumask_var(per_cpu(ipi_mask, dead_cpu)); in x2apic_dead_cpu()
/kernel/linux/linux-6.6/arch/x86/kvm/vmx/
posted_intr.c
92 raw_spin_lock(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu)); in vmx_vcpu_pi_load()
94 raw_spin_unlock(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu)); in vmx_vcpu_pi_load()
155 raw_spin_lock(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu)); in pi_enable_wakeup_handler()
157 &per_cpu(wakeup_vcpus_on_cpu, vcpu->cpu)); in pi_enable_wakeup_handler()
158 raw_spin_unlock(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu)); in pi_enable_wakeup_handler()
221 struct list_head *wakeup_list = &per_cpu(wakeup_vcpus_on_cpu, cpu); in pi_wakeup_handler()
222 raw_spinlock_t *spinlock = &per_cpu(wakeup_vcpus_on_cpu_lock, cpu); in pi_wakeup_handler()
236 INIT_LIST_HEAD(&per_cpu(wakeup_vcpus_on_cpu, cpu)); in pi_init_cpu()
237 raw_spin_lock_init(&per_cpu(wakeup_vcpus_on_cpu_lock, cpu)); in pi_init_cpu()

Completed in 12 milliseconds
