/kernel/linux/linux-5.10/arch/x86/include/asm/trace/irq_vectors.h
  156  unsigned int prev_cpu),
  158  TP_ARGS(irq, vector, cpu, prev_vector, prev_cpu),
  165  __field( unsigned int, prev_cpu )
  173  __entry->prev_cpu = prev_cpu;
  177  TP_printk("irq=%u vector=%u cpu=%u prev_vector=%u prev_cpu=%u",
  179  __entry->prev_vector, __entry->prev_cpu)
  186  unsigned int prev_cpu), \
  187  TP_ARGS(irq, vector, cpu, prev_vector, prev_cpu), NULL, NULL); \
/kernel/linux/linux-6.6/arch/x86/include/asm/trace/irq_vectors.h
  156  unsigned int prev_cpu),
  158  TP_ARGS(irq, vector, cpu, prev_vector, prev_cpu),
  165  __field( unsigned int, prev_cpu )
  173  __entry->prev_cpu = prev_cpu;
  177  TP_printk("irq=%u vector=%u cpu=%u prev_vector=%u prev_cpu=%u",
  179  __entry->prev_vector, __entry->prev_cpu)
  186  unsigned int prev_cpu), \
  187  TP_ARGS(irq, vector, cpu, prev_vector, prev_cpu), NULL, NULL); \
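The irq_vectors.h hits above all belong to one tracepoint event class that records where an interrupt vector moved from. A minimal sketch of such a class, assuming the usual DECLARE_EVENT_CLASS layout: only the prev_cpu/prev_vector pieces and the TP_printk format are taken from the hits; the class name vector_mod_sketch and the field ordering are assumptions.

#include <linux/tracepoint.h>

DECLARE_EVENT_CLASS(vector_mod_sketch,

    TP_PROTO(unsigned int irq, unsigned int vector,
             unsigned int cpu, unsigned int prev_vector,
             unsigned int prev_cpu),

    TP_ARGS(irq, vector, cpu, prev_vector, prev_cpu),

    TP_STRUCT__entry(
        __field( unsigned int, irq         )
        __field( unsigned int, vector      )
        __field( unsigned int, cpu         )
        __field( unsigned int, prev_vector )
        __field( unsigned int, prev_cpu    )
    ),

    TP_fast_assign(
        __entry->irq         = irq;
        __entry->vector      = vector;
        __entry->cpu         = cpu;
        __entry->prev_vector = prev_vector;
        __entry->prev_cpu    = prev_cpu;
    ),

    /* One line per event: current target plus the previous vector/CPU pair. */
    TP_printk("irq=%u vector=%u cpu=%u prev_vector=%u prev_cpu=%u",
              __entry->irq, __entry->vector, __entry->cpu,
              __entry->prev_vector, __entry->prev_cpu)
);

Carrying both the new and the previous (vector, cpu) pair in one event is what lets a trace consumer follow a vector migration without correlating two separate events.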
/kernel/linux/linux-5.10/arch/sparc/kernel/cpumap.c
  193  int n, id, cpu, prev_cpu, last_cpu, level; in build_cpuinfo_tree() local
  204  prev_cpu = cpu = cpumask_first(cpu_online_mask); in build_cpuinfo_tree()
  268  (cpu == last_cpu) ? cpu : prev_cpu; in build_cpuinfo_tree()
  290  prev_cpu = cpu; in build_cpuinfo_tree()
/kernel/linux/linux-6.6/arch/sparc/kernel/cpumap.c
  193  int n, id, cpu, prev_cpu, last_cpu, level; in build_cpuinfo_tree() local
  204  prev_cpu = cpu = cpumask_first(cpu_online_mask); in build_cpuinfo_tree()
  268  (cpu == last_cpu) ? cpu : prev_cpu; in build_cpuinfo_tree()
  290  prev_cpu = cpu; in build_cpuinfo_tree()
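In the sparc cpumap.c hits, prev_cpu is simply the CPU visited on the previous step of a walk over cpu_online_mask, which lets build_cpuinfo_tree() close a node with either the current or the previous id. A condensed sketch of that walking pattern, with a hypothetical visit() callback standing in for the tree-building logic:

#include <linux/cpumask.h>

/* Sketch only: walk the online CPUs, handing each step the previously
 * visited CPU as well. visit() is a hypothetical callback, not part of
 * the sparc code. */
static void walk_online_cpus_with_prev(void (*visit)(int prev_cpu, int cpu))
{
    int prev_cpu, cpu;

    prev_cpu = cpu = cpumask_first(cpu_online_mask);
    while (cpu < nr_cpu_ids) {
        visit(prev_cpu, cpu);
        prev_cpu = cpu;
        cpu = cpumask_next(cpu, cpu_online_mask);
    }
}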
/kernel/linux/linux-5.10/arch/x86/kernel/apic/vector.c
  31   unsigned int prev_cpu; member
  163  apicd->prev_cpu = apicd->cpu; in apic_update_vector()
  346  apicd->prev_cpu); in clear_irq_vector()
  357  per_cpu(vector_irq, apicd->prev_cpu)[vector] = VECTOR_SHUTDOWN; in clear_irq_vector()
  358  irq_matrix_free(vector_matrix, apicd->prev_cpu, vector, managed); in clear_irq_vector()
  633  seq_printf(m, "%*sPrevious target: %5u\n", ind, "", apicd.prev_cpu); in x86_vector_debug_show()
  858  unsigned int cpu = apicd->prev_cpu; in free_moved_vector()
  918  cpu = apicd->prev_cpu; in __send_cleanup_vector()
/kernel/linux/linux-6.6/arch/x86/kernel/apic/vector.c
  31    unsigned int prev_cpu; member
  174   apicd->prev_cpu = apicd->cpu; in apic_update_vector()
  357   apicd->prev_cpu); in clear_irq_vector()
  368   per_cpu(vector_irq, apicd->prev_cpu)[vector] = VECTOR_SHUTDOWN; in clear_irq_vector()
  369   irq_matrix_free(vector_matrix, apicd->prev_cpu, vector, managed); in clear_irq_vector()
  648   seq_printf(m, "%*sPrevious target: %5u\n", ind, "", apicd.prev_cpu); in x86_vector_debug_show()
  938   unsigned int cpu = apicd->prev_cpu; in free_moved_vector()
  1011  unsigned int cpu = apicd->prev_cpu; in __vector_schedule_cleanup()
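In apic/vector.c, prev_cpu (together with prev_vector) parks the old target of an interrupt while it is being moved: apic_update_vector() saves the current CPU into prev_cpu before switching, and clear_irq_vector()/free_moved_vector() later release the reservation still held on that previous CPU. A simplified, self-contained sketch of that two-step bookkeeping; struct vec_move and release_vector() are hypothetical stand-ins for the kernel's apic_chip_data and the irq_matrix/vector_irq handling.

#include <linux/types.h>

struct vec_move {
    unsigned int cpu, vector;           /* current target */
    unsigned int prev_cpu, prev_vector; /* old target, still reserved */
    bool move_in_progress;
};

/* Step 1: retarget, but keep the old reservation until the move completes. */
static void retarget(struct vec_move *m, unsigned int new_cpu,
                     unsigned int new_vector)
{
    m->prev_cpu = m->cpu;
    m->prev_vector = m->vector;
    m->move_in_progress = true;

    m->cpu = new_cpu;
    m->vector = new_vector;
}

/* Step 2: once the interrupt has fired on the new CPU, free the old slot. */
static void finish_move(struct vec_move *m,
                        void (*release_vector)(unsigned int cpu, unsigned int vector))
{
    if (!m->move_in_progress)
        return;

    release_vector(m->prev_cpu, m->prev_vector);
    m->prev_vector = 0;
    m->move_in_progress = false;
}

Keeping the old slot reserved until the new target has demonstrably received an interrupt is what makes the handover race-free: an in-flight interrupt can still land on prev_cpu and be delivered through the old vector.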
/kernel/linux/linux-6.6/kernel/sched/fair.c
  1117   static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
  7086   wake_affine_idle(int this_cpu, int prev_cpu, int sync) in wake_affine_idle() argument
  7094   * If the prev_cpu is idle and cache affine then avoid a migration. in wake_affine_idle()
  7096   * is more important than cache hot data on the prev_cpu and from in wake_affine_idle()
  7100   if (available_idle_cpu(this_cpu) && cpus_share_cache(this_cpu, prev_cpu)) in wake_affine_idle()
  7101   return available_idle_cpu(prev_cpu) ? prev_cpu : this_cpu; in wake_affine_idle()
  7106   if (available_idle_cpu(prev_cpu)) in wake_affine_idle()
  7107   return prev_cpu; in wake_affine_idle()
  7114   int this_cpu, int prev_cpu, in in wake_affine_weight()
  7113   wake_affine_weight(struct sched_domain *sd, struct task_struct *p, int this_cpu, int prev_cpu, int sync) wake_affine_weight() argument
  7155   wake_affine(struct sched_domain *sd, struct task_struct *p, int this_cpu, int prev_cpu, int sync) wake_affine() argument
  7241   find_idlest_cpu(struct sched_domain *sd, struct task_struct *p, int cpu, int prev_cpu, int sd_flag) find_idlest_cpu() argument
  7902   eenv_task_busy_time(struct energy_env *eenv, struct task_struct *p, int prev_cpu) eenv_task_busy_time() argument
  8062   find_energy_efficient_cpu(struct task_struct *p, int prev_cpu) find_energy_efficient_cpu() argument
  8261   select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags) select_task_rq_fair() argument
  12872  int prev_cpu = task_cpu(p); check_for_migration_fair() local
  [all...]

/kernel/linux/linux-6.6/kernel/sched/rt.c
  1917  int prev_cpu = task_cpu(task); in find_cas_cpu() local
  2038  if (target_cpu_util == util && target_cpu == prev_cpu) in find_cas_cpu()
  2059  trace_sched_find_cas_cpu(task, lowest_mask, boosted_tutil, prev_cpu, target_cpu); in find_cas_cpu()
/kernel/linux/linux-5.10/kernel/sched/fair.c
  775    static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
  6133   wake_affine_idle(int this_cpu, int prev_cpu, int sync) in wake_affine_idle() argument
  6141   * If the prev_cpu is idle and cache affine then avoid a migration. in wake_affine_idle()
  6143   * is more important than cache hot data on the prev_cpu and from in wake_affine_idle()
  6147   if (available_idle_cpu(this_cpu) && cpus_share_cache(this_cpu, prev_cpu)) in wake_affine_idle()
  6148   return available_idle_cpu(prev_cpu) ? prev_cpu : this_cpu; in wake_affine_idle()
  6158   int this_cpu, int prev_cpu, int sync) in wake_affine_weight()
  6179   this_eff_load *= capacity_of(prev_cpu); in wake_affine_weight()
  6181   prev_eff_load = cpu_load(cpu_rq(prev_cpu)); in wake_affine_weight()
  6157   wake_affine_weight(struct sched_domain *sd, struct task_struct *p, int this_cpu, int prev_cpu, int sync) wake_affine_weight() argument
  6199   wake_affine(struct sched_domain *sd, struct task_struct *p, int this_cpu, int prev_cpu, int sync) wake_affine() argument
  6281   find_idlest_cpu(struct sched_domain *sd, struct task_struct *p, int cpu, int prev_cpu, int sd_flag) find_idlest_cpu() argument
  6993   find_energy_efficient_cpu(struct task_struct *p, int prev_cpu) find_energy_efficient_cpu() argument
  7130   select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags) select_task_rq_fair() argument
  11435  int prev_cpu = task_cpu(p); check_for_migration_fair() local
  [all...]

/kernel/linux/linux-5.10/kernel/sched/rt.c
  1743  int prev_cpu = task_cpu(task); in find_cas_cpu() local
  1864  if (target_cpu_util == util && target_cpu == prev_cpu) in find_cas_cpu()
  1885  trace_sched_find_cas_cpu(task, lowest_mask, boosted_tutil, prev_cpu, target_cpu); in find_cas_cpu()
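The fair.c hits above (the same shape in both the 5.10 and 6.6 listings) are the wake-affinity decision: on wakeup, prev_cpu is where the task last ran, and wake_affine_idle() keeps it there if it is idle, preferring it over the waking CPU when the two share a cache. A condensed sketch of just that part; the sync-wakeup shortcut and the "no preference" sentinel the real function returns are left out, and -1 is used here instead.

/* Condensed sketch of the wake_affine_idle() decision shown in the hits;
 * available_idle_cpu() and cpus_share_cache() are the scheduler helpers the
 * real code calls. Returns a preferred CPU or -1 for "no preference". */
static int wake_affine_idle_sketch(int this_cpu, int prev_cpu)
{
    /* Cache-affine case: prefer whichever of the two is idle, with
     * prev_cpu winning so the task keeps its cache-hot data. */
    if (available_idle_cpu(this_cpu) && cpus_share_cache(this_cpu, prev_cpu))
        return available_idle_cpu(prev_cpu) ? prev_cpu : this_cpu;

    /* Otherwise only stay put if the previous CPU is idle. */
    if (available_idle_cpu(prev_cpu))
        return prev_cpu;

    return -1;
}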
/kernel/linux/linux-6.6/arch/powerpc/lib/qspinlock.c
  386  int prev_cpu = decode_tail_cpu(val); in yield_to_prev() local
  432  yield_count = yield_count_of(prev_cpu); in yield_to_prev()
  444  yield_to_preempted(prev_cpu, yield_count); in yield_to_prev()
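In the powerpc queued spinlock, prev_cpu is the CPU of the waiter queued ahead of us, decoded from the MCS tail word; if that vCPU has been preempted by the hypervisor there is no point spinning behind it, so the waiter donates its time slice to it. A trimmed sketch using only the three helpers named in the hits; the retry loop, the lock-owner yielding and the memory ordering of the real yield_to_prev() are omitted, and the odd-yield-count-means-preempted test follows the usual powerpc shared-processor convention rather than anything visible in the hits themselves.

#include <linux/types.h>

/* Sketch: yield to the CPU queued ahead of us if its vCPU looks preempted.
 * decode_tail_cpu(), yield_count_of() and yield_to_preempted() are the
 * helpers the real yield_to_prev() uses. */
static void maybe_yield_to_prev(u32 val)
{
    int prev_cpu = decode_tail_cpu(val);        /* CPU encoded in the MCS tail */
    u32 yield_count = yield_count_of(prev_cpu); /* dispatch count from its lppaca */

    if (yield_count & 1)    /* odd count: that vCPU is not currently running */
        yield_to_preempted(prev_cpu, yield_count);
}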
/kernel/linux/linux-5.10/kernel/sched/rtg/rtg.c
  921  int prev_cpu; in sched_set_group_normalized_util() local
  945  prev_cpu = cpumask_first(&grp->preferred_cluster->cpus); in sched_set_group_normalized_util()
  966  cpufreq_update_util(cpu_rq(prev_cpu), in sched_set_group_normalized_util()
/kernel/linux/linux-6.6/kernel/sched/rtg/rtg.c
  922  int prev_cpu; in sched_set_group_normalized_util() local
  946  prev_cpu = cpumask_first(&grp->preferred_cluster->cpus); in sched_set_group_normalized_util()
  967  cpufreq_update_util(cpu_rq(prev_cpu), in sched_set_group_normalized_util()
/kernel/linux/linux-6.6/arch/powerpc/kvm/book3s_hv.c
  3023  vcpu->arch.prev_cpu = -1; in kvmppc_core_vcpu_create_hv()
  3230  int prev_cpu; in kvmppc_prepare_radix_vcpu() local
  3236  prev_cpu = nested->prev_cpu[vcpu->arch.nested_vcpu_id]; in kvmppc_prepare_radix_vcpu()
  3238  prev_cpu = vcpu->arch.prev_cpu; in kvmppc_prepare_radix_vcpu()
  3251  if (prev_cpu != pcpu) { in kvmppc_prepare_radix_vcpu()
  3252  if (prev_cpu >= 0) { in kvmppc_prepare_radix_vcpu()
  3253  if (cpu_first_tlb_thread_sibling(prev_cpu) != in kvmppc_prepare_radix_vcpu()
  3255  radix_flush_cpu(kvm, prev_cpu, vcp in kvmppc_prepare_radix_vcpu()
  [all...]

/kernel/linux/linux-6.6/arch/powerpc/kvm/book3s_hv_nested.c
  706  memset(gp->prev_cpu, -1, sizeof(gp->prev_cpu)); in kvmhv_alloc_nested()
/kernel/linux/linux-5.10/arch/powerpc/kvm/book3s_hv.c
  2428  vcpu->arch.prev_cpu = -1; in kvmppc_core_vcpu_create_hv()
  2608  int prev_cpu; in kvmppc_prepare_radix_vcpu() local
  2614  prev_cpu = nested->prev_cpu[vcpu->arch.nested_vcpu_id]; in kvmppc_prepare_radix_vcpu()
  2616  prev_cpu = vcpu->arch.prev_cpu; in kvmppc_prepare_radix_vcpu()
  2630  if (prev_cpu != pcpu) { in kvmppc_prepare_radix_vcpu()
  2631  if (prev_cpu >= 0 && in kvmppc_prepare_radix_vcpu()
  2632  cpu_first_tlb_thread_sibling(prev_cpu) != in kvmppc_prepare_radix_vcpu()
  2634  radix_flush_cpu(kvm, prev_cpu, vcp in kvmppc_prepare_radix_vcpu()
  [all...]

/kernel/linux/linux-5.10/arch/powerpc/kvm/book3s_hv_nested.c
  613  memset(gp->prev_cpu, -1, sizeof(gp->prev_cpu)); in kvmhv_alloc_nested()
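In the powerpc HV KVM hits, prev_cpu records the last physical CPU a vCPU ran on (or, via the prev_cpu[] array that kvmhv_alloc_nested() memsets to -1, a nested-guest vCPU). kvmppc_prepare_radix_vcpu() compares it with the CPU about to run the vCPU and, if the two are not TLB-thread siblings, flushes the stale guest TLB entries left behind on the old CPU before updating prev_cpu. A stripped-down sketch of that check; tlb_siblings() and flush_prev() are hypothetical stand-ins for cpu_first_tlb_thread_sibling() and radix_flush_cpu().

#include <linux/types.h>

/* Sketch: update *prev_cpu to pcpu, flushing the old CPU first when the two
 * do not share TLB threads. -1 means the vCPU has not run anywhere yet. */
static void note_pcpu_change(int *prev_cpu, int pcpu,
                             bool (*tlb_siblings)(int a, int b),
                             void (*flush_prev)(int cpu))
{
    if (*prev_cpu == pcpu)
        return;                         /* still on the same physical CPU */

    if (*prev_cpu >= 0 && !tlb_siblings(*prev_cpu, pcpu))
        flush_prev(*prev_cpu);          /* stale radix TLB entries may remain there */

    *prev_cpu = pcpu;
}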
/kernel/linux/linux-5.10/arch/powerpc/include/asm/kvm_book3s_64.h
  47   short prev_cpu[NR_CPUS]; member

/kernel/linux/linux-5.10/arch/powerpc/include/asm/kvm_host.h
  765  int prev_cpu; member
/kernel/linux/linux-6.6/arch/powerpc/include/asm/kvm_book3s_64.h
  34   short prev_cpu[NR_CPUS]; member

/kernel/linux/linux-6.6/arch/powerpc/include/asm/kvm_host.h
  771  int prev_cpu; member
/kernel/linux/linux-5.10/drivers/irqchip/irq-gic-v3-its.c
  1665  int cpu, prev_cpu; in its_set_affinity() local
  1671  prev_cpu = its_dev->event_map.col_map[id]; in its_set_affinity()
  1672  its_dec_lpi_count(d, prev_cpu); in its_set_affinity()
  1683  if (cpu != prev_cpu) { in its_set_affinity()
  1695  its_inc_lpi_count(d, prev_cpu); in its_set_affinity()
/kernel/linux/linux-6.6/drivers/irqchip/irq-gic-v3-its.c
  1679  int cpu, prev_cpu; in its_set_affinity() local
  1685  prev_cpu = its_dev->event_map.col_map[id]; in its_set_affinity()
  1686  its_dec_lpi_count(d, prev_cpu); in its_set_affinity()
  1697  if (cpu != prev_cpu) { in its_set_affinity()
  1709  its_inc_lpi_count(d, prev_cpu); in its_set_affinity()
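In the GICv3 ITS driver, its_set_affinity() uses prev_cpu to rebalance the per-CPU LPI counts: the previous target is read from the event's column map, its count is dropped, and the interrupt is only retargeted when a different CPU is chosen; if no valid CPU can be picked, the count is handed back to prev_cpu. A schematic sketch of that accounting; the function-pointer parameters are hypothetical stand-ins for its_dec_lpi_count()/its_inc_lpi_count() and the MOVI command plumbing, not the driver's real interfaces.

#include <linux/errno.h>

/* Sketch of the prev_cpu accounting in its_set_affinity(). */
static int move_lpi(int *col_map, unsigned int id, int new_cpu,
                    void (*dec_count)(int cpu), void (*inc_count)(int cpu),
                    int (*retarget)(unsigned int id, int cpu))
{
    int prev_cpu = col_map[id];

    dec_count(prev_cpu);                /* old target gives up this LPI */

    if (new_cpu < 0) {
        inc_count(prev_cpu);            /* nothing better found, hand it back */
        return -EINVAL;
    }

    if (new_cpu != prev_cpu) {
        if (retarget(id, new_cpu)) {    /* move the LPI to the new collection */
            inc_count(prev_cpu);
            return -EIO;
        }
        col_map[id] = new_cpu;
    }

    inc_count(new_cpu);                 /* account it on its (possibly new) target */
    return 0;
}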