
Searched for refs:new_cpu; showing results 1 - 25 of 40, sorted by relevance.


/kernel/linux/linux-5.10/arch/ia64/kernel/
  irq.c
      82  int irq, new_cpu;  [in migrate_irqs(), local]
     108  new_cpu = cpumask_any(cpu_online_mask);  [in migrate_irqs()]
     117  cpumask_of(new_cpu), false);  [in migrate_irqs()]
/kernel/linux/linux-6.6/arch/ia64/kernel/
  irq.c
      82  int irq, new_cpu;  [in migrate_irqs(), local]
     108  new_cpu = cpumask_any(cpu_online_mask);  [in migrate_irqs()]
     117  cpumask_of(new_cpu), false);  [in migrate_irqs()]
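
Both ia64 trees show the classic fallback when an IRQ's target CPU disappears: pick any CPU that is still online and point the interrupt at it. Below is a minimal, illustrative sketch of that pattern (example_retarget_irq() is a made-up name; the real migrate_irqs() also handles masking and the IRQ descriptor walk):

    #include <linux/cpumask.h>
    #include <linux/irq.h>

    /* Sketch only: retarget one IRQ whose CPU went away to any CPU
     * that is still online. */
    static void example_retarget_irq(struct irq_data *data)
    {
            struct irq_chip *chip = irq_data_get_irq_chip(data);
            unsigned int new_cpu;

            if (!chip || !chip->irq_set_affinity)
                    return;

            new_cpu = cpumask_any(cpu_online_mask);  /* "any" == first set bit */
            chip->irq_set_affinity(data, cpumask_of(new_cpu), false);
    }
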
/kernel/linux/linux-5.10/drivers/irqchip/
  irq-bcm6345-l1.c
     199  unsigned int new_cpu;  [in bcm6345_l1_set_affinity(), local]
     207  new_cpu = cpumask_any_and(&valid, cpu_online_mask);  [in bcm6345_l1_set_affinity()]
     208  if (new_cpu >= nr_cpu_ids)  [in bcm6345_l1_set_affinity()]
     211  dest = cpumask_of(new_cpu);  [in bcm6345_l1_set_affinity()]
     214  if (old_cpu != new_cpu) {  [in bcm6345_l1_set_affinity()]
     226  irq_data_update_effective_affinity(d, cpumask_of(new_cpu));  [in bcm6345_l1_set_affinity()]
/kernel/linux/linux-6.6/drivers/irqchip/
  irq-bcm6345-l1.c
     194  unsigned int new_cpu;  [in bcm6345_l1_set_affinity(), local]
     202  new_cpu = cpumask_any_and(&valid, cpu_online_mask);  [in bcm6345_l1_set_affinity()]
     203  if (new_cpu >= nr_cpu_ids)  [in bcm6345_l1_set_affinity()]
     206  dest = cpumask_of(new_cpu);  [in bcm6345_l1_set_affinity()]
     209  if (old_cpu != new_cpu) {  [in bcm6345_l1_set_affinity()]
     221  irq_data_update_effective_affinity(d, cpumask_of(new_cpu));  [in bcm6345_l1_set_affinity()]
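
Both bcm6345-l1 versions validate an affinity request the same way: intersect the requested mask with what the line can physically target, then take any online CPU from the result; cpumask_any_and() returns nr_cpu_ids or more when the intersection is empty. A hedged sketch of just that check (the example_ names are invented for illustration):

    #include <linux/cpumask.h>
    #include <linux/errno.h>

    /* Sketch: pick an online CPU out of (requested & supported), or
     * fail if no such CPU exists. An on-stack cpumask is fine for a
     * sketch; production code may need to avoid it. */
    static int example_pick_affinity_cpu(const struct cpumask *requested,
                                         const struct cpumask *supported)
    {
            struct cpumask valid;
            unsigned int new_cpu;

            cpumask_and(&valid, requested, supported);
            new_cpu = cpumask_any_and(&valid, cpu_online_mask);
            if (new_cpu >= nr_cpu_ids)      /* empty intersection */
                    return -EINVAL;

            return new_cpu;
    }
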
/kernel/linux/linux-5.10/arch/x86/hyperv/
  hv_init.c
     214  unsigned int new_cpu;  [in hv_cpu_die(), local]
     239  new_cpu = cpumask_any_but(cpu_online_mask, cpu);  [in hv_cpu_die()]
     241  if (new_cpu < nr_cpu_ids)  [in hv_cpu_die()]
     242  re_ctrl.target_vp = hv_vp_index[new_cpu];  [in hv_cpu_die()]
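
hv_cpu_die() uses the third common selector, cpumask_any_but(): any online CPU except the one going down, here to re-point a Hyper-V resource at a surviving vCPU. A sketch of the guard, with the caller's follow-up elided:

    #include <linux/cpumask.h>
    #include <linux/errno.h>

    /* Sketch: pick a CPU to inherit state from a dying CPU. A result
     * >= nr_cpu_ids means the dying CPU was the last one online. */
    static int example_pick_survivor(unsigned int dying_cpu)
    {
            unsigned int new_cpu = cpumask_any_but(cpu_online_mask, dying_cpu);

            return new_cpu < nr_cpu_ids ? (int)new_cpu : -ENODEV;
    }
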
/kernel/linux/linux-5.10/kernel/sched/
  cpudeadline.c
     176  int old_idx, new_cpu;  [in cpudl_clear(), local]
     191  new_cpu = cp->elements[cp->size - 1].cpu;  [in cpudl_clear()]
     193  cp->elements[old_idx].cpu = new_cpu;  [in cpudl_clear()]
     195  cp->elements[new_cpu].idx = old_idx;  [in cpudl_clear()]
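
The cpudl_clear() hits are a textbook array-heap deletion: copy the last element into the freed slot, shrink the heap, and fix the cpu-to-slot reverse map so later lookups stay O(1). A self-contained sketch of that bookkeeping (the sift step the real code runs afterwards is omitted):

    #define EXAMPLE_NR_CPUS 64

    struct example_elem { int cpu; unsigned long deadline; };

    struct example_heap {
            struct example_elem elements[EXAMPLE_NR_CPUS];
            int idx[EXAMPLE_NR_CPUS];       /* cpu -> slot in elements[] */
            int size;
    };

    static void example_heap_remove(struct example_heap *h, int old_idx)
    {
            int new_cpu = h->elements[h->size - 1].cpu;

            h->size--;
            h->elements[old_idx] = h->elements[h->size];  /* move last slot up */
            h->idx[new_cpu] = old_idx;                    /* fix reverse map   */
            /* ...then restore the heap property around old_idx. */
    }
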
  walt.h
     151  extern void fixup_busy_time(struct task_struct *p, int new_cpu);
     229  static inline void fixup_busy_time(struct task_struct *p, int new_cpu) { }  [fixup_busy_time() argument]
  fair.c
    2972  static void update_scan_period(struct task_struct *p, int new_cpu)  [update_scan_period() argument]
    2975  int dst_nid = cpu_to_node(new_cpu);  [in update_scan_period()]
    3019  static inline void update_scan_period(struct task_struct *p, int new_cpu)  [update_scan_period() argument]
    6284  int new_cpu = cpu;  [in find_idlest_cpu(), local]
    6312  new_cpu = find_idlest_group_cpu(group, p, cpu);  [in find_idlest_cpu()]
    6313  if (new_cpu == cpu) {  [in find_idlest_cpu()]
    6319  /* Now try balancing at a lower domain level of 'new_cpu': */  [in find_idlest_cpu()]
    6320  cpu = new_cpu;  [in find_idlest_cpu()]
    6331  return new_cpu;  [in find_idlest_cpu()]
    7134  int new_cpu  [in select_task_rq_fair(), local]
    7201  migrate_task_rq_fair(struct task_struct *p, int new_cpu)  [migrate_task_rq_fair() argument]
   11409  kick_active_balance(struct rq *rq, struct task_struct *p, int new_cpu)  [kick_active_balance() argument]
   11434  int new_cpu = -1;  [in check_for_migration_fair(), local]
    [all...]
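
find_idlest_cpu() has the same shape in both kernels: take the idlest CPU of the idlest group at the current sched-domain level, and if that changes the pick, descend to the new CPU's lower domain and repeat. A control-flow-only sketch, with the per-level group scan abstracted into a callback (the real code walks struct sched_domain and weighs load, idle state, and cache affinity):

    /* Sketch of the descent loop only; not the scheduler's real types. */
    static int example_find_idlest_cpu(int cpu, int max_levels,
                                       int (*idlest_at)(int cpu, int level))
    {
            int new_cpu = cpu;
            int level;

            for (level = max_levels - 1; level >= 0; level--) {
                    new_cpu = idlest_at(cpu, level);
                    if (new_cpu != cpu)
                            cpu = new_cpu;  /* descend below the new pick */
                    /* else: keep cpu, just try the next lower level */
            }
            return new_cpu;
    }
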
  walt.c
     435  inter_cluster_migration_fixup(struct task_struct *p, int new_cpu, int task_cpu, bool new_task)  [inter_cluster_migration_fixup() argument]
     436  (struct task_struct *p, int new_cpu, int task_cpu, bool new_task)  [in inter_cluster_migration_fixup()]
     438  struct rq *dest_rq = cpu_rq(new_cpu);  [in inter_cluster_migration_fixup()]
     441  if (same_freq_domain(new_cpu, task_cpu))  [in inter_cluster_migration_fixup()]
     444  p->ravg.curr_window_cpu[new_cpu] = p->ravg.curr_window;  [in inter_cluster_migration_fixup()]
     445  p->ravg.prev_window_cpu[new_cpu] = p->ravg.prev_window;  [in inter_cluster_migration_fixup()]
     475  void fixup_busy_time(struct task_struct *p, int new_cpu)  [fixup_busy_time() argument]
     478  struct rq *dest_rq = cpu_rq(new_cpu);  [in fixup_busy_time()]
     572  inter_cluster_migration_fixup(p, new_cpu,  [in fixup_busy_time()]
     578  if (!same_freq_domain(new_cpu, task_cpu(p)))  [in fixup_busy_time()]
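
fixup_busy_time() and inter_cluster_migration_fixup() move a task's WALT window statistics with it: migrations inside one frequency domain need no fixup, while cross-cluster moves credit the task's current and previous window demand to the destination CPU's per-CPU slots (the real code also adjusts the source and destination runqueue sums, which this sketch omits). A simplified version with stand-in types and a placeholder cluster test:

    #include <linux/types.h>

    #define EXAMPLE_NR_CPUS 64

    struct example_ravg {
            u32 curr_window, prev_window;
            u32 curr_window_cpu[EXAMPLE_NR_CPUS];
            u32 prev_window_cpu[EXAMPLE_NR_CPUS];
    };

    /* Placeholder: pretend clusters of four CPUs share a freq domain. */
    static bool example_same_freq_domain(int a, int b)
    {
            return a / 4 == b / 4;
    }

    static void example_migration_fixup(struct example_ravg *ravg,
                                        int new_cpu, int task_cpu)
    {
            if (example_same_freq_domain(new_cpu, task_cpu))
                    return;         /* same cluster: nothing to transfer */

            ravg->curr_window_cpu[new_cpu] = ravg->curr_window;
            ravg->prev_window_cpu[new_cpu] = ravg->prev_window;
    }
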
/kernel/linux/linux-6.6/kernel/sched/
  cpudeadline.c
     175  int old_idx, new_cpu;  [in cpudl_clear(), local]
     190  new_cpu = cp->elements[cp->size - 1].cpu;  [in cpudl_clear()]
     192  cp->elements[old_idx].cpu = new_cpu;  [in cpudl_clear()]
     194  cp->elements[new_cpu].idx = old_idx;  [in cpudl_clear()]
  walt.h
     152  extern void fixup_busy_time(struct task_struct *p, int new_cpu);
     230  static inline void fixup_busy_time(struct task_struct *p, int new_cpu) { }  [fixup_busy_time() argument]
  fair.c
    3541  static void update_scan_period(struct task_struct *p, int new_cpu)  [update_scan_period() argument]
    3544  int dst_nid = cpu_to_node(new_cpu);  [in update_scan_period()]
    3588  static inline void update_scan_period(struct task_struct *p, int new_cpu)  [update_scan_period() argument]
    7244  int new_cpu = cpu;  [in find_idlest_cpu(), local]
    7272  new_cpu = find_idlest_group_cpu(group, p, cpu);  [in find_idlest_cpu()]
    7273  if (new_cpu == cpu) {  [in find_idlest_cpu()]
    7279  /* Now try balancing at a lower domain level of 'new_cpu': */  [in find_idlest_cpu()]
    7280  cpu = new_cpu;  [in find_idlest_cpu()]
    7291  return new_cpu;  [in find_idlest_cpu()]
    8266  int new_cpu  [in select_task_rq_fair(), local]
    8341  migrate_task_rq_fair(struct task_struct *p, int new_cpu)  [migrate_task_rq_fair() argument]
   12846  kick_active_balance(struct rq *rq, struct task_struct *p, int new_cpu)  [kick_active_balance() argument]
   12871  int new_cpu = -1;  [in check_for_migration_fair(), local]
    [all...]
  walt.c
     436  inter_cluster_migration_fixup(struct task_struct *p, int new_cpu, int task_cpu, bool new_task)  [inter_cluster_migration_fixup() argument]
     437  (struct task_struct *p, int new_cpu, int task_cpu, bool new_task)  [in inter_cluster_migration_fixup()]
     439  struct rq *dest_rq = cpu_rq(new_cpu);  [in inter_cluster_migration_fixup()]
     442  if (same_freq_domain(new_cpu, task_cpu))  [in inter_cluster_migration_fixup()]
     445  p->ravg.curr_window_cpu[new_cpu] = p->ravg.curr_window;  [in inter_cluster_migration_fixup()]
     446  p->ravg.prev_window_cpu[new_cpu] = p->ravg.prev_window;  [in inter_cluster_migration_fixup()]
     476  void fixup_busy_time(struct task_struct *p, int new_cpu)  [fixup_busy_time() argument]
     479  struct rq *dest_rq = cpu_rq(new_cpu);  [in fixup_busy_time()]
     573  inter_cluster_migration_fixup(p, new_cpu,  [in fixup_busy_time()]
     579  if (!same_freq_domain(new_cpu, task_cpu(p)))  [in fixup_busy_time()]
/kernel/linux/linux-5.10/tools/perf/scripts/python/
  sched-migration.py
     191  def migrate(self, ts_list, new, old_cpu, new_cpu):
     192  if old_cpu == new_cpu:
     199  new_rq = self.prev.rqs[new_cpu]
     201  self.rqs[new_cpu] = in_rq
     208  self.event_cpus.append(new_cpu)
/kernel/linux/linux-6.6/tools/perf/scripts/python/
  sched-migration.py
     191  def migrate(self, ts_list, new, old_cpu, new_cpu):
     192  if old_cpu == new_cpu:
     199  new_rq = self.prev.rqs[new_cpu]
     201  self.rqs[new_cpu] = in_rq
     208  self.event_cpus.append(new_cpu)
/kernel/linux/linux-5.10/drivers/gpu/drm/amd/amdkfd/
  kfd_device.c
     931  int cpu, new_cpu;  [in kfd_queue_work(), local]
     933  cpu = new_cpu = smp_processor_id();  [in kfd_queue_work()]
     935  new_cpu = cpumask_next(new_cpu, cpu_online_mask) % nr_cpu_ids;  [in kfd_queue_work()]
     936  if (cpu_to_node(new_cpu) == numa_node_id())  [in kfd_queue_work()]
     938  } while (cpu != new_cpu);  [in kfd_queue_work()]
     940  queue_work_on(new_cpu, wq, work);  [in kfd_queue_work()]
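
kfd_queue_work() is a round-robin search for an online CPU on the caller's NUMA node: cpumask_next() walks the online mask, the modulo wraps the nr_cpu_ids sentinel back to zero, and the loop gives up (keeping the current CPU) after one full lap. Essentially the same logic, annotated; it assumes a context where smp_processor_id() is safe, e.g. preemption disabled:

    #include <linux/cpumask.h>
    #include <linux/smp.h>
    #include <linux/topology.h>
    #include <linux/workqueue.h>

    static void example_queue_work_on_local_node(struct workqueue_struct *wq,
                                                 struct work_struct *work)
    {
            int cpu, new_cpu;

            cpu = new_cpu = smp_processor_id();
            do {
                    /* next online CPU; the nr_cpu_ids sentinel wraps to 0 */
                    new_cpu = cpumask_next(new_cpu, cpu_online_mask) % nr_cpu_ids;
                    if (cpu_to_node(new_cpu) == numa_node_id())
                            break;  /* found one on our node */
            } while (cpu != new_cpu);

            queue_work_on(new_cpu, wq, work);
    }
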
/kernel/linux/linux-6.6/arch/x86/hyperv/
  hv_init.c
     236  unsigned int new_cpu;  [in hv_cpu_die(), local]
     275  new_cpu = cpumask_any_but(cpu_online_mask, cpu);  [in hv_cpu_die()]
     277  if (new_cpu < nr_cpu_ids)  [in hv_cpu_die()]
     278  re_ctrl.target_vp = hv_vp_index[new_cpu];  [in hv_cpu_die()]
/kernel/linux/linux-5.10/drivers/hv/
  hyperv_vmbus.h
     440  hv_update_alloced_cpus(unsigned int old_cpu, unsigned int new_cpu)  [hv_update_alloced_cpus() argument]
     441  unsigned int new_cpu)  [in hv_update_alloced_cpus()]
     443  hv_set_alloced_cpu(new_cpu);  [in hv_update_alloced_cpus()]
/kernel/linux/linux-6.6/drivers/hv/
  hyperv_vmbus.h
     454  hv_update_allocated_cpus(unsigned int old_cpu, unsigned int new_cpu)  [hv_update_allocated_cpus() argument]
     455  unsigned int new_cpu)  [in hv_update_allocated_cpus()]
     457  hv_set_allocated_cpu(new_cpu);  [in hv_update_allocated_cpus()]
/kernel/linux/linux-6.6/drivers/gpu/drm/amd/amdkfd/
  kfd_device.c
    1003  int cpu, new_cpu;  [in kfd_queue_work(), local]
    1005  cpu = new_cpu = smp_processor_id();  [in kfd_queue_work()]
    1007  new_cpu = cpumask_next(new_cpu, cpu_online_mask) % nr_cpu_ids;  [in kfd_queue_work()]
    1008  if (cpu_to_node(new_cpu) == numa_node_id())  [in kfd_queue_work()]
    1010  } while (cpu != new_cpu);  [in kfd_queue_work()]
    1012  queue_work_on(new_cpu, wq, work);  [in kfd_queue_work()]
/kernel/linux/linux-5.10/drivers/perf/
  thunderx2_pmu.c
     939  int new_cpu;  [in tx2_uncore_pmu_offline_cpu(), local]
     954  new_cpu = cpumask_any_and(  [in tx2_uncore_pmu_offline_cpu()]
     958  tx2_pmu->cpu = new_cpu;  [in tx2_uncore_pmu_offline_cpu()]
     959  if (new_cpu >= nr_cpu_ids)  [in tx2_uncore_pmu_offline_cpu()]
     961  perf_pmu_migrate_context(&tx2_pmu->pmu, cpu, new_cpu);  [in tx2_uncore_pmu_offline_cpu()]
/kernel/linux/linux-6.6/drivers/perf/
  thunderx2_pmu.c
     935  int new_cpu;  [in tx2_uncore_pmu_offline_cpu(), local]
     950  new_cpu = cpumask_any_and(  [in tx2_uncore_pmu_offline_cpu()]
     954  tx2_pmu->cpu = new_cpu;  [in tx2_uncore_pmu_offline_cpu()]
     955  if (new_cpu >= nr_cpu_ids)  [in tx2_uncore_pmu_offline_cpu()]
     957  perf_pmu_migrate_context(&tx2_pmu->pmu, cpu, new_cpu);  [in tx2_uncore_pmu_offline_cpu()]
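
The thunderx2 hotplug callback combines the selectors above with a perf hand-off: if the dying CPU owns the PMU, find another online CPU on the same NUMA node and migrate the perf context there so open events keep counting. A hedged sketch (the real driver also tracks per-PMU hrtimers and keeps its own cpumask bookkeeping):

    #include <linux/cpumask.h>
    #include <linux/perf_event.h>
    #include <linux/topology.h>

    /* Sketch: move PMU ownership off a dying CPU to a same-node peer. */
    static void example_pmu_offline_cpu(unsigned int dying_cpu, int node,
                                        struct pmu *pmu, int *owner_cpu)
    {
            struct cpumask candidates;      /* on-stack: fine for a sketch */
            unsigned int new_cpu;

            if (*owner_cpu != (int)dying_cpu)
                    return;                 /* another CPU owns this PMU */

            cpumask_and(&candidates, cpumask_of_node(node), cpu_online_mask);
            cpumask_clear_cpu(dying_cpu, &candidates);
            new_cpu = cpumask_any(&candidates);

            *owner_cpu = new_cpu;           /* may be >= nr_cpu_ids: orphaned */
            if (new_cpu >= nr_cpu_ids)
                    return;                 /* node emptied; nowhere to go */

            perf_pmu_migrate_context(pmu, dying_cpu, new_cpu);
    }
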
/kernel/linux/linux-5.10/arch/x86/events/intel/
  uncore.c
    1317  uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu, int new_cpu)  [uncore_change_type_ctx() argument]
    1318  int new_cpu)  [in uncore_change_type_ctx()]
    1324  die = topology_logical_die_id(old_cpu < 0 ? new_cpu : old_cpu);  [in uncore_change_type_ctx()]
    1332  box->cpu = new_cpu;  [in uncore_change_type_ctx()]
    1338  if (new_cpu < 0)  [in uncore_change_type_ctx()]
    1342  perf_pmu_migrate_context(&pmu->pmu, old_cpu, new_cpu);  [in uncore_change_type_ctx()]
    1343  box->cpu = new_cpu;  [in uncore_change_type_ctx()]
    1347  uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_cpu)  [uncore_change_context() argument]
    1348  int old_cpu, int new_cpu)  [in uncore_change_context()]
    1351  uncore_change_type_ctx(*uncores, old_cpu, new_cpu);  [in uncore_change_context()]
/kernel/linux/linux-6.6/arch/x86/events/intel/
  uncore.c
    1456  uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu, int new_cpu)  [uncore_change_type_ctx() argument]
    1457  int new_cpu)  [in uncore_change_type_ctx()]
    1463  die = topology_logical_die_id(old_cpu < 0 ? new_cpu : old_cpu);  [in uncore_change_type_ctx()]
    1471  box->cpu = new_cpu;  [in uncore_change_type_ctx()]
    1477  if (new_cpu < 0)  [in uncore_change_type_ctx()]
    1481  perf_pmu_migrate_context(&pmu->pmu, old_cpu, new_cpu);  [in uncore_change_type_ctx()]
    1482  box->cpu = new_cpu;  [in uncore_change_type_ctx()]
    1486  uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_cpu)  [uncore_change_context() argument]
    1487  int old_cpu, int new_cpu)  [in uncore_change_context()]
    1490  uncore_change_type_ctx(*uncores, old_cpu, new_cpu);  [in uncore_change_context()]
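
uncore_change_type_ctx() encodes the driver's sign convention: old_cpu < 0 means the first CPU of a die is coming up (just record the new owner), new_cpu < 0 means the last one is going away (nothing to migrate to), and otherwise the perf context follows the counters. A condensed sketch of that decision, with box_cpu standing in for box->cpu:

    #include <linux/perf_event.h>

    static void example_uncore_change_ctx(struct pmu *pmu, int *box_cpu,
                                          int old_cpu, int new_cpu)
    {
            if (old_cpu < 0) {              /* first CPU on the die: claim it */
                    *box_cpu = new_cpu;
                    return;
            }
            if (new_cpu < 0)                /* last CPU going away: park it */
                    return;

            perf_pmu_migrate_context(pmu, old_cpu, new_cpu);
            *box_cpu = new_cpu;
    }
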
/kernel/linux/linux-5.10/drivers/scsi/lpfc/
  lpfc_init.c
   10942  int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu;  [in lpfc_cpu_affinity_check(), local]
   11010  new_cpu = start_cpu;  [in lpfc_cpu_affinity_check()]
   11012  new_cpup = &phba->sli4_hba.cpu_map[new_cpu];  [in lpfc_cpu_affinity_check()]
   11017  new_cpu = cpumask_next(  [in lpfc_cpu_affinity_check()]
   11018  new_cpu, cpu_present_mask);  [in lpfc_cpu_affinity_check()]
   11019  if (new_cpu == nr_cpumask_bits)  [in lpfc_cpu_affinity_check()]
   11020  new_cpu = first_cpu;  [in lpfc_cpu_affinity_check()]
   11032  start_cpu = cpumask_next(new_cpu, cpu_present_mask);  [in lpfc_cpu_affinity_check()]
   11040  cpu, cpup->eq, new_cpu,  [in lpfc_cpu_affinity_check()]
   11061  new_cpu  [in lpfc_cpu_affinity_check()]
    [all...]
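
The lpfc loop is the wrap-around variant of the mask walk: starting anywhere in cpu_present_mask, cpumask_next() past the last present CPU returns nr_cpumask_bits, which the driver maps back to first_cpu so one lap still visits every CPU. A generic sketch of that traversal, with the driver's matching condition abstracted into a predicate:

    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    /* Sketch: scan present CPUs from start_cpu, wrapping to first_cpu,
     * and return the first CPU accepted by match(). */
    static int example_scan_present_cpus(int start_cpu, int first_cpu,
                                         bool (*match)(int cpu))
    {
            int new_cpu = start_cpu;
            unsigned int visited;

            for (visited = 0; visited < nr_cpu_ids; visited++) {
                    if (match(new_cpu))
                            return new_cpu;
                    new_cpu = cpumask_next(new_cpu, cpu_present_mask);
                    if (new_cpu >= nr_cpumask_bits)
                            new_cpu = first_cpu;    /* wrap, as lpfc does */
            }
            return -ENOENT;
    }
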

Completed in 64 milliseconds