Lines matching refs: cpu

11 #include <linux/cpu.h>

195 #define gic_data_rdist_cpu(cpu) (per_cpu_ptr(gic_rdists->rdist, cpu))
275 int cpu;
278 cpu = vpe_to_cpuid_lock(map->vpe, flags);
282 cpu = its_dev->event_map.col_map[its_get_event_id(d)];
287 return cpu;
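
Lines 275-287 are the irq_to_cpuid() style resolution: a VLPI is charged to whichever CPU its vPE is currently resident on (resolved under vpe_to_cpuid_lock()), while a plain LPI follows the per-event column map. A minimal userspace sketch of that decision, assuming simplified struct vpe/dev/mapping types in place of the driver's irq_data plumbing:

#include <stdio.h>

struct vpe { int resident_cpu; };              /* where the vPE currently runs */
struct dev { int col_map[32]; };               /* event -> target CPU */
struct mapping {
    struct vpe *vpe;                           /* non-NULL for a VLPI */
    struct dev *dev;
    int event;
};

/* analogue of irq_to_cpuid(): VLPIs follow the vPE, LPIs follow col_map */
static int irq_to_cpuid(const struct mapping *m)
{
    if (m->vpe)
        return m->vpe->resident_cpu;           /* the driver resolves this under the vpe lock */
    return m->dev->col_map[m->event];
}

int main(void)
{
    struct dev d = { .col_map = { [3] = 2 } };
    struct mapping lpi = { .dev = &d, .event = 3 };
    printf("LPI targets CPU%d\n", irq_to_cpuid(&lpi));
    return 0;
}
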
1423 int cpu;
1438 cpu = irq_to_cpuid_lock(d, &flags);
1439 raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
1440 rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
1444 raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
1510 static __maybe_unused u32 its_read_lpi_count(struct irq_data *d, int cpu)
1513 return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
1516 return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
1519 static void its_inc_lpi_count(struct irq_data *d, int cpu)
1522 atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
1524 atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
1528 static void its_dec_lpi_count(struct irq_data *d, int cpu)
1531 atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
1533 atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
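
Lines 1510-1533 keep per-CPU counts of managed vs. unmanaged LPIs, which the CPU selection code later uses to balance targets. A compilable sketch of the same bookkeeping using C11 atomics instead of the kernel's per-CPU variables; the is_managed flag stands in for irqd_affinity_is_managed(d):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4                      /* sketch-only CPU count */

/* per-CPU LPI bookkeeping, split by managed vs. unmanaged affinity */
struct cpu_lpi_count {
    atomic_int managed;
    atomic_int unmanaged;
};

static struct cpu_lpi_count cpu_lpi_count[NR_CPUS];

static int read_lpi_count(int cpu, bool is_managed)
{
    return is_managed ? atomic_load(&cpu_lpi_count[cpu].managed)
                      : atomic_load(&cpu_lpi_count[cpu].unmanaged);
}

static void inc_lpi_count(int cpu, bool is_managed)
{
    atomic_fetch_add(is_managed ? &cpu_lpi_count[cpu].managed
                                : &cpu_lpi_count[cpu].unmanaged, 1);
}

static void dec_lpi_count(int cpu, bool is_managed)
{
    atomic_fetch_sub(is_managed ? &cpu_lpi_count[cpu].managed
                                : &cpu_lpi_count[cpu].unmanaged, 1);
}

int main(void)
{
    inc_lpi_count(1, true);
    inc_lpi_count(1, false);
    dec_lpi_count(1, false);
    printf("cpu1: managed=%d unmanaged=%d\n",
           read_lpi_count(1, true), read_lpi_count(1, false));
    return 0;
}
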
1539 unsigned int cpu = nr_cpu_ids, tmp;
1546 cpu = tmp;
1551 return cpu;
1562 int cpu, node;
1592 cpu = cpumask_pick_least_loaded(d, tmpmask);
1593 if (cpu < nr_cpu_ids) {
1613 cpu = cpumask_pick_least_loaded(d, tmpmask);
1622 cpu = cpumask_pick_least_loaded(d, tmpmask);
1627 pr_debug("IRQ%d -> %*pbl CPU%d\n", d->irq, cpumask_pr_args(aff_mask), cpu);
1628 return cpu;
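
Lines 1539-1628 pick a target: cpumask_pick_least_loaded() scans the candidate mask for the CPU carrying the fewest LPIs, using nr_cpu_ids as the "no CPU found" sentinel. A standalone sketch of that scan, with a plain bitmask standing in for struct cpumask:

#include <stdio.h>

#define NR_CPUS 8                              /* sketch-only bound */

static int lpi_total[NR_CPUS];                 /* stand-in for managed + unmanaged load */

/*
 * Analogue of cpumask_pick_least_loaded(): walk the candidate mask and
 * return the CPU carrying the fewest LPIs, or NR_CPUS if the mask is empty.
 */
static int pick_least_loaded(unsigned int mask)
{
    int best = NR_CPUS;                        /* "no CPU" sentinel, like nr_cpu_ids */
    int best_count = -1;

    for (int cpu = 0; cpu < NR_CPUS; cpu++) {
        if (!(mask & (1u << cpu)))
            continue;
        if (best == NR_CPUS || lpi_total[cpu] < best_count) {
            best = cpu;
            best_count = lpi_total[cpu];
        }
    }
    return best;
}

int main(void)
{
    lpi_total[0] = 5; lpi_total[2] = 1; lpi_total[3] = 7;
    printf("least loaded in {0,2,3}: CPU%d\n", pick_least_loaded(0x0D));
    return 0;
}
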
1636 int cpu, prev_cpu;
1647 cpu = its_select_cpu(d, mask_val);
1649 cpu = cpumask_pick_least_loaded(d, mask_val);
1652 if (cpu < 0 || cpu >= nr_cpu_ids) {
1656 /* don't set the affinity when the target cpu is same as current one */
1657 if (cpu != prev_cpu) {
1658 target_col = &its_dev->its->collections[cpu];
1660 its_dev->event_map.col_map[id] = cpu;
1661 irq_data_update_effective_affinity(d, cpumask_of(cpu));
1664 its_inc_lpi_count(d, cpu);
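
Lines 1636-1664 are the its_set_affinity() bookkeeping: drop the previous target's count, pick a new CPU, retarget only if it actually changed, then charge the new target. A sketch under the same assumptions as above (array-backed col_map and counters; the MOVI command and effective-affinity update are only noted in comments):

#include <stdio.h>

#define NR_CPUS 8                        /* sketch-only bound */

static int col_map[32];                  /* event -> target CPU, like event_map.col_map */
static int lpi_count[NR_CPUS];           /* per-CPU LPI load */

/*
 * Analogue of the its_set_affinity() bookkeeping: drop the old target's
 * share, retarget only if the chosen CPU actually changed, then charge
 * the (possibly unchanged) target.
 */
static int set_affinity(int event, int new_cpu)
{
    int prev_cpu = col_map[event];

    if (new_cpu < 0 || new_cpu >= NR_CPUS)
        return -1;                       /* the driver returns -EINVAL here */

    lpi_count[prev_cpu]--;

    if (new_cpu != prev_cpu) {
        /* the driver sends a MOVI and updates the effective affinity here */
        col_map[event] = new_cpu;
    }

    lpi_count[new_cpu]++;
    return 0;
}

int main(void)
{
    col_map[5] = 0;
    lpi_count[0] = 1;
    set_affinity(5, 3);
    printf("event 5 -> CPU%d (cpu0 load %d, cpu3 load %d)\n",
           col_map[5], lpi_count[0], lpi_count[3]);
    return 0;
}
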
2702 int cpu;
2707 for_each_possible_cpu(cpu)
2709 void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;
2711 if (!base || cpu == smp_processor_id()) {
2729 gic_data_rdist()->vpe_l1_base = gic_data_rdist_cpu(cpu)->vpe_l1_base;
2730 *mask = gic_data_rdist_cpu(cpu)->vpe_table_mask;
2738 static bool allocate_vpe_l2_table(int cpu, u32 id)
2740 void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;
2786 table = gic_data_rdist_cpu(cpu)->vpe_l1_base;
2997 int err, cpu;
3019 for_each_possible_cpu(cpu)
3025 pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu);
3029 gic_data_rdist_cpu(cpu)->pend_page = pend_page;
3195 int cpu = smp_processor_id();
3202 cpu_node = of_get_cpu_node(cpu, NULL);
3225 its->collections[cpu].target_address = target;
3226 its->collections[cpu].col_id = cpu;
3228 its_send_mapc(its, &its->collections[cpu], 1);
3229 its_send_invall(its, &its->collections[cpu]);
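
Lines 3195-3229 bind each CPU to its own ITS collection: col_id is the CPU number and the target is either the redistributor's physical address or a linear CPU number, after which the driver issues MAPC and INVALL. A minimal sketch of that table; target_for_cpu() is a made-up placeholder for the real redistributor-address discovery:

#include <stdio.h>
#include <stdint.h>

#define NR_CPUS 4                               /* sketch-only bound */

struct its_collection {
    uint64_t target_address;                    /* redistributor / CPU target */
    uint16_t col_id;
};

static struct its_collection collections[NR_CPUS];

/* hypothetical stand-in for reading the redistributor base for this CPU */
static uint64_t target_for_cpu(int cpu)
{
    return 0x80000000ULL + (uint64_t)cpu * 0x20000;
}

/* analogue of its_cpu_init_collection() for one ITS */
static void cpu_init_collection(int cpu)
{
    collections[cpu].target_address = target_for_cpu(cpu);
    collections[cpu].col_id = cpu;
    /* the driver then sends MAPC and INVALL for this collection */
}

int main(void)
{
    for (int cpu = 0; cpu < NR_CPUS; cpu++)
        cpu_init_collection(cpu);
    printf("CPU2 collection -> %#llx\n",
           (unsigned long long)collections[2].target_address);
    return 0;
}
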
3341 int cpu;
3377 for_each_possible_cpu(cpu)
3379 if (!allocate_vpe_l2_table(cpu, vpe_id)) {
3608 int cpu;
3610 cpu = its_select_cpu(d, cpu_online_mask);
3611 if (cpu < 0 || cpu >= nr_cpu_ids) {
3615 its_inc_lpi_count(d, cpu);
3616 its_dev->event_map.col_map[event] = cpu;
3617 irq_data_update_effective_affinity(d, cpumask_of(cpu));
3800 int from, cpu = cpumask_first(mask_val);
3817 if (from == cpu) {
3821 vpe->col_idx = cpu;
3824 * GICv4.1 allows us to skip VMOVP if moving to a cpu whose RD
3827 if (gic_data_rdist_cpu(cpu)->vpe_table_mask &&
         cpumask_test_cpu(from, gic_data_rdist_cpu(cpu)->vpe_table_mask)) {
3832 its_vpe_db_proxy_move(vpe, from, cpu);
3835 irq_data_update_effective_affinity(d, cpumask_of(cpu));
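
Lines 3800-3835 move a vPE to the first CPU of the requested mask; the GICv4.1 shortcut at line 3827 skips the VMOVP command when the destination redistributor already shares its vPE table with the source CPU. A sketch of that sharing test with plain bitmasks in place of cpumask (the vpe_table_mask layout here is a sketch assumption):

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8                        /* sketch-only bound */

/* per-CPU mask of CPUs whose redistributors share this CPU's vPE table */
static unsigned int vpe_table_mask[NR_CPUS];

/*
 * Analogue of the check at line 3827: moving a vPE between two CPUs that
 * share a vPE table does not need a VMOVP command.
 */
static bool vmovp_needed(int from, int to)
{
    return !(vpe_table_mask[to] & (1u << from));
}

int main(void)
{
    vpe_table_mask[1] = (1u << 0) | (1u << 1);   /* CPUs 0 and 1 share a table */
    printf("0 -> 1 needs VMOVP: %s\n", vmovp_needed(0, 1) ? "yes" : "no");
    printf("2 -> 1 needs VMOVP: %s\n", vmovp_needed(2, 1) ? "yes" : "no");
    return 0;
}
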
4137 int cpu;
4143 cpu = vpe_to_cpuid_lock(vpe, &flags);
4144 raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
4145 rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
4149 raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
4264 int cpu;
4280 cpu = vpe_to_cpuid_lock(vpe, &flags);
4281 raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
4282 base = gic_data_rdist_cpu(cpu)->rd_base + SZ_128K;
4300 raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);