Lines Matching refs:vpe (GICv4 vPE handling in the GIC ITS driver, drivers/irqchip/irq-gic-v3-its.c)

261 static int vpe_to_cpuid_lock(struct its_vpe *vpe, unsigned long *flags)

263 raw_spin_lock_irqsave(&vpe->vpe_lock, *flags);
264 return vpe->col_idx;
267 static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags)
269 raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
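Read together, the fragments at 261-269 assemble into the small helper pair below; only the braces are filled in beyond the lines shown. The callers at 278 and 295 bracket any use of the returned CPU between the two calls, so vpe->col_idx cannot change underneath them.

static int vpe_to_cpuid_lock(struct its_vpe *vpe, unsigned long *flags)
{
	/* Pin the vPE to its current CPU and tell the caller which one it is */
	raw_spin_lock_irqsave(&vpe->vpe_lock, *flags);
	return vpe->col_idx;
}

static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
}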
278 cpu = vpe_to_cpuid_lock(map->vpe, flags);
295 vpe_to_cpuid_unlock(map->vpe, flags);
308 static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
310 if (valid_col(its->collections + vpe->col_idx)) {
311 return vpe;
370 struct its_vpe *vpe;
374 struct its_vpe *vpe;
380 struct its_vpe *vpe;
388 struct its_vpe *vpe;
395 struct its_vpe *vpe;
402 struct its_vpe *vpe;
406 struct its_vpe *vpe;
735 its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);
739 return valid_vpe(its, desc->its_vinvall_cmd.vpe);
749 its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
754 alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
761 vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
772 vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page));
774 alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);
781 its_encode_vmapp_default_db(cmd, desc->its_vmapp_cmd.vpe->vpe_db_lpi);
786 return valid_vpe(its, desc->its_vmapp_cmd.vpe);
794 db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
801 its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
808 return valid_vpe(its, desc->its_vmapti_cmd.vpe);
816 db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
823 its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
830 return valid_vpe(its, desc->its_vmovi_cmd.vpe);
841 its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
846 its_encode_vmovp_default_db(cmd, desc->its_vmovp_cmd.vpe->vpe_db_lpi);
851 return valid_vpe(its, desc->its_vmovp_cmd.vpe);
866 return valid_vpe(its, map->vpe);
881 return valid_vpe(its, map->vpe);
896 return valid_vpe(its, map->vpe);
906 its_encode_vpeid(cmd, desc->its_invdb_cmd.vpe->vpe_id);
910 return valid_vpe(its, desc->its_invdb_cmd.vpe);
920 its_encode_vpeid(cmd, desc->its_vsgi_cmd.vpe->vpe_id);
929 return valid_vpe(its, desc->its_vsgi_cmd.vpe);
1234 desc.its_vmapti_cmd.vpe = map->vpe;
1248 desc.its_vmovi_cmd.vpe = map->vpe;
1256 static void its_send_vmapp(struct its_node *its, struct its_vpe *vpe, bool valid)
1260 desc.its_vmapp_cmd.vpe = vpe;
1262 desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];
1267 static void its_send_vmovp(struct its_vpe *vpe)
1272 int col_id = vpe->col_idx;
1274 desc.its_vmovp_cmd.vpe = vpe;
1294 desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm);
1303 if (!require_its_list_vmovp(vpe->its_vm, its)) {
1314 static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
1318 desc.its_vinvall_cmd.vpe = vpe;
1364 static void its_send_invdb(struct its_node *its, struct its_vpe *vpe)
1368 desc.its_invdb_cmd.vpe = vpe;
1431 val |= FIELD_PREP(GICR_INVLPIR_VPEID, map->vpe->vpe_id);
1768 struct its_vpe *vpe = vm->vpes[i];
1769 struct irq_data *d = irq_get_irq_data(vpe->irq);
1772 vpe->col_idx = cpumask_first(cpu_online_mask);
1773 its_send_vmapp(its, vpe, true);
1774 its_send_vinvall(its, vpe);
1775 irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
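The block at 1768-1775 is the per-vPE loop that runs when a VM picks up its first vLPI on a given ITS; a sketch of that loop as implied by the fragments (the enclosing function and its locking, its_map_vm() upstream, are assumed rather than shown):

	for (i = 0; i < vm->nr_vpes; i++) {
		struct its_vpe *vpe = vm->vpes[i];
		struct irq_data *d = irq_get_irq_data(vpe->irq);

		/* Map the vPE to the first possible CPU */
		vpe->col_idx = cpumask_first(cpu_online_mask);
		its_send_vmapp(its, vpe, true);
		its_send_vinvall(its, vpe);
		irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
	}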
3695 static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
3703 if (vpe->vpe_proxy_event == -1) {
3707 its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event);
3708 vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL;
3718 vpe_proxy.next_victim = vpe->vpe_proxy_event;
3721 vpe->vpe_proxy_event = -1;
3724 static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
3735 its_vpe_db_proxy_unmap_locked(vpe);
3740 static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
3748 if (vpe->vpe_proxy_event != -1) {
3758 vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
3759 vpe->vpe_proxy_event = vpe_proxy.next_victim;
3762 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx;
3763 its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event);
3766 static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
3780 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
3788 its_vpe_db_proxy_map_locked(vpe);
3791 its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event);
3792 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to;
3799 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3812 * protect us, and that we must ensure nobody samples vpe->col_idx
3814 * taken on any vLPI handling path that evaluates vpe->col_idx.
3816 from = vpe_to_cpuid_lock(vpe, &flags);
3821 vpe->col_idx = cpu;
3831 its_send_vmovp(vpe);
3832 its_vpe_db_proxy_move(vpe, from, cpu);
3836 vpe_to_cpuid_unlock(vpe, flags);
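The comment at 3812-3814 is the key to the affinity path at 3799-3836: the irq_desc lock is not enough, so vpe_lock is held across the whole update. A trimmed sketch of the sequence (target-CPU selection simplified and error/shortcut handling omitted; only the calls visible in the fragments are kept):

static int its_vpe_set_affinity(struct irq_data *d,
				const struct cpumask *mask_val, bool force)
{
	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
	unsigned long flags;
	int from, cpu;

	/* Nobody may sample vpe->col_idx while it is being updated */
	from = vpe_to_cpuid_lock(vpe, &flags);
	cpu = cpumask_first(mask_val);		/* simplified target choice */
	if (from == cpu)
		goto out;

	vpe->col_idx = cpu;

	its_send_vmovp(vpe);			/* retarget the vPE on the ITS(s) */
	its_vpe_db_proxy_move(vpe, from, cpu);	/* move the proxied doorbell too */

out:
	irq_data_update_effective_affinity(d, cpumask_of(cpu));
	vpe_to_cpuid_unlock(vpe, flags);

	return IRQ_SET_MASK_OK_DONE;
}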
3854 static void its_vpe_schedule(struct its_vpe *vpe)
3860 val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) & GENMASK_ULL(51, 12);
3866 val = virt_to_phys(page_address(vpe->vpt_page)) & GENMASK_ULL(51, 16);
3872 * easily. So in the end, vpe->pending_last is only an
3879 val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
3884 static void its_vpe_deschedule(struct its_vpe *vpe)
3891 vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
3892 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
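Lines 3854-3892 make a vPE resident (and non-resident again) by programming GICR_VPROPBASER/GICR_VPENDBASER directly on the local redistributor. A condensed sketch of the pair, assuming the upstream accessors gic_write_vpropbaser()/gic_write_vpendbaser()/its_clear_vpend_valid() and leaving out the cacheability and shareability attributes:

static void its_vpe_schedule(struct its_vpe *vpe)
{
	void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
	u64 val;

	/* Point the redistributor at the VM's LPI property table, bits [51:12] */
	val  = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
	       GENMASK_ULL(51, 12);
	val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
	gic_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);

	/* Point it at this vPE's pending table, bits [51:16], and make it resident */
	val  = virt_to_phys(page_address(vpe->vpt_page)) &
	       GENMASK_ULL(51, 16);
	val |= GICR_VPENDBASER_PendingLast;	/* see the comment at 3872 */
	val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
	val |= GICR_VPENDBASER_Valid;
	gic_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
}

static void its_vpe_deschedule(struct its_vpe *vpe)
{
	void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
	u64 val;

	/* Clear Valid and latch what the redistributor reports back */
	val = its_clear_vpend_valid(vlpi_base, 0, 0);

	vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
	vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
}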
3895 static void its_vpe_invall(struct its_vpe *vpe)
3905 if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr]) {
3913 its_send_vinvall(its, vpe);
3920 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3925 its_vpe_schedule(vpe);
3929 its_vpe_deschedule(vpe);
3937 its_vpe_invall(vpe);
3945 static void its_vpe_send_cmd(struct its_vpe *vpe, void (*cmd)(struct its_device *, u32))
3951 its_vpe_db_proxy_map_locked(vpe);
3952 cmd(vpe_proxy.dev, vpe->vpe_proxy_event);
3959 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3965 raw_spin_lock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
3966 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
3969 raw_spin_unlock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
3971 its_vpe_send_cmd(vpe, its_send_inv);
3996 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4005 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
4007 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
4009 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
4014 its_vpe_send_cmd(vpe, its_send_int);
4016 its_vpe_send_cmd(vpe, its_send_clear);
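Without direct LPI injection, doorbell INV/INT/CLEAR commands go through the proxy device; the helper at 3945-3952 serialises that against other proxy users (the vpe_proxy lock name follows the upstream driver):

static void its_vpe_send_cmd(struct its_vpe *vpe,
			     void (*cmd)(struct its_device *, u32))
{
	unsigned long flags;

	raw_spin_lock_irqsave(&vpe_proxy.lock, flags);

	/* Make sure the vPE owns a proxy event before issuing the command */
	its_vpe_db_proxy_map_locked(vpe);
	cmd(vpe_proxy.dev, vpe->vpe_proxy_event);

	raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
}

The callers at 3959-3971 and 3996-4016 pick between this proxy path and a direct GICR_SETLPIR/GICR_CLRLPIR write on the vPE's redistributor, depending on whether direct LPI injection is available.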
4029 .name = "GICv4-vpe",
4060 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4070 its_send_invdb(its, vpe);
4086 static void its_vpe_4_1_schedule(struct its_vpe *vpe, struct its_cmd_info *info)
4095 val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id);
4100 static void its_vpe_4_1_deschedule(struct its_vpe *vpe, struct its_cmd_info *info)
4118 raw_spin_lock_irqsave(&vpe->vpe_lock, flags);
4120 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
4121 raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
4128 vpe->pending_last = true;
4132 static void its_vpe_4_1_invall(struct its_vpe *vpe)
4140 val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id);
4143 cpu = vpe_to_cpuid_lock(vpe, &flags);
4150 vpe_to_cpuid_unlock(vpe, flags);
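On GICv4.1 the invalidate-all becomes a direct redistributor write instead of an ITS command. A sketch of 4132-4150, reusing the rd_lock/rd_base pattern visible at 3965-3969 and assuming the architectural GICR_INVALLR layout:

static void its_vpe_4_1_invall(struct its_vpe *vpe)
{
	void __iomem *rdbase;
	unsigned long flags;
	u64 val;
	int cpu;

	val  = GICR_INVALLR_V;
	val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id);

	/* Target the redistributor this vPE currently lives on */
	cpu = vpe_to_cpuid_lock(vpe, &flags);
	raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
	rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
	gic_write_lpir(val, rdbase + GICR_INVALLR);

	wait_for_syncr(rdbase);
	raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
	vpe_to_cpuid_unlock(vpe, flags);
}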
4155 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4160 its_vpe_4_1_schedule(vpe, info);
4164 its_vpe_4_1_deschedule(vpe, info);
4172 its_vpe_4_1_invall(vpe);
4181 .name = "GICv4.1-vpe",
4191 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4194 desc.its_vsgi_cmd.vpe = vpe;
4196 desc.its_vsgi_cmd.priority = vpe->sgi_config[d->hwirq].priority;
4197 desc.its_vsgi_cmd.enable = vpe->sgi_config[d->hwirq].enabled;
4198 desc.its_vsgi_cmd.group = vpe->sgi_config[d->hwirq].group;
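The descriptor fill at 4191-4198 is how a vSGI's current configuration is pushed to the ITS; a sketch of that helper, assuming the its_send_single_vcommand()/its_build_vsgi_cmd() plumbing and the find_4_1_its() lookup of the upstream driver:

static void its_configure_sgi(struct irq_data *d, bool clear)
{
	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
	struct its_cmd_desc desc;

	desc.its_vsgi_cmd.vpe = vpe;
	desc.its_vsgi_cmd.sgi = d->hwirq;
	desc.its_vsgi_cmd.priority = vpe->sgi_config[d->hwirq].priority;
	desc.its_vsgi_cmd.enable = vpe->sgi_config[d->hwirq].enabled;
	desc.its_vsgi_cmd.group = vpe->sgi_config[d->hwirq].group;
	desc.its_vsgi_cmd.clear = clear;

	/* Any GICv4.1 ITS will do: the vPE is mapped on all of them */
	its_send_single_vcommand(find_4_1_its(), its_build_vsgi_cmd, &desc);
}

The mask/unmask callbacks at 4211-4221 simply flip sgi_config[].enabled and invoke this helper again.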
4211 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4213 vpe->sgi_config[d->hwirq].enabled = false;
4219 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4221 vpe->sgi_config[d->hwirq].enabled = true;
4243 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4247 val = FIELD_PREP(GITS_SGIR_VPEID, vpe->vpe_id);
4259 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4280 cpu = vpe_to_cpuid_lock(vpe, &flags);
4283 writel_relaxed(vpe->vpe_id, base + GICR_VSGIR);
4301 vpe_to_cpuid_unlock(vpe, flags);
4314 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4319 vpe->sgi_config[d->hwirq].priority = info->priority;
4320 vpe->sgi_config[d->hwirq].group = info->group;
4341 struct its_vpe *vpe = args;
4348 vpe->sgi_config[i].priority = 0;
4349 vpe->sgi_config[i].enabled = false;
4350 vpe->sgi_config[i].group = false;
4352 irq_domain_set_hwirq_and_chip(domain, virq + i, i, &its_sgi_irq_chip, vpe);
4373 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4385 vpe->sgi_config[d->hwirq].enabled = false;
4407 static int its_vpe_init(struct its_vpe *vpe)
4431 raw_spin_lock_init(&vpe->vpe_lock);
4432 vpe->vpe_id = vpe_id;
4433 vpe->vpt_page = vpt_page;
4435 atomic_set(&vpe->vmapp_count, 0);
4437 vpe->vpe_proxy_event = -1;
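The assignments at 4431-4437 are the tail of vPE initialisation; a sketch of the whole routine, with the ID and pending-table allocation taken from the upstream driver (the vPE table allocation step and the GICv4.1-specific handling of vpe_proxy_event are omitted):

static int its_vpe_init(struct its_vpe *vpe)
{
	struct page *vpt_page;
	int vpe_id;

	/* Allocate a vpe_id and the virtual pending table that backs it */
	vpe_id = its_vpe_id_alloc();
	if (vpe_id < 0)
		return vpe_id;

	vpt_page = its_allocate_pending_table(GFP_KERNEL);
	if (!vpt_page) {
		its_vpe_id_free(vpe_id);
		return -ENOMEM;
	}

	raw_spin_lock_init(&vpe->vpe_lock);
	vpe->vpe_id = vpe_id;
	vpe->vpt_page = vpt_page;
	atomic_set(&vpe->vmapp_count, 0);
	/* Pre-GICv4.1, the doorbell goes through the proxy device */
	vpe->vpe_proxy_event = -1;

	return 0;
}

its_vpe_teardown() at 4443-4447 undoes exactly these steps.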
4443 static void its_vpe_teardown(struct its_vpe *vpe)
4445 its_vpe_db_proxy_unmap(vpe);
4446 its_vpe_id_free(vpe->vpe_id);
4447 its_free_pending_table(vpe->vpt_page);
4459 struct its_vpe *vpe = irq_data_get_irq_chip_data(data);
4461 BUG_ON(vm != vpe->its_vm);
4464 its_vpe_teardown(vpe);
4537 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4550 vpe->col_idx = cpumask_first(cpu_online_mask);
4558 its_send_vmapp(its, vpe, true);
4559 its_send_vinvall(its, vpe);
4562 irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
4569 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4586 its_send_vmapp(its, vpe, false);