Lines matching refs:vpe
259 static int vpe_to_cpuid_lock(struct its_vpe *vpe, unsigned long *flags)
261 raw_spin_lock_irqsave(&vpe->vpe_lock, *flags);
262 return vpe->col_idx;
265 static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags)
267 raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
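These two helpers pin a vPE to its current column: vpe_to_cpuid_lock() takes vpe->vpe_lock and returns the resident CPU (vpe->col_idx), vpe_to_cpuid_unlock() drops the lock. A minimal, hypothetical caller sketch (example_on_vpe_cpu is not part of the driver), assuming only the two helpers shown above:

        static void example_on_vpe_cpu(struct its_vpe *vpe)
        {
                unsigned long flags;
                int cpu;

                /* vpe->col_idx cannot change while the lock is held */
                cpu = vpe_to_cpuid_lock(vpe, &flags);
                /* ... operate on the redistributor of 'cpu' here ... */
                vpe_to_cpuid_unlock(vpe, flags);
        }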
274 struct its_vpe *vpe = NULL;
278 vpe = irq_data_get_irq_chip_data(d);
282 vpe = map->vpe;
285 if (vpe) {
286 cpu = vpe_to_cpuid_lock(vpe, flags);
300 struct its_vpe *vpe = NULL;
303 vpe = irq_data_get_irq_chip_data(d);
307 vpe = map->vpe;
310 if (vpe)
311 vpe_to_cpuid_unlock(vpe, flags);
322 static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
324 if (valid_col(its->collections + vpe->col_idx))
325 return vpe;
383 struct its_vpe *vpe;
387 struct its_vpe *vpe;
393 struct its_vpe *vpe;
401 struct its_vpe *vpe;
408 struct its_vpe *vpe;
415 struct its_vpe *vpe;
419 struct its_vpe *vpe;
771 its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);
775 return valid_vpe(its, desc->its_vinvall_cmd.vpe);
787 its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
792 alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
799 vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
809 vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page));
811 alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);
818 its_encode_vmapp_default_db(cmd, desc->its_vmapp_cmd.vpe->vpe_db_lpi);
823 return valid_vpe(its, desc->its_vmapp_cmd.vpe);
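Lines 792 and 811 are the two halves of a per-vPE mapping refcount: a VMAPP with V=0 decrements vmapp_count (teardown work only once the last ITS unmaps the vPE), while a VMAPP with V=1 increments it (allocation work only for the first ITS to map it). A hedged condensation of that pattern; in the real builder it is interleaved with the field encoding and GICv4.1 checks:

        bool alloc;

        if (!desc->its_vmapp_cmd.valid) {
                /* unmap: true only when the last ITS drops its mapping */
                alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
        } else {
                /* map: true only for the first ITS to map this vPE */
                alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);
        }
        /* 'alloc' is then encoded into the command for GICv4.1 ITSs */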
833 db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
839 its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
846 return valid_vpe(its, desc->its_vmapti_cmd.vpe);
856 db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
862 its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
869 return valid_vpe(its, desc->its_vmovi_cmd.vpe);
882 its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
887 its_encode_vmovp_default_db(cmd, desc->its_vmovp_cmd.vpe->vpe_db_lpi);
892 return valid_vpe(its, desc->its_vmovp_cmd.vpe);
910 return valid_vpe(its, map->vpe);
928 return valid_vpe(its, map->vpe);
946 return valid_vpe(its, map->vpe);
957 its_encode_vpeid(cmd, desc->its_invdb_cmd.vpe->vpe_id);
961 return valid_vpe(its, desc->its_invdb_cmd.vpe);
972 its_encode_vpeid(cmd, desc->its_vsgi_cmd.vpe->vpe_id);
981 return valid_vpe(its, desc->its_vsgi_cmd.vpe);
1264 desc.its_vmapti_cmd.vpe = map->vpe;
1278 desc.its_vmovi_cmd.vpe = map->vpe;
1287 struct its_vpe *vpe, bool valid)
1291 desc.its_vmapp_cmd.vpe = vpe;
1293 desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];
1298 static void its_send_vmovp(struct its_vpe *vpe)
1303 int col_id = vpe->col_idx;
1305 desc.its_vmovp_cmd.vpe = vpe;
1325 desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm);
1332 if (!require_its_list_vmovp(vpe->its_vm, its))
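Lines 1298-1332 are the VMOVP broadcast: when ITSList is in use, the move must be advertised to every GICv4 ITS that currently has vLPIs of this VM mapped. A hedged sketch of the emit loop (is_v4(), its_nodes, its_build_vmovp_cmd() and its_send_single_vcommand() are driver internals assumed here, not shown in this listing):

        desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm);

        list_for_each_entry(its, &its_nodes, entry) {
                if (!is_v4(its))
                        continue;

                /* skip ITSs that have no vLPIs mapped for this VM */
                if (!require_its_list_vmovp(vpe->its_vm, its))
                        continue;

                desc.its_vmovp_cmd.col = &its->collections[col_id];
                its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
        }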
1342 static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
1346 desc.its_vinvall_cmd.vpe = vpe;
1392 static void its_send_invdb(struct its_node *its, struct its_vpe *vpe)
1396 desc.its_invdb_cmd.vpe = vpe;
1472 val |= FIELD_PREP(GICR_INVLPIR_VPEID, map->vpe->vpe_id);
1791 struct its_vpe *vpe = vm->vpes[i];
1792 struct irq_data *d = irq_get_irq_data(vpe->irq);
1795 vpe->col_idx = cpumask_first(cpu_online_mask);
1796 its_send_vmapp(its, vpe, true);
1797 its_send_vinvall(its, vpe);
1798 irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
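Lines 1791-1798 sit in the loop that runs when a VM's vLPIs are first mapped through an ITS: every vPE of the VM gets mapped on that ITS and parked on an online CPU. A hedged reconstruction of the loop (vm->nr_vpes is assumed from struct its_vm):

        for (i = 0; i < vm->nr_vpes; i++) {
                struct its_vpe *vpe = vm->vpes[i];
                struct irq_data *d = irq_get_irq_data(vpe->irq);

                /* Map the vPE to the first possible CPU */
                vpe->col_idx = cpumask_first(cpu_online_mask);
                its_send_vmapp(its, vpe, true);
                its_send_vinvall(its, vpe);
                irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
        }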
3686 static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
3693 if (vpe->vpe_proxy_event == -1)
3696 its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event);
3697 vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL;
3707 vpe_proxy.next_victim = vpe->vpe_proxy_event;
3709 vpe->vpe_proxy_event = -1;
3712 static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
3722 its_vpe_db_proxy_unmap_locked(vpe);
3727 static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
3734 if (vpe->vpe_proxy_event != -1)
3742 vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
3743 vpe->vpe_proxy_event = vpe_proxy.next_victim;
3746 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx;
3747 its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event);
3750 static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
3763 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
3771 its_vpe_db_proxy_map_locked(vpe);
3774 its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event);
3775 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to;
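The vpe_proxy accesses above (dev, vpes[], next_victim) implement the GICv4.0 fallback for redistributors that cannot be poked directly: vPE doorbells are proxied through events of a reserved ITS device, and slots are recycled round-robin when they run out. A sketch of the bookkeeping those accesses imply; field types and the lock are assumptions, the real definition lives elsewhere in the driver:

        static struct {
                raw_spinlock_t          lock;           /* assumed: serializes map/unmap/move */
                struct its_device       *dev;           /* proxy ITS device carrying the doorbells */
                struct its_vpe          **vpes;         /* proxy event -> vPE owning that slot */
                int                     next_victim;    /* next slot to recycle when all are busy */
        } vpe_proxy;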
3784 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3798 * protect us, and that we must ensure nobody samples vpe->col_idx
3800 * taken on any vLPI handling path that evaluates vpe->col_idx.
3802 from = vpe_to_cpuid_lock(vpe, &flags);
3817 vpe->col_idx = cpu;
3819 its_send_vmovp(vpe);
3820 its_vpe_db_proxy_move(vpe, from, cpu);
3824 vpe_to_cpuid_unlock(vpe, flags);
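Lines 3784-3824 belong to the vPE affinity path; the comment fragments at 3798-3800 explain why vpe_lock, not the usual IRQ descriptor lock, is what keeps vpe->col_idx stable. A condensed, hedged sketch of the sequence (target-CPU selection and the unchanged-CPU early-out are elided):

        from = vpe_to_cpuid_lock(vpe, &flags);  /* freeze vpe->col_idx */
        /* ... pick 'cpu' from the requested mask ... */
        vpe->col_idx = cpu;
        its_send_vmovp(vpe);                    /* tell the ITS(s) about the move */
        its_vpe_db_proxy_move(vpe, from, cpu);  /* chase the doorbell if it is proxied */
        irq_data_update_effective_affinity(d, cpumask_of(cpu));
        vpe_to_cpuid_unlock(vpe, flags);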
3843 static void its_vpe_schedule(struct its_vpe *vpe)
3849 val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
3856 val = virt_to_phys(page_address(vpe->vpt_page)) &
3863 * easily. So in the end, vpe->pending_last is only an
3870 val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
3877 static void its_vpe_deschedule(struct its_vpe *vpe)
3884 vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
3885 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
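Lines 3843-3885 make a vPE resident or non-resident on a GICv4.0 redistributor: scheduling programs GICR_VPROPBASER and GICR_VPENDBASER from the VM's vprop_page and the vPE's vpt_page and sets Valid; descheduling clears Valid and latches IDAI/PendingLast back into the vPE. A hedged sketch of the schedule half (vlpi_base, the cacheability attributes and the gicr_write_* accessors are assumptions; the driver obtains vlpi_base through its own helper):

        /* point the RD at the VM's LPI property table */
        val  = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
               GENMASK_ULL(51, 12);
        /* ... ID bits, cacheability and shareability attributes elided ... */
        gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);

        /* point it at this vPE's pending table and make the vPE resident */
        val  = virt_to_phys(page_address(vpe->vpt_page)) & GENMASK_ULL(51, 16);
        val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
        val |= GICR_VPENDBASER_Valid;
        gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);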
3888 static void its_vpe_invall(struct its_vpe *vpe)
3896 if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr])
3903 its_send_vinvall(its, vpe);
3910 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3915 its_vpe_schedule(vpe);
3919 its_vpe_deschedule(vpe);
3923 its_vpe_invall(vpe);
3931 static void its_vpe_send_cmd(struct its_vpe *vpe,
3938 its_vpe_db_proxy_map_locked(vpe);
3939 cmd(vpe_proxy.dev, vpe->vpe_proxy_event);
3946 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3951 its_vpe_send_cmd(vpe, its_send_inv);
3977 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3985 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
3987 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
3989 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
3994 its_vpe_send_cmd(vpe, its_send_int);
3996 its_vpe_send_cmd(vpe, its_send_clear);
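Lines 3931-3996 cover forcing the vPE doorbell pending state: when the redistributor supports direct LPI injection the doorbell LPI is set/cleared through GICR_SETLPIR/GICR_CLRLPIR, otherwise an INT or CLEAR command is sent for the proxy event. A hedged condensation of that branch (gic_rdists->has_direct_lpi and the completion wait are assumptions about the surrounding code):

        if (gic_rdists->has_direct_lpi) {
                void __iomem *rdbase;

                rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
                if (state)
                        gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
                else
                        gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
        } else if (state) {
                its_vpe_send_cmd(vpe, its_send_int);    /* set pending via the proxy */
        } else {
                its_vpe_send_cmd(vpe, its_send_clear);  /* clear it the same way */
        }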
4008 .name = "GICv4-vpe",
4037 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4047 its_send_invdb(its, vpe);
4062 static void its_vpe_4_1_schedule(struct its_vpe *vpe,
4072 val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id);
4079 static void its_vpe_4_1_deschedule(struct its_vpe *vpe,
4098 raw_spin_lock_irqsave(&vpe->vpe_lock, flags);
4102 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
4103 raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
4112 vpe->pending_last = true;
4116 static void its_vpe_4_1_invall(struct its_vpe *vpe)
4124 val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id);
4127 cpu = vpe_to_cpuid_lock(vpe, &flags);
4134 vpe_to_cpuid_unlock(vpe, flags);
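Lines 4116-4134 are the GICv4.1 invalidate-all path: instead of a VINVALL command per ITS, a single write to GICR_INVALLR on the redistributor the vPE currently lives on does the job, which is why the vPE is pinned to its CPU around the write. A hedged sketch (GICR_INVALLR_V, the per-RD lock and the GICR_SYNCR wait are assumptions about the surrounding code):

        val  = GICR_INVALLR_V;
        val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id);

        cpu = vpe_to_cpuid_lock(vpe, &flags);   /* target the resident RD */
        rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
        gic_write_lpir(val, rdbase + GICR_INVALLR);
        /* ... poll GICR_SYNCR until the invalidation has completed ... */
        vpe_to_cpuid_unlock(vpe, flags);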
4139 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4144 its_vpe_4_1_schedule(vpe, info);
4148 its_vpe_4_1_deschedule(vpe, info);
4152 its_vpe_4_1_invall(vpe);
4161 .name = "GICv4.1-vpe",
4171 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4174 desc.its_vsgi_cmd.vpe = vpe;
4176 desc.its_vsgi_cmd.priority = vpe->sgi_config[d->hwirq].priority;
4177 desc.its_vsgi_cmd.enable = vpe->sgi_config[d->hwirq].enabled;
4178 desc.its_vsgi_cmd.group = vpe->sgi_config[d->hwirq].group;
4191 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4193 vpe->sgi_config[d->hwirq].enabled = false;
4199 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4201 vpe->sgi_config[d->hwirq].enabled = true;
4226 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4230 val = FIELD_PREP(GITS_SGIR_VPEID, vpe->vpe_id);
4243 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4263 cpu = vpe_to_cpuid_lock(vpe, &flags);
4266 writel_relaxed(vpe->vpe_id, base + GICR_VSGIR);
4283 vpe_to_cpuid_unlock(vpe, flags);
4295 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4300 vpe->sgi_config[d->hwirq].priority = info->priority;
4301 vpe->sgi_config[d->hwirq].group = info->group;
4324 struct its_vpe *vpe = args;
4331 vpe->sgi_config[i].priority = 0;
4332 vpe->sgi_config[i].enabled = false;
4333 vpe->sgi_config[i].group = false;
4336 &its_sgi_irq_chip, vpe);
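The sgi_config[d->hwirq] accesses above imply a small per-vPE table of 16 vSGI configurations, one per SGI number, which is what gets replayed into VSGI commands at 4174-4178. A sketch of the shape those accesses imply; the real field sits in struct its_vpe in the GICv4 header:

        struct {
                u8      priority;       /* forwarded in the VSGI command */
                bool    enabled;
                bool    group;
        } sgi_config[16];               /* indexed by SGI number (d->hwirq) */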
4361 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4373 vpe->sgi_config[d->hwirq].enabled = false;
4395 static int its_vpe_init(struct its_vpe *vpe)
4418 raw_spin_lock_init(&vpe->vpe_lock);
4419 vpe->vpe_id = vpe_id;
4420 vpe->vpt_page = vpt_page;
4422 atomic_set(&vpe->vmapp_count, 0);
4424 vpe->vpe_proxy_event = -1;
4429 static void its_vpe_teardown(struct its_vpe *vpe)
4431 its_vpe_db_proxy_unmap(vpe);
4432 its_vpe_id_free(vpe->vpe_id);
4433 its_free_pending_table(vpe->vpt_page);
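Lines 4395-4424 and 4429-4433 are the vPE constructor/destructor pair: init allocates a vpe_id and a pending table, initializes the lock and the map refcount, and marks the doorbell as not yet proxied; teardown releases the same resources. A hedged sketch of the init half (its_vpe_id_alloc() and its_allocate_pending_table() are assumed counterparts of the free helpers shown at 4432-4433; error handling is elided):

        vpe_id = its_vpe_id_alloc();
        vpt_page = its_allocate_pending_table(GFP_KERNEL);
        /* ... bail out and undo on allocation failure ... */

        raw_spin_lock_init(&vpe->vpe_lock);
        vpe->vpe_id = vpe_id;
        vpe->vpt_page = vpt_page;
        atomic_set(&vpe->vmapp_count, 0);
        vpe->vpe_proxy_event = -1;      /* GICv4.0 only: no proxy slot yet */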
4448 struct its_vpe *vpe = irq_data_get_irq_chip_data(data);
4450 BUG_ON(vm != vpe->its_vm);
4453 its_vpe_teardown(vpe);
4520 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4532 vpe->col_idx = cpumask_first(cpu_online_mask);
4538 its_send_vmapp(its, vpe, true);
4539 its_send_vinvall(its, vpe);
4542 irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
4550 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4564 its_send_vmapp(its, vpe, false);
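Lines 4520-4542 and 4550-4564 are the vPE irqdomain activate/deactivate callbacks: activation parks the vPE on an online CPU and issues VMAPP(V=1) plus VINVALL on every GICv4-capable ITS, deactivation issues VMAPP(V=0) on the same set. A hedged sketch of the activate loop (is_v4() and the its_nodes list are driver internals not shown in this listing):

        vpe->col_idx = cpumask_first(cpu_online_mask);

        list_for_each_entry(its, &its_nodes, entry) {
                if (!is_v4(its))
                        continue;

                its_send_vmapp(its, vpe, true); /* map the vPE on this ITS */
                its_send_vinvall(its, vpe);     /* invalidate any cached config */
        }

        irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));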