Lines matching defs:vcpu

All hits below fall in KVM's GICv2 MMIO emulation (the vgic-mmio-v2 distributor and CPU-interface handlers); a short illustrative sketch follows each group of matches.
25 static unsigned long vgic_mmio_read_v2_misc(struct kvm_vcpu *vcpu,
28 struct vgic_dist *vgic = &vcpu->kvm->arch.vgic;
38 value |= (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
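The misc read handler (lines 25-38) synthesizes GICD_TYPER, whose 3-bit CPUNumber field at bits [7:5] holds the number of implemented CPU interfaces minus one; that is what the `(online_vcpus - 1) << 5` hit computes. A minimal standalone sketch of that encoding, with illustrative names (typer_cpu_field is not a kernel symbol):

    #include <stdint.h>
    #include <stdio.h>

    /* GICD_TYPER's CPUNumber field (bits [7:5]) encodes the number of
     * implemented CPU interfaces minus one; GICv2 caps this at 8 CPUs. */
    static uint32_t typer_cpu_field(unsigned int nr_vcpus)
    {
        return ((nr_vcpus - 1) & 0x7) << 5;
    }

    int main(void)
    {
        printf("CPUNumber bits for 4 vCPUs: 0x%x\n",
               (unsigned)typer_cpu_field(4));   /* prints 0x60 */
        return 0;
    }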
52 static void vgic_mmio_write_v2_misc(struct kvm_vcpu *vcpu,
56 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
63 vgic_kick_vcpus(vcpu->kvm);
72 static int vgic_mmio_uaccess_write_v2_misc(struct kvm_vcpu *vcpu,
78 if (val != vgic_mmio_read_v2_misc(vcpu, addr, len))
90 vcpu->kvm->arch.vgic.v2_groups_user_writable = true;
94 vgic_mmio_write_v2_misc(vcpu, addr, len, val);
98 static int vgic_mmio_uaccess_write_v2_group(struct kvm_vcpu *vcpu,
102 if (vcpu->kvm->arch.vgic.v2_groups_user_writable)
103 vgic_mmio_write_group(vcpu, addr, len, val);
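The uaccess paths gate state restored from userspace: a GICD_CTLR write from userspace sets v2_groups_user_writable (line 90), and only then do userspace writes to the group registers take effect (lines 102-103). A toy standalone model of that gate, with simplified types (dist_state and friends are illustrative, not kernel structures):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Toy model: group-register writes from userspace are dropped until
     * GICD_CTLR has been restored, which sets the gate flag. */
    struct dist_state {
        bool groups_user_writable;
        uint32_t igroupr;
    };

    static void uaccess_write_group(struct dist_state *d, uint32_t val)
    {
        if (d->groups_user_writable)
            d->igroupr = val;           /* otherwise silently ignored */
    }

    int main(void)
    {
        struct dist_state d = { 0 };

        uaccess_write_group(&d, 0xff);  /* dropped: CTLR not restored yet */
        d.groups_user_writable = true;  /* as after a CTLR uaccess write */
        uaccess_write_group(&d, 0xff);  /* now takes effect */
        printf("IGROUPR = 0x%x\n", (unsigned)d.igroupr);
        return 0;
    }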
117 struct kvm_vcpu *vcpu;
134 kvm_for_each_vcpu(c, vcpu, source_vcpu->kvm) {
140 irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid);
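The SGIR write handler (lines 117-140) walks every vCPU of the source's VM and raises the SGI on each one named in the target list. The field layout is architectural for GICv2 (SGI number in bits [3:0], CPU target list in bits [23:16]); the decode loop below is a standalone sketch that ignores the TargetListFilter variants the real handler also supports:

    #include <stdio.h>

    /* Architectural GICD_SGIR layout: SGI number in bits [3:0], CPU
     * target list in bits [23:16]. TargetListFilter is omitted here. */
    int main(void)
    {
        unsigned int sgir = (0x5u << 16) | 3;   /* SGI 3 -> CPUs 0 and 2 */
        unsigned int intid = sgir & 0xf;
        unsigned int targets = (sgir >> 16) & 0xff;

        for (int c = 0; c < 8; c++)             /* GICv2: at most 8 CPUs */
            if (targets & (1u << c))
                printf("make SGI %u pending on vcpu %d\n", intid, c);
        return 0;
    }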
151 static unsigned long vgic_mmio_read_target(struct kvm_vcpu *vcpu,
159 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
163 vgic_put_irq(vcpu->kvm, irq);
169 static void vgic_mmio_write_target(struct kvm_vcpu *vcpu,
174 u8 cpu_mask = GENMASK(atomic_read(&vcpu->kvm->online_vcpus) - 1, 0);
183 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i);
190 irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target);
193 vgic_put_irq(vcpu->kvm, irq);
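GICD_ITARGETSR holds one target byte per interrupt. On write, the byte is masked down to vCPUs that actually exist (the GENMASK() at line 174) and the interrupt is routed to the lowest surviving target (line 190). A compilable model of that masking, assuming the same fall-back-to-vCPU-0 behaviour when no target survives:

    #include <stdint.h>
    #include <stdio.h>
    #include <strings.h>    /* ffs() */

    int main(void)
    {
        unsigned int online_vcpus = 2;
        uint8_t cpu_mask = (1u << online_vcpus) - 1;  /* GENMASK(n - 1, 0) */
        uint8_t written = 0x0c;                       /* asks for CPUs 2 and 3 */
        uint8_t effective = written & cpu_mask;       /* both masked away: 0 */
        /* Lowest set bit picks the target; with none left, fall back to 0. */
        int target = effective ? ffs(effective) - 1 : 0;

        printf("effective targets 0x%x, routed to vcpu %d\n",
               (unsigned)effective, target);
        return 0;
    }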
197 static unsigned long vgic_mmio_read_sgipend(struct kvm_vcpu *vcpu,
205 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
209 vgic_put_irq(vcpu->kvm, irq);
214 static void vgic_mmio_write_sgipendc(struct kvm_vcpu *vcpu,
223 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
232 vgic_put_irq(vcpu->kvm, irq);
236 static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
245 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
253 vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
257 vgic_put_irq(vcpu->kvm, irq);
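GICD_SPENDSGIR/GICD_CPENDSGIR keep a per-source-CPU pending byte for each SGI: the set handler ORs source bits in and queues the interrupt (lines 236-257, queueing at 253), the clear handler ANDs them out (lines 214-232). A toy model of the bitmap arithmetic only, with queueing omitted:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t source = 0;             /* one byte per SGI, bit per source CPU */

        source |= 1u << 1;              /* SPENDSGIR: pending from CPU1 */
        source |= 1u << 3;              /* ...and from CPU3 */
        source &= (uint8_t)~(1u << 1);  /* CPENDSGIR: clear CPU1's copy */

        printf("still pending from sources: 0x%x\n", (unsigned)source);
        return 0;
    }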
264 static unsigned long vgic_mmio_read_vcpuif(struct kvm_vcpu *vcpu,
270 vgic_get_vmcr(vcpu, &vmcr);
311 static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu,
317 vgic_get_vmcr(vcpu, &vmcr);
348 vgic_set_vmcr(vcpu, &vmcr);
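The GICC (vcpuif) handlers never touch interrupt state directly: reads unpack a cached VMCR fetched with vgic_get_vmcr() (lines 270 and 317), and writes repack it via vgic_set_vmcr() (line 348). A sketch of the pattern with a deliberately tiny stand-in struct; the kernel packs many more fields, and differently:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in VMCR; illustrative only. */
    struct vmcr {
        uint32_t pmr;
        uint32_t bpr;
    };

    static uint32_t read_gicc_pmr(const struct vmcr *v)
    {
        return v->pmr;              /* read path: unpack cached state */
    }

    static void write_gicc_pmr(struct vmcr *v, uint32_t val)
    {
        v->pmr = val & 0xff;        /* write path: repack; PMR is 8-bit */
    }

    int main(void)
    {
        struct vmcr v = { .pmr = 0xf0, .bpr = 2 };

        write_gicc_pmr(&v, 0x80);
        printf("GICC_PMR reads back 0x%x\n", (unsigned)read_gicc_pmr(&v));
        return 0;
    }

Going through the VMCR means the handlers behave the same whether the value is later loaded into real hardware or consumed by userspace save/restore.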
351 static unsigned long vgic_mmio_read_apr(struct kvm_vcpu *vcpu,
362 return vcpu->arch.vgic_cpu.vgic_v2.vgic_apr;
364 struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
366 if (n > vgic_v3_max_apr_idx(vcpu))
376 static void vgic_mmio_write_apr(struct kvm_vcpu *vcpu,
388 vcpu->arch.vgic_cpu.vgic_v2.vgic_apr = val;
390 struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
392 if (n > vgic_v3_max_apr_idx(vcpu))
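The active-priority handlers pick their backing store by host hardware generation: a single vgic_apr word on GICv2 hosts (lines 362 and 388), or the vgic_v3 AP1Rn array on GICv3 hosts, where out-of-range indices are rejected against vgic_v3_max_apr_idx() (lines 366 and 392). A standalone model of the bounded-array case; MAX_APR_IDX stands in for the kernel helper:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_APR_IDX 3           /* stand-in for vgic_v3_max_apr_idx() */

    static uint32_t ap1r[MAX_APR_IDX + 1];

    static void write_apr(unsigned int n, uint32_t val)
    {
        if (n > MAX_APR_IDX)        /* out-of-range writes are ignored */
            return;
        ap1r[n] = val;
    }

    int main(void)
    {
        write_apr(1, 0xaa);
        write_apr(7, 0xbb);         /* silently dropped */
        printf("AP1R1 = 0x%x\n", (unsigned)ap1r[1]);
        return 0;
    }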
491 struct kvm_vcpu *vcpu;
499 vcpu = reg_attr.vcpu;
521 region = vgic_get_mmio_region(vcpu, &iodev, addr, sizeof(u32));
528 int vgic_v2_cpuif_uaccess(struct kvm_vcpu *vcpu, bool is_write,
537 return vgic_uaccess(vcpu, &dev, is_write, offset, val);
540 int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
549 return vgic_uaccess(vcpu, &dev, is_write, offset, val);
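Both uaccess entry points share one shape: stack-allocate a vgic_io_device, point it at the matching register table (CPU interface vs. distributor), and hand off to vgic_uaccess() (lines 537 and 549). A hedged sketch of that dispatch pattern, with all names illustrative rather than kernel symbols:

    #include <stdio.h>

    /* Each entry point stamps a device descriptor with its register
     * table and forwards to one common access routine. */
    struct io_device {
        const char *regions;        /* stands in for the MMIO region table */
    };

    static int uaccess(struct io_device *dev, int is_write, int offset)
    {
        printf("%s offset 0x%x via %s table\n",
               is_write ? "write" : "read", offset, dev->regions);
        return 0;
    }

    static int dist_uaccess(int is_write, int offset)
    {
        struct io_device dev = { .regions = "GICv2 distributor" };

        return uaccess(&dev, is_write, offset);
    }

    int main(void)
    {
        return dist_uaccess(0, 0x8);    /* e.g. read GICD_IIDR */
    }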