Lines matching refs:arch
6 * Derived from arch/arm/kvm/guest.c:
137 return &vcpu->arch.ctxt.regs.regs[off];
140 return &vcpu->arch.ctxt.regs.sp;
143 return &vcpu->arch.ctxt.regs.pc;
146 return &vcpu->arch.ctxt.regs.pstate;
149 return __ctxt_sys_reg(&vcpu->arch.ctxt, SP_EL1);
152 return __ctxt_sys_reg(&vcpu->arch.ctxt, ELR_EL1);
155 return __ctxt_sys_reg(&vcpu->arch.ctxt, SPSR_EL1);
158 return &vcpu->arch.ctxt.spsr_abt;
161 return &vcpu->arch.ctxt.spsr_und;
164 return &vcpu->arch.ctxt.spsr_irq;
167 return &vcpu->arch.ctxt.spsr_fiq;
173 return &vcpu->arch.ctxt.fp_regs.vregs[off];
176 return &vcpu->arch.ctxt.fp_regs.fpsr;
179 return &vcpu->arch.ctxt.fp_regs.fpcr;
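
The returns above all come from one accessor in this file that translates a core-register offset into a pointer at the backing field in vcpu->arch.ctxt. Below is a minimal, self-contained userspace model of that dispatch shape. It reuses the real arm64 user_pt_regs field layout, but the context struct, byte-offset parameter, and surrounding names are simplifications; the kernel's core_reg_addr() dispatches on u32-indexed KVM_REG_ARM_CORE offsets instead.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Same field layout as the arm64 user_pt_regs indexed above. */
struct user_pt_regs {
	uint64_t regs[31];	/* x0..x30 */
	uint64_t sp;
	uint64_t pc;
	uint64_t pstate;
};

struct cpu_ctxt {		/* simplified stand-in for vcpu->arch.ctxt */
	struct user_pt_regs regs;
	uint64_t spsr_abt, spsr_und, spsr_irq, spsr_fiq;
};

/* Translate a byte offset into user_pt_regs to the backing field. */
static void *core_reg_addr(struct cpu_ctxt *ctxt, size_t off)
{
	if (off % sizeof(uint64_t))
		return NULL;	/* misaligned: reject */
	if (off < offsetof(struct user_pt_regs, sp))
		return &ctxt->regs.regs[off / sizeof(uint64_t)];
	if (off == offsetof(struct user_pt_regs, sp))
		return &ctxt->regs.sp;
	if (off == offsetof(struct user_pt_regs, pc))
		return &ctxt->regs.pc;
	if (off == offsetof(struct user_pt_regs, pstate))
		return &ctxt->regs.pstate;
	return NULL;		/* unknown offset: reject */
}

int main(void)
{
	struct cpu_ctxt ctxt = { .regs.pc = 0x80000 };
	uint64_t *pc = core_reg_addr(&ctxt, offsetof(struct user_pt_regs, pc));

	printf("pc = 0x%llx\n", (unsigned long long)*pc);
	return 0;
}
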
320 if (WARN_ON(!sve_vl_valid(vcpu->arch.sve_max_vl)))
347 if (WARN_ON(vcpu->arch.sve_state))
376 /* vcpu->arch.sve_state will be alloc'd by kvm_vcpu_finalize_sve() */
377 vcpu->arch.sve_max_vl = sve_vl_from_vq(max_vq);
406 /* Bounds of a single SVE register slice within vcpu->arch.sve_state */
502 if (copy_to_user(uptr, vcpu->arch.sve_state + region.koffset,
528 if (copy_from_user(vcpu->arch.sve_state + region.koffset, uptr,
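
The sve_max_vl/sve_state matches size, allocate, and copy slices of a flat per-vCPU SVE buffer, with copy_to_user()/copy_from_user() operating at a kernel offset (koffset) within it. The sketch below only illustrates the bounds computation for one such slice, under an assumed, simplified packed layout of 32 Z registers of vl bytes each; the kernel's real sve_state layout (Z registers, then P registers, then FFR) and its region struct are more involved.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* One register slice inside a flat SVE buffer, loosely modelling the
 * region/koffset pair used by the copy_to_user()/copy_from_user()
 * matches above. */
struct sve_region {
	size_t koffset;	/* byte offset into the sve_state buffer */
	size_t size;	/* bytes in this slice */
};

/* Bounds of Z register zn, assuming the simplified packed layout. */
static int sve_zreg_region(struct sve_region *r, unsigned int zn, size_t vl)
{
	if (zn >= 32)
		return -1;
	r->koffset = (size_t)zn * vl;
	r->size = vl;
	return 0;
}

int main(void)
{
	unsigned char sve_state[32 * 64] = { 0 };	/* vl = 64 bytes (512-bit) */
	struct sve_region r;

	if (sve_zreg_region(&r, 3, 64) == 0) {
		memset(sve_state + r.koffset, 0xff, r.size);	/* touch Z3's slice */
		printf("Z3 at offset %zu, %zu bytes\n", r.koffset, r.size);
	}
	return 0;
}
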
764 /* We currently use nothing arch-specific in upper 32 bits */
784 /* We currently use nothing arch-specific in upper 32 bits */
817 events->exception.serror_pending = !!(vcpu->arch.hcr_el2 & HCR_VSE);
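
The serror_pending match derives a boolean from the HCR_EL2.VSE bit (bit 8, virtual SError pending, in the ARMv8 architecture). A standalone model of that extraction and the !! idiom, with an illustrative events struct in place of the kernel's:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* HCR_EL2.VSE: virtual SError pending, bit 8. */
#define HCR_VSE (UINT64_C(1) << 8)

struct vcpu_events {		/* stand-in for the events struct above */
	bool serror_pending;
};

int main(void)
{
	uint64_t hcr_el2 = HCR_VSE;	/* pretend a virtual SError is injected */
	struct vcpu_events ev;

	/* !! collapses the masked bit to a clean 0/1, the same idiom as
	 * the matched line. */
	ev.serror_pending = !!(hcr_el2 & HCR_VSE);
	printf("serror_pending = %d\n", ev.serror_pending);
	return 0;
}
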
930 vcpu->arch.external_debug_state = dbg->arch;
950 mutex_lock(&vcpu->kvm->arch.config_lock);
952 mutex_unlock(&vcpu->kvm->arch.config_lock);
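
The last matches copy userspace-supplied debug state into the vCPU and take the per-VM kvm->arch.config_lock around an update (the guarded code itself falls between the two matched lines and is not shown). A toy pthreads model of that lock/update/unlock pattern; the struct and field names here are illustrative, not the kernel's. Build with -pthread.

#include <pthread.h>
#include <stdio.h>

/* Per-VM lock serializing configuration updates across vCPUs. */
struct vm_arch {
	pthread_mutex_t config_lock;
	int config_value;
};

static void set_config(struct vm_arch *arch, int val)
{
	pthread_mutex_lock(&arch->config_lock);
	arch->config_value = val;	/* VM-wide state changes only under the lock */
	pthread_mutex_unlock(&arch->config_lock);
}

int main(void)
{
	struct vm_arch arch = { .config_lock = PTHREAD_MUTEX_INITIALIZER };

	set_config(&arch, 1);
	printf("config_value = %d\n", arch.config_value);
	return 0;
}
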