Lines matching refs:arch: uses of the arch-specific fields of struct kvm and struct kvm_vcpu, apparently from arch/powerpc/kvm/powerpc.c. Short sketches of four recurring patterns follow the listing.
52 return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
144 struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
186 if (vcpu->arch.intr_msr & MSR_LE)
188 if (shared_big_endian != vcpu->arch.shared_big_endian)
190 vcpu->arch.shared_big_endian = shared_big_endian;
199 vcpu->arch.disable_kernel_nx = true;
203 vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
204 vcpu->arch.magic_page_ea = param2 & ~0xfffULL;
211 if ((vcpu->arch.magic_page_pa & 0xf000) !=
212 ((ulong)vcpu->arch.shared & 0xf000)) {
213 void *old_shared = vcpu->arch.shared;
214 ulong shared = (ulong)vcpu->arch.shared;
218 shared |= vcpu->arch.magic_page_pa & 0xf000;
221 vcpu->arch.shared = new_shared;
258 if (!vcpu->arch.pvr)
262 if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
266 if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
277 vcpu->arch.sane = r;
326 vcpu->arch.vaddr_accessed, dsisr);
351 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
357 if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr)
358 r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr,
378 void *magic = vcpu->arch.shared;
394 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
400 if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr)
401 rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr,
424 void *magic = vcpu->arch.shared;
469 kvm->arch.kvm_ops = kvm_ops;
499 module_put(kvm->arch.kvm_ops->owner);
595 if (kvm->arch.emul_smt_mode > 1)
596 r = kvm->arch.emul_smt_mode;
598 r = kvm->arch.smt_mode;
768 vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
778 hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
779 vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
782 mutex_init(&vcpu->arch.exit_timing_lock);
792 rcuwait_init(&vcpu->arch.wait);
793 vcpu->arch.waitp = &vcpu->arch.wait;
808 hrtimer_cancel(&vcpu->arch.dec_timer);
810 switch (vcpu->arch.irq_type) {
812 kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
845 mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
854 vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
877 if (kvm->arch.kvm_ops->irq_bypass_add_producer)
878 return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);
890 if (kvm->arch.kvm_ops->irq_bypass_del_producer)
891 kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
930 int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
931 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
949 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
966 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
986 int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
987 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1055 vcpu->arch.mmio_vmx_offset);
1056 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1071 vcpu->arch.mmio_vmx_offset);
1072 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1087 vcpu->arch.mmio_vmx_offset);
1088 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1103 vcpu->arch.mmio_vmx_offset);
1104 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1153 if (!vcpu->arch.mmio_host_swabbed) {
1170 if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
1173 if (vcpu->arch.mmio_sign_extend) {
1189 switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
1191 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
1194 if (vcpu->kvm->arch.kvm_ops->giveup_ext)
1195 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);
1197 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
1201 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
1204 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
1205 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
1210 if (vcpu->kvm->arch.kvm_ops->giveup_ext)
1211 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX);
1213 if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD)
1215 else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD)
1217 else if (vcpu->arch.mmio_copy_type ==
1220 else if (vcpu->arch.mmio_copy_type ==
1227 if (vcpu->kvm->arch.kvm_ops->giveup_ext)
1228 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC);
1230 if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD)
1232 else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD)
1234 else if (vcpu->arch.mmio_copy_type ==
1237 else if (vcpu->arch.mmio_copy_type ==
1246 kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr,
1273 run->mmio.phys_addr = vcpu->arch.paddr_accessed;
1277 vcpu->arch.io_gpr = rt;
1278 vcpu->arch.mmio_host_swabbed = host_swabbed;
1281 vcpu->arch.mmio_sign_extend = sign_extend;
1323 if (vcpu->arch.mmio_vsx_copy_nums > 4)
1326 while (vcpu->arch.mmio_vsx_copy_nums) {
1333 vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
1335 vcpu->arch.mmio_vsx_copy_nums--;
1336 vcpu->arch.mmio_vsx_offset++;
1360 run->mmio.phys_addr = vcpu->arch.paddr_accessed;
1366 if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
1408 int copy_type = vcpu->arch.mmio_copy_type;
1414 kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
1431 kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
1463 vcpu->arch.io_gpr = rs;
1466 if (vcpu->arch.mmio_vsx_copy_nums > 4)
1469 while (vcpu->arch.mmio_vsx_copy_nums) {
1479 vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
1481 vcpu->arch.mmio_vsx_copy_nums--;
1482 vcpu->arch.mmio_vsx_offset++;
1494 vcpu->arch.paddr_accessed += run->mmio.len;
1497 emulated = kvmppc_handle_vsx_load(vcpu, vcpu->arch.io_gpr,
1498 run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
1501 vcpu->arch.io_gpr, run->mmio.len, 1);
1529 if (vcpu->arch.mmio_vmx_copy_nums > 2)
1532 while (vcpu->arch.mmio_vmx_copy_nums) {
1539 vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
1540 vcpu->arch.mmio_vmx_copy_nums--;
1541 vcpu->arch.mmio_vmx_offset++;
1554 kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1572 kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1590 kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1608 kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1626 if (vcpu->arch.mmio_vmx_copy_nums > 2)
1629 vcpu->arch.io_gpr = rs;
1631 while (vcpu->arch.mmio_vmx_copy_nums) {
1632 switch (vcpu->arch.mmio_copy_type) {
1659 vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
1660 vcpu->arch.mmio_vmx_copy_nums--;
1661 vcpu->arch.mmio_vmx_offset++;
1673 vcpu->arch.paddr_accessed += run->mmio.len;
1677 vcpu->arch.io_gpr, run->mmio.len, 1);
1680 vcpu->arch.io_gpr, run->mmio.len, 1);
1722 val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
1729 val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
1732 val = get_reg_val(reg->id, vcpu->arch.vrsave);
1773 vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
1780 vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
1787 vcpu->arch.vrsave = set_reg_val(reg->id, val);
1811 if (vcpu->arch.mmio_vsx_copy_nums > 0) {
1812 vcpu->arch.mmio_vsx_copy_nums--;
1813 vcpu->arch.mmio_vsx_offset++;
1816 if (vcpu->arch.mmio_vsx_copy_nums > 0) {
1825 if (vcpu->arch.mmio_vmx_copy_nums > 0) {
1826 vcpu->arch.mmio_vmx_copy_nums--;
1827 vcpu->arch.mmio_vmx_offset++;
1830 if (vcpu->arch.mmio_vmx_copy_nums > 0) {
1838 } else if (vcpu->arch.osi_needed) {
1844 vcpu->arch.osi_needed = 0;
1845 } else if (vcpu->arch.hcall_needed) {
1851 vcpu->arch.hcall_needed = 0;
1853 } else if (vcpu->arch.epr_needed) {
1855 vcpu->arch.epr_needed = 0;
1908 vcpu->arch.osi_enabled = true;
1912 vcpu->arch.papr_enabled = true;
1917 vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
1919 vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
1924 vcpu->arch.watchdog_enabled = true;
2012 vcpu->kvm->arch.fwnmi_enabled = true;
2029 if (kvm->arch.mpic)
2033 if (kvm->arch.xics || kvm->arch.xive)
2165 ret = ret || (kvm->arch.mpic != NULL);
2168 ret = ret || (kvm->arch.xics != NULL);
2169 ret = ret || (kvm->arch.xive != NULL);
2208 set_bit(hcall / 4, kvm->arch.enabled_hcalls);
2210 clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
2219 if (kvm->arch.kvm_ops->set_smt_mode)
2220 r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
2227 !kvm->arch.kvm_ops->enable_nested)
2229 r = kvm->arch.kvm_ops->enable_nested(kvm);
2235 if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_svm)
2237 r = kvm->arch.kvm_ops->enable_svm(kvm);
2241 if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_dawr1)
2243 r = kvm->arch.kvm_ops->enable_dawr1(kvm);
2438 r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
2454 if (!kvm->arch.kvm_ops->configure_mmu)
2459 r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
2467 if (!kvm->arch.kvm_ops->get_rmmu_info)
2469 r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info);
2486 if (!kvm->arch.kvm_ops->svm_off)
2489 r = kvm->arch.kvm_ops->svm_off(kvm);
2494 r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
2543 if (vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs)
2544 vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs(vcpu, debugfs_dentry);
2549 if (kvm->arch.kvm_ops->create_vm_debugfs)
2550 kvm->arch.kvm_ops->create_vm_debugfs(kvm);
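
The magic-page branch (source lines 203-221 above) keeps the 4K shared struct in the same 4K slot of a 64K window as the guest-chosen magic page: when address bits 12-15 disagree, the struct is copied so they line up. A minimal user-space model of the realignment; the buffer, names, and constants here are stand-ins for the kernel's, which does this under CONFIG_PPC_BOOK3S_64 with 64K host pages:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define WIN_MASK  (~0xffffUL)   /* 64K window, like a 64K host page */

static char window[0x10000] __attribute__((aligned(0x10000)));

/* Move the 4K shared block so that its address bits 12-15 match the
 * guest-chosen magic page address (cf. source lines 211-221). */
static void *realign_shared(void *shared, unsigned long magic_page_pa)
{
        if (((unsigned long)shared & 0xf000) == (magic_page_pa & 0xf000))
                return shared;          /* already in the right slot */

        unsigned long dst = ((unsigned long)shared & WIN_MASK)
                            | (magic_page_pa & 0xf000);
        memmove((void *)dst, shared, 0x1000);   /* relocate the struct */
        return (void *)dst;
}

int main(void)
{
        void *shared = window;                   /* slot 0 of the window */
        shared = realign_shared(shared, 0x3000); /* guest picked slot 3  */
        printf("shared now at offset 0x%lx\n",
               (unsigned long)shared - (unsigned long)window);
        return 0;
}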
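
The decrementer setup (source lines 768-779) is the standard embedded-timer idiom: the hrtimer is a member of struct kvm_vcpu, so the expiry callback recovers the owning vcpu with container_of. A freestanding sketch of the idiom; the struct and function names are stand-ins:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct timer { long expires; };

struct vcpu {
        int id;
        struct timer dec_timer;   /* embedded, as in struct kvm_vcpu */
};

/* Timer callback: given only the timer pointer, find the vcpu. */
static void decrementer_wakeup(struct timer *t)
{
        struct vcpu *v = container_of(t, struct vcpu, dec_timer);
        printf("waking vcpu %d\n", v->id);
}

int main(void)
{
        struct vcpu v = { .id = 3 };
        decrementer_wakeup(&v.dec_timer);
        return 0;
}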
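
The large MMIO cluster (source lines 1153-1246) completes a guest load after userspace has emulated it: the target register number and its class bits were stashed in vcpu->arch.io_gpr before the exit (source line 1277), and on re-entry the data is byte-swapped and sign-extended as flagged, then routed to the matching register file. A simplified user-space model of that dispatch; the mask values and the two register classes are invented for illustration:

#include <stdint.h>
#include <stdio.h>
#include <byteswap.h>

#define REG_MASK      0x001fu   /* stand-in for KVM_MMIO_REG_MASK     */
#define REG_EXT_MASK  0xffe0u   /* stand-in for KVM_MMIO_REG_EXT_MASK */
#define REG_GPR       0x0000u   /* class bits: general-purpose regs   */
#define REG_FPR       0x0020u   /* class bits: floating-point regs    */

struct vcpu_model {
        uint64_t gpr[32], fpr[32];
        unsigned int io_gpr;        /* class bits | register number    */
        unsigned int mmio_len;      /* this sketch handles only len 4  */
        int mmio_host_swabbed;      /* guest/host endianness differs   */
        int mmio_sign_extend;       /* sign-extending load, e.g. lwa   */
};

static void complete_mmio_load(struct vcpu_model *vcpu, uint64_t data)
{
        if (vcpu->mmio_host_swabbed && vcpu->mmio_len == 4)
                data = bswap_32((uint32_t)data);
        if (vcpu->mmio_sign_extend && vcpu->mmio_len == 4)
                data = (uint64_t)(int64_t)(int32_t)data;

        switch (vcpu->io_gpr & REG_EXT_MASK) {  /* dispatch on class bits */
        case REG_GPR:
                vcpu->gpr[vcpu->io_gpr & REG_MASK] = data;
                break;
        case REG_FPR:
                vcpu->fpr[vcpu->io_gpr & REG_MASK] = data;
                break;
        }
}

int main(void)
{
        struct vcpu_model v = { .io_gpr = REG_GPR | 5,
                                .mmio_len = 4, .mmio_sign_extend = 1 };
        complete_mmio_load(&v, 0xffffffffu);    /* a 32-bit load of -1 */
        printf("r5 = %lld\n", (long long)(int64_t)v.gpr[5]);
        return 0;
}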
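
The hcall-enable bits (source lines 2208-2210) rely on PAPR hypercall tokens being multiples of 4, so token / 4 gives a dense bitmap index. A stand-alone sketch with an assumed token range:

#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG  (sizeof(unsigned long) * CHAR_BIT)
#define MAX_HCALL      0x450            /* assumed upper bound */

static unsigned long enabled_hcalls[MAX_HCALL / 4 / BITS_PER_LONG + 1];

static void set_hcall_enable(unsigned int hcall, int enable)
{
        unsigned int bit = hcall / 4;   /* tokens are multiples of 4 */
        unsigned long mask = 1UL << (bit % BITS_PER_LONG);

        if (enable)
                enabled_hcalls[bit / BITS_PER_LONG] |= mask;
        else
                enabled_hcalls[bit / BITS_PER_LONG] &= ~mask;
}

int main(void)
{
        set_hcall_enable(0x08, 1);      /* 0x08 is H_ENTER's token */
        printf("H_ENTER enabled: %d\n",
               !!(enabled_hcalls[(0x08 / 4) / BITS_PER_LONG]
                  & (1UL << ((0x08 / 4) % BITS_PER_LONG))));
        return 0;
}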