Lines matching refs:arch (cross-reference hits for 'arch'; judging by the symbols, apparently from arch/powerpc/kvm/powerpc.c in the Linux kernel)

51 	return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
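
The hit at 51 is the whole body of what looks like kvm_arch_vcpu_runnable(): a vCPU is runnable when it has an exception queued or a KVM request pending. A minimal sketch, assuming the usual powerpc.c shape:

    int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
    {
    	/* runnable if an exception is queued or a vcpu request is pending */
    	return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
    }
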
143 struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
185 if (vcpu->arch.intr_msr & MSR_LE)
187 if (shared_big_endian != vcpu->arch.shared_big_endian)
189 vcpu->arch.shared_big_endian = shared_big_endian;
198 vcpu->arch.disable_kernel_nx = true;
202 vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
203 vcpu->arch.magic_page_ea = param2 & ~0xfffULL;
210 if ((vcpu->arch.magic_page_pa & 0xf000) !=
211 ((ulong)vcpu->arch.shared & 0xf000)) {
212 void *old_shared = vcpu->arch.shared;
213 ulong shared = (ulong)vcpu->arch.shared;
217 shared |= vcpu->arch.magic_page_pa & 0xf000;
220 vcpu->arch.shared = new_shared;
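
The hits at 198-220 belong to the paravirt hypercall that maps the guest's magic page. The realignment at 210-220 matters when the host page size is larger than 4K: the kernel copy of the shared struct is moved within its page so that its 0xf000 address bits match the guest's magic-page address, keeping guest and host views at the same 4K slot. A hedged sketch of that block, with the surrounding declarations assumed:

    /* keep the shared struct at the same 4K slot as the guest's magic page */
    if ((vcpu->arch.magic_page_pa & 0xf000) !=
        ((ulong)vcpu->arch.shared & 0xf000)) {
    	void *old_shared = vcpu->arch.shared;
    	ulong shared = (ulong)vcpu->arch.shared;
    	void *new_shared;

    	shared &= PAGE_MASK;
    	shared |= vcpu->arch.magic_page_pa & 0xf000;
    	new_shared = (void *)shared;
    	memcpy(new_shared, old_shared, 0x1000);
    	vcpu->arch.shared = new_shared;
    }
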
258 if (!vcpu->arch.pvr)
262 if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
266 if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
277 vcpu->arch.sane = r;
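
258-277 look like the vCPU sanity check: a PVR must have been set, PAPR mode is only supported on Book3S-64, and HV KVM only runs PAPR guests; the verdict is cached in vcpu->arch.sane. Hedged reconstruction:

    int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
    {
    	int r = false;

    	/* must have a valid PVR */
    	if (!vcpu->arch.pvr)
    		goto out;

    	/* PAPR only works with Book3S-64 */
    	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
    		goto out;

    	/* HV KVM can only do PAPR mode for now */
    	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
    		goto out;

    	r = true;

    out:
    	vcpu->arch.sane = r;
    	return r ? 0 : -EINVAL;
    }
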
327 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
333 if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr)
334 r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr,
354 void *magic = vcpu->arch.shared;
370 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
376 if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr)
377 rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr,
400 void *magic = vcpu->arch.shared;
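
327-400 are the guest store/load helpers (kvmppc_st()/kvmppc_ld(), by the look of the kvm_ops store_to_eaddr/load_from_eaddr indirection). Both special-case the magic page: once the effective address translates to it, the access is serviced from the kernel copy at vcpu->arch.shared instead of ordinary guest memory. Hedged sketch of the load-side override, assuming a pte filled in by kvmppc_xlate():

    ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;

    /* magic page override: read straight out of the shared struct */
    if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
        ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
        !(kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))) {
    	void *magic = vcpu->arch.shared;
    	magic += pte.eaddr & 0xfff;
    	memcpy(ptr, magic, size);
    	return EMULATE_DONE;
    }
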
458 kvm->arch.kvm_ops = kvm_ops;
493 module_put(kvm->arch.kvm_ops->owner);
583 if (kvm->arch.emul_smt_mode > 1)
584 r = kvm->arch.emul_smt_mode;
586 r = kvm->arch.smt_mode;
733 vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
743 hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
744 vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
745 vcpu->arch.dec_expires = get_tb();
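
733-745 implement the guest decrementer on top of a host hrtimer: the timer is armed when the guest programs DEC, and its expiry handler recovers the owning vcpu with container_of() and kicks it. Sketch of the wakeup side, hedged:

    static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
    {
    	struct kvm_vcpu *vcpu;

    	/* recover the vcpu embedding this timer and deliver the DEC */
    	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
    	kvmppc_decrementer_func(vcpu);

    	return HRTIMER_NORESTART;
    }
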
748 mutex_init(&vcpu->arch.exit_timing_lock);
758 vcpu->arch.waitp = &vcpu->wait;
774 hrtimer_cancel(&vcpu->arch.dec_timer);
778 switch (vcpu->arch.irq_type) {
780 kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
813 mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
822 vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
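
813/822 bracket a scheduling slice: VRSAVE is not maintained by the host kernel, so the guest's value is loaded into the SPR when the vcpu is scheduled in and read back when it is scheduled out. This pairing is BookE-specific in mainline (Book3S keeps VRSAVE with the Altivec state), so take the sketch as approximate:

    void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
    {
    #ifdef CONFIG_BOOKE
    	/* vrsave isn't used by Linux, but may be used by the guest */
    	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
    #endif
    	kvmppc_core_vcpu_load(vcpu, cpu);
    }

kvm_arch_vcpu_put() mirrors this with vcpu->arch.vrsave = mfspr(SPRN_VRSAVE).
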
845 if (kvm->arch.kvm_ops->irq_bypass_add_producer)
846 return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);
858 if (kvm->arch.kvm_ops->irq_bypass_del_producer)
859 kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
898 int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
899 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
917 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
934 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
954 int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
955 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1023 vcpu->arch.mmio_vmx_offset);
1024 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1039 vcpu->arch.mmio_vmx_offset);
1040 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1055 vcpu->arch.mmio_vmx_offset);
1056 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1071 vcpu->arch.mmio_vmx_offset);
1072 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1123 if (!vcpu->arch.mmio_host_swabbed) {
1140 if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
1143 if (vcpu->arch.mmio_sign_extend) {
1159 switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
1161 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
1164 if (vcpu->kvm->arch.kvm_ops->giveup_ext)
1165 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);
1167 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
1171 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
1174 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
1175 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
1180 if (vcpu->kvm->arch.kvm_ops->giveup_ext)
1181 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX);
1183 if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD)
1185 else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD)
1187 else if (vcpu->arch.mmio_copy_type ==
1190 else if (vcpu->arch.mmio_copy_type ==
1197 if (vcpu->kvm->arch.kvm_ops->giveup_ext)
1198 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC);
1200 if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD)
1202 else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD)
1204 else if (vcpu->arch.mmio_copy_type ==
1207 else if (vcpu->arch.mmio_copy_type ==
1216 kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr,
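
1123-1216 are the MMIO load completion path: the raw bytes userspace left in run->mmio.data are byte-swapped when host and guest endianness disagree, optionally single-precision- or sign-extended, and then routed by the class bits of io_gpr into a GPR, FPR, QPR, VSX, or VMX register (or written back to a nested guest's GPR). Hedged sketch of the first half:

    u64 gpr;

    if (!vcpu->arch.mmio_host_swabbed) {
    	switch (run->mmio.len) {
    	case 8: gpr = *(u64 *)run->mmio.data; break;
    	case 4: gpr = *(u32 *)run->mmio.data; break;
    	case 2: gpr = *(u16 *)run->mmio.data; break;
    	case 1: gpr = *(u8 *)run->mmio.data; break;
    	}
    } else {
    	switch (run->mmio.len) {
    	case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
    	case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
    	case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
    	case 1: gpr = *(u8 *)run->mmio.data; break;
    	}
    }

    /* FP single-precision loads are widened to the 64-bit FPR format */
    if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
    	gpr = sp_to_dp(gpr);
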
1245 run->mmio.phys_addr = vcpu->arch.paddr_accessed;
1249 vcpu->arch.io_gpr = rt;
1250 vcpu->arch.mmio_host_swabbed = host_swabbed;
1253 vcpu->arch.mmio_sign_extend = sign_extend;
1295 if (vcpu->arch.mmio_vsx_copy_nums > 4)
1298 while (vcpu->arch.mmio_vsx_copy_nums) {
1305 vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
1307 vcpu->arch.mmio_vsx_copy_nums--;
1308 vcpu->arch.mmio_vsx_offset++;
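
1295-1308 show why the VSX path carries copy_nums/offset counters: one vector load can expand into up to four MMIO round trips, and each completed pass advances the guest physical address and the element offset so the next pass resumes where the previous one stopped. Hedged sketch of the loop (the inner helper name is assumed):

    if (vcpu->arch.mmio_vsx_copy_nums > 4)
    	return EMULATE_FAIL;

    while (vcpu->arch.mmio_vsx_copy_nums) {
    	emulated = __kvmppc_handle_load(vcpu, rt, bytes,
    					is_default_endian, 0);
    	if (emulated != EMULATE_DONE)
    		break;

    	vcpu->arch.paddr_accessed += vcpu->run->mmio.len;

    	vcpu->arch.mmio_vsx_copy_nums--;
    	vcpu->arch.mmio_vsx_offset++;
    }
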
1334 run->mmio.phys_addr = vcpu->arch.paddr_accessed;
1340 if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
1382 int copy_type = vcpu->arch.mmio_copy_type;
1388 kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
1405 kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
1437 vcpu->arch.io_gpr = rs;
1440 if (vcpu->arch.mmio_vsx_copy_nums > 4)
1443 while (vcpu->arch.mmio_vsx_copy_nums) {
1453 vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
1455 vcpu->arch.mmio_vsx_copy_nums--;
1456 vcpu->arch.mmio_vsx_offset++;
1468 vcpu->arch.paddr_accessed += run->mmio.len;
1471 emulated = kvmppc_handle_vsx_load(vcpu, vcpu->arch.io_gpr,
1472 run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
1475 vcpu->arch.io_gpr, run->mmio.len, 1);
1503 if (vcpu->arch.mmio_vmx_copy_nums > 2)
1506 while (vcpu->arch.mmio_vmx_copy_nums) {
1513 vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
1514 vcpu->arch.mmio_vmx_copy_nums--;
1515 vcpu->arch.mmio_vmx_offset++;
1528 kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1546 kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1564 kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1582 kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1600 if (vcpu->arch.mmio_vmx_copy_nums > 2)
1603 vcpu->arch.io_gpr = rs;
1605 while (vcpu->arch.mmio_vmx_copy_nums) {
1606 switch (vcpu->arch.mmio_copy_type) {
1633 vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
1634 vcpu->arch.mmio_vmx_copy_nums--;
1635 vcpu->arch.mmio_vmx_offset++;
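
1503-1635 are the VMX equivalents, capped at two passes instead of four. On the store side each pass extracts one element from the vector register, with mmio_copy_type selecting the width. Hedged sketch; the kvmppc_get_vmx_* helper names follow the offset helpers visible at 1528-1582:

    while (vcpu->arch.mmio_vmx_copy_nums) {
    	u64 val = 0;

    	switch (vcpu->arch.mmio_copy_type) {
    	case KVMPPC_VMX_COPY_DWORD:
    		if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1)
    			return EMULATE_FAIL;
    		break;
    	case KVMPPC_VMX_COPY_WORD:
    		if (kvmppc_get_vmx_word(vcpu, index, &val) == -1)
    			return EMULATE_FAIL;
    		break;
    	/* HWORD and BYTE cases are analogous */
    	default:
    		return EMULATE_FAIL;
    	}

    	emulated = kvmppc_handle_store(vcpu, val, bytes,
    				       is_default_endian);
    	if (emulated != EMULATE_DONE)
    		break;

    	vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
    	vcpu->arch.mmio_vmx_copy_nums--;
    	vcpu->arch.mmio_vmx_offset++;
    }
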
1647 vcpu->arch.paddr_accessed += run->mmio.len;
1651 vcpu->arch.io_gpr, run->mmio.len, 1);
1654 vcpu->arch.io_gpr, run->mmio.len, 1);
1696 val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
1703 val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
1706 val = get_reg_val(reg->id, vcpu->arch.vrsave);
1747 vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
1754 vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
1761 vcpu->arch.vrsave = set_reg_val(reg->id, val);
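
1696-1761 are the ONE_REG accessors for the Altivec state. The vscr.u[3] indexing is the detail worth noting: VSCR is architecturally 32 bits but is kept in a 128-bit vector image, and the live word is word 3. Hedged sketch of the get side:

    case KVM_REG_PPC_VSCR:
    	if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
    		r = -ENXIO;
    		break;
    	}
    	/* only the low word of the 128-bit VSCR image is architected */
    	val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
    	break;
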
1785 if (vcpu->arch.mmio_vsx_copy_nums > 0) {
1786 vcpu->arch.mmio_vsx_copy_nums--;
1787 vcpu->arch.mmio_vsx_offset++;
1790 if (vcpu->arch.mmio_vsx_copy_nums > 0) {
1799 if (vcpu->arch.mmio_vmx_copy_nums > 0) {
1800 vcpu->arch.mmio_vmx_copy_nums--;
1801 vcpu->arch.mmio_vmx_offset++;
1804 if (vcpu->arch.mmio_vmx_copy_nums > 0) {
1812 } else if (vcpu->arch.osi_needed) {
1818 vcpu->arch.osi_needed = 0;
1819 } else if (vcpu->arch.hcall_needed) {
1825 vcpu->arch.hcall_needed = 0;
1827 } else if (vcpu->arch.epr_needed) {
1829 vcpu->arch.epr_needed = 0;
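
1785-1829 sit at the top of the vcpu run path, completing whatever caused the last exit to userspace: half-finished VSX/VMX emulation is resumed first (the copy_nums > 0 branches), otherwise the values userspace produced for an OSI call, a PAPR hypercall, or an EPR read are written back into guest registers before re-entry. Hedged sketch of the OSI and hcall branches:

    } else if (vcpu->arch.osi_needed) {
    	u64 *gprs = run->osi.gprs;
    	int i;

    	/* userspace filled run->osi.gprs; copy all 32 back */
    	for (i = 0; i < 32; i++)
    		kvmppc_set_gpr(vcpu, i, gprs[i]);
    	vcpu->arch.osi_needed = 0;
    } else if (vcpu->arch.hcall_needed) {
    	int i;

    	kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
    	for (i = 0; i < 9; ++i)
    		kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
    	vcpu->arch.hcall_needed = 0;
    }
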
1874 vcpu->arch.osi_enabled = true;
1878 vcpu->arch.papr_enabled = true;
1883 vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
1885 vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
1890 vcpu->arch.watchdog_enabled = true;
1978 vcpu->kvm->arch.fwnmi_enabled = true;
1995 if (kvm->arch.mpic)
1999 if (kvm->arch.xics || kvm->arch.xive)
2159 set_bit(hcall / 4, kvm->arch.enabled_hcalls);
2161 clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
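
2159/2161 maintain the per-VM hcall whitelist behind KVM_CAP_PPC_ENABLE_HCALL. sPAPR hypercall numbers are all multiples of 4, so dividing by 4 packs the bitmap with no dead bits:

    /* sPAPR hcall numbers are multiples of 4 */
    if (set)
    	set_bit(hcall / 4, kvm->arch.enabled_hcalls);
    else
    	clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
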
2170 if (kvm->arch.kvm_ops->set_smt_mode)
2171 r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
2178 !kvm->arch.kvm_ops->enable_nested)
2180 r = kvm->arch.kvm_ops->enable_nested(kvm);
2186 if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_svm)
2188 r = kvm->arch.kvm_ops->enable_svm(kvm);
2384 r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
2400 if (!kvm->arch.kvm_ops->configure_mmu)
2405 r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
2413 if (!kvm->arch.kvm_ops->get_rmmu_info)
2415 r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info);
2432 if (!kvm->arch.kvm_ops->svm_off)
2435 r = kvm->arch.kvm_ops->svm_off(kvm);
2440 r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
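
From 2170 to the end the hits all share one dispatch pattern: VM-level ioctls reach the HV- or PR-specific backend through the kvm->arch.kvm_ops table, and each method pointer is NULL-checked first so a backend lacking the operation fails cleanly instead of crashing. Hedged sketch of one such case:

    case KVM_PPC_CONFIGURE_V3_MMU: {
    	struct kvm_ppc_mmuv3_cfg cfg;

    	r = -EINVAL;
    	if (!kvm->arch.kvm_ops->configure_mmu)
    		break;
    	r = -EFAULT;
    	if (copy_from_user(&cfg, argp, sizeof(cfg)))
    		break;
    	r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
    	break;
    }
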