Lines Matching refs:arch
147 if (vcpu->arch.pv.pv_unhalted)
159 return !!(vcpu->arch.irq_pending) || kvm_vcpu_has_events(vcpu);
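Note: the two matches above (source lines 147 and 159) come from the vCPU runnable / pending-event checks. A minimal sketch combining both checks for illustration; the wrapper name is assumed, the field tests are the ones matched:

    /* Sketch only: helper name and surrounding context are assumed. */
    static bool kvm_vcpu_runnable_sketch(struct kvm_vcpu *vcpu)
    {
            /* A pending PV unhalt request makes the vCPU runnable immediately. */
            if (vcpu->arch.pv.pv_unhalted)
                    return true;

            /* Otherwise runnable when an interrupt or another KVM event is pending. */
            return !!(vcpu->arch.irq_pending) || kvm_vcpu_has_events(vcpu);
    }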
179 if (vcpu->arch.st.guest_addr == 0)
182 ret = kvm_map_gfn(vcpu, vcpu->arch.st.guest_addr >> PAGE_SHIFT,
183 &map, &vcpu->arch.st.cache, false);
188 st = map.hva + offset_in_page(vcpu->arch.st.guest_addr);
194 vcpu->arch.st.last_steal;
195 vcpu->arch.st.last_steal = current->sched_info.run_delay;
199 kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, false);
232 vcpu->arch.st.guest_addr = ipa;
247 ipa = vcpu->arch.st.guest_addr;
273 if (vcpu->arch.st.guest_addr == 0)
276 ret = kvm_map_gfn(vcpu, vcpu->arch.st.guest_addr >> PAGE_SHIFT,
277 &map, &vcpu->arch.st.cache, false);
282 st = map.hva + offset_in_page(vcpu->arch.st.guest_addr);
291 kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, false);
300 if (vcpu->arch.st.guest_addr == 0)
303 ret = kvm_map_gfn(vcpu, vcpu->arch.st.guest_addr >> PAGE_SHIFT,
304 &map, &vcpu->arch.st.cache, false);
309 st = map.hva + offset_in_page(vcpu->arch.st.guest_addr);
318 kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, false);
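Note: source lines 179-318 repeat the same steal-time accounting pattern three times: map the guest's registered steal-time page, update it, then unmap with dirty marking. A condensed sketch of one pass, assuming a conventional kvm_steal_time layout with a steal field; the kvm_map_gfn/kvm_unmap_gfn calls and field names mirror the matches:

    /* Sketch of the recurring steal-time update; struct layout assumed. */
    static void record_steal_time_sketch(struct kvm_vcpu *vcpu)
    {
            struct kvm_host_map map;
            struct kvm_steal_time *st;

            if (vcpu->arch.st.guest_addr == 0)
                    return;                 /* guest never registered a page */

            if (kvm_map_gfn(vcpu, vcpu->arch.st.guest_addr >> PAGE_SHIFT,
                            &map, &vcpu->arch.st.cache, false))
                    return;

            st = map.hva + offset_in_page(vcpu->arch.st.guest_addr);

            /* Accumulate run-queue delay seen since the last update. */
            st->steal += current->sched_info.run_delay -
                         vcpu->arch.st.last_steal;
            vcpu->arch.st.last_steal = current->sched_info.run_delay;

            /* Unmap and mark the page dirty so migration sees the update. */
            kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, false);
    }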
384 kvm->arch.gpa_mm.pgd = kvm_pgd_alloc();
385 if (!kvm->arch.gpa_mm.pgd)
388 kvm->arch.cpucfg_lasx = (read_cpucfg(LOONGARCH_CPUCFG2) &
392 kvm->arch.vmcs = vmcs;
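Note: source lines 384-392 are VM-level init: allocate the second-stage (GPA) page-table root, probe LASX support from CPUCFG2, and attach the per-CPU context. Roughly, assuming this sits in the arch VM-init path and that CPUCFG2_LASX is the probed bit:

    /* Sketch: the per-CPU vmcs allocation and error unwinding are elided. */
    kvm->arch.gpa_mm.pgd = kvm_pgd_alloc();
    if (!kvm->arch.gpa_mm.pgd)
            return -ENOMEM;

    kvm->arch.cpucfg_lasx = (read_cpucfg(LOONGARCH_CPUCFG2) &
                             CPUCFG2_LASX);        /* mask name assumed */
    kvm->arch.vmcs = vmcs;                         /* per-CPU context allocated earlier */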
450 context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
463 vcpu->arch.vpid[cpu] = vpid;
478 vcpu->arch.vpid[i] = 0;
496 context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
497 migrated = (vcpu->arch.last_exec_cpu != cpu);
498 vcpu->arch.last_exec_cpu = cpu;
509 (vcpu->arch.vpid[cpu] ^ context->vpid_cache) &
512 trace_kvm_vpid_change(vcpu, vcpu->arch.vpid[cpu]);
514 gstinfo_gid = (vcpu->arch.vpid[cpu] & context->gid_mask) <<
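Note: source lines 450-514 are the per-CPU VPID bookkeeping done when a vCPU starts running on a CPU: detect migration, compare the cached VPID generation, allocate a fresh VPID if stale, and fold the VPID/GID into guest state. A simplified sketch of the freshness check; names marked "assumed" are illustrative, the rest appear in the matches:

    static void kvm_check_vpid_sketch(struct kvm_vcpu *vcpu, int cpu)
    {
            struct kvm_context *context;            /* type name assumed */
            bool migrated;

            context  = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
            migrated = (vcpu->arch.last_exec_cpu != cpu);
            vcpu->arch.last_exec_cpu = cpu;

            /* Re-allocate when migrated or when the cached VPID belongs to an
             * older generation (high bits differ from vpid_cache). */
            if (migrated ||
                ((vcpu->arch.vpid[cpu] ^ context->vpid_cache) &
                 ~context->vpid_mask)) {            /* mask name assumed */
                    _kvm_update_vpid(vcpu, cpu);    /* helper name assumed */
                    trace_kvm_vpid_change(vcpu, vcpu->arch.vpid[cpu]);
            }
    }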
526 unsigned long exst = vcpu->arch.host_estat;
529 u32 __user *opc = (u32 __user *) vcpu->arch.pc;
606 vcpu = container_of(timer, struct kvm_vcpu, arch.swtimer);
624 struct loongarch_csrs *csr = vcpu->arch.csr;
627 vcpu->arch.vpid[i] = 0;
629 hrtimer_init(&vcpu->arch.swtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
630 vcpu->arch.swtimer.function = kvm_swtimer_wakeup;
631 vcpu->arch.fpu_enabled = true;
632 vcpu->arch.lsx_enabled = true;
634 vcpu->kvm->arch.online_vcpus = vcpu->vcpu_id + 1;
636 vcpu->arch.host_eentry = kvm_csr_readq(KVM_CSR_EENTRY);
637 vcpu->arch.guest_eentry = (unsigned long)kvm_exception_entry;
638 vcpu->arch.vcpu_run = kvm_enter_guest;
639 vcpu->arch.handle_exit = _kvm_handle_exit;
640 vcpu->arch.csr = kzalloc(sizeof(struct loongarch_csrs), GFP_KERNEL);
645 vcpu->arch.host_ecfg = (kvm_read_csr_ecfg() & KVM_ECFG_VS);
647 if (!vcpu->arch.csr)
651 vcpu->arch.last_sched_cpu = -1;
652 vcpu->arch.last_exec_cpu = -1;
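Note: source lines 606-652 cover vCPU creation: the wakeup hrtimer (line 606 is its callback recovering the vCPU via container_of), default FPU/LSX enables, entry points, host ECFG/EENTRY snapshots, and the CSR array allocation. A condensed sketch built only from those matches, with ordering and error handling simplified:

    /* Sketch of the create path; only fields visible in the matches are set. */
    hrtimer_init(&vcpu->arch.swtimer, CLOCK_MONOTONIC,
                 HRTIMER_MODE_ABS_PINNED);
    vcpu->arch.swtimer.function = kvm_swtimer_wakeup;

    vcpu->arch.fpu_enabled = true;
    vcpu->arch.lsx_enabled = true;

    vcpu->kvm->arch.online_vcpus = vcpu->vcpu_id + 1;

    vcpu->arch.host_eentry  = kvm_csr_readq(KVM_CSR_EENTRY);
    vcpu->arch.guest_eentry = (unsigned long)kvm_exception_entry;
    vcpu->arch.vcpu_run     = kvm_enter_guest;
    vcpu->arch.handle_exit  = _kvm_handle_exit;
    vcpu->arch.host_ecfg    = kvm_read_csr_ecfg() & KVM_ECFG_VS;

    vcpu->arch.csr = kzalloc(sizeof(struct loongarch_csrs), GFP_KERNEL);
    if (!vcpu->arch.csr)
            return -ENOMEM;

    vcpu->arch.last_sched_cpu = -1;
    vcpu->arch.last_exec_cpu  = -1;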
676 struct gfn_to_pfn_cache *cache = &vcpu->arch.st.cache;
678 hrtimer_cancel(&vcpu->arch.swtimer);
680 if (vcpu->arch.st.guest_addr)
682 kfree(vcpu->arch.csr);
689 context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
729 } else if (vcpu->arch.is_hypcall) {
731 vcpu->arch.gprs[REG_A0] = vcpu->run->hypercall.ret;
732 vcpu->arch.is_hypcall = 0;
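Note: source lines 729-732 appear to be the userspace-hypercall completion path: on re-entry after a hypercall exit, the result userspace left in run->hypercall.ret is copied into the guest's $a0 and the in-flight flag is cleared. Sketched, with the surrounding exit-reason dispatch elided:

    if (vcpu->arch.is_hypcall) {
            /* Hand the userspace-supplied result back to the guest in $a0. */
            vcpu->arch.gprs[REG_A0] = vcpu->run->hypercall.ret;
            vcpu->arch.is_hypcall = 0;
    }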
824 if (hrtimer_cancel(&vcpu->arch.swtimer))
825 hrtimer_restart(&vcpu->arch.swtimer);
831 struct loongarch_csrs *csr = vcpu->arch.csr;
838 migrated = (vcpu->arch.last_sched_cpu != cpu);
844 context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
865 kvm_write_csr_gcntc((ulong)vcpu->kvm->arch.stablecounter_gftoffset);
935 if (vcpu->arch.last_sched_cpu != cpu) {
937 vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
954 struct loongarch_csrs *csr = vcpu->arch.csr;
1022 vcpu->arch.last_sched_cpu = cpu;
1034 struct loongarch_csrs *csr = vcpu->arch.csr;
1046 *v = vcpu->arch.lbt.scr0;
1049 *v = vcpu->arch.lbt.scr1;
1052 *v = vcpu->arch.lbt.scr2;
1055 *v = vcpu->arch.lbt.scr3;
1058 *v = vcpu->arch.lbt.eflags;
1061 *v = vcpu->arch.fpu.ftop;
1065 *v = drdtime() + vcpu->kvm->arch.stablecounter_gftoffset;
1084 struct loongarch_csrs *csr = vcpu->arch.csr;
1101 vcpu->arch.lbt.scr0 = val;
1104 vcpu->arch.lbt.scr1 = val;
1107 vcpu->arch.lbt.scr2 = val;
1110 vcpu->arch.lbt.scr3 = val;
1113 vcpu->arch.lbt.eflags = val;
1116 vcpu->arch.fpu.ftop = val;
1125 if (!vcpu->kvm->arch.stablecounter_gftoffset)
1126 vcpu->kvm->arch.stablecounter_gftoffset = (signed long)(v - drdtime());
1127 kvm_write_csr_gcntc((ulong)vcpu->kvm->arch.stablecounter_gftoffset);
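Note: source lines 1034-1127 are the GET_ONE_REG / SET_ONE_REG handlers for the LBT scratch registers, the FPU top-of-stack pointer, and the guest stable counter. The counter is the interesting case: reads return host counter plus the per-VM guest offset, and the first write derives that offset from the requested guest value. A sketch of the pair; the helper names are assumed, the bodies follow the matched lines:

    static void get_counter_reg_sketch(struct kvm_vcpu *vcpu, u64 *v)
    {
            /* Guest view = host counter + per-VM guest/host offset. */
            *v = drdtime() + vcpu->kvm->arch.stablecounter_gftoffset;
    }

    static void set_counter_reg_sketch(struct kvm_vcpu *vcpu, u64 v)
    {
            /* Derive the offset only once, then program GCNTC so hardware
             * applies it on every guest counter read. */
            if (!vcpu->kvm->arch.stablecounter_gftoffset)
                    vcpu->kvm->arch.stablecounter_gftoffset =
                                    (signed long)(v - drdtime());
            kvm_write_csr_gcntc((ulong)vcpu->kvm->arch.stablecounter_gftoffset);
    }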
1131 cache = &vcpu->arch.st.cache;
1135 memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending));
1136 memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear));
1138 if (vcpu->arch.st.guest_addr) {
1141 vcpu->arch.st.guest_addr = 0;
1143 vcpu->kvm->arch.stablecounter_gftoffset = 0;
1538 vcpu_state.online_vcpus = vcpu->kvm->arch.online_vcpus;
1541 vcpu_state.core_ext_ioisr[i] = vcpu->arch.core_ext_ioisr[i];
1543 vcpu_state.irq_pending = vcpu->arch.irq_pending;
1544 vcpu_state.irq_clear = vcpu->arch.irq_clear;
1561 vcpu->kvm->arch.online_vcpus = vcpu_state.online_vcpus;
1562 vcpu->kvm->arch.is_migrate = vcpu_state.is_migrate;
1564 vcpu->arch.core_ext_ioisr[i] = vcpu_state.core_ext_ioisr[i];
1566 vcpu->arch.irq_pending = vcpu_state.irq_pending;
1567 vcpu->arch.irq_clear = vcpu_state.irq_clear;
1620 memslot->arch.flags |= KVM_MEMSLOT_DISABLE_THP;
1637 if (kvm->arch.v_ioapic)
1659 /* Write kvm->irq_routing before kvm->arch.vpic. */
1725 if (copy_from_user(&kvm->arch.cpucfgs, argp, sizeof(struct kvm_cpucfg)))
1732 if (copy_to_user(argp, &kvm->arch.cpucfgs, sizeof(struct kvm_cpucfg)))
1800 fpu->fcsr = vcpu->arch.fpu.fcsr;
1801 fpu->fcc = vcpu->arch.fpu.fcc;
1803 memcpy(&fpu->fpr[i], &vcpu->arch.fpu.fpr[i], FPU_REG_WIDTH / 64);
1813 vcpu->arch.fpu.fcsr = fpu->fcsr;
1814 vcpu->arch.fpu.fcc = fpu->fcc;
1816 memcpy(&vcpu->arch.fpu.fpr[i], &fpu->fpr[i], FPU_REG_WIDTH / 64);
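Note: source lines 1800-1816 are the KVM_GET_FPU / KVM_SET_FPU pair: FCSR, the condition-code flags, and a fixed-width slice of each FP register move between the ioctl structure and vcpu->arch.fpu. A sketch of the two directions; the NUM_FPU_REGS bound is assumed, the copies mirror the matches:

    static void get_fpu_sketch(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
    {
            int i;

            fpu->fcsr = vcpu->arch.fpu.fcsr;
            fpu->fcc  = vcpu->arch.fpu.fcc;
            for (i = 0; i < NUM_FPU_REGS; i++)      /* bound assumed */
                    memcpy(&fpu->fpr[i], &vcpu->arch.fpu.fpr[i],
                           FPU_REG_WIDTH / 64);
    }

    static void set_fpu_sketch(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
    {
            int i;

            vcpu->arch.fpu.fcsr = fpu->fcsr;
            vcpu->arch.fpu.fcc  = fpu->fcc;
            for (i = 0; i < NUM_FPU_REGS; i++)
                    memcpy(&vcpu->arch.fpu.fpr[i], &fpu->fpr[i],
                           FPU_REG_WIDTH / 64);
    }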
1900 kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc);
1901 kvm_debug("\texceptions: %08lx\n", vcpu->arch.irq_pending);
1905 vcpu->arch.gprs[i],
1906 vcpu->arch.gprs[i + 1],
1907 vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
1910 csr = vcpu->arch.csr;
1926 for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
1927 vcpu->arch.gprs[i] = regs->gpr[i];
1928 vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
1929 vcpu->arch.pc = regs->pc;
1941 for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
1942 regs->gpr[i] = vcpu->arch.gprs[i];
1944 regs->pc = vcpu->arch.pc;
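Note: source lines 1900-1944 mix the debug register dump with the KVM_GET_REGS / KVM_SET_REGS handlers. The one subtlety visible in the matches is that SET starts the loop at index 1 and then forces gprs[0] back to zero, since $r0 is hard-wired. Sketch of both directions:

    static void set_regs_sketch(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
    {
            int i;

            /* $r0 is the hard-wired zero register and cannot be set. */
            for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
                    vcpu->arch.gprs[i] = regs->gpr[i];
            vcpu->arch.gprs[0] = 0;
            vcpu->arch.pc = regs->pc;
    }

    static void get_regs_sketch(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
    {
            int i;

            for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
                    regs->gpr[i] = vcpu->arch.gprs[i];
            regs->pc = vcpu->arch.pc;
    }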
1961 if (!(vcpu->arch.aux_inuse & KVM_LARCH_LBT)) {
1966 vcpu->arch.aux_inuse |= KVM_LARCH_LBT;
1984 if (vcpu->arch.aux_inuse & KVM_LARCH_LBT) {
1987 vcpu->arch.aux_inuse &= ~KVM_LARCH_LBT;
2008 kvm_enable_lbt_fpu(vcpu, vcpu->arch.fpu.fcsr);
2013 vcpu->arch.aux_inuse |= KVM_LARCH_FPU;
2026 kvm_enable_lbt_fpu(vcpu, vcpu->arch.fpu.fcsr);
2028 switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
2048 vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU;
2059 kvm_enable_lbt_fpu(vcpu, vcpu->arch.fpu.fcsr);
2061 switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) {
2086 vcpu->arch.aux_inuse |= KVM_LARCH_LASX | KVM_LARCH_LSX | KVM_LARCH_FPU;
2100 if (vcpu->arch.aux_inuse & KVM_LARCH_FP_ALL) {
2101 if (!(vcpu->arch.aux_inuse & KVM_LARCH_LBT)) {
2107 if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) {
2112 } else if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) {
2117 } else if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
2123 vcpu->arch.aux_inuse &= ~KVM_LARCH_FP_ALL;
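Note: source lines 1961-2123 track lazy FPU/LSX/LASX/LBT ownership through vcpu->arch.aux_inuse: each "own" path restores the widest unit needed and sets the matching KVM_LARCH_* bits, and the "lose" path saves whatever is currently live before clearing them (an LBT check precedes the saves, as line 2101 shows). A compressed sketch of the lose path; the kvm_save_lasx/lsx/fpu helpers are assumed names:

    if (vcpu->arch.aux_inuse & KVM_LARCH_FP_ALL) {
            /* Save the widest unit that is live; each level contains the
             * narrower ones. */
            if (vcpu->arch.aux_inuse & KVM_LARCH_LASX)
                    kvm_save_lasx(vcpu);            /* helper name assumed */
            else if (vcpu->arch.aux_inuse & KVM_LARCH_LSX)
                    kvm_save_lsx(vcpu);             /* helper name assumed */
            else if (vcpu->arch.aux_inuse & KVM_LARCH_FPU)
                    kvm_save_fpu(vcpu);             /* helper name assumed */

            /* Drop ownership; the next guest FP use faults back into the
             * corresponding "own" path and restores state. */
            vcpu->arch.aux_inuse &= ~KVM_LARCH_FP_ALL;
    }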
2131 if (vcpu->arch.aux_inuse & KVM_LARCH_PERF) {
2132 struct loongarch_csrs *csr = vcpu->arch.csr;
2147 vcpu->arch.aux_inuse &= ~KVM_LARCH_PERF;
2164 if (vcpu->arch.aux_inuse & KVM_LARCH_PERF) {
2165 struct loongarch_csrs *csr = vcpu->arch.csr;