Lines Matching refs:arch
14 * This file is derived from arch/powerpc/kvm/book3s.c,
134 return kvm->arch.nested_enable && kvm_is_radix(kvm);
241 cpu = READ_ONCE(vcpu->arch.thread_cpu);
277 * Updates to busy_stolen are protected by arch.tbacct_lock;
307 struct kvmppc_vcore *vc = vcpu->arch.vcore;
319 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
320 if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST &&
321 vcpu->arch.busy_preempt != TB_NIL) {
322 vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
323 vcpu->arch.busy_preempt = TB_NIL;
325 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
330 struct kvmppc_vcore *vc = vcpu->arch.vcore;
336 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
337 if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
338 vcpu->arch.busy_preempt = mftb();
339 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
344 vcpu->arch.pvr = pvr;
353 struct kvmppc_vcore *vc = vcpu->arch.vcore;
414 vcpu->arch.regs.nip, vcpu->arch.shregs.msr, vcpu->arch.trap);
420 vcpu->arch.regs.ctr, vcpu->arch.regs.link);
422 vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1);
424 vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
426 vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
428 vcpu->arch.regs.ccr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr);
429 pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
431 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
432 pr_err("SLB (%d entries):\n", vcpu->arch.slb_max);
433 for (r = 0; r < vcpu->arch.slb_max; ++r)
435 vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
437 vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1,
438 vcpu->arch.last_inst);
458 spin_lock(&vcpu->arch.vpa_update_lock);
464 spin_unlock(&vcpu->arch.vpa_update_lock);
527 spin_lock(&tvcpu->arch.vpa_update_lock);
540 vpap = &tvcpu->arch.vpa;
551 if (!vpa_is_registered(&tvcpu->arch.vpa))
554 vpap = &tvcpu->arch.dtl;
561 if (!vpa_is_registered(&tvcpu->arch.vpa))
564 vpap = &tvcpu->arch.slb_shadow;
571 if (vpa_is_registered(&tvcpu->arch.dtl) ||
572 vpa_is_registered(&tvcpu->arch.slb_shadow))
575 vpap = &tvcpu->arch.vpa;
580 vpap = &tvcpu->arch.dtl;
585 vpap = &tvcpu->arch.slb_shadow;
596 spin_unlock(&tvcpu->arch.vpa_update_lock);
618 spin_unlock(&vcpu->arch.vpa_update_lock);
623 spin_lock(&vcpu->arch.vpa_update_lock);
653 if (!(vcpu->arch.vpa.update_pending ||
654 vcpu->arch.slb_shadow.update_pending ||
655 vcpu->arch.dtl.update_pending))
658 spin_lock(&vcpu->arch.vpa_update_lock);
659 if (vcpu->arch.vpa.update_pending) {
660 kvmppc_update_vpa(vcpu, &vcpu->arch.vpa);
661 if (vcpu->arch.vpa.pinned_addr)
662 init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
664 if (vcpu->arch.dtl.update_pending) {
665 kvmppc_update_vpa(vcpu, &vcpu->arch.dtl);
666 vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
667 vcpu->arch.dtl_index = 0;
669 if (vcpu->arch.slb_shadow.update_pending)
670 kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow);
671 spin_unlock(&vcpu->arch.vpa_update_lock);
702 dt = vcpu->arch.dtl_ptr;
703 vpa = vcpu->arch.vpa.pinned_addr;
706 stolen = core_stolen - vcpu->arch.stolen_logged;
707 vcpu->arch.stolen_logged = core_stolen;
708 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
709 stolen += vcpu->arch.busy_stolen;
710 vcpu->arch.busy_stolen = 0;
711 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
716 dt->processor_id = cpu_to_be16(vc->pcpu + vcpu->arch.ptid);
720 dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr);
722 if (dt == vcpu->arch.dtl.pinned_end)
723 dt = vcpu->arch.dtl.pinned_addr;
724 vcpu->arch.dtl_ptr = dt;
727 vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index);
728 vcpu->arch.dtl.dirty = true;
737 if (vcpu->arch.doorbell_request)
745 vc = vcpu->arch.vcore;
752 if (vcpu->arch.vcore->arch_compat >= PVR_ARCH_207)
754 if ((!vcpu->arch.vcore->arch_compat) &&
775 vcpu->arch.ciabr = value1;
786 vcpu->arch.dawr = value1;
787 vcpu->arch.dawrx = value2;
875 struct kvmppc_vcore *vcore = target->arch.vcore;
886 if (target->arch.state == KVMPPC_VCPU_RUNNABLE &&
900 spin_lock(&vcpu->arch.vpa_update_lock);
901 lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr;
904 spin_unlock(&vcpu->arch.vpa_update_lock);
917 !test_bit(req/4, vcpu->kvm->arch.enabled_hcalls))
930 tvcpu->arch.prodded = 1;
932 if (tvcpu->arch.ceded)
955 if (list_empty(&vcpu->kvm->arch.rtas_tokens))
1041 if (!powernv_get_random_long(&vcpu->arch.regs.gpr[4]))
1057 vcpu->arch.hcall_needed = 0;
1061 vcpu->arch.hcall_needed = 0;
1111 * Instead the kvm->arch.secure_guest flag is checked inside
1121 vcpu->arch.hcall_needed = 0;
1133 vcpu->arch.shregs.msr |= MSR_EE;
1134 vcpu->arch.ceded = 1;
1136 if (vcpu->arch.prodded) {
1137 vcpu->arch.prodded = 0;
1139 vcpu->arch.ceded = 0;
1184 vcpu->run->debug.arch.address = kvmppc_get_pc(vcpu);
1202 nthreads = vcpu->kvm->arch.emul_smt_mode;
1241 thr = vcpu->vcpu_id & (kvm->arch.emul_smt_mode - 1);
1248 if (arg >= kvm->arch.emul_smt_mode)
1253 if (!tvcpu->arch.doorbell_request) {
1254 tvcpu->arch.doorbell_request = 1;
1262 vcpu->arch.vcore->dpdes = 0;
1263 vcpu->arch.doorbell_request = 0;
1301 if (vcpu->arch.shregs.msr & MSR_HV) {
1304 vcpu->arch.trap, kvmppc_get_pc(vcpu),
1305 vcpu->arch.shregs.msr);
1308 run->hw.hardware_exit_reason = vcpu->arch.trap;
1313 switch (vcpu->arch.trap) {
1333 machine_check_print_event_info(&vcpu->arch.mce_evt, false, true);
1341 if (!vcpu->kvm->arch.fwnmi_enabled) {
1342 ulong flags = vcpu->arch.shregs.msr & 0x083c0000;
1350 run->hw.hardware_exit_reason = vcpu->arch.trap;
1354 if (vcpu->arch.mce_evt.disposition == MCE_DISPOSITION_RECOVERED)
1370 flags = vcpu->arch.shregs.msr & 0x1f0000ull;
1388 vcpu->arch.hcall_needed = 1;
1403 vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
1404 vcpu->arch.fault_dsisr = vcpu->arch.shregs.msr &
1406 if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
1407 vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
1418 if (vcpu->arch.emul_inst != KVM_INST_FETCH_FAILED)
1419 vcpu->arch.last_inst = kvmppc_need_byteswap(vcpu) ?
1420 swab32(vcpu->arch.emul_inst) :
1421 vcpu->arch.emul_inst;
1438 if (((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG) &&
1465 vcpu->arch.trap, kvmppc_get_pc(vcpu),
1466 vcpu->arch.shregs.msr);
1467 run->hw.hardware_exit_reason = vcpu->arch.trap;
1490 if (vcpu->arch.shregs.msr & MSR_HV) {
1493 vcpu->arch.trap, kvmppc_get_pc(vcpu),
1494 vcpu->arch.shregs.msr);
1498 switch (vcpu->arch.trap) {
1523 machine_check_print_event_info(&vcpu->arch.mce_evt, false, true);
1537 vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
1538 vcpu->arch.fault_dsisr = kvmppc_get_msr(vcpu) &
1540 if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
1541 vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
1560 vcpu->arch.trap = 0;
1579 sregs->pvr = vcpu->arch.pvr;
1580 for (i = 0; i < vcpu->arch.slb_max; i++) {
1581 sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige;
1582 sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
1594 if (sregs->pvr != vcpu->arch.pvr)
1598 for (i = 0; i < vcpu->arch.slb_nr; i++) {
1600 vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe;
1601 vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv;
1605 vcpu->arch.slb_max = j;
1614 struct kvmppc_vcore *vc = vcpu->arch.vcore;
1627 if (vcpu->arch.vcore != vc)
1630 vcpu->arch.intr_msr |= MSR_LE;
1632 vcpu->arch.intr_msr &= ~MSR_LE;
1672 *val = get_reg_val(id, vcpu->arch.dabr);
1675 *val = get_reg_val(id, vcpu->arch.dabrx);
1678 *val = get_reg_val(id, vcpu->arch.dscr);
1681 *val = get_reg_val(id, vcpu->arch.purr);
1684 *val = get_reg_val(id, vcpu->arch.spurr);
1687 *val = get_reg_val(id, vcpu->arch.amr);
1690 *val = get_reg_val(id, vcpu->arch.uamor);
1694 *val = get_reg_val(id, vcpu->arch.mmcr[i]);
1697 *val = get_reg_val(id, vcpu->arch.mmcr[2]);
1700 *val = get_reg_val(id, vcpu->arch.mmcra);
1703 *val = get_reg_val(id, vcpu->arch.mmcrs);
1706 *val = get_reg_val(id, vcpu->arch.mmcr[3]);
1710 *val = get_reg_val(id, vcpu->arch.pmc[i]);
1714 *val = get_reg_val(id, vcpu->arch.spmc[i]);
1717 *val = get_reg_val(id, vcpu->arch.siar);
1720 *val = get_reg_val(id, vcpu->arch.sdar);
1723 *val = get_reg_val(id, vcpu->arch.sier[0]);
1726 *val = get_reg_val(id, vcpu->arch.sier[1]);
1729 *val = get_reg_val(id, vcpu->arch.sier[2]);
1732 *val = get_reg_val(id, vcpu->arch.iamr);
1735 *val = get_reg_val(id, vcpu->arch.pspb);
1744 *val = get_reg_val(id, vcpu->arch.vcore->dpdes |
1745 vcpu->arch.doorbell_request);
1748 *val = get_reg_val(id, vcpu->arch.vcore->vtb);
1751 *val = get_reg_val(id, vcpu->arch.dawr);
1754 *val = get_reg_val(id, vcpu->arch.dawrx);
1757 *val = get_reg_val(id, vcpu->arch.ciabr);
1760 *val = get_reg_val(id, vcpu->arch.csigr);
1763 *val = get_reg_val(id, vcpu->arch.tacr);
1766 *val = get_reg_val(id, vcpu->arch.tcscr);
1769 *val = get_reg_val(id, vcpu->arch.pid);
1772 *val = get_reg_val(id, vcpu->arch.acop);
1775 *val = get_reg_val(id, vcpu->arch.wort);
1778 *val = get_reg_val(id, vcpu->arch.tid);
1781 *val = get_reg_val(id, vcpu->arch.psscr);
1784 spin_lock(&vcpu->arch.vpa_update_lock);
1785 *val = get_reg_val(id, vcpu->arch.vpa.next_gpa);
1786 spin_unlock(&vcpu->arch.vpa_update_lock);
1789 spin_lock(&vcpu->arch.vpa_update_lock);
1790 val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa;
1791 val->vpaval.length = vcpu->arch.slb_shadow.len;
1792 spin_unlock(&vcpu->arch.vpa_update_lock);
1795 spin_lock(&vcpu->arch.vpa_update_lock);
1796 val->vpaval.addr = vcpu->arch.dtl.next_gpa;
1797 val->vpaval.length = vcpu->arch.dtl.len;
1798 spin_unlock(&vcpu->arch.vpa_update_lock);
1801 *val = get_reg_val(id, vcpu->arch.vcore->tb_offset);
1805 *val = get_reg_val(id, vcpu->arch.vcore->lpcr);
1808 *val = get_reg_val(id, vcpu->arch.ppr);
1812 *val = get_reg_val(id, vcpu->arch.tfhar);
1815 *val = get_reg_val(id, vcpu->arch.tfiar);
1818 *val = get_reg_val(id, vcpu->arch.texasr);
1822 *val = get_reg_val(id, vcpu->arch.gpr_tm[i]);
1830 val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j];
1833 val->vval = vcpu->arch.vr_tm.vr[i-32];
1840 *val = get_reg_val(id, vcpu->arch.cr_tm);
1843 *val = get_reg_val(id, vcpu->arch.xer_tm);
1846 *val = get_reg_val(id, vcpu->arch.lr_tm);
1849 *val = get_reg_val(id, vcpu->arch.ctr_tm);
1852 *val = get_reg_val(id, vcpu->arch.fp_tm.fpscr);
1855 *val = get_reg_val(id, vcpu->arch.amr_tm);
1858 *val = get_reg_val(id, vcpu->arch.ppr_tm);
1861 *val = get_reg_val(id, vcpu->arch.vrsave_tm);
1865 *val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]);
1870 *val = get_reg_val(id, vcpu->arch.dscr_tm);
1873 *val = get_reg_val(id, vcpu->arch.tar_tm);
1877 *val = get_reg_val(id, vcpu->arch.vcore->arch_compat);
1880 *val = get_reg_val(id, vcpu->arch.dec_expires +
1881 vcpu->arch.vcore->tb_offset);
1884 *val = get_reg_val(id, vcpu->arch.online);
1887 *val = get_reg_val(id, vcpu->kvm->arch.l1_ptcr);
1911 vcpu->arch.dabr = set_reg_val(id, *val);
1914 vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP;
1917 vcpu->arch.dscr = set_reg_val(id, *val);
1920 vcpu->arch.purr = set_reg_val(id, *val);
1923 vcpu->arch.spurr = set_reg_val(id, *val);
1926 vcpu->arch.amr = set_reg_val(id, *val);
1929 vcpu->arch.uamor = set_reg_val(id, *val);
1933 vcpu->arch.mmcr[i] = set_reg_val(id, *val);
1936 vcpu->arch.mmcr[2] = set_reg_val(id, *val);
1939 vcpu->arch.mmcra = set_reg_val(id, *val);
1942 vcpu->arch.mmcrs = set_reg_val(id, *val);
1945 vcpu->arch.mmcr[3] = set_reg_val(id, *val);
1949 vcpu->arch.pmc[i] = set_reg_val(id, *val);
1953 vcpu->arch.spmc[i] = set_reg_val(id, *val);
1956 vcpu->arch.siar = set_reg_val(id, *val);
1959 vcpu->arch.sdar = set_reg_val(id, *val);
1962 vcpu->arch.sier[0] = set_reg_val(id, *val);
1965 vcpu->arch.sier[1] = set_reg_val(id, *val);
1968 vcpu->arch.sier[2] = set_reg_val(id, *val);
1971 vcpu->arch.iamr = set_reg_val(id, *val);
1974 vcpu->arch.pspb = set_reg_val(id, *val);
1977 vcpu->arch.vcore->dpdes = set_reg_val(id, *val);
1980 vcpu->arch.vcore->vtb = set_reg_val(id, *val);
1983 vcpu->arch.dawr = set_reg_val(id, *val);
1986 vcpu->arch.dawrx = set_reg_val(id, *val) & ~DAWRX_HYP;
1989 vcpu->arch.ciabr = set_reg_val(id, *val);
1991 if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
1992 vcpu->arch.ciabr &= ~CIABR_PRIV; /* disable */
1995 vcpu->arch.csigr = set_reg_val(id, *val);
1998 vcpu->arch.tacr = set_reg_val(id, *val);
2001 vcpu->arch.tcscr = set_reg_val(id, *val);
2004 vcpu->arch.pid = set_reg_val(id, *val);
2007 vcpu->arch.acop = set_reg_val(id, *val);
2010 vcpu->arch.wort = set_reg_val(id, *val);
2013 vcpu->arch.tid = set_reg_val(id, *val);
2016 vcpu->arch.psscr = set_reg_val(id, *val) & PSSCR_GUEST_VIS;
2021 if (!addr && (vcpu->arch.slb_shadow.next_gpa ||
2022 vcpu->arch.dtl.next_gpa))
2024 r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca));
2030 if (addr && !vcpu->arch.vpa.next_gpa)
2032 r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len);
2039 !vcpu->arch.vpa.next_gpa))
2042 r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
2046 vcpu->arch.vcore->tb_offset =
2056 vcpu->arch.ppr = set_reg_val(id, *val);
2060 vcpu->arch.tfhar = set_reg_val(id, *val);
2063 vcpu->arch.tfiar = set_reg_val(id, *val);
2066 vcpu->arch.texasr = set_reg_val(id, *val);
2070 vcpu->arch.gpr_tm[i] = set_reg_val(id, *val);
2078 vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j];
2081 vcpu->arch.vr_tm.vr[i-32] = val->vval;
2087 vcpu->arch.cr_tm = set_reg_val(id, *val);
2090 vcpu->arch.xer_tm = set_reg_val(id, *val);
2093 vcpu->arch.lr_tm = set_reg_val(id, *val);
2096 vcpu->arch.ctr_tm = set_reg_val(id, *val);
2099 vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val);
2102 vcpu->arch.amr_tm = set_reg_val(id, *val);
2105 vcpu->arch.ppr_tm = set_reg_val(id, *val);
2108 vcpu->arch.vrsave_tm = set_reg_val(id, *val);
2112 vcpu->arch.vr_tm.vscr.u[3] = set_reg_val(id, *val);
2117 vcpu->arch.dscr_tm = set_reg_val(id, *val);
2120 vcpu->arch.tar_tm = set_reg_val(id, *val);
2127 vcpu->arch.dec_expires = set_reg_val(id, *val) -
2128 vcpu->arch.vcore->tb_offset;
2132 if (i && !vcpu->arch.online)
2133 atomic_inc(&vcpu->arch.vcore->online_count);
2134 else if (!i && vcpu->arch.online)
2135 atomic_dec(&vcpu->arch.vcore->online_count);
2136 vcpu->arch.online = i;
2139 vcpu->kvm->arch.l1_ptcr = set_reg_val(id, *val);
2158 if (kvm->arch.threads_indep)
2176 vcore->lpcr = kvm->arch.lpcr;
2189 {"rm_entry", offsetof(struct kvm_vcpu, arch.rm_entry)},
2190 {"rm_intr", offsetof(struct kvm_vcpu, arch.rm_intr)},
2191 {"rm_exit", offsetof(struct kvm_vcpu, arch.rm_exit)},
2192 {"guest", offsetof(struct kvm_vcpu, arch.guest_time)},
2193 {"cede", offsetof(struct kvm_vcpu, arch.cede_time)},
2316 vcpu->arch.debugfs_dir = debugfs_create_dir(buf, kvm->arch.debugfs_dir);
2317 debugfs_create_file("timings", 0444, vcpu->arch.debugfs_dir, vcpu,
2338 vcpu->arch.shared = &vcpu->arch.shregs;
2345 vcpu->arch.shared_big_endian = true;
2347 vcpu->arch.shared_big_endian = false;
2350 vcpu->arch.mmcr[0] = MMCR0_FC;
2351 vcpu->arch.ctrl = CTRL_RUNLATCH;
2354 spin_lock_init(&vcpu->arch.vpa_update_lock);
2355 spin_lock_init(&vcpu->arch.tbacct_lock);
2356 vcpu->arch.busy_preempt = TB_NIL;
2357 vcpu->arch.intr_msr = MSR_SF | MSR_ME;
2366 vcpu->arch.hfscr = HFSCR_TAR | HFSCR_EBB | HFSCR_PM | HFSCR_BHRB |
2369 vcpu->arch.hfscr &= mfspr(SPRN_HFSCR);
2372 vcpu->arch.hfscr |= HFSCR_TM;
2376 vcpu->arch.hfscr |= HFSCR_TM;
2380 vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
2382 init_waitqueue_head(&vcpu->arch.cpu_run);
2388 if (id >= (KVM_MAX_VCPUS * kvm->arch.emul_smt_mode)) {
2392 BUG_ON(kvm->arch.smt_mode != 1);
2396 core = id / kvm->arch.smt_mode;
2399 vcore = kvm->arch.vcores[core];
2410 id & ~(kvm->arch.smt_mode - 1));
2411 mutex_lock(&kvm->arch.mmu_setup_lock);
2412 kvm->arch.vcores[core] = vcore;
2413 kvm->arch.online_vcores++;
2414 mutex_unlock(&kvm->arch.mmu_setup_lock);
2425 vcpu->arch.vcore = vcore;
2426 vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid;
2427 vcpu->arch.thread_cpu = -1;
2428 vcpu->arch.prev_cpu = -1;
2430 vcpu->arch.cpu_type = KVM_CPU_3S_64;
2465 if (!kvm->arch.online_vcores) {
2466 kvm->arch.smt_mode = smt_mode;
2467 kvm->arch.emul_smt_mode = esmt;
2484 spin_lock(&vcpu->arch.vpa_update_lock);
2485 unpin_vpa(vcpu->kvm, &vcpu->arch.dtl);
2486 unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow);
2487 unpin_vpa(vcpu->kvm, &vcpu->arch.vpa);
2488 spin_unlock(&vcpu->arch.vpa_update_lock);
2502 if (now > vcpu->arch.dec_expires) {
2508 dec_nsec = tb_to_ns(vcpu->arch.dec_expires - now);
2509 hrtimer_start(&vcpu->arch.dec_timer, dec_nsec, HRTIMER_MODE_REL);
2510 vcpu->arch.timer_running = 1;
2520 if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
2522 spin_lock_irq(&vcpu->arch.tbacct_lock);
2524 vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) -
2525 vcpu->arch.stolen_logged;
2526 vcpu->arch.busy_preempt = now;
2527 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
2528 spin_unlock_irq(&vcpu->arch.tbacct_lock);
2530 WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], NULL);
2580 struct kvm_nested_guest *nested = vcpu->arch.nested;
2589 cpumask_set_cpu(cpu, &kvm->arch.need_tlb_flush);
2590 cpu_in_guest = &kvm->arch.cpu_in_guest;
2606 struct kvm_nested_guest *nested = vcpu->arch.nested;
2614 prev_cpu = nested->prev_cpu[vcpu->arch.nested_vcpu_id];
2616 prev_cpu = vcpu->arch.prev_cpu;
2636 nested->prev_cpu[vcpu->arch.nested_vcpu_id] = pcpu;
2638 vcpu->arch.prev_cpu = pcpu;
2650 if (vcpu->arch.timer_running) {
2651 hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
2652 vcpu->arch.timer_running = 0;
2654 cpu += vcpu->arch.ptid;
2656 vcpu->arch.thread_cpu = cpu;
2657 cpumask_set_cpu(cpu, &kvm->arch.cpu_in_guest);
2898 if (signal_pending(vcpu->arch.run_task))
2899 vcpu->arch.ret = -EINTR;
2900 else if (vcpu->arch.vpa.update_pending ||
2901 vcpu->arch.slb_shadow.update_pending ||
2902 vcpu->arch.dtl.update_pending)
2903 vcpu->arch.ret = RESUME_GUEST;
2907 wake_up(&vcpu->arch.cpu_run);
2921 if (!pvc->n_runnable || !pvc->kvm->arch.mmu_ready) {
2950 if (!vc->kvm->arch.mmu_ready)
2953 if (signal_pending(vcpu->arch.run_task))
2973 * so any vcpus becoming runnable will have their arch.trap
2978 if (now < vcpu->arch.dec_expires &&
2985 if (vcpu->arch.trap)
2987 vcpu->arch.run_task);
2989 vcpu->arch.ret = ret;
2990 vcpu->arch.trap = 0;
2993 if (is_kvmppc_resume_guest(vcpu->arch.ret)) {
2994 if (vcpu->arch.pending_exceptions)
2996 if (vcpu->arch.ceded)
3002 wake_up(&vcpu->arch.cpu_run);
3018 wake_up(&vcpu->arch.cpu_run);
3114 if (vc->runner->arch.state != KVMPPC_VCPU_RUNNABLE)
3142 (hpt_on_radix && vc->kvm->arch.threads_indep)) {
3144 vcpu->arch.ret = -EBUSY;
3146 wake_up(&vcpu->arch.cpu_run);
3235 split_info.lpidr_req = vc->kvm->arch.lpid;
3236 split_info.host_lpcr = vc->kvm->arch.host_lpcr;
3300 if (!vcpu->arch.ptid)
3302 active |= 1 << (thr + vcpu->arch.ptid);
3424 cpumask_clear_cpu(pcpu + i, &vc->kvm->arch.cpu_in_guest);
3452 struct kvmppc_vcore *vc = vcpu->arch.vcore;
3467 mtspr(SPRN_LPCR, vcpu->kvm->arch.host_lpcr | LPCR_HDICE);
3472 mtspr(SPRN_LPCR, vcpu->kvm->arch.host_lpcr);
3494 mtspr(SPRN_PURR, vcpu->arch.purr);
3495 mtspr(SPRN_SPURR, vcpu->arch.spurr);
3498 mtspr(SPRN_DAWR0, vcpu->arch.dawr);
3499 mtspr(SPRN_DAWRX0, vcpu->arch.dawrx);
3501 mtspr(SPRN_CIABR, vcpu->arch.ciabr);
3502 mtspr(SPRN_IC, vcpu->arch.ic);
3503 mtspr(SPRN_PID, vcpu->arch.pid);
3505 mtspr(SPRN_PSSCR, vcpu->arch.psscr | PSSCR_EC |
3508 mtspr(SPRN_HFSCR, vcpu->arch.hfscr);
3510 mtspr(SPRN_SPRG0, vcpu->arch.shregs.sprg0);
3511 mtspr(SPRN_SPRG1, vcpu->arch.shregs.sprg1);
3512 mtspr(SPRN_SPRG2, vcpu->arch.shregs.sprg2);
3513 mtspr(SPRN_SPRG3, vcpu->arch.shregs.sprg3);
3522 mtspr(SPRN_SRR0, vcpu->arch.shregs.srr0);
3523 mtspr(SPRN_SRR1, vcpu->arch.shregs.srr1);
3531 purr - vcpu->arch.purr);
3533 spurr - vcpu->arch.spurr);
3534 vcpu->arch.purr = purr;
3535 vcpu->arch.spurr = spurr;
3537 vcpu->arch.ic = mfspr(SPRN_IC);
3538 vcpu->arch.pid = mfspr(SPRN_PID);
3539 vcpu->arch.psscr = mfspr(SPRN_PSSCR) & PSSCR_GUEST_VIS;
3541 vcpu->arch.shregs.sprg0 = mfspr(SPRN_SPRG0);
3542 vcpu->arch.shregs.sprg1 = mfspr(SPRN_SPRG1);
3543 vcpu->arch.shregs.sprg2 = mfspr(SPRN_SPRG2);
3544 vcpu->arch.shregs.sprg3 = mfspr(SPRN_SPRG3);
3568 mtspr(SPRN_LPID, vcpu->kvm->arch.host_lpid); /* restore host LPID */
3587 mtspr(SPRN_LPCR, vcpu->kvm->arch.host_lpcr);
3599 struct kvmppc_vcore *vc = vcpu->arch.vcore;
3617 vcpu->arch.ceded = 0;
3626 if (vcpu->arch.vpa.pinned_addr) {
3627 struct lppaca *lp = vcpu->arch.vpa.pinned_addr;
3630 vcpu->arch.vpa.dirty = 1;
3635 kvmppc_restore_tm_hv(vcpu, vcpu->arch.shregs.msr, true);
3640 if (vcpu->arch.vpa.pinned_addr) {
3641 struct lppaca *lp = vcpu->arch.vpa.pinned_addr;
3652 load_fp_state(&vcpu->arch.fp);
3654 load_vr_state(&vcpu->arch.vr);
3656 mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
3658 mtspr(SPRN_DSCR, vcpu->arch.dscr);
3659 mtspr(SPRN_IAMR, vcpu->arch.iamr);
3660 mtspr(SPRN_PSPB, vcpu->arch.pspb);
3661 mtspr(SPRN_FSCR, vcpu->arch.fscr);
3662 mtspr(SPRN_TAR, vcpu->arch.tar);
3663 mtspr(SPRN_EBBHR, vcpu->arch.ebbhr);
3664 mtspr(SPRN_EBBRR, vcpu->arch.ebbrr);
3665 mtspr(SPRN_BESCR, vcpu->arch.bescr);
3666 mtspr(SPRN_WORT, vcpu->arch.wort);
3667 mtspr(SPRN_TIDR, vcpu->arch.tid);
3668 mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
3669 mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
3670 mtspr(SPRN_AMR, vcpu->arch.amr);
3671 mtspr(SPRN_UAMOR, vcpu->arch.uamor);
3673 if (!(vcpu->arch.ctrl & 1))
3676 mtspr(SPRN_DEC, vcpu->arch.dec_expires - mftb());
3690 mtspr(SPRN_PSSCR_PR, vcpu->arch.psscr);
3693 vcpu->arch.regs.msr = vcpu->arch.shregs.msr;
3695 if (vcpu->arch.nested) {
3696 hvregs.lpid = vcpu->arch.nested->shadow_lpid;
3697 hvregs.vcpu_token = vcpu->arch.nested_vcpu_id;
3699 hvregs.lpid = vcpu->kvm->arch.lpid;
3704 __pa(&vcpu->arch.regs));
3706 vcpu->arch.shregs.msr = vcpu->arch.regs.msr;
3707 vcpu->arch.shregs.dar = mfspr(SPRN_DAR);
3708 vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR);
3709 vcpu->arch.psscr = mfspr(SPRN_PSSCR_PR);
3713 if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested &&
3723 vcpu->arch.slb_max = 0;
3728 vcpu->arch.dec_expires = dec + tb;
3730 vcpu->arch.thread_cpu = -1;
3732 vcpu->arch.ctrl = mfspr(SPRN_CTRLF);
3733 if (!(vcpu->arch.ctrl & 1))
3734 mtspr(SPRN_CTRLT, vcpu->arch.ctrl | 1);
3736 vcpu->arch.iamr = mfspr(SPRN_IAMR);
3737 vcpu->arch.pspb = mfspr(SPRN_PSPB);
3738 vcpu->arch.fscr = mfspr(SPRN_FSCR);
3739 vcpu->arch.tar = mfspr(SPRN_TAR);
3740 vcpu->arch.ebbhr = mfspr(SPRN_EBBHR);
3741 vcpu->arch.ebbrr = mfspr(SPRN_EBBRR);
3742 vcpu->arch.bescr = mfspr(SPRN_BESCR);
3743 vcpu->arch.wort = mfspr(SPRN_WORT);
3744 vcpu->arch.tid = mfspr(SPRN_TIDR);
3745 vcpu->arch.amr = mfspr(SPRN_AMR);
3746 vcpu->arch.uamor = mfspr(SPRN_UAMOR);
3747 vcpu->arch.dscr = mfspr(SPRN_DSCR);
3757 if (host_amr != vcpu->arch.amr)
3760 if (host_fscr != vcpu->arch.fscr)
3764 store_fp_state(&vcpu->arch.fp);
3766 store_vr_state(&vcpu->arch.vr);
3768 vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
3772 kvmppc_save_tm_hv(vcpu, vcpu->arch.shregs.msr, true);
3775 if (vcpu->arch.vpa.pinned_addr) {
3776 struct lppaca *lp = vcpu->arch.vpa.pinned_addr;
3779 vcpu->arch.vpa.dirty = 1;
3816 prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state);
3817 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
3822 finish_wait(&vcpu->arch.cpu_run, &wait);
3848 return vcpu->arch.irq_pending || vcpu->arch.xive_saved_state.pipr <
3849 vcpu->arch.xive_saved_state.cppr;
3860 if (vcpu->arch.pending_exceptions || vcpu->arch.prodded ||
3877 if (!vcpu->arch.ceded || kvmppc_vcpu_woken(vcpu))
3994 mutex_lock(&kvm->arch.mmu_setup_lock);
3995 if (!kvm->arch.mmu_ready) {
4001 kvm->arch.mmu_ready = 1;
4004 mutex_unlock(&kvm->arch.mmu_setup_lock);
4018 vcpu->arch.ret = RESUME_GUEST;
4019 vcpu->arch.trap = 0;
4025 vc = vcpu->arch.vcore;
4027 vcpu->arch.ceded = 0;
4028 vcpu->arch.run_task = current;
4029 vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
4030 vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
4031 vcpu->arch.busy_preempt = TB_NIL;
4032 WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], vcpu);
4053 while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
4056 if (!vcpu->kvm->arch.mmu_ready) {
4064 vcpu->arch.ret = r;
4078 if (signal_pending(v->arch.run_task)) {
4082 v->arch.ret = -EINTR;
4083 wake_up(&v->arch.cpu_run);
4086 if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
4091 n_ceded += v->arch.ceded;
4093 v->arch.ceded = 0;
4110 while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
4119 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
4123 vcpu->arch.ret = -EINTR;
4130 wake_up(&v->arch.cpu_run);
4135 return vcpu->arch.ret;
4146 struct kvm_nested_guest *nested = vcpu->arch.nested;
4151 vcpu->arch.ret = RESUME_GUEST;
4152 vcpu->arch.trap = 0;
4154 vc = vcpu->arch.vcore;
4155 vcpu->arch.ceded = 0;
4156 vcpu->arch.run_task = current;
4157 vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
4158 vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
4159 vcpu->arch.busy_preempt = TB_NIL;
4160 vcpu->arch.last_inst = KVM_INST_FETCH_FAILED;
4166 if (!kvm->arch.mmu_ready)
4186 if (lazy_irq_pending() || need_resched() || !kvm->arch.mmu_ready)
4191 if (vcpu->arch.doorbell_request) {
4194 vcpu->arch.doorbell_request = 0;
4197 &vcpu->arch.pending_exceptions))
4199 } else if (vcpu->arch.pending_exceptions ||
4200 vcpu->arch.doorbell_request ||
4202 vcpu->arch.ret = RESUME_HOST;
4219 lpid = nested ? nested->shadow_lpid : kvm->arch.lpid;
4235 vcpu->arch.trap = trap;
4244 mtspr(SPRN_LPID, kvm->arch.host_lpid);
4269 cpumask_clear_cpu(pcpu, &kvm->arch.cpu_in_guest);
4279 ((get_tb() < vcpu->arch.dec_expires) ||
4292 vcpu->arch.ret = r;
4294 if (is_kvmppc_resume_guest(r) && vcpu->arch.ceded &&
4297 while (vcpu->arch.ceded && !kvmppc_vcpu_woken(vcpu)) {
4301 vcpu->arch.ret = -EINTR;
4309 vcpu->arch.ceded = 0;
4318 return vcpu->arch.ret;
4323 vcpu->arch.ret = -EINTR;
4340 if (!vcpu->arch.sane) {
4372 if (!vcpu->arch.online) {
4373 atomic_inc(&vcpu->arch.vcore->online_count);
4374 vcpu->arch.online = 1;
4386 atomic_inc(&kvm->arch.vcpus_running);
4401 vcpu->arch.waitp = &vcpu->arch.vcore->wait;
4402 vcpu->arch.pgdir = kvm->mm->pgd;
4403 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
4414 if (kvm->arch.threads_indep && kvm_is_radix(kvm) &&
4417 vcpu->arch.vcore->lpcr);
4422 !(vcpu->arch.shregs.msr & MSR_PR)) {
4430 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
4450 vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
4451 atomic_dec(&kvm->arch.vcpus_running);
4558 spin_lock(&vcpu->arch.vpa_update_lock);
4559 kvmppc_harvest_vpa_dirty(&vcpu->arch.vpa, memslot, buf);
4560 kvmppc_harvest_vpa_dirty(&vcpu->arch.dtl, memslot, buf);
4561 spin_unlock(&vcpu->arch.vpa_update_lock);
4576 vfree(slot->arch.rmap);
4577 slot->arch.rmap = NULL;
4588 unsigned long size = array_size(npages, sizeof(*slot->arch.rmap));
4593 slot->arch.rmap = vzalloc(size);
4594 if (!slot->arch.rmap)
4616 atomic64_inc(&kvm->arch.mmio_update);
4637 if (!kvm->arch.secure_guest)
4658 * Update LPCR values in kvm->arch and in vcores.
4659 * Caller must hold kvm->arch.mmu_setup_lock (for mutual exclusion
4660 * of kvm->arch.lpcr update).
4667 if ((kvm->arch.lpcr & mask) == lpcr)
4670 kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr;
4673 struct kvmppc_vcore *vc = kvm->arch.vcores[i];
4679 if (++cores_done >= kvm->arch.online_vcores)
4690 dw0 = ((kvm->arch.vrma_slb_v & SLB_VSID_L) >> 1) |
4691 ((kvm->arch.vrma_slb_v & SLB_VSID_LP) << 1);
4693 dw0 |= kvm->arch.sdr1;
4696 dw1 = kvm->arch.process_table;
4699 __pa(kvm->arch.pgtable) | RADIX_PGD_INDEX_SIZE;
4700 dw1 = PATB_GR | kvm->arch.process_table;
4702 kvmhv_set_ptbl_entry(kvm->arch.lpid, dw0, dw1);
4707 * Must be called with kvm->arch.mmu_setup_lock held.
4721 if (!kvm->arch.hpt.virt) {
4770 kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
4782 /* Order updates to kvm->arch.lpcr etc. vs. mmu_ready */
4796 * Must be called with kvm->arch.mmu_setup_lock held and
4804 kvm->arch.process_table = 0;
4807 kvm->arch.radix = 0;
4816 * Must be called with kvm->arch.mmu_setup_lock held and
4829 kvm->arch.radix = 1;
4831 kvmppc_free_hpt(&kvm->arch.hpt);
4923 mutex_init(&kvm->arch.uvmem_lock);
4924 INIT_LIST_HEAD(&kvm->arch.uvmem_pfns);
4925 mutex_init(&kvm->arch.mmu_setup_lock);
4932 kvm->arch.lpid = lpid;
4946 cpumask_setall(&kvm->arch.need_tlb_flush);
4949 memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls,
4950 sizeof(kvm->arch.enabled_hcalls));
4953 kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);
4957 kvm->arch.host_lpid = mfspr(SPRN_LPID);
4958 kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
4965 kvm->arch.vrma_slb_v = SLB_VSID_B_1T |
4993 kvm->arch.radix = 1;
4994 kvm->arch.mmu_ready = 1;
4999 kvmppc_free_lpid(kvm->arch.lpid);
5005 kvm->arch.lpcr = lpcr;
5008 kvm->arch.resize_hpt = NULL;
5015 kvm->arch.tlb_sets = POWER9_TLB_SETS_RADIX; /* 128 */
5017 kvm->arch.tlb_sets = POWER9_TLB_SETS_HASH; /* 256 */
5019 kvm->arch.tlb_sets = POWER8_TLB_SETS; /* 512 */
5021 kvm->arch.tlb_sets = POWER7_TLB_SETS; /* 128 */
5032 kvm->arch.threads_indep = true;
5034 kvm->arch.threads_indep = indep_threads_mode;
5037 if (!kvm->arch.threads_indep)
5048 kvm->arch.smt_mode = threads_per_subcore;
5050 kvm->arch.smt_mode = 1;
5051 kvm->arch.emul_smt_mode = 1;
5057 kvm->arch.debugfs_dir = debugfs_create_dir(buf, kvm_debugfs_dir);
5070 kfree(kvm->arch.vcores[i]);
5071 kvm->arch.online_vcores = 0;
5076 debugfs_remove_recursive(kvm->arch.debugfs_dir);
5078 if (!kvm->arch.threads_indep)
5087 kvmppc_free_hpt(&kvm->arch.hpt);
5093 kvm->arch.process_table = 0;
5094 if (kvm->arch.secure_guest)
5095 uv_svm_terminate(kvm->arch.lpid);
5096 kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0);
5099 kvmppc_free_lpid(kvm->arch.lpid);
5140 kfree(kvm->arch.pimap);
5165 pimap = kvm->arch.pimap;
5173 kvm->arch.pimap = pimap;
5250 if (!kvm->arch.pimap)
5253 pimap = kvm->arch.pimap;
5461 mutex_lock(&kvm->arch.mmu_setup_lock);
5463 if (kvm->arch.mmu_ready) {
5464 kvm->arch.mmu_ready = 0;
5467 if (atomic_read(&kvm->arch.vcpus_running)) {
5468 kvm->arch.mmu_ready = 1;
5481 kvm->arch.process_table = cfg->process_table;
5489 mutex_unlock(&kvm->arch.mmu_setup_lock);
5502 kvm->arch.nested_enable = true;
5519 if (rc && vcpu->arch.nested)
5538 if (rc && vcpu->arch.nested)
5564 kvm->arch.svm_enabled = 1;
5584 if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
5587 mutex_lock(&kvm->arch.mmu_setup_lock);
5588 mmu_was_ready = kvm->arch.mmu_ready;
5589 if (kvm->arch.mmu_ready) {
5590 kvm->arch.mmu_ready = 0;
5593 if (atomic_read(&kvm->arch.vcpus_running)) {
5594 kvm->arch.mmu_ready = 1;
5610 uv_unregister_mem_slot(kvm->arch.lpid, memslot->id);
5615 ret = uv_svm_terminate(kvm->arch.lpid);
5632 spin_lock(&vcpu->arch.vpa_update_lock);
5633 unpin_vpa_reset(kvm, &vcpu->arch.dtl);
5634 unpin_vpa_reset(kvm, &vcpu->arch.slb_shadow);
5635 unpin_vpa_reset(kvm, &vcpu->arch.vpa);
5636 spin_unlock(&vcpu->arch.vpa_update_lock);
5640 kvm->arch.secure_guest = 0;
5641 kvm->arch.mmu_ready = mmu_was_ready;
5643 mutex_unlock(&kvm->arch.mmu_setup_lock);
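
A recurring pattern in the matches above (lines 319-339, 708-711, 2524-2527) is per-vCPU stolen-time bookkeeping: a busy_preempt timestamp with a TB_NIL sentinel and a busy_stolen accumulator, both guarded by arch.tbacct_lock. The following is a minimal user-space sketch of that pattern only, assuming a pthread mutex in place of the kernel spinlock, CLOCK_MONOTONIC in place of the timebase register, and illustrative struct and function names throughout; it is not the kernel's implementation.

    /* Sketch of the tbacct stolen-time pattern; all names illustrative. */
    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    #define TB_NIL ((uint64_t)-1)    /* sentinel: "not currently preempted" */

    struct vcpu_acct {
            pthread_mutex_t tbacct_lock;
            int busy_in_host;        /* models state == KVMPPC_VCPU_BUSY_IN_HOST */
            uint64_t busy_preempt;   /* timestamp at preemption, or TB_NIL */
            uint64_t busy_stolen;    /* accumulated stolen time */
    };

    static uint64_t read_tb(void)    /* stand-in for mftb() */
    {
            struct timespec ts;
            clock_gettime(CLOCK_MONOTONIC, &ts);
            return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
    }

    /* vCPU preempted while busy in the host: stamp the preemption time */
    static void vcpu_preempt(struct vcpu_acct *v)
    {
            pthread_mutex_lock(&v->tbacct_lock);
            if (v->busy_in_host)
                    v->busy_preempt = read_tb();
            pthread_mutex_unlock(&v->tbacct_lock);
    }

    /* vCPU scheduled back in: fold the interval into busy_stolen */
    static void vcpu_sched_in(struct vcpu_acct *v)
    {
            pthread_mutex_lock(&v->tbacct_lock);
            if (v->busy_in_host && v->busy_preempt != TB_NIL) {
                    v->busy_stolen += read_tb() - v->busy_preempt;
                    v->busy_preempt = TB_NIL;
            }
            pthread_mutex_unlock(&v->tbacct_lock);
    }

    /* consumer side: drain the accumulator, as around line 708 */
    static uint64_t vcpu_collect_stolen(struct vcpu_acct *v)
    {
            uint64_t stolen;

            pthread_mutex_lock(&v->tbacct_lock);
            stolen = v->busy_stolen;
            v->busy_stolen = 0;
            pthread_mutex_unlock(&v->tbacct_lock);
            return stolen;
    }

    int main(void)
    {
            struct vcpu_acct v = {
                    .tbacct_lock = PTHREAD_MUTEX_INITIALIZER,
                    .busy_in_host = 1,
                    .busy_preempt = TB_NIL,
            };

            vcpu_preempt(&v);
            vcpu_sched_in(&v);
            printf("stolen: %llu ns\n",
                   (unsigned long long)vcpu_collect_stolen(&v));
            return 0;
    }

The TB_NIL sentinel is what lets the sched-in path (line 322 above) distinguish "was preempted while busy" from "never preempted", so intervals are never double-counted; the lock makes the stamp-then-accumulate sequence atomic against the consumer draining busy_stolen.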