Lines Matching refs:arch
14 * This file is derived from arch/powerpc/kvm/book3s.c,
243 cpu = READ_ONCE(vcpu->arch.thread_cpu);
280 * Updates to busy_stolen are protected by arch.tbacct_lock;
320 struct kvmppc_vcore *vc = vcpu->arch.vcore;
325 if (vcpu->arch.busy_preempt != TB_NIL) {
326 WARN_ON_ONCE(vcpu->arch.state != KVMPPC_VCPU_BUSY_IN_HOST);
327 vc->stolen_tb += mftb() - vcpu->arch.busy_preempt;
328 vcpu->arch.busy_preempt = TB_NIL;
344 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
345 if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST &&
346 vcpu->arch.busy_preempt != TB_NIL) {
347 vcpu->arch.busy_stolen += now - vcpu->arch.busy_preempt;
348 vcpu->arch.busy_preempt = TB_NIL;
350 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
355 struct kvmppc_vcore *vc = vcpu->arch.vcore;
364 WARN_ON_ONCE(vcpu->arch.state == KVMPPC_VCPU_RUNNABLE);
370 vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
371 vcpu->arch.busy_preempt = mftb();
380 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
381 if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
382 vcpu->arch.busy_preempt = now;
383 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
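
The lines around 280-383 above all implement one convention: while a vcpu is busy in the host, the interval between being preempted and running again is accumulated into busy_stolen, with busy_preempt holding the start timestamp (or TB_NIL when no preemption is in flight), all under tbacct_lock. The following is a minimal user-space sketch of that accounting pattern only; the struct, the pthread mutex, and the function names are stand-ins for the kernel's vcpu->arch fields, spinlock, and timebase reads, not the actual implementation.

#include <pthread.h>
#include <stdint.h>

#define TB_NIL (~(uint64_t)0)		/* "no preemption in progress" marker */

struct vcpu_acct {
	pthread_mutex_t tbacct_lock;	/* protects the two fields below */
	uint64_t busy_stolen;		/* time stolen while busy in the host */
	uint64_t busy_preempt;		/* when preemption began, or TB_NIL */
};

/* Preempted while busy in the host: remember when the stolen interval began. */
static void start_stolen(struct vcpu_acct *a, uint64_t now)
{
	pthread_mutex_lock(&a->tbacct_lock);
	a->busy_preempt = now;
	pthread_mutex_unlock(&a->tbacct_lock);
}

/* Running again: fold the preempted interval into busy_stolen. */
static void end_stolen(struct vcpu_acct *a, uint64_t now)
{
	pthread_mutex_lock(&a->tbacct_lock);
	if (a->busy_preempt != TB_NIL) {
		a->busy_stolen += now - a->busy_preempt;
		a->busy_preempt = TB_NIL;
	}
	pthread_mutex_unlock(&a->tbacct_lock);
}
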
388 vcpu->arch.pvr = pvr;
397 struct kvmppc_vcore *vc = vcpu->arch.vcore;
458 vcpu->arch.regs.nip, vcpu->arch.shregs.msr, vcpu->arch.trap);
464 vcpu->arch.regs.ctr, vcpu->arch.regs.link);
466 vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1);
468 vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
470 vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
472 vcpu->arch.regs.ccr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr);
473 pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
475 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
476 pr_err("SLB (%d entries):\n", vcpu->arch.slb_max);
477 for (r = 0; r < vcpu->arch.slb_max; ++r)
479 vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
481 vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1,
482 vcpu->arch.last_inst);
502 spin_lock(&vcpu->arch.vpa_update_lock);
508 spin_unlock(&vcpu->arch.vpa_update_lock);
571 spin_lock(&tvcpu->arch.vpa_update_lock);
584 vpap = &tvcpu->arch.vpa;
595 if (!vpa_is_registered(&tvcpu->arch.vpa))
598 vpap = &tvcpu->arch.dtl;
605 if (!vpa_is_registered(&tvcpu->arch.vpa))
608 vpap = &tvcpu->arch.slb_shadow;
615 if (vpa_is_registered(&tvcpu->arch.dtl) ||
616 vpa_is_registered(&tvcpu->arch.slb_shadow))
619 vpap = &tvcpu->arch.vpa;
624 vpap = &tvcpu->arch.dtl;
629 vpap = &tvcpu->arch.slb_shadow;
640 spin_unlock(&tvcpu->arch.vpa_update_lock);
662 spin_unlock(&vcpu->arch.vpa_update_lock);
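
Lines 595-616 encode an ordering rule for H_REGISTER_VPA: the DTL and SLB shadow areas can only be registered while a VPA is registered, and the VPA cannot be unregistered while either dependent area is still registered. A small sketch of that rule, with illustrative field and helper names (the real hcall returns H_RESOURCE in these cases):

#include <stdbool.h>
#include <errno.h>

struct vpa_state {
	bool vpa_registered;
	bool dtl_registered;
	bool slb_shadow_registered;
};

static int register_dtl(struct vpa_state *s)
{
	if (!s->vpa_registered)
		return -EINVAL;		/* H_RESOURCE in the hcall ABI */
	s->dtl_registered = true;
	return 0;
}

static int unregister_vpa(struct vpa_state *s)
{
	if (s->dtl_registered || s->slb_shadow_registered)
		return -EINVAL;		/* refuse while dependents remain */
	s->vpa_registered = false;
	return 0;
}
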
667 spin_lock(&vcpu->arch.vpa_update_lock);
697 if (!(vcpu->arch.vpa.update_pending ||
698 vcpu->arch.slb_shadow.update_pending ||
699 vcpu->arch.dtl.update_pending))
702 spin_lock(&vcpu->arch.vpa_update_lock);
703 if (vcpu->arch.vpa.update_pending) {
704 kvmppc_update_vpa(vcpu, &vcpu->arch.vpa);
705 if (vcpu->arch.vpa.pinned_addr)
706 init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
708 if (vcpu->arch.dtl.update_pending) {
709 kvmppc_update_vpa(vcpu, &vcpu->arch.dtl);
710 vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
711 vcpu->arch.dtl_index = 0;
713 if (vcpu->arch.slb_shadow.update_pending)
714 kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow);
715 spin_unlock(&vcpu->arch.vpa_update_lock);
745 dt = vcpu->arch.dtl_ptr;
752 dt->processor_id = cpu_to_be16(pcpu + vcpu->arch.ptid);
759 dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr);
762 if (dt == vcpu->arch.dtl.pinned_end)
763 dt = vcpu->arch.dtl.pinned_addr;
764 vcpu->arch.dtl_ptr = dt;
767 vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index);
769 /* vcpu->arch.dtl.dirty is set by the caller */
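
Lines 745-767 show the dispatch-trace-log append: the write pointer wraps from pinned_end back to pinned_addr, and a monotonically increasing index is published (vpa->dtl_idx) so the guest can detect overwritten entries. A minimal model of that ring-buffer step, with simplified structure names:

#include <stdint.h>

struct dtl_entry { uint64_t timebase; uint16_t processor_id; };

struct dtl_ring {
	struct dtl_entry *base, *end;	/* pinned_addr / pinned_end */
	struct dtl_entry *ptr;		/* next slot to write */
	uint64_t index;			/* total entries ever written */
};

static void dtl_append(struct dtl_ring *r, const struct dtl_entry *e)
{
	*r->ptr = *e;
	if (++r->ptr == r->end)		/* wrap the write pointer */
		r->ptr = r->base;
	r->index++;			/* published as vpa->dtl_idx */
}
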
781 vpa = vcpu->arch.vpa.pinned_addr;
788 stolen = core_stolen - vcpu->arch.stolen_logged;
789 vcpu->arch.stolen_logged = core_stolen;
790 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
791 stolen += vcpu->arch.busy_stolen;
792 vcpu->arch.busy_stolen = 0;
793 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
799 vcpu->arch.vpa.dirty = true;
810 vpa = vcpu->arch.vpa.pinned_addr;
815 stolen_delta = stolen - vcpu->arch.stolen_logged;
816 vcpu->arch.stolen_logged = stolen;
822 vcpu->arch.vpa.dirty = true;
831 if (vcpu->arch.doorbell_request)
841 vc = vcpu->arch.vcore;
848 if (vcpu->arch.vcore->arch_compat >= PVR_ARCH_207)
850 if ((!vcpu->arch.vcore->arch_compat) &&
892 if (!vcpu->kvm->arch.dawr1_enabled)
991 struct kvmppc_vcore *vcore = target->arch.vcore;
1006 if (target->arch.state == KVMPPC_VCPU_RUNNABLE &&
1021 spin_lock(&vcpu->arch.vpa_update_lock);
1022 lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr;
1025 spin_unlock(&vcpu->arch.vpa_update_lock);
1050 do_h_rpt_invalidate_prt(pid, vcpu->arch.nested->shadow_lpid,
1086 do_h_rpt_invalidate_prt(id, vcpu->kvm->arch.lpid,
1101 !test_bit(req/4, vcpu->kvm->arch.enabled_hcalls))
1160 tvcpu->arch.prodded = 1;
1162 if (tvcpu->arch.ceded)
1185 if (list_empty(&kvm->arch.rtas_tokens))
1271 if (!arch_get_random_seed_longs(&vcpu->arch.regs.gpr[4], 1))
1295 vcpu->arch.hcall_needed = 0;
1299 vcpu->arch.hcall_needed = 0;
1349 * Instead the kvm->arch.secure_guest flag is checked inside
1360 vcpu->arch.hcall_needed = 0;
1374 vcpu->arch.ceded = 1;
1376 if (vcpu->arch.prodded) {
1377 vcpu->arch.prodded = 0;
1379 vcpu->arch.ceded = 0;
1431 vcpu->run->debug.arch.address = kvmppc_get_pc(vcpu);
1450 nthreads = vcpu->kvm->arch.emul_smt_mode;
1491 thr = vcpu->vcpu_id & (kvm->arch.emul_smt_mode - 1);
1498 if (arg >= kvm->arch.emul_smt_mode)
1503 if (!tvcpu->arch.doorbell_request) {
1504 tvcpu->arch.doorbell_request = 1;
1512 vcpu->arch.vcore->dpdes = 0;
1513 vcpu->arch.doorbell_request = 0;
1544 if (!(vcpu->arch.hfscr_permitted & HFSCR_PM))
1554 if (!(vcpu->arch.hfscr_permitted & HFSCR_EBB))
1564 if (!(vcpu->arch.hfscr_permitted & HFSCR_TM))
1591 vcpu->arch.trap, kvmppc_get_pc(vcpu),
1592 vcpu->arch.shregs.msr);
1595 run->hw.hardware_exit_reason = vcpu->arch.trap;
1600 switch (vcpu->arch.trap) {
1604 vcpu->arch.trap = BOOK3S_INTERRUPT_HV_DECREMENTER;
1630 machine_check_print_event_info(&vcpu->arch.mce_evt, false, true);
1638 if (!vcpu->kvm->arch.fwnmi_enabled) {
1648 run->hw.hardware_exit_reason = vcpu->arch.trap;
1652 if (vcpu->arch.mce_evt.disposition == MCE_DISPOSITION_RECOVERED)
1714 vcpu->arch.hcall_needed = 1;
1733 unlikely(vcpu->arch.fault_dsisr == HDSISR_CANARY)) {
1749 if (!(vcpu->arch.fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT))) {
1752 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
1758 vsid = vcpu->kvm->arch.vrma_slb_v;
1760 vsid = vcpu->arch.fault_gpa;
1762 err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar,
1763 vsid, vcpu->arch.fault_dsisr, true);
1771 vcpu->arch.fault_dar, err);
1780 vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
1781 vcpu->arch.fault_dsisr = __kvmppc_get_msr_hv(vcpu) &
1791 vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
1796 if (!(vcpu->arch.fault_dsisr & SRR1_ISI_NOPT)) {
1798 vcpu->arch.fault_dsisr |
1805 vsid = vcpu->kvm->arch.vrma_slb_v;
1807 vsid = vcpu->arch.fault_gpa;
1809 err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar,
1810 vsid, vcpu->arch.fault_dsisr, false);
1831 if (vcpu->arch.emul_inst != KVM_INST_FETCH_FAILED)
1832 vcpu->arch.last_inst = kvmppc_need_byteswap(vcpu) ?
1833 swab32(vcpu->arch.emul_inst) :
1834 vcpu->arch.emul_inst;
1893 vcpu->arch.trap, kvmppc_get_pc(vcpu),
1895 run->hw.hardware_exit_reason = vcpu->arch.trap;
1921 vcpu->arch.trap, kvmppc_get_pc(vcpu),
1926 switch (vcpu->arch.trap) {
1943 vcpu->arch.trap = BOOK3S_INTERRUPT_HV_DECREMENTER;
1961 machine_check_print_event_info(&vcpu->arch.mce_evt, false, true);
1976 vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
1977 vcpu->arch.fault_dsisr = kvmppc_get_msr(vcpu) &
1980 vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
2001 u64 cause = vcpu->arch.hfscr >> 56;
2009 if (!(vcpu->arch.hfscr_permitted & (1UL << cause)) ||
2010 (vcpu->arch.nested_hfscr & (1UL << cause))) {
2012 vcpu->arch.trap = BOOK3S_INTERRUPT_H_EMUL_ASSIST;
2019 vcpu->arch.emul_inst = ppc_inst_val(pinst);
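
Lines 2001-2010 decode a facility-unavailable interrupt: HFSCR bits 56-63 carry the cause (the facility number), which is tested against the per-vcpu bitmap of permitted facilities. A sketch of just that bit test, assuming the usual one-bit-per-facility layout; the names here are illustrative:

#include <stdint.h>
#include <stdbool.h>

static bool facility_permitted(uint64_t hfscr, uint64_t hfscr_permitted)
{
	unsigned int cause = hfscr >> 56;	/* facility number, bits 56-63 */

	return (hfscr_permitted >> cause) & 1;	/* one permission bit per facility */
}
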
2032 vcpu->arch.trap = 0;
2068 sregs->pvr = vcpu->arch.pvr;
2069 for (i = 0; i < vcpu->arch.slb_max; i++) {
2070 sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige;
2071 sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
2083 if (sregs->pvr != vcpu->arch.pvr)
2087 for (i = 0; i < vcpu->arch.slb_nr; i++) {
2089 vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe;
2090 vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv;
2094 vcpu->arch.slb_max = j;
2146 struct kvmppc_vcore *vc = vcpu->arch.vcore;
2176 if (vcpu->arch.vcore != vc)
2179 vcpu->arch.intr_msr |= MSR_LE;
2181 vcpu->arch.intr_msr &= ~MSR_LE;
2204 *val = get_reg_val(id, vcpu->arch.dabr);
2207 *val = get_reg_val(id, vcpu->arch.dabrx);
2235 *val = get_reg_val(id, vcpu->arch.mmcrs);
2246 *val = get_reg_val(id, vcpu->arch.spmc[i]);
2277 *val = get_reg_val(id, vcpu->arch.doorbell_request);
2279 *val = get_reg_val(id, vcpu->arch.vcore->dpdes);
2282 *val = get_reg_val(id, vcpu->arch.vcore->vtb);
2300 *val = get_reg_val(id, vcpu->arch.csigr);
2303 *val = get_reg_val(id, vcpu->arch.tacr);
2306 *val = get_reg_val(id, vcpu->arch.tcscr);
2309 *val = get_reg_val(id, vcpu->arch.pid);
2312 *val = get_reg_val(id, vcpu->arch.acop);
2318 *val = get_reg_val(id, vcpu->arch.tid);
2321 *val = get_reg_val(id, vcpu->arch.psscr);
2324 spin_lock(&vcpu->arch.vpa_update_lock);
2325 *val = get_reg_val(id, vcpu->arch.vpa.next_gpa);
2326 spin_unlock(&vcpu->arch.vpa_update_lock);
2329 spin_lock(&vcpu->arch.vpa_update_lock);
2330 val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa;
2331 val->vpaval.length = vcpu->arch.slb_shadow.len;
2332 spin_unlock(&vcpu->arch.vpa_update_lock);
2335 spin_lock(&vcpu->arch.vpa_update_lock);
2336 val->vpaval.addr = vcpu->arch.dtl.next_gpa;
2337 val->vpaval.length = vcpu->arch.dtl.len;
2338 spin_unlock(&vcpu->arch.vpa_update_lock);
2341 *val = get_reg_val(id, vcpu->arch.vcore->tb_offset);
2345 *val = get_reg_val(id, vcpu->arch.vcore->lpcr);
2352 *val = get_reg_val(id, vcpu->arch.tfhar);
2355 *val = get_reg_val(id, vcpu->arch.tfiar);
2358 *val = get_reg_val(id, vcpu->arch.texasr);
2362 *val = get_reg_val(id, vcpu->arch.gpr_tm[i]);
2370 val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j];
2373 val->vval = vcpu->arch.vr_tm.vr[i-32];
2380 *val = get_reg_val(id, vcpu->arch.cr_tm);
2383 *val = get_reg_val(id, vcpu->arch.xer_tm);
2386 *val = get_reg_val(id, vcpu->arch.lr_tm);
2389 *val = get_reg_val(id, vcpu->arch.ctr_tm);
2392 *val = get_reg_val(id, vcpu->arch.fp_tm.fpscr);
2395 *val = get_reg_val(id, vcpu->arch.amr_tm);
2398 *val = get_reg_val(id, vcpu->arch.ppr_tm);
2401 *val = get_reg_val(id, vcpu->arch.vrsave_tm);
2405 *val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]);
2410 *val = get_reg_val(id, vcpu->arch.dscr_tm);
2413 *val = get_reg_val(id, vcpu->arch.tar_tm);
2417 *val = get_reg_val(id, vcpu->arch.vcore->arch_compat);
2420 *val = get_reg_val(id, vcpu->arch.dec_expires);
2423 *val = get_reg_val(id, vcpu->arch.online);
2426 *val = get_reg_val(id, vcpu->kvm->arch.l1_ptcr);
2453 vcpu->arch.dabr = set_reg_val(id, *val);
2456 vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP;
2484 vcpu->arch.mmcrs = set_reg_val(id, *val);
2487 vcpu->arch.mmcr[3] = set_reg_val(id, *val);
2495 vcpu->arch.spmc[i] = set_reg_val(id, *val);
2520 vcpu->arch.doorbell_request = set_reg_val(id, *val) & 1;
2522 vcpu->arch.vcore->dpdes = set_reg_val(id, *val);
2525 vcpu->arch.vcore->vtb = set_reg_val(id, *val);
2546 vcpu->arch.csigr = set_reg_val(id, *val);
2549 vcpu->arch.tacr = set_reg_val(id, *val);
2552 vcpu->arch.tcscr = set_reg_val(id, *val);
2555 vcpu->arch.pid = set_reg_val(id, *val);
2558 vcpu->arch.acop = set_reg_val(id, *val);
2564 vcpu->arch.tid = set_reg_val(id, *val);
2567 vcpu->arch.psscr = set_reg_val(id, *val) & PSSCR_GUEST_VIS;
2572 if (!addr && (vcpu->arch.slb_shadow.next_gpa ||
2573 vcpu->arch.dtl.next_gpa))
2575 r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca));
2581 if (addr && !vcpu->arch.vpa.next_gpa)
2583 r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len);
2590 !vcpu->arch.vpa.next_gpa))
2593 r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
2608 if (!vcpu->arch.dec_expires && tb_offset)
2609 vcpu->arch.dec_expires = get_tb() + tb_offset;
2611 vcpu->arch.vcore->tb_offset = tb_offset;
2625 vcpu->arch.tfhar = set_reg_val(id, *val);
2628 vcpu->arch.tfiar = set_reg_val(id, *val);
2631 vcpu->arch.texasr = set_reg_val(id, *val);
2635 vcpu->arch.gpr_tm[i] = set_reg_val(id, *val);
2643 vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j];
2646 vcpu->arch.vr_tm.vr[i-32] = val->vval;
2652 vcpu->arch.cr_tm = set_reg_val(id, *val);
2655 vcpu->arch.xer_tm = set_reg_val(id, *val);
2658 vcpu->arch.lr_tm = set_reg_val(id, *val);
2661 vcpu->arch.ctr_tm = set_reg_val(id, *val);
2664 vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val);
2667 vcpu->arch.amr_tm = set_reg_val(id, *val);
2670 vcpu->arch.ppr_tm = set_reg_val(id, *val);
2673 vcpu->arch.vrsave_tm = set_reg_val(id, *val);
2677 vcpu->arch.vr.vscr.u[3] = set_reg_val(id, *val);
2682 vcpu->arch.dscr_tm = set_reg_val(id, *val);
2685 vcpu->arch.tar_tm = set_reg_val(id, *val);
2692 vcpu->arch.dec_expires = set_reg_val(id, *val);
2696 if (i && !vcpu->arch.online)
2697 atomic_inc(&vcpu->arch.vcore->online_count);
2698 else if (!i && vcpu->arch.online)
2699 atomic_dec(&vcpu->arch.vcore->online_count);
2700 vcpu->arch.online = i;
2703 vcpu->kvm->arch.l1_ptcr = set_reg_val(id, *val);
2743 vcore->lpcr = kvm->arch.lpcr;
2757 {"vcpu_entry", offsetof(struct kvm_vcpu, arch.vcpu_entry)},
2758 {"guest_entry", offsetof(struct kvm_vcpu, arch.guest_entry)},
2759 {"in_guest", offsetof(struct kvm_vcpu, arch.in_guest)},
2760 {"guest_exit", offsetof(struct kvm_vcpu, arch.guest_exit)},
2761 {"vcpu_exit", offsetof(struct kvm_vcpu, arch.vcpu_exit)},
2762 {"hypercall", offsetof(struct kvm_vcpu, arch.hcall)},
2763 {"page_fault", offsetof(struct kvm_vcpu, arch.pg_fault)},
2765 {"rm_entry", offsetof(struct kvm_vcpu, arch.rm_entry)},
2766 {"rm_intr", offsetof(struct kvm_vcpu, arch.rm_intr)},
2767 {"rm_exit", offsetof(struct kvm_vcpu, arch.rm_exit)},
2768 {"guest", offsetof(struct kvm_vcpu, arch.guest_time)},
2769 {"cede", offsetof(struct kvm_vcpu, arch.cede_time)},
2913 vcpu->arch.shared = &vcpu->arch.shregs;
2920 vcpu->arch.shared_big_endian = true;
2922 vcpu->arch.shared_big_endian = false;
2935 spin_lock_init(&vcpu->arch.vpa_update_lock);
2936 spin_lock_init(&vcpu->arch.tbacct_lock);
2937 vcpu->arch.busy_preempt = TB_NIL;
2939 vcpu->arch.intr_msr = MSR_SF | MSR_ME;
2964 vcpu->arch.hfscr |= HFSCR_TM;
2966 vcpu->arch.hfscr_permitted = kvmppc_get_hfscr_hv(vcpu);
2975 vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
2977 init_waitqueue_head(&vcpu->arch.cpu_run);
2983 if (id >= (KVM_MAX_VCPUS * kvm->arch.emul_smt_mode)) {
2987 BUG_ON(kvm->arch.smt_mode != 1);
2991 core = id / kvm->arch.smt_mode;
2994 vcore = kvm->arch.vcores[core];
3005 id & ~(kvm->arch.smt_mode - 1));
3006 mutex_lock(&kvm->arch.mmu_setup_lock);
3007 kvm->arch.vcores[core] = vcore;
3008 kvm->arch.online_vcores++;
3009 mutex_unlock(&kvm->arch.mmu_setup_lock);
3020 vcpu->arch.vcore = vcore;
3021 vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid;
3022 vcpu->arch.thread_cpu = -1;
3023 vcpu->arch.prev_cpu = -1;
3025 vcpu->arch.cpu_type = KVM_CPU_3S_64;
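
Lines 2983-3021 split a vcpu id into a virtual-core number and a thread slot (ptid) using the VM's SMT mode: core = id / smt_mode, first_vcpuid = id & ~(smt_mode - 1), ptid = id - first_vcpuid. A sketch of that arithmetic, assuming smt_mode is a power of two (as it is here); the helper names are illustrative, and the kernel uses a different divisor for radix guests with emulated SMT:

static inline int vcpu_core(int vcpu_id, int smt_mode)
{
	return vcpu_id / smt_mode;		/* which kvmppc_vcore */
}

static inline int vcpu_ptid(int vcpu_id, int smt_mode)
{
	return vcpu_id & (smt_mode - 1);	/* thread slot within the vcore */
}
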
3058 if (!kvm->arch.online_vcores) {
3059 kvm->arch.smt_mode = smt_mode;
3060 kvm->arch.emul_smt_mode = esmt;
3077 spin_lock(&vcpu->arch.vpa_update_lock);
3078 unpin_vpa(vcpu->kvm, &vcpu->arch.dtl);
3079 unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow);
3080 unpin_vpa(vcpu->kvm, &vcpu->arch.vpa);
3081 spin_unlock(&vcpu->arch.vpa_update_lock);
3102 hrtimer_start(&vcpu->arch.dec_timer, dec_nsec, HRTIMER_MODE_REL);
3103 vcpu->arch.timer_running = 1;
3113 if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
3115 spin_lock_irq(&vcpu->arch.tbacct_lock);
3117 vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) -
3118 vcpu->arch.stolen_logged;
3119 vcpu->arch.busy_preempt = now;
3120 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
3121 spin_unlock_irq(&vcpu->arch.tbacct_lock);
3123 WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], NULL);
3175 struct kvm_nested_guest *nested = vcpu->arch.nested;
3182 need_tlb_flush = &kvm->arch.need_tlb_flush;
3220 if (kvm->arch.lpcr & LPCR_GTSE)
3228 struct kvm_nested_guest *nested = vcpu->arch.nested;
3236 prev_cpu = nested->prev_cpu[vcpu->arch.nested_vcpu_id];
3238 prev_cpu = vcpu->arch.prev_cpu;
3261 nested->prev_cpu[vcpu->arch.nested_vcpu_id] = pcpu;
3263 vcpu->arch.prev_cpu = pcpu;
3274 if (vcpu->arch.timer_running) {
3275 hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
3276 vcpu->arch.timer_running = 0;
3278 cpu += vcpu->arch.ptid;
3280 vcpu->arch.thread_cpu = cpu;
3520 if (signal_pending(vcpu->arch.run_task))
3521 vcpu->arch.ret = -EINTR;
3522 else if (vcpu->arch.vpa.update_pending ||
3523 vcpu->arch.slb_shadow.update_pending ||
3524 vcpu->arch.dtl.update_pending)
3525 vcpu->arch.ret = RESUME_GUEST;
3529 wake_up(&vcpu->arch.cpu_run);
3543 if (!pvc->n_runnable || !pvc->kvm->arch.mmu_ready) {
3572 if (!vc->kvm->arch.mmu_ready)
3575 if (signal_pending(vcpu->arch.run_task))
3595 * so any vcpus becoming runnable will have their arch.trap
3607 if (vcpu->arch.trap)
3609 vcpu->arch.run_task);
3611 vcpu->arch.ret = ret;
3612 vcpu->arch.trap = 0;
3615 if (is_kvmppc_resume_guest(vcpu->arch.ret)) {
3616 if (vcpu->arch.pending_exceptions)
3618 if (vcpu->arch.ceded)
3624 wake_up(&vcpu->arch.cpu_run);
3640 wake_up(&vcpu->arch.cpu_run);
3738 if (vc->runner->arch.state != KVMPPC_VCPU_RUNNABLE)
3762 vcpu->arch.ret = -EBUSY;
3764 wake_up(&vcpu->arch.cpu_run);
3896 * It updates vcpu->cpu and vcpu->arch.thread_cpu
3905 if (!vcpu->arch.ptid)
3907 active |= 1 << (thr + vcpu->arch.ptid);
4038 struct lppaca *lp = vcpu->arch.vpa.pinned_addr;
4042 vcpu->arch.vpa.dirty = 1;
4049 struct kvmppc_vcore *vc = vcpu->arch.vcore;
4076 if (vcpu->arch.psscr != host_psscr)
4077 mtspr(SPRN_PSSCR_PR, vcpu->arch.psscr);
4082 vcpu->arch.regs.msr = vcpu->arch.shregs.msr;
4084 if (vcpu->arch.nested) {
4085 hvregs.lpid = vcpu->arch.nested->shadow_lpid;
4086 hvregs.vcpu_token = vcpu->arch.nested_vcpu_id;
4088 hvregs.lpid = vcpu->kvm->arch.lpid;
4109 mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
4110 mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
4112 accumulate_time(vcpu, &vcpu->arch.in_guest);
4114 __pa(&vcpu->arch.regs));
4115 accumulate_time(vcpu, &vcpu->arch.guest_exit);
4118 vcpu->arch.shregs.msr = vcpu->arch.regs.msr;
4119 vcpu->arch.shregs.dar = mfspr(SPRN_DAR);
4120 vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR);
4121 vcpu->arch.psscr = mfspr(SPRN_PSSCR_PR);
4129 vcpu->arch.dec_expires = dec + (*tb + vc->tb_offset);
4134 if (vcpu->arch.psscr != host_psscr)
4147 struct kvm_nested_guest *nested = vcpu->arch.nested;
4159 vcpu->arch.ceded = 0;
4203 vcpu->arch.ceded = 0;
4235 vcpu->arch.slb_max = 0;
4252 prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state);
4253 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
4258 finish_wait(&vcpu->arch.cpu_run, &wait);
4284 return vcpu->arch.irq_pending || vcpu->arch.xive_saved_state.pipr <
4285 vcpu->arch.xive_saved_state.cppr;
4296 if (vcpu->arch.pending_exceptions || vcpu->arch.prodded ||
4305 if (!vcpu->arch.ceded || kvmppc_vcpu_woken(vcpu))
4450 mutex_lock(&kvm->arch.mmu_setup_lock);
4451 if (!kvm->arch.mmu_ready) {
4457 kvm->arch.mmu_ready = 1;
4460 mutex_unlock(&kvm->arch.mmu_setup_lock);
4474 vcpu->arch.ret = RESUME_GUEST;
4475 vcpu->arch.trap = 0;
4481 vc = vcpu->arch.vcore;
4483 vcpu->arch.ceded = 0;
4484 vcpu->arch.run_task = current;
4485 vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
4486 vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
4487 vcpu->arch.busy_preempt = TB_NIL;
4488 WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], vcpu);
4509 while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
4512 if (!vcpu->kvm->arch.mmu_ready) {
4520 vcpu->arch.ret = r;
4534 if (signal_pending(v->arch.run_task)) {
4538 v->arch.ret = -EINTR;
4539 wake_up(&v->arch.cpu_run);
4542 if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
4547 n_ceded += v->arch.ceded;
4549 v->arch.ceded = 0;
4566 while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
4575 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
4579 vcpu->arch.ret = -EINTR;
4586 wake_up(&v->arch.cpu_run);
4591 return vcpu->arch.ret;
4603 struct kvm_nested_guest *nested = vcpu->arch.nested;
4610 vcpu->arch.ret = RESUME_GUEST;
4611 vcpu->arch.trap = 0;
4613 vc = vcpu->arch.vcore;
4614 vcpu->arch.ceded = 0;
4615 vcpu->arch.run_task = current;
4616 vcpu->arch.last_inst = KVM_INST_FETCH_FAILED;
4619 if (unlikely(!kvm->arch.mmu_ready)) {
4624 vcpu->arch.ret = r;
4642 vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
4646 if (need_resched() || !kvm->arch.mmu_ready)
4650 vcpu->arch.thread_cpu = pcpu;
4667 &vcpu->arch.pending_exceptions) ||
4680 } else if (vcpu->arch.pending_exceptions ||
4681 vcpu->arch.doorbell_request ||
4683 vcpu->arch.ret = RESUME_HOST;
4687 if (vcpu->arch.timer_running) {
4688 hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
4689 vcpu->arch.timer_running = 0;
4706 vcpu->arch.trap = trap;
4716 vcpu->arch.thread_cpu = -1;
4717 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
4756 vcpu->arch.ret = r;
4767 vcpu->arch.ret = -EINTR;
4780 vcpu->arch.ceded = 0;
4785 return vcpu->arch.ret;
4790 vcpu->arch.ret = -EINTR;
4793 vcpu->arch.thread_cpu = -1;
4794 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
4808 start_timing(vcpu, &vcpu->arch.vcpu_entry);
4810 if (!vcpu->arch.sane) {
4840 if (!vcpu->arch.online) {
4841 atomic_inc(&vcpu->arch.vcore->online_count);
4842 vcpu->arch.online = 1;
4848 atomic_inc(&kvm->arch.vcpus_running);
4870 vcpu->arch.waitp = &vcpu->arch.vcore->wait;
4871 vcpu->arch.pgdir = kvm->mm->pgd;
4872 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
4875 accumulate_time(vcpu, &vcpu->arch.guest_entry);
4878 vcpu->arch.vcore->lpcr);
4883 accumulate_time(vcpu, &vcpu->arch.hcall);
4900 accumulate_time(vcpu, &vcpu->arch.pg_fault);
4903 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
4912 accumulate_time(vcpu, &vcpu->arch.vcpu_exit);
4914 vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
4915 atomic_dec(&kvm->arch.vcpus_running);
5027 spin_lock(&vcpu->arch.vpa_update_lock);
5028 kvmppc_harvest_vpa_dirty(&vcpu->arch.vpa, memslot, buf);
5029 kvmppc_harvest_vpa_dirty(&vcpu->arch.dtl, memslot, buf);
5030 spin_unlock(&vcpu->arch.vpa_update_lock);
5045 vfree(slot->arch.rmap);
5046 slot->arch.rmap = NULL;
5055 unsigned long size = array_size(new->npages, sizeof(*new->arch.rmap));
5060 new->arch.rmap = vzalloc(size);
5061 if (!new->arch.rmap)
5064 new->arch.rmap = old->arch.rmap;
5082 atomic64_inc(&kvm->arch.mmio_update);
5103 if (!kvm->arch.secure_guest)
5124 * Update LPCR values in kvm->arch and in vcores.
5125 * Caller must hold kvm->arch.mmu_setup_lock (for mutual exclusion
5126 * of kvm->arch.lpcr update).
5133 if ((kvm->arch.lpcr & mask) == lpcr)
5136 kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr;
5139 struct kvmppc_vcore *vc = kvm->arch.vcores[i];
5147 if (++cores_done >= kvm->arch.online_vcores)
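
Lines 5133-5147 perform a masked read-modify-write of LPCR under mmu_setup_lock: only the bits in 'mask' are replaced, both in kvm->arch.lpcr and in each online vcore, and the vcore walk stops early once every online vcore has been updated. A minimal sketch with illustrative types (no locking shown):

#include <stdint.h>

#define MAX_VCORES 8

struct vcore_model { uint64_t lpcr; };

struct kvm_model {
	uint64_t lpcr;
	int online_vcores;
	struct vcore_model *vcores[MAX_VCORES];
};

static void update_lpcr(struct kvm_model *kvm, uint64_t lpcr, uint64_t mask)
{
	int i, cores_done = 0;

	if ((kvm->lpcr & mask) == lpcr)
		return;					/* nothing to change */

	kvm->lpcr = (kvm->lpcr & ~mask) | lpcr;		/* replace only masked bits */

	for (i = 0; i < MAX_VCORES; i++) {
		struct vcore_model *vc = kvm->vcores[i];

		if (!vc)
			continue;
		vc->lpcr = (vc->lpcr & ~mask) | lpcr;
		if (++cores_done >= kvm->online_vcores)
			break;				/* all online vcores done */
	}
}
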
5158 dw0 = ((kvm->arch.vrma_slb_v & SLB_VSID_L) >> 1) |
5159 ((kvm->arch.vrma_slb_v & SLB_VSID_LP) << 1);
5161 dw0 |= kvm->arch.sdr1;
5164 dw1 = kvm->arch.process_table;
5167 __pa(kvm->arch.pgtable) | RADIX_PGD_INDEX_SIZE;
5168 dw1 = PATB_GR | kvm->arch.process_table;
5170 kvmhv_set_ptbl_entry(kvm->arch.lpid, dw0, dw1);
5175 * Must be called with kvm->arch.mmu_setup_lock held.
5189 if (!kvm->arch.hpt.virt) {
5238 kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
5250 /* Order updates to kvm->arch.lpcr etc. vs. mmu_ready */
5264 * Must be called with kvm->arch.mmu_setup_lock held and
5274 kvm->arch.process_table = 0;
5277 kvm->arch.radix = 0;
5291 * Must be called with kvm->arch.mmu_setup_lock held and
5305 kvm->arch.radix = 1;
5307 kvmppc_free_hpt(&kvm->arch.hpt);
5314 (kvm->arch.host_lpcr & LPCR_HAIL))
5410 mutex_init(&kvm->arch.uvmem_lock);
5411 INIT_LIST_HEAD(&kvm->arch.uvmem_pfns);
5412 mutex_init(&kvm->arch.mmu_setup_lock);
5419 kvm->arch.lpid = lpid;
5433 cpumask_setall(&kvm->arch.need_tlb_flush);
5436 memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls,
5437 sizeof(kvm->arch.enabled_hcalls));
5440 kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);
5444 kvm->arch.host_lpid = mfspr(SPRN_LPID);
5445 kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
5456 kvm->arch.vrma_slb_v = SLB_VSID_B_1T |
5484 kvm->arch.radix = 1;
5485 kvm->arch.mmu_ready = 1;
5490 (kvm->arch.host_lpcr & LPCR_HAIL))
5494 kvmppc_free_lpid(kvm->arch.lpid);
5501 kvm->arch.lpcr = lpcr;
5504 kvm->arch.resize_hpt = NULL;
5514 kvm->arch.tlb_sets = 1;
5516 kvm->arch.tlb_sets = POWER9_TLB_SETS_RADIX; /* 128 */
5518 kvm->arch.tlb_sets = POWER9_TLB_SETS_HASH; /* 256 */
5520 kvm->arch.tlb_sets = POWER8_TLB_SETS; /* 512 */
5522 kvm->arch.tlb_sets = POWER7_TLB_SETS; /* 128 */
5539 kvm->arch.smt_mode = threads_per_subcore;
5541 kvm->arch.smt_mode = 1;
5542 kvm->arch.emul_smt_mode = 1;
5560 kfree(kvm->arch.vcores[i]);
5561 kvm->arch.online_vcores = 0;
5575 kvmppc_free_hpt(&kvm->arch.hpt);
5581 kvm->arch.process_table = 0;
5582 if (kvm->arch.secure_guest)
5583 uv_svm_terminate(kvm->arch.lpid);
5584 kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0);
5587 kvmppc_free_lpid(kvm->arch.lpid);
5628 kfree(kvm->arch.pimap);
5654 pimap = kvm->arch.pimap;
5662 kvm->arch.pimap = pimap;
5746 if (!kvm->arch.pimap)
5749 pimap = kvm->arch.pimap;
5959 mutex_lock(&kvm->arch.mmu_setup_lock);
5961 if (kvm->arch.mmu_ready) {
5962 kvm->arch.mmu_ready = 0;
5965 if (atomic_read(&kvm->arch.vcpus_running)) {
5966 kvm->arch.mmu_ready = 1;
5979 kvm->arch.process_table = cfg->process_table;
5987 mutex_unlock(&kvm->arch.mmu_setup_lock);
6002 kvm->arch.nested_enable = true;
6019 if (rc && vcpu->arch.nested)
6038 if (rc && vcpu->arch.nested)
6064 kvm->arch.svm_enabled = 1;
6084 if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
6087 mutex_lock(&kvm->arch.mmu_setup_lock);
6088 mmu_was_ready = kvm->arch.mmu_ready;
6089 if (kvm->arch.mmu_ready) {
6090 kvm->arch.mmu_ready = 0;
6093 if (atomic_read(&kvm->arch.vcpus_running)) {
6094 kvm->arch.mmu_ready = 1;
6111 uv_unregister_mem_slot(kvm->arch.lpid, memslot->id);
6116 ret = uv_svm_terminate(kvm->arch.lpid);
6133 spin_lock(&vcpu->arch.vpa_update_lock);
6134 unpin_vpa_reset(kvm, &vcpu->arch.dtl);
6135 unpin_vpa_reset(kvm, &vcpu->arch.slb_shadow);
6136 unpin_vpa_reset(kvm, &vcpu->arch.vpa);
6137 spin_unlock(&vcpu->arch.vpa_update_lock);
6141 kvm->arch.secure_guest = 0;
6142 kvm->arch.mmu_ready = mmu_was_ready;
6144 mutex_unlock(&kvm->arch.mmu_setup_lock);
6155 kvm->arch.dawr1_enabled = true;