Lines matching refs:vcpu (identifier cross-reference; each entry is the source line number followed by the matching line)

52 static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
55 static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac);
66 static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu)
68 ulong msr = kvmppc_get_msr(vcpu);
72 static void kvmppc_fixup_split_real(struct kvm_vcpu *vcpu)
74 ulong msr = kvmppc_get_msr(vcpu);
75 ulong pc = kvmppc_get_pc(vcpu);
82 if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK)
89 vcpu->arch.hflags |= BOOK3S_HFLAG_SPLIT_HACK;
90 kvmppc_set_pc(vcpu, pc | SPLIT_HACK_OFFS);
93 static void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu)
95 if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
96 ulong pc = kvmppc_get_pc(vcpu);
97 ulong lr = kvmppc_get_lr(vcpu);
99 kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK);
101 kvmppc_set_lr(vcpu, lr & ~SPLIT_HACK_MASK);
102 vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK;
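
Taken together, the three split-real helpers above implement a reversible PC offset. A minimal sketch of the round trip, assuming the SPLIT_HACK_* constants and the guard in the unfixup path, which are not among the matched lines:

	/* Sketch assembled from the matches above; constants are assumptions. */
	#define SPLIT_HACK_MASK		0xff000000
	#define SPLIT_HACK_OFFS		0xfb000000

	static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu)
	{
		ulong msr = kvmppc_get_msr(vcpu);

		/* "Split real": instruction relocation off, data relocation on. */
		return (msr & (MSR_IR | MSR_DR)) == MSR_DR;
	}

	static void kvmppc_fixup_split_real(struct kvm_vcpu *vcpu)
	{
		ulong pc = kvmppc_get_pc(vcpu);

		if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK)
			return;		/* already offset */

		/* Run the guest from an aliased high window instead of low memory. */
		vcpu->arch.hflags |= BOOK3S_HFLAG_SPLIT_HACK;
		kvmppc_set_pc(vcpu, pc | SPLIT_HACK_OFFS);
	}

	static void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu)
	{
		if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
			ulong pc = kvmppc_get_pc(vcpu);
			ulong lr = kvmppc_get_lr(vcpu);

			/* Strip the alias bits before the guest sees PC/LR again. */
			kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK);
			kvmppc_set_lr(vcpu, lr & ~SPLIT_HACK_MASK);
			vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK;
		}
	}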
106 static void kvmppc_inject_interrupt_pr(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
110 kvmppc_unfixup_split_real(vcpu);
112 msr = kvmppc_get_msr(vcpu);
113 pc = kvmppc_get_pc(vcpu);
114 new_msr = vcpu->arch.intr_msr;
115 new_pc = to_book3s(vcpu)->hior + vec;
125 kvmppc_set_srr0(vcpu, pc);
126 kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags);
127 kvmppc_set_pc(vcpu, new_pc);
128 kvmppc_set_msr(vcpu, new_msr);
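
The matches above cover nearly the whole of kvmppc_inject_interrupt_pr(); a sketch with the declarations filled in (the unmatched gap, e.g. any transactional-state adjustment of new_msr, is omitted):

	/* Sketch: gaps between the matches are omitted, not reconstructed. */
	static void kvmppc_inject_interrupt_pr(struct kvm_vcpu *vcpu, int vec,
					       u64 srr1_flags)
	{
		ulong msr, pc, new_msr, new_pc;

		kvmppc_unfixup_split_real(vcpu);

		msr = kvmppc_get_msr(vcpu);
		pc = kvmppc_get_pc(vcpu);
		new_msr = vcpu->arch.intr_msr;		/* MSR for interrupt entry */
		new_pc = to_book3s(vcpu)->hior + vec;	/* vector relative to guest HIOR */

		/* Classic PowerPC delivery: PC/MSR into SRR0/SRR1, then jump. */
		kvmppc_set_srr0(vcpu, pc);
		kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags);
		kvmppc_set_pc(vcpu, new_pc);
		kvmppc_set_msr(vcpu, new_msr);
	}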
131 static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
134 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
135 memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
136 svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
146 vcpu->cpu = smp_processor_id();
148 current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu;
151 if (kvmppc_is_split_real(vcpu))
152 kvmppc_fixup_split_real(vcpu);
154 kvmppc_restore_tm_pr(vcpu);
157 static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
160 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
162 kvmppc_copy_from_svcpu(vcpu);
164 memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
165 to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
169 if (kvmppc_is_split_real(vcpu))
170 kvmppc_unfixup_split_real(vcpu);
172 kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
173 kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
174 kvmppc_save_tm_pr(vcpu);
181 vcpu->cpu = -1;
184 /* Copy data needed by real-mode code from vcpu to shadow vcpu */
185 void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu)
187 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
189 svcpu->gpr[0] = vcpu->arch.regs.gpr[0];
190 svcpu->gpr[1] = vcpu->arch.regs.gpr[1];
191 svcpu->gpr[2] = vcpu->arch.regs.gpr[2];
192 svcpu->gpr[3] = vcpu->arch.regs.gpr[3];
193 svcpu->gpr[4] = vcpu->arch.regs.gpr[4];
194 svcpu->gpr[5] = vcpu->arch.regs.gpr[5];
195 svcpu->gpr[6] = vcpu->arch.regs.gpr[6];
196 svcpu->gpr[7] = vcpu->arch.regs.gpr[7];
197 svcpu->gpr[8] = vcpu->arch.regs.gpr[8];
198 svcpu->gpr[9] = vcpu->arch.regs.gpr[9];
199 svcpu->gpr[10] = vcpu->arch.regs.gpr[10];
200 svcpu->gpr[11] = vcpu->arch.regs.gpr[11];
201 svcpu->gpr[12] = vcpu->arch.regs.gpr[12];
202 svcpu->gpr[13] = vcpu->arch.regs.gpr[13];
203 svcpu->cr = vcpu->arch.regs.ccr;
204 svcpu->xer = vcpu->arch.regs.xer;
205 svcpu->ctr = vcpu->arch.regs.ctr;
206 svcpu->lr = vcpu->arch.regs.link;
207 svcpu->pc = vcpu->arch.regs.nip;
209 svcpu->shadow_fscr = vcpu->arch.shadow_fscr;
215 vcpu->arch.entry_tb = get_tb();
216 vcpu->arch.entry_vtb = get_vtb();
218 vcpu->arch.entry_ic = mfspr(SPRN_IC);
224 static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
226 ulong guest_msr = kvmppc_get_msr(vcpu);
239 smsr |= (guest_msr & vcpu->arch.guest_owned_ext);
253 vcpu->arch.shadow_msr = smsr;
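
Only three lines of kvmppc_recalc_shadow_msr() match; a hedged sketch of the whole computation, where the specific bit masks are assumptions rather than matched text:

	/* Sketch: the exact mask choices are assumptions. */
	static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
	{
		ulong guest_msr = kvmppc_get_msr(vcpu);
		ulong smsr = guest_msr;

		/* Bits the guest may control directly. */
		smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE;
		/* Bits the host always forces on while the guest runs. */
		smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
		/* FP/VEC/VSX stay enabled only while the guest "owns" them. */
		smsr |= (guest_msr & vcpu->arch.guest_owned_ext);

		vcpu->arch.shadow_msr = smsr;
	}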
256 /* Copy data touched by real-mode code from shadow vcpu back to vcpu */
257 void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu)
259 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
271 vcpu->arch.regs.gpr[0] = svcpu->gpr[0];
272 vcpu->arch.regs.gpr[1] = svcpu->gpr[1];
273 vcpu->arch.regs.gpr[2] = svcpu->gpr[2];
274 vcpu->arch.regs.gpr[3] = svcpu->gpr[3];
275 vcpu->arch.regs.gpr[4] = svcpu->gpr[4];
276 vcpu->arch.regs.gpr[5] = svcpu->gpr[5];
277 vcpu->arch.regs.gpr[6] = svcpu->gpr[6];
278 vcpu->arch.regs.gpr[7] = svcpu->gpr[7];
279 vcpu->arch.regs.gpr[8] = svcpu->gpr[8];
280 vcpu->arch.regs.gpr[9] = svcpu->gpr[9];
281 vcpu->arch.regs.gpr[10] = svcpu->gpr[10];
282 vcpu->arch.regs.gpr[11] = svcpu->gpr[11];
283 vcpu->arch.regs.gpr[12] = svcpu->gpr[12];
284 vcpu->arch.regs.gpr[13] = svcpu->gpr[13];
285 vcpu->arch.regs.ccr = svcpu->cr;
286 vcpu->arch.regs.xer = svcpu->xer;
287 vcpu->arch.regs.ctr = svcpu->ctr;
288 vcpu->arch.regs.link = svcpu->lr;
289 vcpu->arch.regs.nip = svcpu->pc;
290 vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
291 vcpu->arch.fault_dar = svcpu->fault_dar;
292 vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
293 vcpu->arch.last_inst = svcpu->last_inst;
295 vcpu->arch.shadow_fscr = svcpu->shadow_fscr;
300 vcpu->arch.purr += get_tb() - vcpu->arch.entry_tb;
301 vcpu->arch.spurr += get_tb() - vcpu->arch.entry_tb;
302 to_book3s(vcpu)->vtb += get_vtb() - vcpu->arch.entry_vtb;
304 vcpu->arch.ic += mfspr(SPRN_IC) - vcpu->arch.entry_ic;
318 old_msr = kvmppc_get_msr(vcpu);
320 (vcpu->arch.shadow_srr1 & (MSR_TS_MASK)) !=
323 old_msr |= (vcpu->arch.shadow_srr1 & (MSR_TS_MASK));
324 kvmppc_set_msr_fast(vcpu, old_msr);
325 kvmppc_recalc_shadow_msr(vcpu);
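
The tail of kvmppc_copy_from_svcpu() propagates the transaction-state (TS) bits that real-mode code left in shadow_srr1 back into the guest MSR. A sketch of the assembled logic; the MSR_TM guard is an assumption:

	/* Sketch: pull MSR[TS] out of shadow SRR1; the MSR_TM test is assumed. */
	old_msr = kvmppc_get_msr(vcpu);
	if ((old_msr & MSR_TM) &&
	    (vcpu->arch.shadow_srr1 & (MSR_TS_MASK)) !=
	    (old_msr & (MSR_TS_MASK))) {
		old_msr &= ~(MSR_TS_MASK);
		old_msr |= (vcpu->arch.shadow_srr1 & (MSR_TS_MASK));
		kvmppc_set_msr_fast(vcpu, old_msr);
		kvmppc_recalc_shadow_msr(vcpu);
	}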
336 void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu)
339 vcpu->arch.tfhar = mfspr(SPRN_TFHAR);
340 vcpu->arch.texasr = mfspr(SPRN_TEXASR);
341 vcpu->arch.tfiar = mfspr(SPRN_TFIAR);
345 void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu)
348 mtspr(SPRN_TFHAR, vcpu->arch.tfhar);
349 mtspr(SPRN_TEXASR, vcpu->arch.texasr);
350 mtspr(SPRN_TFIAR, vcpu->arch.tfiar);
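
Both TM SPR helpers are effectively complete in the matches; the only missing pieces are the enable/disable brackets around the SPR accesses, which are assumed below:

	/* Sketch: the tm_enable()/tm_disable() bracketing is an assumption. */
	void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu)
	{
		tm_enable();			/* TM SPRs trap unless TM is enabled */
		vcpu->arch.tfhar = mfspr(SPRN_TFHAR);
		vcpu->arch.texasr = mfspr(SPRN_TEXASR);
		vcpu->arch.tfiar = mfspr(SPRN_TFIAR);
		tm_disable();
	}

	void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu)
	{
		tm_enable();
		mtspr(SPRN_TFHAR, vcpu->arch.tfhar);
		mtspr(SPRN_TEXASR, vcpu->arch.texasr);
		mtspr(SPRN_TFIAR, vcpu->arch.tfiar);
		tm_disable();
	}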
357 static void kvmppc_handle_lost_math_exts(struct kvm_vcpu *vcpu)
360 ulong ext_diff = (kvmppc_get_msr(vcpu) & ~vcpu->arch.guest_owned_ext) &
373 kvmppc_handle_ext(vcpu, exit_nr, ext_diff);
376 void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu)
378 if (!(MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)))) {
379 kvmppc_save_tm_sprs(vcpu);
383 kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
384 kvmppc_giveup_ext(vcpu, MSR_VSX);
387 _kvmppc_save_tm_pr(vcpu, mfmsr());
391 void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu)
393 if (!MSR_TM_ACTIVE(kvmppc_get_msr(vcpu))) {
394 kvmppc_restore_tm_sprs(vcpu);
395 if (kvmppc_get_msr(vcpu) & MSR_TM) {
396 kvmppc_handle_lost_math_exts(vcpu);
397 if (vcpu->arch.fscr & FSCR_TAR)
398 kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
404 _kvmppc_restore_tm_pr(vcpu, kvmppc_get_msr(vcpu));
407 if (kvmppc_get_msr(vcpu) & MSR_TM) {
408 kvmppc_handle_lost_math_exts(vcpu);
409 if (vcpu->arch.fscr & FSCR_TAR)
410 kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
415 static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
421 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
422 kvmppc_mmu_pte_flush(vcpu, 0, 0);
432 struct kvm_vcpu *vcpu;
452 kvm_for_each_vcpu(i, vcpu, kvm)
453 kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT,
487 static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
492 if (vcpu->arch.papr_enabled)
505 kvmppc_emulate_tabort(vcpu,
509 old_msr = kvmppc_get_msr(vcpu);
510 msr &= to_book3s(vcpu)->msr_mask;
511 kvmppc_set_msr_fast(vcpu, msr);
512 kvmppc_recalc_shadow_msr(vcpu);
515 if (!vcpu->arch.pending_exceptions) {
516 kvm_vcpu_block(vcpu);
517 kvm_clear_request(KVM_REQ_UNHALT, vcpu);
518 vcpu->stat.halt_wakeup++;
522 kvmppc_set_msr_fast(vcpu, msr);
526 if (kvmppc_is_split_real(vcpu))
527 kvmppc_fixup_split_real(vcpu);
529 kvmppc_unfixup_split_real(vcpu);
531 if ((kvmppc_get_msr(vcpu) & (MSR_PR|MSR_IR|MSR_DR)) !=
533 kvmppc_mmu_flush_segments(vcpu);
534 kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
537 if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
538 struct kvm_vcpu_arch *a = &vcpu->arch;
541 kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
543 kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
555 if (vcpu->arch.magic_page_pa &&
558 kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
563 if (kvmppc_get_msr(vcpu) & MSR_FP)
564 kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
567 if (kvmppc_get_msr(vcpu) & MSR_TM)
568 kvmppc_handle_lost_math_exts(vcpu);
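
One detail of kvmppc_set_msr_pr() worth spelling out is the power-save path implied by the matches around kvm_vcpu_block(): writing MSR[POW] with no exceptions pending halts the vcpu. A sketch, with the surrounding MSR_POW guard assumed:

	/* Sketch: the MSR_POW guard around the matched lines is assumed. */
	if (msr & MSR_POW) {
		if (!vcpu->arch.pending_exceptions) {
			kvm_vcpu_block(vcpu);			/* sleep until woken */
			kvm_clear_request(KVM_REQ_UNHALT, vcpu);
			vcpu->stat.halt_wakeup++;

			/* Drop POW again after wakeup so the guest resumes. */
			msr &= ~MSR_POW;
			kvmppc_set_msr_fast(vcpu, msr);
		}
	}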
572 static void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr)
576 vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
577 vcpu->arch.pvr = pvr;
580 kvmppc_mmu_book3s_64_init(vcpu);
581 if (!to_book3s(vcpu)->hior_explicit)
582 to_book3s(vcpu)->hior = 0xfff00000;
583 to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
584 vcpu->arch.cpu_type = KVM_CPU_3S_64;
588 kvmppc_mmu_book3s_32_init(vcpu);
589 if (!to_book3s(vcpu)->hior_explicit)
590 to_book3s(vcpu)->hior = 0;
591 to_book3s(vcpu)->msr_mask = 0xffffffffULL;
592 vcpu->arch.cpu_type = KVM_CPU_3S_32;
595 kvmppc_sanity_check(vcpu);
599 vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
600 if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
602 vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
607 to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);
624 vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE |
631 vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
646 vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
661 static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
668 hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
688 static bool kvmppc_visible_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
690 ulong mp_pa = vcpu->arch.magic_page_pa;
692 if (!(kvmppc_get_msr(vcpu) & MSR_SF))
700 return kvm_is_visible_gfn(vcpu->kvm, gpa >> PAGE_SHIFT);
703 static int kvmppc_handle_pagefault(struct kvm_vcpu *vcpu,
712 bool dr = (kvmppc_get_msr(vcpu) & MSR_DR) ? true : false;
713 bool ir = (kvmppc_get_msr(vcpu) & MSR_IR) ? true : false;
717 if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE))
722 page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite);
734 switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) {
740 (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
745 vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
747 if ((kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) == MSR_DR)
758 if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
759 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
777 flags |= vcpu->arch.fault_dsisr & DSISR_ISSTORE;
778 kvmppc_core_queue_data_storage(vcpu, eaddr, flags);
780 kvmppc_core_queue_inst_storage(vcpu, flags);
784 kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
785 kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
786 } else if (kvmppc_visible_gpa(vcpu, pte.raddr)) {
787 if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
793 kvmppc_mmu_unmap_page(vcpu, &pte);
796 if (kvmppc_mmu_map_page(vcpu, &pte, iswrite) == -EIO) {
798 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
802 vcpu->stat.sp_storage++;
803 else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
804 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
805 kvmppc_patch_dcbz(vcpu, &pte);
808 vcpu->stat.mmio_exits++;
809 vcpu->arch.paddr_accessed = pte.raddr;
810 vcpu->arch.vaddr_accessed = pte.eaddr;
811 r = kvmppc_emulate_mmio(vcpu);
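
The matches sketch four outcomes of kvmppc_handle_pagefault() after vcpu->arch.mmu.xlate(). A condensed decision tree; the error constants in the branch conditions and DSISR_PROTFAULT are assumptions:

	/* Sketch of the post-xlate() branches; -ENOENT/-EPERM/-EINVAL assumed. */
	if (page_found == -ENOENT || page_found == -EPERM) {
		/* Guest PTE missing or protection fault: reflect a storage interrupt. */
		u64 flags = (page_found == -EPERM) ? DSISR_PROTFAULT : DSISR_NOHPTE;

		if (data) {
			flags |= vcpu->arch.fault_dsisr & DSISR_ISSTORE;
			kvmppc_core_queue_data_storage(vcpu, eaddr, flags);
		} else {
			kvmppc_core_queue_inst_storage(vcpu, flags);
		}
	} else if (page_found == -EINVAL) {
		/* No guest SLB entry: raise a segment interrupt (vec + 0x80). */
		kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
	} else if (kvmppc_visible_gpa(vcpu, pte.raddr)) {
		/* Backed by guest RAM: map it into the shadow MMU. */
		if (kvmppc_mmu_map_page(vcpu, &pte, iswrite) == -EIO)
			vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	} else {
		/* Not visible RAM: treat the access as MMIO and emulate it. */
		vcpu->stat.mmio_exits++;
		vcpu->arch.paddr_accessed = pte.raddr;
		vcpu->arch.vaddr_accessed = pte.eaddr;
		r = kvmppc_emulate_mmio(vcpu);
	}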
820 void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
831 msr &= vcpu->arch.guest_owned_ext;
858 vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX);
859 kvmppc_recalc_shadow_msr(vcpu);
863 void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac)
866 if (!(vcpu->arch.shadow_fscr & (1ULL << fac))) {
873 vcpu->arch.tar = mfspr(SPRN_TAR);
875 vcpu->arch.shadow_fscr &= ~FSCR_TAR;
882 static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
888 if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
891 if (!(kvmppc_get_msr(vcpu) & msr)) {
892 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
902 kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
914 msr &= ~vcpu->arch.guest_owned_ext;
925 load_fp_state(&vcpu->arch.fp);
927 t->fp_save_area = &vcpu->arch.fp;
935 load_vr_state(&vcpu->arch.vr);
937 t->vr_save_area = &vcpu->arch.vr;
943 vcpu->arch.guest_owned_ext |= msr;
944 kvmppc_recalc_shadow_msr(vcpu);
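
kvmppc_handle_ext() implements lazy FP/VEC/VSX switching: if the guest MSR does not enable the facility, the interrupt is reflected to the guest; otherwise the guest state is loaded onto the CPU. A condensed sketch of the FP branch only; the control flow around the matches, and enable_kernel_fp(), are assumptions:

	/* Sketch of the FP path; VEC follows the same pattern. */
	if (!(kvmppc_get_msr(vcpu) & msr)) {
		/* Facility disabled in the guest: let the guest handle it. */
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		return RESUME_GUEST;
	}

	msr &= ~vcpu->arch.guest_owned_ext;	/* skip what we already own */
	if (msr & MSR_FP) {
		preempt_disable();
		enable_kernel_fp();		/* assumed host-side enable */
		load_fp_state(&vcpu->arch.fp);
		current->thread.fp_save_area = &vcpu->arch.fp;
		preempt_enable();
	}

	vcpu->arch.guest_owned_ext |= msr;
	kvmppc_recalc_shadow_msr(vcpu);
	return RESUME_GUEST;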
953 static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
957 lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr;
964 load_fp_state(&vcpu->arch.fp);
972 load_vr_state(&vcpu->arch.vr);
982 void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac)
985 vcpu->arch.fscr &= ~(0xffULL << 56);
986 vcpu->arch.fscr |= (fac << 56);
987 kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL);
990 static void kvmppc_emulate_fac(struct kvm_vcpu *vcpu, ulong fac)
994 if (!(kvmppc_get_msr(vcpu) & MSR_PR))
995 er = kvmppc_emulate_instruction(vcpu);
999 kvmppc_trigger_fac_interrupt(vcpu, fac);
1004 static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac)
1016 guest_fac_enabled = (vcpu->arch.fscr & (1ULL << fac));
1019 guest_fac_enabled = kvmppc_get_msr(vcpu) & MSR_TM;
1028 kvmppc_trigger_fac_interrupt(vcpu, fac);
1036 mtspr(SPRN_TAR, vcpu->arch.tar);
1037 vcpu->arch.shadow_fscr |= FSCR_TAR;
1040 kvmppc_emulate_fac(vcpu, fac);
1052 if ((fac == FSCR_TM_LG) && !(kvmppc_get_msr(vcpu) & MSR_PR))
1059 void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr)
1061 if ((vcpu->arch.fscr & FSCR_TAR) && !(fscr & FSCR_TAR)) {
1063 kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
1064 } else if (!(vcpu->arch.fscr & FSCR_TAR) && (fscr & FSCR_TAR)) {
1065 vcpu->arch.fscr = fscr;
1066 kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
1070 vcpu->arch.fscr = fscr;
1074 static void kvmppc_setup_debug(struct kvm_vcpu *vcpu)
1076 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
1077 u64 msr = kvmppc_get_msr(vcpu);
1079 kvmppc_set_msr(vcpu, msr | MSR_SE);
1083 static void kvmppc_clear_debug(struct kvm_vcpu *vcpu)
1085 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
1086 u64 msr = kvmppc_get_msr(vcpu);
1088 kvmppc_set_msr(vcpu, msr & ~MSR_SE);
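
The two debug helpers above map KVM's generic single-step request onto the PowerPC MSR[SE] (single-step trace enable) bit. For reference, a sketch of how userspace arms this path through the standard KVM ioctl; the function and fd names are placeholders:

	/* Userspace sketch: arm single-step via KVM_SET_GUEST_DEBUG. */
	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	static int enable_singlestep(int vcpu_fd)
	{
		struct kvm_guest_debug dbg = {
			.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
		};

		/* Each guest instruction then causes a KVM_EXIT_DEBUG exit. */
		return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
	}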
1092 static int kvmppc_exit_pr_progint(struct kvm_vcpu *vcpu, unsigned int exit_nr)
1106 flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
1110 emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
1114 if (kvmppc_get_msr(vcpu) & MSR_PR) {
1117 kvmppc_get_pc(vcpu), last_inst);
1120 kvmppc_core_queue_program(vcpu, flags);
1125 vcpu->stat.emulated_inst_exits++;
1126 er = kvmppc_emulate_instruction(vcpu);
1136 __func__, kvmppc_get_pc(vcpu), last_inst);
1137 kvmppc_core_queue_program(vcpu, flags);
1141 vcpu->run->exit_reason = KVM_EXIT_MMIO;
1154 int kvmppc_handle_exit_pr(struct kvm_vcpu *vcpu, unsigned int exit_nr)
1156 struct kvm_run *run = vcpu->run;
1160 vcpu->stat.sum_exits++;
1167 trace_kvm_exit(exit_nr, vcpu);
1173 ulong shadow_srr1 = vcpu->arch.shadow_srr1;
1174 vcpu->stat.pf_instruc++;
1176 if (kvmppc_is_split_real(vcpu))
1177 kvmppc_fixup_split_real(vcpu);
1186 svcpu = svcpu_get(vcpu);
1187 sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT];
1190 kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
1199 int idx = srcu_read_lock(&vcpu->kvm->srcu);
1200 r = kvmppc_handle_pagefault(vcpu, kvmppc_get_pc(vcpu), exit_nr);
1201 srcu_read_unlock(&vcpu->kvm->srcu, idx);
1202 vcpu->stat.sp_instruc++;
1203 } else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
1204 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
1210 kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
1213 kvmppc_core_queue_inst_storage(vcpu,
1221 ulong dar = kvmppc_get_fault_dar(vcpu);
1222 u32 fault_dsisr = vcpu->arch.fault_dsisr;
1223 vcpu->stat.pf_storage++;
1232 svcpu = svcpu_get(vcpu);
1236 kvmppc_mmu_map_segment(vcpu, dar);
1249 int idx = srcu_read_lock(&vcpu->kvm->srcu);
1250 r = kvmppc_handle_pagefault(vcpu, dar, exit_nr);
1251 srcu_read_unlock(&vcpu->kvm->srcu, idx);
1253 kvmppc_core_queue_data_storage(vcpu, dar, fault_dsisr);
1259 if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
1260 kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
1261 kvmppc_book3s_queue_irqprio(vcpu,
1267 if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
1268 kvmppc_book3s_queue_irqprio(vcpu,
1278 vcpu->stat.dec_exits++;
1284 vcpu->stat.ext_intr_exits++;
1294 r = kvmppc_exit_pr_progint(vcpu, exit_nr);
1302 if (vcpu->arch.papr_enabled) {
1304 emul = kvmppc_get_last_inst(vcpu, INST_SC, &last_sc);
1306 kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) - 4);
1312 if (vcpu->arch.papr_enabled &&
1314 !(kvmppc_get_msr(vcpu) & MSR_PR)) {
1316 ulong cmd = kvmppc_get_gpr(vcpu, 3);
1320 if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
1328 ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
1332 vcpu->arch.hcall_needed = 1;
1334 } else if (vcpu->arch.osi_enabled &&
1335 (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
1336 (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
1343 gprs[i] = kvmppc_get_gpr(vcpu, i);
1344 vcpu->arch.osi_needed = 1;
1346 } else if (!(kvmppc_get_msr(vcpu) & MSR_PR) &&
1347 (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
1349 kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
1353 vcpu->stat.syscall_exits++;
1354 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
1367 if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) {
1369 emul = kvmppc_get_last_inst(vcpu, INST_GENERIC,
1372 r = kvmppc_exit_pr_progint(vcpu, exit_nr);
1394 r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
1400 int emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
1406 dsisr = kvmppc_alignment_dsisr(vcpu, last_inst);
1407 dar = kvmppc_alignment_dar(vcpu, last_inst);
1409 kvmppc_set_dsisr(vcpu, dsisr);
1410 kvmppc_set_dar(vcpu, dar);
1412 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
1419 r = kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56);
1423 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
1427 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
1431 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
1437 ulong shadow_srr1 = vcpu->arch.shadow_srr1;
1440 exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
1458 s = kvmppc_prepare_to_enter(vcpu);
1466 kvmppc_handle_lost_ext(vcpu);
1469 trace_kvm_book3s_reenter(r, vcpu);
1474 static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu,
1477 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
1480 sregs->pvr = vcpu->arch.pvr;
1482 sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
1483 if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
1485 sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
1486 sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
1490 sregs->u.s.ppc32.sr[i] = kvmppc_get_sr(vcpu, i);
1501 static int kvm_arch_vcpu_ioctl_set_sregs_pr(struct kvm_vcpu *vcpu,
1504 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
1507 kvmppc_set_pvr_pr(vcpu, sregs->pvr);
1511 if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
1513 vcpu->arch.mmu.slbmte(vcpu, 0, 0);
1514 vcpu->arch.mmu.slbia(vcpu);
1521 vcpu->arch.mmu.slbmte(vcpu, rs, rb);
1527 vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
1530 kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
1532 kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
1534 kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
1536 kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
1542 kvmppc_mmu_pte_flush(vcpu, 0, 0);
1547 static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
1557 *val = get_reg_val(id, to_book3s(vcpu)->hior);
1560 *val = get_reg_val(id, to_book3s(vcpu)->vtb);
1567 if (vcpu->arch.intr_msr & MSR_LE)
1574 *val = get_reg_val(id, vcpu->arch.tfhar);
1577 *val = get_reg_val(id, vcpu->arch.tfiar);
1580 *val = get_reg_val(id, vcpu->arch.texasr);
1584 vcpu->arch.gpr_tm[id-KVM_REG_PPC_TM_GPR0]);
1593 val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j];
1596 val->vval = vcpu->arch.vr_tm.vr[i-32];
1603 *val = get_reg_val(id, vcpu->arch.cr_tm);
1606 *val = get_reg_val(id, vcpu->arch.xer_tm);
1609 *val = get_reg_val(id, vcpu->arch.lr_tm);
1612 *val = get_reg_val(id, vcpu->arch.ctr_tm);
1615 *val = get_reg_val(id, vcpu->arch.fp_tm.fpscr);
1618 *val = get_reg_val(id, vcpu->arch.amr_tm);
1621 *val = get_reg_val(id, vcpu->arch.ppr_tm);
1624 *val = get_reg_val(id, vcpu->arch.vrsave_tm);
1628 *val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]);
1633 *val = get_reg_val(id, vcpu->arch.dscr_tm);
1636 *val = get_reg_val(id, vcpu->arch.tar_tm);
1647 static void kvmppc_set_lpcr_pr(struct kvm_vcpu *vcpu, u64 new_lpcr)
1650 vcpu->arch.intr_msr |= MSR_LE;
1652 vcpu->arch.intr_msr &= ~MSR_LE;
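
The two matched assignments are the entire body of kvmppc_set_lpcr_pr(); a sketch with the condition filled in, assuming it tests LPCR[ILE], the interrupt-little-endian bit:

	/* Sketch: the LPCR_ILE test is assumed from context. */
	static void kvmppc_set_lpcr_pr(struct kvm_vcpu *vcpu, u64 new_lpcr)
	{
		/* ILE selects the endianness the guest takes interrupts in. */
		if (new_lpcr & LPCR_ILE)
			vcpu->arch.intr_msr |= MSR_LE;
		else
			vcpu->arch.intr_msr &= ~MSR_LE;
	}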
1655 static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
1662 to_book3s(vcpu)->hior = set_reg_val(id, *val);
1663 to_book3s(vcpu)->hior_explicit = true;
1666 to_book3s(vcpu)->vtb = set_reg_val(id, *val);
1670 kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val));
1674 vcpu->arch.tfhar = set_reg_val(id, *val);
1677 vcpu->arch.tfiar = set_reg_val(id, *val);
1680 vcpu->arch.texasr = set_reg_val(id, *val);
1683 vcpu->arch.gpr_tm[id - KVM_REG_PPC_TM_GPR0] =
1693 vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j];
1696 vcpu->arch.vr_tm.vr[i-32] = val->vval;
1702 vcpu->arch.cr_tm = set_reg_val(id, *val);
1705 vcpu->arch.xer_tm = set_reg_val(id, *val);
1708 vcpu->arch.lr_tm = set_reg_val(id, *val);
1711 vcpu->arch.ctr_tm = set_reg_val(id, *val);
1714 vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val);
1717 vcpu->arch.amr_tm = set_reg_val(id, *val);
1720 vcpu->arch.ppr_tm = set_reg_val(id, *val);
1723 vcpu->arch.vrsave_tm = set_reg_val(id, *val);
1727 vcpu->arch.vr.vscr.u[3] = set_reg_val(id, *val);
1732 vcpu->arch.dscr_tm = set_reg_val(id, *val);
1735 vcpu->arch.tar_tm = set_reg_val(id, *val);
1746 static int kvmppc_core_vcpu_create_pr(struct kvm_vcpu *vcpu)
1757 vcpu->arch.book3s = vcpu_book3s;
1760 vcpu->arch.shadow_vcpu =
1761 kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL);
1762 if (!vcpu->arch.shadow_vcpu)
1769 vcpu->arch.shared = (void *)p;
1773 vcpu->arch.shared_big_endian = true;
1775 vcpu->arch.shared_big_endian = false;
1783 vcpu->arch.pvr = 0x3C0301;
1785 vcpu->arch.pvr = mfspr(SPRN_PVR);
1786 vcpu->arch.intr_msr = MSR_SF;
1789 vcpu->arch.pvr = 0x84202;
1790 vcpu->arch.intr_msr = 0;
1792 kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr);
1793 vcpu->arch.slb_nr = 64;
1795 vcpu->arch.shadow_msr = MSR_USER64 & ~MSR_LE;
1797 err = kvmppc_mmu_init_pr(vcpu);
1804 free_page((unsigned long)vcpu->arch.shared);
1807 kfree(vcpu->arch.shadow_vcpu);
1815 static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
1817 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
1819 kvmppc_mmu_destroy_pr(vcpu);
1820 free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
1822 kfree(vcpu->arch.shadow_vcpu);
1827 static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu)
1831 /* Check if we can run the vcpu at all */
1832 if (!vcpu->arch.sane) {
1833 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1838 kvmppc_setup_debug(vcpu);
1846 ret = kvmppc_prepare_to_enter(vcpu);
1855 if (kvmppc_get_msr(vcpu) & MSR_FP)
1856 kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
1860 ret = __kvmppc_vcpu_run(vcpu);
1862 kvmppc_clear_debug(vcpu);
1868 kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
1871 kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
1874 vcpu->mode = OUTSIDE_GUEST_MODE;
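
A condensed sketch of the run path assembled from the kvmppc_vcpu_run_pr() matches above; error handling, the interrupt-disable window, and the bail-out label are simplified assumptions:

	/* Sketch of kvmppc_vcpu_run_pr(), simplified. */
	if (!vcpu->arch.sane) {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return -EINVAL;
	}

	kvmppc_setup_debug(vcpu);

	ret = kvmppc_prepare_to_enter(vcpu);
	if (ret <= 0)
		goto out;			/* assumed bail-out label */

	/* Preload guest FP if the guest MSR already enables it. */
	if (kvmppc_get_msr(vcpu) & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

	ret = __kvmppc_vcpu_run(vcpu);

	kvmppc_clear_debug(vcpu);

	/* Hand FP/VEC/VSX and TAR back to the host before returning. */
	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);

out:
	vcpu->mode = OUTSIDE_GUEST_MODE;
	return ret;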
1885 struct kvm_vcpu *vcpu;
1902 kvm_for_each_vcpu(n, vcpu, kvm)
1903 kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);
1948 struct kvm_vcpu *vcpu;
1964 * support it, but unfortunately we don't have a vcpu easily
1965 * to hand here to test. Just pick the first vcpu, and if
1971 vcpu = kvm_get_vcpu(kvm, 0);
1972 if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {