Lines matching refs: vcpu
53 static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
56 static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac);
67 static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu)
69 ulong msr = kvmppc_get_msr(vcpu);
73 static void kvmppc_fixup_split_real(struct kvm_vcpu *vcpu)
75 ulong msr = kvmppc_get_msr(vcpu);
76 ulong pc = kvmppc_get_pc(vcpu);
83 if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK)
90 vcpu->arch.hflags |= BOOK3S_HFLAG_SPLIT_HACK;
91 kvmppc_set_pc(vcpu, pc | SPLIT_HACK_OFFS);
94 static void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu)
96 if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
97 ulong pc = kvmppc_get_pc(vcpu);
98 ulong lr = kvmppc_get_lr(vcpu);
100 kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK);
102 kvmppc_set_lr(vcpu, lr & ~SPLIT_HACK_MASK);
103 vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK;
107 static void kvmppc_inject_interrupt_pr(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
111 kvmppc_unfixup_split_real(vcpu);
113 msr = kvmppc_get_msr(vcpu);
114 pc = kvmppc_get_pc(vcpu);
115 new_msr = vcpu->arch.intr_msr;
116 new_pc = to_book3s(vcpu)->hior + vec;
126 kvmppc_set_srr0(vcpu, pc);
127 kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags);
128 kvmppc_set_pc(vcpu, new_pc);
129 kvmppc_set_msr(vcpu, new_msr);
132 static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
135 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
136 memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
137 svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
150 vcpu->cpu = smp_processor_id();
152 current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu;
155 if (kvmppc_is_split_real(vcpu))
156 kvmppc_fixup_split_real(vcpu);
158 kvmppc_restore_tm_pr(vcpu);
161 static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
164 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
166 kvmppc_copy_from_svcpu(vcpu);
168 memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
169 to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
181 if (kvmppc_is_split_real(vcpu))
182 kvmppc_unfixup_split_real(vcpu);
184 kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
185 kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
186 kvmppc_save_tm_pr(vcpu);
188 vcpu->cpu = -1;
191 /* Copy data needed by real-mode code from vcpu to shadow vcpu */
192 void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu)
194 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
196 svcpu->gpr[0] = vcpu->arch.regs.gpr[0];
197 svcpu->gpr[1] = vcpu->arch.regs.gpr[1];
198 svcpu->gpr[2] = vcpu->arch.regs.gpr[2];
199 svcpu->gpr[3] = vcpu->arch.regs.gpr[3];
200 svcpu->gpr[4] = vcpu->arch.regs.gpr[4];
201 svcpu->gpr[5] = vcpu->arch.regs.gpr[5];
202 svcpu->gpr[6] = vcpu->arch.regs.gpr[6];
203 svcpu->gpr[7] = vcpu->arch.regs.gpr[7];
204 svcpu->gpr[8] = vcpu->arch.regs.gpr[8];
205 svcpu->gpr[9] = vcpu->arch.regs.gpr[9];
206 svcpu->gpr[10] = vcpu->arch.regs.gpr[10];
207 svcpu->gpr[11] = vcpu->arch.regs.gpr[11];
208 svcpu->gpr[12] = vcpu->arch.regs.gpr[12];
209 svcpu->gpr[13] = vcpu->arch.regs.gpr[13];
210 svcpu->cr = vcpu->arch.regs.ccr;
211 svcpu->xer = vcpu->arch.regs.xer;
212 svcpu->ctr = vcpu->arch.regs.ctr;
213 svcpu->lr = vcpu->arch.regs.link;
214 svcpu->pc = vcpu->arch.regs.nip;
216 svcpu->shadow_fscr = vcpu->arch.shadow_fscr;
222 vcpu->arch.entry_tb = get_tb();
223 vcpu->arch.entry_vtb = get_vtb();
225 vcpu->arch.entry_ic = mfspr(SPRN_IC);
231 static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
233 ulong guest_msr = kvmppc_get_msr(vcpu);
246 smsr |= (guest_msr & vcpu->arch.guest_owned_ext);
260 vcpu->arch.shadow_msr = smsr;
263 /* Copy data touched by real-mode code from shadow vcpu back to vcpu */
264 void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu)
266 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
278 vcpu->arch.regs.gpr[0] = svcpu->gpr[0];
279 vcpu->arch.regs.gpr[1] = svcpu->gpr[1];
280 vcpu->arch.regs.gpr[2] = svcpu->gpr[2];
281 vcpu->arch.regs.gpr[3] = svcpu->gpr[3];
282 vcpu->arch.regs.gpr[4] = svcpu->gpr[4];
283 vcpu->arch.regs.gpr[5] = svcpu->gpr[5];
284 vcpu->arch.regs.gpr[6] = svcpu->gpr[6];
285 vcpu->arch.regs.gpr[7] = svcpu->gpr[7];
286 vcpu->arch.regs.gpr[8] = svcpu->gpr[8];
287 vcpu->arch.regs.gpr[9] = svcpu->gpr[9];
288 vcpu->arch.regs.gpr[10] = svcpu->gpr[10];
289 vcpu->arch.regs.gpr[11] = svcpu->gpr[11];
290 vcpu->arch.regs.gpr[12] = svcpu->gpr[12];
291 vcpu->arch.regs.gpr[13] = svcpu->gpr[13];
292 vcpu->arch.regs.ccr = svcpu->cr;
293 vcpu->arch.regs.xer = svcpu->xer;
294 vcpu->arch.regs.ctr = svcpu->ctr;
295 vcpu->arch.regs.link = svcpu->lr;
296 vcpu->arch.regs.nip = svcpu->pc;
297 vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
298 vcpu->arch.fault_dar = svcpu->fault_dar;
299 vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
300 vcpu->arch.last_inst = svcpu->last_inst;
302 vcpu->arch.shadow_fscr = svcpu->shadow_fscr;
307 vcpu->arch.purr += get_tb() - vcpu->arch.entry_tb;
308 vcpu->arch.spurr += get_tb() - vcpu->arch.entry_tb;
309 to_book3s(vcpu)->vtb += get_vtb() - vcpu->arch.entry_vtb;
311 vcpu->arch.ic += mfspr(SPRN_IC) - vcpu->arch.entry_ic;
325 old_msr = kvmppc_get_msr(vcpu);
327 (vcpu->arch.shadow_srr1 & (MSR_TS_MASK)) !=
330 old_msr |= (vcpu->arch.shadow_srr1 & (MSR_TS_MASK));
331 kvmppc_set_msr_fast(vcpu, old_msr);
332 kvmppc_recalc_shadow_msr(vcpu);
343 void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu)
346 vcpu->arch.tfhar = mfspr(SPRN_TFHAR);
347 vcpu->arch.texasr = mfspr(SPRN_TEXASR);
348 vcpu->arch.tfiar = mfspr(SPRN_TFIAR);
352 void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu)
355 mtspr(SPRN_TFHAR, vcpu->arch.tfhar);
356 mtspr(SPRN_TEXASR, vcpu->arch.texasr);
357 mtspr(SPRN_TFIAR, vcpu->arch.tfiar);
364 static void kvmppc_handle_lost_math_exts(struct kvm_vcpu *vcpu)
367 ulong ext_diff = (kvmppc_get_msr(vcpu) & ~vcpu->arch.guest_owned_ext) &
380 kvmppc_handle_ext(vcpu, exit_nr, ext_diff);
383 void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu)
385 if (!(MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)))) {
386 kvmppc_save_tm_sprs(vcpu);
390 kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
391 kvmppc_giveup_ext(vcpu, MSR_VSX);
394 _kvmppc_save_tm_pr(vcpu, mfmsr());
398 void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu)
400 if (!MSR_TM_ACTIVE(kvmppc_get_msr(vcpu))) {
401 kvmppc_restore_tm_sprs(vcpu);
402 if (kvmppc_get_msr(vcpu) & MSR_TM) {
403 kvmppc_handle_lost_math_exts(vcpu);
404 if (vcpu->arch.fscr & FSCR_TAR)
405 kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
411 _kvmppc_restore_tm_pr(vcpu, kvmppc_get_msr(vcpu));
414 if (kvmppc_get_msr(vcpu) & MSR_TM) {
415 kvmppc_handle_lost_math_exts(vcpu);
416 if (vcpu->arch.fscr & FSCR_TAR)
417 kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
422 static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
428 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
429 kvmppc_mmu_pte_flush(vcpu, 0, 0);
438 struct kvm_vcpu *vcpu;
440 kvm_for_each_vcpu(i, vcpu, kvm)
441 kvmppc_mmu_pte_pflush(vcpu, range->start << PAGE_SHIFT,
472 static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
477 if (vcpu->arch.papr_enabled)
490 kvmppc_emulate_tabort(vcpu,
494 old_msr = kvmppc_get_msr(vcpu);
495 msr &= to_book3s(vcpu)->msr_mask;
496 kvmppc_set_msr_fast(vcpu, msr);
497 kvmppc_recalc_shadow_msr(vcpu);
500 if (!vcpu->arch.pending_exceptions) {
501 kvm_vcpu_halt(vcpu);
502 vcpu->stat.generic.halt_wakeup++;
506 kvmppc_set_msr_fast(vcpu, msr);
510 if (kvmppc_is_split_real(vcpu))
511 kvmppc_fixup_split_real(vcpu);
513 kvmppc_unfixup_split_real(vcpu);
515 if ((kvmppc_get_msr(vcpu) & (MSR_PR|MSR_IR|MSR_DR)) !=
517 kvmppc_mmu_flush_segments(vcpu);
518 kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
521 if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
522 struct kvm_vcpu_arch *a = &vcpu->arch;
525 kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
527 kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
539 if (vcpu->arch.magic_page_pa &&
542 kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
547 if (kvmppc_get_msr(vcpu) & MSR_FP)
548 kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
551 if (kvmppc_get_msr(vcpu) & MSR_TM)
552 kvmppc_handle_lost_math_exts(vcpu);
556 static void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr)
560 vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
561 vcpu->arch.pvr = pvr;
564 kvmppc_mmu_book3s_64_init(vcpu);
565 if (!to_book3s(vcpu)->hior_explicit)
566 to_book3s(vcpu)->hior = 0xfff00000;
567 to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
568 vcpu->arch.cpu_type = KVM_CPU_3S_64;
572 kvmppc_mmu_book3s_32_init(vcpu);
573 if (!to_book3s(vcpu)->hior_explicit)
574 to_book3s(vcpu)->hior = 0;
575 to_book3s(vcpu)->msr_mask = 0xffffffffULL;
576 vcpu->arch.cpu_type = KVM_CPU_3S_32;
579 kvmppc_sanity_check(vcpu);
583 vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
584 if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
586 vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
591 to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);
608 vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE |
615 vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
630 vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
645 static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
652 hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
672 static bool kvmppc_visible_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
674 ulong mp_pa = vcpu->arch.magic_page_pa;
676 if (!(kvmppc_get_msr(vcpu) & MSR_SF))
684 return kvm_is_visible_gfn(vcpu->kvm, gpa >> PAGE_SHIFT);
687 static int kvmppc_handle_pagefault(struct kvm_vcpu *vcpu,
696 bool dr = (kvmppc_get_msr(vcpu) & MSR_DR) ? true : false;
697 bool ir = (kvmppc_get_msr(vcpu) & MSR_IR) ? true : false;
701 if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE))
706 page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite);
718 switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) {
724 (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
729 vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
731 if ((kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) == MSR_DR)
742 if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
743 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
761 flags |= vcpu->arch.fault_dsisr & DSISR_ISSTORE;
762 kvmppc_core_queue_data_storage(vcpu, 0, eaddr, flags);
764 kvmppc_core_queue_inst_storage(vcpu, flags);
768 kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
769 kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
770 } else if (kvmppc_visible_gpa(vcpu, pte.raddr)) {
771 if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
777 kvmppc_mmu_unmap_page(vcpu, &pte);
780 if (kvmppc_mmu_map_page(vcpu, &pte, iswrite) == -EIO) {
782 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
786 vcpu->stat.sp_storage++;
787 else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
788 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
789 kvmppc_patch_dcbz(vcpu, &pte);
792 vcpu->stat.mmio_exits++;
793 vcpu->arch.paddr_accessed = pte.raddr;
794 vcpu->arch.vaddr_accessed = pte.eaddr;
795 r = kvmppc_emulate_mmio(vcpu);
804 void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
815 msr &= vcpu->arch.guest_owned_ext;
842 vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX);
843 kvmppc_recalc_shadow_msr(vcpu);
847 void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac)
850 if (!(vcpu->arch.shadow_fscr & (1ULL << fac))) {
857 vcpu->arch.tar = mfspr(SPRN_TAR);
859 vcpu->arch.shadow_fscr &= ~FSCR_TAR;
866 static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
872 if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
875 if (!(kvmppc_get_msr(vcpu) & msr)) {
876 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
886 kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
898 msr &= ~vcpu->arch.guest_owned_ext;
909 load_fp_state(&vcpu->arch.fp);
911 t->fp_save_area = &vcpu->arch.fp;
919 load_vr_state(&vcpu->arch.vr);
921 t->vr_save_area = &vcpu->arch.vr;
927 vcpu->arch.guest_owned_ext |= msr;
928 kvmppc_recalc_shadow_msr(vcpu);
937 static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
941 lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr;
948 load_fp_state(&vcpu->arch.fp);
956 load_vr_state(&vcpu->arch.vr);
966 void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac)
969 vcpu->arch.fscr &= ~(0xffULL << 56);
970 vcpu->arch.fscr |= (fac << 56);
971 kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL);
974 static void kvmppc_emulate_fac(struct kvm_vcpu *vcpu, ulong fac)
978 if (!(kvmppc_get_msr(vcpu) & MSR_PR))
979 er = kvmppc_emulate_instruction(vcpu);
983 kvmppc_trigger_fac_interrupt(vcpu, fac);
988 static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac)
1000 guest_fac_enabled = (vcpu->arch.fscr & (1ULL << fac));
1003 guest_fac_enabled = kvmppc_get_msr(vcpu) & MSR_TM;
1012 kvmppc_trigger_fac_interrupt(vcpu, fac);
1020 mtspr(SPRN_TAR, vcpu->arch.tar);
1021 vcpu->arch.shadow_fscr |= FSCR_TAR;
1024 kvmppc_emulate_fac(vcpu, fac);
1036 if ((fac == FSCR_TM_LG) && !(kvmppc_get_msr(vcpu) & MSR_PR))
1043 void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr)
1049 if ((vcpu->arch.fscr & FSCR_TAR) && !(fscr & FSCR_TAR)) {
1051 kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
1052 } else if (!(vcpu->arch.fscr & FSCR_TAR) && (fscr & FSCR_TAR)) {
1053 vcpu->arch.fscr = fscr;
1054 kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
1058 vcpu->arch.fscr = fscr;
1062 static void kvmppc_setup_debug(struct kvm_vcpu *vcpu)
1064 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
1065 u64 msr = kvmppc_get_msr(vcpu);
1067 kvmppc_set_msr(vcpu, msr | MSR_SE);
1071 static void kvmppc_clear_debug(struct kvm_vcpu *vcpu)
1073 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
1074 u64 msr = kvmppc_get_msr(vcpu);
1076 kvmppc_set_msr(vcpu, msr & ~MSR_SE);
1080 static int kvmppc_exit_pr_progint(struct kvm_vcpu *vcpu, unsigned int exit_nr)
1094 flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
1098 emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
1102 if (kvmppc_get_msr(vcpu) & MSR_PR) {
1105 kvmppc_get_pc(vcpu), ppc_inst_val(last_inst));
1108 kvmppc_core_queue_program(vcpu, flags);
1113 vcpu->stat.emulated_inst_exits++;
1114 er = kvmppc_emulate_instruction(vcpu);
1124 __func__, kvmppc_get_pc(vcpu), ppc_inst_val(last_inst));
1125 kvmppc_core_queue_program(vcpu, flags);
1129 vcpu->run->exit_reason = KVM_EXIT_MMIO;
1142 int kvmppc_handle_exit_pr(struct kvm_vcpu *vcpu, unsigned int exit_nr)
1144 struct kvm_run *run = vcpu->run;
1148 vcpu->stat.sum_exits++;
1155 trace_kvm_exit(exit_nr, vcpu);
1161 ulong shadow_srr1 = vcpu->arch.shadow_srr1;
1162 vcpu->stat.pf_instruc++;
1164 if (kvmppc_is_split_real(vcpu))
1165 kvmppc_fixup_split_real(vcpu);
1174 svcpu = svcpu_get(vcpu);
1175 sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT];
1178 kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
1187 int idx = srcu_read_lock(&vcpu->kvm->srcu);
1188 r = kvmppc_handle_pagefault(vcpu, kvmppc_get_pc(vcpu), exit_nr);
1189 srcu_read_unlock(&vcpu->kvm->srcu, idx);
1190 vcpu->stat.sp_instruc++;
1191 } else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
1192 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
1198 kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
1201 kvmppc_core_queue_inst_storage(vcpu,
1209 ulong dar = kvmppc_get_fault_dar(vcpu);
1210 u32 fault_dsisr = vcpu->arch.fault_dsisr;
1211 vcpu->stat.pf_storage++;
1220 svcpu = svcpu_get(vcpu);
1224 kvmppc_mmu_map_segment(vcpu, dar);
1237 int idx = srcu_read_lock(&vcpu->kvm->srcu);
1238 r = kvmppc_handle_pagefault(vcpu, dar, exit_nr);
1239 srcu_read_unlock(&vcpu->kvm->srcu, idx);
1241 kvmppc_core_queue_data_storage(vcpu, 0, dar, fault_dsisr);
1247 if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
1248 kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
1249 kvmppc_book3s_queue_irqprio(vcpu,
1255 if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
1256 kvmppc_book3s_queue_irqprio(vcpu,
1266 vcpu->stat.dec_exits++;
1272 vcpu->stat.ext_intr_exits++;
1282 r = kvmppc_exit_pr_progint(vcpu, exit_nr);
1290 if (vcpu->arch.papr_enabled) {
1292 emul = kvmppc_get_last_inst(vcpu, INST_SC, &last_sc);
1294 kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) - 4);
1300 if (vcpu->arch.papr_enabled &&
1302 !(kvmppc_get_msr(vcpu) & MSR_PR)) {
1304 ulong cmd = kvmppc_get_gpr(vcpu, 3);
1308 if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
1316 ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
1320 vcpu->arch.hcall_needed = 1;
1322 } else if (vcpu->arch.osi_enabled &&
1323 (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
1324 (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
1331 gprs[i] = kvmppc_get_gpr(vcpu, i);
1332 vcpu->arch.osi_needed = 1;
1334 } else if (!(kvmppc_get_msr(vcpu) & MSR_PR) &&
1335 (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
1337 kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
1341 vcpu->stat.syscall_exits++;
1342 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
1355 if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) {
1357 emul = kvmppc_get_last_inst(vcpu, INST_GENERIC,
1360 r = kvmppc_exit_pr_progint(vcpu, exit_nr);
1382 r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
1388 int emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
1394 dsisr = kvmppc_alignment_dsisr(vcpu, ppc_inst_val(last_inst));
1395 dar = kvmppc_alignment_dar(vcpu, ppc_inst_val(last_inst));
1397 kvmppc_set_dsisr(vcpu, dsisr);
1398 kvmppc_set_dar(vcpu, dar);
1400 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
1407 r = kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56);
1411 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
1415 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
1419 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
1425 ulong shadow_srr1 = vcpu->arch.shadow_srr1;
1428 exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
1446 s = kvmppc_prepare_to_enter(vcpu);
1454 kvmppc_handle_lost_ext(vcpu);
1457 trace_kvm_book3s_reenter(r, vcpu);
1462 static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu,
1465 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
1468 sregs->pvr = vcpu->arch.pvr;
1470 sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
1471 if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
1473 sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
1474 sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
1478 sregs->u.s.ppc32.sr[i] = kvmppc_get_sr(vcpu, i);
1489 static int kvm_arch_vcpu_ioctl_set_sregs_pr(struct kvm_vcpu *vcpu,
1492 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
1495 kvmppc_set_pvr_pr(vcpu, sregs->pvr);
1499 if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
1501 vcpu->arch.mmu.slbmte(vcpu, 0, 0);
1502 vcpu->arch.mmu.slbia(vcpu);
1509 vcpu->arch.mmu.slbmte(vcpu, rs, rb);
1515 vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
1518 kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
1520 kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
1522 kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
1524 kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
1530 kvmppc_mmu_pte_flush(vcpu, 0, 0);
1535 static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
1545 *val = get_reg_val(id, to_book3s(vcpu)->hior);
1548 *val = get_reg_val(id, to_book3s(vcpu)->vtb);
1555 if (vcpu->arch.intr_msr & MSR_LE)
1562 *val = get_reg_val(id, vcpu->arch.tfhar);
1565 *val = get_reg_val(id, vcpu->arch.tfiar);
1568 *val = get_reg_val(id, vcpu->arch.texasr);
1572 vcpu->arch.gpr_tm[id-KVM_REG_PPC_TM_GPR0]);
1581 val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j];
1584 val->vval = vcpu->arch.vr_tm.vr[i-32];
1591 *val = get_reg_val(id, vcpu->arch.cr_tm);
1594 *val = get_reg_val(id, vcpu->arch.xer_tm);
1597 *val = get_reg_val(id, vcpu->arch.lr_tm);
1600 *val = get_reg_val(id, vcpu->arch.ctr_tm);
1603 *val = get_reg_val(id, vcpu->arch.fp_tm.fpscr);
1606 *val = get_reg_val(id, vcpu->arch.amr_tm);
1609 *val = get_reg_val(id, vcpu->arch.ppr_tm);
1612 *val = get_reg_val(id, vcpu->arch.vrsave_tm);
1616 *val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]);
1621 *val = get_reg_val(id, vcpu->arch.dscr_tm);
1624 *val = get_reg_val(id, vcpu->arch.tar_tm);
1635 static void kvmppc_set_lpcr_pr(struct kvm_vcpu *vcpu, u64 new_lpcr)
1638 vcpu->arch.intr_msr |= MSR_LE;
1640 vcpu->arch.intr_msr &= ~MSR_LE;
1643 static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
1650 to_book3s(vcpu)->hior = set_reg_val(id, *val);
1651 to_book3s(vcpu)->hior_explicit = true;
1654 to_book3s(vcpu)->vtb = set_reg_val(id, *val);
1658 kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val));
1662 vcpu->arch.tfhar = set_reg_val(id, *val);
1665 vcpu->arch.tfiar = set_reg_val(id, *val);
1668 vcpu->arch.texasr = set_reg_val(id, *val);
1671 vcpu->arch.gpr_tm[id - KVM_REG_PPC_TM_GPR0] =
1681 vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j];
1684 vcpu->arch.vr_tm.vr[i-32] = val->vval;
1690 vcpu->arch.cr_tm = set_reg_val(id, *val);
1693 vcpu->arch.xer_tm = set_reg_val(id, *val);
1696 vcpu->arch.lr_tm = set_reg_val(id, *val);
1699 vcpu->arch.ctr_tm = set_reg_val(id, *val);
1702 vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val);
1705 vcpu->arch.amr_tm = set_reg_val(id, *val);
1708 vcpu->arch.ppr_tm = set_reg_val(id, *val);
1711 vcpu->arch.vrsave_tm = set_reg_val(id, *val);
1715 vcpu->arch.vr.vscr.u[3] = set_reg_val(id, *val);
1720 vcpu->arch.dscr_tm = set_reg_val(id, *val);
1723 vcpu->arch.tar_tm = set_reg_val(id, *val);
1734 static int kvmppc_core_vcpu_create_pr(struct kvm_vcpu *vcpu)
1745 vcpu->arch.book3s = vcpu_book3s;
1748 vcpu->arch.shadow_vcpu =
1749 kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL);
1750 if (!vcpu->arch.shadow_vcpu)
1757 vcpu->arch.shared = (void *)p;
1761 vcpu->arch.shared_big_endian = true;
1763 vcpu->arch.shared_big_endian = false;
1771 vcpu->arch.pvr = 0x3C0301;
1773 vcpu->arch.pvr = mfspr(SPRN_PVR);
1774 vcpu->arch.intr_msr = MSR_SF;
1777 vcpu->arch.pvr = 0x84202;
1778 vcpu->arch.intr_msr = 0;
1780 kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr);
1781 vcpu->arch.slb_nr = 64;
1783 vcpu->arch.shadow_msr = MSR_USER64 & ~MSR_LE;
1785 err = kvmppc_mmu_init_pr(vcpu);
1792 free_page((unsigned long)vcpu->arch.shared);
1795 kfree(vcpu->arch.shadow_vcpu);
1803 static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
1805 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
1807 kvmppc_mmu_destroy_pr(vcpu);
1808 free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
1810 kfree(vcpu->arch.shadow_vcpu);
1815 static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu)
1819 /* Check if we can run the vcpu at all */
1820 if (!vcpu->arch.sane) {
1821 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1826 kvmppc_setup_debug(vcpu);
1834 ret = kvmppc_prepare_to_enter(vcpu);
1843 if (kvmppc_get_msr(vcpu) & MSR_FP)
1844 kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
1848 ret = __kvmppc_vcpu_run(vcpu);
1850 kvmppc_clear_debug(vcpu);
1856 kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
1859 kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
1863 vcpu->mode = OUTSIDE_GUEST_MODE;
1874 struct kvm_vcpu *vcpu;
1891 kvm_for_each_vcpu(n, vcpu, kvm)
1892 kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);
1936 struct kvm_vcpu *vcpu;
1952 * support it, but unfortunately we don't have a vcpu easily
1953 * to hand here to test. Just pick the first vcpu, and if
1959 vcpu = kvm_get_vcpu(kvm, 0);
1960 if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {