Lines matching refs:vcpu in arch/powerpc/kvm/booke.c. Each entry below is prefixed with its line number in that file; non-matching lines are omitted, so consecutive entries may belong to different statements or functions.

64 void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
68 printk("pc: %08lx msr: %08llx\n", vcpu->arch.regs.nip,
69 vcpu->arch.shared->msr);
70 printk("lr: %08lx ctr: %08lx\n", vcpu->arch.regs.link,
71 vcpu->arch.regs.ctr);
72 printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
73 vcpu->arch.shared->srr1);
75 printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);
79 kvmppc_get_gpr(vcpu, i),
80 kvmppc_get_gpr(vcpu, i+1),
81 kvmppc_get_gpr(vcpu, i+2),
82 kvmppc_get_gpr(vcpu, i+3));
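The four kvmppc_get_gpr() calls at lines 79-82 are arguments to a printk inside a loop that walks all 32 GPRs four per row; the loop header and format string contain no "vcpu" and so are not matched. A minimal sketch of the surrounding loop, reconstructed from context (the exact format string may differ):

    for (i = 0; i < 32; i += 4) {
            /* dump four guest GPRs per line */
            printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
                   kvmppc_get_gpr(vcpu, i),
                   kvmppc_get_gpr(vcpu, i+1),
                   kvmppc_get_gpr(vcpu, i+2),
                   kvmppc_get_gpr(vcpu, i+3));
    }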
87 void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
91 kvmppc_save_guest_spe(vcpu);
93 vcpu->arch.shadow_msr &= ~MSR_SPE;
97 static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
101 kvmppc_load_guest_spe(vcpu);
103 vcpu->arch.shadow_msr |= MSR_SPE;
107 static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
109 if (vcpu->arch.shared->msr & MSR_SPE) {
110 if (!(vcpu->arch.shadow_msr & MSR_SPE))
111 kvmppc_vcpu_enable_spe(vcpu);
112 } else if (vcpu->arch.shadow_msr & MSR_SPE) {
113 kvmppc_vcpu_disable_spe(vcpu);
117 static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
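Lines 87-113 implement lazy SPE switching: the guest's MSR[SPE] is mirrored into the shadow MSR the hardware really runs with, and the SPE register file is saved or loaded only on a transition. The second kvmppc_vcpu_sync_spe at line 117 is presumably the empty stub built when CONFIG_SPE is off. The full sync function, assembled from the matched lines (braces added):

    static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
    {
            if (vcpu->arch.shared->msr & MSR_SPE) {
                    /* guest enabled SPE but the shadow MSR lacks it */
                    if (!(vcpu->arch.shadow_msr & MSR_SPE))
                            kvmppc_vcpu_enable_spe(vcpu);
            } else if (vcpu->arch.shadow_msr & MSR_SPE) {
                    /* guest dropped MSR[SPE]: save state, revoke access */
                    kvmppc_vcpu_disable_spe(vcpu);
            }
    }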
123 * Load up guest vcpu FP state if it's needed.
126 * guest vcpu FP state if other threads require to use FPU.
131 static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
136 load_fp_state(&vcpu->arch.fp);
138 current->thread.fp_save_area = &vcpu->arch.fp;
145 * Save guest vcpu FP state into thread.
148 static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
157 static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
162 vcpu->arch.shadow_msr &= ~MSR_FP;
163 vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_FP;
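Guest FP state uses the same lazy idea but piggybacks on the host thread's FP machinery: the guest register image lives in vcpu->arch.fp, and pointing current->thread.fp_save_area at it lets the normal context-switch path spill guest state. A sketch of the load helper, assuming the usual CONFIG_PPC_FPU guard and enable_kernel_fp() call around the matched lines 136-138:

    static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
    {
    #ifdef CONFIG_PPC_FPU
            if (!(current->thread.regs->msr & MSR_FP)) {
                    enable_kernel_fp();
                    load_fp_state(&vcpu->arch.fp);
                    current->thread.fp_save_area = &vcpu->arch.fp;
                    current->thread.regs->msr |= MSR_FP;
            }
    #endif
    }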
172 static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
178 load_vr_state(&vcpu->arch.vr);
180 current->thread.vr_save_area = &vcpu->arch.vr;
188 * Save guest vcpu AltiVec state into thread.
191 static inline void kvmppc_save_guest_altivec(struct kvm_vcpu *vcpu)
202 static void kvmppc_vcpu_sync_debug(struct kvm_vcpu *vcpu)
206 vcpu->arch.shadow_msr &= ~MSR_DE;
207 vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_DE;
211 if (vcpu->guest_debug) {
217 vcpu->arch.shared->msr |= MSR_DE;
219 vcpu->arch.shadow_msr |= MSR_DE;
220 vcpu->arch.shared->msr &= ~MSR_DE;
229 void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
231 u32 old_msr = vcpu->arch.shared->msr;
237 vcpu->arch.shared->msr = new_msr;
239 kvmppc_mmu_msr_notify(vcpu, old_msr);
240 kvmppc_vcpu_sync_spe(vcpu);
241 kvmppc_vcpu_sync_fpu(vcpu);
242 kvmppc_vcpu_sync_debug(vcpu);
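kvmppc_set_msr() is the single choke point for guest MSR writes: it remembers the old value for the MMU notifier, installs the new one, then re-derives every piece of lazily shadowed state. Reassembled from the matched lines (the MSR_GS forcing under CONFIG_KVM_BOOKE_HV is from memory and may differ in your tree):

    void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
    {
            u32 old_msr = vcpu->arch.shared->msr;

    #ifdef CONFIG_KVM_BOOKE_HV
            new_msr |= MSR_GS;      /* the guest-state bit must stay set */
    #endif

            vcpu->arch.shared->msr = new_msr;

            kvmppc_mmu_msr_notify(vcpu, old_msr);   /* AS/PR may have changed */
            kvmppc_vcpu_sync_spe(vcpu);
            kvmppc_vcpu_sync_fpu(vcpu);
            kvmppc_vcpu_sync_debug(vcpu);
    }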
245 static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
248 trace_kvm_booke_queue_irqprio(vcpu, priority);
249 set_bit(priority, &vcpu->arch.pending_exceptions);
252 void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
255 vcpu->arch.queued_dear = dear_flags;
256 vcpu->arch.queued_esr = esr_flags;
257 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
260 void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
263 vcpu->arch.queued_dear = dear_flags;
264 vcpu->arch.queued_esr = esr_flags;
265 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
268 void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu)
270 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
273 void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong esr_flags)
275 vcpu->arch.queued_esr = esr_flags;
276 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
279 static void kvmppc_core_queue_alignment(struct kvm_vcpu *vcpu, ulong dear_flags,
282 vcpu->arch.queued_dear = dear_flags;
283 vcpu->arch.queued_esr = esr_flags;
284 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALIGNMENT);
287 void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
289 vcpu->arch.queued_esr = esr_flags;
290 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
293 void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
295 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
299 void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
301 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
305 void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
307 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
310 int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
312 return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
315 void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
317 clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
320 void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
328 kvmppc_booke_queue_irqprio(vcpu, prio);
331 void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
333 clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
334 clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
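External interrupts come in edge and level flavors (KVM_INTERRUPT_SET vs. KVM_INTERRUPT_SET_LEVEL), each with its own priority bit, which is why the dequeue at lines 333-334 clears both. A sketch of the queue side implied by the matched lines 320 and 328:

    void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                    struct kvm_interrupt *irq)
    {
            unsigned int prio = BOOKE_IRQPRIO_EXTERNAL;

            if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
                    prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL;

            kvmppc_booke_queue_irqprio(vcpu, prio);
    }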
337 static void kvmppc_core_queue_watchdog(struct kvm_vcpu *vcpu)
339 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_WATCHDOG);
342 static void kvmppc_core_dequeue_watchdog(struct kvm_vcpu *vcpu)
344 clear_bit(BOOKE_IRQPRIO_WATCHDOG, &vcpu->arch.pending_exceptions);
347 void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu)
349 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DEBUG);
352 void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu)
354 clear_bit(BOOKE_IRQPRIO_DEBUG, &vcpu->arch.pending_exceptions);
357 static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
359 kvmppc_set_srr0(vcpu, srr0);
360 kvmppc_set_srr1(vcpu, srr1);
363 static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
365 vcpu->arch.csrr0 = srr0;
366 vcpu->arch.csrr1 = srr1;
369 static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
372 vcpu->arch.dsrr0 = srr0;
373 vcpu->arch.dsrr1 = srr1;
375 set_guest_csrr(vcpu, srr0, srr1);
379 static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
381 vcpu->arch.mcsrr0 = srr0;
382 vcpu->arch.mcsrr1 = srr1;
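The set_guest_*srr() helpers pick the save/restore register pair matching the interrupt class (SRR, CSRR, DSRR, MCSRR). set_guest_dsrr shows up twice in the match (lines 372-373 and 375) because cores without dedicated debug save/restore registers fall back to the critical pair; presumably:

    static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0,
                               u32 srr1)
    {
    #ifdef CONFIG_KVM_BOOKE_HV
            vcpu->arch.dsrr0 = srr0;
            vcpu->arch.dsrr1 = srr1;
    #else
            set_guest_csrr(vcpu, srr0, srr1);       /* no DSRR on this core */
    #endif
    }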
386 static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
392 ulong crit_raw = vcpu->arch.shared->critical;
393 ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
397 ulong new_msr = vcpu->arch.shared->msr;
400 if (!(vcpu->arch.shared->msr & MSR_SF)) {
408 crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
415 if ((priority == BOOKE_IRQPRIO_EXTERNAL) && vcpu->arch.epr_flags)
448 allowed = vcpu->arch.shared->msr & MSR_CE;
454 allowed = vcpu->arch.shared->msr & MSR_ME;
464 allowed = vcpu->arch.shared->msr & MSR_EE;
470 allowed = vcpu->arch.shared->msr & MSR_DE;
484 set_guest_srr(vcpu, vcpu->arch.regs.nip,
485 vcpu->arch.shared->msr);
488 set_guest_csrr(vcpu, vcpu->arch.regs.nip,
489 vcpu->arch.shared->msr);
492 set_guest_dsrr(vcpu, vcpu->arch.regs.nip,
493 vcpu->arch.shared->msr);
496 set_guest_mcsrr(vcpu, vcpu->arch.regs.nip,
497 vcpu->arch.shared->msr);
501 vcpu->arch.regs.nip = vcpu->arch.ivpr |
502 vcpu->arch.ivor[priority];
504 kvmppc_set_esr(vcpu, vcpu->arch.queued_esr);
506 kvmppc_set_dar(vcpu, vcpu->arch.queued_dear);
508 if (vcpu->arch.epr_flags & KVMPPC_EPR_USER)
509 kvm_make_request(KVM_REQ_EPR_EXIT, vcpu);
510 else if (vcpu->arch.epr_flags & KVMPPC_EPR_KERNEL) {
511 BUG_ON(vcpu->arch.irq_type != KVMPPC_IRQ_MPIC);
512 kvmppc_mpic_set_epr(vcpu);
518 if (vcpu->arch.epcr & SPRN_EPCR_ICM)
521 kvmppc_set_msr(vcpu, new_msr);
524 clear_bit(priority, &vcpu->arch.pending_exceptions);
533 if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
534 kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
535 if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
536 kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
537 if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK)
538 kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
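kvmppc_booke_irqprio_deliver() (lines 386-524) is the heart of interrupt injection. A condensed sketch of its flow, not the verbatim kernel code:

    /* 1. gate on the MSR enable bit for this priority class           */
    allowed = vcpu->arch.shared->msr & MSR_EE;      /* or MSR_CE/ME/DE */
    if (allowed) {
            /* 2. save the interrupted context in the right SRR pair   */
            set_guest_srr(vcpu, vcpu->arch.regs.nip,
                          vcpu->arch.shared->msr);
            /* 3. vector through IVPR | IVOR[priority]                 */
            vcpu->arch.regs.nip = vcpu->arch.ivpr |
                                  vcpu->arch.ivor[priority];
            /* 4. side data the handler expects: ESR, DEAR, EPR, ...   */
            kvmppc_set_esr(vcpu, vcpu->arch.queued_esr);
            /* 5. enter the handler with the class's interrupts masked */
            kvmppc_set_msr(vcpu, new_msr);
            /* 6. the exception is no longer pending                   */
            clear_bit(priority, &vcpu->arch.pending_exceptions);
    }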
549 static unsigned long watchdog_next_timeout(struct kvm_vcpu *vcpu)
553 u32 period = TCR_GET_WP(vcpu->arch.tcr);
575 static void arm_next_watchdog(struct kvm_vcpu *vcpu)
584 if ((vcpu->arch.tsr & (TSR_ENW | TSR_WIS)) != (TSR_ENW | TSR_WIS))
585 kvm_clear_request(KVM_REQ_WATCHDOG, vcpu);
587 spin_lock_irqsave(&vcpu->arch.wdt_lock, flags);
588 nr_jiffies = watchdog_next_timeout(vcpu);
594 mod_timer(&vcpu->arch.wdt_timer, jiffies + nr_jiffies);
596 del_timer(&vcpu->arch.wdt_timer);
597 spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags);
602 struct kvm_vcpu *vcpu = from_timer(vcpu, t, arch.wdt_timer);
607 new_tsr = tsr = vcpu->arch.tsr;
619 } while (cmpxchg(&vcpu->arch.tsr, tsr, new_tsr) != tsr);
623 kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
624 kvm_vcpu_kick(vcpu);
631 if (final && (vcpu->arch.tcr & TCR_WRC_MASK) &&
632 vcpu->arch.watchdog_enabled) {
634 kvm_make_request(KVM_REQ_WATCHDOG, vcpu);
635 kvm_vcpu_kick(vcpu);
645 arm_next_watchdog(vcpu);
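The emulated watchdog mimics the hardware's two-stage semantics: the first expiry sets TSR[ENW], the second sets TSR[WIS] (raising a watchdog interrupt if TCR[WIE] is on), and a final expiry with TCR[WRC] nonzero escalates to a KVM_EXIT_WATCHDOG exit (lines 631-635). Because the timer callback races with ioctls that also touch TSR, the update is lock-free; a sketch of the cmpxchg loop around lines 607-619:

    do {
            new_tsr = tsr = vcpu->arch.tsr;
            final = 0;

            if (tsr & TSR_ENW) {
                    if (tsr & TSR_WIS)
                            final = 1;      /* third expiry: escalate */
                    else
                            new_tsr = tsr | TSR_WIS;
            } else {
                    new_tsr = tsr | TSR_ENW;
            }
    } while (cmpxchg(&vcpu->arch.tsr, tsr, new_tsr) != tsr);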
648 static void update_timer_ints(struct kvm_vcpu *vcpu)
650 if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
651 kvmppc_core_queue_dec(vcpu);
653 kvmppc_core_dequeue_dec(vcpu);
655 if ((vcpu->arch.tcr & TCR_WIE) && (vcpu->arch.tsr & TSR_WIS))
656 kvmppc_core_queue_watchdog(vcpu);
658 kvmppc_core_dequeue_watchdog(vcpu);
661 static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
663 unsigned long *pending = &vcpu->arch.pending_exceptions;
668 if (kvmppc_booke_irqprio_deliver(vcpu, priority))
677 vcpu->arch.shared->int_pending = !!*pending;
681 int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
686 kvmppc_core_check_exceptions(vcpu);
688 if (kvm_request_pending(vcpu)) {
693 if (vcpu->arch.shared->msr & MSR_WE) {
695 kvm_vcpu_block(vcpu);
696 kvm_clear_request(KVM_REQ_UNHALT, vcpu);
699 kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
706 int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
710 if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu))
711 update_timer_ints(vcpu);
713 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
714 kvmppc_core_flush_tlb(vcpu);
717 if (kvm_check_request(KVM_REQ_WATCHDOG, vcpu)) {
718 vcpu->run->exit_reason = KVM_EXIT_WATCHDOG;
722 if (kvm_check_request(KVM_REQ_EPR_EXIT, vcpu)) {
723 vcpu->run->epr.epr = 0;
724 vcpu->arch.epr_needed = true;
725 vcpu->run->exit_reason = KVM_EXIT_EPR;
732 int kvmppc_vcpu_run(struct kvm_vcpu *vcpu)
737 if (!vcpu->arch.sane) {
738 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
742 s = kvmppc_prepare_to_enter(vcpu);
757 kvmppc_load_guest_fp(vcpu);
768 kvmppc_load_guest_altivec(vcpu);
772 debug = vcpu->arch.dbg_reg;
775 current->thread.debug = vcpu->arch.dbg_reg;
777 vcpu->arch.pgdir = vcpu->kvm->mm->pgd;
780 ret = __kvmppc_vcpu_run(vcpu);
790 kvmppc_save_guest_fp(vcpu);
794 kvmppc_save_guest_altivec(vcpu);
798 vcpu->mode = OUTSIDE_GUEST_MODE;
802 static int emulation_exit(struct kvm_vcpu *vcpu)
806 er = kvmppc_emulate_instruction(vcpu);
810 kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
820 __func__, vcpu->arch.regs.nip, vcpu->arch.last_inst);
823 vcpu->run->hw.hardware_exit_reason = ~0ULL << 32;
824 vcpu->run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
825 kvmppc_core_queue_program(vcpu, ESR_PIL);
836 static int kvmppc_handle_debug(struct kvm_vcpu *vcpu)
838 struct kvm_run *run = vcpu->run;
839 struct debug_reg *dbg_reg = &(vcpu->arch.dbg_reg);
840 u32 dbsr = vcpu->arch.dbsr;
842 if (vcpu->guest_debug == 0) {
853 if (dbsr && (vcpu->arch.shared->msr & MSR_DE) &&
854 (vcpu->arch.dbg_reg.dbcr0 & DBCR0_IDM))
855 kvmppc_core_queue_debug(vcpu);
858 if ((dbsr & DBSR_TIE) && !(vcpu->arch.shared->msr & MSR_DE))
859 kvmppc_core_queue_program(vcpu, ESR_PTR);
866 * Clear guest dbsr (vcpu->arch.dbsr)
868 vcpu->arch.dbsr = 0;
870 run->debug.arch.address = vcpu->arch.regs.nip;
910 static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
951 vcpu->arch.dbsr = mfspr(SPRN_DBSR);
957 static int kvmppc_resume_inst_load(struct kvm_vcpu *vcpu,
966 __func__, vcpu->arch.regs.nip);
969 vcpu->run->hw.hardware_exit_reason = ~0ULL << 32;
970 vcpu->run->hw.hardware_exit_reason |= last_inst;
971 kvmppc_core_queue_program(vcpu, ESR_PIL);
984 int kvmppc_handle_exit(struct kvm_vcpu *vcpu, unsigned int exit_nr)
986 struct kvm_run *run = vcpu->run;
994 kvmppc_update_timing_stats(vcpu);
997 kvmppc_restart_interrupt(vcpu, exit_nr);
1007 emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
1011 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
1012 emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
1018 trace_kvm_exit(exit_nr, vcpu);
1041 r = kvmppc_resume_inst_load(vcpu, emulated, last_inst);
1048 kvmppc_dump_vcpu(vcpu);
1056 kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
1061 kvmppc_account_exit(vcpu, DEC_EXITS);
1070 kvmppc_account_exit(vcpu, DBELL_EXITS);
1075 kvmppc_account_exit(vcpu, GDBELL_EXITS);
1086 kvmppc_account_exit(vcpu, GDBELL_EXITS);
1101 r = emulation_exit(vcpu);
1105 if ((vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) &&
1111 r = kvmppc_handle_debug(vcpu);
1113 kvmppc_account_exit(vcpu, DEBUG_EXITS);
1117 if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
1126 kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
1128 kvmppc_account_exit(vcpu, USR_PR_INST);
1132 r = emulation_exit(vcpu);
1136 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
1137 kvmppc_account_exit(vcpu, FP_UNAVAIL);
1143 if (vcpu->arch.shared->msr & MSR_SPE)
1144 kvmppc_vcpu_enable_spe(vcpu);
1146 kvmppc_booke_queue_irqprio(vcpu,
1153 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
1158 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
1167 kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
1178 __func__, exit_nr, vcpu->arch.regs.nip);
1190 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
1195 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_ASSIST);
1201 kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
1202 vcpu->arch.fault_esr);
1203 kvmppc_account_exit(vcpu, DSI_EXITS);
1208 kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
1209 kvmppc_account_exit(vcpu, ISI_EXITS);
1214 kvmppc_core_queue_alignment(vcpu, vcpu->arch.fault_dear,
1215 vcpu->arch.fault_esr);
1221 if (!(vcpu->arch.shared->msr & MSR_PR)) {
1222 kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
1228 kvmppc_core_queue_program(vcpu, ESR_PPR);
1235 if (!(vcpu->arch.shared->msr & MSR_PR) &&
1236 (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
1238 kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
1242 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
1244 kvmppc_account_exit(vcpu, SYSCALL_EXITS);
1250 unsigned long eaddr = vcpu->arch.fault_dear;
1256 if (!(vcpu->arch.shared->msr & MSR_PR) &&
1257 (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
1258 kvmppc_map_magic(vcpu);
1259 kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
1267 gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
1270 kvmppc_core_queue_dtlb_miss(vcpu,
1271 vcpu->arch.fault_dear,
1272 vcpu->arch.fault_esr);
1273 kvmppc_mmu_dtlb_miss(vcpu);
1274 kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
1279 idx = srcu_read_lock(&vcpu->kvm->srcu);
1281 gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
1284 if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
1291 kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
1292 kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
1297 vcpu->arch.paddr_accessed = gpaddr;
1298 vcpu->arch.vaddr_accessed = eaddr;
1299 r = kvmppc_emulate_mmio(vcpu);
1300 kvmppc_account_exit(vcpu, MMIO_EXITS);
1303 srcu_read_unlock(&vcpu->kvm->srcu, idx);
1308 unsigned long eaddr = vcpu->arch.regs.nip;
1316 gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
1319 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
1320 kvmppc_mmu_itlb_miss(vcpu);
1321 kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
1325 kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);
1327 idx = srcu_read_lock(&vcpu->kvm->srcu);
1329 gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
1332 if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
1339 kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
1342 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
1345 srcu_read_unlock(&vcpu->kvm->srcu, idx);
1350 r = kvmppc_handle_debug(vcpu);
1353 kvmppc_account_exit(vcpu, DEBUG_EXITS);
1368 s = kvmppc_prepare_to_enter(vcpu);
1374 kvmppc_load_guest_fp(vcpu);
1375 kvmppc_load_guest_altivec(vcpu);
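The two TLB-miss cases in kvmppc_handle_exit() (lines 1250-1345) share one pattern: consult the guest-visible TLB first, then choose between reflecting the miss, shadow-mapping the page, or emulating MMIO. A condensed sketch of the data side:

    gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
    if (gtlb_index < 0) {
            /* the guest never mapped this address: reflect the miss */
            kvmppc_core_queue_dtlb_miss(vcpu, vcpu->arch.fault_dear,
                                        vcpu->arch.fault_esr);
            kvmppc_mmu_dtlb_miss(vcpu);
    } else {
            gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
            if (kvm_is_visible_gfn(vcpu->kvm, gpaddr >> PAGE_SHIFT)) {
                    /* guest mapping is fine; only the shadow TLB lost it */
                    kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
            } else {
                    /* no backing memslot: hand the access to userspace */
                    vcpu->arch.paddr_accessed = gpaddr;
                    vcpu->arch.vaddr_accessed = eaddr;
                    r = kvmppc_emulate_mmio(vcpu);
            }
    }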
1382 static void kvmppc_set_tsr(struct kvm_vcpu *vcpu, u32 new_tsr)
1384 u32 old_tsr = vcpu->arch.tsr;
1386 vcpu->arch.tsr = new_tsr;
1388 if ((old_tsr ^ vcpu->arch.tsr) & (TSR_ENW | TSR_WIS))
1389 arm_next_watchdog(vcpu);
1391 update_timer_ints(vcpu);
1394 int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
1397 spin_lock_init(&vcpu->arch.wdt_lock);
1398 timer_setup(&vcpu->arch.wdt_timer, kvmppc_watchdog_func, 0);
1408 void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
1410 del_timer_sync(&vcpu->arch.wdt_timer);
1413 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1417 vcpu_load(vcpu);
1419 regs->pc = vcpu->arch.regs.nip;
1420 regs->cr = kvmppc_get_cr(vcpu);
1421 regs->ctr = vcpu->arch.regs.ctr;
1422 regs->lr = vcpu->arch.regs.link;
1423 regs->xer = kvmppc_get_xer(vcpu);
1424 regs->msr = vcpu->arch.shared->msr;
1425 regs->srr0 = kvmppc_get_srr0(vcpu);
1426 regs->srr1 = kvmppc_get_srr1(vcpu);
1427 regs->pid = vcpu->arch.pid;
1428 regs->sprg0 = kvmppc_get_sprg0(vcpu);
1429 regs->sprg1 = kvmppc_get_sprg1(vcpu);
1430 regs->sprg2 = kvmppc_get_sprg2(vcpu);
1431 regs->sprg3 = kvmppc_get_sprg3(vcpu);
1432 regs->sprg4 = kvmppc_get_sprg4(vcpu);
1433 regs->sprg5 = kvmppc_get_sprg5(vcpu);
1434 regs->sprg6 = kvmppc_get_sprg6(vcpu);
1435 regs->sprg7 = kvmppc_get_sprg7(vcpu);
1438 regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
1440 vcpu_put(vcpu);
1444 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1448 vcpu_load(vcpu);
1450 vcpu->arch.regs.nip = regs->pc;
1451 kvmppc_set_cr(vcpu, regs->cr);
1452 vcpu->arch.regs.ctr = regs->ctr;
1453 vcpu->arch.regs.link = regs->lr;
1454 kvmppc_set_xer(vcpu, regs->xer);
1455 kvmppc_set_msr(vcpu, regs->msr);
1456 kvmppc_set_srr0(vcpu, regs->srr0);
1457 kvmppc_set_srr1(vcpu, regs->srr1);
1458 kvmppc_set_pid(vcpu, regs->pid);
1459 kvmppc_set_sprg0(vcpu, regs->sprg0);
1460 kvmppc_set_sprg1(vcpu, regs->sprg1);
1461 kvmppc_set_sprg2(vcpu, regs->sprg2);
1462 kvmppc_set_sprg3(vcpu, regs->sprg3);
1463 kvmppc_set_sprg4(vcpu, regs->sprg4);
1464 kvmppc_set_sprg5(vcpu, regs->sprg5);
1465 kvmppc_set_sprg6(vcpu, regs->sprg6);
1466 kvmppc_set_sprg7(vcpu, regs->sprg7);
1469 kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
1471 vcpu_put(vcpu);
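These two handlers back the KVM_GET_REGS/KVM_SET_REGS vcpu ioctls. From userspace the round trip looks like this (vcpu_fd is a hypothetical descriptor obtained from KVM_CREATE_VCPU):

    #include <err.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static void skip_one_insn(int vcpu_fd)
    {
            struct kvm_regs regs;

            if (ioctl(vcpu_fd, KVM_GET_REGS, &regs) < 0)
                    err(1, "KVM_GET_REGS");
            printf("pc=0x%llx msr=0x%llx\n",
                   (unsigned long long)regs.pc,
                   (unsigned long long)regs.msr);

            regs.pc += 4;                   /* step over one instruction */
            if (ioctl(vcpu_fd, KVM_SET_REGS, &regs) < 0)
                    err(1, "KVM_SET_REGS");
    }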
1475 static void get_sregs_base(struct kvm_vcpu *vcpu,
1482 sregs->u.e.csrr0 = vcpu->arch.csrr0;
1483 sregs->u.e.csrr1 = vcpu->arch.csrr1;
1484 sregs->u.e.mcsr = vcpu->arch.mcsr;
1485 sregs->u.e.esr = kvmppc_get_esr(vcpu);
1486 sregs->u.e.dear = kvmppc_get_dar(vcpu);
1487 sregs->u.e.tsr = vcpu->arch.tsr;
1488 sregs->u.e.tcr = vcpu->arch.tcr;
1489 sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
1491 sregs->u.e.vrsave = vcpu->arch.vrsave;
1494 static int set_sregs_base(struct kvm_vcpu *vcpu,
1500 vcpu->arch.csrr0 = sregs->u.e.csrr0;
1501 vcpu->arch.csrr1 = sregs->u.e.csrr1;
1502 vcpu->arch.mcsr = sregs->u.e.mcsr;
1503 kvmppc_set_esr(vcpu, sregs->u.e.esr);
1504 kvmppc_set_dar(vcpu, sregs->u.e.dear);
1505 vcpu->arch.vrsave = sregs->u.e.vrsave;
1506 kvmppc_set_tcr(vcpu, sregs->u.e.tcr);
1509 vcpu->arch.dec = sregs->u.e.dec;
1510 kvmppc_emulate_dec(vcpu);
1514 kvmppc_set_tsr(vcpu, sregs->u.e.tsr);
1519 static void get_sregs_arch206(struct kvm_vcpu *vcpu,
1524 sregs->u.e.pir = vcpu->vcpu_id;
1525 sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0;
1526 sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1;
1527 sregs->u.e.decar = vcpu->arch.decar;
1528 sregs->u.e.ivpr = vcpu->arch.ivpr;
1531 static int set_sregs_arch206(struct kvm_vcpu *vcpu,
1537 if (sregs->u.e.pir != vcpu->vcpu_id)
1540 vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0;
1541 vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1;
1542 vcpu->arch.decar = sregs->u.e.decar;
1543 vcpu->arch.ivpr = sregs->u.e.ivpr;
1548 int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
1552 sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
1553 sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
1554 sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
1555 sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
1556 sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
1557 sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
1558 sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
1559 sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
1560 sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
1561 sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
1562 sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
1563 sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
1564 sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
1565 sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
1566 sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
1567 sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
1571 int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
1576 vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0];
1577 vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1];
1578 vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2];
1579 vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3];
1580 vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4];
1581 vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5];
1582 vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6];
1583 vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7];
1584 vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8];
1585 vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9];
1586 vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10];
1587 vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11];
1588 vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12];
1589 vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13];
1590 vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14];
1591 vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15];
1596 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1601 vcpu_load(vcpu);
1603 sregs->pvr = vcpu->arch.pvr;
1605 get_sregs_base(vcpu, sregs);
1606 get_sregs_arch206(vcpu, sregs);
1607 ret = vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
1609 vcpu_put(vcpu);
1613 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1618 vcpu_load(vcpu);
1619 if (vcpu->arch.pvr != sregs->pvr)
1622 ret = set_sregs_base(vcpu, sregs);
1626 ret = set_sregs_arch206(vcpu, sregs);
1630 ret = vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
1633 vcpu_put(vcpu);
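The sregs ioctls carry the BookE-specific state; note that set_sregs rejects a PVR change (line 1619). A userspace fragment reading it back (same vcpu_fd assumption as the KVM_GET_REGS example above):

    struct kvm_sregs sregs;

    if (ioctl(vcpu_fd, KVM_GET_SREGS, &sregs) < 0)
            err(1, "KVM_GET_SREGS");
    /* BookE state lives in the u.e arm of the union */
    printf("pvr=0x%x tcr=0x%x tsr=0x%x\n",
           sregs.pvr, sregs.u.e.tcr, sregs.u.e.tsr);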
1637 int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
1644 *val = get_reg_val(id, vcpu->arch.dbg_reg.iac1);
1647 *val = get_reg_val(id, vcpu->arch.dbg_reg.iac2);
1651 *val = get_reg_val(id, vcpu->arch.dbg_reg.iac3);
1654 *val = get_reg_val(id, vcpu->arch.dbg_reg.iac4);
1658 *val = get_reg_val(id, vcpu->arch.dbg_reg.dac1);
1661 *val = get_reg_val(id, vcpu->arch.dbg_reg.dac2);
1664 u32 epr = kvmppc_get_epr(vcpu);
1670 *val = get_reg_val(id, vcpu->arch.epcr);
1674 *val = get_reg_val(id, vcpu->arch.tcr);
1677 *val = get_reg_val(id, vcpu->arch.tsr);
1683 *val = get_reg_val(id, vcpu->arch.vrsave);
1686 r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
1693 int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
1700 vcpu->arch.dbg_reg.iac1 = set_reg_val(id, *val);
1703 vcpu->arch.dbg_reg.iac2 = set_reg_val(id, *val);
1707 vcpu->arch.dbg_reg.iac3 = set_reg_val(id, *val);
1710 vcpu->arch.dbg_reg.iac4 = set_reg_val(id, *val);
1714 vcpu->arch.dbg_reg.dac1 = set_reg_val(id, *val);
1717 vcpu->arch.dbg_reg.dac2 = set_reg_val(id, *val);
1721 kvmppc_set_epr(vcpu, new_epr);
1727 kvmppc_set_epcr(vcpu, new_epcr);
1733 kvmppc_set_tsr_bits(vcpu, tsr_bits);
1738 kvmppc_clr_tsr_bits(vcpu, tsr_bits);
1743 kvmppc_set_tsr(vcpu, tsr);
1748 kvmppc_set_tcr(vcpu, tcr);
1752 vcpu->arch.vrsave = set_reg_val(id, *val);
1755 r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
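The ONE_REG interface addresses one register at a time by a 64-bit ID plus a user pointer. Reading the timer control register, for example (again with the hypothetical vcpu_fd):

    __u64 val;
    struct kvm_one_reg reg = {
            .id   = KVM_REG_PPC_TCR,
            .addr = (__u64)(unsigned long)&val,
    };

    if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
            err(1, "KVM_GET_ONE_REG");

The set side also accepts KVM_REG_PPC_OR_TSR and KVM_REG_PPC_CLEAR_TSR (lines 1733 and 1738), letting userspace set or clear individual TSR bits without a racy read-modify-write.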
1762 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1767 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1772 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1777 vcpu_load(vcpu);
1778 r = kvmppc_core_vcpu_translate(vcpu, tr);
1779 vcpu_put(vcpu);
1817 void kvmppc_set_epcr(struct kvm_vcpu *vcpu, u32 new_epcr)
1820 vcpu->arch.epcr = new_epcr;
1822 vcpu->arch.shadow_epcr &= ~SPRN_EPCR_GICM;
1823 if (vcpu->arch.epcr & SPRN_EPCR_ICM)
1824 vcpu->arch.shadow_epcr |= SPRN_EPCR_GICM;
1829 void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
1831 vcpu->arch.tcr = new_tcr;
1832 arm_next_watchdog(vcpu);
1833 update_timer_ints(vcpu);
1836 void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
1838 set_bits(tsr_bits, &vcpu->arch.tsr);
1840 kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
1841 kvm_vcpu_kick(vcpu);
1844 void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
1846 clear_bits(tsr_bits, &vcpu->arch.tsr);
1853 arm_next_watchdog(vcpu);
1855 update_timer_ints(vcpu);
1858 void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
1860 if (vcpu->arch.tcr & TCR_ARE) {
1861 vcpu->arch.dec = vcpu->arch.decar;
1862 kvmppc_emulate_dec(vcpu);
1865 kvmppc_set_tsr_bits(vcpu, TSR_DIS);
1923 void kvm_guest_protect_msr(struct kvm_vcpu *vcpu, ulong prot_bitmap, bool set)
1930 vcpu->arch.shadow_msrp |= MSRP_UCLEP;
1932 vcpu->arch.shadow_msrp |= MSRP_DEP;
1934 vcpu->arch.shadow_msrp |= MSRP_PMMP;
1937 vcpu->arch.shadow_msrp &= ~MSRP_UCLEP;
1939 vcpu->arch.shadow_msrp &= ~MSRP_DEP;
1941 vcpu->arch.shadow_msrp &= ~MSRP_PMMP;
1946 int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
1953 if (!(vcpu->arch.shared->msr & MSR_PR) &&
1954 (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
1956 pte->raddr = (vcpu->arch.magic_page_pa & PAGE_MASK) |
1970 gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
1973 gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
1983 gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
1997 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
2004 vcpu_load(vcpu);
2007 vcpu->arch.dbg_reg.dbcr0 = 0;
2008 vcpu->guest_debug = 0;
2009 kvm_guest_protect_msr(vcpu, MSR_DE, false);
2013 kvm_guest_protect_msr(vcpu, MSR_DE, true);
2014 vcpu->guest_debug = dbg->control;
2015 vcpu->arch.dbg_reg.dbcr0 = 0;
2017 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
2018 vcpu->arch.dbg_reg.dbcr0 |= DBCR0_IDM | DBCR0_IC;
2021 dbg_reg = &(vcpu->arch.dbg_reg);
2041 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
2071 vcpu_put(vcpu);
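kvm_arch_vcpu_ioctl_set_guest_debug() backs KVM_SET_GUEST_DEBUG: a zero control word tears debugging down, KVM_GUESTDBG_SINGLESTEP maps to DBCR0_IDM|DBCR0_IC (line 2018), and hardware breakpoints are translated into IAC/DAC settings further down. Enabling single-step from userspace:

    struct kvm_guest_debug dbg = {
            .control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
    };

    if (ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg) < 0)
            err(1, "KVM_SET_GUEST_DEBUG");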
2075 void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2077 vcpu->cpu = smp_processor_id();
2078 current->thread.kvm_vcpu = vcpu;
2081 void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
2084 vcpu->cpu = -1;
2095 int kvmppc_core_vcpu_create(struct kvm_vcpu *vcpu)
2100 r = vcpu->kvm->arch.kvm_ops->vcpu_create(vcpu);
2105 vcpu->arch.regs.nip = 0;
2106 vcpu->arch.shared->pir = vcpu->vcpu_id;
2107 kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
2108 kvmppc_set_msr(vcpu, 0);
2111 vcpu->arch.shadow_msr = MSR_USER | MSR_IS | MSR_DS;
2112 vcpu->arch.shadow_pid = 1;
2113 vcpu->arch.shared->msr = 0;
2118 vcpu->arch.ivpr = 0x55550000;
2120 vcpu->arch.ivor[i] = 0x7700 | i * 4;
2122 kvmppc_init_timing_stats(vcpu);
2124 r = kvmppc_core_vcpu_setup(vcpu);
2126 vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
2127 kvmppc_sanity_check(vcpu);
2131 void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
2133 vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
2141 void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2143 vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
2146 void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
2148 vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);