Lines matching refs: vcpu (identifier cross-reference hits, apparently from arch/powerpc/kvm/booke.c in the Linux KVM PowerPC Book E code; each entry is prefixed with its line number in that file)
90 void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
94 printk("pc: %08lx msr: %08llx\n", vcpu->arch.regs.nip,
95 vcpu->arch.shared->msr);
96 printk("lr: %08lx ctr: %08lx\n", vcpu->arch.regs.link,
97 vcpu->arch.regs.ctr);
98 printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
99 vcpu->arch.shared->srr1);
101 printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);
105 kvmppc_get_gpr(vcpu, i),
106 kvmppc_get_gpr(vcpu, i+1),
107 kvmppc_get_gpr(vcpu, i+2),
108 kvmppc_get_gpr(vcpu, i+3));
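The four kvmppc_get_gpr() hits above are the tail of kvmppc_dump_vcpu()'s register dump; the enclosing loop carries no "vcpu" token, so the search skips it. Likely surrounding context, reconstructed as a sketch (the loop header and format string are inferred, not matched):

	for (i = 0; i < 32; i += 4) {
		printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       kvmppc_get_gpr(vcpu, i),
		       kvmppc_get_gpr(vcpu, i+1),
		       kvmppc_get_gpr(vcpu, i+2),
		       kvmppc_get_gpr(vcpu, i+3));
	}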
113 void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
117 kvmppc_save_guest_spe(vcpu);
119 vcpu->arch.shadow_msr &= ~MSR_SPE;
123 static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
127 kvmppc_load_guest_spe(vcpu);
129 vcpu->arch.shadow_msr |= MSR_SPE;
133 static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
135 if (vcpu->arch.shared->msr & MSR_SPE) {
136 if (!(vcpu->arch.shadow_msr & MSR_SPE))
137 kvmppc_vcpu_enable_spe(vcpu);
138 } else if (vcpu->arch.shadow_msr & MSR_SPE) {
139 kvmppc_vcpu_disable_spe(vcpu);
143 static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
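The two kvmppc_vcpu_sync_spe() signatures above (file lines 133 and 143) are almost certainly the two sides of an #ifdef on SPE support, the second an empty stub. What the first implements is lazy facility switching: vcpu->arch.shadow_msr is what the hardware actually runs with, and its MSR_SPE bit chases the guest-visible bit so the costly save/load happens only on transitions. A minimal user-space model (names mirror the kernel's and the bit position follows the Book E MSR, but this is a sketch, not the kernel code):

	#include <stdio.h>
	#include <stdint.h>

	#define MSR_SPE (1u << 25)

	struct vcpu_model {
		uint32_t guest_msr;   /* what the guest believes MSR holds  */
		uint32_t shadow_msr;  /* what is really loaded into hardware */
	};

	static void sync_spe(struct vcpu_model *v)
	{
		if (v->guest_msr & MSR_SPE) {
			if (!(v->shadow_msr & MSR_SPE)) {
				puts("load guest SPE state"); /* kvmppc_load_guest_spe() */
				v->shadow_msr |= MSR_SPE;
			}
		} else if (v->shadow_msr & MSR_SPE) {
			puts("save guest SPE state");         /* kvmppc_save_guest_spe() */
			v->shadow_msr &= ~MSR_SPE;
		}
	}

	int main(void)
	{
		struct vcpu_model v = { 0, 0 };

		v.guest_msr |= MSR_SPE;
		sync_spe(&v);             /* transition: loads SPE state */
		sync_spe(&v);             /* steady state: does nothing  */
		v.guest_msr &= ~MSR_SPE;
		sync_spe(&v);             /* transition: saves SPE state */
		return 0;
	}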
149 * Load up guest vcpu FP state if it's needed.
152 * guest vcpu FP state if other threads require to use FPU.
157 static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
162 load_fp_state(&vcpu->arch.fp);
164 current->thread.fp_save_area = &vcpu->arch.fp;
171 * Save guest vcpu FP state into thread.
174 static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
183 static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
188 vcpu->arch.shadow_msr &= ~MSR_FP;
189 vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_FP;
198 static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
204 load_vr_state(&vcpu->arch.vr);
206 current->thread.vr_save_area = &vcpu->arch.vr;
214 * Save guest vcpu AltiVec state into thread.
217 static inline void kvmppc_save_guest_altivec(struct kvm_vcpu *vcpu)
228 static void kvmppc_vcpu_sync_debug(struct kvm_vcpu *vcpu)
232 vcpu->arch.shadow_msr &= ~MSR_DE;
233 vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_DE;
237 if (vcpu->guest_debug) {
243 vcpu->arch.shared->msr |= MSR_DE;
245 vcpu->arch.shadow_msr |= MSR_DE;
246 vcpu->arch.shared->msr &= ~MSR_DE;
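kvmppc_vcpu_sync_fpu() above applies the same shadow-MSR idea to MSR_FP as a plain mask-and-copy, and kvmppc_vcpu_sync_debug() does it for MSR_DE with one twist: the |= at file line 243 versus the pair at 245-246 look like alternative #ifdef branches, the latter for when a host debugger owns the vcpu, so hardware debug stays forced on while the guest-visible bit is forced off. Sketch reusing struct vcpu_model from the SPE model (MSR bit positions illustrative):

	#define MSR_DE (1u << 9)
	#define MSR_FP (1u << 13)

	static void sync_debug(struct vcpu_model *v, int host_debugging)
	{
		/* default: shadow DE mirrors the guest's wish */
		v->shadow_msr &= ~MSR_DE;
		v->shadow_msr |= v->guest_msr & MSR_DE;

		if (host_debugging) {
			v->shadow_msr |= MSR_DE;  /* hardware always traps */
			v->guest_msr  &= ~MSR_DE; /* guest can't own debug */
		}
	}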
255 void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
257 u32 old_msr = vcpu->arch.shared->msr;
263 vcpu->arch.shared->msr = new_msr;
265 kvmppc_mmu_msr_notify(vcpu, old_msr);
266 kvmppc_vcpu_sync_spe(vcpu);
267 kvmppc_vcpu_sync_fpu(vcpu);
268 kvmppc_vcpu_sync_debug(vcpu);
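A sketch of the ordering kvmppc_set_msr() imposes, reusing the helpers from the sketches above: publish the new MSR, tell the MMU what the old one was, then reconcile each lazily-switched facility against the new value:

	static void set_msr(struct vcpu_model *v, uint32_t new_msr)
	{
		uint32_t old_msr = v->guest_msr;

		v->guest_msr = new_msr;
		(void)old_msr;      /* kvmppc_mmu_msr_notify(vcpu, old_msr) */
		sync_spe(v);        /* kvmppc_vcpu_sync_spe(vcpu)           */
		sync_debug(v, 0);   /* ..._sync_fpu() / ..._sync_debug()    */
	}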
271 static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
274 trace_kvm_booke_queue_irqprio(vcpu, priority);
275 set_bit(priority, &vcpu->arch.pending_exceptions);
278 void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
281 vcpu->arch.queued_dear = dear_flags;
282 vcpu->arch.queued_esr = esr_flags;
283 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
286 void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong srr1_flags,
290 vcpu->arch.queued_dear = dear_flags;
291 vcpu->arch.queued_esr = esr_flags;
292 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
295 void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu)
297 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
300 void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong esr_flags)
302 vcpu->arch.queued_esr = esr_flags;
303 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
306 static void kvmppc_core_queue_alignment(struct kvm_vcpu *vcpu, ulong dear_flags,
309 vcpu->arch.queued_dear = dear_flags;
310 vcpu->arch.queued_esr = esr_flags;
311 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALIGNMENT);
314 void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
316 vcpu->arch.queued_esr = esr_flags;
317 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
320 void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu, ulong srr1_flags)
323 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
327 void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu, ulong srr1_flags)
330 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
334 void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
336 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
339 int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
341 return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
344 void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
346 clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
349 void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
357 kvmppc_booke_queue_irqprio(vcpu, prio);
360 void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
362 clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
363 clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
366 static void kvmppc_core_queue_watchdog(struct kvm_vcpu *vcpu)
368 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_WATCHDOG);
371 static void kvmppc_core_dequeue_watchdog(struct kvm_vcpu *vcpu)
373 clear_bit(BOOKE_IRQPRIO_WATCHDOG, &vcpu->arch.pending_exceptions);
376 void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu)
378 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DEBUG);
381 void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu)
383 clear_bit(BOOKE_IRQPRIO_DEBUG, &vcpu->arch.pending_exceptions);
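Every queue/dequeue helper above reduces to one bit operation on vcpu->arch.pending_exceptions, indexed by a BOOKE_IRQPRIO_* priority: queueing is set_bit(), dequeueing is clear_bit(), polling is test_bit(). A user-space model (the priority values here are invented for the sketch, and the kernel's bit helpers are atomic where these are not):

	enum irqprio { PRIO_DTLB_MISS, PRIO_DECREMENTER, PRIO_EXTERNAL, PRIO_DEBUG };

	static unsigned long pending_exceptions;

	static void queue_irqprio(enum irqprio p)   { pending_exceptions |=  1ul << p; }
	static void dequeue_irqprio(enum irqprio p) { pending_exceptions &= ~(1ul << p); }
	static int  irqprio_pending(enum irqprio p) { return !!(pending_exceptions & (1ul << p)); }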
386 static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
388 kvmppc_set_srr0(vcpu, srr0);
389 kvmppc_set_srr1(vcpu, srr1);
392 static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
394 vcpu->arch.csrr0 = srr0;
395 vcpu->arch.csrr1 = srr1;
398 static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
401 vcpu->arch.dsrr0 = srr0;
402 vcpu->arch.dsrr1 = srr1;
404 set_guest_csrr(vcpu, srr0, srr1);
408 static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
410 vcpu->arch.mcsrr0 = srr0;
411 vcpu->arch.mcsrr1 = srr1;
415 static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
421 ulong crit_raw = vcpu->arch.shared->critical;
422 ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
426 ulong new_msr = vcpu->arch.shared->msr;
429 if (!(vcpu->arch.shared->msr & MSR_SF)) {
437 crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
444 if ((priority == BOOKE_IRQPRIO_EXTERNAL) && vcpu->arch.epr_flags)
477 allowed = vcpu->arch.shared->msr & MSR_CE;
483 allowed = vcpu->arch.shared->msr & MSR_ME;
493 allowed = vcpu->arch.shared->msr & MSR_EE;
499 allowed = vcpu->arch.shared->msr & MSR_DE;
513 set_guest_srr(vcpu, vcpu->arch.regs.nip,
514 vcpu->arch.shared->msr);
517 set_guest_csrr(vcpu, vcpu->arch.regs.nip,
518 vcpu->arch.shared->msr);
521 set_guest_dsrr(vcpu, vcpu->arch.regs.nip,
522 vcpu->arch.shared->msr);
525 set_guest_mcsrr(vcpu, vcpu->arch.regs.nip,
526 vcpu->arch.shared->msr);
530 vcpu->arch.regs.nip = vcpu->arch.ivpr |
531 vcpu->arch.ivor[priority];
533 kvmppc_set_esr(vcpu, vcpu->arch.queued_esr);
535 kvmppc_set_dar(vcpu, vcpu->arch.queued_dear);
537 if (vcpu->arch.epr_flags & KVMPPC_EPR_USER)
538 kvm_make_request(KVM_REQ_EPR_EXIT, vcpu);
539 else if (vcpu->arch.epr_flags & KVMPPC_EPR_KERNEL) {
540 BUG_ON(vcpu->arch.irq_type != KVMPPC_IRQ_MPIC);
541 kvmppc_mpic_set_epr(vcpu);
547 if (vcpu->arch.epcr & SPRN_EPCR_ICM)
550 kvmppc_set_msr(vcpu, new_msr);
553 clear_bit(priority, &vcpu->arch.pending_exceptions);
562 if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
563 kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
564 if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
565 kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
566 if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK)
567 kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
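The shape of kvmppc_booke_irqprio_deliver() as seen through the matches above: a priority is deliverable only if its class's gating MSR bit (CE, ME, EE or DE; some priorities are always allowed) is set; delivery stashes NIP/MSR in the class's save/restore pair (srr, csrr, dsrr or mcsrr), vectors to IVPR | IVOR[priority], and clears the pending bit. A simplified model with a single srr pair and a crude mask step (the real function builds new_msr with more care):

	struct booke_model {
		unsigned long nip, msr;
		unsigned long srr0, srr1;
		unsigned long ivpr, ivor[64];
		unsigned long pending;
	};

	static int deliver(struct booke_model *v, int prio, unsigned long gate)
	{
		if (gate && !(v->msr & gate))
			return 0;                      /* masked: stays pending  */
		v->srr0 = v->nip;                  /* set_guest_srr(nip, msr) */
		v->srr1 = v->msr;
		v->nip  = v->ivpr | v->ivor[prio]; /* enter the guest handler */
		v->msr &= ~gate;                   /* class re-entry disabled */
		v->pending &= ~(1ul << prio);      /* clear_bit(priority,...) */
		return 1;
	}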
578 static unsigned long watchdog_next_timeout(struct kvm_vcpu *vcpu)
582 u32 period = TCR_GET_WP(vcpu->arch.tcr);
604 static void arm_next_watchdog(struct kvm_vcpu *vcpu)
613 if ((vcpu->arch.tsr & (TSR_ENW | TSR_WIS)) != (TSR_ENW | TSR_WIS))
614 kvm_clear_request(KVM_REQ_WATCHDOG, vcpu);
616 spin_lock_irqsave(&vcpu->arch.wdt_lock, flags);
617 nr_jiffies = watchdog_next_timeout(vcpu);
623 mod_timer(&vcpu->arch.wdt_timer, jiffies + nr_jiffies);
625 del_timer(&vcpu->arch.wdt_timer);
626 spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags);
631 struct kvm_vcpu *vcpu = from_timer(vcpu, t, arch.wdt_timer);
636 new_tsr = tsr = vcpu->arch.tsr;
648 } while (cmpxchg(&vcpu->arch.tsr, tsr, new_tsr) != tsr);
652 kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
653 kvm_vcpu_kick(vcpu);
660 if (final && (vcpu->arch.tcr & TCR_WRC_MASK) &&
661 vcpu->arch.watchdog_enabled) {
663 kvm_make_request(KVM_REQ_WATCHDOG, vcpu);
664 kvm_vcpu_kick(vcpu);
674 arm_next_watchdog(vcpu);
677 static void update_timer_ints(struct kvm_vcpu *vcpu)
679 if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
680 kvmppc_core_queue_dec(vcpu);
682 kvmppc_core_dequeue_dec(vcpu);
684 if ((vcpu->arch.tcr & TCR_WIE) && (vcpu->arch.tsr & TSR_WIS))
685 kvmppc_core_queue_watchdog(vcpu);
687 kvmppc_core_dequeue_watchdog(vcpu);
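The watchdog lines above implement the Book E two-stage watchdog over TSR[ENW]/TSR[WIS], with update_timer_ints() deriving the queued interrupts from TCR/TSR afterwards. A state-machine model (bit positions follow the usual Book E layout but are illustrative here; the kernel updates TSR with a cmpxchg loop, this model is single-threaded):

	#define TSR_ENW (1u << 31)  /* enable next watchdog         */
	#define TSR_WIS (1u << 30)  /* watchdog interrupt status    */
	#define TSR_DIS (1u << 27)  /* decrementer interrupt status */
	#define TCR_WIE (1u << 27)  /* watchdog interrupt enable    */
	#define TCR_DIE (1u << 26)  /* decrementer interrupt enable */

	static int watchdog_expired(unsigned int *tsr)
	{
		if (!(*tsr & TSR_ENW)) {
			*tsr |= TSR_ENW;  /* first expiry: arm second stage  */
			return 0;
		}
		if (!(*tsr & TSR_WIS)) {
			*tsr |= TSR_WIS;  /* second expiry: interrupt status */
			return 0;
		}
		return 1;                 /* ENW|WIS already set: final      */
	}

	static void timer_ints(unsigned int tcr, unsigned int tsr,
			       int *dec_pending, int *wdt_pending)
	{
		*dec_pending = (tcr & TCR_DIE) && (tsr & TSR_DIS);
		*wdt_pending = (tcr & TCR_WIE) && (tsr & TSR_WIS);
	}

On a final expiry the kernel additionally makes KVM_REQ_WATCHDOG (exiting to userspace) when TCR[WRC] requests a reset action, as the matches at file lines 660-664 show.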
690 static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
692 unsigned long *pending = &vcpu->arch.pending_exceptions;
697 if (kvmppc_booke_irqprio_deliver(vcpu, priority))
706 vcpu->arch.shared->int_pending = !!*pending;
710 int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
715 kvmppc_core_check_exceptions(vcpu);
717 if (kvm_request_pending(vcpu)) {
722 if (vcpu->arch.shared->msr & MSR_WE) {
724 kvm_vcpu_halt(vcpu);
727 kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
734 int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
738 if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu))
739 update_timer_ints(vcpu);
741 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
742 kvmppc_core_flush_tlb(vcpu);
745 if (kvm_check_request(KVM_REQ_WATCHDOG, vcpu)) {
746 vcpu->run->exit_reason = KVM_EXIT_WATCHDOG;
750 if (kvm_check_request(KVM_REQ_EPR_EXIT, vcpu)) {
751 vcpu->run->epr.epr = 0;
752 vcpu->arch.epr_needed = true;
753 vcpu->run->exit_reason = KVM_EXIT_EPR;
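kvm_check_request() as used above is an atomic test-and-clear on the vcpu's request bitmap; each handled request is either absorbed internally (timer update, TLB flush) or turned into a userspace exit by filling vcpu->run->exit_reason. A non-atomic user-space equivalent:

	static int check_request(unsigned long *reqs, int bit)
	{
		if (!(*reqs & (1ul << bit)))
			return 0;
		*reqs &= ~(1ul << bit);    /* consume the request */
		return 1;
	}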
760 int kvmppc_vcpu_run(struct kvm_vcpu *vcpu)
765 if (!vcpu->arch.sane) {
766 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
770 s = kvmppc_prepare_to_enter(vcpu);
785 kvmppc_load_guest_fp(vcpu);
796 kvmppc_load_guest_altivec(vcpu);
800 debug = vcpu->arch.dbg_reg;
803 current->thread.debug = vcpu->arch.dbg_reg;
805 vcpu->arch.pgdir = vcpu->kvm->mm->pgd;
808 ret = __kvmppc_vcpu_run(vcpu);
818 kvmppc_save_guest_fp(vcpu);
822 kvmppc_save_guest_altivec(vcpu);
826 vcpu->mode = OUTSIDE_GUEST_MODE;
830 static int emulation_exit(struct kvm_vcpu *vcpu)
834 er = kvmppc_emulate_instruction(vcpu);
838 kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
848 __func__, vcpu->arch.regs.nip, vcpu->arch.last_inst);
851 vcpu->run->hw.hardware_exit_reason = ~0ULL << 32;
852 vcpu->run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
853 kvmppc_core_queue_program(vcpu, ESR_PIL);
864 static int kvmppc_handle_debug(struct kvm_vcpu *vcpu)
866 struct kvm_run *run = vcpu->run;
867 struct debug_reg *dbg_reg = &(vcpu->arch.dbg_reg);
868 u32 dbsr = vcpu->arch.dbsr;
870 if (vcpu->guest_debug == 0) {
881 if (dbsr && (vcpu->arch.shared->msr & MSR_DE) &&
882 (vcpu->arch.dbg_reg.dbcr0 & DBCR0_IDM))
883 kvmppc_core_queue_debug(vcpu);
886 if ((dbsr & DBSR_TIE) && !(vcpu->arch.shared->msr & MSR_DE))
887 kvmppc_core_queue_program(vcpu, ESR_PTR);
894 * Clear guest dbsr (vcpu->arch.dbsr)
896 vcpu->arch.dbsr = 0;
898 run->debug.arch.address = vcpu->arch.regs.nip;
937 static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
978 vcpu->arch.dbsr = mfspr(SPRN_DBSR);
984 static int kvmppc_resume_inst_load(struct kvm_vcpu *vcpu,
993 __func__, vcpu->arch.regs.nip);
996 vcpu->run->hw.hardware_exit_reason = ~0ULL << 32;
997 vcpu->run->hw.hardware_exit_reason |= last_inst;
998 kvmppc_core_queue_program(vcpu, ESR_PIL);
1011 int kvmppc_handle_exit(struct kvm_vcpu *vcpu, unsigned int exit_nr)
1013 struct kvm_run *run = vcpu->run;
1025 kvmppc_update_timing_stats(vcpu);
1028 kvmppc_restart_interrupt(vcpu, exit_nr);
1038 emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &pinst);
1043 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
1044 emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &pinst);
1052 trace_kvm_exit(exit_nr, vcpu);
1075 r = kvmppc_resume_inst_load(vcpu, emulated, last_inst);
1082 kvmppc_dump_vcpu(vcpu);
1090 kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
1095 kvmppc_account_exit(vcpu, DEC_EXITS);
1104 kvmppc_account_exit(vcpu, DBELL_EXITS);
1109 kvmppc_account_exit(vcpu, GDBELL_EXITS);
1120 kvmppc_account_exit(vcpu, GDBELL_EXITS);
1135 r = emulation_exit(vcpu);
1139 if ((vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) &&
1145 r = kvmppc_handle_debug(vcpu);
1147 kvmppc_account_exit(vcpu, DEBUG_EXITS);
1151 if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
1160 kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
1162 kvmppc_account_exit(vcpu, USR_PR_INST);
1166 r = emulation_exit(vcpu);
1170 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
1171 kvmppc_account_exit(vcpu, FP_UNAVAIL);
1177 if (vcpu->arch.shared->msr & MSR_SPE)
1178 kvmppc_vcpu_enable_spe(vcpu);
1180 kvmppc_booke_queue_irqprio(vcpu,
1187 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
1192 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
1201 kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
1212 __func__, exit_nr, vcpu->arch.regs.nip);
1224 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
1229 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_ASSIST);
1235 kvmppc_core_queue_data_storage(vcpu, 0, vcpu->arch.fault_dear,
1236 vcpu->arch.fault_esr);
1237 kvmppc_account_exit(vcpu, DSI_EXITS);
1242 kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
1243 kvmppc_account_exit(vcpu, ISI_EXITS);
1248 kvmppc_core_queue_alignment(vcpu, vcpu->arch.fault_dear,
1249 vcpu->arch.fault_esr);
1255 if (!(vcpu->arch.shared->msr & MSR_PR)) {
1256 kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
1262 kvmppc_core_queue_program(vcpu, ESR_PPR);
1269 if (!(vcpu->arch.shared->msr & MSR_PR) &&
1270 (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
1272 kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
1276 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
1278 kvmppc_account_exit(vcpu, SYSCALL_EXITS);
1284 unsigned long eaddr = vcpu->arch.fault_dear;
1290 if (!(vcpu->arch.shared->msr & MSR_PR) &&
1291 (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
1292 kvmppc_map_magic(vcpu);
1293 kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
1301 gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
1304 kvmppc_core_queue_dtlb_miss(vcpu,
1305 vcpu->arch.fault_dear,
1306 vcpu->arch.fault_esr);
1307 kvmppc_mmu_dtlb_miss(vcpu);
1308 kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
1313 idx = srcu_read_lock(&vcpu->kvm->srcu);
1315 gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
1318 if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
1325 kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
1326 kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
1331 vcpu->arch.paddr_accessed = gpaddr;
1332 vcpu->arch.vaddr_accessed = eaddr;
1333 r = kvmppc_emulate_mmio(vcpu);
1334 kvmppc_account_exit(vcpu, MMIO_EXITS);
1337 srcu_read_unlock(&vcpu->kvm->srcu, idx);
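The DTLB-miss matches above encode a three-way decision; the ITLB path that follows mirrors it, except that a guest-visible translation to an invisible gfn raises a machine check rather than going to MMIO emulation. Decision-only sketch (the two flags stand in for kvmppc_mmu_dtlb_index() and kvm_is_visible_gfn()):

	enum dtlb_result { REFLECT_TO_GUEST, MAP_SHADOW_TLB, EMULATE_MMIO };

	static enum dtlb_result dtlb_miss(int gtlb_hit, int gfn_visible)
	{
		if (!gtlb_hit)
			return REFLECT_TO_GUEST;  /* guest takes its own miss */
		if (gfn_visible)
			return MAP_SHADOW_TLB;    /* kvmppc_mmu_map(), retry  */
		return EMULATE_MMIO;              /* kvmppc_emulate_mmio()    */
	}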
1342 unsigned long eaddr = vcpu->arch.regs.nip;
1350 gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
1353 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
1354 kvmppc_mmu_itlb_miss(vcpu);
1355 kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
1359 kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);
1361 idx = srcu_read_lock(&vcpu->kvm->srcu);
1363 gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
1366 if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
1373 kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
1376 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
1379 srcu_read_unlock(&vcpu->kvm->srcu, idx);
1384 r = kvmppc_handle_debug(vcpu);
1387 kvmppc_account_exit(vcpu, DEBUG_EXITS);
1402 s = kvmppc_prepare_to_enter(vcpu);
1408 kvmppc_load_guest_fp(vcpu);
1409 kvmppc_load_guest_altivec(vcpu);
1416 static void kvmppc_set_tsr(struct kvm_vcpu *vcpu, u32 new_tsr)
1418 u32 old_tsr = vcpu->arch.tsr;
1420 vcpu->arch.tsr = new_tsr;
1422 if ((old_tsr ^ vcpu->arch.tsr) & (TSR_ENW | TSR_WIS))
1423 arm_next_watchdog(vcpu);
1425 update_timer_ints(vcpu);
1428 int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
1431 spin_lock_init(&vcpu->arch.wdt_lock);
1432 timer_setup(&vcpu->arch.wdt_timer, kvmppc_watchdog_func, 0);
1442 void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
1444 del_timer_sync(&vcpu->arch.wdt_timer);
1447 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1451 vcpu_load(vcpu);
1453 regs->pc = vcpu->arch.regs.nip;
1454 regs->cr = kvmppc_get_cr(vcpu);
1455 regs->ctr = vcpu->arch.regs.ctr;
1456 regs->lr = vcpu->arch.regs.link;
1457 regs->xer = kvmppc_get_xer(vcpu);
1458 regs->msr = vcpu->arch.shared->msr;
1459 regs->srr0 = kvmppc_get_srr0(vcpu);
1460 regs->srr1 = kvmppc_get_srr1(vcpu);
1461 regs->pid = vcpu->arch.pid;
1462 regs->sprg0 = kvmppc_get_sprg0(vcpu);
1463 regs->sprg1 = kvmppc_get_sprg1(vcpu);
1464 regs->sprg2 = kvmppc_get_sprg2(vcpu);
1465 regs->sprg3 = kvmppc_get_sprg3(vcpu);
1466 regs->sprg4 = kvmppc_get_sprg4(vcpu);
1467 regs->sprg5 = kvmppc_get_sprg5(vcpu);
1468 regs->sprg6 = kvmppc_get_sprg6(vcpu);
1469 regs->sprg7 = kvmppc_get_sprg7(vcpu);
1472 regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
1474 vcpu_put(vcpu);
1478 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1482 vcpu_load(vcpu);
1484 vcpu->arch.regs.nip = regs->pc;
1485 kvmppc_set_cr(vcpu, regs->cr);
1486 vcpu->arch.regs.ctr = regs->ctr;
1487 vcpu->arch.regs.link = regs->lr;
1488 kvmppc_set_xer(vcpu, regs->xer);
1489 kvmppc_set_msr(vcpu, regs->msr);
1490 kvmppc_set_srr0(vcpu, regs->srr0);
1491 kvmppc_set_srr1(vcpu, regs->srr1);
1492 kvmppc_set_pid(vcpu, regs->pid);
1493 kvmppc_set_sprg0(vcpu, regs->sprg0);
1494 kvmppc_set_sprg1(vcpu, regs->sprg1);
1495 kvmppc_set_sprg2(vcpu, regs->sprg2);
1496 kvmppc_set_sprg3(vcpu, regs->sprg3);
1497 kvmppc_set_sprg4(vcpu, regs->sprg4);
1498 kvmppc_set_sprg5(vcpu, regs->sprg5);
1499 kvmppc_set_sprg6(vcpu, regs->sprg6);
1500 kvmppc_set_sprg7(vcpu, regs->sprg7);
1503 kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
1505 vcpu_put(vcpu);
1509 static void get_sregs_base(struct kvm_vcpu *vcpu,
1516 sregs->u.e.csrr0 = vcpu->arch.csrr0;
1517 sregs->u.e.csrr1 = vcpu->arch.csrr1;
1518 sregs->u.e.mcsr = vcpu->arch.mcsr;
1519 sregs->u.e.esr = kvmppc_get_esr(vcpu);
1520 sregs->u.e.dear = kvmppc_get_dar(vcpu);
1521 sregs->u.e.tsr = vcpu->arch.tsr;
1522 sregs->u.e.tcr = vcpu->arch.tcr;
1523 sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
1525 sregs->u.e.vrsave = vcpu->arch.vrsave;
1528 static int set_sregs_base(struct kvm_vcpu *vcpu,
1534 vcpu->arch.csrr0 = sregs->u.e.csrr0;
1535 vcpu->arch.csrr1 = sregs->u.e.csrr1;
1536 vcpu->arch.mcsr = sregs->u.e.mcsr;
1537 kvmppc_set_esr(vcpu, sregs->u.e.esr);
1538 kvmppc_set_dar(vcpu, sregs->u.e.dear);
1539 vcpu->arch.vrsave = sregs->u.e.vrsave;
1540 kvmppc_set_tcr(vcpu, sregs->u.e.tcr);
1543 vcpu->arch.dec = sregs->u.e.dec;
1544 kvmppc_emulate_dec(vcpu);
1548 kvmppc_set_tsr(vcpu, sregs->u.e.tsr);
1553 static void get_sregs_arch206(struct kvm_vcpu *vcpu,
1558 sregs->u.e.pir = vcpu->vcpu_id;
1559 sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0;
1560 sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1;
1561 sregs->u.e.decar = vcpu->arch.decar;
1562 sregs->u.e.ivpr = vcpu->arch.ivpr;
1565 static int set_sregs_arch206(struct kvm_vcpu *vcpu,
1571 if (sregs->u.e.pir != vcpu->vcpu_id)
1574 vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0;
1575 vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1;
1576 vcpu->arch.decar = sregs->u.e.decar;
1577 vcpu->arch.ivpr = sregs->u.e.ivpr;
1582 int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
1586 sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
1587 sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
1588 sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
1589 sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
1590 sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
1591 sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
1592 sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
1593 sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
1594 sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
1595 sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
1596 sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
1597 sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
1598 sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
1599 sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
1600 sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
1601 sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
1605 int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
1610 vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0];
1611 vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1];
1612 vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2];
1613 vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3];
1614 vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4];
1615 vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5];
1616 vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6];
1617 vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7];
1618 vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8];
1619 vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9];
1620 vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10];
1621 vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11];
1622 vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12];
1623 vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13];
1624 vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14];
1625 vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15];
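The sixteen explicit assignments in each direction exist because sregs->u.e.ivor_low[] is indexed by IVOR number while vcpu->arch.ivor[] is indexed by BOOKE_IRQPRIO_*, and the two orderings differ. A lookup table would let both copies share a loop; a sketch in kernel context, with the table order taken from the assignments above:

	static const int ivor_prio[16] = {
		BOOKE_IRQPRIO_CRITICAL,     BOOKE_IRQPRIO_MACHINE_CHECK,
		BOOKE_IRQPRIO_DATA_STORAGE, BOOKE_IRQPRIO_INST_STORAGE,
		BOOKE_IRQPRIO_EXTERNAL,     BOOKE_IRQPRIO_ALIGNMENT,
		BOOKE_IRQPRIO_PROGRAM,      BOOKE_IRQPRIO_FP_UNAVAIL,
		BOOKE_IRQPRIO_SYSCALL,      BOOKE_IRQPRIO_AP_UNAVAIL,
		BOOKE_IRQPRIO_DECREMENTER,  BOOKE_IRQPRIO_FIT,
		BOOKE_IRQPRIO_WATCHDOG,     BOOKE_IRQPRIO_DTLB_MISS,
		BOOKE_IRQPRIO_ITLB_MISS,    BOOKE_IRQPRIO_DEBUG,
	};

	for (i = 0; i < 16; i++)
		sregs->u.e.ivor_low[i] = vcpu->arch.ivor[ivor_prio[i]];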
1630 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1635 vcpu_load(vcpu);
1637 sregs->pvr = vcpu->arch.pvr;
1639 get_sregs_base(vcpu, sregs);
1640 get_sregs_arch206(vcpu, sregs);
1641 ret = vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
1643 vcpu_put(vcpu);
1647 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1652 vcpu_load(vcpu);
1653 if (vcpu->arch.pvr != sregs->pvr)
1656 ret = set_sregs_base(vcpu, sregs);
1660 ret = set_sregs_arch206(vcpu, sregs);
1664 ret = vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
1667 vcpu_put(vcpu);
1671 int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
1678 *val = get_reg_val(id, vcpu->arch.dbg_reg.iac1);
1681 *val = get_reg_val(id, vcpu->arch.dbg_reg.iac2);
1685 *val = get_reg_val(id, vcpu->arch.dbg_reg.iac3);
1688 *val = get_reg_val(id, vcpu->arch.dbg_reg.iac4);
1692 *val = get_reg_val(id, vcpu->arch.dbg_reg.dac1);
1695 *val = get_reg_val(id, vcpu->arch.dbg_reg.dac2);
1698 u32 epr = kvmppc_get_epr(vcpu);
1704 *val = get_reg_val(id, vcpu->arch.epcr);
1708 *val = get_reg_val(id, vcpu->arch.tcr);
1711 *val = get_reg_val(id, vcpu->arch.tsr);
1717 *val = get_reg_val(id, vcpu->arch.vrsave);
1720 r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
1727 int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
1734 vcpu->arch.dbg_reg.iac1 = set_reg_val(id, *val);
1737 vcpu->arch.dbg_reg.iac2 = set_reg_val(id, *val);
1741 vcpu->arch.dbg_reg.iac3 = set_reg_val(id, *val);
1744 vcpu->arch.dbg_reg.iac4 = set_reg_val(id, *val);
1748 vcpu->arch.dbg_reg.dac1 = set_reg_val(id, *val);
1751 vcpu->arch.dbg_reg.dac2 = set_reg_val(id, *val);
1755 kvmppc_set_epr(vcpu, new_epr);
1761 kvmppc_set_epcr(vcpu, new_epcr);
1767 kvmppc_set_tsr_bits(vcpu, tsr_bits);
1772 kvmppc_clr_tsr_bits(vcpu, tsr_bits);
1777 kvmppc_set_tsr(vcpu, tsr);
1782 kvmppc_set_tcr(vcpu, tcr);
1786 vcpu->arch.vrsave = set_reg_val(id, *val);
1789 r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
1796 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1801 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1806 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1811 vcpu_load(vcpu);
1812 r = kvmppc_core_vcpu_translate(vcpu, tr);
1813 vcpu_put(vcpu);
1850 void kvmppc_set_epcr(struct kvm_vcpu *vcpu, u32 new_epcr)
1853 vcpu->arch.epcr = new_epcr;
1855 vcpu->arch.shadow_epcr &= ~SPRN_EPCR_GICM;
1856 if (vcpu->arch.epcr & SPRN_EPCR_ICM)
1857 vcpu->arch.shadow_epcr |= SPRN_EPCR_GICM;
1862 void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
1864 vcpu->arch.tcr = new_tcr;
1865 arm_next_watchdog(vcpu);
1866 update_timer_ints(vcpu);
1869 void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
1871 set_bits(tsr_bits, &vcpu->arch.tsr);
1873 kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
1874 kvm_vcpu_kick(vcpu);
1877 void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
1879 clear_bits(tsr_bits, &vcpu->arch.tsr);
1886 arm_next_watchdog(vcpu);
1888 update_timer_ints(vcpu);
1891 void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
1893 if (vcpu->arch.tcr & TCR_ARE) {
1894 vcpu->arch.dec = vcpu->arch.decar;
1895 kvmppc_emulate_dec(vcpu);
1898 kvmppc_set_tsr_bits(vcpu, TSR_DIS);
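kvmppc_decrementer_func() above implements the TCR[ARE] auto-reload decrementer: on expiry, reload DEC from DECAR and keep counting; either way raise TSR[DIS] so update_timer_ints() can queue the guest's decrementer interrupt. Model reusing the TSR/TCR defines from the watchdog sketch (TCR_ARE position illustrative):

	#define TCR_ARE (1u << 22)  /* auto-reload enable */

	struct dec_model { unsigned int tcr, tsr, dec, decar; };

	static void dec_expired(struct dec_model *d)
	{
		if (d->tcr & TCR_ARE)
			d->dec = d->decar;  /* reload, timer keeps running  */
		d->tsr |= TSR_DIS;          /* kvmppc_set_tsr_bits(TSR_DIS) */
	}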
1956 static void kvm_guest_protect_msr(struct kvm_vcpu *vcpu, ulong prot_bitmap,
1964 vcpu->arch.shadow_msrp |= MSRP_UCLEP;
1966 vcpu->arch.shadow_msrp |= MSRP_DEP;
1968 vcpu->arch.shadow_msrp |= MSRP_PMMP;
1971 vcpu->arch.shadow_msrp &= ~MSRP_UCLEP;
1973 vcpu->arch.shadow_msrp &= ~MSRP_DEP;
1975 vcpu->arch.shadow_msrp &= ~MSRP_PMMP;
1980 int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
1987 if (!(vcpu->arch.shared->msr & MSR_PR) &&
1988 (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
1990 pte->raddr = (vcpu->arch.magic_page_pa & PAGE_MASK) |
2004 gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
2007 gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
2017 gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
2031 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
2038 vcpu_load(vcpu);
2041 vcpu->arch.dbg_reg.dbcr0 = 0;
2042 vcpu->guest_debug = 0;
2043 kvm_guest_protect_msr(vcpu, MSR_DE, false);
2047 kvm_guest_protect_msr(vcpu, MSR_DE, true);
2048 vcpu->guest_debug = dbg->control;
2049 vcpu->arch.dbg_reg.dbcr0 = 0;
2051 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
2052 vcpu->arch.dbg_reg.dbcr0 |= DBCR0_IDM | DBCR0_IC;
2055 dbg_reg = &(vcpu->arch.dbg_reg);
2075 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
2105 vcpu_put(vcpu);
2109 void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2111 vcpu->cpu = smp_processor_id();
2112 current->thread.kvm_vcpu = vcpu;
2115 void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
2118 vcpu->cpu = -1;
2129 int kvmppc_core_vcpu_create(struct kvm_vcpu *vcpu)
2134 r = vcpu->kvm->arch.kvm_ops->vcpu_create(vcpu);
2139 vcpu->arch.regs.nip = 0;
2140 vcpu->arch.shared->pir = vcpu->vcpu_id;
2141 kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
2142 kvmppc_set_msr(vcpu, 0);
2145 vcpu->arch.shadow_msr = MSR_USER | MSR_IS | MSR_DS;
2146 vcpu->arch.shadow_pid = 1;
2147 vcpu->arch.shared->msr = 0;
2152 vcpu->arch.ivpr = 0x55550000;
2154 vcpu->arch.ivor[i] = 0x7700 | i * 4;
2156 kvmppc_init_timing_stats(vcpu);
2158 r = kvmppc_core_vcpu_setup(vcpu);
2160 vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
2161 kvmppc_sanity_check(vcpu);
2165 void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
2167 vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
2175 void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2177 vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
2180 void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
2182 vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);