Lines matching refs:vcpu — cross-reference listing over arch/mips/kvm/emulate.c (Linux, KVM for MIPS). The leading number on each line below is the line number in that source file.

40 static int kvm_compute_return_epc(struct kvm_vcpu *vcpu, unsigned long instpc,
45 struct kvm_vcpu_arch *arch = &vcpu->arch;
56 err = kvm_get_badinstrp((u32 *)epc, vcpu, &insn.word);
243 enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause)
248 err = kvm_compute_return_epc(vcpu, vcpu->arch.pc,
249 &vcpu->arch.pc);
253 vcpu->arch.pc += 4;
256 kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);
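
The fragments above are update_pc(): for a branch or jump, kvm_compute_return_epc() resolves the delay-slot target into vcpu->arch.pc; otherwise the PC simply advances by 4. The MMIO emulators further down snapshot the PC first so a failed emulation can be retried (see lines 984-985 and 1264 below). A minimal user-space sketch of that save/advance/rollback pattern, with stand-in types and helpers:

    #include <stdint.h>

    enum emulation_result { EMULATE_DONE, EMULATE_FAIL };

    struct vcpu_model { unsigned long pc; };

    /* Stand-in for update_pc(): advance past the faulting instruction
     * (the real code consults the branch emulator in the delay-slot case). */
    static enum emulation_result update_pc_model(struct vcpu_model *v)
    {
        v->pc += 4;
        return EMULATE_DONE;
    }

    static enum emulation_result emulate_access_model(struct vcpu_model *v)
    {
        unsigned long curr_pc = v->pc;  /* snapshot for rollback */

        if (update_pc_model(v) == EMULATE_FAIL)
            return EMULATE_FAIL;

        /* ...decode and perform the access; if it cannot be handled: */
        if (0 /* e.g. unrecognised instruction */) {
            v->pc = curr_pc;            /* undo, so the guest can retry */
            return EMULATE_FAIL;
        }
        return EMULATE_DONE;
    }
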
264 * @vcpu: KVM VCPU information.
272 int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
275 *out = vcpu->arch.host_cp0_badinstr;
286 * @vcpu: KVM VCPU information.
294 int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
297 *out = vcpu->arch.host_cp0_badinstrp;
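
kvm_get_badinstr() and kvm_get_badinstrp() hand back the CP0_BadInstr/CP0_BadInstrP words that hardware latched at exit time, so the emulator (line 56) never has to re-read guest memory, which may have changed since the fault. A hedged sketch of the shape of these helpers; the error value for CPUs lacking the BadInstr registers is an assumption here:

    #include <stdint.h>
    #include <errno.h>

    /* Stand-in for the exit state cached by the run loop (lines 275/297). */
    struct vcpu_model {
        uint32_t host_cp0_badinstr;   /* faulting instruction word */
        uint32_t host_cp0_badinstrp;  /* prior branch, if in a delay slot */
        int has_badinstr;             /* modelling a cpu_has_badinstr check */
    };

    static int get_badinstr_model(struct vcpu_model *v, uint32_t *out)
    {
        if (!v->has_badinstr)
            return -EINVAL;  /* assumption: no-BadInstr CPUs error out */
        *out = v->host_cp0_badinstr;
        return 0;
    }
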
307 * @vcpu: Virtual CPU.
313 int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
315 struct mips_coproc *cop0 = &vcpu->arch.cop0;
317 return (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
324 * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
326 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
328 static u32 kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
334 delta = now_ns + vcpu->arch.count_dyn_bias;
336 if (delta >= vcpu->arch.count_period) {
338 periods = div64_s64(now_ns, vcpu->arch.count_period);
339 vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
341 delta = now_ns + vcpu->arch.count_dyn_bias;
354 return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
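
Lines 328-354 turn a monotonic timestamp into a guest CP0_Count value. The dynamic bias keeps delta inside one full 32-bit Count period, which is exactly what makes delta * count_hz fit in 64 bits before the divide by NSEC_PER_SEC. A runnable user-space model (the struct and names are stand-ins):

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL

    struct count_state {
        uint64_t count_hz;        /* guest CP0_Count frequency */
        int64_t  count_period;    /* ns per 2^32 Count ticks */
        int64_t  count_dyn_bias;  /* cached ns bias, multiple of -count_period */
    };

    static uint32_t ktime_to_count(struct count_state *s, int64_t now_ns)
    {
        int64_t delta = now_ns + s->count_dyn_bias;

        if (delta >= s->count_period) {
            /* re-derive the bias so 0 <= delta < count_period */
            int64_t periods = now_ns / s->count_period;

            s->count_dyn_bias = -periods * s->count_period;
            delta = now_ns + s->count_dyn_bias;
        }
        /* delta < one Count period, so this multiply fits in 64 bits */
        return (uint32_t)((uint64_t)delta * s->count_hz / NSEC_PER_SEC);
    }

    int main(void)
    {
        struct count_state s = {
            .count_hz = 100000000,  /* 100 MHz */
            .count_period = (int64_t)((NSEC_PER_SEC << 32) / 100000000),
            .count_dyn_bias = 0,
        };

        printf("%u\n", ktime_to_count(&s, 1500000000));  /* 1.5 s -> 150000000 */
        return 0;
    }
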
359 * @vcpu: Virtual CPU.
367 static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
369 if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
370 return vcpu->arch.count_resume;
377 * @vcpu: Virtual CPU.
385 static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
387 struct mips_coproc *cop0 = &vcpu->arch.cop0;
393 count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
409 expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
410 threshold = ktime_add_ns(now, vcpu->arch.count_period / 4);
416 running = hrtimer_cancel(&vcpu->arch.comparecount_timer);
419 kvm_mips_callbacks->queue_timer_int(vcpu);
427 vcpu->arch.count_period);
428 hrtimer_start(&vcpu->arch.comparecount_timer, expires,
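
Inside kvm_mips_read_count_running() (lines 385-428), after sampling Count the code checks whether the hrtimer has already expired or will expire within a quarter of a Count period; if so it cancels the timer, injects the guest timer interrupt immediately, and re-arms one full period later. This closes the race where Count passes Compare between the hrtimer firing and the guest reading Count. A compact model of that decision, with stand-in time arithmetic:

    #include <stdbool.h>
    #include <stdint.h>

    /* Model of the expiry check in kvm_mips_read_count_running();
     * all values in nanoseconds, helpers stand in for ktime/hrtimer. */
    static bool timer_due_soon(int64_t now, int64_t expires,
                               int64_t count_period)
    {
        int64_t threshold = now + count_period / 4;

        return expires < threshold;  /* already expired or imminent */
    }
    /* When true, the kernel cancels the hrtimer, queues the timer
     * interrupt at once, and pushes 'expires' forward by count_period. */
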
438 * @vcpu: Virtual CPU.
445 u32 kvm_mips_read_count(struct kvm_vcpu *vcpu)
447 struct mips_coproc *cop0 = &vcpu->arch.cop0;
450 if (kvm_mips_count_disabled(vcpu))
453 return kvm_mips_read_count_running(vcpu, ktime_get());
458 * @vcpu: Virtual CPU.
468 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
472 ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count)
477 hrtimer_cancel(&vcpu->arch.comparecount_timer);
481 *count = kvm_mips_read_count_running(vcpu, now);
488 * @vcpu: Virtual CPU.
500 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
502 static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
505 struct mips_coproc *cop0 = &vcpu->arch.cop0;
513 delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
517 hrtimer_cancel(&vcpu->arch.comparecount_timer);
518 hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
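
kvm_mips_resume_hrtimer() (lines 502-518) converts the Count-tick distance to Compare into an absolute expiry: the tick count is scaled by NSEC_PER_SEC / count_hz and added to the frozen timestamp. A runnable model of that scaling; the wrap of a zero distance to a full 2^32 ticks is how I read the kernel's arithmetic, so treat that detail as an assumption:

    #include <stdint.h>

    #define NSEC_PER_SEC 1000000000ULL

    /* ns from 'count' until the guest CP0_Compare value is reached */
    static uint64_t ns_until_compare(uint32_t count, uint32_t compare,
                                     uint64_t count_hz)
    {
        /* modulo-2^32 distance; compare == count wraps to 2^32 ticks */
        uint64_t ticks = (uint64_t)(uint32_t)(compare - count - 1) + 1;

        return ticks * NSEC_PER_SEC / count_hz;
    }
    /* expiry = frozen 'now' + ns_until_compare(...); the kernel then
     * starts the comparecount hrtimer in absolute mode (HRTIMER_MODE_ABS). */
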
523 * @vcpu: Virtual CPU.
538 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is not
543 int kvm_mips_restore_hrtimer(struct kvm_vcpu *vcpu, ktime_t before,
552 before_count = vcpu->arch.count_bias +
553 kvm_mips_ktime_to_count(vcpu, before);
565 vcpu->arch.count_bias += drift;
572 now_count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
581 vcpu->arch.count_bias += drift;
588 delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
593 kvm_mips_resume_hrtimer(vcpu, count_time, count);
599 * @vcpu: Virtual CPU.
604 void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count)
606 struct mips_coproc *cop0 = &vcpu->arch.cop0;
610 now = kvm_mips_count_time(vcpu);
611 vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
613 if (kvm_mips_count_disabled(vcpu))
618 kvm_mips_resume_hrtimer(vcpu, now, count);
623 * @vcpu: Virtual CPU.
629 void kvm_mips_init_count(struct kvm_vcpu *vcpu, unsigned long count_hz)
631 vcpu->arch.count_hz = count_hz;
632 vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
633 vcpu->arch.count_dyn_bias = 0;
636 kvm_mips_write_count(vcpu, 0);
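
kvm_mips_init_count() (lines 629-636) fixes the timebase: count_period is the nanoseconds one full 32-bit Count wrap takes, (2^32 * NSEC_PER_SEC) / count_hz, and Count is reset to 0 through the normal write path so the bias and hrtimer are set up consistently. A worked example of the period arithmetic at a typical 100 MHz timebase:

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL

    int main(void)
    {
        uint64_t count_hz = 100000000;  /* 100 MHz guest Count */
        uint64_t count_period = (NSEC_PER_SEC << 32) / count_hz;

        /* 2^32 ticks at 100 MHz: 42949672960 ns, i.e. ~42.95 s per wrap */
        printf("%llu ns per Count wrap\n",
               (unsigned long long)count_period);
        return 0;
    }
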
641 * @vcpu: Virtual CPU.
650 int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
652 struct mips_coproc *cop0 = &vcpu->arch.cop0;
661 if (vcpu->arch.count_hz == count_hz)
665 dc = kvm_mips_count_disabled(vcpu);
667 now = kvm_mips_count_time(vcpu);
670 now = kvm_mips_freeze_hrtimer(vcpu, &count);
674 vcpu->arch.count_hz = count_hz;
675 vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
676 vcpu->arch.count_dyn_bias = 0;
679 vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
683 kvm_mips_resume_hrtimer(vcpu, now, count);
689 * @vcpu: Virtual CPU.
697 void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
699 struct mips_coproc *cop0 = &vcpu->arch.cop0;
711 kvm_mips_callbacks->dequeue_timer_int(vcpu);
732 dc = kvm_mips_count_disabled(vcpu);
734 now = kvm_mips_freeze_hrtimer(vcpu, &count);
737 kvm_mips_callbacks->dequeue_timer_int(vcpu);
757 kvm_mips_resume_hrtimer(vcpu, now, count);
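
Both kvm_mips_set_count_hz() (lines 650-683) and kvm_mips_write_compare() (lines 697-757) wrap their state change in the same dance: if the Count timer is running, freeze the hrtimer and sample Count at a single instant; apply the change (new frequency and bias, or new Compare with the timer interrupt acked); then resume the hrtimer relative to that same instant so no guest time is lost. A compilable sketch of the pattern; every helper below is a stand-in:

    #include <stdbool.h>
    #include <stdint.h>

    struct vcpu_model { bool dc; };  /* CP0_Cause.DC: Count stopped */

    /* Trivial stand-ins for the kernel helpers of the same role. */
    static int64_t freeze_hrtimer_model(struct vcpu_model *v, uint32_t *count)
    {
        (void)v; *count = 0; return 0;   /* kernel: cancel hrtimer, sample */
    }
    static int64_t count_time_model(struct vcpu_model *v)
    {
        (void)v; return 0;               /* kernel: count_resume timestamp */
    }
    static void resume_hrtimer_model(struct vcpu_model *v, int64_t now,
                                     uint32_t count)
    {
        (void)v; (void)now; (void)count; /* kernel: re-arm against Compare */
    }

    static void change_timer_state(struct vcpu_model *v)
    {
        uint32_t count = 0;
        int64_t now;

        if (v->dc)
            now = count_time_model(v);   /* timer already stopped */
        else
            now = freeze_hrtimer_model(v, &count);

        /* ...apply the new count_hz/bias or Compare value here... */

        if (!v->dc)
            resume_hrtimer_model(v, now, count);  /* same instant: no drift */
    }
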
770 * @vcpu: Virtual CPU.
780 static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
782 struct mips_coproc *cop0 = &vcpu->arch.cop0;
787 hrtimer_cancel(&vcpu->arch.comparecount_timer);
791 count = kvm_mips_read_count_running(vcpu, now);
799 * @vcpu: Virtual CPU.
807 void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
809 struct mips_coproc *cop0 = &vcpu->arch.cop0;
812 if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
813 kvm_mips_count_disable(vcpu);
818 * @vcpu: Virtual CPU.
827 void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
829 struct mips_coproc *cop0 = &vcpu->arch.cop0;
840 kvm_mips_write_count(vcpu, count);
845 * @vcpu: Virtual CPU.
853 int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
855 struct mips_coproc *cop0 = &vcpu->arch.cop0;
856 s64 changed = count_ctl ^ vcpu->arch.count_ctl;
866 vcpu->arch.count_ctl = count_ctl;
874 vcpu->arch.count_resume = ktime_get();
877 vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
887 vcpu->arch.count_hz);
888 expire = ktime_add_ns(vcpu->arch.count_resume, delta);
894 kvm_mips_callbacks->queue_timer_int(vcpu);
897 count = kvm_mips_read_count_running(vcpu, now);
898 kvm_mips_resume_hrtimer(vcpu, now, count);
907 * @vcpu: Virtual CPU.
915 int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume)
925 vcpu->arch.count_resume = ns_to_ktime(count_resume);
931 * @vcpu: Virtual CPU.
937 enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
940 hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
941 vcpu->arch.count_period);
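
kvm_mips_count_timeout() (lines 937-941) is the hrtimer callback: it pushes the expiry exactly one count_period into the future before restarting (returning HRTIMER_RESTART), so the emulated Compare interrupt recurs every full 2^32-tick wrap rather than drifting by callback latency. A one-function sketch of why the arithmetic is against the old expiry, not the current time:

    #include <stdint.h>

    /* Model: advance an absolute expiry by one full Count period.
     * hrtimer_add_expires_ns() does this against the timer itself. */
    static int64_t next_expiry(int64_t expires, int64_t count_period)
    {
        return expires + count_period;  /* not now + period: no drift */
    }
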
945 enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
947 kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
948 vcpu->arch.pending_exceptions);
950 ++vcpu->stat.wait_exits;
951 trace_kvm_exit(vcpu, KVM_TRACE_EXIT_WAIT);
952 if (!vcpu->arch.pending_exceptions) {
953 kvm_vz_lose_htimer(vcpu);
954 vcpu->arch.wait = 1;
955 kvm_vcpu_halt(vcpu);
961 if (kvm_arch_vcpu_runnable(vcpu))
962 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
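
kvm_mips_emul_wait() (lines 945-962) parks the vCPU on a guest WAIT only if nothing is pending: it drops hardware-assisted timer state (kvm_vz_lose_htimer), marks the vCPU waiting, and halts; if the vCPU became runnable while halted, the exit reason is set to KVM_EXIT_IRQ_WINDOW_OPEN so userspace sees why it came back. A user-space model of the control flow, stand-ins throughout:

    #include <stdbool.h>

    struct vcpu_model {
        unsigned long pending_exceptions;
        bool wait;
        int exit_reason;
    };

    #define EXIT_IRQ_WINDOW_OPEN 1  /* stand-in for KVM_EXIT_IRQ_WINDOW_OPEN */

    static bool vcpu_runnable(struct vcpu_model *v)
    {
        return v->pending_exceptions != 0;
    }

    static void emul_wait_model(struct vcpu_model *v)
    {
        if (!v->pending_exceptions) {
            v->wait = true;
            /* kernel: kvm_vz_lose_htimer(); kvm_vcpu_halt() blocks here
             * until an interrupt or signal arrives */
            if (vcpu_runnable(v))
                v->exit_reason = EXIT_IRQ_WINDOW_OPEN;
        }
    }
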
970 struct kvm_vcpu *vcpu)
975 struct kvm_run *run = vcpu->run;
984 curr_pc = vcpu->arch.pc;
985 er = update_pc(vcpu, cause);
992 vcpu->arch.host_cp0_badvaddr);
1000 *(u64 *)data = vcpu->arch.gprs[rt];
1003 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1004 vcpu->arch.gprs[rt], *(u64 *)data);
1010 *(u32 *)data = vcpu->arch.gprs[rt];
1013 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1014 vcpu->arch.gprs[rt], *(u32 *)data);
1019 *(u16 *)data = vcpu->arch.gprs[rt];
1022 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1023 vcpu->arch.gprs[rt], *(u16 *)data);
1028 *(u8 *)data = vcpu->arch.gprs[rt];
1031 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1032 vcpu->arch.gprs[rt], *(u8 *)data);
1037 vcpu->arch.host_cp0_badvaddr) & (~0x3);
1039 imme = vcpu->arch.host_cp0_badvaddr & 0x3;
1043 (vcpu->arch.gprs[rt] >> 24);
1047 (vcpu->arch.gprs[rt] >> 16);
1051 (vcpu->arch.gprs[rt] >> 8);
1054 *(u32 *)data = vcpu->arch.gprs[rt];
1061 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1062 vcpu->arch.gprs[rt], *(u32 *)data);
1067 vcpu->arch.host_cp0_badvaddr) & (~0x3);
1069 imme = vcpu->arch.host_cp0_badvaddr & 0x3;
1072 *(u32 *)data = vcpu->arch.gprs[rt];
1076 (vcpu->arch.gprs[rt] << 8);
1080 (vcpu->arch.gprs[rt] << 16);
1084 (vcpu->arch.gprs[rt] << 24);
1091 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1092 vcpu->arch.gprs[rt], *(u32 *)data);
1098 vcpu->arch.host_cp0_badvaddr) & (~0x7);
1101 imme = vcpu->arch.host_cp0_badvaddr & 0x7;
1105 ((vcpu->arch.gprs[rt] >> 56) & 0xff);
1109 ((vcpu->arch.gprs[rt] >> 48) & 0xffff);
1113 ((vcpu->arch.gprs[rt] >> 40) & 0xffffff);
1117 ((vcpu->arch.gprs[rt] >> 32) & 0xffffffff);
1121 ((vcpu->arch.gprs[rt] >> 24) & 0xffffffffff);
1125 ((vcpu->arch.gprs[rt] >> 16) & 0xffffffffffff);
1129 ((vcpu->arch.gprs[rt] >> 8) & 0xffffffffffffff);
1132 *(u64 *)data = vcpu->arch.gprs[rt];
1139 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1140 vcpu->arch.gprs[rt], *(u64 *)data);
1145 vcpu->arch.host_cp0_badvaddr) & (~0x7);
1148 imme = vcpu->arch.host_cp0_badvaddr & 0x7;
1151 *(u64 *)data = vcpu->arch.gprs[rt];
1155 (vcpu->arch.gprs[rt] << 8);
1159 (vcpu->arch.gprs[rt] << 16);
1163 (vcpu->arch.gprs[rt] << 24);
1167 (vcpu->arch.gprs[rt] << 32);
1171 (vcpu->arch.gprs[rt] << 40);
1175 (vcpu->arch.gprs[rt] << 48);
1179 (vcpu->arch.gprs[rt] << 56);
1186 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1187 vcpu->arch.gprs[rt], *(u64 *)data);
1205 *(u8 *)data = vcpu->arch.gprs[rt];
1208 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1209 vcpu->arch.gprs[rt], *(u8 *)data);
1213 *(u16 *)data = vcpu->arch.gprs[rt];
1216 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1217 vcpu->arch.gprs[rt], *(u16 *)data);
1221 *(u32 *)data = vcpu->arch.gprs[rt];
1224 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1225 vcpu->arch.gprs[rt], *(u32 *)data);
1229 *(u64 *)data = vcpu->arch.gprs[rt];
1232 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1233 vcpu->arch.gprs[rt], *(u64 *)data);
1248 vcpu->mmio_needed = 1;
1250 vcpu->mmio_is_write = 1;
1252 r = kvm_io_bus_write(vcpu, KVM_MMIO_BUS,
1256 vcpu->mmio_needed = 0;
1264 vcpu->arch.pc = curr_pc;
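
The unaligned-store cases above (SWL at lines 1037-1062, SWR at 1067-1092, and the 64-bit SDL/SDR twins) all follow one recipe: align the physical address down, take the low address bits as the lane offset, then mask the bytes of the memory word that survive and OR in the shifted bytes of rt. A runnable model of the SWL merge, using exactly the masks and shifts shown above:

    #include <stdint.h>
    #include <stdio.h>

    /* Model of the SWL byte-lane merge (lines 1037-1062): the low two
     * address bits select how many high-order bytes of rt land in the
     * word at the aligned address. */
    static uint32_t swl_merge(uint32_t mem, uint32_t rt, unsigned imme)
    {
        switch (imme & 0x3) {
        case 0:  return (mem & 0xffffff00) | (rt >> 24);
        case 1:  return (mem & 0xffff0000) | (rt >> 16);
        case 2:  return (mem & 0xff000000) | (rt >> 8);
        default: return rt;  /* offset 3: the whole register word */
        }
    }

    int main(void)
    {
        /* SWL of 0xAABBCCDD at offset 1: the top two bytes of rt
         * replace the low two bytes of the existing word */
        printf("%08x\n", swl_merge(0x11223344, 0xAABBCCDD, 1)); /* 1122aabb */
        return 0;
    }
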
1269 u32 cause, struct kvm_vcpu *vcpu)
1271 struct kvm_run *run = vcpu->run;
1286 curr_pc = vcpu->arch.pc;
1287 er = update_pc(vcpu, cause);
1290 vcpu->arch.io_pc = vcpu->arch.pc;
1291 vcpu->arch.pc = curr_pc;
1293 vcpu->arch.io_gpr = rt;
1296 vcpu->arch.host_cp0_badvaddr);
1300 vcpu->mmio_needed = 2; /* signed */
1308 vcpu->mmio_needed = 1; /* unsigned */
1316 vcpu->mmio_needed = 1; /* unsigned */
1323 vcpu->mmio_needed = 1; /* unsigned */
1331 vcpu->arch.host_cp0_badvaddr) & (~0x3);
1334 imme = vcpu->arch.host_cp0_badvaddr & 0x3;
1337 vcpu->mmio_needed = 3; /* 1 byte */
1340 vcpu->mmio_needed = 4; /* 2 bytes */
1343 vcpu->mmio_needed = 5; /* 3 bytes */
1346 vcpu->mmio_needed = 6; /* 4 bytes */
1355 vcpu->arch.host_cp0_badvaddr) & (~0x3);
1358 imme = vcpu->arch.host_cp0_badvaddr & 0x3;
1361 vcpu->mmio_needed = 7; /* 4 bytes */
1364 vcpu->mmio_needed = 8; /* 3 bytes */
1367 vcpu->mmio_needed = 9; /* 2 bytes */
1370 vcpu->mmio_needed = 10; /* 1 byte */
1380 vcpu->arch.host_cp0_badvaddr) & (~0x7);
1383 imme = vcpu->arch.host_cp0_badvaddr & 0x7;
1386 vcpu->mmio_needed = 11; /* 1 byte */
1389 vcpu->mmio_needed = 12; /* 2 bytes */
1392 vcpu->mmio_needed = 13; /* 3 bytes */
1395 vcpu->mmio_needed = 14; /* 4 bytes */
1398 vcpu->mmio_needed = 15; /* 5 bytes */
1401 vcpu->mmio_needed = 16; /* 6 bytes */
1404 vcpu->mmio_needed = 17; /* 7 bytes */
1407 vcpu->mmio_needed = 18; /* 8 bytes */
1416 vcpu->arch.host_cp0_badvaddr) & (~0x7);
1419 imme = vcpu->arch.host_cp0_badvaddr & 0x7;
1422 vcpu->mmio_needed = 19; /* 8 bytes */
1425 vcpu->mmio_needed = 20; /* 7 bytes */
1428 vcpu->mmio_needed = 21; /* 6 bytes */
1431 vcpu->mmio_needed = 22; /* 5 bytes */
1434 vcpu->mmio_needed = 23; /* 4 bytes */
1437 vcpu->mmio_needed = 24; /* 3 bytes */
1440 vcpu->mmio_needed = 25; /* 2 bytes */
1443 vcpu->mmio_needed = 26; /* 1 byte */
1465 vcpu->mmio_needed = 27; /* signed */
1469 vcpu->mmio_needed = 28; /* signed */
1473 vcpu->mmio_needed = 29; /* signed */
1477 vcpu->mmio_needed = 30; /* signed */
1490 vcpu->mmio_needed = 0;
1495 vcpu->mmio_is_write = 0;
1497 r = kvm_io_bus_read(vcpu, KVM_MMIO_BUS,
1501 kvm_mips_complete_mmio_load(vcpu);
1502 vcpu->mmio_needed = 0;
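
On the load side, vcpu->mmio_needed doubles as a decode cookie for the completion path: 1 and 2 distinguish unsigned from sign-extending full-width loads, while 3-6, 7-10, 11-18 and 19-26 (per the byte-count comments above) record which LWL/LWR/LDL/LDR lane combination the completion must merge. The instruction itself is long gone by the time userspace finishes the MMIO, so the cookie is the only decode state that survives the round trip. A small dispatch sketch over the ranges above:

    /* Classify an mmio_needed cookie per the ranges in the listing above. */
    static const char *mmio_cookie_kind(int cookie)
    {
        if (cookie == 1)                  return "full load, zero-extend";
        if (cookie == 2)                  return "full load, sign-extend";
        if (cookie >= 3 && cookie <= 6)   return "LWL, 1-4 bytes";
        if (cookie >= 7 && cookie <= 10)  return "LWR, 4-1 bytes";
        if (cookie >= 11 && cookie <= 18) return "LDL, 1-8 bytes";
        if (cookie >= 19 && cookie <= 26) return "LDR, 8-1 bytes";
        return "other signed variants (27-30 above)";
    }
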
1509 enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu)
1511 struct kvm_run *run = vcpu->run;
1512 unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
1522 vcpu->arch.pc = vcpu->arch.io_pc;
1526 switch (vcpu->mmio_needed) {
1528 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffffff) |
1532 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffff) |
1536 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffff) |
1540 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffff) |
1544 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff) |
1548 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff) |
1552 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff) |
1560 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff00000000000000) |
1564 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff000000000000) |
1568 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff0000000000) |
1572 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffff00000000) |
1576 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffff000000) |
1580 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffff0000) |
1584 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffffff00) |
1593 switch (vcpu->mmio_needed) {
1601 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff) |
1605 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff) |
1609 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff) |
1617 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff000000) |
1621 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff0000) |
1625 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff00) |
1634 if (vcpu->mmio_needed == 1)
1641 if (vcpu->mmio_needed == 1)
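
The final fragments (lines 1634 and 1641) pick zero- versus sign-extension when a 1- or 2-byte MMIO load completes: cookie 1 means the access was LBU/LHU, anything else sign-extends as LB/LH would. A runnable model of the 1-byte case:

    #include <stdint.h>
    #include <stdio.h>

    /* Model of the 1-byte completion at line 1634: cookie 1 zero-extends,
     * otherwise the loaded byte is sign-extended into the GPR. */
    static int64_t complete_byte_load(uint8_t data, int cookie)
    {
        return (cookie == 1) ? (int64_t)data : (int64_t)(int8_t)data;
    }

    int main(void)
    {
        printf("%lld\n", (long long)complete_byte_load(0x80, 1)); /* 128 */
        printf("%lld\n", (long long)complete_byte_load(0x80, 2)); /* -128 */
        return 0;
    }
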