Lines matching refs:arch

115 if (kvm_mips_guest_has_msa(&vcpu->arch))
122 if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
158 if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
175 if (kvm_mips_guest_can_have_msa(&vcpu->arch))
205 set_bit(priority, &vcpu->arch.pending_exceptions);
206 clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
211 clear_bit(priority, &vcpu->arch.pending_exceptions);
212 set_bit(priority, &vcpu->arch.pending_exceptions_clr);
282 clear_bit(priority, &vcpu->arch.pending_exceptions);
325 clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
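The paired set_bit()/clear_bit() calls at 205-212 form a two-bitmask handshake: queueing an interrupt asserts its priority bit in pending_exceptions and drops it from pending_exceptions_clr, and dequeueing does the reverse, which is consistent with the consumer-side clear_bit() calls at 282 and 325. A minimal user-space model of that handshake; plain bitops stand in for the kernel's atomic set_bit()/clear_bit(), and the state is reduced to the two masks:

#include <stdio.h>

/* Illustrative stand-ins for the kernel's atomic bitops. */
static void set_bit(int nr, unsigned long *addr)
{
	*addr |= 1UL << nr;
}

static void clear_bit(int nr, unsigned long *addr)
{
	*addr &= ~(1UL << nr);
}

struct irq_state {
	unsigned long pending_exceptions;	/* lines to assert */
	unsigned long pending_exceptions_clr;	/* lines to deassert */
};

/* Mirrors the queue path at 205-206. */
static void queue_irq(struct irq_state *s, int priority)
{
	set_bit(priority, &s->pending_exceptions);
	clear_bit(priority, &s->pending_exceptions_clr);
}

/* Mirrors the dequeue path at 211-212. */
static void dequeue_irq(struct irq_state *s, int priority)
{
	clear_bit(priority, &s->pending_exceptions);
	set_bit(priority, &s->pending_exceptions_clr);
}

int main(void)
{
	struct irq_state s = { 0, 0 };

	queue_irq(&s, 3);
	dequeue_irq(&s, 2);
	printf("assert=%#lx deassert=%#lx\n",
	       s.pending_exceptions, s.pending_exceptions_clr);
	return 0;
}

Keeping assert and deassert requests in separate masks means a later delivery pass can both raise and lower virtual interrupt lines in a single sweep over the pending bits.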
347 if (mips_hpt_frequency != vcpu->arch.count_hz)
427 struct mips_coproc *cop0 = vcpu->arch.cop0;
522 struct mips_coproc *cop0 = vcpu->arch.cop0;
667 opc = (u32 *)vcpu->arch.pc;
668 if (vcpu->arch.host_cp0_cause & CAUSEF_BD)
801 unsigned int gexccode = (vcpu->arch.host_cp0_guestctl0 &
821 u32 *opc = (u32 *) vcpu->arch.pc;
822 u32 cause = vcpu->arch.host_cp0_cause;
824 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
868 struct mips_coproc *cop0 = vcpu->arch.cop0;
872 kvm_write_sw_gc0_maari(cop0, ARRAY_SIZE(vcpu->arch.maar) - 1);
873 else if (val < ARRAY_SIZE(vcpu->arch.maar))
881 struct mips_coproc *cop0 = vcpu->arch.cop0;
891 curr_pc = vcpu->arch.pc;
934 ARRAY_SIZE(vcpu->arch.maar));
935 val = vcpu->arch.maar[
968 vcpu->arch.gprs[rt] = val;
981 val = vcpu->arch.gprs[rt];
989 kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
993 vcpu->arch.gprs[rt],
1013 ARRAY_SIZE(vcpu->arch.maar));
1014 vcpu->arch.maar[kvm_read_sw_gc0_maari(cop0)] =
1070 vcpu->arch.pc = curr_pc;
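The ARRAY_SIZE() guards around maar[] (872-873 on the write side, 934-935 and 1013-1014 where the emulator indexes the array, and again at 2073 and 2350 in the register accessors) all protect one invariant: the software MAARI index never points past the last MAAR slot. A standalone sketch of that clamp; NUM_MAAR and MAARI_SELECT_LAST are hypothetical stand-ins for the real array size and for whatever special index value the saturate-to-last-entry branch at 872 handles:

/* Illustrative size; the real bound is ARRAY_SIZE(vcpu->arch.maar). */
#define NUM_MAAR 16

/* Hypothetical special index that selects the last entry (see 872). */
#define MAARI_SELECT_LAST 0x3f

struct maar_state {
	unsigned long maar[NUM_MAAR];
	unsigned int maari;	/* software copy of the MAARI index */
};

/* Model of the write guard at 868-873: saturate the special value,
 * accept in-range indices, silently drop the rest. */
static void write_maari(struct maar_state *s, unsigned int val)
{
	if (val == MAARI_SELECT_LAST)
		s->maari = NUM_MAAR - 1;
	else if (val < NUM_MAAR)
		s->maari = val;
}

/* With the invariant held, reads (934-935) and writes (1013-1014)
 * can index maar[] without further checks. */
static unsigned long read_maar(const struct maar_state *s)
{
	return s->maar[s->maari];
}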
1083 struct kvm_vcpu_arch *arch = &vcpu->arch;
1090 curr_pc = vcpu->arch.pc;
1104 va = arch->gprs[base] + offset;
1107 cache, op, base, arch->gprs[base], offset);
1136 curr_pc, vcpu->arch.gprs[31], cache, op, base, arch->gprs[base],
1139 vcpu->arch.pc = curr_pc;
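The save at 1090 and the restore at 1139 (the same bracketing appears at 891/1070, 1158/1207 and 1452/1459) show the emulator's PC-rollback convention: the guest PC is saved, advanced past the trapping instruction, and put back if the instruction cannot be completed, so the PC still points at the faulting instruction. A reduced model of the convention; update_pc() here just skips one 4-byte instruction, whereas the real helper must also consult CAUSEF_BD (line 668) when the trap hit a branch delay slot:

#include <stdbool.h>

struct vcpu_model {
	unsigned long pc;
};

enum emulation_result { EMULATE_DONE, EMULATE_FAIL };

/* Simplified stand-in for update_pc(): skip one 32-bit instruction. */
static void update_pc(struct vcpu_model *vcpu)
{
	vcpu->pc += 4;
}

static enum emulation_result emulate_insn(struct vcpu_model *vcpu,
					  bool (*do_op)(struct vcpu_model *))
{
	/* Save the PC first, as at 891, 1090, 1158 and 1452. */
	unsigned long curr_pc = vcpu->pc;

	update_pc(vcpu);
	if (!do_op(vcpu)) {
		/* Roll back so the PC names the faulting instruction,
		 * as at 1070, 1139, 1207 and 1459. */
		vcpu->pc = curr_pc;
		return EMULATE_FAIL;
	}
	return EMULATE_DONE;
}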
1158 curr_pc = vcpu->arch.pc;
1168 hostcfg = read_cpucfg(vcpu->arch.gprs[rs]);
1170 switch (vcpu->arch.gprs[rs]) {
1172 vcpu->arch.gprs[rd] = 0x14c000;
1178 vcpu->arch.gprs[rd] = hostcfg;
1183 vcpu->arch.gprs[rd] = hostcfg;
1186 vcpu->arch.gprs[rd] = hostcfg;
1190 vcpu->arch.gprs[rd] = 0;
1197 inst.loongson3_lscsr_format.fr, vcpu->arch.gprs[rs], curr_pc);
1207 vcpu->arch.pc = curr_pc;
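Lines 1168-1190 sketch a whitelist for Loongson CPUCFG reads: selector 0 returns a fixed synthetic value (0x14c000 at 1172), a few selectors pass the host value through (1178-1186, possibly masked further in the full source), and everything else reads as zero (1190) so the guest never observes unadvertised host features. A self-contained model of that filter; read_cpucfg() and the selector numbers are illustrative stand-ins:

#include <stdint.h>

/* Hypothetical stand-in for the host CPUCFG read at 1168. */
static uint64_t read_cpucfg(uint64_t sel)
{
	(void)sel;
	return 0;	/* a real host would return its CPUCFG word */
}

static uint64_t guest_cpucfg(uint64_t sel)
{
	uint64_t hostcfg = read_cpucfg(sel);

	switch (sel) {
	case 0:
		return 0x14c000;	/* fixed synthetic value (1172) */
	case 1:
	case 2:
	case 3:
		return hostcfg;		/* pass-through (1178-1186) */
	default:
		return 0;		/* hide unknown selectors (1190) */
	}
}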
1218 struct kvm_vcpu_arch *arch = &vcpu->arch;
1265 arch->gprs[rt] =
1275 KVM_TRACE_HWR(rd, sel), arch->gprs[rt]);
1300 struct kvm_vcpu_arch *arch = &vcpu->arch;
1320 unsigned int val = arch->gprs[rt];
1328 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
1358 vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
1397 vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
1452 curr_pc = vcpu->arch.pc;
1459 vcpu->arch.pc = curr_pc;
1486 u32 *opc = (u32 *) vcpu->arch.pc;
1487 u32 cause = vcpu->arch.host_cp0_cause;
1489 u32 gexccode = (vcpu->arch.host_cp0_guestctl0 &
1554 u32 cause = vcpu->arch.host_cp0_cause;
1564 if (WARN_ON(!kvm_mips_guest_has_fpu(&vcpu->arch) ||
1565 vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
1606 if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
1609 vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
1622 u32 *opc = (u32 *) vcpu->arch.pc;
1623 u32 cause = vcpu->arch.host_cp0_cause;
1624 ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
1631 if (kvm_is_ifetch_fault(&vcpu->arch)) {
1669 u32 *opc = (u32 *) vcpu->arch.pc;
1670 u32 cause = vcpu->arch.host_cp0_cause;
1671 ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
1680 vcpu->arch.host_cp0_badvaddr = badvaddr;
1796 ret += 1 + ARRAY_SIZE(vcpu->arch.maar);
1849 for (i = 0; i < ARRAY_SIZE(vcpu->arch.maar); ++i) {
1910 struct mips_coproc *cop0 = vcpu->arch.cop0;
2073 if (idx >= ARRAY_SIZE(vcpu->arch.maar))
2075 *v = vcpu->arch.maar[idx];
2080 *v = kvm_read_sw_gc0_maari(vcpu->arch.cop0);
2116 *v = vcpu->arch.count_ctl;
2119 *v = ktime_to_ns(vcpu->arch.count_resume);
2122 *v = vcpu->arch.count_hz;
2134 struct mips_coproc *cop0 = vcpu->arch.cop0;
2350 if (idx >= ARRAY_SIZE(vcpu->arch.maar))
2352 vcpu->arch.maar[idx] = mips_process_maar(dmtc_op, v);
2442 vcpu->arch.vzguestid[i] = 0;
2465 if (wired > vcpu->arch.wired_tlb_limit) {
2466 tlbs = krealloc(vcpu->arch.wired_tlb, wired *
2467 sizeof(*vcpu->arch.wired_tlb), GFP_ATOMIC);
2470 wired = vcpu->arch.wired_tlb_limit;
2472 vcpu->arch.wired_tlb = tlbs;
2473 vcpu->arch.wired_tlb_limit = wired;
2479 kvm_vz_save_guesttlb(vcpu->arch.wired_tlb, 0, wired);
2481 for (i = wired; i < vcpu->arch.wired_tlb_used; ++i) {
2482 vcpu->arch.wired_tlb[i].tlb_hi = UNIQUE_GUEST_ENTRYHI(i);
2483 vcpu->arch.wired_tlb[i].tlb_lo[0] = 0;
2484 vcpu->arch.wired_tlb[i].tlb_lo[1] = 0;
2485 vcpu->arch.wired_tlb[i].tlb_mask = 0;
2487 vcpu->arch.wired_tlb_used = wired;
2493 if (vcpu->arch.wired_tlb)
2494 kvm_vz_load_guesttlb(vcpu->arch.wired_tlb, 0,
2495 vcpu->arch.wired_tlb_used);
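Lines 2465-2487 grow the wired-TLB shadow array on demand and cope with allocation failure by clamping: if krealloc() fails, only the entries that already fit are saved, and slots left over from a previously larger wired set are overwritten with unique EntryHi values (2482) so they can never match a guest address. A user-space model of the grow-or-clamp step, with realloc() standing in for krealloc(..., GFP_ATOMIC) and the TLB entry reduced to its EntryHi word:

#include <stdlib.h>

struct wired_model {
	unsigned long *tlb_hi;	/* entry reduced to its EntryHi word */
	unsigned int limit;	/* allocated entries (wired_tlb_limit) */
	unsigned int used;	/* entries saved last time (wired_tlb_used) */
};

/* Stand-in for UNIQUE_GUEST_ENTRYHI(): a value no guest mapping hits. */
#define UNIQUE_ENTRYHI(i) (0x80000000UL + 2 * (unsigned long)(i))

static void save_wired(struct wired_model *m, unsigned int wired)
{
	unsigned int i;

	if (wired > m->limit) {
		unsigned long *p = realloc(m->tlb_hi, wired * sizeof(*p));

		if (!p) {
			wired = m->limit;	/* clamp on failure (2470) */
		} else {
			m->tlb_hi = p;		/* grow (2472-2473) */
			m->limit = wired;
		}
	}

	/* ... entries [0, wired) would be read back from the TLB here,
	 * as kvm_vz_save_guesttlb() is at 2479 ... */

	/* Neutralize slots left over from a larger wired set (2481-2485). */
	for (i = wired; i < m->used; ++i)
		m->tlb_hi[i] = UNIQUE_ENTRYHI(i);
	m->used = wired;
}

Clamping instead of failing outright keeps the save path safe to run where sleeping allocations are not allowed, which is presumably why GFP_ATOMIC appears at 2467.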
2501 struct mm_struct *gpa_mm = &kvm->arch.gpa_mm;
2508 migrated = (vcpu->arch.last_exec_cpu != cpu);
2509 vcpu->arch.last_exec_cpu = cpu;
2526 (vcpu->arch.vzguestid[cpu] ^ guestid_cache(cpu)) &
2529 vcpu->arch.vzguestid[cpu] = guestid_cache(cpu);
2531 vcpu->arch.vzguestid[cpu]);
2535 change_c0_guestctl1(GUESTID_MASK, vcpu->arch.vzguestid[cpu]);
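The test at 2526 is a generation check: the per-vCPU GuestID cached in vzguestid[cpu] stays valid only while its version bits match the CPU's current GuestID generation, and a mismatch (or a migration, per 2508) forces a fresh allocation before the value is programmed into GuestCtl1 at 2535. A minimal model of the check; the masks and the allocator are illustrative stand-ins:

/* Illustrative masks: low bits are the id, high bits the generation. */
#define GUESTID_MASK		0xffUL
#define GUESTID_VERSION_MASK	(~GUESTID_MASK)

static unsigned long guestid_cache;	/* per-CPU in the real code */

/* Hypothetical allocator: bump the id; a real one also has to roll
 * the version bits and flush when the id space wraps. */
static unsigned long get_new_guestid(void)
{
	return guestid_cache + 1;
}

static void check_guestid(unsigned long *vcpu_guestid, int migrated)
{
	/* Generation test, as at 2526. */
	if (migrated ||
	    ((*vcpu_guestid ^ guestid_cache) & GUESTID_VERSION_MASK)) {
		guestid_cache = get_new_guestid();
		*vcpu_guestid = guestid_cache;	/* refresh, as at 2529 */
	}
}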
2552 if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask))
2561 struct mips_coproc *cop0 = vcpu->arch.cop0;
2568 migrated = (vcpu->arch.last_sched_cpu != cpu);
2703 struct mips_coproc *cop0 = vcpu->arch.cop0;
3051 vcpu->arch.vzguestid[i] = 0;
3075 struct mips_coproc *cop0 = vcpu->arch.cop0;
3209 vcpu->arch.pc = CKSEG1ADDR(0x1fc00000);
3229 cpumask_setall(&kvm->arch.asid_flush_mask);
3269 r = vcpu->arch.vcpu_run(vcpu);