Lines matching refs:arch
115 if (kvm_mips_guest_has_msa(&vcpu->arch))
122 if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
158 if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
175 if (kvm_mips_guest_can_have_msa(&vcpu->arch))
205 set_bit(priority, &vcpu->arch.pending_exceptions);
206 clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
211 clear_bit(priority, &vcpu->arch.pending_exceptions);
212 set_bit(priority, &vcpu->arch.pending_exceptions_clr);
282 clear_bit(priority, &vcpu->arch.pending_exceptions);
324 clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
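
The set_bit/clear_bit pairs above (file lines 205-212) implement interrupt queue and dequeue as two complementary bitmasks: queuing a priority sets its bit in pending_exceptions and clears it in pending_exceptions_clr, and dequeuing does the reverse. These matches appear to come from arch/mips/kvm/vz.c, the MIPS KVM VZ backend. Below is a minimal standalone model of the pattern; queue_irq/dequeue_irq are hypothetical names, and the kernel uses atomic set_bit/clear_bit rather than plain bitwise ops.

#include <stdio.h>

struct vcpu_arch_model {
	unsigned long pending_exceptions;	/* bits queued for delivery */
	unsigned long pending_exceptions_clr;	/* bits queued for retraction */
};

/* Queue an exception: mark it pending and cancel any pending retraction. */
static void queue_irq(struct vcpu_arch_model *a, unsigned int priority)
{
	a->pending_exceptions |= 1UL << priority;
	a->pending_exceptions_clr &= ~(1UL << priority);
}

/* Dequeue an exception: the mirror image of queue_irq(). */
static void dequeue_irq(struct vcpu_arch_model *a, unsigned int priority)
{
	a->pending_exceptions &= ~(1UL << priority);
	a->pending_exceptions_clr |= 1UL << priority;
}

int main(void)
{
	struct vcpu_arch_model a = { 0, 0 };

	queue_irq(&a, 2);
	printf("pending=%#lx clr=%#lx\n",
	       a.pending_exceptions, a.pending_exceptions_clr);
	dequeue_irq(&a, 2);
	printf("pending=%#lx clr=%#lx\n",
	       a.pending_exceptions, a.pending_exceptions_clr);
	return 0;
}
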
346 if (mips_hpt_frequency != vcpu->arch.count_hz)
425 struct mips_coproc *cop0 = &vcpu->arch.cop0;
520 struct mips_coproc *cop0 = &vcpu->arch.cop0;
665 opc = (u32 *)vcpu->arch.pc;
666 if (vcpu->arch.host_cp0_cause & CAUSEF_BD)
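
File lines 665-666 show the standard MIPS delay-slot adjustment: vcpu->arch.pc is the exception PC, and when CP0_Cause.BD is set the exception was taken on a branch, so the faulting instruction actually sits one word later, in the delay slot. A sketch of the computation; CAUSEF_BD is bit 31 of Cause, matching the kernel's asm/mipsregs.h definition.

#include <stdio.h>

#define CAUSEF_BD (1u << 31)	/* CP0_Cause.BD, as in asm/mipsregs.h */

/* Return the address of the instruction that actually faulted. */
static unsigned long faulting_insn_addr(unsigned long pc, unsigned int cause)
{
	if (cause & CAUSEF_BD)
		pc += 4;	/* fault was in the branch delay slot */
	return pc;
}

int main(void)
{
	printf("%#lx\n", faulting_insn_addr(0x80001000UL, 0));
	printf("%#lx\n", faulting_insn_addr(0x80001000UL, CAUSEF_BD));
	return 0;
}
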
799 unsigned int gexccode = (vcpu->arch.host_cp0_guestctl0 &
819 u32 *opc = (u32 *) vcpu->arch.pc;
820 u32 cause = vcpu->arch.host_cp0_cause;
822 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
866 struct mips_coproc *cop0 = &vcpu->arch.cop0;
870 kvm_write_sw_gc0_maari(cop0, ARRAY_SIZE(vcpu->arch.maar) - 1);
871 else if (val < ARRAY_SIZE(vcpu->arch.maar))
879 struct mips_coproc *cop0 = &vcpu->arch.cop0;
889 curr_pc = vcpu->arch.pc;
932 ARRAY_SIZE(vcpu->arch.maar));
933 val = vcpu->arch.maar[
966 vcpu->arch.gprs[rt] = val;
979 val = vcpu->arch.gprs[rt];
987 kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
991 vcpu->arch.gprs[rt],
1011 ARRAY_SIZE(vcpu->arch.maar));
1012 vcpu->arch.maar[kvm_read_sw_gc0_maari(cop0)] =
1068 vcpu->arch.pc = curr_pc;
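
File lines 889 and 1068 bracket the emulator's PC discipline: snapshot vcpu->arch.pc before advancing past the trapping instruction, and restore the snapshot if emulation fails so the exception is delivered at the original address. A standalone sketch of the idiom; emulate_one() is a hypothetical stand-in for the real per-instruction handlers, and the real code also honours delay slots when advancing the PC.

#include <stdbool.h>
#include <stdio.h>

struct vcpu_model {
	unsigned long pc;
};

static bool emulate_one(struct vcpu_model *vcpu, unsigned int insn)
{
	(void)vcpu;
	(void)insn;	/* a real handler would decode and act here */
	return false;	/* pretend emulation failed */
}

static int handle_trap(struct vcpu_model *vcpu, unsigned int insn)
{
	unsigned long curr_pc = vcpu->pc;	/* snapshot */

	vcpu->pc += 4;				/* skip the trapping insn */
	if (!emulate_one(vcpu, insn)) {
		vcpu->pc = curr_pc;		/* roll back on failure */
		return -1;
	}
	return 0;
}

int main(void)
{
	struct vcpu_model v = { .pc = 0x80002000UL };

	handle_trap(&v, 0);
	printf("pc=%#lx\n", v.pc);	/* unchanged: 0x80002000 */
	return 0;
}
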
1081 struct kvm_vcpu_arch *arch = &vcpu->arch;
1088 curr_pc = vcpu->arch.pc;
1102 va = arch->gprs[base] + offset;
1105 cache, op, base, arch->gprs[base], offset);
1134 curr_pc, vcpu->arch.gprs[31], cache, op, base, arch->gprs[base],
1137 vcpu->arch.pc = curr_pc;
1156 curr_pc = vcpu->arch.pc;
1166 hostcfg = read_cpucfg(vcpu->arch.gprs[rs]);
1168 switch (vcpu->arch.gprs[rs]) {
1170 vcpu->arch.gprs[rd] = 0x14c000;
1176 vcpu->arch.gprs[rd] = hostcfg;
1181 vcpu->arch.gprs[rd] = hostcfg;
1184 vcpu->arch.gprs[rd] = hostcfg;
1188 vcpu->arch.gprs[rd] = 0;
1195 inst.loongson3_lscsr_format.fr, vcpu->arch.gprs[rs], curr_pc);
1205 vcpu->arch.pc = curr_pc;
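
File lines 1156-1205 emulate Loongson's CPUCFG instruction: the handler reads the host configuration word, then decides per register number whether the guest sees a fixed value (0x14c000 at line 1170), a copy of the host value, or zero for anything unrecognised. A sketch of that filtering shape; the register numbers below are illustrative, not Loongson's actual CPUCFG layout, and read_host_cpucfg() is a hypothetical stand-in for the privileged read.

#include <stdio.h>

static unsigned int read_host_cpucfg(unsigned int reg)
{
	(void)reg;
	return 0xdeadbeef;	/* placeholder host value */
}

/* Decide what the guest sees for CPUCFG register 'reg'. */
static unsigned int guest_cpucfg(unsigned int reg)
{
	unsigned int hostcfg = read_host_cpucfg(reg);

	switch (reg) {
	case 0:
		return 0x14c000;	/* fixed ID, as at line 1170 */
	case 1:
	case 2:
		return hostcfg;		/* pass the host value through */
	default:
		return 0;		/* hide everything else */
	}
}

int main(void)
{
	printf("%#x %#x %#x\n",
	       guest_cpucfg(0), guest_cpucfg(1), guest_cpucfg(4));
	return 0;
}
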
1216 struct kvm_vcpu_arch *arch = &vcpu->arch;
1263 arch->gprs[rt] =
1273 KVM_TRACE_HWR(rd, sel), arch->gprs[rt]);
1298 struct kvm_vcpu_arch *arch = &vcpu->arch;
1318 unsigned int val = arch->gprs[rt];
1326 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
1356 vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
1395 vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
1450 curr_pc = vcpu->arch.pc;
1457 vcpu->arch.pc = curr_pc;
1484 u32 *opc = (u32 *) vcpu->arch.pc;
1485 u32 cause = vcpu->arch.host_cp0_cause;
1487 u32 gexccode = (vcpu->arch.host_cp0_guestctl0 &
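
File lines 799 and 1484-1487 extract the guest exit reason: GExcCode is a five-bit field at bits 6:2 of CP0_GuestCtl0 (the kernel's MIPS_GCTL0_GEXC and MIPS_GCTL0_GEXC_SHIFT), and the handler switches on it to dispatch the hypervisor exit. A sketch of the extraction, reusing the kernel's field definitions.

#include <stdio.h>

#define MIPS_GCTL0_GEXC_SHIFT	2
#define MIPS_GCTL0_GEXC		(0x1fu << MIPS_GCTL0_GEXC_SHIFT)

/* Pull the GExcCode field out of a saved GuestCtl0 value. */
static unsigned int gexccode_of(unsigned long guestctl0)
{
	return (guestctl0 & MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT;
}

int main(void)
{
	printf("%u\n", gexccode_of(0x3u << 2));	/* prints 3 */
	return 0;
}
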
1555 u32 cause = vcpu->arch.host_cp0_cause;
1565 if (WARN_ON(!kvm_mips_guest_has_fpu(&vcpu->arch) ||
1566 vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
1610 if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
1613 vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
1626 u32 *opc = (u32 *) vcpu->arch.pc;
1627 u32 cause = vcpu->arch.host_cp0_cause;
1628 ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
1635 if (kvm_is_ifetch_fault(&vcpu->arch)) {
1673 u32 *opc = (u32 *) vcpu->arch.pc;
1674 u32 cause = vcpu->arch.host_cp0_cause;
1675 ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
1684 vcpu->arch.host_cp0_badvaddr = badvaddr;
1800 ret += 1 + ARRAY_SIZE(vcpu->arch.maar);
1853 for (i = 0; i < ARRAY_SIZE(vcpu->arch.maar); ++i) {
1914 struct mips_coproc *cop0 = &vcpu->arch.cop0;
2077 if (idx >= ARRAY_SIZE(vcpu->arch.maar))
2079 *v = vcpu->arch.maar[idx];
2084 *v = kvm_read_sw_gc0_maari(&vcpu->arch.cop0);
2120 *v = vcpu->arch.count_ctl;
2123 *v = ktime_to_ns(vcpu->arch.count_resume);
2126 *v = vcpu->arch.count_hz;
2138 struct mips_coproc *cop0 = &vcpu->arch.cop0;
2354 if (idx >= ARRAY_SIZE(vcpu->arch.maar))
2356 vcpu->arch.maar[idx] = mips_process_maar(dmtc_op, v);
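
File lines 2077-2079 and 2354-2356 are the two halves of one-reg access to the MAAR array: both the get and the set path bounds-check the user-supplied index against ARRAY_SIZE(vcpu->arch.maar) before touching the array. A standalone model of the accessor pair; the array size here is illustrative, and the kernel additionally filters the stored value through mips_process_maar(), elided below.

#include <errno.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct vcpu_model {
	unsigned long maar[6];	/* size illustrative */
};

static int get_maar(const struct vcpu_model *v, unsigned long idx,
		    unsigned long *val)
{
	if (idx >= ARRAY_SIZE(v->maar))
		return -EINVAL;
	*val = v->maar[idx];
	return 0;
}

static int set_maar(struct vcpu_model *v, unsigned long idx,
		    unsigned long val)
{
	if (idx >= ARRAY_SIZE(v->maar))
		return -EINVAL;
	v->maar[idx] = val;	/* kernel filters via mips_process_maar() */
	return 0;
}

int main(void)
{
	struct vcpu_model v = { { 0 } };
	unsigned long val;

	printf("%d\n", set_maar(&v, 2, 0x5UL));	/* 0 */
	printf("%d\n", get_maar(&v, 2, &val));	/* 0 */
	printf("%d\n", get_maar(&v, 9, &val));	/* -EINVAL */
	return 0;
}
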
2446 vcpu->arch.vzguestid[i] = 0;
2469 if (wired > vcpu->arch.wired_tlb_limit) {
2470 tlbs = krealloc(vcpu->arch.wired_tlb, wired *
2471 sizeof(*vcpu->arch.wired_tlb), GFP_ATOMIC);
2474 wired = vcpu->arch.wired_tlb_limit;
2476 vcpu->arch.wired_tlb = tlbs;
2477 vcpu->arch.wired_tlb_limit = wired;
2483 kvm_vz_save_guesttlb(vcpu->arch.wired_tlb, 0, wired);
2485 for (i = wired; i < vcpu->arch.wired_tlb_used; ++i) {
2486 vcpu->arch.wired_tlb[i].tlb_hi = UNIQUE_GUEST_ENTRYHI(i);
2487 vcpu->arch.wired_tlb[i].tlb_lo[0] = 0;
2488 vcpu->arch.wired_tlb[i].tlb_lo[1] = 0;
2489 vcpu->arch.wired_tlb[i].tlb_mask = 0;
2491 vcpu->arch.wired_tlb_used = wired;
2497 if (vcpu->arch.wired_tlb)
2498 kvm_vz_load_guesttlb(vcpu->arch.wired_tlb, 0,
2499 vcpu->arch.wired_tlb_used);
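
File lines 2469-2499 manage the buffer that snapshots the guest's wired TLB entries across context switches. Two details stand out: krealloc()'s result goes into a temporary so an allocation failure keeps the old buffer (the wired count is clamped to the old limit instead), and on shrink the now-unused saved slots are overwritten with unique EntryHi values so stale entries can never match. A userspace model using realloc(); UNIQUE_ENTRYHI() here is illustrative, not the kernel's, and the real code saves the hardware TLB entries into the buffer between the resize and the poisoning.

#include <stdlib.h>

struct tlb_entry {
	unsigned long tlb_hi, tlb_lo0, tlb_lo1, tlb_mask;
};

struct vcpu_model {
	struct tlb_entry *wired_tlb;
	unsigned int wired_tlb_limit;
	unsigned int wired_tlb_used;
};

/* Illustrative: a per-index EntryHi that cannot match a real translation. */
#define UNIQUE_ENTRYHI(i)	(0x80000000UL + ((unsigned long)(i) << 13))

static void resize_wired(struct vcpu_model *v, unsigned int wired)
{
	unsigned int i;

	if (wired > v->wired_tlb_limit) {
		struct tlb_entry *tlbs =
			realloc(v->wired_tlb, wired * sizeof(*tlbs));

		if (!tlbs) {
			wired = v->wired_tlb_limit;	/* keep old buffer */
		} else {
			v->wired_tlb = tlbs;
			v->wired_tlb_limit = wired;
		}
	}

	/* on shrink, poison the slots we no longer track */
	for (i = wired; i < v->wired_tlb_used; ++i) {
		v->wired_tlb[i].tlb_hi = UNIQUE_ENTRYHI(i);
		v->wired_tlb[i].tlb_lo0 = 0;
		v->wired_tlb[i].tlb_lo1 = 0;
		v->wired_tlb[i].tlb_mask = 0;
	}
	v->wired_tlb_used = wired;
}

int main(void)
{
	struct vcpu_model v = { NULL, 0, 0 };

	resize_wired(&v, 4);	/* grow */
	resize_wired(&v, 2);	/* shrink: slots 2-3 get poisoned */
	free(v.wired_tlb);
	return 0;
}
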
2505 struct mm_struct *gpa_mm = &kvm->arch.gpa_mm;
2512 migrated = (vcpu->arch.last_exec_cpu != cpu);
2513 vcpu->arch.last_exec_cpu = cpu;
2530 (vcpu->arch.vzguestid[cpu] ^ guestid_cache(cpu)) &
2533 vcpu->arch.vzguestid[cpu] = guestid_cache(cpu);
2535 vcpu->arch.vzguestid[cpu]);
2539 change_c0_guestctl1(GUESTID_MASK, vcpu->arch.vzguestid[cpu]);
2556 if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask))
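
File lines 2505-2556 handle a vCPU arriving on a physical CPU: migration is detected by comparing last_exec_cpu with the current CPU, and the cached GuestID is revalidated by XORing it against the per-CPU ID cache and masking the version (generation) bits; any mismatch forces a fresh ID. A standalone model of the versioned-ID scheme; the masks and four-CPU cache are illustrative, and the kernel additionally flushes guest TLB state on wrap and reserves ID 0 for the root.

#include <stdio.h>

#define GUESTID_MASK		0xffUL		/* low bits: the ID itself */
#define GUESTID_VERSION_MASK	(~GUESTID_MASK)	/* high bits: generation */

static unsigned long guestid_cache[4];	/* per-CPU running allocator */

/* Allocate the next ID on this CPU; on wrap, start a new generation
 * (the kernel flushes guest TLB state here and skips ID 0). */
static unsigned long get_new_guestid(int cpu)
{
	unsigned long id = guestid_cache[cpu] + 1;

	if (!(id & GUESTID_MASK))
		id++;
	guestid_cache[cpu] = id;
	return id;
}

/* Revalidate a vCPU's cached ID after (possible) migration. */
static void check_switch_guestid(unsigned long *vcpu_id, int cpu, int migrated)
{
	if (migrated ||
	    ((*vcpu_id ^ guestid_cache[cpu]) & GUESTID_VERSION_MASK))
		*vcpu_id = get_new_guestid(cpu);
}

int main(void)
{
	unsigned long id = 0;

	guestid_cache[0] = 0x100;		/* generation 1 on CPU 0 */
	check_switch_guestid(&id, 0, 1);	/* migrated: new ID */
	printf("id=%#lx\n", id);		/* 0x101 */
	check_switch_guestid(&id, 0, 0);	/* same generation: keep */
	printf("id=%#lx\n", id);		/* still 0x101 */
	return 0;
}
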
2565 struct mips_coproc *cop0 = &vcpu->arch.cop0;
2572 migrated = (vcpu->arch.last_sched_cpu != cpu);
2707 struct mips_coproc *cop0 = &vcpu->arch.cop0;
3055 vcpu->arch.vzguestid[i] = 0;
3079 struct mips_coproc *cop0 = &vcpu->arch.cop0;
3213 vcpu->arch.pc = CKSEG1ADDR(0x1fc00000);
3230 cpumask_setall(&kvm->arch.asid_flush_mask);
3263 r = vcpu->arch.vcpu_run(vcpu);