Lines Matching refs:arch

15  * This file is derived from arch/powerpc/kvm/44x.c,
82 if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK)
89 vcpu->arch.hflags |= BOOK3S_HFLAG_SPLIT_HACK;
95 if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
102 vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK;
114 new_msr = vcpu->arch.intr_msr;
148 current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu;
189 svcpu->gpr[0] = vcpu->arch.regs.gpr[0];
190 svcpu->gpr[1] = vcpu->arch.regs.gpr[1];
191 svcpu->gpr[2] = vcpu->arch.regs.gpr[2];
192 svcpu->gpr[3] = vcpu->arch.regs.gpr[3];
193 svcpu->gpr[4] = vcpu->arch.regs.gpr[4];
194 svcpu->gpr[5] = vcpu->arch.regs.gpr[5];
195 svcpu->gpr[6] = vcpu->arch.regs.gpr[6];
196 svcpu->gpr[7] = vcpu->arch.regs.gpr[7];
197 svcpu->gpr[8] = vcpu->arch.regs.gpr[8];
198 svcpu->gpr[9] = vcpu->arch.regs.gpr[9];
199 svcpu->gpr[10] = vcpu->arch.regs.gpr[10];
200 svcpu->gpr[11] = vcpu->arch.regs.gpr[11];
201 svcpu->gpr[12] = vcpu->arch.regs.gpr[12];
202 svcpu->gpr[13] = vcpu->arch.regs.gpr[13];
203 svcpu->cr = vcpu->arch.regs.ccr;
204 svcpu->xer = vcpu->arch.regs.xer;
205 svcpu->ctr = vcpu->arch.regs.ctr;
206 svcpu->lr = vcpu->arch.regs.link;
207 svcpu->pc = vcpu->arch.regs.nip;
209 svcpu->shadow_fscr = vcpu->arch.shadow_fscr;
215 vcpu->arch.entry_tb = get_tb();
216 vcpu->arch.entry_vtb = get_vtb();
218 vcpu->arch.entry_ic = mfspr(SPRN_IC);
239 smsr |= (guest_msr & vcpu->arch.guest_owned_ext);
253 vcpu->arch.shadow_msr = smsr;
271 vcpu->arch.regs.gpr[0] = svcpu->gpr[0];
272 vcpu->arch.regs.gpr[1] = svcpu->gpr[1];
273 vcpu->arch.regs.gpr[2] = svcpu->gpr[2];
274 vcpu->arch.regs.gpr[3] = svcpu->gpr[3];
275 vcpu->arch.regs.gpr[4] = svcpu->gpr[4];
276 vcpu->arch.regs.gpr[5] = svcpu->gpr[5];
277 vcpu->arch.regs.gpr[6] = svcpu->gpr[6];
278 vcpu->arch.regs.gpr[7] = svcpu->gpr[7];
279 vcpu->arch.regs.gpr[8] = svcpu->gpr[8];
280 vcpu->arch.regs.gpr[9] = svcpu->gpr[9];
281 vcpu->arch.regs.gpr[10] = svcpu->gpr[10];
282 vcpu->arch.regs.gpr[11] = svcpu->gpr[11];
283 vcpu->arch.regs.gpr[12] = svcpu->gpr[12];
284 vcpu->arch.regs.gpr[13] = svcpu->gpr[13];
285 vcpu->arch.regs.ccr = svcpu->cr;
286 vcpu->arch.regs.xer = svcpu->xer;
287 vcpu->arch.regs.ctr = svcpu->ctr;
288 vcpu->arch.regs.link = svcpu->lr;
289 vcpu->arch.regs.nip = svcpu->pc;
290 vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
291 vcpu->arch.fault_dar = svcpu->fault_dar;
292 vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
293 vcpu->arch.last_inst = svcpu->last_inst;
295 vcpu->arch.shadow_fscr = svcpu->shadow_fscr;
300 vcpu->arch.purr += get_tb() - vcpu->arch.entry_tb;
301 vcpu->arch.spurr += get_tb() - vcpu->arch.entry_tb;
302 to_book3s(vcpu)->vtb += get_vtb() - vcpu->arch.entry_vtb;
304 vcpu->arch.ic += mfspr(SPRN_IC) - vcpu->arch.entry_ic;
320 (vcpu->arch.shadow_srr1 & (MSR_TS_MASK)) !=
323 old_msr |= (vcpu->arch.shadow_srr1 & (MSR_TS_MASK));
339 vcpu->arch.tfhar = mfspr(SPRN_TFHAR);
340 vcpu->arch.texasr = mfspr(SPRN_TEXASR);
341 vcpu->arch.tfiar = mfspr(SPRN_TFIAR);
348 mtspr(SPRN_TFHAR, vcpu->arch.tfhar);
349 mtspr(SPRN_TEXASR, vcpu->arch.texasr);
350 mtspr(SPRN_TFIAR, vcpu->arch.tfiar);
360 ulong ext_diff = (kvmppc_get_msr(vcpu) & ~vcpu->arch.guest_owned_ext) &
397 if (vcpu->arch.fscr & FSCR_TAR)
409 if (vcpu->arch.fscr & FSCR_TAR)
492 if (vcpu->arch.papr_enabled)
515 if (!vcpu->arch.pending_exceptions) {
537 if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
538 struct kvm_vcpu_arch *a = &vcpu->arch;
555 if (vcpu->arch.magic_page_pa &&
558 kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
576 vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
577 vcpu->arch.pvr = pvr;
584 vcpu->arch.cpu_type = KVM_CPU_3S_64;
592 vcpu->arch.cpu_type = KVM_CPU_3S_32;
599 vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
600 if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
602 vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
624 vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE |
631 vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
646 vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
690 ulong mp_pa = vcpu->arch.magic_page_pa;
717 if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE))
722 page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite);
740 (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
745 vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
758 if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
759 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
777 flags |= vcpu->arch.fault_dsisr & DSISR_ISSTORE;
787 if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
803 else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
804 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
809 vcpu->arch.paddr_accessed = pte.raddr;
810 vcpu->arch.vaddr_accessed = pte.eaddr;
831 msr &= vcpu->arch.guest_owned_ext;
858 vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX);
866 if (!(vcpu->arch.shadow_fscr & (1ULL << fac))) {
873 vcpu->arch.tar = mfspr(SPRN_TAR);
875 vcpu->arch.shadow_fscr &= ~FSCR_TAR;
888 if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
914 msr &= ~vcpu->arch.guest_owned_ext;
925 load_fp_state(&vcpu->arch.fp);
927 t->fp_save_area = &vcpu->arch.fp;
935 load_vr_state(&vcpu->arch.vr);
937 t->vr_save_area = &vcpu->arch.vr;
943 vcpu->arch.guest_owned_ext |= msr;
957 lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr;
964 load_fp_state(&vcpu->arch.fp);
972 load_vr_state(&vcpu->arch.vr);
985 vcpu->arch.fscr &= ~(0xffULL << 56);
986 vcpu->arch.fscr |= (fac << 56);
1016 guest_fac_enabled = (vcpu->arch.fscr & (1ULL << fac));
1036 mtspr(SPRN_TAR, vcpu->arch.tar);
1037 vcpu->arch.shadow_fscr |= FSCR_TAR;
1061 if ((vcpu->arch.fscr & FSCR_TAR) && !(fscr & FSCR_TAR)) {
1064 } else if (!(vcpu->arch.fscr & FSCR_TAR) && (fscr & FSCR_TAR)) {
1065 vcpu->arch.fscr = fscr;
1070 vcpu->arch.fscr = fscr;
1106 flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
1173 ulong shadow_srr1 = vcpu->arch.shadow_srr1;
1203 } else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
1204 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
1222 u32 fault_dsisr = vcpu->arch.fault_dsisr;
1302 if (vcpu->arch.papr_enabled) {
1312 if (vcpu->arch.papr_enabled &&
1332 vcpu->arch.hcall_needed = 1;
1334 } else if (vcpu->arch.osi_enabled &&
1344 vcpu->arch.osi_needed = 1;
1367 if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) {
1419 r = kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56);
1437 ulong shadow_srr1 = vcpu->arch.shadow_srr1;
1480 sregs->pvr = vcpu->arch.pvr;
1483 if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
1485 sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
1486 sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
1511 if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
1513 vcpu->arch.mmu.slbmte(vcpu, 0, 0);
1514 vcpu->arch.mmu.slbia(vcpu);
1521 vcpu->arch.mmu.slbmte(vcpu, rs, rb);
1527 vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
1567 if (vcpu->arch.intr_msr & MSR_LE)
1574 *val = get_reg_val(id, vcpu->arch.tfhar);
1577 *val = get_reg_val(id, vcpu->arch.tfiar);
1580 *val = get_reg_val(id, vcpu->arch.texasr);
1584 vcpu->arch.gpr_tm[id-KVM_REG_PPC_TM_GPR0]);
1593 val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j];
1596 val->vval = vcpu->arch.vr_tm.vr[i-32];
1603 *val = get_reg_val(id, vcpu->arch.cr_tm);
1606 *val = get_reg_val(id, vcpu->arch.xer_tm);
1609 *val = get_reg_val(id, vcpu->arch.lr_tm);
1612 *val = get_reg_val(id, vcpu->arch.ctr_tm);
1615 *val = get_reg_val(id, vcpu->arch.fp_tm.fpscr);
1618 *val = get_reg_val(id, vcpu->arch.amr_tm);
1621 *val = get_reg_val(id, vcpu->arch.ppr_tm);
1624 *val = get_reg_val(id, vcpu->arch.vrsave_tm);
1628 *val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]);
1633 *val = get_reg_val(id, vcpu->arch.dscr_tm);
1636 *val = get_reg_val(id, vcpu->arch.tar_tm);
1650 vcpu->arch.intr_msr |= MSR_LE;
1652 vcpu->arch.intr_msr &= ~MSR_LE;
1674 vcpu->arch.tfhar = set_reg_val(id, *val);
1677 vcpu->arch.tfiar = set_reg_val(id, *val);
1680 vcpu->arch.texasr = set_reg_val(id, *val);
1683 vcpu->arch.gpr_tm[id - KVM_REG_PPC_TM_GPR0] =
1693 vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j];
1696 vcpu->arch.vr_tm.vr[i-32] = val->vval;
1702 vcpu->arch.cr_tm = set_reg_val(id, *val);
1705 vcpu->arch.xer_tm = set_reg_val(id, *val);
1708 vcpu->arch.lr_tm = set_reg_val(id, *val);
1711 vcpu->arch.ctr_tm = set_reg_val(id, *val);
1714 vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val);
1717 vcpu->arch.amr_tm = set_reg_val(id, *val);
1720 vcpu->arch.ppr_tm = set_reg_val(id, *val);
1723 vcpu->arch.vrsave_tm = set_reg_val(id, *val);
1727 vcpu->arch.vr.vscr.u[3] = set_reg_val(id, *val);
1732 vcpu->arch.dscr_tm = set_reg_val(id, *val);
1735 vcpu->arch.tar_tm = set_reg_val(id, *val);
1757 vcpu->arch.book3s = vcpu_book3s;
1760 vcpu->arch.shadow_vcpu =
1761 kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL);
1762 if (!vcpu->arch.shadow_vcpu)
1769 vcpu->arch.shared = (void *)p;
1773 vcpu->arch.shared_big_endian = true;
1775 vcpu->arch.shared_big_endian = false;
1783 vcpu->arch.pvr = 0x3C0301;
1785 vcpu->arch.pvr = mfspr(SPRN_PVR);
1786 vcpu->arch.intr_msr = MSR_SF;
1789 vcpu->arch.pvr = 0x84202;
1790 vcpu->arch.intr_msr = 0;
1792 kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr);
1793 vcpu->arch.slb_nr = 64;
1795 vcpu->arch.shadow_msr = MSR_USER64 & ~MSR_LE;
1804 free_page((unsigned long)vcpu->arch.shared);
1807 kfree(vcpu->arch.shadow_vcpu);
1820 free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
1822 kfree(vcpu->arch.shadow_vcpu);
1832 if (!vcpu->arch.sane) {
1972 if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
2015 mutex_init(&kvm->arch.hpt_mutex);
2034 WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
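
The matches at source lines 189-209 and 271-295 above are the two halves of a register copy between vcpu->arch.regs and the shadow vcpu around guest entry and exit. A minimal standalone sketch of that copy-in/copy-out pattern follows; the struct layouts, field widths, and function names here are simplified stand-ins inferred from the matched lines, not the kernel's real definitions.

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    /* Simplified stand-ins for the structures named in the listing
     * (vcpu->arch.regs and the shadow vcpu); NOT the real kernel types. */
    struct guest_regs {
            uint64_t gpr[32];
            uint64_t ccr, xer, ctr, link, nip;
    };

    struct shadow_vcpu {
            uint64_t gpr[14];       /* the listing mirrors GPRs 0-13 only */
            uint64_t cr, xer, ctr, lr, pc;
    };

    /* Copy-in before guest entry, mirroring source lines 189-207. */
    void copy_to_shadow(struct shadow_vcpu *svcpu, const struct guest_regs *regs)
    {
            for (int i = 0; i < 14; i++)
                    svcpu->gpr[i] = regs->gpr[i];
            svcpu->cr  = regs->ccr;
            svcpu->xer = regs->xer;
            svcpu->ctr = regs->ctr;
            svcpu->lr  = regs->link;
            svcpu->pc  = regs->nip;
    }

    /* Copy-out after guest exit, mirroring source lines 271-289. */
    void copy_from_shadow(struct guest_regs *regs, const struct shadow_vcpu *svcpu)
    {
            for (int i = 0; i < 14; i++)
                    regs->gpr[i] = svcpu->gpr[i];
            regs->ccr  = svcpu->cr;
            regs->xer  = svcpu->xer;
            regs->ctr  = svcpu->ctr;
            regs->link = svcpu->lr;
            regs->nip  = svcpu->pc;
    }

    int main(void)
    {
            struct guest_regs in = { .ccr = 1, .xer = 2, .ctr = 3, .link = 4, .nip = 5 };
            struct guest_regs out;
            struct shadow_vcpu svcpu;

            for (int i = 0; i < 32; i++)
                    in.gpr[i] = 100 + i;

            memset(&out, 0, sizeof(out));
            copy_to_shadow(&svcpu, &in);
            copy_from_shadow(&out, &svcpu);

            /* Only GPRs 0-13 plus CR/XER/CTR/LR/NIP round-trip through the shadow vcpu. */
            assert(out.gpr[13] == 113 && out.gpr[14] == 0 && out.nip == 5);
            return 0;
    }

As the round-trip check illustrates, only the low fourteen GPRs and the handful of special registers listed above pass through the shadow vcpu; the remaining state stays in vcpu->arch.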