Lines matching refs: arch
30 struct kvmppc_vcore *vc = vcpu->arch.vcore;
34 hr->hfscr = vcpu->arch.hfscr;
36 hr->dawr0 = vcpu->arch.dawr;
37 hr->dawrx0 = vcpu->arch.dawrx;
38 hr->ciabr = vcpu->arch.ciabr;
39 hr->purr = vcpu->arch.purr;
40 hr->spurr = vcpu->arch.spurr;
41 hr->ic = vcpu->arch.ic;
43 hr->srr0 = vcpu->arch.shregs.srr0;
44 hr->srr1 = vcpu->arch.shregs.srr1;
45 hr->sprg[0] = vcpu->arch.shregs.sprg0;
46 hr->sprg[1] = vcpu->arch.shregs.sprg1;
47 hr->sprg[2] = vcpu->arch.shregs.sprg2;
48 hr->sprg[3] = vcpu->arch.shregs.sprg3;
49 hr->pidr = vcpu->arch.pid;
50 hr->cfar = vcpu->arch.cfar;
51 hr->ppr = vcpu->arch.ppr;
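
The block above (file lines 30-51) appears to be the register-save helper from the Book3S HV nested-guest support (book3s_hv_nested.c in arch/powerpc/kvm): it marshals the vcpu's hypervisor-privileged SPR state (HFSCR, DAWR/DAWRX, CIABR, PURR/SPURR, IC, SRR0/1, SPRG0-3, PID, CFAR, PPR) into a hv_guest_state image that L0 and L1 exchange around H_ENTER_NESTED. The restore side at file lines 150-170 mirrors it field for field. A minimal stand-alone sketch of the pattern, with stand-in struct layouts rather than the real kernel types:

    #include <stdint.h>

    /* Stand-in layouts; the real hv_guest_state and kvm_vcpu differ. */
    struct hv_state  { uint64_t hfscr, dawr0, dawrx0, srr0, srr1, sprg[4], pidr; };
    struct vcpu_arch { uint64_t hfscr, dawr, dawrx, srr0, srr1,
                       sprg0, sprg1, sprg2, sprg3, pid; };

    static void save_hv_regs(const struct vcpu_arch *a, struct hv_state *hr)
    {
        hr->hfscr   = a->hfscr;
        hr->dawr0   = a->dawr;   /* the vcpu field carries no suffix; the  */
        hr->dawrx0  = a->dawrx;  /* wire format names the first DAWR pair  */
        hr->srr0    = a->srr0;
        hr->srr1    = a->srr1;
        hr->sprg[0] = a->sprg0;
        hr->sprg[1] = a->sprg1;
        hr->sprg[2] = a->sprg2;
        hr->sprg[3] = a->sprg3;
        hr->pidr    = a->pid;    /* the vcpu "pid" becomes the PIDR image */
    }
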
100 struct kvmppc_vcore *vc = vcpu->arch.vcore;
103 hr->hfscr = vcpu->arch.hfscr;
104 hr->purr = vcpu->arch.purr;
105 hr->spurr = vcpu->arch.spurr;
106 hr->ic = vcpu->arch.ic;
108 hr->srr0 = vcpu->arch.shregs.srr0;
109 hr->srr1 = vcpu->arch.shregs.srr1;
110 hr->sprg[0] = vcpu->arch.shregs.sprg0;
111 hr->sprg[1] = vcpu->arch.shregs.sprg1;
112 hr->sprg[2] = vcpu->arch.shregs.sprg2;
113 hr->sprg[3] = vcpu->arch.shregs.sprg3;
114 hr->pidr = vcpu->arch.pid;
115 hr->cfar = vcpu->arch.cfar;
116 hr->ppr = vcpu->arch.ppr;
119 hr->hdar = vcpu->arch.fault_dar;
120 hr->hdsisr = vcpu->arch.fault_dsisr;
121 hr->asdr = vcpu->arch.fault_gpa;
124 hr->asdr = vcpu->arch.fault_gpa;
127 hr->heir = vcpu->arch.emul_inst;
138 hr->hfscr &= (HFSCR_INTR_CAUSE | vcpu->arch.hfscr);
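
File lines 100-138 save the state an L2 exit leaves behind, and line 138 is the interesting one: the HFSCR reported back to L1 is masked so that L1 sees the interrupt-cause field from the exit, but only those facility-enable bits it had itself granted to the vcpu. A hedged sketch of that masking; HFSCR_INTR_CAUSE is the cause field in the top byte of the register, and the exact constant is an assumption of this sketch:

    #include <stdint.h>

    #define HFSCR_INTR_CAUSE 0xff00000000000000ULL  /* assumed: bits 56-63 */

    static uint64_t sanitize_hfscr(uint64_t exit_hfscr, uint64_t vcpu_hfscr)
    {
        /* Keep the cause so L1 can decode a facility-unavailable
         * interrupt, but never report a facility bit the vcpu did not
         * actually have enabled. */
        return exit_hfscr & (HFSCR_INTR_CAUSE | vcpu_hfscr);
    }
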
150 struct kvmppc_vcore *vc = vcpu->arch.vcore;
154 vcpu->arch.hfscr = hr->hfscr;
155 vcpu->arch.dawr = hr->dawr0;
156 vcpu->arch.dawrx = hr->dawrx0;
157 vcpu->arch.ciabr = hr->ciabr;
158 vcpu->arch.purr = hr->purr;
159 vcpu->arch.spurr = hr->spurr;
160 vcpu->arch.ic = hr->ic;
162 vcpu->arch.shregs.srr0 = hr->srr0;
163 vcpu->arch.shregs.srr1 = hr->srr1;
164 vcpu->arch.shregs.sprg0 = hr->sprg[0];
165 vcpu->arch.shregs.sprg1 = hr->sprg[1];
166 vcpu->arch.shregs.sprg2 = hr->sprg[2];
167 vcpu->arch.shregs.sprg3 = hr->sprg[3];
168 vcpu->arch.pid = hr->pidr;
169 vcpu->arch.cfar = hr->cfar;
170 vcpu->arch.ppr = hr->ppr;
176 struct kvmppc_vcore *vc = vcpu->arch.vcore;
179 vcpu->arch.hfscr = hr->hfscr;
180 vcpu->arch.purr = hr->purr;
181 vcpu->arch.spurr = hr->spurr;
182 vcpu->arch.ic = hr->ic;
184 vcpu->arch.fault_dar = hr->hdar;
185 vcpu->arch.fault_dsisr = hr->hdsisr;
186 vcpu->arch.fault_gpa = hr->asdr;
187 vcpu->arch.emul_inst = hr->heir;
188 vcpu->arch.shregs.srr0 = hr->srr0;
189 vcpu->arch.shregs.srr1 = hr->srr1;
190 vcpu->arch.shregs.sprg0 = hr->sprg[0];
191 vcpu->arch.shregs.sprg1 = hr->sprg[1];
192 vcpu->arch.shregs.sprg2 = hr->sprg[2];
193 vcpu->arch.shregs.sprg3 = hr->sprg[3];
194 vcpu->arch.pid = hr->pidr;
195 vcpu->arch.cfar = hr->cfar;
196 vcpu->arch.ppr = hr->ppr;
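
File lines 176-196 are the mirror of the save path above: besides the SPR and utilization state, the HDAR/HDSISR/ASDR/HEIR captured at the L2 exit (lines 119-127) are pulled back into the vcpu's fault fields, from which the caller can synthesize the corresponding hypervisor interrupt for L1. The round trip in isolation, with stand-in types:

    #include <stdint.h>

    struct hv_ret   { uint64_t hdar, hdsisr, asdr, heir; };
    struct vcpu_flt { uint64_t fault_dar, fault_dsisr, fault_gpa, emul_inst; };

    static void save_return_state(const struct vcpu_flt *v, struct hv_ret *hr)
    {
        hr->hdar   = v->fault_dar;    /* faulting effective address      */
        hr->hdsisr = v->fault_dsisr;  /* fault cause bits                */
        hr->asdr   = v->fault_gpa;    /* partially translated address    */
        hr->heir   = v->emul_inst;    /* image of the failed instruction */
    }

    static void restore_return_state(struct vcpu_flt *v, const struct hv_ret *hr)
    {
        v->fault_dar   = hr->hdar;
        v->fault_dsisr = hr->hdsisr;
        v->fault_gpa   = hr->asdr;
        v->emul_inst   = hr->heir;
    }
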
202 vcpu->arch.trap = 0;
210 if (((vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) == KVM_MMIO_REG_GPR)
212 vcpu->arch.nested_io_gpr = (gpa_t) regs_ptr +
214 gpr[vcpu->arch.io_gpr]);
215 vcpu->arch.io_gpr = KVM_MMIO_REG_NESTED_GPR;
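
File lines 210-215 handle MMIO emulation that must complete in userspace after we have already switched back to L1: if the load's destination was a plain GPR, the completion target is redirected to the special KVM_MMIO_REG_NESTED_GPR and the L1 guest-physical address of that GPR's slot in the register image L1 passed in is recorded. A sketch of the address computation (the struct layout is a stand-in for the real pt_regs):

    #include <stddef.h>
    #include <stdint.h>

    typedef uint64_t gpa_t;
    struct pt_regs_img { uint64_t gpr[32]; /* ... */ };

    /* L1 guest-physical address of GPR slot 'n' inside the pt_regs
     * image whose base guest-physical address is regs_ptr. */
    static gpa_t nested_gpr_slot(gpa_t regs_ptr, unsigned int n)
    {
        return regs_ptr + offsetof(struct pt_regs_img, gpr)
                        + n * sizeof(uint64_t);
    }
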
225 struct kvmppc_vcore *vc = vcpu->arch.vcore;
232 if (vcpu->kvm->arch.l1_ptcr == 0)
235 if (MSR_TM_TRANSACTIONAL(vcpu->arch.shregs.msr))
267 if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr)) {
273 if (WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_TS_MASK))
288 vcpu->arch.regs.msr = vcpu->arch.shregs.msr;
289 saved_l1_regs = vcpu->arch.regs;
297 vcpu->arch.nested = l2;
298 vcpu->arch.nested_vcpu_id = l2_hv.vcpu_token;
299 vcpu->arch.regs = l2_regs;
300 vcpu->arch.shregs.msr = vcpu->arch.regs.msr;
307 vcpu->arch.ret = RESUME_GUEST;
308 vcpu->arch.trap = 0;
311 vcpu->arch.trap = BOOK3S_INTERRUPT_HV_DECREMENTER;
319 l2_regs = vcpu->arch.regs;
320 l2_regs.msr = vcpu->arch.shregs.msr;
321 delta_purr = vcpu->arch.purr - l2_hv.purr;
322 delta_spurr = vcpu->arch.spurr - l2_hv.spurr;
323 delta_ic = vcpu->arch.ic - l2_hv.ic;
325 save_hv_return_state(vcpu, vcpu->arch.trap, &l2_hv);
328 vcpu->arch.nested = NULL;
329 vcpu->arch.regs = saved_l1_regs;
330 vcpu->arch.shregs.msr = saved_l1_regs.msr & ~MSR_TS_MASK;
333 vcpu->arch.shregs.msr |= MSR_TS_S;
336 vcpu->arch.purr += delta_purr;
337 vcpu->arch.spurr += delta_spurr;
338 vcpu->arch.ic += delta_ic;
365 return vcpu->arch.trap;
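
File lines 225-365 are the heart of the listing: the handler behind H_ENTER_NESTED. It refuses to run if L1 never registered a partition table (l1_ptcr == 0, line 232) or if the vcpu is in a transactional state (lines 235-273), snapshots the L1 register state, installs the L2 state L1 handed in, runs until a trap, then restores L1 and adds the PURR/SPURR/IC the L2 run consumed back into L1's counters, since those registers keep counting across the nested run. A condensed, self-contained control-flow sketch; all kernel types and the actual run loop are stubbed:

    #include <stdint.h>

    struct regs { uint64_t msr; /* GPRs etc. elided */ };
    struct hvst { uint64_t purr, spurr, ic; };
    struct vc {
        struct regs regs;
        uint64_t msr, purr, spurr, ic;
        int trap;
    };

    static int run_l2(struct vc *v) { (void)v; return 0; /* stand-in */ }

    static int enter_nested(struct vc *v, struct hvst *l2_hv,
                            struct regs *l2_regs)
    {
        struct regs saved_l1_regs;
        uint64_t saved_purr = v->purr, saved_spurr = v->spurr,
                 saved_ic = v->ic;
        uint64_t delta_purr, delta_spurr, delta_ic;

        /* snapshot L1, then install the L2 state handed in by L1 */
        v->regs.msr = v->msr;
        saved_l1_regs = v->regs;
        v->regs  = *l2_regs;
        v->msr   = v->regs.msr;
        v->purr  = l2_hv->purr;
        v->spurr = l2_hv->spurr;
        v->ic    = l2_hv->ic;

        v->trap = run_l2(v);          /* run L2 until it traps out */

        /* hand the exit state back; measure what the L2 run consumed */
        *l2_regs = v->regs;
        l2_regs->msr = v->msr;
        delta_purr  = v->purr  - l2_hv->purr;
        delta_spurr = v->spurr - l2_hv->spurr;
        delta_ic    = v->ic    - l2_hv->ic;

        /* restore L1 and bill the L2 time to L1's counters as well */
        v->regs  = saved_l1_regs;
        v->msr   = saved_l1_regs.msr;
        v->purr  = saved_purr  + delta_purr;
        v->spurr = saved_spurr + delta_spurr;
        v->ic    = saved_ic    + delta_ic;

        return v->trap;
    }

The MSR_TS handling at file lines 330-333 rides on the same restore: the TS bits are cleared from L1's saved MSR and, as far as one can tell from these lines, suspend state (MSR_TS_S) is re-imposed only when the L2 exit left transactional state behind, since the hcall then behaves as if it had been made in suspend.
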
456 kvm->arch.max_nested_lpid = -1;
481 kvm->arch.l1_ptcr = ptcr;
576 ptbl_addr = (kvm->arch.l1_ptcr & PRTB_MASK) + (gp->l1_lpid << 4);
577 if (gp->l1_lpid < (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 8))) {
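
File lines 576-577 read the L1 partition-table entry for a nested guest: each entry is two doublewords (16 bytes), hence the `l1_lpid << 4` offset, and the PRTS field of the registered PTCR value encodes the table size as 2^(PRTS+12) bytes, i.e. 2^(PRTS+8) entries. Line 726 writes the same bound as `PRTS + 12 - 4`; the two forms are identical. A small runnable check of that arithmetic (the PRTS_MASK value is an assumption of this sketch):

    #include <assert.h>
    #include <stdio.h>

    #define PRTS_MASK 0x1fUL   /* assumed: size field in the PTCR low bits */

    int main(void)
    {
        for (unsigned long prts = 0; prts <= PRTS_MASK; prts++)
            assert((1ULL << (prts + 8)) == (1ULL << (prts + 12 - 4)));

        /* entry address: table base + lpid * 16 bytes, hence "<< 4" */
        unsigned long base = 0x10000, lpid = 3;
        printf("entry for lpid %lu at 0x%lx\n", lpid, base + (lpid << 4));
        return 0;   /* prints: entry for lpid 3 at 0x10030 */
    }
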
653 if (gp == kvm->arch.nested_guests[lpid]) {
654 kvm->arch.nested_guests[lpid] = NULL;
655 if (lpid == kvm->arch.max_nested_lpid) {
656 while (--lpid >= 0 && !kvm->arch.nested_guests[lpid])
658 kvm->arch.max_nested_lpid = lpid;
683 for (i = 0; i <= kvm->arch.max_nested_lpid; i++) {
684 gp = kvm->arch.nested_guests[i];
687 kvm->arch.nested_guests[i] = NULL;
693 kvm->arch.max_nested_lpid = -1;
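
File lines 653-693 drop nested guests from the per-LPID table while maintaining max_nested_lpid as the highest slot in use (initialized to -1 at line 456): removing the topmost guest scans downward for the next occupied slot, and releasing everything resets the bound to -1. The bookkeeping in isolation, with a stand-in table:

    #define MAX_NESTED 64          /* stand-in bound */
    static void *nested_guests[MAX_NESTED];
    static int max_nested_lpid = -1;

    static void remove_nested(int lpid)
    {
        nested_guests[lpid] = NULL;
        if (lpid == max_nested_lpid) {
            while (--lpid >= 0 && !nested_guests[lpid])
                ;
            max_nested_lpid = lpid;  /* becomes -1 when the table empties */
        }
    }
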
726 l1_lpid >= (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 12 - 4)))
730 gp = kvm->arch.nested_guests[l1_lpid];
742 if (kvm->arch.nested_guests[l1_lpid]) {
744 gp = kvm->arch.nested_guests[l1_lpid];
746 kvm->arch.nested_guests[l1_lpid] = newgp;
750 if (l1_lpid > kvm->arch.max_nested_lpid)
751 kvm->arch.max_nested_lpid = l1_lpid;
776 if (lpid > kvm->arch.max_nested_lpid)
778 return kvm->arch.nested_guests[lpid];
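
File lines 726-778 look a nested guest up by its L1 LPID, bounds-checking the LPID against the registered partition-table size, and create one on a miss. Lines 742-746 show the classic pattern of allocating outside the lock and re-checking the slot under it, discarding the new object if another thread installed one first. The shape of it, with pthreads and malloc standing in for the kernel primitives:

    #include <pthread.h>
    #include <stdlib.h>

    #define MAX_NESTED 64   /* stand-in; the real bound comes from l1_ptcr */
    static void *guests[MAX_NESTED];
    static pthread_mutex_t tbl_lock = PTHREAD_MUTEX_INITIALIZER;

    static void *get_nested(int lpid)
    {
        void *gp, *newgp;

        if (lpid < 0 || lpid >= MAX_NESTED)  /* bounds check, cf. line 726 */
            return NULL;

        pthread_mutex_lock(&tbl_lock);
        gp = guests[lpid];
        pthread_mutex_unlock(&tbl_lock);
        if (gp)
            return gp;

        newgp = calloc(1, 64);               /* stand-in for the real alloc */
        pthread_mutex_lock(&tbl_lock);
        if (guests[lpid]) {
            gp = guests[lpid];               /* raced: reuse, free ours */
            free(newgp);
        } else {
            guests[lpid] = newgp;
            gp = newgp;
        }
        pthread_mutex_unlock(&tbl_lock);
        return gp;
    }
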
935 unsigned long *rmap = &memslot->arch.rmap[gfn];
945 unsigned long rmap, *rmapp = &free->arch.rmap[page];
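
File lines 935-945 use the per-memslot reverse map (memslot->arch.rmap, one entry per guest frame) to track which nested shadow page tables reference an L1 page, so the shadow PTEs can be found and invalidated when that page changes. A sketch of packing a (shadow LPID, page-table index) pair into one rmap word; the field split here is an illustrative assumption, not the kernel layout:

    #include <stdint.h>

    #define RMAP_LPID_SHIFT 48   /* assumed split between the two fields */

    static uint64_t make_rmap(uint32_t lpid, uint64_t index)
    {
        return ((uint64_t)lpid << RMAP_LPID_SHIFT) | index;
    }

    static uint32_t rmap_lpid(uint64_t r)
    {
        return (uint32_t)(r >> RMAP_LPID_SHIFT);
    }

    static uint64_t rmap_index(uint64_t r)
    {
        return r & ((1ULL << RMAP_LPID_SHIFT) - 1);
    }
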
1088 for (i = 0; i <= kvm->arch.max_nested_lpid; i++) {
1089 gp = kvm->arch.nested_guests[i];
1196 vcpu->arch.fault_gpa = fault_addr;
1210 } else if (vcpu->arch.trap == BOOK3S_INTERRUPT_H_INST_STORAGE) {
1228 vcpu->arch.fault_dsisr = flags;
1229 if (vcpu->arch.trap == BOOK3S_INTERRUPT_H_INST_STORAGE) {
1230 vcpu->arch.shregs.msr &= SRR1_MSR_BITS;
1231 vcpu->arch.shregs.msr |= flags;
1257 gpte.raddr, kvm->arch.lpid);
1310 unsigned long dsisr = vcpu->arch.fault_dsisr;
1311 unsigned long ea = vcpu->arch.fault_dar;
1327 n_gpa = vcpu->arch.fault_gpa & ~0xF000000000000FFFULL;
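
File lines 1196-1327 are the nested page-fault path: the fault cause is stashed in fault_dsisr (for an instruction fault it is merged into the SRR1 bits of the guest MSR instead, lines 1230-1231), and line 1327 derives the page-aligned nested guest real address from the ASDR-style fault_gpa by clearing the top nibble (quadrant bits) and the low 12 bits (page offset). The masking on an example value:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t fault_gpa = 0xC00000000123ABCDULL;          /* example */
        uint64_t n_gpa = fault_gpa & ~0xF000000000000FFFULL;
        printf("n_gpa = 0x%016llx\n", (unsigned long long)n_gpa);
        return 0;   /* prints: n_gpa = 0x000000000123a000 */
    }
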
1456 rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
1472 struct kvm_nested_guest *gp = vcpu->arch.nested;
1486 while (++lpid <= kvm->arch.max_nested_lpid) {
1487 if (kvm->arch.nested_guests[lpid]) {
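
Finally, file lines 1486-1487 (the same pattern appears at 683 and 1088) walk the active LPIDs by skipping empty slots up to max_nested_lpid, e.g. when flushing every nested guest's shadow state. In isolation, with a stand-in table:

    #define MAX_NESTED 64
    static void *nested[MAX_NESTED];
    static int max_lpid = -1;

    /* Return the next in-use lpid after 'lpid', or -1 when done. */
    static int next_lpid(int lpid)
    {
        while (++lpid <= max_lpid) {
            if (nested[lpid])
                return lpid;
        }
        return -1;
    }
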