Lines matching refs:vcpu in arch/powerpc/kvm/emulate_loadstore.c (kvmppc_emulate_loadstore() and its MSR facility-check helpers):
28 static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
30 if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
31 kvmppc_core_queue_fpunavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
40 static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
42 if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
43 kvmppc_core_queue_vsx_unavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
52 static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
54 if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
55 kvmppc_core_queue_vec_unavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
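/* The three helpers above queue the corresponding "facility unavailable"
 * interrupt and return true when MSR_FP/MSR_VSX/MSR_VEC is clear, so the
 * emulation below bails out and lets the guest take the interrupt and
 * enable the facility first. */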
72 int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
79 kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);
81 emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
85 vcpu->arch.mmio_vsx_copy_nums = 0;
86 vcpu->arch.mmio_vsx_offset = 0;
87 vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_NONE;
88 vcpu->arch.mmio_sp64_extend = 0;
89 vcpu->arch.mmio_sign_extend = 0;
90 vcpu->arch.mmio_vmx_copy_nums = 0;
91 vcpu->arch.mmio_vmx_offset = 0;
92 vcpu->arch.mmio_host_swabbed = 0;
95 vcpu->arch.regs.msr = vcpu->arch.shared->msr;
96 if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) {
100 vcpu->mmio_is_write = OP_IS_STORE(type);
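/* analyse_instr() decodes the trapping instruction into 'op'; mmio_is_write
 * records whether it is a store.  The cases below handle the load forms
 * first (GPR, FP, VMX and VSX loads), followed by the matching stores. */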
107 emulated = kvmppc_handle_loads(vcpu,
110 emulated = kvmppc_handle_load(vcpu,
114 kvmppc_set_gpr(vcpu, op.update_reg, op.ea);
120 if (kvmppc_check_fp_disabled(vcpu))
124 vcpu->arch.mmio_sp64_extend = 1;
127 emulated = kvmppc_handle_loads(vcpu,
130 emulated = kvmppc_handle_load(vcpu,
134 kvmppc_set_gpr(vcpu, op.update_reg, op.ea);
140 if (kvmppc_check_altivec_disabled(vcpu))
144 vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
145 vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);
148 vcpu->arch.mmio_copy_type =
151 vcpu->arch.mmio_copy_type =
154 vcpu->arch.mmio_copy_type =
157 vcpu->arch.mmio_copy_type =
162 vcpu->arch.mmio_vmx_offset =
163 (vcpu->arch.vaddr_accessed & 0xf)/size;
166 vcpu->arch.mmio_vmx_copy_nums = 2;
167 emulated = kvmppc_handle_vmx_load(vcpu,
171 vcpu->arch.mmio_vmx_copy_nums = 1;
172 emulated = kvmppc_handle_vmx_load(vcpu,
183 if (kvmppc_check_altivec_disabled(vcpu))
186 if (kvmppc_check_vsx_disabled(vcpu))
191 vcpu->arch.mmio_sp64_extend = 1;
195 vcpu->arch.mmio_copy_type =
198 vcpu->arch.mmio_copy_type =
202 vcpu->arch.mmio_copy_type =
205 vcpu->arch.mmio_copy_type =
212 vcpu->arch.mmio_vsx_copy_nums = 1;
215 vcpu->arch.mmio_vsx_copy_nums =
220 emulated = kvmppc_handle_vsx_load(vcpu,
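/* Store forms mirror the loads.  On PR KVM, the giveup_ext() hook flushes
 * the FP/VEC/VSX registers back into vcpu->arch first, so the store reads
 * the guest's current value rather than a stale saved copy. */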
230 emulated = kvmppc_handle_store(vcpu, op.val, size, 1);
233 kvmppc_set_gpr(vcpu, op.update_reg, op.ea);
238 if (kvmppc_check_fp_disabled(vcpu))
243 * from vcpu->arch.
245 if (vcpu->kvm->arch.kvm_ops->giveup_ext)
246 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
250 vcpu->arch.mmio_sp64_extend = 1;
252 emulated = kvmppc_handle_store(vcpu,
253 VCPU_FPR(vcpu, op.reg), size, 1);
256 kvmppc_set_gpr(vcpu, op.update_reg, op.ea);
262 if (kvmppc_check_altivec_disabled(vcpu))
266 vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
267 vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);
269 if (vcpu->kvm->arch.kvm_ops->giveup_ext)
270 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
273 vcpu->arch.mmio_copy_type =
276 vcpu->arch.mmio_copy_type =
279 vcpu->arch.mmio_copy_type =
282 vcpu->arch.mmio_copy_type =
287 vcpu->arch.mmio_vmx_offset =
288 (vcpu->arch.vaddr_accessed & 0xf)/size;
291 vcpu->arch.mmio_vmx_copy_nums = 2;
292 emulated = kvmppc_handle_vmx_store(vcpu,
295 vcpu->arch.mmio_vmx_copy_nums = 1;
296 emulated = kvmppc_handle_vmx_store(vcpu,
307 if (kvmppc_check_altivec_disabled(vcpu))
310 if (kvmppc_check_vsx_disabled(vcpu))
314 if (vcpu->kvm->arch.kvm_ops->giveup_ext)
315 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
319 vcpu->arch.mmio_sp64_extend = 1;
322 vcpu->arch.mmio_copy_type =
325 vcpu->arch.mmio_copy_type =
332 vcpu->arch.mmio_vsx_copy_nums = 1;
335 vcpu->arch.mmio_vsx_copy_nums =
340 emulated = kvmppc_handle_vsx_store(vcpu,
359 trace_kvm_ppc_instr(ppc_inst_val(inst), kvmppc_get_pc(vcpu), emulated);
363 kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + ppc_inst_len(inst));
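The VMX paths above align the accessed address down to the element size and then derive the element index within the 16-byte vector register from the low four address bits. A minimal standalone sketch of that arithmetic, with made-up example values (not kernel code):

#include <stdio.h>

int main(void)
{
	/* Example only: a guest halfword vector-element access at 0x100b. */
	unsigned long vaddr = 0x100b;
	unsigned long size = 2;

	/* Hardware-style alignment of the access, as in the VMX cases above. */
	unsigned long aligned = vaddr & ~(size - 1);	/* 0x100a */

	/* Element index within the 16-byte VMX register. */
	unsigned long offset = (aligned & 0xf) / size;	/* element 5 */

	printf("aligned=0x%lx element=%lu\n", aligned, offset);
	return 0;
}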
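When the access targets emulated MMIO, the state decoded above is typically completed in cooperation with user space: KVM_RUN returns with KVM_EXIT_MMIO, the VMM performs the device access, and the result is folded back into the guest register using the mmio_* bookkeeping on the next entry. A hedged sketch of that user-space half, assuming a VMM run loop; device_read()/device_write() are hypothetical stand-ins for the VMM's device model, and VM/vCPU creation and the kvm_run mmap are omitted:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical device-model helpers (assumption, not part of the kernel). */
static void device_read(uint64_t addr, void *buf, unsigned int len)
{
	(void)addr;
	memset(buf, 0, len);		/* pretend the device reads as zero */
}

static void device_write(uint64_t addr, const void *buf, unsigned int len)
{
	(void)addr; (void)buf; (void)len;
}

/* Run the vCPU and complete MMIO accesses that the in-kernel emulation
 * (kvmppc_emulate_loadstore and friends) has decoded. */
static int run_vcpu(int vcpu_fd, struct kvm_run *run)
{
	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
			return -1;

		switch (run->exit_reason) {
		case KVM_EXIT_MMIO:
			if (run->mmio.is_write)
				device_write(run->mmio.phys_addr,
					     run->mmio.data, run->mmio.len);
			else
				/* The value placed in run->mmio.data is copied
				 * into the target guest register on the next
				 * KVM_RUN, using the mmio_* state set up by
				 * the emulation above. */
				device_read(run->mmio.phys_addr,
					    run->mmio.data, run->mmio.len);
			break;
		default:
			return 0;	/* let the caller handle other exits */
		}
	}
}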