Lines matching refs:vcpu (the fragments below are consistent with the s390 KVM intercept handler, arch/s390/kvm/intercept.c in the Linux kernel; the leading number on each line is the source line of the match)

25 u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu)
27 struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;
30 switch (vcpu->arch.sie_block->icptcode) {
37 ilen = insn_length(vcpu->arch.sie_block->ipa >> 8);
47 ilen = vcpu->arch.sie_block->pgmilc & 0x6;
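
kvm_s390_get_ilen() recovers the length of the intercepted instruction. A hedged sketch of how the fragments above plausibly combine; the ICPT_* case labels and the EXECUTE special case are assumptions that do not appear in the listing:

    u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu)
    {
            struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;
            u8 ilen = 0;

            switch (vcpu->arch.sie_block->icptcode) {
            case ICPT_INST:         /* assumed label: instruction intercept */
                    /* the length is encoded in the first opcode byte */
                    ilen = insn_length(vcpu->arch.sie_block->ipa >> 8);
                    /* assumed: an EXECUTE target reports its length here */
                    if (sie_block->icptstatus & 1) {
                            ilen = (sie_block->icptstatus >> 4) & 0x6;
                            if (!ilen)
                                    ilen = 4;
                    }
                    break;
            case ICPT_PROGI:        /* assumed label: program intercept */
                    /* the ILC field of pgmilc already encodes the length */
                    ilen = vcpu->arch.sie_block->pgmilc & 0x6;
                    break;
            }
            return ilen;
    }
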
53 static int handle_stop(struct kvm_vcpu *vcpu)
55 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
59 vcpu->stat.exit_stop_request++;
62 if (kvm_s390_vcpu_has_irq(vcpu, 1))
68 stop_pending = kvm_s390_is_stop_irq_pending(vcpu);
76 rc = kvm_s390_vcpu_store_status(vcpu,
86 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
87 kvm_s390_vcpu_stop(vcpu);
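
handle_stop() services a SIGP STOP intercept. A minimal sketch under assumptions: li->lock guards the pending-stop state, and KVM_S390_STOP_FLAG_STORE_STATUS / KVM_S390_STORE_STATUS_NOADDR are the usual flag and sentinel for kvm_s390_vcpu_store_status(); neither constant is visible in the listing:

    static int handle_stop(struct kvm_vcpu *vcpu)
    {
            struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
            u8 flags, stop_pending;
            int rc = 0;

            vcpu->stat.exit_stop_request++;

            /* delay the stop while any non-stop irq is pending */
            if (kvm_s390_vcpu_has_irq(vcpu, 1))
                    return 0;

            spin_lock(&li->lock);           /* assumed: protects stop state */
            flags = li->irq.stop.flags;
            stop_pending = kvm_s390_is_stop_irq_pending(vcpu);
            spin_unlock(&li->lock);

            if (!stop_pending)
                    return 0;

            if (flags & KVM_S390_STOP_FLAG_STORE_STATUS) {
                    rc = kvm_s390_vcpu_store_status(vcpu,
                                                    KVM_S390_STORE_STATUS_NOADDR);
                    if (rc)
                            return rc;
            }

            /* when userspace controls the cpu state, leave the stop to it */
            if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
                    kvm_s390_vcpu_stop(vcpu);
            return -EOPNOTSUPP;             /* assumed: bounce to userspace */
    }
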
91 static int handle_validity(struct kvm_vcpu *vcpu)
93 int viwhy = vcpu->arch.sie_block->ipb >> 16;
95 vcpu->stat.exit_validity++;
96 trace_kvm_s390_intercept_validity(vcpu, viwhy);
98 current->pid, vcpu->kvm);
106 static int handle_instruction(struct kvm_vcpu *vcpu)
108 vcpu->stat.exit_instruction++;
109 trace_kvm_s390_intercept_instruction(vcpu,
110 vcpu->arch.sie_block->ipa,
111 vcpu->arch.sie_block->ipb);
113 switch (vcpu->arch.sie_block->ipa >> 8) {
115 return kvm_s390_handle_01(vcpu);
117 return kvm_s390_handle_lpsw(vcpu);
119 return kvm_s390_handle_diag(vcpu);
121 return kvm_s390_handle_aa(vcpu);
123 return kvm_s390_handle_sigp(vcpu);
125 return kvm_s390_handle_b2(vcpu);
127 return kvm_s390_handle_stctl(vcpu);
129 return kvm_s390_handle_lctl(vcpu);
131 return kvm_s390_handle_b9(vcpu);
133 return kvm_s390_handle_e3(vcpu);
135 return kvm_s390_handle_e5(vcpu);
137 return kvm_s390_handle_eb(vcpu);
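
handle_instruction() dispatches on the high byte of the IPA (the first two instruction bytes saved by SIE). Only the handler calls appear in the listing; the case values below are assumptions inferred from the handler names and the s390 opcode map (0x82 is LPSW, 0x83 is DIAGNOSE, 0xae is SIGP, 0xb6/0xb7 are STCTL/LCTL):

    static int handle_instruction(struct kvm_vcpu *vcpu)
    {
            vcpu->stat.exit_instruction++;
            trace_kvm_s390_intercept_instruction(vcpu,
                                                 vcpu->arch.sie_block->ipa,
                                                 vcpu->arch.sie_block->ipb);

            switch (vcpu->arch.sie_block->ipa >> 8) {
            case 0x01:
                    return kvm_s390_handle_01(vcpu);
            case 0x82:
                    return kvm_s390_handle_lpsw(vcpu);
            case 0x83:
                    return kvm_s390_handle_diag(vcpu);
            case 0xaa:
                    return kvm_s390_handle_aa(vcpu);
            case 0xae:
                    return kvm_s390_handle_sigp(vcpu);
            case 0xb2:
                    return kvm_s390_handle_b2(vcpu);
            case 0xb6:
                    return kvm_s390_handle_stctl(vcpu);
            case 0xb7:
                    return kvm_s390_handle_lctl(vcpu);
            case 0xb9:
                    return kvm_s390_handle_b9(vcpu);
            case 0xe3:
                    return kvm_s390_handle_e3(vcpu);
            case 0xe5:
                    return kvm_s390_handle_e5(vcpu);
            case 0xeb:
                    return kvm_s390_handle_eb(vcpu);
            default:
                    return -EOPNOTSUPP;     /* assumed fallback */
            }
    }
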
143 static int inject_prog_on_prog_intercept(struct kvm_vcpu *vcpu)
146 .code = vcpu->arch.sie_block->iprcc,
151 switch (vcpu->arch.sie_block->iprcc & ~PGM_PER) {
162 pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
170 pgm_info.exc_access_id = vcpu->arch.sie_block->eai;
178 pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
179 pgm_info.exc_access_id = vcpu->arch.sie_block->eai;
180 pgm_info.op_access_id = vcpu->arch.sie_block->oai;
183 pgm_info.mon_class_nr = vcpu->arch.sie_block->mcn;
184 pgm_info.mon_code = vcpu->arch.sie_block->tecmc;
188 pgm_info.data_exc_code = vcpu->arch.sie_block->dxc;
191 pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
192 pgm_info.exc_access_id = vcpu->arch.sie_block->eai;
198 if (vcpu->arch.sie_block->iprcc & PGM_PER) {
199 pgm_info.per_code = vcpu->arch.sie_block->perc;
200 pgm_info.per_atmid = vcpu->arch.sie_block->peratmid;
201 pgm_info.per_address = vcpu->arch.sie_block->peraddr;
202 pgm_info.per_access_id = vcpu->arch.sie_block->peraid;
204 return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
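
inject_prog_on_prog_intercept() re-injects a program interruption that SIE intercepted, copying the auxiliary fields each exception class defines. The listing elides which PGM_* codes select which fields, so the case labels below are assumptions; the grouping follows the visible copies (translation faults carry tecmc/eai, protection adds the operand access id, monitor events carry mcn, data exceptions carry dxc), and PER details are appended whenever PGM_PER is flagged:

    static int inject_prog_on_prog_intercept(struct kvm_vcpu *vcpu)
    {
            struct kvm_s390_pgm_info pgm_info = {
                    .code = vcpu->arch.sie_block->iprcc,
            };

            switch (vcpu->arch.sie_block->iprcc & ~PGM_PER) {
            case PGM_SEGMENT_TRANSLATION:   /* assumed: a DAT fault class */
                    pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
                    pgm_info.exc_access_id  = vcpu->arch.sie_block->eai;
                    break;
            case PGM_PROTECTION:            /* assumed: adds the operand id */
                    pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
                    pgm_info.exc_access_id  = vcpu->arch.sie_block->eai;
                    pgm_info.op_access_id   = vcpu->arch.sie_block->oai;
                    break;
            case PGM_MONITOR_EVENT:         /* assumed label */
                    pgm_info.mon_class_nr = vcpu->arch.sie_block->mcn;
                    pgm_info.mon_code     = vcpu->arch.sie_block->tecmc;
                    break;
            case PGM_DATA:                  /* assumed label */
                    pgm_info.data_exc_code = vcpu->arch.sie_block->dxc;
                    break;
            }

            if (vcpu->arch.sie_block->iprcc & PGM_PER) {
                    pgm_info.per_code      = vcpu->arch.sie_block->perc;
                    pgm_info.per_atmid     = vcpu->arch.sie_block->peratmid;
                    pgm_info.per_address   = vcpu->arch.sie_block->peraddr;
                    pgm_info.per_access_id = vcpu->arch.sie_block->peraid;
            }
            return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
    }
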
211 static int handle_itdb(struct kvm_vcpu *vcpu)
216 if (!IS_TE_ENABLED(vcpu) || !IS_ITDB_VALID(vcpu))
220 itdb = phys_to_virt(vcpu->arch.sie_block->itdba);
221 rc = write_guest_lc(vcpu, __LC_PGM_TDB, itdb, sizeof(*itdb));
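
handle_itdb() forwards the transactional-execution diagnostic block (ITDB) into the guest's lowcore so the guest can see why its transaction aborted. A minimal sketch; the struct name and the clearing of the host copy after delivery are assumptions:

    static int handle_itdb(struct kvm_vcpu *vcpu)
    {
            struct kvm_s390_itdb *itdb;
            int rc;

            /* nothing to do without transactional execution / a valid ITDB */
            if (!IS_TE_ENABLED(vcpu) || !IS_ITDB_VALID(vcpu))
                    return 0;

            itdb = phys_to_virt(vcpu->arch.sie_block->itdba);
            rc = write_guest_lc(vcpu, __LC_PGM_TDB, itdb, sizeof(*itdb));
            if (rc)
                    return rc;
            memset(itdb, 0, sizeof(*itdb)); /* assumed: clear after delivery */
            return 0;
    }
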
229 #define per_event(vcpu) (vcpu->arch.sie_block->iprcc & PGM_PER)
231 static bool should_handle_per_event(const struct kvm_vcpu *vcpu)
233 if (!guestdbg_enabled(vcpu) || !per_event(vcpu))
235 if (guestdbg_sstep_enabled(vcpu) &&
236 vcpu->arch.sie_block->iprcc != PGM_PER) {
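
per_event() and should_handle_per_event() gate host-side debugging: PER program interceptions are only handled by KVM when guest debugging is active. A sketch, assuming the single-step branch suppresses handling when the PER event coincides with another program interruption (iprcc carries more than PGM_PER):

    static bool should_handle_per_event(const struct kvm_vcpu *vcpu)
    {
            if (!guestdbg_enabled(vcpu) || !per_event(vcpu))
                    return false;
            if (guestdbg_sstep_enabled(vcpu) &&
                vcpu->arch.sie_block->iprcc != PGM_PER) {
                    /*
                     * assumed: a single-step event that races with another
                     * program interruption is left to the injection path
                     */
                    return false;
            }
            return true;
    }
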
246 static int handle_prog(struct kvm_vcpu *vcpu)
251 vcpu->stat.exit_program_interruption++;
257 if (kvm_s390_pv_cpu_is_protected(vcpu))
260 if (should_handle_per_event(vcpu)) {
261 rc = kvm_s390_handle_per_event(vcpu);
265 if (vcpu->arch.sie_block->iprcc == 0)
269 trace_kvm_s390_intercept_prog(vcpu, vcpu->arch.sie_block->iprcc);
270 if (vcpu->arch.sie_block->iprcc == PGM_SPECIFICATION) {
271 rc = read_guest_lc(vcpu, __LC_PGM_NEW_PSW, &psw, sizeof(psw_t));
278 rc = handle_itdb(vcpu);
282 return inject_prog_on_prog_intercept(vcpu);
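
handle_prog() ties the pieces above together for a program-interruption intercept. A hedged sketch; the early return value for protected guests and the handling of a bad PGM new PSW are assumptions:

    static int handle_prog(struct kvm_vcpu *vcpu)
    {
            psw_t psw;
            int rc;

            vcpu->stat.exit_program_interruption++;

            /* protected guests take their program interruptions directly */
            if (kvm_s390_pv_cpu_is_protected(vcpu))
                    return -EOPNOTSUPP;     /* assumed return value */

            if (should_handle_per_event(vcpu)) {
                    rc = kvm_s390_handle_per_event(vcpu);
                    if (rc)
                            return rc;
                    /* the handler may have consumed a PER-only intercept */
                    if (vcpu->arch.sie_block->iprcc == 0)
                            return 0;
            }

            trace_kvm_s390_intercept_prog(vcpu, vcpu->arch.sie_block->iprcc);
            if (vcpu->arch.sie_block->iprcc == PGM_SPECIFICATION) {
                    rc = read_guest_lc(vcpu, __LC_PGM_NEW_PSW, &psw, sizeof(psw_t));
                    if (rc)
                            return rc;
                    /*
                     * assumed: an invalid PGM new PSW bails out here to
                     * avoid an endless specification-exception loop
                     */
            }

            rc = handle_itdb(vcpu);
            if (rc)
                    return rc;

            return inject_prog_on_prog_intercept(vcpu);
    }
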
287 * @vcpu: virtual cpu
302 static int handle_external_interrupt(struct kvm_vcpu *vcpu)
304 u16 eic = vcpu->arch.sie_block->eic;
309 vcpu->stat.exit_external_interrupt++;
311 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
312 newpsw = vcpu->arch.sie_block->gpsw;
314 rc = read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &newpsw, sizeof(psw_t));
336 irq.u.extcall.code = vcpu->arch.sie_block->extcpuaddr;
337 rc = kvm_s390_inject_vcpu(vcpu, &irq);
346 return kvm_s390_inject_vcpu(vcpu, &irq);
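
handle_external_interrupt() reroutes an intercepted external interrupt back into the guest as a KVM irq. A sketch; the EXT_IRQ_* case labels, the interrupt-loop guard, and the -EBUSY handling for a pending external call are assumptions built around the visible fragments:

    static int handle_external_interrupt(struct kvm_vcpu *vcpu)
    {
            u16 eic = vcpu->arch.sie_block->eic;
            struct kvm_s390_irq irq;
            psw_t newpsw;
            int rc;

            vcpu->stat.exit_external_interrupt++;

            if (kvm_s390_pv_cpu_is_protected(vcpu)) {
                    newpsw = vcpu->arch.sie_block->gpsw;
            } else {
                    rc = read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &newpsw, sizeof(psw_t));
                    if (rc)
                            return rc;
            }

            /*
             * assumed: a clock-comparator or CPU-timer interrupt whose new
             * PSW leaves external interrupts enabled would loop forever,
             * so those cases drop to userspace before injecting
             */
            if ((eic == EXT_IRQ_CLK_COMP || eic == EXT_IRQ_CPU_TIMER) &&
                (newpsw.mask & PSW_MASK_EXT))
                    return -EOPNOTSUPP;

            switch (eic) {
            case EXT_IRQ_CLK_COMP:          /* assumed label */
                    irq.type = KVM_S390_INT_CLOCK_COMP;
                    break;
            case EXT_IRQ_CPU_TIMER:         /* assumed label */
                    irq.type = KVM_S390_INT_CPU_TIMER;
                    break;
            case EXT_IRQ_EXTERNAL_CALL:     /* assumed label */
                    irq.type = KVM_S390_INT_EXTERNAL_CALL;
                    irq.u.extcall.code = vcpu->arch.sie_block->extcpuaddr;
                    rc = kvm_s390_inject_vcpu(vcpu, &irq);
                    /* assumed: -EBUSY means a call is already pending */
                    if (rc == -EBUSY)
                            return 0;
                    return rc;
            default:
                    return -EOPNOTSUPP;
            }

            return kvm_s390_inject_vcpu(vcpu, &irq);
    }
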
351 * @vcpu: virtual cpu
358 static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
363 kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
366 rc = guest_translate_address_with_key(vcpu, vcpu->run->s.regs.gprs[reg2],
369 return kvm_s390_inject_prog_cond(vcpu, rc);
370 rc = kvm_arch_fault_in_page(vcpu, srcaddr, 0);
375 rc = guest_translate_address_with_key(vcpu, vcpu->run->s.regs.gprs[reg1],
378 return kvm_s390_inject_prog_cond(vcpu, rc);
379 rc = kvm_arch_fault_in_page(vcpu, dstaddr, 1);
383 kvm_s390_retry_instr(vcpu);
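
handle_mvpg_pei() resolves the partial-execution intercept for MVPG by faulting in both operand pages and then letting the guest retry the instruction. A sketch; the GACC_FETCH/GACC_STORE modes and the trailing access-key argument are assumptions based on the visible call shape:

    static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
    {
            unsigned long srcaddr, dstaddr;
            int reg1, reg2, rc;

            kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

            /* make sure the source page is readable and resident */
            rc = guest_translate_address_with_key(vcpu, vcpu->run->s.regs.gprs[reg2],
                                                  reg2, &srcaddr, GACC_FETCH, 0);
            if (rc)
                    return kvm_s390_inject_prog_cond(vcpu, rc);
            rc = kvm_arch_fault_in_page(vcpu, srcaddr, 0);
            if (rc != 0)
                    return rc;

            /* ... and the destination page writable and resident */
            rc = guest_translate_address_with_key(vcpu, vcpu->run->s.regs.gprs[reg1],
                                                  reg1, &dstaddr, GACC_STORE, 0);
            if (rc)
                    return kvm_s390_inject_prog_cond(vcpu, rc);
            rc = kvm_arch_fault_in_page(vcpu, dstaddr, 1);
            if (rc != 0)
                    return rc;

            kvm_s390_retry_instr(vcpu);     /* rewind the PSW: re-run MVPG */
            return 0;
    }
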
388 static int handle_partial_execution(struct kvm_vcpu *vcpu)
390 vcpu->stat.exit_pei++;
392 if (vcpu->arch.sie_block->ipa == 0xb254) /* MVPG */
393 return handle_mvpg_pei(vcpu);
394 if (vcpu->arch.sie_block->ipa >> 8 == 0xae) /* SIGP */
395 return kvm_s390_handle_sigp_pei(vcpu);
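
handle_partial_execution() only needs a fallback beyond the two visible branches; a sketch assuming anything other than MVPG and SIGP is punted to userspace:

    static int handle_partial_execution(struct kvm_vcpu *vcpu)
    {
            vcpu->stat.exit_pei++;

            if (vcpu->arch.sie_block->ipa == 0xb254)        /* MVPG */
                    return handle_mvpg_pei(vcpu);
            if (vcpu->arch.sie_block->ipa >> 8 == 0xae)     /* SIGP */
                    return kvm_s390_handle_sigp_pei(vcpu);

            return -EOPNOTSUPP;     /* assumed fallback */
    }
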
405 int handle_sthyi(struct kvm_vcpu *vcpu)
411 if (!test_kvm_facility(vcpu->kvm, 74))
412 return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
414 kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
415 code = vcpu->run->s.regs.gprs[reg1];
416 addr = vcpu->run->s.regs.gprs[reg2];
418 vcpu->stat.instruction_sthyi++;
419 VCPU_EVENT(vcpu, 3, "STHYI: fc: %llu addr: 0x%016llx", code, addr);
420 trace_kvm_s390_handle_sthyi(vcpu, code, addr);
423 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
431 if (!kvm_s390_pv_cpu_is_protected(vcpu) && (addr & ~PAGE_MASK))
432 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
445 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
446 memcpy(sida_addr(vcpu->arch.sie_block), sctns, PAGE_SIZE);
448 r = write_guest(vcpu, addr, reg2, sctns, PAGE_SIZE);
451 return kvm_s390_inject_prog_cond(vcpu, r);
457 vcpu->run->s.regs.gprs[reg2 + 1] = rc;
458 kvm_s390_set_psw_cc(vcpu, cc);
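
handle_sthyi() emulates STORE HYPERVISOR INFORMATION. A condensed, hedged sketch; the register-pair specification check, the page allocation, and the sthyi_fill() helper that populates the response block are assumptions around the visible fragments:

    int handle_sthyi(struct kvm_vcpu *vcpu)
    {
            int reg1, reg2, cc = 0, r = 0;
            u64 code, addr, rc = 0;
            void *sctns;

            if (!test_kvm_facility(vcpu->kvm, 74))
                    return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

            kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
            code = vcpu->run->s.regs.gprs[reg1];
            addr = vcpu->run->s.regs.gprs[reg2];

            vcpu->stat.instruction_sthyi++;
            VCPU_EVENT(vcpu, 3, "STHYI: fc: %llu addr: 0x%016llx", code, addr);
            trace_kvm_s390_handle_sthyi(vcpu, code, addr);

            /* assumed: an odd register pair is a specification exception */
            if (reg1 & 1 || reg2 & 1)
                    return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

            /* for non-protected guests the buffer must be page aligned */
            if (!kvm_s390_pv_cpu_is_protected(vcpu) && (addr & ~PAGE_MASK))
                    return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

            sctns = (void *)get_zeroed_page(GFP_KERNEL);    /* assumed */
            if (!sctns)
                    return -ENOMEM;

            cc = sthyi_fill(sctns, &rc);    /* assumed helper */

            if (!cc) {
                    if (kvm_s390_pv_cpu_is_protected(vcpu)) {
                            /* protected guests receive the data via the SIDA */
                            memcpy(sida_addr(vcpu->arch.sie_block), sctns, PAGE_SIZE);
                    } else {
                            r = write_guest(vcpu, addr, reg2, sctns, PAGE_SIZE);
                            if (r) {
                                    free_page((unsigned long)sctns);
                                    return kvm_s390_inject_prog_cond(vcpu, r);
                            }
                    }
            }

            free_page((unsigned long)sctns);
            vcpu->run->s.regs.gprs[reg2 + 1] = rc;  /* return code in R2+1 */
            kvm_s390_set_psw_cc(vcpu, cc);
            return r;
    }
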
462 static int handle_operexc(struct kvm_vcpu *vcpu)
467 vcpu->stat.exit_operation_exception++;
468 trace_kvm_s390_handle_operexc(vcpu, vcpu->arch.sie_block->ipa,
469 vcpu->arch.sie_block->ipb);
471 if (vcpu->arch.sie_block->ipa == 0xb256)
472 return handle_sthyi(vcpu);
474 if (vcpu->arch.sie_block->ipa == 0 && vcpu->kvm->arch.user_instr0)
476 rc = read_guest_lc(vcpu, __LC_PGM_NEW_PSW, &newpsw, sizeof(psw_t));
487 oldpsw = vcpu->arch.sie_block->gpsw;
495 return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
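
handle_operexc() deals with an operation exception (invalid opcode) intercept. A heavily hedged sketch; the anti-loop heuristic comparing the old and new PSW is an assumption reconstructed around the visible reads of __LC_PGM_NEW_PSW and gpsw:

    static int handle_operexc(struct kvm_vcpu *vcpu)
    {
            psw_t oldpsw, newpsw;
            int rc;

            vcpu->stat.exit_operation_exception++;
            trace_kvm_s390_handle_operexc(vcpu, vcpu->arch.sie_block->ipa,
                                          vcpu->arch.sie_block->ipb);

            if (vcpu->arch.sie_block->ipa == 0xb256)        /* STHYI */
                    return handle_sthyi(vcpu);

            /* let userspace emulate instruction 0x0000 if it asked for it */
            if (vcpu->arch.sie_block->ipa == 0 && vcpu->kvm->arch.user_instr0)
                    return -EOPNOTSUPP;     /* assumed: exit to userspace */

            rc = read_guest_lc(vcpu, __LC_PGM_NEW_PSW, &newpsw, sizeof(psw_t));
            if (rc)
                    return rc;

            /*
             * assumed heuristic: if the PGM new PSW would immediately fault
             * again (same DAT/AS settings, not a wait PSW, not problem
             * state), drop to userspace instead of looping forever
             */
            oldpsw = vcpu->arch.sie_block->gpsw;
            if (oldpsw.addr - newpsw.addr <= 6 &&
                !(newpsw.mask & PSW_MASK_WAIT) &&
                !(oldpsw.mask & PSW_MASK_PSTATE) &&
                (newpsw.mask & PSW_MASK_ASC) == (oldpsw.mask & PSW_MASK_ASC) &&
                (newpsw.mask & PSW_MASK_DAT) == (oldpsw.mask & PSW_MASK_DAT))
                    return -EOPNOTSUPP;

            return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
    }
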
498 static int handle_pv_spx(struct kvm_vcpu *vcpu)
500 u32 pref = *(u32 *)sida_addr(vcpu->arch.sie_block);
502 kvm_s390_set_prefix(vcpu, pref);
503 trace_kvm_s390_handle_prefix(vcpu, 1, pref);
507 static int handle_pv_sclp(struct kvm_vcpu *vcpu)
509 struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
531 static int handle_pv_uvc(struct kvm_vcpu *vcpu)
533 struct uv_cb_share *guest_uvcb = sida_addr(vcpu->arch.sie_block);
537 .guest_handle = kvm_s390_pv_get_handle(vcpu->kvm),
547 rc = gmap_make_secure(vcpu->arch.gmap, uvcb.gaddr, &uvcb);
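
handle_pv_uvc() handles the Ultravisor call that a protected guest uses to un-share a page, re-securing it via the gmap. A sketch; the UVC_CMD_* command numbers and the retry-on-failure behavior are assumptions around the visible SIDA read and conversion request:

    static int handle_pv_uvc(struct kvm_vcpu *vcpu)
    {
            struct uv_cb_share *guest_uvcb = sida_addr(vcpu->arch.sie_block);
            struct uv_cb_cts uvcb = {
                    .header.cmd   = UVC_CMD_UNPIN_PAGE_SHARED,      /* assumed */
                    .header.len   = sizeof(uvcb),
                    .guest_handle = kvm_s390_pv_get_handle(vcpu->kvm),
                    .gaddr        = guest_uvcb->paddr,
            };
            int rc;

            /* assumed: only the un-share UVC is notified to the host */
            if (guest_uvcb->header.cmd != UVC_CMD_REMOVE_SHARED_ACCESS)
                    return 0;

            rc = gmap_make_secure(vcpu->arch.gmap, uvcb.gaddr, &uvcb);
            /* assumed: on failure the guest re-issues the UVC and we retry */
            if (rc == -EINVAL)
                    return 0;
            return rc;
    }
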
562 static int handle_pv_notification(struct kvm_vcpu *vcpu)
566 if (vcpu->arch.sie_block->ipa == 0xb210)
567 return handle_pv_spx(vcpu);
568 if (vcpu->arch.sie_block->ipa == 0xb220)
569 return handle_pv_sclp(vcpu);
570 if (vcpu->arch.sie_block->ipa == 0xb9a4)
571 return handle_pv_uvc(vcpu);
572 if (vcpu->arch.sie_block->ipa >> 8 == 0xae) {
581 ret = kvm_s390_handle_sigp_pei(vcpu);
586 return handle_instruction(vcpu);
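
handle_pv_notification() dispatches the notification intercepts a protected guest generates; the opcodes are all visible above (0xb210 SET PREFIX, 0xb220 SERVICE CALL, 0xb9a4 the UV call, 0xae.. SIGP). Only the SIGP fallthrough logic is an assumption:

    static int handle_pv_notification(struct kvm_vcpu *vcpu)
    {
            int ret;

            if (vcpu->arch.sie_block->ipa == 0xb210)        /* SPX */
                    return handle_pv_spx(vcpu);
            if (vcpu->arch.sie_block->ipa == 0xb220)        /* SERVC */
                    return handle_pv_sclp(vcpu);
            if (vcpu->arch.sie_block->ipa == 0xb9a4)        /* UVC */
                    return handle_pv_uvc(vcpu);
            if (vcpu->arch.sie_block->ipa >> 8 == 0xae) {   /* SIGP */
                    /*
                     * assumed: try the partial-execution SIGP handler first;
                     * orders it cannot take fall through to full emulation
                     */
                    ret = kvm_s390_handle_sigp_pei(vcpu);
                    if (!ret)
                            return ret;
            }

            return handle_instruction(vcpu);
    }
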
589 static bool should_handle_per_ifetch(const struct kvm_vcpu *vcpu, int rc)
592 if (!(vcpu->arch.sie_block->icptstatus & 0x02))
596 if (guestdbg_sstep_enabled(vcpu) && vcpu->arch.local_int.pending_irqs)
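
should_handle_per_ifetch() decides whether a PER instruction-fetch event must still be processed after the intercept handler ran. A sketch; the middle filter on the handler's return code is an assumption:

    static bool should_handle_per_ifetch(const struct kvm_vcpu *vcpu, int rc)
    {
            /* only instruction-fetch PER events need post-processing */
            if (!(vcpu->arch.sie_block->icptstatus & 0x02))
                    return false;
            /* assumed: hard errors from the intercept handler take priority */
            if (rc != 0 && rc != -EOPNOTSUPP)
                    return false;
            /* a pending irq is delivered first when single-stepping */
            if (guestdbg_sstep_enabled(vcpu) && vcpu->arch.local_int.pending_irqs)
                    return false;
            return true;
    }
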
602 int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
606 if (kvm_is_ucontrol(vcpu->kvm))
609 switch (vcpu->arch.sie_block->icptcode) {
611 vcpu->stat.exit_external_request++;
614 vcpu->stat.exit_io_request++;
617 rc = handle_instruction(vcpu);
620 return handle_prog(vcpu);
622 return handle_external_interrupt(vcpu);
624 return kvm_s390_handle_wait(vcpu);
626 return handle_validity(vcpu);
628 return handle_stop(vcpu);
630 rc = handle_operexc(vcpu);
633 rc = handle_partial_execution(vcpu);
637 return kvm_s390_skey_check_enable(vcpu);
648 rc = handle_instruction(vcpu);
651 rc = handle_pv_notification(vcpu);
655 gmap_convert_to_secure(vcpu->arch.gmap,
656 kvm_s390_get_prefix(vcpu));
657 gmap_convert_to_secure(vcpu->arch.gmap,
658 kvm_s390_get_prefix(vcpu) + PAGE_SIZE);
664 if (should_handle_per_ifetch(vcpu, rc))
665 per_rc = kvm_s390_handle_per_ifetch_icpt(vcpu);
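
kvm_handle_sie_intercept() is the top-level dispatcher that the fragments above belong to. A hedged sketch of the overall shape; every ICPT_* case label is an assumption (the listing shows only the case bodies), and the PV prefix case re-secures the two prefix pages, matching the two gmap_convert_to_secure() calls above:

    int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
    {
            int rc, per_rc = 0;

            /* assumed: ucontrol VMs handle all intercepts in userspace */
            if (kvm_is_ucontrol(vcpu->kvm))
                    return -EOPNOTSUPP;

            switch (vcpu->arch.sie_block->icptcode) {
            case ICPT_EXTREQ:
                    vcpu->stat.exit_external_request++;
                    return 0;
            case ICPT_IOREQ:
                    vcpu->stat.exit_io_request++;
                    return 0;
            case ICPT_INST:
                    rc = handle_instruction(vcpu);
                    break;
            case ICPT_PROGI:
                    return handle_prog(vcpu);
            case ICPT_EXTINT:
                    return handle_external_interrupt(vcpu);
            case ICPT_WAIT:
                    return kvm_s390_handle_wait(vcpu);
            case ICPT_VALIDITY:
                    return handle_validity(vcpu);
            case ICPT_STOP:
                    return handle_stop(vcpu);
            case ICPT_OPEREXC:
                    rc = handle_operexc(vcpu);
                    break;
            case ICPT_PARTEXEC:
                    rc = handle_partial_execution(vcpu);
                    break;
            case ICPT_KSS:
                    return kvm_s390_skey_check_enable(vcpu);
            case ICPT_PV_INSTR:
                    rc = handle_instruction(vcpu);
                    break;
            case ICPT_PV_NOTIFY:
                    rc = handle_pv_notification(vcpu);
                    break;
            case ICPT_PV_PREF:
                    rc = 0;
                    /* re-import the (possibly relocated) prefix pages */
                    gmap_convert_to_secure(vcpu->arch.gmap,
                                           kvm_s390_get_prefix(vcpu));
                    gmap_convert_to_secure(vcpu->arch.gmap,
                                           kvm_s390_get_prefix(vcpu) + PAGE_SIZE);
                    break;
            default:
                    return -EOPNOTSUPP;
            }

            /* process PER ifetch events even if the handler bailed out */
            if (should_handle_per_ifetch(vcpu, rc))
                    per_rc = kvm_s390_handle_per_ifetch_icpt(vcpu);
            return per_rc ? per_rc : rc;
    }
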