Lines matching refs: vcpu — the fragments below appear to come from the s390 KVM intercept handler (arch/s390/kvm/intercept.c in the Linux kernel). The leading number on each match is that line's position in the source file; gaps in the numbering are non-matching lines.

25 u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu)
27 struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;
30 switch (vcpu->arch.sie_block->icptcode) {
37 ilen = insn_length(vcpu->arch.sie_block->ipa >> 8);
47 ilen = vcpu->arch.sie_block->pgmilc & 0x6;
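
The matches above show kvm_s390_get_ilen() deriving the interrupted instruction's length either from the opcode byte latched in ipa (instruction intercepts) or from the program-interruption length code in pgmilc, masked with 0x6 to yield 2, 4, or 6. A minimal standalone sketch of the opcode path; insn_length() here follows the usual s390 length encoding and is my assumption, not a quote of the kernel helper:

#include <stdint.h>
#include <stdio.h>

/* On s390 the two top bits of the first opcode byte encode the length:
 * 00 -> 2 bytes, 01/10 -> 4 bytes, 11 -> 6 bytes. */
static uint8_t insn_length(uint8_t code)
{
	return ((((int)code + 64) >> 7) + 1) << 1;
}

int main(void)
{
	/* 0x04 = SPM (RR, 2 bytes), 0xb2xx (4 bytes), 0xe3xx (RXY, 6 bytes) */
	printf("%d %d %d\n", insn_length(0x04), insn_length(0xb2),
	       insn_length(0xe3));
	return 0;
}
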
53 static int handle_stop(struct kvm_vcpu *vcpu)
55 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
59 vcpu->stat.exit_stop_request++;
62 if (kvm_s390_vcpu_has_irq(vcpu, 1))
68 stop_pending = kvm_s390_is_stop_irq_pending(vcpu);
76 rc = kvm_s390_vcpu_store_status(vcpu,
86 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
87 kvm_s390_vcpu_stop(vcpu);
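
handle_stop() reads as: count the exit, defer if any other interrupt is pending (the kvm_s390_vcpu_has_irq(vcpu, 1) check), act only if a stop irq is really pending, store CPU status when the stop requested it, and mark the vcpu stopped only when userspace is not managing CPU state itself. A toy model of that control flow; the predicates and return codes are stand-ins, not kernel API:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in control flow for the stop intercept. Returning 0 re-enters
 * the guest; a nonzero value models leaving the SIE loop. */
static int handle_stop_model(bool other_irq_pending, bool stop_pending,
			     bool store_status, bool user_cpu_state_ctrl)
{
	if (other_irq_pending)
		return 0;		/* deliver the other interrupt first */
	if (!stop_pending)
		return 0;		/* the stop was cancelled meanwhile */
	if (store_status)
		puts("store CPU status at the architected save area");
	if (!user_cpu_state_ctrl)
		puts("mark vcpu stopped in the kernel");
	return 1;			/* hand control back to userspace */
}

int main(void)
{
	return handle_stop_model(false, true, true, false) == 1 ? 0 : 1;
}
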
91 static int handle_validity(struct kvm_vcpu *vcpu)
93 int viwhy = vcpu->arch.sie_block->ipb >> 16;
95 vcpu->stat.exit_validity++;
96 trace_kvm_s390_intercept_validity(vcpu, viwhy);
98 current->pid, vcpu->kvm);
106 static int handle_instruction(struct kvm_vcpu *vcpu)
108 vcpu->stat.exit_instruction++;
109 trace_kvm_s390_intercept_instruction(vcpu,
110 vcpu->arch.sie_block->ipa,
111 vcpu->arch.sie_block->ipb);
113 switch (vcpu->arch.sie_block->ipa >> 8) {
115 return kvm_s390_handle_01(vcpu);
117 return kvm_s390_handle_lpsw(vcpu);
119 return kvm_s390_handle_diag(vcpu);
121 return kvm_s390_handle_aa(vcpu);
123 return kvm_s390_handle_sigp(vcpu);
125 return kvm_s390_handle_b2(vcpu);
127 return kvm_s390_handle_stctl(vcpu);
129 return kvm_s390_handle_lctl(vcpu);
131 return kvm_s390_handle_b9(vcpu);
133 return kvm_s390_handle_e3(vcpu);
135 return kvm_s390_handle_e5(vcpu);
137 return kvm_s390_handle_eb(vcpu);
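
handle_instruction() fans out on the first opcode byte (ipa holds the first two instruction bytes latched by SIE). A compilable sketch of the same dispatch shape; the handler names mirror the matched case bodies, and the opcode byte values are filled in from the architecture, so treat them as my assumption rather than a quote:

#include <stdint.h>
#include <stdio.h>

/* Route an intercepted instruction by its top opcode byte, as the matched
 * switch does. Returning the handler name stands in for calling it. */
static const char *dispatch(uint16_t ipa)
{
	switch (ipa >> 8) {
	case 0x01: return "kvm_s390_handle_01";
	case 0x82: return "kvm_s390_handle_lpsw";	/* LPSW */
	case 0x83: return "kvm_s390_handle_diag";	/* DIAGNOSE */
	case 0xaa: return "kvm_s390_handle_aa";
	case 0xae: return "kvm_s390_handle_sigp";	/* SIGP */
	case 0xb2: return "kvm_s390_handle_b2";
	case 0xb6: return "kvm_s390_handle_stctl";	/* STCTL */
	case 0xb7: return "kvm_s390_handle_lctl";	/* LCTL */
	case 0xb9: return "kvm_s390_handle_b9";
	case 0xe3: return "kvm_s390_handle_e3";
	case 0xe5: return "kvm_s390_handle_e5";
	case 0xeb: return "kvm_s390_handle_eb";
	default:   return "-EOPNOTSUPP";
	}
}

int main(void)
{
	printf("%s\n", dispatch(0xb220));	/* e.g. SERVC -> the b2 handler */
	return 0;
}
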
143 static int inject_prog_on_prog_intercept(struct kvm_vcpu *vcpu)
146 .code = vcpu->arch.sie_block->iprcc,
151 switch (vcpu->arch.sie_block->iprcc & ~PGM_PER) {
162 pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
170 pgm_info.exc_access_id = vcpu->arch.sie_block->eai;
178 pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
179 pgm_info.exc_access_id = vcpu->arch.sie_block->eai;
180 pgm_info.op_access_id = vcpu->arch.sie_block->oai;
183 pgm_info.mon_class_nr = vcpu->arch.sie_block->mcn;
184 pgm_info.mon_code = vcpu->arch.sie_block->tecmc;
188 pgm_info.data_exc_code = vcpu->arch.sie_block->dxc;
191 pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
192 pgm_info.exc_access_id = vcpu->arch.sie_block->eai;
198 if (vcpu->arch.sie_block->iprcc & PGM_PER) {
199 pgm_info.per_code = vcpu->arch.sie_block->perc;
200 pgm_info.per_atmid = vcpu->arch.sie_block->peratmid;
201 pgm_info.per_address = vcpu->arch.sie_block->peraddr;
202 pgm_info.per_access_id = vcpu->arch.sie_block->peraid;
204 return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
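
inject_prog_on_prog_intercept() turns the hardware-latched program interrupt back into a KVM-injected one: the base code (iprcc with the PER bit masked off) decides which SIE-block side-band fields (tecmc, eai, oai, mcn, dxc) are meaningful, and PER details are copied on top whenever the PER bit rides along. A reduced, compilable model of that two-step shape; the PGM_* values are the architected interruption codes as I recall them, so treat them as assumptions:

#include <stdint.h>
#include <stdio.h>

#define PGM_PER		0x80	/* PER event flag in the interruption code */
#define PGM_PROTECTION	0x04
#define PGM_DATA	0x07

struct pgm_model {
	uint16_t code;
	uint64_t trans_exc_code;
	uint8_t  dxc, per_code;
};

/* Pick the side-band fields valid for the base exception, then add PER
 * info independently, exactly the shape of the matched switch. */
static struct pgm_model build_pgm(uint16_t iprcc, uint64_t tecmc,
				  uint8_t dxc, uint8_t perc)
{
	struct pgm_model p = { .code = iprcc };

	switch (iprcc & ~PGM_PER) {
	case PGM_PROTECTION:
		p.trans_exc_code = tecmc;	/* failing address info */
		break;
	case PGM_DATA:
		p.dxc = dxc;			/* data-exception code */
		break;
	}
	if (iprcc & PGM_PER)
		p.per_code = perc;
	return p;
}

int main(void)
{
	struct pgm_model p = build_pgm(PGM_PROTECTION | PGM_PER, 0x2000, 0, 4);
	printf("code=%#x tec=%#llx per=%#x\n", p.code,
	       (unsigned long long)p.trans_exc_code, p.per_code);
	return 0;
}
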
211 static int handle_itdb(struct kvm_vcpu *vcpu)
216 if (!IS_TE_ENABLED(vcpu) || !IS_ITDB_VALID(vcpu))
220 itdb = (struct kvm_s390_itdb *)vcpu->arch.sie_block->itdba;
221 rc = write_guest_lc(vcpu, __LC_PGM_TDB, itdb, sizeof(*itdb));
229 #define per_event(vcpu) (vcpu->arch.sie_block->iprcc & PGM_PER)
231 static int handle_prog(struct kvm_vcpu *vcpu)
236 vcpu->stat.exit_program_interruption++;
242 if (kvm_s390_pv_cpu_is_protected(vcpu))
245 if (guestdbg_enabled(vcpu) && per_event(vcpu)) {
246 rc = kvm_s390_handle_per_event(vcpu);
250 if (vcpu->arch.sie_block->iprcc == 0)
254 trace_kvm_s390_intercept_prog(vcpu, vcpu->arch.sie_block->iprcc);
255 if (vcpu->arch.sie_block->iprcc == PGM_SPECIFICATION) {
256 rc = read_guest_lc(vcpu, __LC_PGM_NEW_PSW, &psw, sizeof(psw_t));
263 rc = handle_itdb(vcpu);
267 return inject_prog_on_prog_intercept(vcpu);
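
handle_prog() sequences the pieces above: protected guests bail out immediately, guest debugging gets a chance to filter PER events (a fully filtered event leaves iprcc == 0, so nothing is injected), a PGM_SPECIFICATION intercept first fetches the guest's program new PSW from the lowcore to guard against an endless exception loop, the transaction diagnostic block is written out via handle_itdb(), and finally the interrupt is reinjected. A condensed model of the ordering; the predicates are stubs, not kernel calls:

#include <stdbool.h>

/* Stub predicates modelling the guards in handle_prog(); the ordering is
 * the point, not the bodies. */
static bool pv_cpu(void)           { return false; }
static bool per_filtered(void)     { return false; }
static bool spec_psw_invalid(void) { return false; }

static int handle_prog_model(void)
{
	if (pv_cpu())
		return -1;	/* protected guest: give up to userspace */
	if (per_filtered())
		return 0;	/* debugger consumed the PER event */
	if (spec_psw_invalid())
		return -1;	/* would loop on specification exceptions */
	/* write the transaction diagnostic block, then reinject */
	return 0;
}

int main(void)
{
	return handle_prog_model();
}
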
286 static int handle_external_interrupt(struct kvm_vcpu *vcpu)
288 u16 eic = vcpu->arch.sie_block->eic;
293 vcpu->stat.exit_external_interrupt++;
295 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
296 newpsw = vcpu->arch.sie_block->gpsw;
298 rc = read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &newpsw, sizeof(psw_t));
320 irq.u.extcall.code = vcpu->arch.sie_block->extcpuaddr;
321 rc = kvm_s390_inject_vcpu(vcpu, &irq);
330 return kvm_s390_inject_vcpu(vcpu, &irq);
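
handle_external_interrupt() re-injects the event that stopped SIE: the external-interrupt code is read from eic, the external new PSW is taken from the SIE block for protected guests (their lowcore is not readable by the host) or fetched from the guest lowcore otherwise, and for an external call the source CPU address from extcpuaddr becomes the payload. A sketch of the eic dispatch; the eic values are the architected external-interruption codes as I recall them:

#include <stdint.h>
#include <stdio.h>

/* Architected external-interruption codes (assumed values). */
#define EXT_IRQ_CLK_COMP	0x1004
#define EXT_IRQ_CPU_TIMER	0x1005
#define EXT_IRQ_EXTERNAL_CALL	0x1202

static const char *reinject(uint16_t eic, uint16_t extcpuaddr)
{
	static char buf[64];

	switch (eic) {
	case EXT_IRQ_CLK_COMP:
		return "inject KVM_S390_INT_CLOCK_COMP";
	case EXT_IRQ_CPU_TIMER:
		return "inject KVM_S390_INT_CPU_TIMER";
	case EXT_IRQ_EXTERNAL_CALL:
		snprintf(buf, sizeof(buf),
			 "inject KVM_S390_INT_EXTERNAL_CALL from cpu %d",
			 extcpuaddr);
		return buf;
	default:
		return "-EOPNOTSUPP";
	}
}

int main(void)
{
	puts(reinject(EXT_IRQ_EXTERNAL_CALL, 3));
	return 0;
}
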
341 static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
346 kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
349 rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg2],
352 return kvm_s390_inject_prog_cond(vcpu, rc);
353 rc = kvm_arch_fault_in_page(vcpu, srcaddr, 0);
358 rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg1],
361 return kvm_s390_inject_prog_cond(vcpu, rc);
362 rc = kvm_arch_fault_in_page(vcpu, dstaddr, 1);
366 kvm_s390_retry_instr(vcpu);
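
handle_mvpg_pei() resolves a MOVE PAGE that SIE could only partially execute: both operand registers are fetched, the source address is translated and faulted in for reading, the destination for writing, and only then is the instruction retried via kvm_s390_retry_instr(), so the guest re-executes MVPG with both pages resident. A model of that retry discipline with stubbed translation:

#include <stdint.h>
#include <stdio.h>

/* Stub: translate a guest address; nonzero models an injected fault. */
static int resolve(uint64_t gaddr, int write)
{
	(void)gaddr; (void)write;
	return 0;
}

static int mvpg_pei_model(uint64_t src, uint64_t dst)
{
	/* Source first, for read access... */
	if (resolve(src, 0))
		return -1;	/* fault injected, no retry */
	/* ...then destination, for write access. */
	if (resolve(dst, 1))
		return -1;
	puts("both pages resolvable: rewind PSW and retry MVPG");
	return 0;
}

int main(void)
{
	return mvpg_pei_model(0x1000, 0x2000);
}
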
371 static int handle_partial_execution(struct kvm_vcpu *vcpu)
373 vcpu->stat.exit_pei++;
375 if (vcpu->arch.sie_block->ipa == 0xb254) /* MVPG */
376 return handle_mvpg_pei(vcpu);
377 if (vcpu->arch.sie_block->ipa >> 8 == 0xae) /* SIGP */
378 return kvm_s390_handle_sigp_pei(vcpu);
388 int handle_sthyi(struct kvm_vcpu *vcpu)
394 if (!test_kvm_facility(vcpu->kvm, 74))
395 return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
397 kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
398 code = vcpu->run->s.regs.gprs[reg1];
399 addr = vcpu->run->s.regs.gprs[reg2];
401 vcpu->stat.instruction_sthyi++;
402 VCPU_EVENT(vcpu, 3, "STHYI: fc: %llu addr: 0x%016llx", code, addr);
403 trace_kvm_s390_handle_sthyi(vcpu, code, addr);
406 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
414 if (!kvm_s390_pv_cpu_is_protected(vcpu) && (addr & ~PAGE_MASK))
415 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
428 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
429 memcpy((void *)(sida_origin(vcpu->arch.sie_block)),
432 r = write_guest(vcpu, addr, reg2, sctns, PAGE_SIZE);
435 return kvm_s390_inject_prog_cond(vcpu, r);
441 vcpu->run->s.regs.gprs[reg2 + 1] = rc;
442 kvm_s390_set_psw_cc(vcpu, cc);
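
handle_sthyi() emulates STORE HYPERVISOR INFORMATION: facility 74 must be offered to the guest, the function code comes from the even register and the response address from the odd one, the buffer must be page aligned for non-protected guests (protected guests exchange data through the SIDA instead, hence the memcpy branch), and on success a full page is written back with the condition and return codes landing in the registers. A model of the entry checks; the return values merely stand in for the injected exceptions:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096

/* 0 = proceed, otherwise which program exception would be injected. */
static int sthyi_entry_checks(int has_facility_74, int protected_guest,
			      uint64_t addr)
{
	if (!has_facility_74)
		return 1;	/* models PGM_OPERATION */
	if (!protected_guest && (addr & (PAGE_SIZE - 1)))
		return 2;	/* models PGM_SPECIFICATION */
	return 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       sthyi_entry_checks(0, 0, 0),	  /* facility not offered */
	       sthyi_entry_checks(1, 0, 0x1008),  /* unaligned buffer */
	       sthyi_entry_checks(1, 0, 0x2000)); /* ok */
	return 0;
}
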
446 static int handle_operexc(struct kvm_vcpu *vcpu)
451 vcpu->stat.exit_operation_exception++;
452 trace_kvm_s390_handle_operexc(vcpu, vcpu->arch.sie_block->ipa,
453 vcpu->arch.sie_block->ipb);
455 if (vcpu->arch.sie_block->ipa == 0xb256)
456 return handle_sthyi(vcpu);
458 if (vcpu->arch.sie_block->ipa == 0 && vcpu->kvm->arch.user_instr0)
460 rc = read_guest_lc(vcpu, __LC_PGM_NEW_PSW, &newpsw, sizeof(psw_t));
471 oldpsw = vcpu->arch.sie_block->gpsw;
479 return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
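
handle_operexc() deals with operation exceptions SIE reflected to the host: ipa == 0xb256 is STHYI, which the matches above emulate; a zero ipa with the user_instr0 capability enabled is forwarded to userspace; everything else is checked against an exception-loop heuristic (roughly: a program new PSW landing within a few bytes before the faulting address would re-raise the exception forever) before PGM_OPERATION is injected back into the guest. A compressed model; the 6-byte window is my simplification of the kernel's heuristic:

#include <stdint.h>
#include <stdio.h>

static int operexc_model(uint16_t ipa, int user_instr0,
			 uint64_t oldpsw_addr, uint64_t newpsw_addr)
{
	if (ipa == 0xb256)
		return 1;	/* emulate STHYI in the kernel */
	if (ipa == 0 && user_instr0)
		return 2;	/* let userspace handle instruction 0x0000 */
	if (oldpsw_addr - newpsw_addr <= 6)
		return -1;	/* pgm new PSW points just before the
				 * fault: would loop forever */
	return 3;		/* inject PGM_OPERATION */
}

int main(void)
{
	printf("%d\n", operexc_model(0xb256, 0, 0x100, 0x200));
	return 0;
}
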
482 static int handle_pv_spx(struct kvm_vcpu *vcpu)
484 u32 pref = *(u32 *)vcpu->arch.sie_block->sidad;
486 kvm_s390_set_prefix(vcpu, pref);
487 trace_kvm_s390_handle_prefix(vcpu, 1, pref);
491 static int handle_pv_sclp(struct kvm_vcpu *vcpu)
493 struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
515 static int handle_pv_uvc(struct kvm_vcpu *vcpu)
517 struct uv_cb_share *guest_uvcb = (void *)vcpu->arch.sie_block->sidad;
521 .guest_handle = kvm_s390_pv_get_handle(vcpu->kvm),
531 rc = gmap_make_secure(vcpu->arch.gmap, uvcb.gaddr, &uvcb);
541 static int handle_pv_notification(struct kvm_vcpu *vcpu)
545 if (vcpu->arch.sie_block->ipa == 0xb210)
546 return handle_pv_spx(vcpu);
547 if (vcpu->arch.sie_block->ipa == 0xb220)
548 return handle_pv_sclp(vcpu);
549 if (vcpu->arch.sie_block->ipa == 0xb9a4)
550 return handle_pv_uvc(vcpu);
551 if (vcpu->arch.sie_block->ipa >> 8 == 0xae) {
560 ret = kvm_s390_handle_sigp_pei(vcpu);
565 return handle_instruction(vcpu);
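
handle_pv_notification() is the protected-guest sibling of handle_instruction(): the Ultravisor has already performed the instruction and KVM only mirrors the effect. SPX (0xb210) updates KVM's notion of the prefix, SERVC (0xb220) completes the SCLP request, UVC (0xb9a4) may need gmap_make_secure() for a share request, SIGP orders handled in the kernel go through the PEI path, and everything else falls through to handle_instruction(). A dispatch sketch mirroring the matched constants:

#include <stdint.h>
#include <stdio.h>

static const char *pv_notify(uint16_t ipa)
{
	if (ipa == 0xb210)
		return "SPX: mirror the new prefix into KVM";
	if (ipa == 0xb220)
		return "SERVC: complete the SCLP request";
	if (ipa == 0xb9a4)
		return "UVC: make the shared page(s) secure";
	if (ipa >> 8 == 0xae)
		return "SIGP: partial-execution handling";
	return "fall through to handle_instruction()";
}

int main(void)
{
	puts(pv_notify(0xb9a4));
	return 0;
}
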
568 int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
572 if (kvm_is_ucontrol(vcpu->kvm))
575 switch (vcpu->arch.sie_block->icptcode) {
577 vcpu->stat.exit_external_request++;
580 vcpu->stat.exit_io_request++;
583 rc = handle_instruction(vcpu);
586 return handle_prog(vcpu);
588 return handle_external_interrupt(vcpu);
590 return kvm_s390_handle_wait(vcpu);
592 return handle_validity(vcpu);
594 return handle_stop(vcpu);
596 rc = handle_operexc(vcpu);
599 rc = handle_partial_execution(vcpu);
602 rc = kvm_s390_skey_check_enable(vcpu);
614 rc = handle_instruction(vcpu);
617 rc = handle_pv_notification(vcpu);
621 gmap_convert_to_secure(vcpu->arch.gmap,
622 kvm_s390_get_prefix(vcpu));
623 gmap_convert_to_secure(vcpu->arch.gmap,
624 kvm_s390_get_prefix(vcpu) + PAGE_SIZE);
631 if (vcpu->arch.sie_block->icptstatus & 0x02 &&
633 per_rc = kvm_s390_handle_per_ifetch_icpt(vcpu);
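
kvm_handle_sie_intercept() is the top of the funnel: every SIE exit lands here and is routed by icptcode; ucontrol guests return to userspace immediately, and after the main switch a pending PER instruction-fetch event (flagged in icptstatus) gets a follow-up pass. A sketch of the routing table; the ICPT_* values are the intercept codes as far as I recall them from the kernel source, so verify before relying on them:

#include <stdint.h>
#include <stdio.h>

/* Intercept codes (assumed values). */
#define ICPT_INST	0x04
#define ICPT_PROGI	0x08
#define ICPT_EXTREQ	0x10
#define ICPT_EXTINT	0x14
#define ICPT_IOREQ	0x18
#define ICPT_WAIT	0x1c
#define ICPT_VALIDITY	0x20
#define ICPT_STOP	0x28
#define ICPT_OPEREXC	0x2c
#define ICPT_PARTEXEC	0x38

static const char *route(uint8_t icptcode)
{
	switch (icptcode) {
	case ICPT_EXTREQ:
	case ICPT_IOREQ:	return "count exit, re-enter guest";
	case ICPT_INST:		return "handle_instruction";
	case ICPT_PROGI:	return "handle_prog";
	case ICPT_EXTINT:	return "handle_external_interrupt";
	case ICPT_WAIT:		return "kvm_s390_handle_wait";
	case ICPT_VALIDITY:	return "handle_validity";
	case ICPT_STOP:		return "handle_stop";
	case ICPT_OPEREXC:	return "handle_operexc";
	case ICPT_PARTEXEC:	return "handle_partial_execution";
	default:		return "-EOPNOTSUPP";
	}
}

int main(void)
{
	puts(route(ICPT_WAIT));
	return 0;
}
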