Lines matching refs: vcpu (s390 KVM privileged-instruction handlers, arch/s390/kvm/priv.c). Each fragment below is prefixed with its line number in the source file; hedged reconstruction sketches are interleaved where they clarify the elided context.

32 static int handle_ri(struct kvm_vcpu *vcpu)
34 vcpu->stat.instruction_ri++;
36 if (test_kvm_facility(vcpu->kvm, 64)) {
37 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (lazy)");
38 vcpu->arch.sie_block->ecb3 |= ECB3_RI;
39 kvm_s390_retry_instr(vcpu);
42 return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
45 int kvm_s390_handle_aa(struct kvm_vcpu *vcpu)
47 if ((vcpu->arch.sie_block->ipa & 0xf) <= 4)
48 return handle_ri(vcpu);
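
The two fragments above implement lazy enablement of runtime instrumentation: set ECB3_RI in the SIE control block and re-execute the instruction so the hardware interprets it from then on. A minimal reconstruction; the return after the retry and the dispatcher's fallthrough value are assumptions:

    static int handle_ri(struct kvm_vcpu *vcpu)
    {
        vcpu->stat.instruction_ri++;

        if (test_kvm_facility(vcpu->kvm, 64)) {
            VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (lazy)");
            /* Enable RI interpretation in SIE, then re-execute the
             * intercepted instruction instead of emulating it. */
            vcpu->arch.sie_block->ecb3 |= ECB3_RI;
            kvm_s390_retry_instr(vcpu);
            return 0;                               /* assumed */
        }
        return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
    }

    int kvm_s390_handle_aa(struct kvm_vcpu *vcpu)
    {
        /* The low nibble of IPA selects among the RI instructions. */
        if ((vcpu->arch.sie_block->ipa & 0xf) <= 4)
            return handle_ri(vcpu);
        return -EOPNOTSUPP;                         /* assumed */
    }

kvm_s390_retry_instr rewinds the guest PSW so that SIE re-executes the same instruction on the next entry, now with interpretation enabled.
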
53 static int handle_gs(struct kvm_vcpu *vcpu)
55 vcpu->stat.instruction_gs++;
57 if (test_kvm_facility(vcpu->kvm, 133)) {
58 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (lazy)");
61 current->thread.gs_cb = (struct gs_cb *)&vcpu->run->s.regs.gscb;
64 vcpu->arch.sie_block->ecb |= ECB_GS;
65 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
66 vcpu->arch.gs_enabled = 1;
67 kvm_s390_retry_instr(vcpu);
70 return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
73 int kvm_s390_handle_e3(struct kvm_vcpu *vcpu)
75 int code = vcpu->arch.sie_block->ipb & 0xff;
78 return handle_gs(vcpu);
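
handle_gs follows the same lazy-enable pattern for guarded storage: point the host's gs_cb at the gscb area shared through kvm_run, set the ECB/ECD interpretation bits, and retry. A sketch that omits the preemption and control-register handling the real enable path needs around the gs_cb switch; that omission and the return values are assumptions:

    static int handle_gs(struct kvm_vcpu *vcpu)
    {
        vcpu->stat.instruction_gs++;

        if (test_kvm_facility(vcpu->kvm, 133)) {
            VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (lazy)");
            /* The host-side guarded-storage control block now aliases
             * the register area exported to userspace via kvm_run. */
            current->thread.gs_cb = (struct gs_cb *)&vcpu->run->s.regs.gscb;
            vcpu->arch.sie_block->ecb |= ECB_GS;          /* interpret GS */
            vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT; /* host-managed regs */
            vcpu->arch.gs_enabled = 1;
            kvm_s390_retry_instr(vcpu);
            return 0;                                     /* assumed */
        }
        return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
    }

kvm_s390_handle_e3 then routes only the guarded-storage opcodes, selected by the low byte of ipb (fragment 75 above), to this handler.
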
83 static int handle_set_clock(struct kvm_vcpu *vcpu)
90 vcpu->stat.instruction_sck++;
92 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
93 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
95 op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
97 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
98 rc = read_guest(vcpu, op2, ar, &gtod.tod, sizeof(gtod.tod));
100 return kvm_s390_inject_prog_cond(vcpu, rc);
102 VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", gtod.tod);
104 * To set the TOD clock the kvm lock must be taken, but the vcpu lock
113 if (!kvm_s390_try_set_tod_clock(vcpu->kvm, &gtod)) {
114 kvm_s390_retry_instr(vcpu);
118 kvm_s390_set_psw_cc(vcpu, 0);
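
A sketch of the SCK flow assembled from the fragments: privilege check, doubleword-aligned operand fetch, then either a successful TOD update (cc 0) or an instruction retry when the clock could not be set. The locals and the alignment test are assumptions:

    static int handle_set_clock(struct kvm_vcpu *vcpu)
    {
        struct kvm_s390_vm_tod_clock gtod = { 0 };  /* assumed type */
        u64 op2;
        u8 ar;
        int rc;

        vcpu->stat.instruction_sck++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
            return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
        if (op2 & 7)        /* assumed: doubleword alignment required */
            return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        rc = read_guest(vcpu, op2, ar, &gtod.tod, sizeof(gtod.tod));
        if (rc)
            return kvm_s390_inject_prog_cond(vcpu, rc);

        VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", gtod.tod);
        /* If the kvm lock is contended, re-execute SCK rather than
         * sleeping while holding the vcpu lock. */
        if (!kvm_s390_try_set_tod_clock(vcpu->kvm, &gtod)) {
            kvm_s390_retry_instr(vcpu);
            return 0;
        }

        kvm_s390_set_psw_cc(vcpu, 0);
        return 0;
    }

Retrying instead of blocking keeps the lock-ordering problem hinted at in the comment at line 104 out of the picture.
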
122 static int handle_set_prefix(struct kvm_vcpu *vcpu)
129 vcpu->stat.instruction_spx++;
131 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
132 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
134 operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
138 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
141 rc = read_guest(vcpu, operand2, ar, &address, sizeof(address));
143 return kvm_s390_inject_prog_cond(vcpu, rc);
152 if (kvm_is_error_gpa(vcpu->kvm, address))
153 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
155 kvm_s390_set_prefix(vcpu, address);
156 trace_kvm_s390_handle_prefix(vcpu, 1, address);
160 static int handle_store_prefix(struct kvm_vcpu *vcpu)
167 vcpu->stat.instruction_stpx++;
169 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
170 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
172 operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
176 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
178 address = kvm_s390_get_prefix(vcpu);
181 rc = write_guest(vcpu, operand2, ar, &address, sizeof(address));
183 return kvm_s390_inject_prog_cond(vcpu, rc);
185 VCPU_EVENT(vcpu, 3, "STPX: storing prefix 0x%x into 0x%llx", address, operand2);
186 trace_kvm_s390_handle_prefix(vcpu, 0, address);
190 static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
192 u16 vcpu_id = vcpu->vcpu_id;
197 vcpu->stat.instruction_stap++;
199 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
200 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
202 ga = kvm_s390_get_base_disp_s(vcpu, &ar);
205 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
207 rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id));
209 return kvm_s390_inject_prog_cond(vcpu, rc);
211 VCPU_EVENT(vcpu, 3, "STAP: storing cpu address (%u) to 0x%llx", vcpu_id, ga);
212 trace_kvm_s390_handle_stap(vcpu, ga);
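
SPX (set prefix), STPX (store prefix), and STAP (store CPU address) all instantiate one template: count the instruction, reject problem state, decode and alignment-check the storage operand, access guest memory, then trace. An illustrative distillation; handle_priv_store and its parameters are hypothetical names, not from the source:

    /* Illustrative template, not a function from the source file. */
    static int handle_priv_store(struct kvm_vcpu *vcpu, void *data, int len)
    {
        u64 ga;
        u8 ar;
        int rc;

        /* 1. Account the instruction, e.g. vcpu->stat.instruction_stap++. */
        /* 2. Reject problem-state (user mode) guests. */
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
            return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
        /* 3. Decode the storage operand and check its alignment. */
        ga = kvm_s390_get_base_disp_s(vcpu, &ar);
        if (ga & (len - 1))
            return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        /* 4. Access guest memory; turn faults into program interrupts. */
        rc = write_guest(vcpu, ga, ar, data, len);
        if (rc)
            return kvm_s390_inject_prog_cond(vcpu, rc);
        /* 5. Emit VCPU_EVENT / trace point and return. */
        return 0;
    }
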
216 int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu)
220 trace_kvm_s390_skey_related_inst(vcpu);
222 if (vcpu->arch.skey_enabled)
226 VCPU_EVENT(vcpu, 3, "enabling storage keys for guest: %d", rc);
230 if (kvm_s390_test_cpuflags(vcpu, CPUSTAT_KSS))
231 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_KSS);
232 if (!vcpu->kvm->arch.use_skf)
233 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
235 vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
236 vcpu->arch.skey_enabled = true;
240 static int try_handle_skey(struct kvm_vcpu *vcpu)
244 rc = kvm_s390_skey_check_enable(vcpu);
247 if (vcpu->kvm->arch.use_skf) {
249 kvm_s390_retry_instr(vcpu);
250 VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
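
try_handle_skey combines the enable step with an optional retry: once kvm_s390_skey_check_enable has switched key handling on, hosts with the storage-key facility (use_skf) simply re-execute the instruction under SIE interpretation. A reconstruction; the -EAGAIN sentinel meaning "retried, nothing left to emulate" is an assumption consistent with how the callers bail out:

    static int try_handle_skey(struct kvm_vcpu *vcpu)
    {
        int rc;

        rc = kvm_s390_skey_check_enable(vcpu);
        if (rc)
            return rc;
        if (vcpu->kvm->arch.use_skf) {
            /* With the storage-key facility, SIE interprets the key
             * instruction itself once keys are enabled: just retry. */
            kvm_s390_retry_instr(vcpu);
            VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
            return -EAGAIN;                         /* assumed sentinel */
        }
        return 0;
    }
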
256 static int handle_iske(struct kvm_vcpu *vcpu)
264 vcpu->stat.instruction_iske++;
266 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
267 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
269 rc = try_handle_skey(vcpu);
273 kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
275 gaddr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
276 gaddr = kvm_s390_logical_to_effective(vcpu, gaddr);
277 gaddr = kvm_s390_real_to_abs(vcpu, gaddr);
278 vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gaddr));
280 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
296 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
299 vcpu->run->s.regs.gprs[reg1] &= ~0xff;
300 vcpu->run->s.regs.gprs[reg1] |= key;
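
The heart of handle_iske is a three-stage address pipeline: mask to a page, strip addressing-mode bits (logical to effective), apply prefixing (real to absolute), then map the guest frame to a host virtual address. A trimmed sketch; the key lookup itself, which the real handler performs under mmap_read_lock with fault fixup, is reduced to a comment:

    static int handle_iske(struct kvm_vcpu *vcpu)
    {
        unsigned long gaddr, vmaddr;
        unsigned char key = 0;
        int reg1, reg2, rc;

        vcpu->stat.instruction_iske++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
            return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        rc = try_handle_skey(vcpu);
        if (rc)
            return rc != -EAGAIN ? rc : 0;  /* assumed -EAGAIN handling */

        kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

        gaddr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
        gaddr = kvm_s390_logical_to_effective(vcpu, gaddr); /* drop AMODE bits */
        gaddr = kvm_s390_real_to_abs(vcpu, gaddr);          /* apply prefixing */
        vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gaddr));  /* guest abs -> host VA */
        if (kvm_is_error_hva(vmaddr))
            return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

        /* Elided: get_guest_storage_key() under mmap_read_lock(), with
         * fixup_user_fault() retry when the page is not yet mapped. */

        vcpu->run->s.regs.gprs[reg1] &= ~0xff;  /* key lands in the low byte */
        vcpu->run->s.regs.gprs[reg1] |= key;
        return 0;
    }

handle_rrbe (next fragments) runs the same pipeline but sets the condition code from the reset-reference-bit result instead of writing a register.
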
304 static int handle_rrbe(struct kvm_vcpu *vcpu)
311 vcpu->stat.instruction_rrbe++;
313 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
314 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
316 rc = try_handle_skey(vcpu);
320 kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
322 gaddr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
323 gaddr = kvm_s390_logical_to_effective(vcpu, gaddr);
324 gaddr = kvm_s390_real_to_abs(vcpu, gaddr);
325 vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gaddr));
327 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
342 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
345 kvm_s390_set_psw_cc(vcpu, rc);
353 static int handle_sske(struct kvm_vcpu *vcpu)
355 unsigned char m3 = vcpu->arch.sie_block->ipb >> 28;
362 vcpu->stat.instruction_sske++;
364 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
365 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
367 rc = try_handle_skey(vcpu);
371 if (!test_kvm_facility(vcpu->kvm, 8))
373 if (!test_kvm_facility(vcpu->kvm, 10))
375 if (!test_kvm_facility(vcpu->kvm, 14))
378 kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
380 key = vcpu->run->s.regs.gprs[reg1] & 0xfe;
381 start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
382 start = kvm_s390_logical_to_effective(vcpu, start);
387 start = kvm_s390_real_to_abs(vcpu, start);
392 unsigned long vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
396 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
410 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
421 kvm_s390_set_psw_cc(vcpu, 3);
423 kvm_s390_set_psw_cc(vcpu, rc);
424 vcpu->run->s.regs.gprs[reg1] &= ~0xff00UL;
425 vcpu->run->s.regs.gprs[reg1] |= (u64) oldkey << 8;
429 if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT)
430 vcpu->run->s.regs.gprs[reg2] &= ~PAGE_MASK;
432 vcpu->run->s.regs.gprs[reg2] &= ~0xfffff000UL;
433 end = kvm_s390_logical_to_effective(vcpu, end);
434 vcpu->run->s.regs.gprs[reg2] |= end;
439 static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
441 vcpu->stat.instruction_ipte_interlock++;
442 if (psw_bits(vcpu->arch.sie_block->gpsw).pstate)
443 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
444 wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu->kvm));
445 kvm_s390_retry_instr(vcpu);
446 VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
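
The interlock handler is small enough to reconstruct almost verbatim from its fragments; only the braces and the final return are assumed. It parks the vcpu until no one holds the IPTE lock, then retries:

    static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
    {
        vcpu->stat.instruction_ipte_interlock++;
        if (psw_bits(vcpu->arch.sie_block->gpsw).pstate)
            return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
        /* Sleep until no vcpu holds the IPTE lock, then re-execute. */
        wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu->kvm));
        kvm_s390_retry_instr(vcpu);
        VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
        return 0;                                   /* assumed */
    }
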
450 static int handle_test_block(struct kvm_vcpu *vcpu)
455 vcpu->stat.instruction_tb++;
457 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
458 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
460 kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
461 addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
462 addr = kvm_s390_logical_to_effective(vcpu, addr);
463 if (kvm_s390_check_low_addr_prot_real(vcpu, addr))
464 return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
465 addr = kvm_s390_real_to_abs(vcpu, addr);
467 if (kvm_is_error_gpa(vcpu->kvm, addr))
468 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
473 if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE))
475 kvm_s390_set_psw_cc(vcpu, 0);
476 vcpu->run->s.regs.gprs[0] = 0;
480 static int handle_tpi(struct kvm_vcpu *vcpu)
489 vcpu->stat.instruction_tpi++;
491 addr = kvm_s390_get_base_disp_s(vcpu, &ar);
493 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
495 inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
497 kvm_s390_set_psw_cc(vcpu, 0);
510 rc = write_guest(vcpu, addr, ar, &tpi_data, len);
512 rc = kvm_s390_inject_prog_cond(vcpu, rc);
521 if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) {
530 kvm_s390_set_psw_cc(vcpu, 1);
538 if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
546 static int handle_tsch(struct kvm_vcpu *vcpu)
551 vcpu->stat.instruction_tsch++;
554 if (vcpu->run->s.regs.gprs[1])
555 inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask,
556 vcpu->run->s.regs.gprs[1]);
566 vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
567 vcpu->run->s390_tsch.dequeued = !!inti;
569 vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
570 vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
571 vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
572 vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
574 vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
579 static int handle_io_inst(struct kvm_vcpu *vcpu)
581 VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");
583 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
584 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
586 if (vcpu->kvm->arch.css_support) {
591 if (vcpu->arch.sie_block->ipa == 0xb236)
592 return handle_tpi(vcpu);
593 if (vcpu->arch.sie_block->ipa == 0xb235)
594 return handle_tsch(vcpu);
596 vcpu->stat.instruction_io_other++;
603 kvm_s390_set_psw_cc(vcpu, 3);
610 * @vcpu: the vcpu having issued the pqap instruction
623 static int handle_pqap(struct kvm_vcpu *vcpu)
635 if (!(vcpu->arch.sie_block->eca & ECA_APIE))
645 reg0 = vcpu->run->s.regs.gprs[0];
651 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
652 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
657 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
659 if (!test_kvm_facility(vcpu->kvm, 15) && (reg0 & 0x00800000UL))
660 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
662 if (!(vcpu->kvm->arch.crypto.crycbd & 0x02) && (reg0 & 0x0000c0f0UL))
663 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
667 if (!test_kvm_facility(vcpu->kvm, 65))
668 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
675 down_read(&vcpu->kvm->arch.crypto.pqap_hook_rwsem);
676 if (vcpu->kvm->arch.crypto.pqap_hook) {
677 pqap_hook = *vcpu->kvm->arch.crypto.pqap_hook;
678 ret = pqap_hook(vcpu);
679 if (!ret && vcpu->run->s.regs.gprs[1] & 0x00ff0000)
680 kvm_s390_set_psw_cc(vcpu, 3);
681 up_read(&vcpu->kvm->arch.crypto.pqap_hook_rwsem);
684 up_read(&vcpu->kvm->arch.crypto.pqap_hook_rwsem);
691 memcpy(&vcpu->run->s.regs.gprs[1], &status, sizeof(status));
692 kvm_s390_set_psw_cc(vcpu, 3);
696 static int handle_stfl(struct kvm_vcpu *vcpu)
701 vcpu->stat.instruction_stfl++;
703 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
704 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
710 fac = *vcpu->kvm->arch.model.fac_list >> 32;
711 rc = write_guest_lc(vcpu, offsetof(struct lowcore, stfl_fac_list),
715 VCPU_EVENT(vcpu, 3, "STFL: store facility list 0x%x", fac);
716 trace_kvm_s390_handle_stfl(vcpu, fac);
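
STFL stores only facility bits 0-31, taken from the top half of the first u64 of the facility list, into the fixed low-core location. A reconstruction; the early return on a failed low-core write is an assumption:

    static int handle_stfl(struct kvm_vcpu *vcpu)
    {
        unsigned int fac;
        int rc;

        vcpu->stat.instruction_stfl++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
            return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        /* Facility bits 0-31 sit in the upper half of the first u64. */
        fac = *vcpu->kvm->arch.model.fac_list >> 32;
        rc = write_guest_lc(vcpu, offsetof(struct lowcore, stfl_fac_list),
                            &fac, sizeof(fac));
        if (rc)
            return rc;                              /* assumed */
        VCPU_EVENT(vcpu, 3, "STFL: store facility list 0x%x", fac);
        trace_kvm_s390_handle_stfl(vcpu, fac);
        return 0;
    }
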
742 int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
744 psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
750 vcpu->stat.instruction_lpsw++;
753 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
755 addr = kvm_s390_get_base_disp_s(vcpu, &ar);
757 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
759 rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
761 return kvm_s390_inject_prog_cond(vcpu, rc);
763 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
768 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
772 static int handle_lpswe(struct kvm_vcpu *vcpu)
779 vcpu->stat.instruction_lpswe++;
781 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
782 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
784 addr = kvm_s390_get_base_disp_s(vcpu, &ar);
786 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
787 rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
789 return kvm_s390_inject_prog_cond(vcpu, rc);
790 vcpu->arch.sie_block->gpsw = new_psw;
791 if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
792 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
796 static int handle_stidp(struct kvm_vcpu *vcpu)
798 u64 stidp_data = vcpu->kvm->arch.model.cpuid;
803 vcpu->stat.instruction_stidp++;
805 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
806 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
808 operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
811 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
813 rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data));
815 return kvm_s390_inject_prog_cond(vcpu, rc);
817 VCPU_EVENT(vcpu, 3, "STIDP: store cpu id 0x%llx", stidp_data);
821 static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
826 cpus = atomic_read(&vcpu->kvm->online_vcpus);
848 static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, u8 ar,
851 vcpu->run->exit_reason = KVM_EXIT_S390_STSI;
852 vcpu->run->s390_stsi.addr = addr;
853 vcpu->run->s390_stsi.ar = ar;
854 vcpu->run->s390_stsi.fc = fc;
855 vcpu->run->s390_stsi.sel1 = sel1;
856 vcpu->run->s390_stsi.sel2 = sel2;
859 static int handle_stsi(struct kvm_vcpu *vcpu)
861 int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
862 int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
863 int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
869 vcpu->stat.instruction_stsi++;
870 VCPU_EVENT(vcpu, 3, "STSI: fc: %u sel1: %u sel2: %u", fc, sel1, sel2);
872 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
873 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
884 if (fc == 15 && (!test_kvm_facility(vcpu->kvm, 11) ||
885 !vcpu->kvm->arch.user_stsi))
888 if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
889 || vcpu->run->s.regs.gprs[1] & 0xffff0000)
890 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
893 vcpu->run->s.regs.gprs[0] = 3 << 28;
894 kvm_s390_set_psw_cc(vcpu, 0);
898 operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
900 if (!kvm_s390_pv_cpu_is_protected(vcpu) && (operand2 & 0xfff))
901 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
918 handle_stsi_3_2_2(vcpu, (void *) mem);
921 insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2);
922 trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
925 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
926 memcpy(sida_addr(vcpu->arch.sie_block), (void *)mem, PAGE_SIZE);
929 rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE);
932 rc = kvm_s390_inject_prog_cond(vcpu, rc);
935 if (vcpu->kvm->arch.user_stsi) {
936 insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2);
939 trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
941 kvm_s390_set_psw_cc(vcpu, 0);
942 vcpu->run->s.regs.gprs[0] = 0;
945 kvm_s390_set_psw_cc(vcpu, 3);
951 int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
953 switch (vcpu->arch.sie_block->ipa & 0x00ff) {
955 return handle_stidp(vcpu);
957 return handle_set_clock(vcpu);
959 return handle_set_prefix(vcpu);
961 return handle_store_prefix(vcpu);
963 return handle_store_cpu_address(vcpu);
965 return kvm_s390_handle_vsie(vcpu);
968 return handle_ipte_interlock(vcpu);
970 return handle_iske(vcpu);
972 return handle_rrbe(vcpu);
974 return handle_sske(vcpu);
976 return handle_test_block(vcpu);
993 return handle_io_inst(vcpu);
995 return handle_sthyi(vcpu);
997 return handle_stsi(vcpu);
999 return handle_pqap(vcpu);
1001 return handle_stfl(vcpu);
1003 return handle_lpswe(vcpu);
1009 static int handle_epsw(struct kvm_vcpu *vcpu)
1013 vcpu->stat.instruction_epsw++;
1015 kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
1018 vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
1019 vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
1021 vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
1022 vcpu->run->s.regs.gprs[reg2] |=
1023 vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
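
EPSW copies the two halves of the 64-bit PSW mask into the low words of the designated register pair. A reconstruction; the guard on the second register (reg2 == 0 meaning "not requested") is an assumption:

    static int handle_epsw(struct kvm_vcpu *vcpu)
    {
        int reg1, reg2;

        vcpu->stat.instruction_epsw++;

        kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

        /* High half of the PSW mask into the low word of reg1 ... */
        vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
        vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
        /* ... and, if requested, the low half into reg2. */
        if (reg2) {                                 /* assumed guard */
            vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
            vcpu->run->s.regs.gprs[reg2] |=
                vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
        }
        return 0;
    }
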
1038 static int handle_pfmf(struct kvm_vcpu *vcpu)
1045 vcpu->stat.instruction_pfmf++;
1047 kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
1049 if (!test_kvm_facility(vcpu->kvm, 8))
1050 return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
1052 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
1053 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
1055 if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
1056 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
1059 if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ &&
1060 !test_kvm_facility(vcpu->kvm, 14))
1061 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
1064 if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK &&
1065 test_kvm_facility(vcpu->kvm, 10)) {
1066 mr = vcpu->run->s.regs.gprs[reg1] & PFMF_MR;
1067 mc = vcpu->run->s.regs.gprs[reg1] & PFMF_MC;
1070 nq = vcpu->run->s.regs.gprs[reg1] & PFMF_NQ;
1071 key = vcpu->run->s.regs.gprs[reg1] & PFMF_KEY;
1072 start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
1073 start = kvm_s390_logical_to_effective(vcpu, start);
1075 if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
1076 if (kvm_s390_check_low_addr_prot_real(vcpu, start))
1077 return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
1080 switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
1083 start = kvm_s390_real_to_abs(vcpu, start);
1092 if (!test_kvm_facility(vcpu->kvm, 78) ||
1093 psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_24BIT)
1094 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
1098 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
1106 vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
1108 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
1110 if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
1111 if (kvm_clear_guest(vcpu->kvm, start, PAGE_SIZE))
1112 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
1115 if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
1116 int rc = kvm_s390_skey_check_enable(vcpu);
1130 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
1138 if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
1139 if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) {
1140 vcpu->run->s.regs.gprs[reg2] = end;
1142 vcpu->run->s.regs.gprs[reg2] &= ~0xffffffffUL;
1143 end = kvm_s390_logical_to_effective(vcpu, end);
1144 vcpu->run->s.regs.gprs[reg2] |= end;
1153 static inline int __do_essa(struct kvm_vcpu *vcpu, const int orc)
1164 kvm_s390_get_regs_rre(vcpu, &r1, &r2);
1165 gfn = vcpu->run->s.regs.gprs[r2] >> PAGE_SHIFT;
1166 hva = gfn_to_hva(vcpu->kvm, gfn);
1167 entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
1170 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
1172 nappended = pgste_perform_essa(vcpu->kvm->mm, hva, orc, &ptev, &pgstev);
1175 vcpu->run->s.regs.gprs[r1] = res; /* Exception Indication */
1192 vcpu->run->s.regs.gprs[r1] = res;
1200 cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo & PAGE_MASK);
1205 struct kvm_memory_slot *ms = gfn_to_memslot(vcpu->kvm, gfn);
1209 atomic64_inc(&vcpu->kvm->arch.cmma_dirty_pages);
1215 static int handle_essa(struct kvm_vcpu *vcpu)
1218 int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
1223 VCPU_EVENT(vcpu, 4, "ESSA: release %d pages", entries);
1224 gmap = vcpu->arch.gmap;
1225 vcpu->stat.instruction_essa++;
1226 if (!vcpu->kvm->arch.use_cmma)
1227 return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
1229 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
1230 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
1232 orc = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
1234 if (orc > (test_kvm_facility(vcpu->kvm, 147) ? ESSA_SET_STABLE_NODAT
1236 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
1238 if (!vcpu->kvm->arch.migration_mode) {
1248 if (vcpu->kvm->mm->context.uses_cmm == 0) {
1249 mmap_write_lock(vcpu->kvm->mm);
1250 vcpu->kvm->mm->context.uses_cmm = 1;
1251 mmap_write_unlock(vcpu->kvm->mm);
1262 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
1264 kvm_s390_retry_instr(vcpu);
1268 mmap_read_lock(vcpu->kvm->mm);
1269 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
1270 i = __do_essa(vcpu, orc);
1271 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
1272 mmap_read_unlock(vcpu->kvm->mm);
1278 vcpu->arch.sie_block->cbrlo &= PAGE_MASK; /* reset nceo */
1279 cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
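
handle_essa finishes by resetting the next-CBRL-entry offset and walking the block list that SIE collected; __do_essa has already updated the PGSTEs. A hypothetical helper sketching that tail; the locking and the __gmap_zap release call are assumptions consistent with the "ESSA: release %d pages" event above:

    /* Hypothetical helper; in the source this is the tail of handle_essa. */
    static void essa_release_pages(struct kvm_vcpu *vcpu, struct gmap *gmap)
    {
        int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
        unsigned long *cbrlo;
        int i;

        vcpu->arch.sie_block->cbrlo &= PAGE_MASK;   /* reset nceo */
        cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);

        mmap_read_lock(gmap->mm);                   /* assumed locking */
        for (i = 0; i < entries; i++)
            __gmap_zap(gmap, cbrlo[i]);             /* assumed primitive */
        mmap_read_unlock(gmap->mm);
    }
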
1287 int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
1289 switch (vcpu->arch.sie_block->ipa & 0x00ff) {
1293 return handle_ipte_interlock(vcpu);
1295 return handle_epsw(vcpu);
1297 return handle_essa(vcpu);
1299 return handle_pfmf(vcpu);
1305 int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
1307 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
1308 int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
1314 vcpu->stat.instruction_lctl++;
1316 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
1317 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
1319 ga = kvm_s390_get_base_disp_rs(vcpu, &ar);
1322 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
1324 VCPU_EVENT(vcpu, 4, "LCTL: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
1325 trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);
1328 rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
1330 return kvm_s390_inject_prog_cond(vcpu, rc);
1334 vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
1335 vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++];
1340 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
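
LCTL loads 32-bit images into the low halves of control registers reg1 through reg3, wrapping modulo 16. A sketch of that loop as a hypothetical helper; the wrap-around structure is an assumption consistent with the two masked-or lines above:

    /* Hypothetical helper; in the source this loop is inline in
     * kvm_s390_handle_lctl. */
    static void lctl_load_regs(struct kvm_vcpu *vcpu, int reg1, int reg3,
                               const u32 *ctl_array)
    {
        int reg = reg1, nr_regs = 0;

        do {
            /* Keep the high word, replace the low word. */
            vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
            vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++];
            if (reg == reg3)
                break;
            reg = (reg + 1) % 16;   /* control register numbers wrap */
        } while (1);
        /* Control register contents can affect address translation. */
        kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
    }

handle_lctlg (below) is the 64-bit variant: it replaces whole registers from a u64 array and flushes the TLB the same way.
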
1344 int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
1346 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
1347 int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
1353 vcpu->stat.instruction_stctl++;
1355 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
1356 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
1358 ga = kvm_s390_get_base_disp_rs(vcpu, &ar);
1361 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
1363 VCPU_EVENT(vcpu, 4, "STCTL r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
1364 trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);
1369 ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
1374 rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
1375 return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
1378 static int handle_lctlg(struct kvm_vcpu *vcpu)
1380 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
1381 int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
1387 vcpu->stat.instruction_lctlg++;
1389 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
1390 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
1392 ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);
1395 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
1397 VCPU_EVENT(vcpu, 4, "LCTLG: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
1398 trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);
1401 rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
1403 return kvm_s390_inject_prog_cond(vcpu, rc);
1407 vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++];
1412 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
1416 static int handle_stctg(struct kvm_vcpu *vcpu)
1418 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
1419 int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
1425 vcpu->stat.instruction_stctg++;
1427 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
1428 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
1430 ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);
1433 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
1435 VCPU_EVENT(vcpu, 4, "STCTG r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
1436 trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);
1441 ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
1446 rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
1447 return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
1450 int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
1452 switch (vcpu->arch.sie_block->ipb & 0x000000ff) {
1454 return handle_stctg(vcpu);
1456 return handle_lctlg(vcpu);
1460 return handle_ri(vcpu);
1466 static int handle_tprot(struct kvm_vcpu *vcpu)
1475 vcpu->stat.instruction_tprot++;
1477 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
1478 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
1480 kvm_s390_get_base_disp_sse(vcpu, &address, &operand2, &ar, NULL);
1483 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
1484 ipte_lock(vcpu->kvm);
1486 ret = guest_translate_address_with_key(vcpu, address, ar, &gpa,
1489 gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable);
1493 ret = guest_translate_address_with_key(vcpu, address, ar, &gpa,
1513 kvm_s390_set_psw_cc(vcpu, cc);
1516 ret = kvm_s390_inject_program_int(vcpu, ret);
1520 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
1521 ipte_unlock(vcpu->kvm);
1525 int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
1527 switch (vcpu->arch.sie_block->ipa & 0x00ff) {
1529 return handle_tprot(vcpu);
1535 static int handle_sckpf(struct kvm_vcpu *vcpu)
1539 vcpu->stat.instruction_sckpf++;
1541 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
1542 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
1544 if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
1545 return kvm_s390_inject_program_int(vcpu,
1548 value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
1549 vcpu->arch.sie_block->todpr = value;
1554 static int handle_ptff(struct kvm_vcpu *vcpu)
1556 vcpu->stat.instruction_ptff++;
1559 kvm_s390_set_psw_cc(vcpu, 3);
1563 int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
1565 switch (vcpu->arch.sie_block->ipa & 0x00ff) {
1567 return handle_ptff(vcpu);
1569 return handle_sckpf(vcpu);
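
The final dispatcher routes the two 0x01xx instructions shown above; the opcode case values and the default return are assumptions:

    int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
    {
        switch (vcpu->arch.sie_block->ipa & 0x00ff) {
        case 0x04:                          /* assumed: PTFF */
            return handle_ptff(vcpu);
        case 0x07:                          /* assumed: SCKPF */
            return handle_sckpf(vcpu);
        default:
            return -EOPNOTSUPP;             /* assumed */
        }
    }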