Lines Matching refs:arch

285 kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
287 kvm->arch.epoch = vcpu->arch.sie_block->epoch;
288 kvm->arch.epdx = vcpu->arch.sie_block->epdx;
290 if (vcpu->arch.cputm_enabled)
291 vcpu->arch.cputm_start += *delta;
292 if (vcpu->arch.vsie_block)
293 kvm_clock_sync_scb(vcpu->arch.vsie_block,
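
The run at 285-293 is the clock-sync callback: when the host TOD clock steps, the same delta must be applied to every epoch that a SIE control block caches, including a nested vSIE block. A minimal user-space sketch of that pattern (struct and function names here are illustrative, not the kernel's definitions):

#include <stdio.h>
#include <stdint.h>

struct scb { int64_t epoch; };

static void clock_sync_scb(struct scb *scb, int64_t delta)
{
	scb->epoch += delta;	/* mirrors kvm_clock_sync_scb() adjusting the cached epoch */
}

int main(void)
{
	struct scb sie = { .epoch = 100 }, vsie = { .epoch = 100 };
	int64_t delta = -7;	/* host TOD stepped back by 7 units */

	clock_sync_scb(&sie, delta);
	clock_sync_scb(&vsie, delta);	/* nested (vSIE) block, when present */
	printf("epochs: %lld %lld\n", (long long)sie.epoch, (long long)vsie.epoch);
	return 0;
}
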
597 struct gmap *gmap = kvm->arch.gmap;
680 kvm->arch.use_irqchip = 1;
685 kvm->arch.user_sigp = 1;
693 set_kvm_facility(kvm->arch.model.fac_mask, 129);
694 set_kvm_facility(kvm->arch.model.fac_list, 129);
696 set_kvm_facility(kvm->arch.model.fac_mask, 134);
697 set_kvm_facility(kvm->arch.model.fac_list, 134);
700 set_kvm_facility(kvm->arch.model.fac_mask, 135);
701 set_kvm_facility(kvm->arch.model.fac_list, 135);
704 set_kvm_facility(kvm->arch.model.fac_mask, 148);
705 set_kvm_facility(kvm->arch.model.fac_list, 148);
708 set_kvm_facility(kvm->arch.model.fac_mask, 152);
709 set_kvm_facility(kvm->arch.model.fac_list, 152);
724 set_kvm_facility(kvm->arch.model.fac_mask, 64);
725 set_kvm_facility(kvm->arch.model.fac_list, 64);
737 set_kvm_facility(kvm->arch.model.fac_mask, 72);
738 set_kvm_facility(kvm->arch.model.fac_list, 72);
751 set_kvm_facility(kvm->arch.model.fac_mask, 133);
752 set_kvm_facility(kvm->arch.model.fac_list, 133);
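
The mask/list pairs at 693-752 (and again at 2709-2721) offer a facility to the guest only when it is set in both the host mask and the guest list, so each facility is set twice. A sketch of the MSB-first bit setter, modeled on s390's bytewise __set_facility(); the bitmap size is an assumption:

#include <stdio.h>

#define MAX_FACILITY_BIT (16 * 64)	/* assumption: 16 doublewords of facility bits */

static void set_facility_bit(unsigned char *fac, unsigned long nr)
{
	if (nr >= MAX_FACILITY_BIT)
		return;
	fac[nr >> 3] |= 0x80 >> (nr & 7);	/* bit 0 is the MSB of byte 0 */
}

int main(void)
{
	unsigned char fac_mask[128] = { 0 }, fac_list[128] = { 0 };

	/* e.g. facility 129 lands in byte 16 as 0x40 */
	set_facility_bit(fac_mask, 129);
	set_facility_bit(fac_list, 129);
	printf("byte 16: mask=%#x list=%#x\n", fac_mask[16], fac_list[16]);
	return 0;
}
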
763 else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
775 kvm->arch.use_skf = 0;
776 kvm->arch.use_pfmfi = 0;
784 kvm->arch.user_stsi = 1;
789 kvm->arch.user_instr0 = 1;
808 kvm->arch.mem_limit);
809 if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
836 kvm->arch.use_cmma = 1;
838 kvm->arch.use_pfmfi = 0;
848 if (!kvm->arch.use_cmma)
854 s390_reset_cmma(kvm->arch.gmap->mm);
868 if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
869 new_limit > kvm->arch.mem_limit)
888 gmap_remove(kvm->arch.gmap);
890 kvm->arch.gmap = new;
897 (void *) kvm->arch.gmap->asce);
935 kvm->arch.crypto.crycb->aes_wrapping_key_mask,
936 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
937 kvm->arch.crypto.aes_kw = 1;
946 kvm->arch.crypto.crycb->dea_wrapping_key_mask,
947 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
948 kvm->arch.crypto.dea_kw = 1;
956 kvm->arch.crypto.aes_kw = 0;
957 memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
958 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
966 kvm->arch.crypto.dea_kw = 0;
967 memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
968 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
976 kvm->arch.crypto.apie = 1;
983 kvm->arch.crypto.apie = 0;
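
At 935-983 enabling a wrapping-key attribute regenerates the mask with fresh random bytes and sets the corresponding kw flag, while disabling zeroes both. A hedged user-space model, with getrandom() standing in for the kernel's get_random_bytes() and an illustrative mask size:

#include <stdio.h>
#include <string.h>
#include <sys/random.h>

struct crycb_model { unsigned char aes_wrapping_key_mask[32]; };

static void set_aes_kw(struct crycb_model *cb, int *aes_kw, int enable)
{
	if (enable) {
		/* stands in for the kernel's get_random_bytes() */
		getrandom(cb->aes_wrapping_key_mask,
			  sizeof(cb->aes_wrapping_key_mask), 0);
		*aes_kw = 1;
	} else {
		memset(cb->aes_wrapping_key_mask, 0,
		       sizeof(cb->aes_wrapping_key_mask));
		*aes_kw = 0;
	}
}

int main(void)
{
	struct crycb_model cb = { { 0 } };
	int aes_kw = 0;

	set_aes_kw(&cb, &aes_kw, 1);	/* enable: fresh random mask */
	set_aes_kw(&cb, &aes_kw, 0);	/* disable: mask zeroed */
	printf("aes_kw=%d first byte=%#x\n", aes_kw, cb.aes_wrapping_key_mask[0]);
	return 0;
}
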
1016 if (kvm->arch.migration_mode)
1022 if (!kvm->arch.use_cmma) {
1023 kvm->arch.migration_mode = 1;
1040 atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
1041 kvm->arch.migration_mode = 1;
1053 if (!kvm->arch.migration_mode)
1055 kvm->arch.migration_mode = 0;
1056 if (kvm->arch.use_cmma)
1085 u64 mig = kvm->arch.migration_mode;
1188 gtod->tod = htod.tod + kvm->arch.epoch;
1191 gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx;
1279 kvm->arch.model.cpuid = proc->cpuid;
1284 kvm->arch.model.ibc = unblocked_ibc;
1286 kvm->arch.model.ibc = lowest_ibc;
1288 kvm->arch.model.ibc = proc->ibc;
1290 memcpy(kvm->arch.model.fac_list, proc->fac_list,
1293 kvm->arch.model.ibc,
1294 kvm->arch.model.cpuid);
1296 kvm->arch.model.fac_list[0],
1297 kvm->arch.model.fac_list[1],
1298 kvm->arch.model.fac_list[2]);
1324 bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
1343 if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
1351 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
1352 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
1353 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
1354 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
1356 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
1357 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
1359 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
1360 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
1362 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
1363 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
1365 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
1366 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
1368 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
1369 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
1371 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
1372 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
1374 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
1375 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
1377 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
1378 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
1380 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
1381 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
1383 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
1384 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
1386 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
1387 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
1389 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
1390 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
1392 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
1393 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
1395 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
1396 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
1398 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
1399 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
1400 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
1401 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
1403 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
1404 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
1405 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
1406 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
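
The VM_EVENT dumps at 1351-1406 (mirrored on the get path at 1532-1587) print each query parameter block as 64-bit words. A tiny sketch of the same formatting; the block size and contents are hypothetical, and it assumes a 64-bit unsigned long, as on s390:

#include <stdio.h>

union qpb {				/* hypothetical 16-byte parameter block */
	unsigned char b[16];
	unsigned long w[2];		/* assumes 64-bit unsigned long */
};

int main(void)
{
	union qpb ptff = { .b = { 0xf0, 0x01 } };

	printf("PTFF  subfunc 0x%16.16lx.%16.16lx\n", ptff.w[0], ptff.w[1]);
	return 0;
}
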
1439 proc->cpuid = kvm->arch.model.cpuid;
1440 proc->ibc = kvm->arch.model.ibc;
1441 memcpy(&proc->fac_list, kvm->arch.model.fac_list,
1444 kvm->arch.model.ibc,
1445 kvm->arch.model.cpuid);
1447 kvm->arch.model.fac_list[0],
1448 kvm->arch.model.fac_list[1],
1449 kvm->arch.model.fac_list[2]);
1469 memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
1474 kvm->arch.model.ibc,
1475 kvm->arch.model.cpuid);
1496 bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
1527 if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
1532 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
1533 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
1534 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
1535 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
1537 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
1538 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
1540 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
1541 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
1543 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
1544 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
1546 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
1547 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
1549 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
1550 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
1552 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
1553 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
1555 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
1556 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
1558 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
1559 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
1561 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
1562 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
1564 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
1565 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
1567 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
1568 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
1570 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
1571 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
1573 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
1574 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
1576 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
1577 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
1579 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
1580 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
1581 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
1582 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
1584 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
1585 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
1586 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
1587 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
2046 atomic64_dec(&kvm->arch.cmma_dirty_pages);
2087 if (!kvm->arch.use_cmma)
2094 if (!peek && !kvm->arch.migration_mode)
2103 if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
2121 if (kvm->arch.migration_mode)
2122 args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
2147 if (!kvm->arch.use_cmma)
2277 set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
2296 clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
2406 if (kvm->arch.use_irqchip) {
2485 kvm->arch.user_cpu_state_ctrl = 1;
2537 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
2540 kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);
2547 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
2549 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
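
At 2537-2549 the CRYCB descriptor is the block address with its low bits reused as a format field, which works because the block is suitably aligned. A sketch with illustrative mask and format values:

#include <stdio.h>
#include <stdint.h>

#define FORMAT_MASK 0x3u	/* illustrative; stands in for CRYCB_FORMAT_MASK */

int main(void)
{
	uint32_t crycb_addr = 0x12340;	/* assumed suitably aligned block */
	uint32_t crycbd = crycb_addr;

	crycbd &= ~FORMAT_MASK;		/* clear the format bits */
	crycbd |= 0x2;			/* select a format, e.g. format 2 */
	printf("crycbd=%#x format=%u\n", crycbd, crycbd & FORMAT_MASK);
	return 0;
}
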
2555 struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;
2560 switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
2597 memset(&kvm->arch.crypto.crycb->apcb0, 0,
2598 sizeof(kvm->arch.crypto.crycb->apcb0));
2599 memset(&kvm->arch.crypto.crycb->apcb1, 0,
2600 sizeof(kvm->arch.crypto.crycb->apcb1));
2621 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
2628 kvm->arch.crypto.aes_kw = 1;
2629 kvm->arch.crypto.dea_kw = 1;
2630 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
2631 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
2632 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
2633 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
2638 if (kvm->arch.use_esca)
2639 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
2641 free_page((unsigned long)(kvm->arch.sca));
2642 kvm->arch.sca = NULL;
2671 rwlock_init(&kvm->arch.sca_lock);
2673 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
2674 if (!kvm->arch.sca)
2680 kvm->arch.sca = (struct bsca_block *)
2681 ((char *) kvm->arch.sca + sca_offset);
2686 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
2687 if (!kvm->arch.dbf)
2691 kvm->arch.sie_page2 =
2693 if (!kvm->arch.sie_page2)
2696 kvm->arch.sie_page2->kvm = kvm;
2697 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
2700 kvm->arch.model.fac_mask[i] = S390_lowcore.stfle_fac_list[i] &
2703 kvm->arch.model.fac_list[i] = S390_lowcore.stfle_fac_list[i] &
2706 kvm->arch.model.subfuncs = kvm_s390_available_subfunc;
2709 set_kvm_facility(kvm->arch.model.fac_mask, 138);
2710 set_kvm_facility(kvm->arch.model.fac_list, 138);
2712 set_kvm_facility(kvm->arch.model.fac_mask, 74);
2713 set_kvm_facility(kvm->arch.model.fac_list, 74);
2715 set_kvm_facility(kvm->arch.model.fac_mask, 147);
2716 set_kvm_facility(kvm->arch.model.fac_list, 147);
2720 set_kvm_facility(kvm->arch.model.fac_mask, 65);
2722 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
2723 kvm->arch.model.ibc = sclp.ibc & 0x0fff;
2727 mutex_init(&kvm->arch.float_int.ais_lock);
2728 spin_lock_init(&kvm->arch.float_int.lock);
2730 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
2731 init_waitqueue_head(&kvm->arch.ipte_wq);
2732 mutex_init(&kvm->arch.ipte_mutex);
2734 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
2738 kvm->arch.gmap = NULL;
2739 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
2742 kvm->arch.mem_limit = TASK_SIZE_MAX;
2744 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
2746 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
2747 if (!kvm->arch.gmap)
2749 kvm->arch.gmap->private = kvm;
2750 kvm->arch.gmap->pfault_enabled = 0;
2753 kvm->arch.use_pfmfi = sclp.has_pfmfi;
2754 kvm->arch.use_skf = sclp.has_skey;
2755 spin_lock_init(&kvm->arch.start_stop_lock);
2763 free_page((unsigned long)kvm->arch.sie_page2);
2764 debug_unregister(kvm->arch.dbf);
2782 gmap_remove(vcpu->arch.gmap);
2784 if (vcpu->kvm->arch.use_cmma)
2789 free_page((unsigned long)(vcpu->arch.sie_block));
2823 debug_unregister(kvm->arch.dbf);
2824 free_page((unsigned long)kvm->arch.sie_page2);
2826 gmap_remove(kvm->arch.gmap);
2836 vcpu->arch.gmap = gmap_create(current->mm, -1UL);
2837 if (!vcpu->arch.gmap)
2839 vcpu->arch.gmap->private = vcpu->kvm;
2848 read_lock(&vcpu->kvm->arch.sca_lock);
2849 if (vcpu->kvm->arch.use_esca) {
2850 struct esca_block *sca = vcpu->kvm->arch.sca;
2855 struct bsca_block *sca = vcpu->kvm->arch.sca;
2860 read_unlock(&vcpu->kvm->arch.sca_lock);
2866 struct bsca_block *sca = vcpu->kvm->arch.sca;
2869 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2870 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
2873 read_lock(&vcpu->kvm->arch.sca_lock);
2874 if (vcpu->kvm->arch.use_esca) {
2875 struct esca_block *sca = vcpu->kvm->arch.sca;
2877 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
2878 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2879 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
2880 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
2883 struct bsca_block *sca = vcpu->kvm->arch.sca;
2885 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
2886 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2887 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
2890 read_unlock(&vcpu->kvm->arch.sca_lock);
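
The SCA plumbing at 2848-2890 stores the 64-bit SCA origin as separate high and low words (scaoh/scaol) in each SIE block, masking the low six bits in the extended (ESCA) case to match the block's alignment. A sketch of the split:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t sca = 0x000000012345a040ULL;	/* hypothetical SCA address */

	uint32_t scaoh = (uint32_t)(sca >> 32);
	uint32_t scaol = (uint32_t)sca & ~0x3fU;	/* ESCA: clear alignment bits */

	printf("scaoh=%#x scaol=%#x\n", scaoh, scaol);
	return 0;
}
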
2913 struct bsca_block *old_sca = kvm->arch.sca;
2919 if (kvm->arch.use_esca)
2930 write_lock(&kvm->arch.sca_lock);
2935 vcpu->arch.sie_block->scaoh = scaoh;
2936 vcpu->arch.sie_block->scaol = scaol;
2937 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
2939 kvm->arch.sca = new_sca;
2940 kvm->arch.use_esca = 1;
2942 write_unlock(&kvm->arch.sca_lock);
2948 old_sca, kvm->arch.sca);
2967 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
2976 WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
2977 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
2978 vcpu->arch.cputm_start = get_tod_clock_fast();
2979 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
2985 WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
2986 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
2987 vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
2988 vcpu->arch.cputm_start = 0;
2989 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
2995 WARN_ON_ONCE(vcpu->arch.cputm_enabled);
2996 vcpu->arch.cputm_enabled = true;
3003 WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
3005 vcpu->arch.cputm_enabled = false;
3026 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
3027 if (vcpu->arch.cputm_enabled)
3028 vcpu->arch.cputm_start = get_tod_clock_fast();
3029 vcpu->arch.sie_block->cputm = cputm;
3030 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
3040 if (unlikely(!vcpu->arch.cputm_enabled))
3041 return vcpu->arch.sie_block->cputm;
3045 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
3051 value = vcpu->arch.sie_block->cputm;
3053 if (likely(vcpu->arch.cputm_start))
3054 value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
3055 } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
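
Lines 2976-3055 implement seqcount-protected CPU-timer accounting: the writer bumps an even/odd sequence around updates, and readers retry until they observe a stable even value. A user-space model with C11 atomics; note the kernel's retry masks the low bit (seq & ~1) so the vCPU thread can read during its own update, a refinement this sketch omits:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct cputm_model {
	atomic_uint seq;	/* even: stable; odd: update in progress */
	int64_t cputm;		/* remaining CPU timer value */
	int64_t start;		/* TOD when accounting started, 0 if stopped */
};

static int64_t tod_now(void) { return 1000; }	/* stand-in for get_tod_clock_fast() */

/* writer side: mirrors the start-accounting pattern above */
static void start_accounting(struct cputm_model *t)
{
	atomic_fetch_add_explicit(&t->seq, 1, memory_order_release);
	t->start = tod_now();
	atomic_fetch_add_explicit(&t->seq, 1, memory_order_release);
}

/* reader side: retry while a writer is active or raced with us */
static int64_t get_cpu_timer(struct cputm_model *t)
{
	unsigned int seq;
	int64_t value;

	do {
		seq = atomic_load_explicit(&t->seq, memory_order_acquire);
		value = t->cputm;
		if (t->start)	/* timer is running: subtract elapsed time */
			value -= tod_now() - t->start;
	} while ((seq & 1) ||
		 seq != atomic_load_explicit(&t->seq, memory_order_acquire));
	return value;
}

int main(void)
{
	struct cputm_model t = { .cputm = 5000 };

	start_accounting(&t);
	printf("remaining cputm: %lld\n", (long long)get_cpu_timer(&t));
	return 0;
}
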
3063 gmap_enable(vcpu->arch.enabled_gmap);
3065 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
3073 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
3076 vcpu->arch.enabled_gmap = gmap_get_enabled();
3077 gmap_disable(vcpu->arch.enabled_gmap);
3085 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
3086 vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
3090 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
3093 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
3094 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
3096 vcpu->arch.enabled_gmap = vcpu->arch.gmap;
3101 if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
3124 if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
3127 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
3128 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
3129 vcpu->arch.sie_block->eca &= ~ECA_APIE;
3130 vcpu->arch.sie_block->ecd &= ~ECD_ECC;
3132 if (vcpu->kvm->arch.crypto.apie)
3133 vcpu->arch.sie_block->eca |= ECA_APIE;
3136 if (vcpu->kvm->arch.crypto.aes_kw) {
3137 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
3140 vcpu->arch.sie_block->ecd |= ECD_ECC;
3143 if (vcpu->kvm->arch.crypto.dea_kw)
3144 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
3149 free_page(vcpu->arch.sie_block->cbrlo);
3150 vcpu->arch.sie_block->cbrlo = 0;
3155 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
3156 if (!vcpu->arch.sie_block->cbrlo)
3163 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
3165 vcpu->arch.sie_block->ibc = model->ibc;
3167 vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
3175 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
3188 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
3190 vcpu->arch.sie_block->ecb |= ECB_SRSI;
3192 vcpu->arch.sie_block->ecb |= ECB_TE;
3194 if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
3195 vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
3197 vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
3198 vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
3200 vcpu->arch.sie_block->eca |= ECA_CEI;
3202 vcpu->arch.sie_block->eca |= ECA_IB;
3204 vcpu->arch.sie_block->eca |= ECA_SII;
3206 vcpu->arch.sie_block->eca |= ECA_SIGPI;
3208 vcpu->arch.sie_block->eca |= ECA_VX;
3209 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
3212 vcpu->arch.sie_block->ecd |= ECD_MEF;
3214 vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
3215 if (vcpu->arch.sie_block->gd) {
3216 vcpu->arch.sie_block->eca |= ECA_AIV;
3218 vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
3220 vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
3222 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
3227 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
3229 if (vcpu->kvm->arch.use_cmma) {
3234 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3235 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
3237 vcpu->arch.sie_block->hpid = HPID_KVM;
3269 vcpu->arch.sie_block = &sie_page->sie_block;
3270 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
3273 vcpu->arch.sie_block->mso = 0;
3274 vcpu->arch.sie_block->msl = sclp.hamax;
3276 vcpu->arch.sie_block->icpua = vcpu->vcpu_id;
3277 spin_lock_init(&vcpu->arch.local_int.lock);
3278 vcpu->arch.sie_block->gd = (u32)(u64)vcpu->kvm->arch.gisa_int.origin;
3279 if (vcpu->arch.sie_block->gd && sclp.has_gisaf)
3280 vcpu->arch.sie_block->gd |= GISA_FORMAT1;
3281 seqcount_init(&vcpu->arch.cputm_seqcount);
3283 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
3316 vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
3317 trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
3326 gmap_remove(vcpu->arch.gmap);
3328 free_page((unsigned long)(vcpu->arch.sie_block));
3334 clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
3340 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
3345 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
3351 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
3356 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
3362 return atomic_read(&vcpu->arch.sie_block->prog20) &
3368 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
3379 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
3439 r = put_user(vcpu->arch.sie_block->todpr,
3443 r = put_user(vcpu->arch.sie_block->epoch,
3451 r = put_user(vcpu->arch.sie_block->ckc,
3455 r = put_user(vcpu->arch.pfault_token,
3459 r = put_user(vcpu->arch.pfault_compare,
3463 r = put_user(vcpu->arch.pfault_select,
3467 r = put_user(vcpu->arch.sie_block->pp,
3471 r = put_user(vcpu->arch.sie_block->gbea,
3489 r = get_user(vcpu->arch.sie_block->todpr,
3493 r = get_user(vcpu->arch.sie_block->epoch,
3502 r = get_user(vcpu->arch.sie_block->ckc,
3506 r = get_user(vcpu->arch.pfault_token,
3508 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3512 r = get_user(vcpu->arch.pfault_compare,
3516 r = get_user(vcpu->arch.pfault_select,
3520 r = get_user(vcpu->arch.sie_block->pp,
3524 r = get_user(vcpu->arch.sie_block->gbea,
3536 vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI;
3537 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
3555 vcpu->arch.sie_block->gpsw.mask = 0;
3556 vcpu->arch.sie_block->gpsw.addr = 0;
3559 vcpu->arch.sie_block->ckc = 0;
3560 memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr));
3561 vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK;
3562 vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK;
3583 vcpu->arch.sie_block->gbea = 1;
3584 vcpu->arch.sie_block->pp = 0;
3585 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
3586 vcpu->arch.sie_block->todpr = 0;
3628 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
3640 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
3731 vcpu->arch.guestdbg.last_bp = 0;
3768 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
3814 rc = gmap_mprotect_notify(vcpu->arch.gmap,
3825 vcpu->arch.sie_block->ihcpu = 0xffff;
3846 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
3856 vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
3865 if ((vcpu->kvm->arch.use_cmma) &&
3867 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
3889 kvm->arch.epoch = gtod->tod - htod.tod;
3890 kvm->arch.epdx = 0;
3892 kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
3893 if (kvm->arch.epoch > gtod->tod)
3894 kvm->arch.epdx -= 1;
3899 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
3900 vcpu->arch.sie_block->epdx = kvm->arch.epdx;
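
Lines 3889-3900 compute the guest/host TOD offset as a 128-bit quantity (epdx:epoch), borrowing from the epoch index when the 64-bit subtraction wraps, and then propagate the result into each vCPU's SIE block. A worked sketch of the borrow with toy values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t gtod = 5, htod = 10;	/* low 64 TOD bits (toy values) */
	uint8_t  gidx = 1, hidx = 0;	/* epoch indices (high part) */

	uint64_t epoch = gtod - htod;	/* unsigned subtraction may wrap */
	uint8_t  epdx  = gidx - hidx;
	if (epoch > gtod)		/* wrap occurred: borrow from the index */
		epdx -= 1;

	printf("epoch=%#llx epdx=%u\n",
	       (unsigned long long)epoch, (unsigned)epdx);
	return 0;
}
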
3928 return gmap_fault(vcpu->arch.gmap, gpa,
3952 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
3953 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
3961 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
3962 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
3983 struct kvm_arch_async_pf arch;
3985 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3987 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
3988 vcpu->arch.pfault_compare)
3994 if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
3996 if (!vcpu->arch.gmap->pfault_enabled)
4001 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
4004 return kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
4018 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
4019 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
4039 clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.gisa_int.kicked_mask);
4041 vcpu->arch.sie_block->icptcode = 0;
4042 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
4068 rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
4077 pgm_info = vcpu->arch.pgm;
4091 vcpu->arch.sie_block->icptcode);
4092 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
4097 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
4098 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
4102 sie_page = container_of(vcpu->arch.sie_block,
4109 if (vcpu->arch.sie_block->icptcode > 0) {
4115 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
4116 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
4117 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
4142 struct sie_page *sie_page = (struct sie_page *)vcpu->arch.sie_block;
4169 exit_reason = sie64a(vcpu->arch.sie_block,
4181 if (vcpu->arch.sie_block->icptcode == ICPT_PV_INSTR ||
4182 vcpu->arch.sie_block->icptcode == ICPT_PV_PREF) {
4183 vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
4207 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
4208 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
4210 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
4211 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
4212 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
4215 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
4216 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
4217 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
4218 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
4222 vcpu->arch.diag318_info.val = kvm_run->s.regs.diag318;
4223 vcpu->arch.sie_block->cpnc = vcpu->arch.diag318_info.cpnc;
4232 !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
4234 vcpu->arch.sie_block->ecb3 |= ECB3_RI;
4243 !vcpu->arch.gs_enabled) {
4245 vcpu->arch.sie_block->ecb |= ECB_GS;
4246 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
4247 vcpu->arch.gs_enabled = 1;
4251 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
4252 vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
4258 vcpu->arch.host_gscb = current->thread.gs_cb;
4259 save_gs_cb(vcpu->arch.host_gscb);
4261 if (vcpu->arch.gs_enabled) {
4278 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
4284 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
4286 save_access_regs(vcpu->arch.host_acrs);
4290 vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
4291 vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
4314 vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_CC;
4315 vcpu->arch.sie_block->gpsw.mask |= kvm_run->psw_mask &
4326 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
4327 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
4328 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
4329 kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
4330 kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val;
4334 if (vcpu->arch.gs_enabled)
4336 current->thread.gs_cb = vcpu->arch.host_gscb;
4337 restore_gs_cb(vcpu->arch.host_gscb);
4338 if (!vcpu->arch.host_gscb)
4340 vcpu->arch.host_gscb = NULL;
4350 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
4351 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
4353 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
4355 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
4356 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
4357 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
4358 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
4360 restore_access_regs(vcpu->arch.host_acrs);
4365 current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
4366 current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
4476 &vcpu->arch.sie_block->gpsw, 16);
4482 &vcpu->arch.sie_block->todpr, 4);
4486 clkcomp = vcpu->arch.sie_block->ckc >> 8;
4492 &vcpu->arch.sie_block->gcr, 128);
4543 spin_lock(&vcpu->kvm->arch.start_stop_lock);
4550 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
4579 vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
4585 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
4599 spin_lock(&vcpu->kvm->arch.start_stop_lock);
4606 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
4637 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
4651 if (!vcpu->kvm->arch.css_support) {
4652 vcpu->kvm->arch.css_support = 1;
4675 if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block))
4682 if (copy_to_user(uaddr, (void *)(sida_origin(vcpu->arch.sie_block) +
4688 if (copy_from_user((void *)(sida_origin(vcpu->arch.sie_block) +
4747 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
4892 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
4909 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
4915 r = gmap_fault(vcpu->arch.gmap, arg, 0);
4983 vmf->page = virt_to_page(vcpu->arch.sie_block);
5008 if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
5015 if (!kvm->arch.migration_mode)
5044 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
5048 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
5054 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,