Lines matching refs:arch — references to the kvm->arch and vcpu->arch fields in the s390 KVM code. The leading number on each entry is the source line of the match.

309 kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
311 kvm->arch.epoch = vcpu->arch.sie_block->epoch;
312 kvm->arch.epdx = vcpu->arch.sie_block->epdx;
314 if (vcpu->arch.cputm_enabled)
315 vcpu->arch.cputm_start += *delta;
316 if (vcpu->arch.vsie_block)
317 kvm_clock_sync_scb(vcpu->arch.vsie_block,
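
Lines 309-317 are the host clock-sync notifier folding a TOD delta into every control block: the plain epoch, the epoch index (epdx) when the multiple-epoch facility is in use, the CPU-timer start stamp, and any nested vsie block. The subtle part is the carry from the 64-bit epoch into epdx; a minimal user-space sketch of that rule (simplified types, positive delta only):

    #include <stdint.h>
    #include <stdio.h>

    struct guest_epoch {
        uint8_t  epdx;    /* epoch index: high-order extension of the epoch */
        uint64_t epoch;   /* offset added to the host TOD for this guest    */
    };

    /* Fold a host clock delta into the guest epoch, carrying into epdx when
     * the 64-bit addition wraps (a simplification of what kvm_clock_sync_scb()
     * must get right; a negative delta would need the symmetric borrow). */
    static void clock_sync(struct guest_epoch *g, uint8_t d_epdx, uint64_t d_epoch)
    {
        uint64_t old = g->epoch;

        g->epoch += d_epoch;
        g->epdx  += d_epdx;
        if (g->epoch < old)          /* wraparound => carry into the index */
            g->epdx += 1;
    }

    int main(void)
    {
        struct guest_epoch g = { 0, UINT64_MAX };

        clock_sync(&g, 0, 2);        /* wraps: epoch becomes 1, epdx becomes 1 */
        printf("epdx=%u epoch=%llu\n", g.epdx, (unsigned long long)g.epoch);
        return 0;
    }
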
675 struct gmap *gmap = kvm->arch.gmap;
758 kvm->arch.use_irqchip = 1;
763 kvm->arch.user_sigp = 1;
771 set_kvm_facility(kvm->arch.model.fac_mask, 129);
772 set_kvm_facility(kvm->arch.model.fac_list, 129);
774 set_kvm_facility(kvm->arch.model.fac_mask, 134);
775 set_kvm_facility(kvm->arch.model.fac_list, 134);
778 set_kvm_facility(kvm->arch.model.fac_mask, 135);
779 set_kvm_facility(kvm->arch.model.fac_list, 135);
782 set_kvm_facility(kvm->arch.model.fac_mask, 148);
783 set_kvm_facility(kvm->arch.model.fac_list, 148);
786 set_kvm_facility(kvm->arch.model.fac_mask, 152);
787 set_kvm_facility(kvm->arch.model.fac_list, 152);
790 set_kvm_facility(kvm->arch.model.fac_mask, 192);
791 set_kvm_facility(kvm->arch.model.fac_list, 192);
806 set_kvm_facility(kvm->arch.model.fac_mask, 64);
807 set_kvm_facility(kvm->arch.model.fac_list, 64);
819 set_kvm_facility(kvm->arch.model.fac_mask, 72);
820 set_kvm_facility(kvm->arch.model.fac_list, 72);
833 set_kvm_facility(kvm->arch.model.fac_mask, 133);
834 set_kvm_facility(kvm->arch.model.fac_list, 133);
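
The set_kvm_facility() pairs at lines 771-834 (and again at 881-882 and 3361-3372 below) enable one STFLE facility bit in both the facility mask, which bounds what can be offered, and the facility list, which is the guest's initial model. Facility numbering is MSB-first, so the bit arithmetic runs "backwards"; an illustrative stand-in for the helper (not the kernel's exact code):

    #include <stdint.h>
    #include <stdio.h>

    /* Set facility bit 'nr' in an STFLE-style list: bit 0 is the most
     * significant bit of the first doubleword, so the shift is inverted
     * relative to ordinary bitmaps. */
    static void set_fac_bit(uint64_t *fac_list, unsigned int nr)
    {
        fac_list[nr / 64] |= 1ULL << (63 - (nr % 64));
    }

    int main(void)
    {
        uint64_t fac[4] = { 0 };

        set_fac_bit(fac, 129);   /* facilities toggled in the listing above */
        set_fac_bit(fac, 134);
        printf("fac[2] = %016llx\n", (unsigned long long)fac[2]);
        return 0;
    }
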
845 else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
857 kvm->arch.use_skf = 0;
858 kvm->arch.use_pfmfi = 0;
866 kvm->arch.user_stsi = 1;
871 kvm->arch.user_instr0 = 1;
881 set_kvm_facility(kvm->arch.model.fac_mask, 11);
882 set_kvm_facility(kvm->arch.model.fac_list, 11);
904 kvm->arch.mem_limit);
905 if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
932 kvm->arch.use_cmma = 1;
934 kvm->arch.use_pfmfi = 0;
944 if (!kvm->arch.use_cmma)
950 s390_reset_cmma(kvm->arch.gmap->mm);
964 if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
965 new_limit > kvm->arch.mem_limit)
984 gmap_remove(kvm->arch.gmap);
986 kvm->arch.gmap = new;
993 (void *) kvm->arch.gmap->asce);
1031 kvm->arch.crypto.crycb->aes_wrapping_key_mask,
1032 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
1033 kvm->arch.crypto.aes_kw = 1;
1042 kvm->arch.crypto.crycb->dea_wrapping_key_mask,
1043 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
1044 kvm->arch.crypto.dea_kw = 1;
1052 kvm->arch.crypto.aes_kw = 0;
1053 memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
1054 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
1062 kvm->arch.crypto.dea_kw = 0;
1063 memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
1064 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
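
Lines 1031-1064 are the crypto attribute handlers in miniature: enabling AES or DEA key wrapping flags it in kvm->arch.crypto and installs a freshly randomized wrapping-key mask in the CRYCB; disabling clears both. A user-space sketch of the AES pair, with getrandom(2) standing in for get_random_bytes() and an illustrative mask size:

    #include <stdbool.h>
    #include <string.h>
    #include <sys/random.h>

    struct crypto_state {
        bool aes_kw;
        unsigned char aes_wrapping_key_mask[32];   /* size illustrative */
    };

    /* Enable: install a fresh random wrapping-key mask; disable: wipe it.
     * Mirrors the get_random_bytes()/memset() pairs in the listing, with
     * getrandom(2) as the user-space stand-in for the kernel RNG. */
    static void toggle_aes_kw(struct crypto_state *c, bool enable)
    {
        if (enable)
            getrandom(c->aes_wrapping_key_mask,
                      sizeof(c->aes_wrapping_key_mask), 0);
        else
            memset(c->aes_wrapping_key_mask, 0,
                   sizeof(c->aes_wrapping_key_mask));
        c->aes_kw = enable;
    }
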
1072 kvm->arch.crypto.apie = 1;
1079 kvm->arch.crypto.apie = 0;
1094 if (!vcpu->kvm->arch.use_zpci_interp)
1097 vcpu->arch.sie_block->ecb2 |= ECB2_ZPCI_LSI;
1098 vcpu->arch.sie_block->ecb3 |= ECB3_AISII + ECB3_AISI;
1115 kvm->arch.use_zpci_interp = 1;
1148 if (kvm->arch.migration_mode)
1154 if (!kvm->arch.use_cmma) {
1155 kvm->arch.migration_mode = 1;
1171 atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
1172 kvm->arch.migration_mode = 1;
1184 if (!kvm->arch.migration_mode)
1186 kvm->arch.migration_mode = 0;
1187 if (kvm->arch.use_cmma)
1216 u64 mig = kvm->arch.migration_mode;
1319 gtod->tod = clk.tod + kvm->arch.epoch;
1322 gtod->epoch_idx = clk.ei + kvm->arch.epdx;
1410 kvm->arch.model.cpuid = proc->cpuid;
1415 kvm->arch.model.ibc = unblocked_ibc;
1417 kvm->arch.model.ibc = lowest_ibc;
1419 kvm->arch.model.ibc = proc->ibc;
1421 memcpy(kvm->arch.model.fac_list, proc->fac_list,
1424 kvm->arch.model.ibc,
1425 kvm->arch.model.cpuid);
1427 kvm->arch.model.fac_list[0],
1428 kvm->arch.model.fac_list[1],
1429 kvm->arch.model.fac_list[2]);
1455 bitmap_from_arr64(kvm->arch.cpu_feat, data.feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
1473 if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
1481 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
1482 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
1483 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
1484 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
1486 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
1487 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
1489 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
1490 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
1492 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
1493 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
1495 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
1496 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
1498 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
1499 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
1501 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
1502 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
1504 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
1505 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
1507 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
1508 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
1510 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
1511 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
1513 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
1514 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
1516 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
1517 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
1519 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
1520 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
1522 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
1523 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
1525 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
1526 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
1528 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
1529 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
1530 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
1531 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
1533 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
1534 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
1535 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
1536 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
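
Each line from 1481 onward dumps one query-subfunction bitmask of the freshly set CPU model, long by long (16 bytes for most functions, 32 for PLO, SORTL and DFLTCC). The repeated pattern boils down to something like the following, with printf standing in for the kernel's VM_EVENT logging:

    #include <stdio.h>

    /* Print one 16-byte query-subfunction mask long by long, mimicking the
     * format visible in the listing above. */
    static void dump_subfunc16(const char *name, const unsigned long *mask)
    {
        printf("%-6s subfunc 0x%16.16lx.%16.16lx\n", name, mask[0], mask[1]);
    }

    int main(void)
    {
        unsigned long kmac[2] = { 0xf070000000000000UL, 0 };   /* sample data */

        dump_subfunc16("KMAC", kmac);
        return 0;
    }
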
1566 kvm->arch.model.uv_feat_guest.feat = data;
1605 proc->cpuid = kvm->arch.model.cpuid;
1606 proc->ibc = kvm->arch.model.ibc;
1607 memcpy(&proc->fac_list, kvm->arch.model.fac_list,
1610 kvm->arch.model.ibc,
1611 kvm->arch.model.cpuid);
1613 kvm->arch.model.fac_list[0],
1614 kvm->arch.model.fac_list[1],
1615 kvm->arch.model.fac_list[2]);
1635 memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
1640 kvm->arch.model.ibc,
1641 kvm->arch.model.cpuid);
1662 bitmap_to_arr64(data.feat, kvm->arch.cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
1690 if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
1695 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
1696 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
1697 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
1698 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
1700 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
1701 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
1703 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
1704 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
1706 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
1707 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
1709 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
1710 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
1712 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
1713 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
1715 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
1716 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
1718 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
1719 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
1721 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
1722 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
1724 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
1725 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
1727 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
1728 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
1730 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
1731 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
1733 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
1734 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
1736 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
1737 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
1739 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
1740 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
1742 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
1743 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
1744 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
1745 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
1747 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
1748 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
1749 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
1750 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
1826 unsigned long feat = kvm->arch.model.uv_feat_guest.feat;
1899 read_lock(&kvm->arch.sca_lock);
1900 sca = kvm->arch.sca;
1906 read_unlock(&kvm->arch.sca_lock);
1927 read_lock(&kvm->arch.sca_lock);
1928 topo = ((struct bsca_block *)kvm->arch.sca)->utility.mtcr;
1929 read_unlock(&kvm->arch.sca_lock);
2274 atomic64_dec(&kvm->arch.cmma_dirty_pages);
2315 if (!kvm->arch.use_cmma)
2322 if (!peek && !kvm->arch.migration_mode)
2331 if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
2349 if (kvm->arch.migration_mode)
2350 args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
2375 if (!kvm->arch.use_cmma)
2557 if (kvm->arch.pv.dumping)
2571 kvm->arch.pv.dumping = true;
2579 if (!kvm->arch.pv.dumping)
2592 if (!kvm->arch.pv.dumping)
2650 set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
2669 clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
2694 clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
3005 if (kvm->arch.use_irqchip) {
3157 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
3160 kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);
3167 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
3169 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
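
Lines 3157-3169 build the CRYCB designation: the control block's address provides the origin, and the low-order bits carry a format tag (format 2 when the extended APCB layout applies, else format 1). A sketch of the packing; the constant values are assumptions:

    #include <stdint.h>

    #define CRYCB_FORMAT_MASK 0x00000003u   /* assumed, per the masking above */
    #define CRYCB_FORMAT1     0x00000001u
    #define CRYCB_FORMAT2     0x00000003u

    /* Pack the crycb origin and its format tag into the 32-bit designation
     * word, as the crycbd lines above do. */
    static uint32_t make_crycbd(const void *crycb, uint32_t format)
    {
        uint32_t d = (uint32_t)(uintptr_t)crycb;   /* origin: low 32 bits */

        d &= ~CRYCB_FORMAT_MASK;
        return d | format;
    }
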
3190 struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;
3194 switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
3241 memset(&kvm->arch.crypto.crycb->apcb0, 0,
3242 sizeof(kvm->arch.crypto.crycb->apcb0));
3243 memset(&kvm->arch.crypto.crycb->apcb1, 0,
3244 sizeof(kvm->arch.crypto.crycb->apcb1));
3264 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
3266 init_rwsem(&kvm->arch.crypto.pqap_hook_rwsem);
3272 kvm->arch.crypto.aes_kw = 1;
3273 kvm->arch.crypto.dea_kw = 1;
3274 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
3275 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
3276 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
3277 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
3282 if (kvm->arch.use_esca)
3283 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
3285 free_page((unsigned long)(kvm->arch.sca));
3286 kvm->arch.sca = NULL;
3323 rwlock_init(&kvm->arch.sca_lock);
3325 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
3326 if (!kvm->arch.sca)
3332 kvm->arch.sca = (struct bsca_block *)
3333 ((char *) kvm->arch.sca + sca_offset);
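
Lines 3325-3333 allocate the basic SCA from a zeroed page and then shift it by a global sca_offset. A bsca_block is much smaller than a page, so staggering successive VMs' SCAs spreads them over different cache lines. A sketch of the staggering (step and bound are illustrative; the kernel serializes the global under a lock):

    #include <stddef.h>

    static unsigned int sca_offset;   /* the kernel guards this global with a lock */

    /* Rotate the placement of each new SCA inside its zeroed page so the
     * SCAs of different VMs do not all start on the same cache lines. */
    static void *stagger_sca(void *page, size_t sca_size, size_t page_size)
    {
        sca_offset += 16;
        if (sca_offset + sca_size > page_size)
            sca_offset = 0;           /* wrap back to the page start */
        return (char *)page + sca_offset;
    }
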
3338 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
3339 if (!kvm->arch.dbf)
3343 kvm->arch.sie_page2 =
3345 if (!kvm->arch.sie_page2)
3348 kvm->arch.sie_page2->kvm = kvm;
3349 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
3352 kvm->arch.model.fac_mask[i] = stfle_fac_list[i] &
3355 kvm->arch.model.fac_list[i] = stfle_fac_list[i] &
3358 kvm->arch.model.subfuncs = kvm_s390_available_subfunc;
3361 set_kvm_facility(kvm->arch.model.fac_mask, 138);
3362 set_kvm_facility(kvm->arch.model.fac_list, 138);
3364 set_kvm_facility(kvm->arch.model.fac_mask, 74);
3365 set_kvm_facility(kvm->arch.model.fac_list, 74);
3367 set_kvm_facility(kvm->arch.model.fac_mask, 147);
3368 set_kvm_facility(kvm->arch.model.fac_list, 147);
3372 set_kvm_facility(kvm->arch.model.fac_mask, 65);
3374 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
3375 kvm->arch.model.ibc = sclp.ibc & 0x0fff;
3377 kvm->arch.model.uv_feat_guest.feat = 0;
3388 mutex_init(&kvm->arch.float_int.ais_lock);
3389 spin_lock_init(&kvm->arch.float_int.lock);
3391 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
3392 init_waitqueue_head(&kvm->arch.ipte_wq);
3393 mutex_init(&kvm->arch.ipte_mutex);
3395 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
3399 kvm->arch.gmap = NULL;
3400 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
3403 kvm->arch.mem_limit = TASK_SIZE_MAX;
3405 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
3407 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
3408 if (!kvm->arch.gmap)
3410 kvm->arch.gmap->private = kvm;
3411 kvm->arch.gmap->pfault_enabled = 0;
3414 kvm->arch.use_pfmfi = sclp.has_pfmfi;
3415 kvm->arch.use_skf = sclp.has_skey;
3416 spin_lock_init(&kvm->arch.start_stop_lock);
3420 INIT_LIST_HEAD(&kvm->arch.pv.need_cleanup);
3421 kvm->arch.pv.set_aside = NULL;
3426 free_page((unsigned long)kvm->arch.sie_page2);
3427 debug_unregister(kvm->arch.dbf);
3446 gmap_remove(vcpu->arch.gmap);
3448 if (vcpu->kvm->arch.use_cmma)
3453 free_page((unsigned long)(vcpu->arch.sie_block));
3475 if (kvm->arch.pv.mmu_notifier.ops)
3476 mmu_notifier_unregister(&kvm->arch.pv.mmu_notifier, kvm->mm);
3478 debug_unregister(kvm->arch.dbf);
3479 free_page((unsigned long)kvm->arch.sie_page2);
3481 gmap_remove(kvm->arch.gmap);
3491 vcpu->arch.gmap = gmap_create(current->mm, -1UL);
3492 if (!vcpu->arch.gmap)
3494 vcpu->arch.gmap->private = vcpu->kvm;
3503 read_lock(&vcpu->kvm->arch.sca_lock);
3504 if (vcpu->kvm->arch.use_esca) {
3505 struct esca_block *sca = vcpu->kvm->arch.sca;
3510 struct bsca_block *sca = vcpu->kvm->arch.sca;
3515 read_unlock(&vcpu->kvm->arch.sca_lock);
3521 phys_addr_t sca_phys = virt_to_phys(vcpu->kvm->arch.sca);
3524 vcpu->arch.sie_block->scaoh = sca_phys >> 32;
3525 vcpu->arch.sie_block->scaol = sca_phys;
3528 read_lock(&vcpu->kvm->arch.sca_lock);
3529 if (vcpu->kvm->arch.use_esca) {
3530 struct esca_block *sca = vcpu->kvm->arch.sca;
3533 sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block);
3534 vcpu->arch.sie_block->scaoh = sca_phys >> 32;
3535 vcpu->arch.sie_block->scaol = sca_phys & ESCA_SCAOL_MASK;
3536 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
3539 struct bsca_block *sca = vcpu->kvm->arch.sca;
3542 sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block);
3543 vcpu->arch.sie_block->scaoh = sca_phys >> 32;
3544 vcpu->arch.sie_block->scaol = sca_phys;
3547 read_unlock(&vcpu->kvm->arch.sca_lock);
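
sca_add_vcpu() at lines 3521-3547 registers the vcpu's sie_block in the SCA and records the SCA origin in the two 32-bit control-block fields scaoh/scaol. That is a plain high/low split of the physical address, with the ESCA variant masking the alignment bits of the low word; a small sketch (the mask value is an assumption):

    #include <stdint.h>

    #define ESCA_SCAOL_MASK (~0x3fU)   /* assumed: origin is 64-byte aligned */

    struct sca_origin { uint32_t scaoh, scaol; };

    /* Split a 64-bit SCA physical address into the high/low origin words the
     * SIE control block expects (cf. the scaoh/scaol assignments above). */
    static struct sca_origin split_sca(uint64_t sca_phys, int esca)
    {
        struct sca_origin o = {
            .scaoh = (uint32_t)(sca_phys >> 32),
            .scaol = (uint32_t)sca_phys,
        };

        if (esca)
            o.scaol &= ESCA_SCAOL_MASK;
        return o;
    }
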
3570 struct bsca_block *old_sca = kvm->arch.sca;
3577 if (kvm->arch.use_esca)
3589 write_lock(&kvm->arch.sca_lock);
3594 vcpu->arch.sie_block->scaoh = scaoh;
3595 vcpu->arch.sie_block->scaol = scaol;
3596 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
3598 kvm->arch.sca = new_sca;
3599 kvm->arch.use_esca = 1;
3601 write_unlock(&kvm->arch.sca_lock);
3607 old_sca, kvm->arch.sca);
3625 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
3633 WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
3634 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
3635 vcpu->arch.cputm_start = get_tod_clock_fast();
3636 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
3642 WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
3643 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
3644 vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
3645 vcpu->arch.cputm_start = 0;
3646 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
3652 WARN_ON_ONCE(vcpu->arch.cputm_enabled);
3653 vcpu->arch.cputm_enabled = true;
3660 WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
3662 vcpu->arch.cputm_enabled = false;
3683 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
3684 if (vcpu->arch.cputm_enabled)
3685 vcpu->arch.cputm_start = get_tod_clock_fast();
3686 vcpu->arch.sie_block->cputm = cputm;
3687 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
3697 if (unlikely(!vcpu->arch.cputm_enabled))
3698 return vcpu->arch.sie_block->cputm;
3702 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
3708 value = vcpu->arch.sie_block->cputm;
3710 if (likely(vcpu->arch.cputm_start))
3711 value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
3712 } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
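
Lines 3633-3712 are the guest CPU-timer accounting: writers bracket updates of cputm and cputm_start with a sequence counter, and the lockless reader at 3702-3712 retries whenever the counter was odd or moved underneath it. A user-space analogue using C11 atomics (the kernel uses its seqcount_t API; memory ordering is elided here):

    #include <stdatomic.h>
    #include <stdint.h>

    struct cpu_timer {
        atomic_uint seq;          /* even = stable, odd = update in flight */
        uint64_t    cputm;        /* timer value at the last start/stop    */
        uint64_t    cputm_start;  /* TOD at start, 0 while stopped         */
    };

    uint64_t tod_clock_fast(void);   /* hypothetical TOD source */

    static void start_cpu_timer(struct cpu_timer *t)
    {
        atomic_fetch_add(&t->seq, 1);            /* begin write: count odd */
        t->cputm_start = tod_clock_fast();
        atomic_fetch_add(&t->seq, 1);            /* end write: count even  */
    }

    static uint64_t get_cpu_timer(struct cpu_timer *t)
    {
        unsigned int seq;
        uint64_t value;

        do {
            seq   = atomic_load(&t->seq);
            value = t->cputm;
            if (t->cputm_start)                  /* timer currently running */
                value -= tod_clock_fast() - t->cputm_start;
        } while (atomic_load(&t->seq) != (seq & ~1u));  /* odd/changed: retry */
        return value;
    }
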
3720 gmap_enable(vcpu->arch.enabled_gmap);
3722 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
3730 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
3733 vcpu->arch.enabled_gmap = gmap_get_enabled();
3734 gmap_disable(vcpu->arch.enabled_gmap);
3742 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
3743 vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
3747 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
3750 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
3751 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
3753 vcpu->arch.enabled_gmap = vcpu->arch.gmap;
3758 if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
3781 if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
3784 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
3785 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
3786 vcpu->arch.sie_block->eca &= ~ECA_APIE;
3787 vcpu->arch.sie_block->ecd &= ~ECD_ECC;
3789 if (vcpu->kvm->arch.crypto.apie)
3790 vcpu->arch.sie_block->eca |= ECA_APIE;
3793 if (vcpu->kvm->arch.crypto.aes_kw) {
3794 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
3797 vcpu->arch.sie_block->ecd |= ECD_ECC;
3800 if (vcpu->kvm->arch.crypto.dea_kw)
3801 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
3806 free_page((unsigned long)phys_to_virt(vcpu->arch.sie_block->cbrlo));
3807 vcpu->arch.sie_block->cbrlo = 0;
3817 vcpu->arch.sie_block->cbrlo = virt_to_phys(cbrlo_page);
3823 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
3825 vcpu->arch.sie_block->ibc = model->ibc;
3827 vcpu->arch.sie_block->fac = virt_to_phys(model->fac_list);
3835 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
3848 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
3850 vcpu->arch.sie_block->ecb |= ECB_SRSI;
3852 vcpu->arch.sie_block->ecb |= ECB_PTF;
3854 vcpu->arch.sie_block->ecb |= ECB_TE;
3856 vcpu->arch.sie_block->ecb |= ECB_SPECI;
3858 if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
3859 vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
3861 vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
3862 vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
3864 vcpu->arch.sie_block->eca |= ECA_CEI;
3866 vcpu->arch.sie_block->eca |= ECA_IB;
3868 vcpu->arch.sie_block->eca |= ECA_SII;
3870 vcpu->arch.sie_block->eca |= ECA_SIGPI;
3872 vcpu->arch.sie_block->eca |= ECA_VX;
3873 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
3876 vcpu->arch.sie_block->ecd |= ECD_MEF;
3878 vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
3879 if (vcpu->arch.sie_block->gd) {
3880 vcpu->arch.sie_block->eca |= ECA_AIV;
3882 vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
3884 vcpu->arch.sie_block->sdnxo = virt_to_phys(&vcpu->run->s.regs.sdnx) | SDNXC;
3885 vcpu->arch.sie_block->riccbd = virt_to_phys(&vcpu->run->s.regs.riccb);
3890 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
3892 if (vcpu->kvm->arch.use_cmma) {
3897 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3898 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
3900 vcpu->arch.sie_block->hpid = HPID_KVM;
3934 vcpu->arch.sie_block = &sie_page->sie_block;
3935 vcpu->arch.sie_block->itdba = virt_to_phys(&sie_page->itdb);
3938 vcpu->arch.sie_block->mso = 0;
3939 vcpu->arch.sie_block->msl = sclp.hamax;
3941 vcpu->arch.sie_block->icpua = vcpu->vcpu_id;
3942 spin_lock_init(&vcpu->arch.local_int.lock);
3943 vcpu->arch.sie_block->gd = kvm_s390_get_gisa_desc(vcpu->kvm);
3944 seqcount_init(&vcpu->arch.cputm_seqcount);
3946 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
3979 vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
3980 trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
3991 gmap_remove(vcpu->arch.gmap);
3993 free_page((unsigned long)(vcpu->arch.sie_block));
3999 clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
4005 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
4010 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
4016 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
4021 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
4027 return atomic_read(&vcpu->arch.sie_block->prog20) &
4033 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
4044 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
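
The prog20/prog0c lines at 4010-4044 implement the "kick the vcpu out of SIE" handshake: a requester sets PROG_BLOCK_SIE or PROG_REQUEST in prog20, which the entry path honors, then waits for PROG_IN_SIE in prog0c to drop. A stand-alone sketch with C11 atomics; the flag values are assumptions:

    #include <stdatomic.h>

    #define PROG_IN_SIE    0x0001u   /* flag values are assumptions */
    #define PROG_BLOCK_SIE 0x0001u
    #define PROG_REQUEST   0x0002u

    struct sie_prog {
        atomic_uint prog20;   /* request bits checked before SIE entry */
        atomic_uint prog0c;   /* PROG_IN_SIE set while the vcpu runs   */
    };

    /* Block further SIE entry, then wait until the vcpu has left SIE --
     * the same ordering as kvm_s390_vcpu_block() plus exit_sie() above. */
    static void block_and_drain(struct sie_prog *p)
    {
        atomic_fetch_or(&p->prog20, PROG_BLOCK_SIE);
        while (atomic_load(&p->prog0c) & PROG_IN_SIE)
            ;   /* the kernel relaxes the cpu in this loop */
    }

    static void unblock(struct sie_prog *p)
    {
        atomic_fetch_and(&p->prog20, ~PROG_BLOCK_SIE);
    }
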
4104 r = put_user(vcpu->arch.sie_block->todpr,
4108 r = put_user(vcpu->arch.sie_block->epoch,
4116 r = put_user(vcpu->arch.sie_block->ckc,
4120 r = put_user(vcpu->arch.pfault_token,
4124 r = put_user(vcpu->arch.pfault_compare,
4128 r = put_user(vcpu->arch.pfault_select,
4132 r = put_user(vcpu->arch.sie_block->pp,
4136 r = put_user(vcpu->arch.sie_block->gbea,
4154 r = get_user(vcpu->arch.sie_block->todpr,
4158 r = get_user(vcpu->arch.sie_block->epoch,
4167 r = get_user(vcpu->arch.sie_block->ckc,
4171 r = get_user(vcpu->arch.pfault_token,
4173 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
4177 r = get_user(vcpu->arch.pfault_compare,
4181 r = get_user(vcpu->arch.pfault_select,
4185 r = get_user(vcpu->arch.sie_block->pp,
4189 r = get_user(vcpu->arch.sie_block->gbea,
4201 vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI;
4202 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
4220 vcpu->arch.sie_block->gpsw.mask = 0;
4221 vcpu->arch.sie_block->gpsw.addr = 0;
4224 vcpu->arch.sie_block->ckc = 0;
4225 memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr));
4226 vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK;
4227 vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK;
4248 vcpu->arch.sie_block->gbea = 1;
4249 vcpu->arch.sie_block->pp = 0;
4250 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
4251 vcpu->arch.sie_block->todpr = 0;
4293 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
4305 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
4396 vcpu->arch.guestdbg.last_bp = 0;
4479 rc = gmap_mprotect_notify(vcpu->arch.gmap,
4490 vcpu->arch.sie_block->ihcpu = 0xffff;
4511 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
4521 vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
4530 if ((vcpu->kvm->arch.use_cmma) &&
4532 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
4552 kvm->arch.epoch = gtod->tod - clk.tod;
4553 kvm->arch.epdx = 0;
4555 kvm->arch.epdx = gtod->epoch_idx - clk.ei;
4556 if (kvm->arch.epoch > gtod->tod)
4557 kvm->arch.epdx -= 1;
4562 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
4563 vcpu->arch.sie_block->epdx = kvm->arch.epdx;
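
Lines 4552-4563 set the guest TOD, the inverse of the sync path shown earlier: the epoch becomes the requested TOD minus the host TOD, a u64 underflow borrows from the epoch index (multiple-epoch case), and the pair is then copied into every vcpu's control block. The borrow test at line 4556 (epoch > gtod->tod) is exactly the unsigned-wrap check:

    #include <stdint.h>

    struct guest_epoch { uint8_t epdx; uint64_t epoch; };

    /* Derive the epoch pair from a requested guest TOD and the current host
     * TOD; a u64 underflow shows up as epoch > gtod_tod (cf. line 4556). */
    static struct guest_epoch set_tod(uint64_t gtod_tod, uint8_t gtod_ei,
                                      uint64_t clk_tod, uint8_t clk_ei)
    {
        struct guest_epoch g = {
            .epdx  = (uint8_t)(gtod_ei - clk_ei),
            .epoch = gtod_tod - clk_tod,
        };

        if (g.epoch > gtod_tod)      /* subtraction borrowed */
            g.epdx -= 1;
        return g;
    }
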
4591 return gmap_fault(vcpu->arch.gmap, gpa,
4615 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
4616 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
4624 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
4625 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
4646 struct kvm_arch_async_pf arch;
4648 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
4650 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
4651 vcpu->arch.pfault_compare)
4657 if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
4659 if (!vcpu->arch.gmap->pfault_enabled)
4664 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
4667 return kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
4681 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
4682 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
4702 clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
4704 vcpu->arch.sie_block->icptcode = 0;
4705 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
4731 rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
4740 pgm_info = vcpu->arch.pgm;
4754 vcpu->arch.sie_block->icptcode);
4755 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
4760 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
4761 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
4765 sie_page = container_of(vcpu->arch.sie_block,
4772 if (vcpu->arch.sie_block->icptcode > 0) {
4778 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
4779 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
4780 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
4806 struct sie_page *sie_page = (struct sie_page *)vcpu->arch.sie_block;
4835 exit_reason = sie64a(vcpu->arch.sie_block,
4847 if (vcpu->arch.sie_block->icptcode == ICPT_PV_INSTR ||
4848 vcpu->arch.sie_block->icptcode == ICPT_PV_PREF) {
4849 vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
4873 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
4874 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
4876 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
4877 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
4878 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
4881 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
4882 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
4883 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
4884 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
4888 vcpu->arch.diag318_info.val = kvm_run->s.regs.diag318;
4889 vcpu->arch.sie_block->cpnc = vcpu->arch.diag318_info.cpnc;
4890 VCPU_EVENT(vcpu, 3, "setting cpnc to %d", vcpu->arch.diag318_info.cpnc);
4899 !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
4901 vcpu->arch.sie_block->ecb3 |= ECB3_RI;
4910 !vcpu->arch.gs_enabled) {
4912 vcpu->arch.sie_block->ecb |= ECB_GS;
4913 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
4914 vcpu->arch.gs_enabled = 1;
4918 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
4919 vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
4925 vcpu->arch.host_gscb = current->thread.gs_cb;
4926 save_gs_cb(vcpu->arch.host_gscb);
4928 if (vcpu->arch.gs_enabled) {
4945 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
4951 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
4953 save_access_regs(vcpu->arch.host_acrs);
4957 vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
4958 vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
4981 vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_CC;
4982 vcpu->arch.sie_block->gpsw.mask |= kvm_run->psw_mask &
4993 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
4994 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
4995 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
4996 kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
4997 kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val;
5001 if (vcpu->arch.gs_enabled)
5003 current->thread.gs_cb = vcpu->arch.host_gscb;
5004 restore_gs_cb(vcpu->arch.host_gscb);
5005 if (!vcpu->arch.host_gscb)
5007 vcpu->arch.host_gscb = NULL;
5017 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
5018 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
5020 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
5022 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
5023 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
5024 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
5025 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
5027 restore_access_regs(vcpu->arch.host_acrs);
5032 current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
5033 current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
5049 if (vcpu->kvm->arch.pv.dumping)
5152 &vcpu->arch.sie_block->gpsw, 16);
5158 &vcpu->arch.sie_block->todpr, 4);
5162 clkcomp = vcpu->arch.sie_block->ckc >> 8;
5168 &vcpu->arch.sie_block->gcr, 128);
5219 spin_lock(&vcpu->kvm->arch.start_stop_lock);
5226 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5255 vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
5261 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5275 spin_lock(&vcpu->kvm->arch.start_stop_lock);
5282 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5315 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5329 if (!vcpu->kvm->arch.css_support) {
5330 vcpu->kvm->arch.css_support = 1;
5354 if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block))
5359 sida_addr = (char *)sida_addr(vcpu->arch.sie_block) + mop->sida_offset;
5424 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
5509 if (!vcpu->kvm->arch.pv.dumping)
5629 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
5646 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
5652 r = gmap_fault(vcpu->arch.gmap, arg, 0);
5747 vmf->page = virt_to_page(vcpu->arch.sie_block);
5787 if ((new->base_gfn * PAGE_SIZE) + size > kvm->arch.mem_limit)
5791 if (!kvm->arch.migration_mode)
5819 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
5823 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
5829 rc = gmap_map_segment(kvm->arch.gmap, new->userspace_addr,