Lines Matching refs:arch

60 kvm->arch.pv.handle = 0;
61 kvm->arch.pv.guest_len = 0;
62 kvm->arch.pv.stor_base = 0;
63 kvm->arch.pv.stor_var = NULL;
81 free_pages(vcpu->arch.pv.stor_base,
84 free_page((unsigned long)sida_addr(vcpu->arch.sie_block));
85 vcpu->arch.sie_block->pv_handle_cpu = 0;
86 vcpu->arch.sie_block->pv_handle_config = 0;
87 memset(&vcpu->arch.pv, 0, sizeof(vcpu->arch.pv));
88 vcpu->arch.sie_block->sdf = 0;
94 vcpu->arch.sie_block->gbea = 1;
112 vcpu->arch.pv.stor_base = __get_free_pages(GFP_KERNEL_ACCOUNT,
114 if (!vcpu->arch.pv.stor_base)
119 uvcb.num = vcpu->arch.sie_block->icpua;
120 uvcb.state_origin = virt_to_phys(vcpu->arch.sie_block);
121 uvcb.stor_origin = virt_to_phys((void *)vcpu->arch.pv.stor_base);
126 free_pages(vcpu->arch.pv.stor_base,
130 vcpu->arch.sie_block->sidad = virt_to_phys(sida_addr);
148 vcpu->arch.pv.handle = uvcb.cpu_handle;
149 vcpu->arch.sie_block->pv_handle_cpu = uvcb.cpu_handle;
150 vcpu->arch.sie_block->pv_handle_config = kvm_s390_pv_get_handle(vcpu->kvm);
151 vcpu->arch.sie_block->sdf = 2;
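The matches at lines 112-151 all come from the vCPU creation path: allocate the ultravisor's per-CPU backing storage, describe the vCPU to the ultravisor, then record the returned handle. A condensed sketch of that flow, assuming the usual uv_cb_csc/uv_call() interface from asm/uv.h; the SIDA setup around lines 126-130 and most error handling are abbreviated, so treat this as an illustration rather than the function body itself:

	struct uv_cb_csc uvcb = {
		.header.cmd = UVC_CMD_CREATE_SEC_CPU,
		.header.len = sizeof(uvcb),
	};

	/* backing storage for the ultravisor's per-CPU state */
	vcpu->arch.pv.stor_base = __get_free_pages(GFP_KERNEL_ACCOUNT,
						   get_order(uv_info.guest_cpu_stor_len));
	if (!vcpu->arch.pv.stor_base)
		return -ENOMEM;

	uvcb.guest_handle = kvm_s390_pv_get_handle(vcpu->kvm);
	uvcb.num = vcpu->arch.sie_block->icpua;
	uvcb.state_origin = virt_to_phys(vcpu->arch.sie_block);
	uvcb.stor_origin = virt_to_phys((void *)vcpu->arch.pv.stor_base);

	if (uv_call(0, (u64)&uvcb))	/* UV "create secure CPU" call */
		return -EIO;		/* the real code also frees stor_base here */

	/* wire the new handle into the vCPU and its SIE control block */
	vcpu->arch.pv.handle = uvcb.cpu_handle;
	vcpu->arch.sie_block->pv_handle_cpu = uvcb.cpu_handle;
	vcpu->arch.sie_block->pv_handle_config = kvm_s390_pv_get_handle(vcpu->kvm);
	vcpu->arch.sie_block->sdf = 2;	/* state-description format for protected guests */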
159 vfree(kvm->arch.pv.stor_var);
160 free_pages(kvm->arch.pv.stor_base,
171 kvm->arch.pv.stor_var = NULL;
172 kvm->arch.pv.stor_base = __get_free_pages(GFP_KERNEL_ACCOUNT, get_order(base));
173 if (!kvm->arch.pv.stor_base)
186 kvm->arch.pv.guest_len = npages * PAGE_SIZE;
191 kvm->arch.pv.stor_var = vzalloc(vlen);
192 if (!kvm->arch.pv.stor_var)
285 WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
330 if (kvm->arch.pv.set_aside)
334 if ((kvm->arch.gmap->asce & _ASCE_TYPE_MASK) == _ASCE_TYPE_SEGMENT)
344 priv->stor_var = kvm->arch.pv.stor_var;
345 priv->stor_base = kvm->arch.pv.stor_base;
347 priv->old_gmap_table = (unsigned long)kvm->arch.gmap->table;
348 WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
349 if (s390_replace_asce(kvm->arch.gmap))
360 kvm->arch.pv.set_aside = priv;
392 WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
398 s390_replace_asce(kvm->arch.gmap);
444 if (kvm->arch.pv.set_aside) {
445 list_add(kvm->arch.pv.set_aside, &kvm->arch.pv.need_cleanup);
446 kvm->arch.pv.set_aside = NULL;
450 while (!list_empty(&kvm->arch.pv.need_cleanup)) {
451 cur = list_first_entry(&kvm->arch.pv.need_cleanup, typeof(*cur), list);
506 p = kvm->arch.pv.set_aside;
507 kvm->arch.pv.set_aside = NULL;
526 list_add(&p->list, &kvm->arch.pv.need_cleanup);
538 struct kvm *kvm = container_of(subscription, struct kvm, arch.pv.mmu_notifier);
573 uvcb.guest_stor_len = kvm->arch.pv.guest_len;
574 uvcb.guest_asce = kvm->arch.gmap->asce;
575 uvcb.guest_sca = virt_to_phys(kvm->arch.sca);
577 virt_to_phys((void *)kvm->arch.pv.stor_base);
578 uvcb.conf_virt_stor_origin = (u64)kvm->arch.pv.stor_var;
579 uvcb.flags.ap_allow_instr = kvm->arch.model.uv_feat_guest.ap;
580 uvcb.flags.ap_instr_intr = kvm->arch.model.uv_feat_guest.ap_intr;
589 kvm->arch.pv.handle = uvcb.guest_handle;
601 kvm->arch.gmap->guest_handle = uvcb.guest_handle;
603 if (kvm->arch.pv.mmu_notifier.ops != &kvm_s390_pv_mmu_notifier_ops) {
604 kvm->arch.pv.mmu_notifier.ops = &kvm_s390_pv_mmu_notifier_ops;
605 mmu_notifier_register(&kvm->arch.pv.mmu_notifier, kvm->mm);
640 int ret = gmap_make_secure(kvm->arch.gmap, addr, &uvcb);
704 .cpu_handle = vcpu->arch.pv.handle,
752 .config_handle = kvm->arch.pv.handle,
883 kvm->arch.pv.dumping = false;
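Taken together, the matches sketch the protected-virtualization bookkeeping KVM keeps per VM under kvm->arch.pv. A rough reconstruction of that state, inferred from the lines above (the authoritative definition lives in the s390 kvm_host.h and may differ in layout and naming):

	struct kvm_s390_pv {
		u64 handle;			/* UV configuration handle (lines 60, 589, 752) */
		u64 guest_len;			/* guest storage length (lines 61, 186, 573) */
		unsigned long stor_base;	/* UV base storage from __get_free_pages() (lines 62, 172) */
		void *stor_var;			/* UV variable storage from vzalloc() (lines 63, 191) */
		void *set_aside;		/* VM set aside for asynchronous teardown (lines 330, 360, 444) */
		struct list_head need_cleanup;	/* leftover protected VMs destroyed later (lines 445-451, 526) */
		struct mmu_notifier mmu_notifier; /* registered once on successful init (lines 538, 603-605) */
		bool dumping;			/* dump in progress (line 883) */
	};

The per-vCPU counterpart (vcpu->arch.pv at lines 81-87, 112-148, 704) is much smaller, essentially just its own handle and stor_base. Deallocation is the mirror image of setup: vfree() the variable storage, free_pages() the base storage, and reset the fields, as the matches at lines 60-63 and 159-160 show.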