Lines matching refs:pv

60 kvm->arch.pv.handle = 0;
61 kvm->arch.pv.guest_len = 0;
62 kvm->arch.pv.stor_base = 0;
63 kvm->arch.pv.stor_var = NULL;
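The hits at lines 60-63 reset the per-VM PV bookkeeping once the backing storage is gone. A minimal sketch of such a reset helper, with the field layout inferred purely from these four assignments (the helper name is hypothetical):

#include <linux/types.h>

/* Field layout assumed from the assignments at lines 60-63. */
struct kvm_s390_pv {
	u64 handle;               /* UV configuration handle; 0 means none   */
	unsigned long guest_len;  /* guest storage length in bytes           */
	unsigned long stor_base;  /* base storage from __get_free_pages()    */
	void *stor_var;           /* variable storage from vzalloc()         */
};

/* Hypothetical helper: forget the PV state after its storage is freed. */
static void kvm_s390_pv_reset_state(struct kvm_s390_pv *pv)
{
	pv->handle = 0;
	pv->guest_len = 0;
	pv->stor_base = 0;
	pv->stor_var = NULL;
}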
81 free_pages(vcpu->arch.pv.stor_base,
87 memset(&vcpu->arch.pv, 0, sizeof(vcpu->arch.pv));
112 vcpu->arch.pv.stor_base = __get_free_pages(GFP_KERNEL_ACCOUNT,
114 if (!vcpu->arch.pv.stor_base)
121 uvcb.stor_origin = virt_to_phys((void *)vcpu->arch.pv.stor_base);
126 free_pages(vcpu->arch.pv.stor_base,
148 vcpu->arch.pv.handle = uvcb.cpu_handle;
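Lines 81-148 cover the per-vCPU half: line 112 grabs physically contiguous pages for the CPU's base storage, line 121 hands their physical address to the ultravisor, line 126 is the error-path free, and line 148 records the returned handle. A condensed sketch of that pairing, assuming uv_call(), struct uv_cb_csc, and UVC_CMD_CREATE_SEC_CPU from asm/uv.h and leaving the order calculation to the caller:

#include <linux/gfp.h>
#include <linux/kvm_host.h>
#include <asm/uv.h>

/* Sketch of the allocate/create/unwind pairing around lines 112-148. */
static int pv_create_cpu_example(struct kvm_vcpu *vcpu, unsigned int order,
				 u16 *rc, u16 *rrc)
{
	struct uv_cb_csc uvcb = {
		.header.len = sizeof(uvcb),
		.header.cmd = UVC_CMD_CREATE_SEC_CPU,
	};
	int cc;

	vcpu->arch.pv.stor_base = __get_free_pages(GFP_KERNEL_ACCOUNT, order);
	if (!vcpu->arch.pv.stor_base)
		return -ENOMEM;

	/* The ultravisor wants a physical address, not a kernel VA (line 121). */
	uvcb.stor_origin = virt_to_phys((void *)vcpu->arch.pv.stor_base);

	cc = uv_call(0, (u64)&uvcb);
	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	if (cc) {
		/* Error path mirrors line 126: hand the pages back. */
		free_pages(vcpu->arch.pv.stor_base, order);
		return -EIO;
	}
	vcpu->arch.pv.handle = uvcb.cpu_handle;	/* line 148 */
	return 0;
}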
159 vfree(kvm->arch.pv.stor_var);
160 free_pages(kvm->arch.pv.stor_base,
171 kvm->arch.pv.stor_var = NULL;
172 kvm->arch.pv.stor_base = __get_free_pages(GFP_KERNEL_ACCOUNT, get_order(base));
173 if (!kvm->arch.pv.stor_base)
186 kvm->arch.pv.guest_len = npages * PAGE_SIZE;
191 kvm->arch.pv.stor_var = vzalloc(vlen);
192 if (!kvm->arch.pv.stor_var)
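Lines 171-192 are the VM-level counterpart: a physically contiguous base area sized via get_order() plus a variable-length area from vzalloc(), with lines 159-160 releasing both on teardown. A sketch with the base/npages/vlen size calculations left as parameters, since the listing does not show how they are derived:

#include <linux/gfp.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>

/* Sketch: allocate the VM-level PV storage areas (cf. lines 171-192). */
static int pv_alloc_vm_example(struct kvm *kvm, unsigned long base,
			       unsigned long npages, unsigned long vlen)
{
	kvm->arch.pv.stor_var = NULL;
	kvm->arch.pv.stor_base =
		__get_free_pages(GFP_KERNEL_ACCOUNT, get_order(base));
	if (!kvm->arch.pv.stor_base)
		return -ENOMEM;

	kvm->arch.pv.guest_len = npages * PAGE_SIZE;

	/* The variable area can be large; it need not be physically contiguous. */
	kvm->arch.pv.stor_var = vzalloc(vlen);
	if (!kvm->arch.pv.stor_var) {
		/* Unwind the same way lines 159-160 do on teardown. */
		free_pages(kvm->arch.pv.stor_base, get_order(base));
		return -ENOMEM;
	}
	return 0;
}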
330 if (kvm->arch.pv.set_aside)
344 priv->stor_var = kvm->arch.pv.stor_var;
345 priv->stor_base = kvm->arch.pv.stor_base;
360 kvm->arch.pv.set_aside = priv;
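Lines 330-360 implement the "set aside" path: instead of freeing a dying protected VM's storage synchronously, its pointers move into a privately allocated record, and line 330 rejects a second set-aside while one is already pending. A sketch of the record and the handover (struct and function names assumed; the real record plausibly carries the handle and more):

#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/slab.h>

/* Hypothetical record for a VM whose destruction is deferred. */
struct pv_vm_to_be_destroyed {
	struct list_head list;	/* must stay first: see list_add() at line 445 */
	void *stor_var;
	unsigned long stor_base;
};

/* Sketch of the handover around lines 330-360. */
static int pv_set_aside_example(struct kvm *kvm)
{
	struct pv_vm_to_be_destroyed *priv;

	if (kvm->arch.pv.set_aside)	/* only one VM may be set aside */
		return -EINVAL;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Steal the storage pointers; a later cleanup pass frees them. */
	priv->stor_var = kvm->arch.pv.stor_var;
	priv->stor_base = kvm->arch.pv.stor_base;
	kvm->arch.pv.set_aside = priv;
	return 0;
}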
444 if (kvm->arch.pv.set_aside) {
445 list_add(kvm->arch.pv.set_aside, &kvm->arch.pv.need_cleanup);
446 kvm->arch.pv.set_aside = NULL;
450 while (!list_empty(&kvm->arch.pv.need_cleanup)) {
451 cur = list_first_entry(&kvm->arch.pv.need_cleanup, typeof(*cur), list);
506 p = kvm->arch.pv.set_aside;
507 kvm->arch.pv.set_aside = NULL;
526 list_add(&p->list, &kvm->arch.pv.need_cleanup);
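Lines 444-451 fold any pending set-aside record into need_cleanup and then drain that list; lines 506-526 show the reverse move, re-queueing a record whose destruction did not complete. Note that list_add() at line 445 can take the void * set_aside pointer directly because the record's embedded list head is its first member. A sketch of the drain loop:

#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/slab.h>

/* Sketch of the drain loop around lines 444-451. */
static void pv_cleanup_all_example(struct kvm *kvm)
{
	struct pv_vm_to_be_destroyed *cur;

	/*
	 * A set-aside VM becomes just one more entry to clean up; the
	 * void * converts because the list head is the first member.
	 */
	if (kvm->arch.pv.set_aside) {
		list_add(kvm->arch.pv.set_aside, &kvm->arch.pv.need_cleanup);
		kvm->arch.pv.set_aside = NULL;
	}

	while (!list_empty(&kvm->arch.pv.need_cleanup)) {
		cur = list_first_entry(&kvm->arch.pv.need_cleanup,
				       typeof(*cur), list);
		list_del(&cur->list);
		/* ... destroy the secure config, free stor_base/stor_var ... */
		kfree(cur);
	}
}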
538 struct kvm *kvm = container_of(subscription, struct kvm, arch.pv.mmu_notifier);
573 uvcb.guest_stor_len = kvm->arch.pv.guest_len;
577 virt_to_phys((void *)kvm->arch.pv.stor_base);
578 uvcb.conf_virt_stor_origin = (u64)kvm->arch.pv.stor_var;
589 kvm->arch.pv.handle = uvcb.guest_handle;
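Lines 573-589 fill the "create secure configuration" control block from the earlier allocations: the guest length, the base storage translated to a physical address, and the vzalloc()'d variable storage passed by virtual address; line 589 saves the returned configuration handle. A sketch assuming struct uv_cb_cgc and UVC_CMD_CREATE_SEC_CONF from asm/uv.h (the real call also supplies fields such as the guest storage origin and ASCE, omitted here):

#include <linux/kvm_host.h>
#include <asm/uv.h>

/* Sketch of the secure-configuration creation around lines 573-589. */
static int pv_create_vm_example(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	struct uv_cb_cgc uvcb = {
		.header.cmd = UVC_CMD_CREATE_SEC_CONF,
		.header.len = sizeof(uvcb),
	};
	int cc;

	uvcb.guest_stor_len = kvm->arch.pv.guest_len;
	/* Base storage is physically contiguous: pass its physical address. */
	uvcb.conf_base_stor_origin =
		virt_to_phys((void *)kvm->arch.pv.stor_base);
	/* Variable storage came from vzalloc(): passed by virtual address. */
	uvcb.conf_virt_stor_origin = (u64)kvm->arch.pv.stor_var;

	cc = uv_call(0, (u64)&uvcb);
	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	if (cc)
		return -EIO;
	kvm->arch.pv.handle = uvcb.guest_handle;	/* line 589 */
	return 0;
}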
603 if (kvm->arch.pv.mmu_notifier.ops != &kvm_s390_pv_mmu_notifier_ops) {
604 kvm->arch.pv.mmu_notifier.ops = &kvm_s390_pv_mmu_notifier_ops;
605 mmu_notifier_register(&kvm->arch.pv.mmu_notifier, kvm->mm);
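Lines 603-605 register an MMU notifier at most once per VM, using the ops pointer itself as the "already registered" marker, and line 538 shows the callback recovering its kvm from the embedded notifier via container_of(). A sketch of the idiom (the release callback body is elided and its name is hypothetical):

#include <linux/kvm_host.h>
#include <linux/mmu_notifier.h>

/* Hypothetical callback: recover the kvm embedding the notifier (line 538). */
static void pv_mmu_release_example(struct mmu_notifier *subscription,
				   struct mm_struct *mm)
{
	struct kvm *kvm = container_of(subscription, struct kvm,
				       arch.pv.mmu_notifier);

	/* ... tear down protected-VM state before the mm disappears ... */
}

static const struct mmu_notifier_ops kvm_s390_pv_mmu_notifier_ops = {
	.release = pv_mmu_release_example,
};

/* Register once: an ops pointer that is already ours means "done" (603-605). */
static void pv_register_notifier_example(struct kvm *kvm)
{
	if (kvm->arch.pv.mmu_notifier.ops != &kvm_s390_pv_mmu_notifier_ops) {
		kvm->arch.pv.mmu_notifier.ops = &kvm_s390_pv_mmu_notifier_ops;
		mmu_notifier_register(&kvm->arch.pv.mmu_notifier, kvm->mm);
	}
}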
704 .cpu_handle = vcpu->arch.pv.handle,
752 .config_handle = kvm->arch.pv.handle,
883 kvm->arch.pv.dumping = false;
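The remaining hits thread the stored handles back into later UV control blocks: the per-CPU handle at line 704 and the configuration handle at line 752, with line 883 clearing the dump flag once dumping finishes. A sketch of the configuration-handle case, assuming struct uv_cb_dump_complete and UVC_CMD_DUMP_COMPLETE from asm/uv.h:

#include <linux/kvm_host.h>
#include <asm/uv.h>

/* Sketch: the saved handle names the secure config in later UV calls. */
static int pv_dump_complete_example(struct kvm *kvm, void *dump_area,
				    u16 *rc, u16 *rrc)
{
	struct uv_cb_dump_complete uvcb = {
		.header.len = sizeof(uvcb),
		.header.cmd = UVC_CMD_DUMP_COMPLETE,
		.config_handle = kvm->arch.pv.handle,	/* cf. line 752 */
		.dump_area_origin = (u64)dump_area,
	};
	int cc = uv_call(0, (u64)&uvcb);

	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	kvm->arch.pv.dumping = false;	/* line 883: dump mode ends */
	return cc ? -EINVAL : 0;
}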