Lines matching defs:kvm (s390 KVM protected virtualization support; the number before each fragment is its line in the source file)
8 #include <linux/kvm.h>
19 #include "kvm-s390.h"
21 bool kvm_s390_pv_is_protected(struct kvm *kvm)
23 lockdep_assert_held(&kvm->lock);
24 return !!kvm_s390_pv_get_handle(kvm);
58 static void kvm_s390_clear_pv_state(struct kvm *kvm)
60 kvm->arch.pv.handle = 0;
61 kvm->arch.pv.guest_len = 0;
62 kvm->arch.pv.stor_base = 0;
63 kvm->arch.pv.stor_var = NULL;
75 KVM_UV_EVENT(vcpu->kvm, 3, "PROTVIRT DESTROY VCPU %d: rc %x rrc %x",
118 uvcb.guest_handle = kvm_s390_pv_get_handle(vcpu->kvm);
135 KVM_UV_EVENT(vcpu->kvm, 3,
150 vcpu->arch.sie_block->pv_handle_config = kvm_s390_pv_get_handle(vcpu->kvm);
157 static void kvm_s390_pv_dealloc_vm(struct kvm *kvm)
159 vfree(kvm->arch.pv.stor_var);
160 free_pages(kvm->arch.pv.stor_base,
162 kvm_s390_clear_pv_state(kvm);
165 static int kvm_s390_pv_alloc_vm(struct kvm *kvm)
171 kvm->arch.pv.stor_var = NULL;
172 kvm->arch.pv.stor_base = __get_free_pages(GFP_KERNEL_ACCOUNT, get_order(base));
173 if (!kvm->arch.pv.stor_base)
182 mutex_lock(&kvm->slots_lock);
183 npages = kvm_s390_get_gfn_end(kvm_memslots(kvm));
184 mutex_unlock(&kvm->slots_lock);
186 kvm->arch.pv.guest_len = npages * PAGE_SIZE;
191 kvm->arch.pv.stor_var = vzalloc(vlen);
192 if (!kvm->arch.pv.stor_var)
197 kvm_s390_pv_dealloc_vm(kvm);
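The allocation fragments above (stor_base from __get_free_pages(), guest_len derived from the last guest frame, stor_var from vzalloc(), and a jump to kvm_s390_pv_dealloc_vm() on failure) follow an allocate-or-unwind pattern. A minimal user-space sketch of that pattern, with a hypothetical struct pv_state and malloc()/calloc() standing in for the kernel allocators:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* hypothetical stand-in for the kvm->arch.pv fields listed above */
struct pv_state {
        void *stor_base;      /* base storage; __get_free_pages() in the kernel */
        void *stor_var;       /* variable storage; vzalloc() in the kernel */
        uint64_t guest_len;   /* guest storage length in bytes */
};

static void pv_dealloc(struct pv_state *pv)
{
        free(pv->stor_var);
        free(pv->stor_base);
        memset(pv, 0, sizeof(*pv));     /* mirrors kvm_s390_clear_pv_state() */
}

static int pv_alloc(struct pv_state *pv, size_t base_len, uint64_t npages, size_t vlen)
{
        pv->stor_var = NULL;
        pv->stor_base = malloc(base_len);
        if (!pv->stor_base)
                return -1;

        pv->guest_len = npages * 4096;  /* npages * PAGE_SIZE */

        pv->stor_var = calloc(1, vlen); /* zeroed, like vzalloc() */
        if (!pv->stor_var) {
                pv_dealloc(pv);         /* undo everything allocated so far */
                return -1;
        }
        return 0;
}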
203 * @kvm: the KVM that was associated with this leftover protected VM
209 * On success, kvm->mm->context.protected_count will be decremented atomically
214 static int kvm_s390_pv_dispose_one_leftover(struct kvm *kvm,
224 KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY LEFTOVER VM: rc %x rrc %x", *rc, *rrc);
238 atomic_dec(&kvm->mm->context.protected_count);
244 * @kvm: the VM whose memory is to be cleared.
249 static void kvm_s390_destroy_lower_2g(struct kvm *kvm)
256 srcu_idx = srcu_read_lock(&kvm->srcu);
259 slot = gfn_to_memslot(kvm, 0);
263 s390_uv_destroy_range(kvm->mm, slot->userspace_addr, slot->userspace_addr + len);
265 slot = gfn_to_memslot(kvm, slot->base_gfn + slot->npages);
268 srcu_read_unlock(&kvm->srcu, srcu_idx);
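The fragments above walk the memslots covering the first 2 GiB of guest memory (under the kvm->srcu read lock) and destroy the backing host ranges. A rough user-space sketch of the same loop, assuming a plain array of simplified memslots, a hypothetical find_slot() in place of gfn_to_memslot(), and a stub destroy_range() in place of s390_uv_destroy_range():

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGES_2G   ((2ULL * 1024 * 1024 * 1024) >> PAGE_SHIFT)

/* simplified memslot: a guest frame range plus its location in host memory */
struct memslot {
        uint64_t base_gfn;
        uint64_t npages;
        uint64_t userspace_addr;
};

static void destroy_range(uint64_t start, uint64_t end)
{
        /* stands in for s390_uv_destroy_range(kvm->mm, start, end) */
        printf("destroy host range %#llx-%#llx\n",
               (unsigned long long)start, (unsigned long long)end);
}

/* stands in for gfn_to_memslot(); returns NULL if no slot covers gfn */
static struct memslot *find_slot(struct memslot *slots, int n, uint64_t gfn)
{
        for (int i = 0; i < n; i++)
                if (gfn >= slots[i].base_gfn &&
                    gfn < slots[i].base_gfn + slots[i].npages)
                        return &slots[i];
        return NULL;
}

static void destroy_lower_2g(struct memslot *slots, int n)
{
        /* in the kernel this walk runs under srcu_read_lock(&kvm->srcu) */
        struct memslot *slot = find_slot(slots, n, 0);

        while (slot && slot->base_gfn < PAGES_2G) {
                uint64_t npages = slot->npages;

                if (npages > PAGES_2G - slot->base_gfn)
                        npages = PAGES_2G - slot->base_gfn;   /* clamp at 2 GiB */
                destroy_range(slot->userspace_addr,
                              slot->userspace_addr + (npages << PAGE_SHIFT));
                slot = find_slot(slots, n, slot->base_gfn + slot->npages);
        }
        /* ...followed by srcu_read_unlock(&kvm->srcu, srcu_idx) */
}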
271 static int kvm_s390_pv_deinit_vm_fast(struct kvm *kvm, u16 *rc, u16 *rrc)
276 .handle = kvm_s390_pv_get_handle(kvm),
285 WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
286 KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY VM FAST: rc %x rrc %x",
290 kvm_s390_pv_get_handle(kvm), uvcb.header.rc, uvcb.header.rrc);
293 kvm_s390_pv_dealloc_vm(kvm);
304 * @kvm: the VM
315 * Context: kvm->lock needs to be held
320 int kvm_s390_pv_set_aside(struct kvm *kvm, u16 *rc, u16 *rrc)
325 lockdep_assert_held(&kvm->lock);
330 if (kvm->arch.pv.set_aside)
334 if ((kvm->arch.gmap->asce & _ASCE_TYPE_MASK) == _ASCE_TYPE_SEGMENT)
342 res = kvm_s390_pv_deinit_vm_fast(kvm, rc, rrc);
344 priv->stor_var = kvm->arch.pv.stor_var;
345 priv->stor_base = kvm->arch.pv.stor_base;
346 priv->handle = kvm_s390_pv_get_handle(kvm);
347 priv->old_gmap_table = (unsigned long)kvm->arch.gmap->table;
348 WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
349 if (s390_replace_asce(kvm->arch.gmap))
358 kvm_s390_destroy_lower_2g(kvm);
359 kvm_s390_clear_pv_state(kvm);
360 kvm->arch.pv.set_aside = priv;
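kvm_s390_pv_set_aside() (fragments above) moves the protected-VM handle and its storage into a private "leftover" record, clears the live PV state, and remembers the record in kvm->arch.pv.set_aside so the actual teardown can happen later, outside kvm->lock. A simplified user-space sketch of that ownership transfer, with hypothetical struct names; the caller is assumed to hold the VM lock, matching "Context: kvm->lock needs to be held":

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* hypothetical, trimmed-down mirrors of the fields referenced above */
struct pv_state {
        uint64_t handle;
        void *stor_base;
        void *stor_var;
};

struct leftover {                   /* plays the role of the set-aside record */
        uint64_t handle;
        void *stor_base;
        void *stor_var;
};

struct vm {
        struct pv_state pv;
        struct leftover *set_aside; /* at most one VM may be set aside at a time */
};

static int pv_set_aside(struct vm *vm)
{
        struct leftover *priv;

        if (vm->set_aside)          /* refuse if a leftover VM already exists */
                return -1;

        priv = calloc(1, sizeof(*priv));
        if (!priv)
                return -1;

        /* move ownership of the protected-VM resources into the leftover record */
        priv->handle = vm->pv.handle;
        priv->stor_base = vm->pv.stor_base;
        priv->stor_var = vm->pv.stor_var;

        /* clear the live state so the VM can later host a new protected guest */
        memset(&vm->pv, 0, sizeof(vm->pv));
        vm->set_aside = priv;
        return 0;
}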
369 * @kvm: the KVM whose protected VM needs to be deinitialized
382 * Context: kvm->lock needs to be held
386 int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
390 cc = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
392 WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
394 atomic_dec(&kvm->mm->context.protected_count);
395 kvm_s390_pv_dealloc_vm(kvm);
398 s390_replace_asce(kvm->arch.gmap);
400 KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY VM: rc %x rrc %x", *rc, *rrc);
409 * @kvm: the KVM to be cleaned up
417 * Context: kvm->lock needs to be held unless being called from
422 int kvm_s390_pv_deinit_cleanup_all(struct kvm *kvm, u16 *rc, u16 *rrc)
433 if (!atomic_inc_not_zero(&kvm->mm->context.protected_count))
438 if (kvm_s390_pv_get_handle(kvm)) {
439 cc = kvm_s390_pv_deinit_vm(kvm, rc, rrc);
444 if (kvm->arch.pv.set_aside) {
445 list_add(kvm->arch.pv.set_aside, &kvm->arch.pv.need_cleanup);
446 kvm->arch.pv.set_aside = NULL;
450 while (!list_empty(&kvm->arch.pv.need_cleanup)) {
451 cur = list_first_entry(&kvm->arch.pv.need_cleanup, typeof(*cur), list);
453 if (kvm_s390_pv_dispose_one_leftover(kvm, cur, &_rc, &_rrc)) {
474 if (need_zap && mmget_not_zero(kvm->mm)) {
475 s390_uv_destroy_range(kvm->mm, 0, TASK_SIZE);
476 mmput(kvm->mm);
480 atomic_dec(&kvm->mm->context.protected_count);
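kvm_s390_pv_deinit_cleanup_all() (fragments above) tears down the current protected VM if one exists, queues any set-aside record onto need_cleanup, and then drains that list, disposing of every leftover even if some disposals fail. A compact user-space sketch of the list-draining part, using a singly linked list and a hypothetical dispose_one_leftover() stub in place of the kernel helpers:

#include <stdio.h>
#include <stdlib.h>

/* hypothetical leftover record queued on a need_cleanup list */
struct leftover {
        struct leftover *next;
        unsigned long handle;
};

struct vm {
        struct leftover *need_cleanup;  /* singly linked stand-in for the kernel list */
        struct leftover *set_aside;
};

static int dispose_one_leftover(struct leftover *cur)
{
        /* stands in for kvm_s390_pv_dispose_one_leftover(): tear down one leftover VM */
        printf("disposing leftover handle %#lx\n", cur->handle);
        return 0;
}

static int pv_cleanup_all(struct vm *vm)
{
        int ret = 0;

        /* a VM that was only set aside but never queued gets cleaned up too */
        if (vm->set_aside) {
                vm->set_aside->next = vm->need_cleanup;
                vm->need_cleanup = vm->set_aside;
                vm->set_aside = NULL;
        }

        /* drain the list: dispose each leftover, keep going even on failure */
        while (vm->need_cleanup) {
                struct leftover *cur = vm->need_cleanup;

                vm->need_cleanup = cur->next;
                if (dispose_one_leftover(cur))
                        ret = -1;       /* remember the failure, but continue draining */
                free(cur);
        }
        return ret;
}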
486 * @kvm: the VM previously associated with the protected VM
494 * Context: kvm->lock must not be held.
499 int kvm_s390_pv_deinit_aside_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
504 lockdep_assert_not_held(&kvm->lock);
505 mutex_lock(&kvm->lock);
506 p = kvm->arch.pv.set_aside;
507 kvm->arch.pv.set_aside = NULL;
508 mutex_unlock(&kvm->lock);
513 if (s390_uv_destroy_range_interruptible(kvm->mm, 0, TASK_SIZE_MAX))
515 if (kvm_s390_pv_dispose_one_leftover(kvm, p, rc, rrc))
525 mutex_lock(&kvm->lock);
526 list_add(&p->list, &kvm->arch.pv.need_cleanup);
527 mutex_unlock(&kvm->lock);
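kvm_s390_pv_deinit_aside_vm() (fragments above) is the slow half of the asynchronous teardown: it must be entered without kvm->lock, briefly takes the lock only to claim the set-aside record, does the long-running destruction unlocked, and puts the record back on need_cleanup if anything fails. A sketch of that locking discipline with a pthread mutex and stubbed-out teardown helpers (all names hypothetical):

#include <pthread.h>
#include <stdlib.h>

struct leftover {
        struct leftover *next;
        unsigned long handle;
};

struct vm {
        pthread_mutex_t lock;           /* plays the role of kvm->lock */
        struct leftover *set_aside;
        struct leftover *need_cleanup;  /* singly linked stand-in for the kernel list */
};

/* stand-ins for s390_uv_destroy_range_interruptible() and
 * kvm_s390_pv_dispose_one_leftover(); 0 means success */
static int destroy_range(void)              { return 0; }
static int dispose_one(struct leftover *p)  { (void)p; return 0; }

/* Context: the caller must NOT hold vm->lock */
static int pv_deinit_aside_vm(struct vm *vm)
{
        struct leftover *p;

        pthread_mutex_lock(&vm->lock);      /* claim the record under the lock... */
        p = vm->set_aside;
        vm->set_aside = NULL;
        pthread_mutex_unlock(&vm->lock);    /* ...but do the slow work unlocked */

        if (!p)
                return -1;                  /* nothing was set aside */

        if (destroy_range() || dispose_one(p)) {
                /* on failure, hand the leftover back so a later cleanup can retry */
                pthread_mutex_lock(&vm->lock);
                p->next = vm->need_cleanup;
                vm->need_cleanup = p;
                pthread_mutex_unlock(&vm->lock);
                return -1;
        }

        free(p);
        return 0;
}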
538 struct kvm *kvm = container_of(subscription, struct kvm, arch.pv.mmu_notifier);
545 * When the struct kvm gets deinitialized, this notifier is also
547 * struct kvm is still valid.
549 r = kvm_s390_cpus_from_pv(kvm, &dummy, &dummy);
550 if (!r && is_destroy_fast_available() && kvm_s390_pv_get_handle(kvm))
551 kvm_s390_pv_deinit_vm_fast(kvm, &dummy, &dummy);
558 int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
567 ret = kvm_s390_pv_alloc_vm(kvm);
573 uvcb.guest_stor_len = kvm->arch.pv.guest_len;
574 uvcb.guest_asce = kvm->arch.gmap->asce;
575 uvcb.guest_sca = virt_to_phys(kvm->arch.sca);
577 virt_to_phys((void *)kvm->arch.pv.stor_base);
578 uvcb.conf_virt_stor_origin = (u64)kvm->arch.pv.stor_var;
579 uvcb.flags.ap_allow_instr = kvm->arch.model.uv_feat_guest.ap;
580 uvcb.flags.ap_instr_intr = kvm->arch.model.uv_feat_guest.ap_intr;
585 KVM_UV_EVENT(kvm, 3, "PROTVIRT CREATE VM: handle %llx len %llx rc %x rrc %x flags %04x",
589 kvm->arch.pv.handle = uvcb.guest_handle;
591 atomic_inc(&kvm->mm->context.protected_count);
594 kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);
596 atomic_dec(&kvm->mm->context.protected_count);
597 kvm_s390_pv_dealloc_vm(kvm);
601 kvm->arch.gmap->guest_handle = uvcb.guest_handle;
602 /* Add the notifier only once. No races because we hold kvm->lock */
603 if (kvm->arch.pv.mmu_notifier.ops != &kvm_s390_pv_mmu_notifier_ops) {
604 kvm->arch.pv.mmu_notifier.ops = &kvm_s390_pv_mmu_notifier_ops;
605 mmu_notifier_register(&kvm->arch.pv.mmu_notifier, kvm->mm);
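kvm_s390_pv_init_vm() (fragments above) fills the create-configuration control block, issues the Ultravisor call, and bumps mm->context.protected_count; on failure it either runs a full deinit or drops the count and frees the buffers, and on success it records the handle in the gmap and registers the teardown MMU notifier exactly once. A simplified user-space sketch of that success/failure bookkeeping, with a hypothetical uv_create_config() stub and plain integers instead of atomics:

#include <stdbool.h>
#include <stdint.h>

/* hypothetical, simplified state; the kernel keeps this in kvm->arch.pv and kvm->mm */
struct vm {
        uint64_t pv_handle;
        long protected_count;       /* atomic_t in the kernel */
        bool notifier_registered;   /* "Add the notifier only once" */
};

/* stands in for the Ultravisor create-configuration call; returns 0 on success
 * and sets *need_destroy when a failed call still left state to be torn down */
static int uv_create_config(uint64_t *handle, bool *need_destroy)
{
        *handle = 0xabcdefULL;
        *need_destroy = false;
        return 0;
}

static void pv_deinit(struct vm *vm)  { vm->pv_handle = 0; vm->protected_count--; }
static void pv_dealloc(struct vm *vm) { (void)vm; }

static int pv_init_vm(struct vm *vm)
{
        uint64_t handle = 0;
        bool need_destroy = false;
        int cc = uv_create_config(&handle, &need_destroy);

        vm->pv_handle = handle;
        vm->protected_count++;       /* the mm may now contain protected memory */

        if (cc) {
                if (need_destroy) {
                        pv_deinit(vm);           /* full teardown of the half-made VM */
                } else {
                        vm->protected_count--;   /* nothing to destroy */
                        pv_dealloc(vm);
                }
                return -1;
        }

        /* success: register the MMU-notifier-based teardown hook only once */
        if (!vm->notifier_registered)
                vm->notifier_registered = true;
        return 0;
}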
610 int kvm_s390_pv_set_sec_parms(struct kvm *kvm, void *hdr, u64 length, u16 *rc,
618 .guest_handle = kvm_s390_pv_get_handle(kvm),
624 KVM_UV_EVENT(kvm, 3, "PROTVIRT VM SET PARMS: rc %x rrc %x",
629 static int unpack_one(struct kvm *kvm, unsigned long addr, u64 tweak,
635 .guest_handle = kvm_s390_pv_get_handle(kvm),
640 int ret = gmap_make_secure(kvm->arch.gmap, addr, &uvcb);
646 KVM_UV_EVENT(kvm, 3, "PROTVIRT VM UNPACK: failed addr %llx with rc %x rrc %x",
651 int kvm_s390_pv_unpack(struct kvm *kvm, unsigned long addr, unsigned long size,
660 KVM_UV_EVENT(kvm, 3, "PROTVIRT VM UNPACK: start addr %lx size %lx",
664 ret = unpack_one(kvm, addr, tweak, offset, rc, rrc);
677 KVM_UV_EVENT(kvm, 3, "%s", "PROTVIRT VM UNPACK: successful");
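kvm_s390_pv_unpack() (fragments above) imports the encrypted guest image one page at a time: each unpack_one() call passes the guest handle, destination address, tweak, and current offset to the Ultravisor via gmap_make_secure(). A minimal user-space sketch of the loop shape, with a stub unpack_one(); the real code additionally retries on -EAGAIN and checks for fatal signals:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* stands in for unpack_one(): import a single page of the encrypted image */
static int unpack_one(unsigned long addr, uint64_t tweak, uint64_t offset)
{
        printf("unpack page at %#lx, tweak %#llx offset %#llx\n",
               addr, (unsigned long long)tweak, (unsigned long long)offset);
        return 0;
}

/* page-by-page unpack loop, mirroring the shape of kvm_s390_pv_unpack() */
static int pv_unpack(unsigned long addr, unsigned long size, uint64_t tweak)
{
        uint64_t offset = 0;
        int ret = 0;

        while (offset < size) {
                ret = unpack_one(addr, tweak, offset);
                if (ret)
                        break;
                addr += PAGE_SIZE;
                offset += PAGE_SIZE;
        }
        return ret;
}

int main(void)
{
        return pv_unpack(0x100000, 3 * PAGE_SIZE, 0xdead);
}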
692 KVM_UV_EVENT(vcpu->kvm, 3, "PROTVIRT SET CPU %d STATE %d rc %x rrc %x",
721 * @kvm: pointer to the guest's KVM struct
736 * Context: kvm->lock needs to be held
746 int kvm_s390_pv_dump_stor_state(struct kvm *kvm, void __user *buff_user,
752 .config_handle = kvm->arch.pv.handle,
825 KVM_UV_EVENT(kvm, 3,
838 * @kvm: pointer to the guest's KVM struct
846 * Context: kvm->lock needs to be held
854 int kvm_s390_pv_dump_complete(struct kvm *kvm, void __user *buff_user,
860 .config_handle = kvm_s390_pv_get_handle(kvm),
874 KVM_UV_EVENT(kvm, 3, "PROTVIRT DUMP COMPLETE: rc %x rrc %x",
883 kvm->arch.pv.dumping = false;
884 kvm_s390_vcpu_unblock_all(kvm);
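kvm_s390_pv_dump_complete() (fragments above) retrieves the final completion data for a protected-VM dump, hands it to userspace, and then clears kvm->arch.pv.dumping and unblocks all vcpus so the guest can run again. A small user-space sketch of that ordering, with a stub uv_dump_complete() and memcpy() standing in for copy_to_user() (all names hypothetical):

#include <stdbool.h>
#include <string.h>

struct vm {
        bool dumping;                /* kvm->arch.pv.dumping */
};

/* stands in for the UV "dump complete" call that fills the completion data */
static int uv_dump_complete(unsigned char *buf, unsigned long len)
{
        memset(buf, 0, len);
        return 0;
}

static void unblock_all_vcpus(struct vm *vm) { (void)vm; }  /* kvm_s390_vcpu_unblock_all() */

static int pv_dump_complete(struct vm *vm, unsigned char *user_buf, unsigned long len)
{
        unsigned char tmp[64];
        int ret;

        if (len > sizeof(tmp))
                return -1;

        ret = uv_dump_complete(tmp, len);
        if (!ret)
                memcpy(user_buf, tmp, len);   /* copy_to_user() in the kernel */

        /* whether or not the call succeeded, dump mode ends here and the
         * vcpus that were blocked for the dump may run again */
        vm->dumping = false;
        unblock_all_vcpus(vm);
        return ret;
}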