Lines Matching defs:kvm
108 static int sev_get_asid(struct kvm *kvm)
110 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
150 static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
174 static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
176 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
179 if (kvm->created_vcpus)
205 static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
208 int asid = sev_get_asid(kvm);
239 static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
241 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
246 static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
248 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
255 if (!sev_guest(kvm))
298 ret = sev_bind_asid(kvm, start->handle, error);
307 sev_unbind_asid(kvm, start->handle);
324 static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
328 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
336 lockdep_assert_held(&kvm->lock);
387 static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
390 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
434 static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
437 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
443 if (!sev_guest(kvm))
458 inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
487 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, data, &argp->error);
502 sev_unpin_memory(kvm, inpages, npages);
508 static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
511 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
518 if (!sev_guest(kvm))
550 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, data, &argp->error);
577 static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
579 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
583 if (!sev_guest(kvm))
591 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, data, &argp->error);
597 static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
599 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
604 if (!sev_guest(kvm))
612 ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, data, &argp->error);
627 static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
631 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
644 ret = sev_issue_cmd(kvm,
651 static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
664 return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
667 static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
686 ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err);
704 static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
744 ret = __sev_dbg_decrypt(kvm, dst_paddr,
771 ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true);
781 static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
791 if (!sev_guest(kvm))
811 src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
815 dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
817 sev_unpin_memory(kvm, src_p, n);
838 ret = __sev_dbg_decrypt_user(kvm,
844 ret = __sev_dbg_encrypt_user(kvm,
851 sev_unpin_memory(kvm, src_p, n);
852 sev_unpin_memory(kvm, dst_p, n);
865 static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
867 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
875 if (!sev_guest(kvm))
881 pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
927 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, data, &argp->error);
941 sev_unpin_memory(kvm, pages, n);
945 int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
959 mutex_lock(&kvm->lock);
963 r = sev_guest_init(kvm, &sev_cmd);
966 r = sev_launch_start(kvm, &sev_cmd);
969 r = sev_launch_update_data(kvm, &sev_cmd);
972 r = sev_launch_measure(kvm, &sev_cmd);
975 r = sev_launch_finish(kvm, &sev_cmd);
978 r = sev_guest_status(kvm, &sev_cmd);
981 r = sev_dbg_crypt(kvm, &sev_cmd, true);
984 r = sev_dbg_crypt(kvm, &sev_cmd, false);
987 r = sev_launch_secret(kvm, &sev_cmd);
998 mutex_unlock(&kvm->lock);
1002 int svm_register_enc_region(struct kvm *kvm,
1005 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1009 if (!sev_guest(kvm))
1019 mutex_lock(&kvm->lock);
1020 region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
1023 mutex_unlock(&kvm->lock);
1031 mutex_unlock(&kvm->lock);
1049 find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
1051 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1064 static void __unregister_enc_region_locked(struct kvm *kvm,
1067 sev_unpin_memory(kvm, region->pages, region->npages);
1072 int svm_unregister_enc_region(struct kvm *kvm,
1078 mutex_lock(&kvm->lock);
1080 if (!sev_guest(kvm)) {
1085 region = find_enc_region(kvm, range);
1098 __unregister_enc_region_locked(kvm, region);
1100 mutex_unlock(&kvm->lock);
1104 mutex_unlock(&kvm->lock);
1108 void sev_vm_destroy(struct kvm *kvm)
1110 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1114 if (!sev_guest(kvm))
1117 mutex_lock(&kvm->lock);
1132 __unregister_enc_region_locked(kvm,
1138 mutex_unlock(&kvm->lock);
1140 sev_unbind_asid(kvm, sev->handle);
1180 void sev_guest_memory_reclaimed(struct kvm *kvm)
1182 if (!sev_guest(kvm))
1191 int asid = sev_get_asid(svm->vcpu.kvm);