Lines matching defs:kvm in arch/x86/kvm/xen.c (each entry is the source line number followed by the matched line)

19 #include <trace/events/kvm.h>
31 static int kvm_xen_set_evtchn(struct kvm_xen_evtchn *xe, struct kvm *kvm);
32 static int kvm_xen_setattr_evtchn(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
37 static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn)
39 struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
46 int idx = srcu_read_lock(&kvm->srcu);
62 wall_nsec = ktime_get_real_ns() - get_kvmclock_ns(kvm);
83 if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
109 kvm_make_all_cpus_request(kvm, KVM_REQ_MASTERCLOCK_UPDATE);
112 srcu_read_unlock(&kvm->srcu, idx);
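
The cluster above (source lines 37-112) is the shared_info setup path. Two patterns are worth noting: memslot-derived state is only touched inside a kvm->srcu read-side section, and the guest wall-clock epoch is derived as host real time minus the current kvmclock reading. A minimal sketch of that shape; only kvm->srcu, the two clock helpers, and KVM_REQ_MASTERCLOCK_UPDATE are taken from the listing, the rest is illustrative:

    static void shinfo_wallclock_sketch(struct kvm *kvm)
    {
            int idx = srcu_read_lock(&kvm->srcu);
            /* Guest wall clock at the kvmclock epoch = host real time now,
             * minus the nanoseconds the kvmclock has already counted. */
            u64 wall_nsec = ktime_get_real_ns() - get_kvmclock_ns(kvm);

            /* ... write wall_nsec into the shared_info wc_* fields ... */
            (void)wall_nsec;

            /* Force a clock refresh on every vCPU, as line 109 does. */
            kvm_make_all_cpus_request(kvm, KVM_REQ_MASTERCLOCK_UPDATE);
            srcu_read_unlock(&kvm->srcu, idx);
    }
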
126 kvm_xen_set_evtchn(&e, vcpu->kvm);
248 if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode) {
312 if (v->kvm->arch.xen.runstate_update_flag)
376 if (v->kvm->arch.xen.runstate_update_flag) {
438 mark_page_dirty_in_slot(v->kvm, gpc1->memslot, gpc1->gpa >> PAGE_SHIFT);
440 mark_page_dirty_in_slot(v->kvm, gpc2->memslot, gpc2->gpa >> PAGE_SHIFT);
446 u64 now = get_kvmclock_ns(v->kvm);
487 WARN_ON_ONCE(!kvm_irq_delivery_to_apic_fast(v->kvm, NULL, &irq, &r, NULL));
522 if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode) {
552 mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
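
Lines 248-552 belong to the vcpu_info/runstate update paths. The paired mark_page_dirty_in_slot() calls at 438/440 exist because a runstate structure may straddle two guest pages (hence two pfn caches), and runstate_update_flag gates Xen's XEN_RUNSTATE_UPDATE handshake. A hedged sketch of that handshake; the struct and flag are Xen ABI (kernel context assumed, xen/interface/vcpu.h provides both), while the function itself is illustrative:

    static void runstate_update_sketch(struct vcpu_runstate_info *rs,
                                       int state, u64 entry_time,
                                       const u64 *times)
    {
            /* Set the in-flight bit so the guest retries its read... */
            rs->state_entry_time = entry_time | XEN_RUNSTATE_UPDATE;
            smp_wmb();
            rs->state = state;
            memcpy(rs->time, times, sizeof(rs->time));
            smp_wmb();
            /* ...then publish the now-consistent snapshot. */
            rs->state_entry_time = entry_time;
    }
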
604 int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
614 mutex_lock(&kvm->arch.xen.xen_lock);
615 kvm->arch.xen.long_mode = !!data->u.long_mode;
616 mutex_unlock(&kvm->arch.xen.xen_lock);
622 mutex_lock(&kvm->arch.xen.xen_lock);
623 r = kvm_xen_shared_info_init(kvm, data->u.shared_info.gfn);
624 mutex_unlock(&kvm->arch.xen.xen_lock);
631 mutex_lock(&kvm->arch.xen.xen_lock);
632 kvm->arch.xen.upcall_vector = data->u.vector;
633 mutex_unlock(&kvm->arch.xen.xen_lock);
639 r = kvm_xen_setattr_evtchn(kvm, data);
643 mutex_lock(&kvm->arch.xen.xen_lock);
644 kvm->arch.xen.xen_version = data->u.xen_version;
645 mutex_unlock(&kvm->arch.xen.xen_lock);
654 mutex_lock(&kvm->arch.xen.xen_lock);
655 kvm->arch.xen.runstate_update_flag = !!data->u.runstate_update_flag;
656 mutex_unlock(&kvm->arch.xen.xen_lock);
667 int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
671 mutex_lock(&kvm->arch.xen.xen_lock);
675 data->u.long_mode = kvm->arch.xen.long_mode;
680 if (kvm->arch.xen.shinfo_cache.active)
681 data->u.shared_info.gfn = gpa_to_gfn(kvm->arch.xen.shinfo_cache.gpa);
688 data->u.vector = kvm->arch.xen.upcall_vector;
693 data->u.xen_version = kvm->arch.xen.xen_version;
702 data->u.runstate_update_flag = kvm->arch.xen.runstate_update_flag;
710 mutex_unlock(&kvm->arch.xen.xen_lock);
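
Everything from 604 to 710 is the VM-wide attribute pair; each attribute read or write is bracketed by the kvm->arch.xen.xen_lock mutex. From userspace these are reached via the KVM_XEN_HVM_SET_ATTR/KVM_XEN_HVM_GET_ATTR vm ioctls. A hedged usage sketch, where vm_fd is assumed to be an open KVM VM descriptor:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int set_xen_long_mode(int vm_fd, int on)
    {
            struct kvm_xen_hvm_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.type = KVM_XEN_ATTR_TYPE_LONG_MODE;  /* mirrors line 615 */
            attr.u.long_mode = on;
            return ioctl(vm_fd, KVM_XEN_HVM_SET_ATTR, &attr);
    }
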
718 mutex_lock(&vcpu->kvm->arch.xen.xen_lock);
719 idx = srcu_read_lock(&vcpu->kvm->srcu);
776 if (IS_ENABLED(CONFIG_64BIT) && vcpu->kvm->arch.xen.long_mode)
834 if (get_kvmclock_ns(vcpu->kvm) <
875 if (get_kvmclock_ns(vcpu->kvm) <
927 get_kvmclock_ns(vcpu->kvm));
945 srcu_read_unlock(&vcpu->kvm->srcu, idx);
946 mutex_unlock(&vcpu->kvm->arch.xen.xen_lock);
954 mutex_lock(&vcpu->kvm->arch.xen.xen_lock);
1037 mutex_unlock(&vcpu->kvm->arch.xen.xen_lock);
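
The per-vCPU attribute paths (718-1037) nest their locks consistently: xen_lock first, then the srcu read side, released in reverse order (compare lines 718/719 with 945/946). The bracket, sketched:

    static void vcpu_attr_lock_sketch(struct kvm_vcpu *vcpu)
    {
            int idx;

            mutex_lock(&vcpu->kvm->arch.xen.xen_lock);
            idx = srcu_read_lock(&vcpu->kvm->srcu);

            /* ... get or set the per-vCPU attribute here ... */

            srcu_read_unlock(&vcpu->kvm->srcu, idx);
            mutex_unlock(&vcpu->kvm->arch.xen.xen_lock);
    }
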
1043 struct kvm *kvm = vcpu->kvm;
1049 vcpu->kvm->arch.xen.long_mode = lm;
1057 if (kvm_xen_hypercall_enabled(kvm)) {
1088 hva_t blob_addr = lm ? kvm->arch.xen_hvm_config.blob_addr_64
1089 : kvm->arch.xen_hvm_config.blob_addr_32;
1090 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
1091 : kvm->arch.xen_hvm_config.blob_size_32;
1112 int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc)
1130 mutex_lock(&kvm->arch.xen.xen_lock);
1132 if (xhc->msr && !kvm->arch.xen_hvm_config.msr)
1134 else if (!xhc->msr && kvm->arch.xen_hvm_config.msr)
1137 memcpy(&kvm->arch.xen_hvm_config, xhc, sizeof(*xhc));
1139 mutex_unlock(&kvm->arch.xen.xen_lock);
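
Lines 1043-1139 cover hypercall-page setup: the MSR handler copies the 32- or 64-bit hypercall blob depending on guest long mode, and kvm_xen_hvm_config() installs the configuration under xen_lock (the msr checks at 1132/1134 toggle MSR interception on and off). A hedged userspace sketch of enabling this; the MSR index is a conventional choice, not mandated by the API:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int enable_xen_hypercalls(int vm_fd)
    {
            struct kvm_xen_hvm_config cfg;

            memset(&cfg, 0, sizeof(cfg));
            cfg.flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL;
            cfg.msr = 0x40000200;   /* guest writes the hypercall-page GPA here */
            return ioctl(vm_fd, KVM_XEN_HVM_CONFIG, &cfg);
    }
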
1159 static inline int max_evtchn_port(struct kvm *kvm)
1161 if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode)
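
max_evtchn_port() (1159-1161) caps the 2-level event-channel port space by guest ABI; the else branch does not match the search query, but presumably returns the compat limit. A sketch of the likely full body:

    static inline int max_evtchn_port(struct kvm *kvm)
    {
            if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode)
                    return EVTCHN_2L_NR_CHANNELS;        /* 4096: 64-bit 2-level ABI */
            else
                    return COMPAT_EVTCHN_2L_NR_CHANNELS; /* 1024: 32-bit guests */
    }
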
1170 struct kvm *kvm = vcpu->kvm;
1171 struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
1177 idx = srcu_read_lock(&kvm->srcu);
1183 if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
1200 srcu_read_unlock(&kvm->srcu, idx);
1214 !(vcpu->kvm->arch.xen_hvm_config.flags & KVM_XEN_HVM_CONFIG_EVTCHN_SEND))
1267 if (ports[i] >= max_evtchn_port(vcpu->kvm)) {
1278 set_bit(vcpu->vcpu_idx, vcpu->kvm->arch.xen.poll_mask);
1299 clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.xen.poll_mask);
1377 delta = oneshot.timeout_abs_ns - get_kvmclock_ns(vcpu->kvm);
1407 uint64_t guest_now = get_kvmclock_ns(vcpu->kvm);
1480 if (params[0] == XENVER_version && vcpu->kvm->arch.xen.xen_version) {
1481 r = vcpu->kvm->arch.xen.xen_version;
1536 test_and_clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.xen.poll_mask)) {
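
Lines 1214-1536 implement SCHEDOP_poll: each polled port is bounds-checked against max_evtchn_port(), and the vCPU advertises itself in poll_mask for the duration of the wait so that event delivery can wake it (line 1536 is the waker's side). The bracket, sketched:

    static void schedop_poll_bracket_sketch(struct kvm_vcpu *vcpu)
    {
            set_bit(vcpu->vcpu_idx, vcpu->kvm->arch.xen.poll_mask);

            /* ... block until a polled event channel fires, or timeout ... */

            clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.xen.poll_mask);
    }
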
1552 int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm)
1554 struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
1564 vcpu = kvm_get_vcpu(kvm, vcpu_idx);
1566 vcpu = kvm_get_vcpu_by_id(kvm, xe->vcpu_id);
1575 if (xe->port >= max_evtchn_port(kvm))
1580 idx = srcu_read_lock(&kvm->srcu);
1586 if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
1627 if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
1651 srcu_read_unlock(&kvm->srcu, idx);
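
kvm_xen_set_evtchn_fast() (1552-1651) is the lockless delivery path: it resolves the target vCPU by cached index with a by-ID fallback, then sets the pending bit using the word size that matches guest long mode, all under the srcu read lock. The lookup shape, sketched (field names are from struct kvm_xen_evtchn; the helper is illustrative):

    static struct kvm_vcpu *evtchn_target_sketch(struct kvm *kvm,
                                                 struct kvm_xen_evtchn *xe)
    {
            struct kvm_vcpu *vcpu = NULL;

            if (xe->vcpu_idx >= 0)
                    vcpu = kvm_get_vcpu(kvm, xe->vcpu_idx);   /* fast: hint */
            if (!vcpu || vcpu->vcpu_id != xe->vcpu_id) {
                    vcpu = kvm_get_vcpu_by_id(kvm, xe->vcpu_id); /* slow: scan */
                    if (vcpu)
                            xe->vcpu_idx = vcpu->vcpu_idx;    /* cache the hint */
            }
            return vcpu;
    }
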
1661 static int kvm_xen_set_evtchn(struct kvm_xen_evtchn *xe, struct kvm *kvm)
1666 rc = kvm_xen_set_evtchn_fast(xe, kvm);
1670 if (current->mm != kvm->mm) {
1678 kthread_use_mm(kvm->mm);
1682 mutex_lock(&kvm->arch.xen.xen_lock);
1699 struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
1702 rc = kvm_xen_set_evtchn_fast(xe, kvm);
1706 idx = srcu_read_lock(&kvm->srcu);
1708 srcu_read_unlock(&kvm->srcu, idx);
1711 mutex_unlock(&kvm->arch.xen.xen_lock);
1714 kthread_unuse_mm(kvm->mm);
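
kvm_xen_set_evtchn() (1661-1714) wraps the fast path with a sleeping fallback: on -EWOULDBLOCK it attaches the VM's mm when running off a kthread (the irqfd workqueue is the expected caller), takes xen_lock, and alternates pfn-cache refreshes with retries. Its shape, sketched; refresh_shinfo_sketch() is a hypothetical stand-in for the gpc refresh under srcu at 1706-1708:

    static int refresh_shinfo_sketch(struct kvm *kvm);  /* hypothetical */

    static int set_evtchn_slow_sketch(struct kvm_xen_evtchn *xe, struct kvm *kvm)
    {
            bool mm_borrowed = current->mm != kvm->mm;
            int rc = kvm_xen_set_evtchn_fast(xe, kvm);

            if (rc != -EWOULDBLOCK)
                    return rc;

            /* The real code only borrows the mm from a true kthread. */
            if (mm_borrowed)
                    kthread_use_mm(kvm->mm);

            mutex_lock(&kvm->arch.xen.xen_lock);
            for (;;) {
                    rc = kvm_xen_set_evtchn_fast(xe, kvm);
                    if (rc != -EWOULDBLOCK)
                            break;
                    rc = refresh_shinfo_sketch(kvm);
                    if (rc)
                            break;
            }
            mutex_unlock(&kvm->arch.xen.xen_lock);

            if (mm_borrowed)
                    kthread_unuse_mm(kvm->mm);
            return rc;
    }
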
1720 static int evtchn_set_fn(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
1726 return kvm_xen_set_evtchn(&e->xen_evtchn, kvm);
1733 int kvm_xen_setup_evtchn(struct kvm *kvm,
1740 if (ue->u.xen_evtchn.port >= max_evtchn_port(kvm))
1755 vcpu = kvm_get_vcpu_by_id(kvm, ue->u.xen_evtchn.vcpu);
1772 int kvm_xen_hvm_evtchn_send(struct kvm *kvm, struct kvm_irq_routing_xen_evtchn *uxe)
1777 if (!uxe->port || uxe->port >= max_evtchn_port(kvm))
1789 ret = kvm_xen_set_evtchn(&e, kvm);
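
Lines 1720-1789 wire event channels into irqfd routing and the direct-injection ioctl. From userspace, kvm_xen_hvm_evtchn_send() is reached via KVM_XEN_HVM_EVTCHN_SEND; a hedged sketch, with port and vcpu as arbitrary example values supplied by the caller:

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int send_evtchn(int vm_fd, unsigned int port, unsigned int vcpu)
    {
            struct kvm_irq_routing_xen_evtchn evt = {
                    .port = port,
                    .vcpu = vcpu,
                    .priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL,
            };

            return ioctl(vm_fd, KVM_XEN_HVM_EVTCHN_SEND, &evt);
    }
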
1819 static int kvm_xen_eventfd_update(struct kvm *kvm,
1827 mutex_lock(&kvm->arch.xen.xen_lock);
1828 evtchnfd = idr_find(&kvm->arch.xen.evtchn_ports, port);
1858 mutex_unlock(&kvm->arch.xen.xen_lock);
1866 static int kvm_xen_eventfd_assign(struct kvm *kvm,
1887 if (data->u.evtchn.deliver.port.port >= max_evtchn_port(kvm))
1921 mutex_lock(&kvm->arch.xen.xen_lock);
1922 ret = idr_alloc(&kvm->arch.xen.evtchn_ports, evtchnfd, port, port + 1,
1924 mutex_unlock(&kvm->arch.xen.xen_lock);
1938 static int kvm_xen_eventfd_deassign(struct kvm *kvm, u32 port)
1942 mutex_lock(&kvm->arch.xen.xen_lock);
1943 evtchnfd = idr_remove(&kvm->arch.xen.evtchn_ports, port);
1944 mutex_unlock(&kvm->arch.xen.xen_lock);
1949 synchronize_srcu(&kvm->srcu);
1956 static int kvm_xen_eventfd_reset(struct kvm *kvm)
1962 mutex_lock(&kvm->arch.xen.xen_lock);
1969 idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i)
1974 mutex_unlock(&kvm->arch.xen.xen_lock);
1979 idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) {
1981 idr_remove(&kvm->arch.xen.evtchn_ports, evtchnfd->send_port);
1983 mutex_unlock(&kvm->arch.xen.xen_lock);
1985 synchronize_srcu(&kvm->srcu);
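
The eventfd management block (1819-1985) shows the lifetime rule for event-channel entries: the IDR is mutated only under xen_lock, while the delivery path looks entries up under kvm->srcu (the comment at 2032 says as much), so every removal is followed by synchronize_srcu() before the entry is freed. Sketched; struct evtchnfd is the file-local type from xen.c:

    static int evtchn_deassign_sketch(struct kvm *kvm, u32 port)
    {
            struct evtchnfd *evtchnfd;

            mutex_lock(&kvm->arch.xen.xen_lock);
            evtchnfd = idr_remove(&kvm->arch.xen.evtchn_ports, port);
            mutex_unlock(&kvm->arch.xen.xen_lock);

            if (!evtchnfd)
                    return -ENOENT;

            synchronize_srcu(&kvm->srcu);  /* no SRCU reader can still see it */
            /* ... drop the eventfd reference, then kfree(evtchnfd) ... */
            return 0;
    }
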
1998 static int kvm_xen_setattr_evtchn(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
2003 return kvm_xen_eventfd_reset(kvm);
2005 if (!port || port >= max_evtchn_port(kvm))
2009 return kvm_xen_eventfd_deassign(kvm, port);
2011 return kvm_xen_eventfd_update(kvm, data);
2015 return kvm_xen_eventfd_assign(kvm, data);
2032 * evtchnfd is protected by kvm->srcu; the idr lookup instead
2036 evtchnfd = idr_find(&vcpu->kvm->arch.xen.evtchn_ports, send.port);
2042 int ret = kvm_xen_set_evtchn(&evtchnfd->deliver.port, vcpu->kvm);
2060 kvm_gpc_init(&vcpu->arch.xen.runstate_cache, vcpu->kvm, NULL,
2062 kvm_gpc_init(&vcpu->arch.xen.runstate2_cache, vcpu->kvm, NULL,
2064 kvm_gpc_init(&vcpu->arch.xen.vcpu_info_cache, vcpu->kvm, NULL,
2066 kvm_gpc_init(&vcpu->arch.xen.vcpu_time_info_cache, vcpu->kvm, NULL,
2106 void kvm_xen_init_vm(struct kvm *kvm)
2108 mutex_init(&kvm->arch.xen.xen_lock);
2109 idr_init(&kvm->arch.xen.evtchn_ports);
2110 kvm_gpc_init(&kvm->arch.xen.shinfo_cache, kvm, NULL, KVM_HOST_USES_PFN);
2113 void kvm_xen_destroy_vm(struct kvm *kvm)
2118 kvm_gpc_deactivate(&kvm->arch.xen.shinfo_cache);
2120 idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) {
2125 idr_destroy(&kvm->arch.xen.evtchn_ports);
2127 if (kvm->arch.xen_hvm_config.msr)
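
Finally, lines 2060-2127 show the init/teardown symmetry: the per-vCPU pfn caches and the VM-wide mutex, IDR, and shinfo cache are initialized up front, and kvm_xen_destroy_vm() unwinds them. A sketch of the teardown order; the eventfd-release details are abbreviated, and the msr check at 2127 presumably drops the static key that hypercall interception took:

    static void xen_destroy_sketch(struct kvm *kvm)
    {
            struct evtchnfd *evtchnfd;
            int i;

            kvm_gpc_deactivate(&kvm->arch.xen.shinfo_cache);

            idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) {
                    /* ... put the eventfd ctx for userspace-type entries ... */
                    kfree(evtchnfd);
            }
            idr_destroy(&kvm->arch.xen.evtchn_ports);
    }
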