Lines matching defs:kvm in virt/kvm/eventfd.c (KVM eventfd support)

3  * kvm eventfd support - use eventfd objects to signal various KVM events
13 #include <linux/kvm.h>
27 #include <trace/events/kvm.h>
29 #include <kvm/iodev.h>
36 kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args)
46 struct kvm *kvm = irqfd->kvm;
49 kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1,
51 kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0,
54 kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
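
Lines 46-54 are irqfd_inject(): an edge-triggered GSI is pulsed 1-then-0 under KVM_USERSPACE_IRQ_SOURCE_ID, while the resample source ID on line 54 asserts a level-triggered line that stays up until the guest EOIs. Userspace drives all of this through the KVM_IRQFD vm ioctl; a minimal hedged sketch, assuming an in-kernel irqchip is already set up (vmfd and attach_irqfd are illustrative names):

    #include <sys/eventfd.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>
    #include <string.h>

    /* Attach an eventfd to a GSI; writing to the returned fd injects
     * an edge-triggered interrupt via irqfd_inject() above. */
    static int attach_irqfd(int vmfd, unsigned gsi)
    {
        struct kvm_irqfd irqfd;
        int efd = eventfd(0, EFD_CLOEXEC);

        if (efd < 0)
            return -1;
        memset(&irqfd, 0, sizeof(irqfd));
        irqfd.fd  = efd;
        irqfd.gsi = gsi;
        if (ioctl(vmfd, KVM_IRQFD, &irqfd) < 0)
            return -1;
        return efd;              /* eventfd_write(efd, 1) now injects */
    }
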
63 srcu_read_lock_held(&resampler->kvm->irq_srcu))
76 struct kvm *kvm;
81 kvm = resampler->kvm;
83 kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
86 idx = srcu_read_lock(&kvm->irq_srcu);
88 srcu_read_unlock(&kvm->irq_srcu, idx);
95 struct kvm *kvm = resampler->kvm;
97 mutex_lock(&kvm->irqfds.resampler_lock);
100 synchronize_srcu(&kvm->irq_srcu);
104 kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
106 * synchronize_srcu(&kvm->irq_srcu) already called
109 kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
114 mutex_unlock(&kvm->irqfds.resampler_lock);
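
Lines 63-114 are the resampler machinery for level-triggered irqfds: on guest EOI, irqfd_resampler_ack() de-asserts the line (line 83) and signals each attached irqfd's resamplefd, and irqfd_resampler_shutdown() tears the notifier down once the last irqfd detaches, using synchronize_srcu() so in-flight readers drain first. Userspace opts in with KVM_IRQFD_FLAG_RESAMPLE; a hedged sketch using the same headers as the example above (irq_fd and resample_fd are placeholder eventfds):

    /* KVM asserts the GSI when irq_fd fires and signals resample_fd on
     * guest EOI, at which point userspace re-checks its device state
     * and re-triggers if the condition still holds. */
    static int attach_resample_irqfd(int vmfd, int irq_fd,
                                     int resample_fd, unsigned gsi)
    {
        struct kvm_irqfd irqfd = {
            .fd         = irq_fd,
            .gsi        = gsi,
            .flags      = KVM_IRQFD_FLAG_RESAMPLE,
            .resamplefd = resample_fd,
        };

        return ioctl(vmfd, KVM_IRQFD, &irqfd);
    }
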
125 struct kvm *kvm = irqfd->kvm;
129 synchronize_srcu(&kvm->irq_srcu);
159 /* assumes kvm->irqfds.lock is held */
169 * assumes kvm->irqfds.lock is held
183 struct kvm *kvm, int irq_source_id,
200 struct kvm *kvm = irqfd->kvm;
209 idx = srcu_read_lock(&kvm->irq_srcu);
215 if (kvm_arch_set_irq_inatomic(&irq, kvm,
219 srcu_read_unlock(&kvm->irq_srcu, idx);
227 spin_lock_irqsave(&kvm->irqfds.lock, iflags);
241 spin_unlock_irqrestore(&kvm->irqfds.lock, iflags);
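
Lines 200-241 belong to irqfd_wakeup(), the poll callback that fires when the eventfd is signalled. It runs with a waitqueue lock held and must not sleep, so lines 209-219 first try kvm_arch_set_irq_inatomic() inside an irq_srcu read section and fall back to the irqfd's work item otherwise; lines 227-241 take irqfds.lock to queue a shutdown when the eventfd goes away. A hedged fragment of the fast path (the seqcount retry loop that actually snapshots irq_entry is elided):

    int idx = srcu_read_lock(&kvm->irq_srcu);
    struct kvm_kernel_irq_routing_entry irq = irqfd->irq_entry;

    if (kvm_arch_set_irq_inatomic(&irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
                                  1, false) == -EWOULDBLOCK)
        schedule_work(&irqfd->inject);   /* defer to process context */
    srcu_read_unlock(&kvm->irq_srcu, idx);
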
257 static void irqfd_update(struct kvm *kvm, struct kvm_kernel_irqfd *irqfd)
263 n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi);
288 struct kvm *kvm, unsigned int host_irq,
303 kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
312 if (!kvm_arch_intc_initialized(kvm))
315 if (!kvm_arch_irqfd_allowed(kvm, args))
322 irqfd->kvm = kvm;
327 seqcount_spinlock_init(&irqfd->irq_entry_sc, &kvm->irqfds.lock);
355 mutex_lock(&kvm->irqfds.resampler_lock);
358 &kvm->irqfds.resampler_list, link) {
370 mutex_unlock(&kvm->irqfds.resampler_lock);
374 resampler->kvm = kvm;
380 list_add_rcu(&resampler->link, &kvm->irqfds.resampler_list);
381 kvm_register_irq_ack_notifier(kvm,
387 synchronize_srcu(&kvm->irq_srcu);
389 mutex_unlock(&kvm->irqfds.resampler_lock);
399 spin_lock_irq(&kvm->irqfds.lock);
402 list_for_each_entry(tmp, &kvm->irqfds.items, list) {
407 spin_unlock_irq(&kvm->irqfds.lock);
411 idx = srcu_read_lock(&kvm->irq_srcu);
412 irqfd_update(kvm, irqfd);
414 list_add_tail(&irqfd->list, &kvm->irqfds.items);
416 spin_unlock_irq(&kvm->irqfds.lock);
441 srcu_read_unlock(&kvm->irq_srcu, idx);
467 bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
472 idx = srcu_read_lock(&kvm->irq_srcu);
473 gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
475 hlist_for_each_entry_srcu(kian, &kvm->irq_ack_notifier_list,
476 link, srcu_read_lock_held(&kvm->irq_srcu))
478 srcu_read_unlock(&kvm->irq_srcu, idx);
482 srcu_read_unlock(&kvm->irq_srcu, idx);
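
Lines 467-482 show the reader pattern that recurs throughout this file: srcu_read_lock() pins kvm->irq_srcu, the list is walked with the _srcu iterator, and the lock is dropped on every exit path. A hedged reconstruction of kvm_irq_has_notifier() from just the lines above:

    bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
    {
        struct kvm_irq_ack_notifier *kian;
        int gsi, idx;

        idx = srcu_read_lock(&kvm->irq_srcu);
        gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
        if (gsi != -1)
            hlist_for_each_entry_srcu(kian, &kvm->irq_ack_notifier_list,
                                      link, srcu_read_lock_held(&kvm->irq_srcu))
                if (kian->gsi == gsi) {
                    srcu_read_unlock(&kvm->irq_srcu, idx);
                    return true;
                }
        srcu_read_unlock(&kvm->irq_srcu, idx);
        return false;
    }
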
488 void kvm_notify_acked_gsi(struct kvm *kvm, int gsi)
492 hlist_for_each_entry_srcu(kian, &kvm->irq_ack_notifier_list,
493 link, srcu_read_lock_held(&kvm->irq_srcu))
498 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
504 idx = srcu_read_lock(&kvm->irq_srcu);
505 gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
507 kvm_notify_acked_gsi(kvm, gsi);
508 srcu_read_unlock(&kvm->irq_srcu, idx);
511 void kvm_register_irq_ack_notifier(struct kvm *kvm,
514 mutex_lock(&kvm->irq_lock);
515 hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
516 mutex_unlock(&kvm->irq_lock);
517 kvm_arch_post_irq_ack_notifier_list_update(kvm);
520 void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
523 mutex_lock(&kvm->irq_lock);
525 mutex_unlock(&kvm->irq_lock);
526 synchronize_srcu(&kvm->irq_srcu);
527 kvm_arch_post_irq_ack_notifier_list_update(kvm);
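
Lines 511-527 are the registration pair: an add publishes the notifier with hlist_add_head_rcu() under irq_lock, while removal additionally calls synchronize_srcu() so that any irq_acked callback still running under irq_srcu finishes before the caller may free the notifier. A hedged consumer-side sketch (my_acked and my_notifier are hypothetical; the struct layout follows include/linux/kvm_host.h):

    static void my_acked(struct kvm_irq_ack_notifier *kian)
    {
        /* invoked from kvm_notify_acked_gsi() inside an irq_srcu
         * read-side section; must not block */
    }

    static struct kvm_irq_ack_notifier my_notifier = {
        .gsi       = 24,        /* hypothetical GSI whose EOIs we watch */
        .irq_acked = my_acked,
    };

    kvm_register_irq_ack_notifier(kvm, &my_notifier);
    /* ... my_notifier may only be freed after this returns: */
    kvm_unregister_irq_ack_notifier(kvm, &my_notifier);
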
532 kvm_eventfd_init(struct kvm *kvm)
535 spin_lock_init(&kvm->irqfds.lock);
536 INIT_LIST_HEAD(&kvm->irqfds.items);
537 INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
538 mutex_init(&kvm->irqfds.resampler_lock);
540 INIT_LIST_HEAD(&kvm->ioeventfds);
548 kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
557 spin_lock_irq(&kvm->irqfds.lock);
559 list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
574 spin_unlock_irq(&kvm->irqfds.lock);
588 kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
594 return kvm_irqfd_deassign(kvm, args);
596 return kvm_irqfd_assign(kvm, args);
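
Lines 588-596 are the ioctl dispatcher: the same KVM_IRQFD call detaches an irqfd when KVM_IRQFD_FLAG_DEASSIGN is set, and kvm_irqfd_deassign() (lines 548-574) matches entries by eventfd and gsi. A hedged sketch of the detach side, reusing the names from the earlier attach example:

    struct kvm_irqfd irqfd = {
        .fd    = efd,                     /* fd passed at assign time */
        .gsi   = gsi,                     /* must match the assignment */
        .flags = KVM_IRQFD_FLAG_DEASSIGN,
    };

    ioctl(vmfd, KVM_IRQFD, &irqfd);       /* 0 on successful detach */
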
600 * This function is called as the kvm VM fd is being released. Shutdown all
604 kvm_irqfd_release(struct kvm *kvm)
608 spin_lock_irq(&kvm->irqfds.lock);
610 list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list)
613 spin_unlock_irq(&kvm->irqfds.lock);
617 * since we do not take a kvm* reference.
625 * Caller must invoke synchronize_srcu(&kvm->irq_srcu) afterwards.
627 void kvm_irq_routing_update(struct kvm *kvm)
631 spin_lock_irq(&kvm->irqfds.lock);
633 list_for_each_entry(irqfd, &kvm->irqfds.items, list) {
639 irqfd_update(kvm, irqfd);
645 irqfd->kvm, irqfd->producer->irq,
652 spin_unlock_irq(&kvm->irqfds.lock);
655 bool kvm_notify_irqfd_resampler(struct kvm *kvm,
662 idx = srcu_read_lock(&kvm->irq_srcu);
663 gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
666 &kvm->irqfds.resampler_list, link,
667 srcu_read_lock_held(&kvm->irq_srcu)) {
670 srcu_read_unlock(&kvm->irq_srcu, idx);
675 srcu_read_unlock(&kvm->irq_srcu, idx);
687 irqfd_cleanup_wq = alloc_workqueue("kvm-irqfd-cleanup", 0, 0);
810 /* assumes kvm->slots_lock held */
812 ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p)
816 list_for_each_entry(_p, &kvm->ioeventfds, list)
837 static int kvm_assign_ioeventfd_idx(struct kvm *kvm,
868 mutex_lock(&kvm->slots_lock);
871 if (ioeventfd_check_collision(kvm, p)) {
878 ret = kvm_io_bus_register_dev(kvm, bus_idx, p->addr, p->length,
883 kvm_get_bus(kvm, bus_idx)->ioeventfd_count++;
884 list_add_tail(&p->list, &kvm->ioeventfds);
886 mutex_unlock(&kvm->slots_lock);
891 mutex_unlock(&kvm->slots_lock);
901 kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
916 mutex_lock(&kvm->slots_lock);
918 list_for_each_entry(p, &kvm->ioeventfds, list) {
929 kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
930 bus = kvm_get_bus(kvm, bus_idx);
937 mutex_unlock(&kvm->slots_lock);
944 static int kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
947 int ret = kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
950 kvm_deassign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
956 kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
986 ret = kvm_assign_ioeventfd_idx(kvm, bus_idx, args);
994 ret = kvm_assign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
1002 kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
1008 kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
1011 return kvm_deassign_ioeventfd(kvm, args);
1013 return kvm_assign_ioeventfd(kvm, args);
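
Lines 810-1013 implement the ioeventfd side: registration takes slots_lock, rejects address/length collisions (line 871), and places the eventfd on the I/O bus as a device, so a matching guest write completes entirely in the kernel and merely signals the fd. A hedged userspace doorbell sketch (attach_doorbell and the constants are illustrative):

    #include <sys/eventfd.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>
    #include <string.h>

    /* Register a 4-byte MMIO doorbell: a guest write of 1 to addr
     * signals the returned eventfd instead of exiting to userspace. */
    static int attach_doorbell(int vmfd, __u64 addr)
    {
        struct kvm_ioeventfd ioev;
        int efd = eventfd(0, EFD_CLOEXEC);

        if (efd < 0)
            return -1;
        memset(&ioev, 0, sizeof(ioev));
        ioev.addr      = addr;
        ioev.len       = 4;
        ioev.fd        = efd;
        ioev.flags     = KVM_IOEVENTFD_FLAG_DATAMATCH;
        ioev.datamatch = 1;       /* only writes of exactly 1 count */
        if (ioctl(vmfd, KVM_IOEVENTFD, &ioev) < 0)
            return -1;
        return efd;
    }

Per lines 986-1002, a wildcard MMIO ioeventfd with len == 0 is additionally registered on KVM_FAST_MMIO_BUS, and the primary registration is rolled back if that second step fails.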