Lines matching the identifier kvm in virt/kvm/eventfd.c (KVM eventfd support: irqfds and ioeventfds). Each entry below is prefixed with its line number in that source file.
3 * kvm eventfd support - use eventfd objects to signal various KVM events
13 #include <linux/kvm.h>
27 #include <trace/events/kvm.h>
29 #include <kvm/iodev.h>
36 kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args)
46 struct kvm *kvm = irqfd->kvm;
49 kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1,
51 kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0,
54 kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
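The three kvm_set_irq() calls above (lines 49, 51 and 54) are the core of irqfd injection: an ordinary, edge-triggered irqfd pulses the GSI by raising and immediately lowering it, while a resampled, level-triggered irqfd only asserts it and leaves de-assertion to the resampler ack path further down. A minimal reconstruction of the surrounding irqfd_inject() work handler, assuming the usual upstream shape (only the kvm_set_irq() calls are taken from the listing; the rest is from context and should be checked against the source):

    static void irqfd_inject(struct work_struct *work)
    {
        struct kvm_kernel_irqfd *irqfd =
            container_of(work, struct kvm_kernel_irqfd, inject);
        struct kvm *kvm = irqfd->kvm;

        if (!irqfd->resampler) {
            /* edge: pulse the GSI from the userspace source id */
            kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1, false);
            kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0, false);
        } else {
            /* level: assert only; the resampler de-asserts on guest EOI */
            kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
                        irqfd->gsi, 1, false);
        }
    }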
67 struct kvm *kvm;
73 kvm = resampler->kvm;
75 kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
78 idx = srcu_read_lock(&kvm->irq_srcu);
83 srcu_read_unlock(&kvm->irq_srcu, idx);
90 struct kvm *kvm = resampler->kvm;
92 mutex_lock(&kvm->irqfds.resampler_lock);
95 synchronize_srcu(&kvm->irq_srcu);
99 kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
100 kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
105 mutex_unlock(&kvm->irqfds.resampler_lock);
116 struct kvm *kvm = irqfd->kvm;
120 synchronize_srcu(&kvm->irq_srcu);
150 /* assumes kvm->irqfds.lock is held */
160 * assumes kvm->irqfds.lock is held
174 struct kvm *kvm, int irq_source_id,
191 struct kvm *kvm = irqfd->kvm;
196 idx = srcu_read_lock(&kvm->irq_srcu);
202 if (kvm_arch_set_irq_inatomic(&irq, kvm,
206 srcu_read_unlock(&kvm->irq_srcu, idx);
213 spin_lock_irqsave(&kvm->irqfds.lock, iflags);
227 spin_unlock_irqrestore(&kvm->irqfds.lock, iflags);
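Lines 191-227 belong to irqfd_wakeup(), the poll callback that runs when the eventfd is signaled. On EPOLLIN it consumes the counter and tries kvm_arch_set_irq_inatomic() under irq_srcu (lines 196-206); if the interrupt cannot be delivered atomically it schedules the injection work item instead. On EPOLLHUP it cannot tear the irqfd down directly, since it runs inside the waitqueue lock, so it detaches under irqfds.lock with interrupts saved (lines 213-227) and defers the cleanup. A condensed, hypothetical sketch of the fast path (the seqcount retry loop around irq_entry is abbreviated to a plain copy):

    if (flags & EPOLLIN) {
        struct kvm_kernel_irq_routing_entry irq;
        u64 cnt;
        int idx;

        eventfd_ctx_do_read(irqfd->eventfd, &cnt);
        idx = srcu_read_lock(&kvm->irq_srcu);
        irq = irqfd->irq_entry;   /* really read under a seqcount retry loop */
        if (kvm_arch_set_irq_inatomic(&irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
                                      1, false) == -EWOULDBLOCK)
            schedule_work(&irqfd->inject);
        srcu_read_unlock(&kvm->irq_srcu, idx);
    }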
243 static void irqfd_update(struct kvm *kvm, struct kvm_kernel_irqfd *irqfd)
249 n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi);
274 struct kvm *kvm, unsigned int host_irq,
282 kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
291 if (!kvm_arch_intc_initialized(kvm))
294 if (!kvm_arch_irqfd_allowed(kvm, args))
301 irqfd->kvm = kvm;
306 seqcount_spinlock_init(&irqfd->irq_entry_sc, &kvm->irqfds.lock);
334 mutex_lock(&kvm->irqfds.resampler_lock);
337 &kvm->irqfds.resampler_list, link) {
349 mutex_unlock(&kvm->irqfds.resampler_lock);
353 resampler->kvm = kvm;
359 list_add(&resampler->link, &kvm->irqfds.resampler_list);
360 kvm_register_irq_ack_notifier(kvm,
366 synchronize_srcu(&kvm->irq_srcu);
368 mutex_unlock(&kvm->irqfds.resampler_lock);
378 spin_lock_irq(&kvm->irqfds.lock);
381 list_for_each_entry(tmp, &kvm->irqfds.items, list) {
386 spin_unlock_irq(&kvm->irqfds.lock);
390 idx = srcu_read_lock(&kvm->irq_srcu);
391 irqfd_update(kvm, irqfd);
393 list_add_tail(&irqfd->list, &kvm->irqfds.items);
395 spin_unlock_irq(&kvm->irqfds.lock);
420 srcu_read_unlock(&kvm->irq_srcu, idx);
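kvm_irqfd_assign() above is the registration path: it allocates the irqfd, initializes its routing-entry seqcount against irqfds.lock (line 306), optionally attaches it to a shared per-GSI resampler (lines 334-368), rejects a duplicate registration of the same eventfd (lines 378-386), resolves the routing under irq_srcu, and publishes it on kvm->irqfds.items (lines 390-395). The resampler gives userspace level-triggered semantics: a second eventfd is signaled when the guest EOIs the interrupt. A hedged userspace sketch of that mode (vm_fd comes from KVM_CREATE_VM; the helper name is illustrative):

    #include <string.h>
    #include <sys/eventfd.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Illustrative helper: register a level-triggered (resampled) irqfd. */
    static int wire_resampled_irqfd(int vm_fd, unsigned int gsi,
                                    int *trigger_fd, int *resample_fd)
    {
        struct kvm_irqfd req;

        *trigger_fd  = eventfd(0, EFD_CLOEXEC);
        *resample_fd = eventfd(0, EFD_CLOEXEC);
        if (*trigger_fd < 0 || *resample_fd < 0)
            return -1;

        memset(&req, 0, sizeof(req));
        req.fd         = *trigger_fd;   /* signaled to assert the GSI */
        req.gsi        = gsi;
        req.flags      = KVM_IRQFD_FLAG_RESAMPLE;
        req.resamplefd = *resample_fd;  /* KVM signals this on guest EOI */

        return ioctl(vm_fd, KVM_IRQFD, &req);
    }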
446 bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
451 idx = srcu_read_lock(&kvm->irq_srcu);
452 gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
454 hlist_for_each_entry_srcu(kian, &kvm->irq_ack_notifier_list,
455 link, srcu_read_lock_held(&kvm->irq_srcu))
457 srcu_read_unlock(&kvm->irq_srcu, idx);
461 srcu_read_unlock(&kvm->irq_srcu, idx);
467 void kvm_notify_acked_gsi(struct kvm *kvm, int gsi)
471 hlist_for_each_entry_srcu(kian, &kvm->irq_ack_notifier_list,
472 link, srcu_read_lock_held(&kvm->irq_srcu))
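The SRCU-protected hlist walk at lines 471-472 is essentially the whole of kvm_notify_acked_gsi(): every ack notifier registered for the acked GSI gets its callback. A reconstruction, assuming the upstream structure around the listed lines:

    void kvm_notify_acked_gsi(struct kvm *kvm, int gsi)
    {
        struct kvm_irq_ack_notifier *kian;

        hlist_for_each_entry_srcu(kian, &kvm->irq_ack_notifier_list,
                                  link, srcu_read_lock_held(&kvm->irq_srcu))
            if (kian->gsi == gsi)
                kian->irq_acked(kian);
    }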
477 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
483 idx = srcu_read_lock(&kvm->irq_srcu);
484 gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
486 kvm_notify_acked_gsi(kvm, gsi);
487 srcu_read_unlock(&kvm->irq_srcu, idx);
490 void kvm_register_irq_ack_notifier(struct kvm *kvm,
493 mutex_lock(&kvm->irq_lock);
494 hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
495 mutex_unlock(&kvm->irq_lock);
496 kvm_arch_post_irq_ack_notifier_list_update(kvm);
499 void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
502 mutex_lock(&kvm->irq_lock);
504 mutex_unlock(&kvm->irq_lock);
505 synchronize_srcu(&kvm->irq_srcu);
506 kvm_arch_post_irq_ack_notifier_list_update(kvm);
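Registration takes kvm->irq_lock and inserts with hlist_add_head_rcu() so the readers above never block; unregistration additionally waits out an irq_srcu grace period (line 505) before the caller may free the notifier. A hypothetical in-kernel user (my_irq_acked and my_setup are illustrative names, not functions from this file):

    static void my_irq_acked(struct kvm_irq_ack_notifier *kian)
    {
        /* Runs under kvm->irq_srcu once the guest ACKs the GSI;
         * a device model would de-assert its level source here. */
    }

    static void my_setup(struct kvm *kvm, unsigned int gsi,
                         struct kvm_irq_ack_notifier *kian)
    {
        kian->gsi = gsi;
        kian->irq_acked = my_irq_acked;
        kvm_register_irq_ack_notifier(kvm, kian);
        /* Later: kvm_unregister_irq_ack_notifier(kvm, kian);
         * kian may be freed only after that call returns. */
    }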
511 kvm_eventfd_init(struct kvm *kvm)
514 spin_lock_init(&kvm->irqfds.lock);
515 INIT_LIST_HEAD(&kvm->irqfds.items);
516 INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
517 mutex_init(&kvm->irqfds.resampler_lock);
519 INIT_LIST_HEAD(&kvm->ioeventfds);
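The five listed lines are essentially the whole initializer. Assembled (upstream guards the irqfds fields with the architecture's irqfd config option, elided here):

    void kvm_eventfd_init(struct kvm *kvm)
    {
        spin_lock_init(&kvm->irqfds.lock);
        INIT_LIST_HEAD(&kvm->irqfds.items);
        INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
        mutex_init(&kvm->irqfds.resampler_lock);
        INIT_LIST_HEAD(&kvm->ioeventfds);
    }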
527 kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
536 spin_lock_irq(&kvm->irqfds.lock);
538 list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
553 spin_unlock_irq(&kvm->irqfds.lock);
567 kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
573 return kvm_irqfd_deassign(kvm, args);
575 return kvm_irqfd_assign(kvm, args);
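kvm_irqfd() is the ioctl dispatcher: KVM_IRQFD_FLAG_DEASSIGN routes to kvm_irqfd_deassign(), everything else to kvm_irqfd_assign(). From userspace the same structure drives both directions; a hedged sketch using the headers from the earlier irqfd example (set_irqfd is an illustrative name):

    /* Illustrative: assign, or later deassign, a plain edge-triggered irqfd. */
    static int set_irqfd(int vm_fd, int efd, unsigned int gsi, int deassign)
    {
        struct kvm_irqfd req;

        memset(&req, 0, sizeof(req));
        req.fd    = efd;
        req.gsi   = gsi;
        req.flags = deassign ? KVM_IRQFD_FLAG_DEASSIGN : 0;

        /* While assigned, write(efd, &(uint64_t){1}, 8) injects the GSI. */
        return ioctl(vm_fd, KVM_IRQFD, &req);
    }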
579 * This function is called as the kvm VM fd is being released. Shutdown all
583 kvm_irqfd_release(struct kvm *kvm)
587 spin_lock_irq(&kvm->irqfds.lock);
589 list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list)
592 spin_unlock_irq(&kvm->irqfds.lock);
596 * since we do not take a kvm* reference.
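kvm_irqfd_release() runs when the VM file descriptor is torn down: under irqfds.lock it deactivates every remaining irqfd, then, per the comment at line 596, it must flush the cleanup workqueue before returning, because the shutdown work holds no kvm reference and must not outlive the VM. A reconstruction under those assumptions:

    void kvm_irqfd_release(struct kvm *kvm)
    {
        struct kvm_kernel_irqfd *irqfd, *tmp;

        spin_lock_irq(&kvm->irqfds.lock);
        list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list)
            irqfd_deactivate(irqfd);
        spin_unlock_irq(&kvm->irqfds.lock);

        /* Wait for released irqfds to finish before the VM goes away,
         * since the shutdown work does not take a kvm reference. */
        flush_workqueue(irqfd_cleanup_wq);
    }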
604 * Caller must invoke synchronize_srcu(&kvm->irq_srcu) afterwards.
606 void kvm_irq_routing_update(struct kvm *kvm)
610 spin_lock_irq(&kvm->irqfds.lock);
612 list_for_each_entry(irqfd, &kvm->irqfds.items, list) {
613 irqfd_update(kvm, irqfd);
618 irqfd->kvm, irqfd->producer->irq,
625 spin_unlock_irq(&kvm->irqfds.lock);
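kvm_irq_routing_update() runs whenever userspace replaces the GSI routing table: under irqfds.lock it re-resolves every irqfd's cached routing entry via irqfd_update() and, when IRQ bypass is in use, refreshes the posted-interrupt producer binding (line 618). The ioctl that triggers it, sketched from userspace for the x86 irqchip case (helper name illustrative):

    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Illustrative: route one GSI to an IOAPIC pin, which makes KVM call
     * kvm_irq_routing_update() and refresh all registered irqfds. */
    static int route_gsi_to_ioapic_pin(int vm_fd, unsigned int gsi,
                                       unsigned int pin)
    {
        struct kvm_irq_routing *r;
        int ret;

        r = calloc(1, sizeof(*r) + sizeof(r->entries[0]));
        if (!r)
            return -1;
        r->nr = 1;
        r->entries[0].gsi = gsi;
        r->entries[0].type = KVM_IRQ_ROUTING_IRQCHIP;
        r->entries[0].u.irqchip.irqchip = KVM_IRQCHIP_IOAPIC;
        r->entries[0].u.irqchip.pin = pin;
        ret = ioctl(vm_fd, KVM_SET_GSI_ROUTING, r);
        free(r);
        return ret;
    }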
635 irqfd_cleanup_wq = alloc_workqueue("kvm-irqfd-cleanup", 0, 0);
758 /* assumes kvm->slots_lock held */
760 ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p)
764 list_for_each_entry(_p, &kvm->ioeventfds, list)
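Registration scans the per-VM ioeventfds list under kvm->slots_lock to reject a duplicate of an existing registration. A hedged, abbreviated sketch of the predicate (the real check also distinguishes wildcard from datamatch registrations):

    /* Hedged sketch: true if an existing ioeventfd already covers *p.
     * Wildcard/datamatch subtleties of the upstream check are omitted. */
    static bool ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p)
    {
        struct _ioeventfd *_p;

        list_for_each_entry(_p, &kvm->ioeventfds, list)
            if (_p->bus_idx == p->bus_idx &&
                _p->addr == p->addr &&
                _p->length == p->length)
                return true;

        return false;
    }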
785 static int kvm_assign_ioeventfd_idx(struct kvm *kvm,
816 mutex_lock(&kvm->slots_lock);
819 if (ioeventfd_check_collision(kvm, p)) {
826 ret = kvm_io_bus_register_dev(kvm, bus_idx, p->addr, p->length,
831 kvm_get_bus(kvm, bus_idx)->ioeventfd_count++;
832 list_add_tail(&p->list, &kvm->ioeventfds);
834 mutex_unlock(&kvm->slots_lock);
839 mutex_unlock(&kvm->slots_lock);
849 kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
864 mutex_lock(&kvm->slots_lock);
866 list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {
878 kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
879 bus = kvm_get_bus(kvm, bus_idx);
887 mutex_unlock(&kvm->slots_lock);
894 static int kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
897 int ret = kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
900 kvm_deassign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
906 kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
936 ret = kvm_assign_ioeventfd_idx(kvm, bus_idx, args);
944 ret = kvm_assign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
952 kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
958 kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
961 return kvm_deassign_ioeventfd(kvm, args);
963 return kvm_assign_ioeventfd(kvm, args);
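kvm_ioeventfd() dispatches on KVM_IOEVENTFD_FLAG_DEASSIGN the same way kvm_irqfd() does. On assignment, note lines 936-952: a length-0 MMIO registration is mirrored onto KVM_FAST_MMIO_BUS, and the first registration is rolled back if the second fails. The userspace view, as a hedged sketch (helper name illustrative):

    #include <string.h>
    #include <sys/eventfd.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Illustrative: a 4-byte MMIO doorbell at addr; guest writes signal the
     * eventfd directly instead of taking a heavyweight exit to userspace. */
    static int wire_ioeventfd(int vm_fd, __u64 addr)
    {
        struct kvm_ioeventfd ioe;
        int efd = eventfd(0, EFD_CLOEXEC);

        if (efd < 0)
            return -1;
        memset(&ioe, 0, sizeof(ioe));
        ioe.addr  = addr;
        ioe.len   = 4;   /* len 0 would request the wildcard fast-MMIO match */
        ioe.fd    = efd;
        ioe.flags = 0;   /* or KVM_IOEVENTFD_FLAG_DATAMATCH with .datamatch */

        if (ioctl(vm_fd, KVM_IOEVENTFD, &ioe) < 0)
            return -1;
        return efd;      /* read(efd, ...) observes guest writes to addr */
    }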