Lines matching refs:irqfd (a cross-reference listing of KVM's irqfd code; the leading number on each line is its line number in the source file)
44 struct kvm_kernel_irqfd *irqfd =
46 struct kvm *kvm = irqfd->kvm;
48 if (!irqfd->resampler) {
49 kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1,
51 kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0,
55 irqfd->gsi, 1, false);
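The matches above are from irqfd_inject(), the work-queue handler that actually raises the interrupt. A sketch of the whole function reconstructed around the matched lines; the declarations and the else-branch source ID are assumptions based on mainline KVM, not part of this listing:

static void irqfd_inject(struct work_struct *work)
{
        struct kvm_kernel_irqfd *irqfd =
                container_of(work, struct kvm_kernel_irqfd, inject);
        struct kvm *kvm = irqfd->kvm;

        if (!irqfd->resampler) {
                /* Edge-triggered: pulse the GSI, raise then lower. */
                kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1,
                            false);
                kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0,
                            false);
        } else {
                /* Level-triggered: assert only; irqfd_resampler_ack()
                 * de-asserts the line when the guest EOIs. */
                kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
                            irqfd->gsi, 1, false);
        }
}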
60 struct kvm_kernel_irqfd *irqfd;
62 list_for_each_entry_srcu(irqfd, &resampler->list, resampler_link,
64 eventfd_signal(irqfd->resamplefd, 1);
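Lines 60-64 come from the resampler's IRQ ack notifier: on guest EOI it de-asserts the shared level-triggered line and signals every attached resamplefd so userspace (VFIO, for example) can re-check its device and re-trigger if the source is still pending. A hedged reconstruction; the de-assert call and locals are assumptions from mainline:

static void irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
{
        struct kvm_kernel_irqfd_resampler *resampler =
                container_of(kian, struct kvm_kernel_irqfd_resampler, notifier);
        struct kvm *kvm = resampler->kvm;
        struct kvm_kernel_irqfd *irqfd;
        int idx;

        /* Drop the line first; userspace re-asserts it if still pending. */
        kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
                    resampler->notifier.gsi, 0, false);

        /* SRCU read side: walking the list never blocks list writers. */
        idx = srcu_read_lock(&kvm->irq_srcu);
        list_for_each_entry_srcu(irqfd, &resampler->list, resampler_link,
                                 srcu_read_lock_held(&kvm->irq_srcu))
                eventfd_signal(irqfd->resamplefd, 1);
        srcu_read_unlock(&kvm->irq_srcu, idx);
}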
92 irqfd_resampler_shutdown(struct kvm_kernel_irqfd *irqfd)
94 struct kvm_kernel_irqfd_resampler *resampler = irqfd->resampler;
99 list_del_rcu(&irqfd->resampler_link);
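The shutdown side (lines 92-99) is the classic RCU removal pattern paired with the SRCU walk above: unlink under a mutex, wait out the readers, and only then consider freeing. A sketch of the rest of the function, with the synchronize and teardown steps assumed from mainline:

        struct kvm *kvm = resampler->kvm;

        mutex_lock(&kvm->irqfds.resampler_lock);

        list_del_rcu(&irqfd->resampler_link);
        synchronize_srcu(&kvm->irq_srcu);       /* wait for ack-side walkers */

        if (list_empty(&resampler->list)) {
                /* Last sharer gone: drop the notifier and the line itself. */
                kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
                kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
                            resampler->notifier.gsi, 0, false);
                kfree(resampler);
        }

        mutex_unlock(&kvm->irqfds.resampler_lock);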
123 struct kvm_kernel_irqfd *irqfd =
125 struct kvm *kvm = irqfd->kvm;
128 /* Make sure irqfd has been initialized in assign path. */
135 eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);
141 flush_work(&irqfd->inject);
143 if (irqfd->resampler) {
144 irqfd_resampler_shutdown(irqfd);
145 eventfd_ctx_put(irqfd->resamplefd);
152 irq_bypass_unregister_consumer(&irqfd->consumer);
154 eventfd_ctx_put(irqfd->eventfd);
155 kfree(irqfd);
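irqfd_shutdown() (lines 123-155) is ordering-sensitive: detach from the eventfd's wait queue first so no new wakeups arrive, then flush any injection already queued, and only then release the resampler and free the object. Roughly, with error paths and config guards elided:

        u64 cnt;

        /* 1. Unhook from the eventfd; also returns the pending count. */
        eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);

        /* 2. An irqfd_inject() queued before step 1 may still be running. */
        flush_work(&irqfd->inject);

        /* 3. Nothing can reach this irqfd anymore; tear it down. */
        if (irqfd->resampler) {
                irqfd_resampler_shutdown(irqfd);
                eventfd_ctx_put(irqfd->resamplefd);
        }
        irq_bypass_unregister_consumer(&irqfd->consumer);
        eventfd_ctx_put(irqfd->eventfd);
        kfree(irqfd);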
161 irqfd_is_active(struct kvm_kernel_irqfd *irqfd)
163 return list_empty(&irqfd->list) ? false : true;
167 * Mark the irqfd as inactive and schedule it for removal
172 irqfd_deactivate(struct kvm_kernel_irqfd *irqfd)
174 BUG_ON(!irqfd_is_active(irqfd));
176 list_del_init(&irqfd->list);
178 queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
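"Active" is encoded purely as membership in kvm->irqfds.items: list_del_init() leaves the node self-linked, so irqfd_is_active()'s list_empty() test reads false afterwards. Deactivation therefore only unlinks and defers the real teardown. Sketch; mainline expects kvm->irqfds.lock to be held by the caller here:

static void irqfd_deactivate(struct kvm_kernel_irqfd *irqfd)
{
        BUG_ON(!irqfd_is_active(irqfd));

        /* Self-link the node so list_empty(&irqfd->list) becomes true. */
        list_del_init(&irqfd->list);

        /* Teardown sleeps (flush_work etc.), so punt to process context. */
        queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
}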
196 struct kvm_kernel_irqfd *irqfd =
200 struct kvm *kvm = irqfd->kvm;
207 eventfd_ctx_do_read(irqfd->eventfd, &cnt);
211 seq = read_seqcount_begin(&irqfd->irq_entry_sc);
212 irq = irqfd->irq_entry;
213 } while (read_seqcount_retry(&irqfd->irq_entry_sc, seq));
218 schedule_work(&irqfd->inject);
230 * We must check if someone deactivated the irqfd before
235 * We cannot race against the irqfd going away since the
238 if (irqfd_is_active(irqfd))
239 irqfd_deactivate(irqfd);
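irqfd_wakeup() runs in the eventfd's wake-up path, often with spinlocks held, so it must not sleep. The seqcount loop at lines 211-213 takes a lockless snapshot of the routing entry; if the architecture can deliver it atomically the interrupt is injected right there, otherwise it falls back to schedule_work(). A sketch of the EPOLLIN branch, with the fast-path call assumed from mainline:

        __poll_t flags = key_to_poll(key);
        struct kvm_kernel_irq_routing_entry irq;
        struct kvm *kvm = irqfd->kvm;
        unsigned int seq;
        int idx;

        if (flags & EPOLLIN) {
                u64 cnt;

                eventfd_ctx_do_read(irqfd->eventfd, &cnt);

                idx = srcu_read_lock(&kvm->irq_srcu);
                do {
                        /* Retry if irqfd_update() raced with the snapshot. */
                        seq = read_seqcount_begin(&irqfd->irq_entry_sc);
                        irq = irqfd->irq_entry;
                } while (read_seqcount_retry(&irqfd->irq_entry_sc, seq));

                /* Try to inject without sleeping; defer if we cannot. */
                if (kvm_arch_set_irq_inatomic(&irq, kvm,
                                              KVM_USERSPACE_IRQ_SOURCE_ID,
                                              1, false) == -EWOULDBLOCK)
                        schedule_work(&irqfd->inject);
                srcu_read_unlock(&kvm->irq_srcu, idx);
        }

The EPOLLHUP branch (lines 230-239) takes kvm->irqfds.lock and deactivates, which is why the comments stress checking irqfd_is_active() first: a concurrent deassign may already have unlinked the irqfd.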
251 struct kvm_kernel_irqfd *irqfd =
253 add_wait_queue_priority(wqh, &irqfd->wait);
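The poll-table callback registers the irqfd as a priority waiter, so it is woken ahead of ordinary poll() waiters on the same eventfd. It is installed through the standard poll_table plumbing; a sketch of the wiring, assumed from how kvm_irqfd_assign() uses it at lines 396-422 below:

        init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup);
        init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc);

        /* vfs_poll() invokes irqfd_ptable_queue_proc(), which in turn runs
         * add_wait_queue_priority(wqh, &irqfd->wait) on the eventfd. */
        events = vfs_poll(f.file, &irqfd->pt);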
257 static void irqfd_update(struct kvm *kvm, struct kvm_kernel_irqfd *irqfd)
263 n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi);
265 write_seqcount_begin(&irqfd->irq_entry_sc);
269 irqfd->irq_entry = *e;
271 irqfd->irq_entry.type = 0;
273 write_seqcount_end(&irqfd->irq_entry_sc);
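irqfd_update() is the write side of the seqcount: it re-resolves the GSI against the current routing table and publishes the result. A GSI that maps to zero or multiple entries is recorded with type 0, which forces the atomic fast path in irqfd_wakeup() out to the work queue. Sketch, with the locals assumed from mainline:

static void irqfd_update(struct kvm *kvm, struct kvm_kernel_irqfd *irqfd)
{
        struct kvm_kernel_irq_routing_entry *e;
        struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
        int n_entries;

        n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi);

        write_seqcount_begin(&irqfd->irq_entry_sc);

        e = entries;
        if (n_entries == 1)
                irqfd->irq_entry = *e;
        else
                irqfd->irq_entry.type = 0;      /* force the slow path */

        write_seqcount_end(&irqfd->irq_entry_sc);
}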
305 struct kvm_kernel_irqfd *irqfd, *tmp;
318 irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL_ACCOUNT);
319 if (!irqfd)
322 irqfd->kvm = kvm;
323 irqfd->gsi = args->gsi;
324 INIT_LIST_HEAD(&irqfd->list);
325 INIT_WORK(&irqfd->inject, irqfd_inject);
326 INIT_WORK(&irqfd->shutdown, irqfd_shutdown);
327 seqcount_spinlock_init(&irqfd->irq_entry_sc, &kvm->irqfds.lock);
341 irqfd->eventfd = eventfd;
352 irqfd->resamplefd = resamplefd;
353 INIT_LIST_HEAD(&irqfd->resampler_link);
359 if (resampler->notifier.gsi == irqfd->gsi) {
360 irqfd->resampler = resampler;
365 if (!irqfd->resampler) {
376 resampler->notifier.gsi = irqfd->gsi;
383 irqfd->resampler = resampler;
386 list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list);
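Lines 352-386 look up an existing resampler for the GSI and create one (registering the ack notifier) if none matches, so any number of irqfds can share one level-triggered line. From userspace this mode is requested with KVM_IRQFD_FLAG_RESAMPLE; a minimal sketch, where vm_fd, trigger_fd, resample_fd, and gsi are assumed to exist:

        struct kvm_irqfd args = {
                .fd         = trigger_fd,    /* asserts the level-triggered GSI */
                .resamplefd = resample_fd,   /* signaled on guest EOI */
                .gsi        = gsi,
                .flags      = KVM_IRQFD_FLAG_RESAMPLE,
        };

        ioctl(vm_fd, KVM_IRQFD, &args);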
396 init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup);
397 init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc);
403 if (irqfd->eventfd != tmp->eventfd)
412 irqfd_update(kvm, irqfd);
414 list_add_tail(&irqfd->list, &kvm->irqfds.items);
422 events = vfs_poll(f.file, &irqfd->pt);
425 schedule_work(&irqfd->inject);
429 irqfd->consumer.token = (void *)irqfd->eventfd;
430 irqfd->consumer.add_producer = kvm_arch_irq_bypass_add_producer;
431 irqfd->consumer.del_producer = kvm_arch_irq_bypass_del_producer;
432 irqfd->consumer.stop = kvm_arch_irq_bypass_stop;
433 irqfd->consumer.start = kvm_arch_irq_bypass_start;
434 ret = irq_bypass_register_consumer(&irqfd->consumer);
437 irqfd->consumer.token, ret);
444 * do not drop the file until the irqfd is fully initialized, otherwise
451 if (irqfd->resampler)
452 irqfd_resampler_shutdown(irqfd);
463 kfree(irqfd);
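All of kvm_irqfd_assign() (lines 305-463) is driven by the KVM_IRQFD ioctl. A self-contained userspace sketch of the plain edge-triggered case; the function name and error handling are illustrative only:

#include <stdint.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Wire an eventfd to a guest GSI; vm_fd is assumed to come from
 * KVM_CREATE_VM. Writing any value to the returned fd injects the IRQ. */
static int wire_irqfd(int vm_fd, uint32_t gsi)
{
        struct kvm_irqfd args;
        int efd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);

        if (efd < 0)
                return -1;

        memset(&args, 0, sizeof(args));
        args.fd = efd;
        args.gsi = gsi;
        if (ioctl(vm_fd, KVM_IRQFD, &args) < 0) /* runs kvm_irqfd_assign() */
                return -1;

        return efd;
}

After this returns, write(efd, &(uint64_t){1}, 8) from any thread, or a device signaling the eventfd directly (possibly short-circuited through the irq bypass consumer registered at lines 429-434), lands in irqfd_wakeup().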
545 * shutdown any irqfd's that match fd+gsi
550 struct kvm_kernel_irqfd *irqfd, *tmp;
559 list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
560 if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) {
567 write_seqcount_begin(&irqfd->irq_entry_sc);
568 irqfd->irq_entry.type = 0;
569 write_seqcount_end(&irqfd->irq_entry_sc);
570 irqfd_deactivate(irqfd);
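Deassignment matches on the eventfd/GSI pair (line 560), zeroes the published routing entry under the seqcount so a racing irqfd_wakeup() cannot inject from a stale snapshot, and then deactivates. The userspace counterpart is the same ioctl with the deassign flag; fd and gsi must match the original assignment:

        struct kvm_irqfd args = {
                .fd    = efd,
                .gsi   = gsi,
                .flags = KVM_IRQFD_FLAG_DEASSIGN,
        };

        ioctl(vm_fd, KVM_IRQFD, &args);  /* runs kvm_irqfd_deassign() */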
606 struct kvm_kernel_irqfd *irqfd, *tmp;
610 list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list)
611 irqfd_deactivate(irqfd);
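kvm_irqfd_release() runs when the VM goes away and deactivates everything still on the list. Mainline additionally flushes the cleanup workqueue so no queued shutdown work can touch the dying struct kvm; a sketch reflecting that assumption:

void kvm_irqfd_release(struct kvm *kvm)
{
        struct kvm_kernel_irqfd *irqfd, *tmp;

        spin_lock_irq(&kvm->irqfds.lock);
        list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list)
                irqfd_deactivate(irqfd);
        spin_unlock_irq(&kvm->irqfds.lock);

        /* Block until all queued irqfd_shutdown() work has finished. */
        flush_workqueue(irqfd_cleanup_wq);
}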
629 struct kvm_kernel_irqfd *irqfd;
633 list_for_each_entry(irqfd, &kvm->irqfds.items, list) {
636 struct kvm_kernel_irq_routing_entry old = irqfd->irq_entry;
639 irqfd_update(kvm, irqfd);
642 if (irqfd->producer &&
643 kvm_arch_irqfd_route_changed(&old, &irqfd->irq_entry)) {
645 irqfd->kvm, irqfd->producer->irq,
646 irqfd->gsi, 1);
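When userspace installs a new routing table, every irqfd re-resolves its GSI; if an irq bypass producer (e.g. a VFIO posted-interrupt source) is attached and the route actually changed, the architecture hook repoints the physical interrupt. A sketch of the function around the matched lines, with the locking and config guards assumed from mainline:

        spin_lock_irq(&kvm->irqfds.lock);

        list_for_each_entry(irqfd, &kvm->irqfds.items, list) {
                struct kvm_kernel_irq_routing_entry old = irqfd->irq_entry;

                irqfd_update(kvm, irqfd);

                if (irqfd->producer &&
                    kvm_arch_irqfd_route_changed(&old, &irqfd->irq_entry)) {
                        int ret = kvm_arch_update_irqfd_routing(
                                        irqfd->kvm, irqfd->producer->irq,
                                        irqfd->gsi, 1);
                        WARN_ON(ret);
                }
        }

        spin_unlock_irq(&kvm->irqfds.lock);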
687 irqfd_cleanup_wq = alloc_workqueue("kvm-irqfd-cleanup", 0, 0);
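Finally, the init path creates a dedicated workqueue rather than reusing system_wq: kvm_irqfd_release() must be able to flush all pending shutdown work without also waiting on unrelated system work items. Sketch:

int kvm_irqfd_init(void)
{
        irqfd_cleanup_wq = alloc_workqueue("kvm-irqfd-cleanup", 0, 0);
        if (!irqfd_cleanup_wq)
                return -ENOMEM;

        return 0;
}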