Lines Matching defs:virqfd
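These are the lines of the kernel's VFIO virqfd implementation (drivers/vfio/virqfd.c, going by the identifiers) that contain the token virqfd. The number opening each line is its position in that file; gaps in the numbering are lines the matcher skipped, and a few skipped lines needed to read a comment whole are restored below without a number. Nearly every matched line operates on one structure through container_of(), so a sketch of it is useful up front. This is reconstructed from the fields the matched lines touch; the authoritative definition lives in the kernel headers and may differ in detail:

    /* Sketch only; reconstructed from usage, not copied from a header. */
    struct virqfd {
            void                    *opaque;    /* consumer cookie */
            struct eventfd_ctx      *eventfd;
            int                     (*handler)(void *, void *); /* atomic */
            void                    (*thread)(void *, void *);  /* may sleep */
            void                    *data;
            struct work_struct      inject;     /* runs ->thread */
            wait_queue_entry_t      wait;       /* parked on the eventfd */
            poll_table              pt;
            struct work_struct      shutdown;   /* teardown work */
            struct virqfd           **pvirqfd;  /* consumer's slot */
    };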

35 static void virqfd_deactivate(struct virqfd *virqfd)
37 queue_work(vfio_irqfd_cleanup_wq, &virqfd->shutdown);
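Line 37, the body of virqfd_deactivate(), queues teardown on a dedicated workqueue rather than the system one: virqfd_shutdown() (lines 87-94 below) flushes the inject work, which runs on the system workqueue, and flushing across two different queues cannot deadlock. A sketch of how that queue is presumably created at module init, matching the name on line 37 (illustrative, not a verbatim excerpt):

    static struct workqueue_struct *vfio_irqfd_cleanup_wq;

    int __init vfio_virqfd_init(void)
    {
            vfio_irqfd_cleanup_wq =
                    create_singlethread_workqueue("vfio-irqfd-cleanup");
            if (!vfio_irqfd_cleanup_wq)
                    return -ENOMEM;
            return 0;
    }

    void vfio_virqfd_exit(void)
    {
            /* Implicitly waits for any queued shutdown jobs. */
            destroy_workqueue(vfio_irqfd_cleanup_wq);
    }

Lines 42-53 that follow are from virqfd_wakeup(), the wait-queue callback run when the eventfd is signaled (EPOLLIN) or closed (EPOLLHUP).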
42 struct virqfd *virqfd = container_of(wait, struct virqfd, wait);
47 eventfd_ctx_do_read(virqfd->eventfd, &cnt);
50 if ((!virqfd->handler ||
51 virqfd->handler(virqfd->opaque, virqfd->data)) &&
52 virqfd->thread)
53 schedule_work(&virqfd->inject);
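virqfd_wakeup() executes in atomic context (the eventfd's wait-queue lock is held), so eventfd_ctx_do_read() drains the counter without sleeping, and the ->handler callback must be non-sleeping too. Per lines 50-53, the inject work is scheduled only when the handler is absent or returns nonzero, and only if a ->thread callback exists. A hypothetical consumer pair illustrating that contract (the my_* names are inventions for this sketch):

    /* Hypothetical consumer state; not from the source. */
    struct my_ctx {
            struct mutex lock;
            bool need_slowpath;
    };

    /*
     * ->handler: called from virqfd_wakeup(), i.e. atomic context with
     * the eventfd wait-queue lock held, so it must not sleep.  Nonzero
     * return requests the deferred ->thread callback.
     */
    static int my_fastpath_handler(void *opaque, void *data)
    {
            struct my_ctx *ctx = opaque;

            return READ_ONCE(ctx->need_slowpath);
    }

    /* ->thread: runs from the inject work item and may sleep. */
    static void my_slowpath_thread(void *opaque, void *data)
    {
            struct my_ctx *ctx = opaque;

            mutex_lock(&ctx->lock);
            /* ... heavyweight handling ... */
            mutex_unlock(&ctx->lock);
    }

Such a pair would be registered with vfio_virqfd_enable(ctx, my_fastpath_handler, my_slowpath_thread, NULL, &slot, fd); the full signature is shown before line 107 below.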
61 * The eventfd is closing, if the virqfd has not yet been
   * queued for release, as determined by testing whether the
63 * virqfd pointer to it is still valid, queue it now. As
64 * with kvm irqfds, we know we won't race against the virqfd
   * going away because we hold the lock to get here.
67 if (*(virqfd->pvirqfd) == virqfd) {
68 *(virqfd->pvirqfd) = NULL;
69 virqfd_deactivate(virqfd);
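Lines 67-69 are virqfd_wakeup()'s EPOLLHUP branch and run under virqfd_lock (the spin_lock_irqsave()/spin_unlock_irqrestore() pair around them doesn't contain the token, so the matcher hides it). Clearing *pvirqfd before deactivating makes release single-shot: a racing vfio_virqfd_disable() finds NULL and won't queue a second shutdown. The two lines after this are virqfd_ptable_queue_proc(), the poll_table callback.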
81 struct virqfd *virqfd = container_of(pt, struct virqfd, pt);
82 add_wait_queue(wqh, &virqfd->wait);
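Reading lines 81-82 together with lines 163-166 below gives the registration chain: vfs_poll() calls the eventfd's ->poll method, which hands its wait-queue head to the queue proc installed in virqfd->pt (line 164), i.e. this function; it parks virqfd->wait, whose wake function was set to virqfd_wakeup() (line 163), on that head. From then on, every eventfd_signal() on the context wakes directly into virqfd_wakeup(). Lines 87-94 next are virqfd_shutdown(), the teardown work item.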
87 struct virqfd *virqfd = container_of(work, struct virqfd, shutdown);
90 eventfd_ctx_remove_wait_queue(virqfd->eventfd, &virqfd->wait, &cnt);
91 flush_work(&virqfd->inject);
92 eventfd_ctx_put(virqfd->eventfd);
94 kfree(virqfd);
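The ordering in virqfd_shutdown() is load-bearing: eventfd_ctx_remove_wait_queue() atomically unhooks virqfd->wait (so no new wake-up can schedule inject) and returns any residual count; only then is in-flight inject work flushed, the eventfd reference dropped, and the structure freed. Since this runs on vfio_irqfd_cleanup_wq while inject runs on the system workqueue, the flush_work() on line 91 is deadlock-free. Lines 99-101 are virqfd_inject(), which simply calls ->thread from process context.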
99 struct virqfd *virqfd = container_of(work, struct virqfd, inject);
100 if (virqfd->thread)
101 virqfd->thread(virqfd->opaque, virqfd->data);
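Everything from line 107 onward is vfio_virqfd_enable(). The matcher drops the head of its definition (those lines spell vfio_virqfd_enable and the callback types, never the bare token), so for reference, the exported signature is:

    int vfio_virqfd_enable(void *opaque,
                           int (*handler)(void *, void *),
                           void (*thread)(void *, void *),
                           void *data, struct virqfd **pvirqfd, int fd);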
107 void *data, struct virqfd **pvirqfd, int fd)
111 struct virqfd *virqfd;
115 virqfd = kzalloc(sizeof(*virqfd), GFP_KERNEL_ACCOUNT);
116 if (!virqfd)
119 virqfd->pvirqfd = pvirqfd;
120 virqfd->opaque = opaque;
121 virqfd->handler = handler;
122 virqfd->thread = thread;
123 virqfd->data = data;
125 INIT_WORK(&virqfd->shutdown, virqfd_shutdown);
126 INIT_WORK(&virqfd->inject, virqfd_inject);
140 virqfd->eventfd = ctx;
   * virqfds can be released by closing the eventfd or directly
   * through ioctl. These are both done through a workqueue, so
145 * we update the pointer to the virqfd under lock to avoid
146 * pushing multiple jobs to release the same virqfd.
155 *pvirqfd = virqfd;
163 init_waitqueue_func_entry(&virqfd->wait, virqfd_wakeup);
164 init_poll_funcptr(&virqfd->pt, virqfd_ptable_queue_proc);
166 events = vfs_poll(irqfd.file, &virqfd->pt);
174 schedule_work(&virqfd->inject);
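Lines 166 and 174 close a startup race: if the eventfd was signaled before registration, vfs_poll() reports EPOLLIN immediately and the event is injected as if it had just arrived. That works because an eventfd stays readable until its counter is drained. A small standalone userspace demonstration of that property (not from the source):

    #include <poll.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/eventfd.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = eventfd(0, EFD_CLOEXEC);
            uint64_t one = 1;
            struct pollfd pfd = { .fd = fd, .events = POLLIN };

            if (fd < 0)
                    return 1;

            /* Signal the eventfd before anyone is polling... */
            if (write(fd, &one, sizeof(one)) != sizeof(one))
                    return 1;

            /* ...yet a later poll() still sees POLLIN at once, just as
             * vfs_poll() does in vfio_virqfd_enable(). */
            poll(&pfd, 1, 0);
            printf("revents = %#x\n", pfd.revents);

            close(fd);
            return 0;
    }

(Line 189, the kfree() that follows, sits on the function's error-unwind path.)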
189 kfree(virqfd);
195 void vfio_virqfd_disable(struct virqfd **pvirqfd)
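Only the signature of vfio_virqfd_disable() survives the match; its body refers to *pvirqfd and virqfd_deactivate(), never the bare token. From the pieces above, its shape is roughly: take virqfd_lock, and if the consumer's slot still points at a live virqfd, deactivate it and clear the slot; then flush the cleanup workqueue so the caller knows teardown has finished. A sketch along those lines, not a verbatim excerpt:

    void vfio_virqfd_disable(struct virqfd **pvirqfd)
    {
            unsigned long flags;

            spin_lock_irqsave(&virqfd_lock, flags);
            if (*pvirqfd) {
                    virqfd_deactivate(*pvirqfd);
                    *pvirqfd = NULL;
            }
            spin_unlock_irqrestore(&virqfd_lock, flags);

            /* Block until all queued shutdown jobs have completed. */
            flush_workqueue(vfio_irqfd_cleanup_wq);
    }

A consumer therefore pairs vfio_virqfd_enable(..., &slot, fd) with vfio_virqfd_disable(&slot), and may find the slot already NULLed if userspace closed the eventfd first.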