Lines matching refs: vdev

30 	struct vfio_pci_device *vdev = opaque;
32 if (likely(is_intx(vdev) && !vdev->virq_disabled))
33 eventfd_signal(vdev->ctx[0].trigger, 1);
36 void vfio_pci_intx_mask(struct vfio_pci_device *vdev)
38 struct pci_dev *pdev = vdev->pdev;
41 spin_lock_irqsave(&vdev->irqlock, flags);
49 if (unlikely(!is_intx(vdev))) {
50 if (vdev->pci_2_3)
52 } else if (!vdev->ctx[0].masked) {
57 if (vdev->pci_2_3)
62 vdev->ctx[0].masked = true;
65 spin_unlock_irqrestore(&vdev->irqlock, flags);
76 struct vfio_pci_device *vdev = opaque;
77 struct pci_dev *pdev = vdev->pdev;
81 spin_lock_irqsave(&vdev->irqlock, flags);
87 if (unlikely(!is_intx(vdev))) {
88 if (vdev->pci_2_3)
90 } else if (vdev->ctx[0].masked && !vdev->virq_disabled) {
96 if (vdev->pci_2_3) {
102 vdev->ctx[0].masked = (ret > 0);
105 spin_unlock_irqrestore(&vdev->irqlock, flags);
110 void vfio_pci_intx_unmask(struct vfio_pci_device *vdev)
112 if (vfio_pci_intx_unmask_handler(vdev, NULL) > 0)
113 vfio_send_intx_eventfd(vdev, NULL);
118 struct vfio_pci_device *vdev = dev_id;
122 spin_lock_irqsave(&vdev->irqlock, flags);
124 if (!vdev->pci_2_3) {
125 disable_irq_nosync(vdev->pdev->irq);
126 vdev->ctx[0].masked = true;
128 } else if (!vdev->ctx[0].masked && /* may be shared */
129 pci_check_and_mask_intx(vdev->pdev)) {
130 vdev->ctx[0].masked = true;
134 spin_unlock_irqrestore(&vdev->irqlock, flags);
137 vfio_send_intx_eventfd(vdev, NULL);
142 static int vfio_intx_enable(struct vfio_pci_device *vdev)
144 if (!is_irq_none(vdev))
147 if (!vdev->pdev->irq)
150 vdev->ctx = kzalloc(sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
151 if (!vdev->ctx)
154 vdev->num_ctx = 1;
162 vdev->ctx[0].masked = vdev->virq_disabled;
163 if (vdev->pci_2_3)
164 pci_intx(vdev->pdev, !vdev->ctx[0].masked);
166 vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX;
171 static int vfio_intx_set_signal(struct vfio_pci_device *vdev, int fd)
173 struct pci_dev *pdev = vdev->pdev;
179 if (vdev->ctx[0].trigger) {
180 free_irq(pdev->irq, vdev);
181 kfree(vdev->ctx[0].name);
182 eventfd_ctx_put(vdev->ctx[0].trigger);
183 vdev->ctx[0].trigger = NULL;
189 vdev->ctx[0].name = kasprintf(GFP_KERNEL, "vfio-intx(%s)",
191 if (!vdev->ctx[0].name)
196 kfree(vdev->ctx[0].name);
200 vdev->ctx[0].trigger = trigger;
202 if (!vdev->pci_2_3)
206 irqflags, vdev->ctx[0].name, vdev);
208 vdev->ctx[0].trigger = NULL;
209 kfree(vdev->ctx[0].name);
218 spin_lock_irqsave(&vdev->irqlock, flags);
219 if (!vdev->pci_2_3 && vdev->ctx[0].masked)
221 spin_unlock_irqrestore(&vdev->irqlock, flags);
226 static void vfio_intx_disable(struct vfio_pci_device *vdev)
228 vfio_virqfd_disable(&vdev->ctx[0].unmask);
229 vfio_virqfd_disable(&vdev->ctx[0].mask);
230 vfio_intx_set_signal(vdev, -1);
231 vdev->irq_type = VFIO_PCI_NUM_IRQS;
232 vdev->num_ctx = 0;
233 kfree(vdev->ctx);
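
The INTx matches above (vfio_intx_enable(), vfio_intx_set_signal(), vfio_intx_disable()) are reached through the VFIO_DEVICE_SET_IRQS ioctl; judging by the function names, these lines come from the kernel's drivers/vfio/pci/vfio_pci_intrs.c. As a hedged illustration only, not part of the listing, a userspace driver would typically hand the kernel an eventfd for the INTx trigger roughly as below. device_fd is an assumed, already-open VFIO device file descriptor, and the helper name is invented for this sketch.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Sketch: point the single INTx vector at an eventfd we can poll/read. */
static int vfio_intx_request_eventfd(int device_fd)
{
	size_t argsz = sizeof(struct vfio_irq_set) + sizeof(int32_t);
	struct vfio_irq_set *set = calloc(1, argsz);
	int32_t efd = eventfd(0, 0);

	if (!set || efd < 0)
		goto err;

	set->argsz = argsz;
	set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	set->index = VFIO_PCI_INTX_IRQ_INDEX;	/* single INTx vector */
	set->start = 0;
	set->count = 1;
	memcpy(set->data, &efd, sizeof(efd));

	/* Dispatched to vfio_pci_set_intx_trigger() -> vfio_intx_enable()/..set_signal() */
	if (ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set))
		goto err;

	free(set);
	return efd;		/* each read() on efd reports INTx assertions */
err:
	free(set);
	if (efd >= 0)
		close(efd);
	return -1;
}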
247 static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
249 struct pci_dev *pdev = vdev->pdev;
254 if (!is_irq_none(vdev))
257 vdev->ctx = kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
258 if (!vdev->ctx)
262 cmd = vfio_pci_memory_lock_and_enable(vdev);
267 vfio_pci_memory_unlock_and_restore(vdev, cmd);
268 kfree(vdev->ctx);
271 vfio_pci_memory_unlock_and_restore(vdev, cmd);
273 vdev->num_ctx = nvec;
274 vdev->irq_type = msix ? VFIO_PCI_MSIX_IRQ_INDEX :
282 vdev->msi_qmax = fls(nvec * 2 - 1) - 1;
288 static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
291 struct pci_dev *pdev = vdev->pdev;
296 if (vector < 0 || vector >= vdev->num_ctx)
301 if (vdev->ctx[vector].trigger) {
302 irq_bypass_unregister_producer(&vdev->ctx[vector].producer);
304 cmd = vfio_pci_memory_lock_and_enable(vdev);
305 free_irq(irq, vdev->ctx[vector].trigger);
306 vfio_pci_memory_unlock_and_restore(vdev, cmd);
308 kfree(vdev->ctx[vector].name);
309 eventfd_ctx_put(vdev->ctx[vector].trigger);
310 vdev->ctx[vector].trigger = NULL;
316 vdev->ctx[vector].name = kasprintf(GFP_KERNEL, "vfio-msi%s[%d](%s)",
319 if (!vdev->ctx[vector].name)
324 kfree(vdev->ctx[vector].name);
335 cmd = vfio_pci_memory_lock_and_enable(vdev);
344 vdev->ctx[vector].name, trigger);
345 vfio_pci_memory_unlock_and_restore(vdev, cmd);
347 kfree(vdev->ctx[vector].name);
352 vdev->ctx[vector].producer.token = trigger;
353 vdev->ctx[vector].producer.irq = irq;
354 ret = irq_bypass_register_producer(&vdev->ctx[vector].producer);
358 vdev->ctx[vector].producer.token, ret);
360 vdev->ctx[vector].producer.token = NULL;
362 vdev->ctx[vector].trigger = trigger;
367 static int vfio_msi_set_block(struct vfio_pci_device *vdev, unsigned start,
372 if (start >= vdev->num_ctx || start + count > vdev->num_ctx)
377 ret = vfio_msi_set_vector_signal(vdev, j, fd, msix);
382 vfio_msi_set_vector_signal(vdev, j, -1, msix);
388 static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
390 struct pci_dev *pdev = vdev->pdev;
394 for (i = 0; i < vdev->num_ctx; i++) {
395 vfio_virqfd_disable(&vdev->ctx[i].unmask);
396 vfio_virqfd_disable(&vdev->ctx[i].mask);
399 vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);
401 cmd = vfio_pci_memory_lock_and_enable(vdev);
403 vfio_pci_memory_unlock_and_restore(vdev, cmd);
409 if (vdev->nointx)
412 vdev->irq_type = VFIO_PCI_NUM_IRQS;
413 vdev->num_ctx = 0;
414 kfree(vdev->ctx);
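
The MSI/MSI-X matches above (vfio_msi_enable(), vfio_msi_set_vector_signal(), vfio_msi_set_block(), vfio_msi_disable()) are driven the same way, except userspace passes one eventfd per vector in the data area. A hedged sketch, with eventfd() error handling elided and the helper name invented for illustration; the caller supplies an efds array with room for nvec descriptors.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Sketch: enable nvec MSI-X vectors, each signalled through its own eventfd. */
static int vfio_msix_request_eventfds(int device_fd, int nvec, int32_t *efds)
{
	size_t argsz = sizeof(struct vfio_irq_set) + nvec * sizeof(int32_t);
	struct vfio_irq_set *set = calloc(1, argsz);
	int i, ret;

	if (!set)
		return -1;

	for (i = 0; i < nvec; i++)
		efds[i] = eventfd(0, 0);	/* failures not checked in this sketch */

	set->argsz = argsz;
	set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	set->index = VFIO_PCI_MSIX_IRQ_INDEX;	/* or VFIO_PCI_MSI_IRQ_INDEX for plain MSI */
	set->start = 0;
	set->count = nvec;
	memcpy(set->data, efds, nvec * sizeof(int32_t));

	/* Dispatched to vfio_pci_set_msi_trigger() -> vfio_msi_enable()/..set_block() */
	ret = ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set);
	free(set);
	return ret;
}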
420 static int vfio_pci_set_intx_unmask(struct vfio_pci_device *vdev,
424 if (!is_intx(vdev) || start != 0 || count != 1)
428 vfio_pci_intx_unmask(vdev);
432 vfio_pci_intx_unmask(vdev);
436 return vfio_virqfd_enable((void *) vdev,
439 &vdev->ctx[0].unmask, fd);
441 vfio_virqfd_disable(&vdev->ctx[0].unmask);
447 static int vfio_pci_set_intx_mask(struct vfio_pci_device *vdev,
451 if (!is_intx(vdev) || start != 0 || count != 1)
455 vfio_pci_intx_mask(vdev);
459 vfio_pci_intx_mask(vdev);
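
vfio_pci_set_intx_unmask() and vfio_pci_set_intx_mask() above also accept a plain VFIO_IRQ_SET_DATA_NONE command, which is how userspace re-enables a level-triggered INTx after servicing the device (the in-kernel handler left it masked). A hedged sketch of that unmask call, again with an assumed open device_fd:

#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Sketch: unmask INTx once the masked-and-signalled interrupt has been serviced. */
static int vfio_intx_unmask_from_user(int device_fd)
{
	struct vfio_irq_set unmask = {
		.argsz = sizeof(unmask),
		.flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
		.index = VFIO_PCI_INTX_IRQ_INDEX,
		.start = 0,
		.count = 1,
	};

	/* Dispatched to vfio_pci_set_intx_unmask() -> vfio_pci_intx_unmask() */
	return ioctl(device_fd, VFIO_DEVICE_SET_IRQS, &unmask);
}

Using VFIO_IRQ_SET_ACTION_MASK with the same DATA_NONE layout reaches vfio_pci_set_intx_mask() instead.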
467 static int vfio_pci_set_intx_trigger(struct vfio_pci_device *vdev,
471 if (is_intx(vdev) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
472 vfio_intx_disable(vdev);
476 if (!(is_intx(vdev) || is_irq_none(vdev)) || start != 0 || count != 1)
483 if (is_intx(vdev))
484 return vfio_intx_set_signal(vdev, fd);
486 ret = vfio_intx_enable(vdev);
490 ret = vfio_intx_set_signal(vdev, fd);
492 vfio_intx_disable(vdev);
497 if (!is_intx(vdev))
501 vfio_send_intx_eventfd(vdev, NULL);
505 vfio_send_intx_eventfd(vdev, NULL);
510 static int vfio_pci_set_msi_trigger(struct vfio_pci_device *vdev,
517 if (irq_is(vdev, index) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
518 vfio_msi_disable(vdev, msix);
522 if (!(irq_is(vdev, index) || is_irq_none(vdev)))
529 if (vdev->irq_type == index)
530 return vfio_msi_set_block(vdev, start, count,
533 ret = vfio_msi_enable(vdev, start + count, msix);
537 ret = vfio_msi_set_block(vdev, start, count, fds, msix);
539 vfio_msi_disable(vdev, msix);
544 if (!irq_is(vdev, index) || start + count > vdev->num_ctx)
548 if (!vdev->ctx[i].trigger)
551 eventfd_signal(vdev->ctx[i].trigger, 1);
555 eventfd_signal(vdev->ctx[i].trigger, 1);
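
The eventfd_signal() calls at the end of vfio_pci_set_msi_trigger() above handle a DATA_NONE (or DATA_BOOL) trigger on vectors that already have an eventfd bound, so userspace can ask the kernel to fire a vector's eventfd itself, which is handy for testing the interrupt plumbing. A hedged sketch, helper name invented:

#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Sketch: have the kernel signal the eventfd already bound to one MSI-X vector. */
static int vfio_msix_kick_vector(int device_fd, unsigned int vector)
{
	struct vfio_irq_set kick = {
		.argsz = sizeof(kick),
		.flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
		.index = VFIO_PCI_MSIX_IRQ_INDEX,
		.start = vector,
		.count = 1,
	};

	return ioctl(device_fd, VFIO_DEVICE_SET_IRQS, &kick);
}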
616 static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev,
623 return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger,
627 static int vfio_pci_set_req_trigger(struct vfio_pci_device *vdev,
634 return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger,
638 int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
642 int (*func)(struct vfio_pci_device *vdev, unsigned index,
675 if (pci_is_pcie(vdev->pdev))
692 return func(vdev, index, start, count, flags, data);
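
Finally, the "!count && (flags & VFIO_IRQ_SET_DATA_NONE)" checks in vfio_pci_set_intx_trigger() and vfio_pci_set_msi_trigger() above mean that a zero-count DATA_NONE trigger request tears the whole index down via vfio_intx_disable()/vfio_msi_disable(). A hedged sketch of that teardown, assuming the same open device_fd:

#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Sketch: release every vector of one IRQ index, e.g. VFIO_PCI_MSIX_IRQ_INDEX. */
static int vfio_disable_irq_index(int device_fd, unsigned int index)
{
	struct vfio_irq_set off = {
		.argsz = sizeof(off),
		.flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
		.index = index,
		.start = 0,
		.count = 0,	/* count == 0 with DATA_NONE requests disable */
	};

	return ioctl(device_fd, VFIO_DEVICE_SET_IRQS, &off);
}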