
Searched refs:vqs (Results 1 - 25 of 101) sorted by relevance


/kernel/linux/linux-6.6/drivers/vdpa/pds/
vdpa_dev.c
77 pdsv->vqs[qid].desc_addr = desc_addr; in pds_vdpa_set_vq_address()
78 pdsv->vqs[qid].avail_addr = driver_addr; in pds_vdpa_set_vq_address()
79 pdsv->vqs[qid].used_addr = device_addr; in pds_vdpa_set_vq_address()
88 pdsv->vqs[qid].q_len = num; in pds_vdpa_set_vq_num()
95 iowrite16(qid, pdsv->vqs[qid].notify); in pds_vdpa_kick_vq()
103 pdsv->vqs[qid].event_cb = *cb; in pds_vdpa_set_vq_cb()
119 if (pdsv->vqs[qid].irq == VIRTIO_MSI_NO_VECTOR) in pds_vdpa_release_irq()
122 free_irq(pdsv->vqs[qid].irq, &pdsv->vqs[qid]); in pds_vdpa_release_irq()
123 pdsv->vqs[qi in pds_vdpa_release_irq()
[all...]
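The pds hits above show the bookkeeping a vdpa .set_vq_address callback typically does: cache, per queue, the three ring addresses the guest configured. A minimal sketch of that pattern with hypothetical my_* names (not the pds driver's actual structures):

#include <linux/types.h>

/* one entry per virtqueue, indexed by qid */
struct my_vdpa_vq {
	u64 desc_addr;		/* descriptor ring */
	u64 avail_addr;		/* driver (avail) ring */
	u64 used_addr;		/* device (used) ring */
};

struct my_vdpa_dev {
	struct my_vdpa_vq *vqs;
};

static int my_set_vq_address(struct my_vdpa_dev *d, u16 qid,
			     u64 desc_addr, u64 driver_addr, u64 device_addr)
{
	/* record the addresses; they get programmed into the device later */
	d->vqs[qid].desc_addr  = desc_addr;
	d->vqs[qid].avail_addr = driver_addr;
	d->vqs[qid].used_addr  = device_addr;
	return 0;
}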
/kernel/linux/linux-5.10/drivers/virtio/
virtio_pci_common.c
203 vp_dev->vqs[index] = info; in vp_setup_vq()
214 struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index]; in vp_del_vq()
232 list_for_each_entry_safe(vq, n, &vdev->vqs, list) { in vp_del_vqs()
234 int v = vp_dev->vqs[vq->index]->msix_vector; in vp_del_vqs()
274 kfree(vp_dev->vqs); in vp_del_vqs()
275 vp_dev->vqs = NULL; in vp_del_vqs()
279 struct virtqueue *vqs[], vq_callback_t *callbacks[], in vp_find_vqs_msix()
288 vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL); in vp_find_vqs_msix()
289 if (!vp_dev->vqs) in vp_find_vqs_msix()
278 vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], const char * const names[], bool per_vq_vectors, const bool *ctx, struct irq_affinity *desc) vp_find_vqs_msix() argument
352 vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], const char * const names[], const bool *ctx) vp_find_vqs_intx() argument
391 vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], const char * const names[], const bool *ctx, struct irq_affinity *desc) vp_find_vqs() argument
[all...]
/kernel/linux/linux-6.6/drivers/virtio/
virtio_pci_common.c
203 vp_dev->vqs[index] = info; in vp_setup_vq()
214 struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index]; in vp_del_vq()
238 list_for_each_entry_safe(vq, n, &vdev->vqs, list) { in vp_del_vqs()
240 int v = vp_dev->vqs[vq->index]->msix_vector; in vp_del_vqs()
280 kfree(vp_dev->vqs); in vp_del_vqs()
281 vp_dev->vqs = NULL; in vp_del_vqs()
285 struct virtqueue *vqs[], vq_callback_t *callbacks[], in vp_find_vqs_msix()
294 vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL); in vp_find_vqs_msix()
295 if (!vp_dev->vqs) in vp_find_vqs_msix()
284 vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], const char * const names[], bool per_vq_vectors, const bool *ctx, struct irq_affinity *desc) vp_find_vqs_msix() argument
358 vp_find_vqs_intx(struct virtio_device *vdev, unsigned int nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], const char * const names[], const bool *ctx) vp_find_vqs_intx() argument
397 vp_find_vqs(struct virtio_device *vdev, unsigned int nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], const char * const names[], const bool *ctx, struct irq_affinity *desc) vp_find_vqs() argument
[all...]
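Both virtio_pci_common.c versions show the same bookkeeping: vp_find_vqs_msix() kcalloc()s a per-device vqs table indexed by vq->index, and vp_del_vqs() frees it and clears the pointer. A stripped-down sketch of that allocate/teardown pattern, with made-up my_* names rather than the driver's real types:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/virtio.h>

struct my_vq_info {
	struct virtqueue *vq;		/* the queue itself */
	unsigned int msix_vector;	/* vector assigned to it, if any */
};

struct my_pci_dev {
	struct my_vq_info **vqs;	/* table indexed by vq->index */
};

static int my_alloc_vq_table(struct my_pci_dev *d, unsigned int nvqs)
{
	d->vqs = kcalloc(nvqs, sizeof(*d->vqs), GFP_KERNEL);
	return d->vqs ? 0 : -ENOMEM;
}

static void my_free_vq_table(struct my_pci_dev *d)
{
	/* individual entries are freed as each vq is deleted */
	kfree(d->vqs);
	d->vqs = NULL;
}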
/kernel/linux/linux-5.10/drivers/vhost/
test.c
38 struct vhost_virtqueue vqs[VHOST_TEST_VQ_MAX]; member
45 struct vhost_virtqueue *vq = &n->vqs[VHOST_TEST_VQ]; in handle_vq()
109 struct vhost_virtqueue **vqs; in vhost_test_open() local
113 vqs = kmalloc_array(VHOST_TEST_VQ_MAX, sizeof(*vqs), GFP_KERNEL); in vhost_test_open()
114 if (!vqs) { in vhost_test_open()
120 vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ]; in vhost_test_open()
121 n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick; in vhost_test_open()
122 vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MA in vhost_test_open()
[all...]
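The vhost drivers in this directory share the open() pattern visible above: the device embeds its vhost_virtqueue instances, and open() builds a separate array of pointers to them for vhost_dev_init(). A hedged sketch of just that step, with illustrative names; the vhost_dev_init() arguments are omitted because they differ between 5.10 and 6.6:

#include <linux/slab.h>
#include "vhost.h"		/* drivers/vhost local header */

#define MY_VQ_MAX 2

struct my_vhost_dev {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[MY_VQ_MAX];	/* embedded queues */
};

static struct vhost_virtqueue **my_alloc_vq_array(struct my_vhost_dev *n)
{
	struct vhost_virtqueue **vqs;
	int i;

	vqs = kmalloc_array(MY_VQ_MAX, sizeof(*vqs), GFP_KERNEL);
	if (!vqs)
		return NULL;

	for (i = 0; i < MY_VQ_MAX; i++)
		vqs[i] = &n->vqs[i];	/* point at the embedded queues */

	/* the real drivers now hand (&n->dev, vqs, MY_VQ_MAX, ...) to
	 * vhost_dev_init(), which keeps the array for the device's lifetime */
	return vqs;
}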
net.c
133 struct vhost_net_virtqueue vqs[VHOST_NET_VQ_MAX]; member
274 kfree(n->vqs[i].ubuf_info); in vhost_net_clear_ubuf_info()
275 n->vqs[i].ubuf_info = NULL; in vhost_net_clear_ubuf_info()
288 n->vqs[i].ubuf_info = in vhost_net_set_ubuf_info()
290 sizeof(*n->vqs[i].ubuf_info), in vhost_net_set_ubuf_info()
292 if (!n->vqs[i].ubuf_info) in vhost_net_set_ubuf_info()
309 n->vqs[i].done_idx = 0; in vhost_net_vq_reset()
310 n->vqs[i].upend_idx = 0; in vhost_net_vq_reset()
311 n->vqs[i].ubufs = NULL; in vhost_net_vq_reset()
312 n->vqs[ in vhost_net_vq_reset()
1288 struct vhost_virtqueue **vqs; vhost_net_open() local
[all...]
vsock.c
42 struct vhost_virtqueue vqs[2]; member
87 struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX]; in vhost_transport_do_send_pkt()
240 vq = &vsock->vqs[VSOCK_VQ_RX]; in vhost_transport_send_pkt_work()
306 struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX]; in vhost_transport_cancel_pkt()
384 struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX]; in vhost_vsock_more_replies()
532 for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { in vhost_vsock_start()
533 vq = &vsock->vqs[i]; in vhost_vsock_start()
564 for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { in vhost_vsock_start()
565 vq = &vsock->vqs[i]; in vhost_vsock_start()
589 for (i = 0; i < ARRAY_SIZE(vsock->vqs); in vhost_vsock_stop()
609 struct vhost_virtqueue **vqs; vhost_vsock_dev_open() local
[all...]
scsi.c
203 struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ]; member
265 vq = &vs->vqs[i].vq; in vhost_scsi_init_inflight()
270 idx = vs->vqs[i].inflight_idx; in vhost_scsi_init_inflight()
272 old_inflight[i] = &vs->vqs[i].inflights[idx]; in vhost_scsi_init_inflight()
275 vs->vqs[i].inflight_idx = idx ^ 1; in vhost_scsi_init_inflight()
276 new_inflight = &vs->vqs[i].inflights[idx ^ 1]; in vhost_scsi_init_inflight()
449 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_allocate_evt()
479 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_do_evt_work()
530 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_evt_work()
584 vq = q - vs->vqs; in vhost_scsi_complete_cmd_work()
1814 struct vhost_virtqueue **vqs; vhost_scsi_open() local
[all...]
/kernel/linux/linux-6.6/drivers/vhost/
test.c
38 struct vhost_virtqueue vqs[VHOST_TEST_VQ_MAX]; member
45 struct vhost_virtqueue *vq = &n->vqs[VHOST_TEST_VQ]; in handle_vq()
109 struct vhost_virtqueue **vqs; in vhost_test_open() local
113 vqs = kmalloc_array(VHOST_TEST_VQ_MAX, sizeof(*vqs), GFP_KERNEL); in vhost_test_open()
114 if (!vqs) { in vhost_test_open()
120 vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ]; in vhost_test_open()
121 n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick; in vhost_test_open()
122 vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MA in vhost_test_open()
[all...]
net.c
134 struct vhost_net_virtqueue vqs[VHOST_NET_VQ_MAX]; member
275 kfree(n->vqs[i].ubuf_info); in vhost_net_clear_ubuf_info()
276 n->vqs[i].ubuf_info = NULL; in vhost_net_clear_ubuf_info()
289 n->vqs[i].ubuf_info = in vhost_net_set_ubuf_info()
291 sizeof(*n->vqs[i].ubuf_info), in vhost_net_set_ubuf_info()
293 if (!n->vqs[i].ubuf_info) in vhost_net_set_ubuf_info()
310 n->vqs[i].done_idx = 0; in vhost_net_vq_reset()
311 n->vqs[i].upend_idx = 0; in vhost_net_vq_reset()
312 n->vqs[i].ubufs = NULL; in vhost_net_vq_reset()
313 n->vqs[ in vhost_net_vq_reset()
1297 struct vhost_virtqueue **vqs; vhost_net_open() local
[all...]
vsock.c
48 struct vhost_virtqueue vqs[2]; member
93 struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX]; in vhost_transport_do_send_pkt()
262 vq = &vsock->vqs[VSOCK_VQ_RX]; in vhost_transport_send_pkt_work()
288 vhost_vq_work_queue(&vsock->vqs[VSOCK_VQ_RX], &vsock->send_pkt_work); in vhost_transport_send_pkt()
311 struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX]; in vhost_transport_cancel_pkt()
388 struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX]; in vhost_vsock_more_replies()
564 for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { in vhost_vsock_start()
565 vq = &vsock->vqs[i]; in vhost_vsock_start()
587 vhost_vq_work_queue(&vsock->vqs[VSOCK_VQ_RX], &vsock->send_pkt_work); in vhost_vsock_start()
596 for (i = 0; i < ARRAY_SIZE(vsock->vqs); in vhost_vsock_start()
641 struct vhost_virtqueue **vqs; vhost_vsock_dev_open() local
[all...]
scsi.c
201 struct vhost_scsi_virtqueue *vqs; member
260 vq = &vs->vqs[i].vq; in vhost_scsi_init_inflight()
265 idx = vs->vqs[i].inflight_idx; in vhost_scsi_init_inflight()
267 old_inflight[i] = &vs->vqs[i].inflights[idx]; in vhost_scsi_init_inflight()
270 vs->vqs[i].inflight_idx = idx ^ 1; in vhost_scsi_init_inflight()
271 new_inflight = &vs->vqs[i].inflights[idx ^ 1]; in vhost_scsi_init_inflight()
423 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_allocate_evt()
453 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_do_evt_work()
504 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; in vhost_scsi_evt_work()
1278 * Flush IO vqs tha in vhost_scsi_tmf_resp_work()
1924 struct vhost_virtqueue **vqs; vhost_scsi_open() local
[all...]
/kernel/linux/linux-6.6/drivers/vdpa/solidrun/
snet_main.c
77 if (snet->vqs[i] && snet->vqs[i]->irq != -1) { in snet_free_irqs()
78 devm_free_irq(&pdev->dev, snet->vqs[i]->irq, snet->vqs[i]); in snet_free_irqs()
79 snet->vqs[i]->irq = -1; in snet_free_irqs()
91 snet->vqs[idx]->desc_area = desc_area; in snet_set_vq_address()
92 snet->vqs[idx]->driver_area = driver_area; in snet_set_vq_address()
93 snet->vqs[idx]->device_area = device_area; in snet_set_vq_address()
102 snet->vqs[idx]->num = num; in snet_set_vq_num()
109 if (unlikely(!snet->vqs[id in snet_kick_vq()
[all...]
/kernel/linux/linux-5.10/drivers/block/
virtio_blk.c
69 /* num of vqs */
71 struct virtio_blk_vq *vqs; member
183 spin_lock_irqsave(&vblk->vqs[qid].lock, flags); in virtblk_done()
186 while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) { in virtblk_done()
200 spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); in virtblk_done()
206 struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num]; in virtio_commit_rqs()
277 spin_lock_irqsave(&vblk->vqs[qid].lock, flags); in virtio_queue_rq()
278 err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num); in virtio_queue_rq()
280 virtqueue_kick(vblk->vqs[qid].vq); in virtio_queue_rq()
286 spin_unlock_irqrestore(&vblk->vqs[qi in virtio_queue_rq()
497 struct virtqueue **vqs; init_vq() local
[all...]
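virtblk_done() above illustrates the completion side of the per-queue layout: each entry in vblk->vqs pairs a virtqueue with a spinlock, and the interrupt handler drains finished requests under that lock. A minimal sketch of the drain loop (names invented, block-layer handoff elided):

#include <linux/spinlock.h>
#include <linux/virtio.h>

struct my_blk_vq {
	struct virtqueue *vq;
	spinlock_t lock;	/* protects vq */
};

static void my_blk_done(struct my_blk_vq *q)
{
	unsigned long flags;
	unsigned int len;
	void *req;

	spin_lock_irqsave(&q->lock, flags);
	/* pop every request the device has completed so far */
	while ((req = virtqueue_get_buf(q->vq, &len)) != NULL)
		;	/* hand 'req' back to the block layer here */
	spin_unlock_irqrestore(&q->lock, flags);
}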
/kernel/linux/linux-6.6/drivers/bluetooth/
virtio_bt.c
24 struct virtqueue *vqs[VIRTBT_NUM_VQS]; member
31 struct virtqueue *vq = vbt->vqs[VIRTBT_VQ_RX]; in virtbt_add_inbuf()
61 virtqueue_kick(vbt->vqs[VIRTBT_VQ_RX]); in virtbt_open_vdev()
76 for (i = 0; i < ARRAY_SIZE(vbt->vqs); i++) { in virtbt_close_vdev()
77 struct virtqueue *vq = vbt->vqs[i]; in virtbt_close_vdev()
102 err = virtqueue_add_outbuf(vbt->vqs[VIRTBT_VQ_TX], sg, 1, skb, in virtbt_send_frame()
109 virtqueue_kick(vbt->vqs[VIRTBT_VQ_TX]); in virtbt_send_frame()
226 skb = virtqueue_get_buf(vbt->vqs[VIRTBT_VQ_RX], &len); in virtbt_rx_work()
236 virtqueue_kick(vbt->vqs[VIRTBT_VQ_RX]); in virtbt_rx_work()
292 err = virtio_find_vqs(vdev, VIRTBT_NUM_VQS, vbt->vqs, callback in virtbt_probe()
[all...]
/kernel/linux/linux-5.10/fs/fuse/
virtio_fs.c
60 struct virtio_fs_vq *vqs; member
133 return &fs->vqs[vq->index]; in vq_to_fsvq()
160 kfree(vfs->vqs); in release_virtio_fs_obj()
206 fsvq = &fs->vqs[i]; in virtio_fs_drain_all_queues_locked()
230 fsvq = &fs->vqs[i]; in virtio_fs_start_all_queues()
287 struct virtio_fs_vq *fsvq = &fs->vqs[i]; in virtio_fs_free_devs()
678 struct virtqueue **vqs; in virtio_fs_setup_vqs() local
690 fs->vqs = kcalloc(fs->nvqs, sizeof(fs->vqs[VQ_HIPRIO]), GFP_KERNEL); in virtio_fs_setup_vqs()
691 if (!fs->vqs) in virtio_fs_setup_vqs()
[all...]
/kernel/linux/linux-6.6/fs/fuse/
virtio_fs.c
61 struct virtio_fs_vq *vqs; member
146 return &fs->vqs[vq->index]; in vq_to_fsvq()
168 kfree(vfs->vqs); in release_virtio_fs_obj()
214 fsvq = &fs->vqs[i]; in virtio_fs_drain_all_queues_locked()
238 fsvq = &fs->vqs[i]; in virtio_fs_start_all_queues()
295 struct virtio_fs_vq *fsvq = &fs->vqs[i]; in virtio_fs_free_devs()
686 struct virtqueue **vqs; in virtio_fs_setup_vqs() local
698 fs->vqs = kcalloc(fs->nvqs, sizeof(fs->vqs[VQ_HIPRIO]), GFP_KERNEL); in virtio_fs_setup_vqs()
699 if (!fs->vqs) in virtio_fs_setup_vqs()
[all...]
/kernel/linux/linux-6.6/drivers/vdpa/vdpa_user/
vduse_dev.c
87 struct vduse_virtqueue **vqs; member
448 struct vduse_virtqueue *vq = dev->vqs[i]; in vduse_dev_reset()
481 struct vduse_virtqueue *vq = dev->vqs[idx]; in vduse_vdpa_set_vq_address()
515 struct vduse_virtqueue *vq = dev->vqs[idx]; in vduse_vdpa_kick_vq()
528 struct vduse_virtqueue *vq = dev->vqs[idx]; in vduse_vdpa_set_vq_cb()
540 struct vduse_virtqueue *vq = dev->vqs[idx]; in vduse_vdpa_set_vq_num()
549 struct vduse_virtqueue *vq = dev->vqs[idx]; in vduse_vdpa_set_vq_ready()
557 struct vduse_virtqueue *vq = dev->vqs[idx]; in vduse_vdpa_get_vq_ready()
566 struct vduse_virtqueue *vq = dev->vqs[idx]; in vduse_vdpa_set_vq_state()
585 struct vduse_virtqueue *vq = dev->vqs[id in vduse_vdpa_get_vq_state()
[all...]
/kernel/linux/linux-5.10/net/vmw_vsock/
virtio_transport.c
31 struct virtqueue *vqs[VSOCK_VQ_MAX]; member
38 /* The following fields are protected by tx_lock. vqs[VSOCK_VQ_TX]
50 /* The following fields are protected by rx_lock. vqs[VSOCK_VQ_RX]
59 * vqs[VSOCK_VQ_EVENT] must be accessed with event_lock held.
100 vq = vsock->vqs[VSOCK_VQ_TX]; in virtio_transport_send_pkt_work()
142 struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX]; in virtio_transport_send_pkt_work()
230 struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX]; in virtio_transport_cancel_pkt()
254 vq = vsock->vqs[VSOCK_VQ_RX]; in virtio_vsock_rx_fill()
294 vq = vsock->vqs[VSOCK_VQ_TX]; in virtio_transport_tx_work()
321 struct virtqueue *vq = vsock->vqs[VSOCK_VQ_R in virtio_transport_more_replies()
[all...]
/kernel/linux/linux-6.6/net/vmw_vsock/
virtio_transport.c
31 struct virtqueue *vqs[VSOCK_VQ_MAX]; member
38 /* The following fields are protected by tx_lock. vqs[VSOCK_VQ_TX]
49 /* The following fields are protected by rx_lock. vqs[VSOCK_VQ_RX]
58 * vqs[VSOCK_VQ_EVENT] must be accessed with event_lock held.
100 vq = vsock->vqs[VSOCK_VQ_TX]; in virtio_transport_send_pkt_work()
132 struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX]; in virtio_transport_send_pkt_work()
205 struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX]; in virtio_transport_cancel_pkt()
229 vq = vsock->vqs[VSOCK_VQ_RX]; in virtio_vsock_rx_fill()
259 vq = vsock->vqs[VSOCK_VQ_TX]; in virtio_transport_tx_work()
286 struct virtqueue *vq = vsock->vqs[VSOCK_VQ_R in virtio_transport_more_replies()
[all...]
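virtio_vsock_rx_fill() in the hits above keeps vqs[VSOCK_VQ_RX] stocked with receive buffers. The sketch below shows the general refill idiom only (add buffers until the ring is full, then kick once); it is not the vsock driver's actual code, which posts a header plus payload per buffer:

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/virtio.h>

#define MY_RX_BUF_SIZE 4096	/* illustrative buffer size */

static void my_rx_fill(struct virtqueue *rx_vq)
{
	struct scatterlist sg;
	void *buf;

	for (;;) {
		buf = kmalloc(MY_RX_BUF_SIZE, GFP_KERNEL);
		if (!buf)
			break;

		sg_init_one(&sg, buf, MY_RX_BUF_SIZE);
		/* a negative return (e.g. -ENOSPC when full) ends the loop */
		if (virtqueue_add_inbuf(rx_vq, &sg, 1, buf, GFP_KERNEL) < 0) {
			kfree(buf);
			break;
		}
	}
	virtqueue_kick(rx_vq);	/* tell the device new buffers are available */
}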
/kernel/linux/linux-5.10/tools/testing/selftests/arm64/fp/
sve-probe-vls.c
21 static unsigned int vqs[SVE_VQ_MAX]; in main() local
48 vqs[nvqs++] = vq; in main()
55 ksft_print_msg("%u\n", 16 * vqs[nvqs]); in main()
/kernel/linux/linux-6.6/tools/testing/selftests/arm64/fp/
sve-probe-vls.c
22 static unsigned int vqs[SVE_VQ_MAX]; in main() local
53 vqs[nvqs++] = vq; in main()
60 ksft_print_msg("%u\n", 16 * vqs[nvqs]); in main()
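Here vqs[] is unrelated to virtio: the selftest records the SVE vector-quadword counts (VQ = VL / 16 bytes) the kernel grants. A rough userspace sketch of that probing loop, assuming the PR_SVE_SET_VL semantics documented in Documentation/arm64/sve.rst (requests are rounded down to a supported length); this is not the selftest's exact code:

#include <stdio.h>
#include <sys/prctl.h>		/* PR_SVE_SET_VL, PR_SVE_VL_LEN_MASK (arm64) */

#define MAX_VQS 512		/* matches SVE_VQ_MAX in the uapi headers */

int main(void)
{
	unsigned int vqs[MAX_VQS];
	unsigned int nvqs = 0, vq, i;
	int vl;

	for (vq = MAX_VQS; vq > 0; --vq) {
		vl = prctl(PR_SVE_SET_VL, vq * 16);
		if (vl < 0)
			continue;	/* SVE missing or length rejected */
		vl &= PR_SVE_VL_LEN_MASK;	/* strip flag bits */
		vq = vl / 16;		/* the kernel may have rounded down */
		vqs[nvqs++] = vq;	/* record this supported VQ */
	}

	for (i = 0; i < nvqs; i++)
		printf("supported VL: %u bytes\n", 16 * vqs[i]);

	return 0;
}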
/kernel/linux/linux-6.6/drivers/vdpa/vdpa_sim/
vdpa_sim.c
95 struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; in vdpasim_queue_ready()
149 vdpasim_vq_reset(vdpasim, &vdpasim->vqs[i]); in vdpasim_do_reset()
150 vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0], in vdpasim_do_reset()
247 vdpasim->vqs = kcalloc(dev_attr->nvqs, sizeof(struct vdpasim_virtqueue), in vdpasim_create()
249 if (!vdpasim->vqs) in vdpasim_create()
266 vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0], in vdpasim_create()
291 struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; in vdpasim_set_vq_address()
303 struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; in vdpasim_set_vq_num()
311 struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; in vdpasim_kick_vq()
327 struct vdpasim_virtqueue *vq = &vdpasim->vqs[id in vdpasim_set_vq_cb()
[all...]
/kernel/linux/linux-5.10/drivers/vdpa/vdpa_sim/
vdpa_sim.c
83 struct vdpasim_virtqueue *vqs; member
132 struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; in vdpasim_queue_ready()
160 vdpasim_vq_reset(&vdpasim->vqs[i]); in vdpasim_reset()
175 struct vdpasim_virtqueue *txq = &vdpasim->vqs[1]; in vdpasim_work()
176 struct vdpasim_virtqueue *rxq = &vdpasim->vqs[0]; in vdpasim_work()
392 vdpasim->vqs = kcalloc(dev_attr->nvqs, sizeof(struct vdpasim_virtqueue), in vdpasim_create()
394 if (!vdpasim->vqs) in vdpasim_create()
416 vringh_set_iotlb(&vdpasim->vqs[i].vring, vdpasim->iommu); in vdpasim_create()
436 struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; in vdpasim_set_vq_address()
448 struct vdpasim_virtqueue *vq = &vdpasim->vqs[id in vdpasim_set_vq_num()
[all...]
/kernel/linux/linux-6.6/drivers/block/
virtio_blk.c
80 /* num of vqs */
83 struct virtio_blk_vq *vqs; member
135 struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num]; in get_virtio_blk_vq()
360 spin_lock_irqsave(&vblk->vqs[qid].lock, flags); in virtblk_done()
363 while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) { in virtblk_done()
377 spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); in virtblk_done()
383 struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num]; in virtio_commit_rqs()
445 spin_lock_irqsave(&vblk->vqs[qid].lock, flags); in virtio_queue_rq()
446 err = virtblk_add_req(vblk->vqs[qid].vq, vbr); in virtio_queue_rq()
448 virtqueue_kick(vblk->vqs[qi in virtio_queue_rq()
1027 struct virtqueue **vqs; init_vq() local
[all...]
/kernel/linux/linux-6.6/drivers/crypto/virtio/
virtio_crypto_core.c
102 struct virtqueue **vqs; in virtcrypto_find_vqs() local
116 vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL); in virtcrypto_find_vqs()
117 if (!vqs) in virtcrypto_find_vqs()
138 ret = virtio_find_vqs(vi->vdev, total_vqs, vqs, callbacks, names, NULL); in virtcrypto_find_vqs()
142 vi->ctrl_vq = vqs[total_vqs - 1]; in virtcrypto_find_vqs()
146 vi->data_vq[i].vq = vqs[i]; in virtcrypto_find_vqs()
149 virtqueue_get_vring_size(vqs[i])); in virtcrypto_find_vqs()
160 kfree(vqs); in virtcrypto_find_vqs()
170 kfree(vqs); in virtcrypto_find_vqs()
[all...]
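virtcrypto_find_vqs() above shows the standard probe-time sequence: build parallel vqs/callbacks/names arrays, call virtio_find_vqs(), then stash the returned virtqueue pointers in driver state. A condensed sketch with two queues and invented names (the real driver sizes the arrays from the device's configured queue count):

#include <linux/virtio.h>
#include <linux/virtio_config.h>

static void my_dataq_done(struct virtqueue *vq)
{
	/* completion handling would go here */
}

static int my_find_vqs(struct virtio_device *vdev,
		       struct virtqueue **data_vq, struct virtqueue **ctrl_vq)
{
	struct virtqueue *vqs[2];
	vq_callback_t *callbacks[2] = { my_dataq_done, NULL };
	static const char * const names[] = { "dataq", "controlq" };
	int ret;

	ret = virtio_find_vqs(vdev, 2, vqs, callbacks, names, NULL);
	if (ret)
		return ret;

	*data_vq = vqs[0];	/* queue for requests */
	*ctrl_vq = vqs[1];	/* NULL callback: no notification handler in this sketch */
	return 0;
}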
