Lines matching refs: qid
72 static int pds_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
77 pdsv->vqs[qid].desc_addr = desc_addr;
78 pdsv->vqs[qid].avail_addr = driver_addr;
79 pdsv->vqs[qid].used_addr = device_addr;
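The three hits at lines 72-79 are the .set_vq_address vdpa op, which only caches the guest-supplied descriptor, driver (avail), and device (used) ring addresses in the per-queue entry; nothing is sent to the device yet. A minimal sketch of how the full callback likely reads, assuming a vdpa_to_pdsv() container-of helper that is not among the matched lines:

static int pds_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
				   u64 desc_addr, u64 driver_addr,
				   u64 device_addr)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);	/* assumed helper */

	/* Cache the ring addresses; they are pushed to the device
	 * later, when the queue is made ready.
	 */
	pdsv->vqs[qid].desc_addr = desc_addr;
	pdsv->vqs[qid].avail_addr = driver_addr;
	pdsv->vqs[qid].used_addr = device_addr;

	return 0;
}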
84 static void pds_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid, u32 num)
88 pdsv->vqs[qid].q_len = num;
91 static void pds_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
95 iowrite16(qid, pdsv->vqs[qid].notify);
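Lines 84-95 cover two more per-queue ops: .set_vq_num caches the negotiated ring size, and .kick_vq writes the queue index to the queue's ioremapped notification (doorbell) register. A hedged sketch, again assuming the hypothetical vdpa_to_pdsv() helper:

static void pds_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid, u32 num)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);	/* assumed helper */

	pdsv->vqs[qid].q_len = num;
}

static void pds_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	/* notify is the mapped doorbell for this queue; writing the
	 * qid tells the device to go process the ring.
	 */
	iowrite16(qid, pdsv->vqs[qid].notify);
}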
98 static void pds_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid,
103 pdsv->vqs[qid].event_cb = *cb;
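Line 103 stores the caller's vdpa_callback in the queue entry. The interrupt handler that consumes it does not reference qid, so it is not in this listing; the handler name, its struct argument type, and its body below are assumptions sketched to show how event_cb would be used:

static void pds_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid,
			       struct vdpa_callback *cb)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);	/* assumed helper */

	pdsv->vqs[qid].event_cb = *cb;
}

/* Hypothetical per-queue interrupt handler consuming event_cb */
static irqreturn_t pds_vdpa_isr(int irq, void *data)
{
	struct pds_vdpa_vq_info *vq = data;	/* assumed per-vq struct name */

	if (vq->event_cb.callback)
		vq->event_cb.callback(vq->event_cb.private);

	return IRQ_HANDLED;
}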
117 static void pds_vdpa_release_irq(struct pds_vdpa_device *pdsv, int qid)
119 if (pdsv->vqs[qid].irq == VIRTIO_MSI_NO_VECTOR)
122 free_irq(pdsv->vqs[qid].irq, &pdsv->vqs[qid]);
123 pdsv->vqs[qid].irq = VIRTIO_MSI_NO_VECTOR;
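Lines 117-123 show the per-queue IRQ teardown: VIRTIO_MSI_NO_VECTOR serves as the "no vector assigned" sentinel, so calling this on an unused or already-released queue is a harmless no-op. Sketch of the complete helper as implied by the matched lines:

static void pds_vdpa_release_irq(struct pds_vdpa_device *pdsv, int qid)
{
	/* VIRTIO_MSI_NO_VECTOR marks a queue with no vector assigned */
	if (pdsv->vqs[qid].irq == VIRTIO_MSI_NO_VECTOR)
		return;

	free_irq(pdsv->vqs[qid].irq, &pdsv->vqs[qid]);
	pdsv->vqs[qid].irq = VIRTIO_MSI_NO_VECTOR;
}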
126 static void pds_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev, u16 qid, bool ready)
134 dev_dbg(dev, "%s: qid %d ready %d => %d\n",
135 __func__, qid, pdsv->vqs[qid].ready, ready);
136 if (ready == pdsv->vqs[qid].ready)
148 err = pds_vdpa_cmd_init_vq(pdsv, qid, invert_idx, &pdsv->vqs[qid]);
151 qid, ERR_PTR(err));
155 err = pds_vdpa_cmd_reset_vq(pdsv, qid, invert_idx, &pdsv->vqs[qid]);
157 dev_err(dev, "%s: reset_vq failed qid %d: %pe\n",
158 __func__, qid, ERR_PTR(err));
161 pdsv->vqs[qid].ready = ready;
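Lines 126-161 are the .set_vq_ready op: it short-circuits when the state does not change, sends an init_vq admin command to the device when enabling, a reset_vq command when disabling, and records the final state. The invert_idx handling for packed rings and the exact error strings are assumptions filled in around the matched lines:

static void pds_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev, u16 qid,
				  bool ready)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);	/* assumed helper */
	struct device *dev = &pdsv->vdpa_dev.dev;		/* assumed source of dev */
	u16 invert_idx = 0;
	int err;

	dev_dbg(dev, "%s: qid %d ready %d => %d\n",
		__func__, qid, pdsv->vqs[qid].ready, ready);
	if (ready == pdsv->vqs[qid].ready)
		return;

	/* Assumed: for packed rings, the cached indices carry an inverted
	 * wrap bit that is signalled to the device via invert_idx.
	 */

	if (ready) {
		err = pds_vdpa_cmd_init_vq(pdsv, qid, invert_idx, &pdsv->vqs[qid]);
		if (err) {
			dev_err(dev, "%s: init_vq failed qid %d: %pe\n",
				__func__, qid, ERR_PTR(err));
			ready = false;
		}
	} else {
		err = pds_vdpa_cmd_reset_vq(pdsv, qid, invert_idx, &pdsv->vqs[qid]);
		if (err)
			dev_err(dev, "%s: reset_vq failed qid %d: %pe\n",
				__func__, qid, ERR_PTR(err));
	}

	pdsv->vqs[qid].ready = ready;
}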
164 static bool pds_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
168 return pdsv->vqs[qid].ready;
171 static int pds_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
181 if (pdsv->vqs[qid].ready) {
215 pdsv->vqs[qid].avail_idx = avail;
216 pdsv->vqs[qid].used_idx = used;
221 static int pds_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
231 if (pdsv->vqs[qid].ready) {
236 avail = pdsv->vqs[qid].avail_idx;
237 used = pdsv->vqs[qid].used_idx;
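Lines 171-237 are the .set_vq_state/.get_vq_state pair. Both refuse to touch a queue that is currently ready (the device owns the indices then) and otherwise work on the cached avail_idx/used_idx. The split/packed index conversion is elided below; this sketch shows only the ready-check and caching that the matched lines demonstrate, with the return codes assumed:

static int pds_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
				 const struct vdpa_vq_state *state)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);	/* assumed helper */
	u16 avail, used;

	/* The device owns the ring indices while the vq is enabled */
	if (pdsv->vqs[qid].ready)
		return -EINVAL;

	/* ... convert state->split or state->packed into avail/used ... */
	avail = state->split.avail_index;
	used = avail;

	pdsv->vqs[qid].avail_idx = avail;
	pdsv->vqs[qid].used_idx = used;

	return 0;
}

static int pds_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
				 struct vdpa_vq_state *state)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	/* Likewise, only report cached state while the vq is disabled */
	if (pdsv->vqs[qid].ready)
		return -EINVAL;

	/* ... translate cached avail_idx/used_idx back into
	 * state->split or state->packed as negotiated ...
	 */
	state->split.avail_index = pdsv->vqs[qid].avail_idx;

	return 0;
}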
257 pds_vdpa_get_vq_notification(struct vdpa_device *vdpa_dev, u16 qid)
263 area.addr = pdsv->vqs[qid].notify_pa;
274 static int pds_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev, u16 qid)
278 return pdsv->vqs[qid].irq;
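Lines 257-278 are the read-only accessors: .get_vq_notification exposes the physical address of the queue's doorbell region so it can be mapped directly, and .get_vq_irq reports the vector bound to the queue. The notification area size below is an assumption (one page), since it is not part of the matched lines:

static struct vdpa_notification_area
pds_vdpa_get_vq_notification(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);	/* assumed helper */
	struct vdpa_notification_area area;

	area.addr = pdsv->vqs[qid].notify_pa;
	area.size = PAGE_SIZE;		/* assumed; real size comes from the device */

	return area;
}

static int pds_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	return pdsv->vqs[qid].irq;
}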
385 int max_vq, nintrs, qid, err;
396 for (qid = 0; qid < pdsv->num_vqs; ++qid) {
397 int irq = pci_irq_vector(pdev, qid);
399 snprintf(pdsv->vqs[qid].irq_name, sizeof(pdsv->vqs[qid].irq_name),
400 "vdpa-%s-%d", dev_name(dev), qid);
403 pdsv->vqs[qid].irq_name,
404 &pdsv->vqs[qid]);
406 dev_err(dev, "%s: no irq for qid %d: %pe\n",
407 __func__, qid, ERR_PTR(err));
411 pdsv->vqs[qid].irq = irq;
419 while (qid--)
420 pds_vdpa_release_irq(pdsv, qid);
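Lines 385-420 come from the IRQ setup path: each queue gets a named MSI-X vector via pci_irq_vector()/request_irq(), and on failure the while (qid--) loop unwinds only the vectors already requested. The function name, the pdev/max_vq sources, the pci_alloc_irq_vectors() call, and the handler (pds_vdpa_isr from the earlier sketch) are assumptions around the matched lines:

/* Hypothetical reconstruction of the IRQ setup around lines 385-420 */
static int pds_vdpa_request_irqs(struct pds_vdpa_device *pdsv)
{
	struct device *dev = &pdsv->vdpa_dev.dev;	/* assumed */
	struct pci_dev *pdev = pdsv->pdev;		/* assumed field */
	int max_vq, nintrs, qid, err;

	max_vq = pdsv->num_vqs;		/* assumed upper bound on vectors */

	nintrs = pci_alloc_irq_vectors(pdev, max_vq, max_vq, PCI_IRQ_MSIX);
	if (nintrs < 0)
		return nintrs;

	for (qid = 0; qid < pdsv->num_vqs; ++qid) {
		int irq = pci_irq_vector(pdev, qid);

		snprintf(pdsv->vqs[qid].irq_name, sizeof(pdsv->vqs[qid].irq_name),
			 "vdpa-%s-%d", dev_name(dev), qid);

		err = request_irq(irq, pds_vdpa_isr, 0,
				  pdsv->vqs[qid].irq_name,
				  &pdsv->vqs[qid]);
		if (err) {
			dev_err(dev, "%s: no irq for qid %d: %pe\n",
				__func__, qid, ERR_PTR(err));
			goto err_release;
		}

		pdsv->vqs[qid].irq = irq;
	}

	return 0;

err_release:
	/* Unwind only the vectors that were successfully requested */
	while (qid--)
		pds_vdpa_release_irq(pdsv, qid);
	pci_free_irq_vectors(pdev);

	return err;
}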
433 int qid;
438 for (qid = 0; qid < pdsv->num_vqs; qid++)
439 pds_vdpa_release_irq(pdsv, qid);
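Lines 433-439 are the matching teardown loop: it walks every queue and lets pds_vdpa_release_irq() skip the ones that never got a vector. Sketch, with the function name and the vector-freeing call assumed:

static void pds_vdpa_release_irqs(struct pds_vdpa_device *pdsv)
{
	int qid;

	for (qid = 0; qid < pdsv->num_vqs; qid++)
		pds_vdpa_release_irq(pdsv, qid);

	pci_free_irq_vectors(pdsv->pdev);	/* assumed pdev field, as above */
}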
489 static void pds_vdpa_init_vqs_entry(struct pds_vdpa_device *pdsv, int qid,
492 memset(&pdsv->vqs[qid], 0, sizeof(pdsv->vqs[0]));
493 pdsv->vqs[qid].qid = qid;
494 pdsv->vqs[qid].pdsv = pdsv;
495 pdsv->vqs[qid].ready = false;
496 pdsv->vqs[qid].irq = VIRTIO_MSI_NO_VECTOR;
497 pdsv->vqs[qid].notify = notify;
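Lines 489-497 show the per-queue (re)initialization helper: it zeroes the entry, records its own qid and a back-pointer to the device, marks it not ready with no IRQ assigned, and stores the mapped notify address passed in by the caller. The type of the second parameter below is an assumption; the body is exactly what the matched lines show:

static void pds_vdpa_init_vqs_entry(struct pds_vdpa_device *pdsv, int qid,
				    void __iomem *notify)	/* param type assumed */
{
	memset(&pdsv->vqs[qid], 0, sizeof(pdsv->vqs[0]));
	pdsv->vqs[qid].qid = qid;
	pdsv->vqs[qid].pdsv = pdsv;
	pdsv->vqs[qid].ready = false;
	pdsv->vqs[qid].irq = VIRTIO_MSI_NO_VECTOR;
	pdsv->vqs[qid].notify = notify;
}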
519 dev_err(dev, "%s: reset_vq failed qid %d: %pe\n",