Lines Matching refs:pdsv

30 struct pds_vdpa_device *pdsv = container_of(nb, struct pds_vdpa_device, nb);
31 struct device *dev = &pdsv->vdpa_aux->padev->aux_dev.dev;
36 if (pdsv->config_cb.callback)
37 pdsv->config_cb.callback(pdsv->config_cb.private);
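
The four matches above (lines 30-37) come from the driver's pds_core event notifier callback. A minimal sketch of how they fit together, assuming the handler name and eliding whatever event-code filtering the real handler does:

static int pds_vdpa_notify_handler(struct notifier_block *nb,
				   unsigned long ecode, void *data)
{
	/* Recover the owning device from its embedded notifier_block */
	struct pds_vdpa_device *pdsv = container_of(nb, struct pds_vdpa_device, nb);
	struct device *dev = &pdsv->vdpa_aux->padev->aux_dev.dev;

	dev_dbg(dev, "%s: event code %lu\n", __func__, ecode);

	/* Forward device events to the config callback registered by the
	 * vDPA core, if one has been set (filtering on ecode elided) */
	if (pdsv->config_cb.callback)
		pdsv->config_cb.callback(pdsv->config_cb.private);

	return 0;
}
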
43 static int pds_vdpa_register_event_handler(struct pds_vdpa_device *pdsv)
45 struct device *dev = &pdsv->vdpa_aux->padev->aux_dev.dev;
46 struct notifier_block *nb = &pdsv->nb;
64 static void pds_vdpa_unregister_event_handler(struct pds_vdpa_device *pdsv)
66 if (pdsv->nb.notifier_call) {
67 pdsc_unregister_notify(&pdsv->nb);
68 pdsv->nb.notifier_call = NULL;
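
Lines 43-46 and 64-68 pair up as register/unregister helpers around the embedded notifier_block. The unregister side appears almost completely in the matches; a hedged sketch of the registration side, assuming the pds_core pdsc_register_notify() counterpart to the pdsc_unregister_notify() seen above, and assuming the error handling:

static int pds_vdpa_register_event_handler(struct pds_vdpa_device *pdsv)
{
	struct device *dev = &pdsv->vdpa_aux->padev->aux_dev.dev;
	struct notifier_block *nb = &pdsv->nb;
	int err;

	if (!nb->notifier_call) {
		nb->notifier_call = pds_vdpa_notify_handler;	/* handler name assumed */
		err = pdsc_register_notify(nb);
		if (err) {
			nb->notifier_call = NULL;
			dev_err(dev, "failed to register pds event handler: %pe\n",
				ERR_PTR(err));
			return err;	/* error handling is an assumption */
		}
	}

	return 0;
}
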
75 struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
77 pdsv->vqs[qid].desc_addr = desc_addr;
78 pdsv->vqs[qid].avail_addr = driver_addr;
79 pdsv->vqs[qid].used_addr = device_addr;
86 struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
88 pdsv->vqs[qid].q_len = num;
93 struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
95 iowrite16(qid, pdsv->vqs[qid].notify);
101 struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
103 pdsv->vqs[qid].event_cb = *cb;
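
The matches at lines 75-103 are the small vdpa_config_ops that simply cache per-queue state in pdsv->vqs[] (ring addresses, ring size, event callback), plus the kick op that writes the queue id to the mapped notify register. A condensed sketch of two of them, with the signatures taken from the standard vdpa_config_ops interface; set_vq_num and set_vq_cb follow the same caching pattern:

static int pds_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
				   u64 desc_addr, u64 driver_addr, u64 device_addr)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	/* Remember the ring addresses; they are pushed to the device later,
	 * when the queue is initialized */
	pdsv->vqs[qid].desc_addr = desc_addr;
	pdsv->vqs[qid].avail_addr = driver_addr;
	pdsv->vqs[qid].used_addr = device_addr;

	return 0;
}

static void pds_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	/* Ring the doorbell: the device expects the queue id in its notify area */
	iowrite16(qid, pdsv->vqs[qid].notify);
}
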
117 static void pds_vdpa_release_irq(struct pds_vdpa_device *pdsv, int qid)
119 if (pdsv->vqs[qid].irq == VIRTIO_MSI_NO_VECTOR)
122 free_irq(pdsv->vqs[qid].irq, &pdsv->vqs[qid]);
123 pdsv->vqs[qid].irq = VIRTIO_MSI_NO_VECTOR;
128 struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
129 struct device *dev = &pdsv->vdpa_dev.dev;
135 __func__, qid, pdsv->vqs[qid].ready, ready);
136 if (ready == pdsv->vqs[qid].ready)
148 err = pds_vdpa_cmd_init_vq(pdsv, qid, invert_idx, &pdsv->vqs[qid]);
155 err = pds_vdpa_cmd_reset_vq(pdsv, qid, invert_idx, &pdsv->vqs[qid]);
161 pdsv->vqs[qid].ready = ready;
166 struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
168 return pdsv->vqs[qid].ready;
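
Lines 128-168 cover set_vq_ready/get_vq_ready: the cached ready flag is only flipped after the queue has been initialized or reset through the adminq helpers. A sketch of the implied control flow, with the feature-dependent invert_idx handling and the error reporting treated as assumptions:

static void pds_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev, u16 qid, bool ready)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	struct device *dev = &pdsv->vdpa_dev.dev;
	u16 invert_idx = 0;	/* packed-ring index inversion elided */
	int err;

	dev_dbg(dev, "%s: qid %d ready %d => %d\n",
		__func__, qid, pdsv->vqs[qid].ready, ready);
	if (ready == pdsv->vqs[qid].ready)
		return;

	if (ready)
		err = pds_vdpa_cmd_init_vq(pdsv, qid, invert_idx, &pdsv->vqs[qid]);
	else
		err = pds_vdpa_cmd_reset_vq(pdsv, qid, invert_idx, &pdsv->vqs[qid]);
	if (err)
		return;		/* leave the cached flag untouched on failure */

	pdsv->vqs[qid].ready = ready;
}
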
174 struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
175 struct pds_auxiliary_dev *padev = pdsv->vdpa_aux->padev;
181 if (pdsv->vqs[qid].ready) {
215 pdsv->vqs[qid].avail_idx = avail;
216 pdsv->vqs[qid].used_idx = used;
224 struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
225 struct pds_auxiliary_dev *padev = pdsv->vdpa_aux->padev;
231 if (pdsv->vqs[qid].ready) {
236 avail = pdsv->vqs[qid].avail_idx;
237 used = pdsv->vqs[qid].used_idx;
259 struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
263 area.addr = pdsv->vqs[qid].notify_pa;
265 vd_mdev = &pdsv->vdpa_aux->vd_mdev;
276 struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
278 return pdsv->vqs[qid].irq;
293 struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
295 return pdsv->supported_features;
300 struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
301 struct device *dev = &pdsv->vdpa_dev.dev;
313 nego_features = features & pdsv->supported_features;
322 pdsv->negotiated_features = nego_features;
327 hw_features = le64_to_cpu(pdsv->vdpa_aux->ident.hw_features);
334 vp_modern_set_features(&pdsv->vdpa_aux->vd_mdev, nego_features);
341 struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
343 return pdsv->negotiated_features;
349 struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
351 pdsv->config_cb.callback = cb->callback;
352 pdsv->config_cb.private = cb->private;
357 struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
360 return min_t(u16, 1024, BIT(le16_to_cpu(pdsv->vdpa_aux->ident.max_qlen)));
375 struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
377 return vp_modern_get_status(&pdsv->vdpa_aux->vd_mdev);
380 static int pds_vdpa_request_irqs(struct pds_vdpa_device *pdsv)
382 struct pci_dev *pdev = pdsv->vdpa_aux->padev->vf_pdev;
383 struct pds_vdpa_aux *vdpa_aux = pdsv->vdpa_aux;
384 struct device *dev = &pdsv->vdpa_dev.dev;
396 for (qid = 0; qid < pdsv->num_vqs; ++qid) {
399 snprintf(pdsv->vqs[qid].irq_name, sizeof(pdsv->vqs[qid].irq_name),
403 pdsv->vqs[qid].irq_name,
404 &pdsv->vqs[qid]);
411 pdsv->vqs[qid].irq = irq;
420 pds_vdpa_release_irq(pdsv, qid);
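
Lines 380-420 show the per-queue interrupt setup: each vq gets its own vector, a name is formatted into irq_name, and the vq info struct is passed as the dev_id that free_irq() later matches (line 122). A hedged sketch, with the ISR name, the vq-info struct name, the irq_name format, and the MSI-X vector allocation (elided here) all being assumptions:

static irqreturn_t pds_vdpa_isr(int irq, void *data)
{
	struct pds_vdpa_vq_info *qinfo = data;	/* struct name assumed */

	/* Bounce the hardware interrupt into the callback cached by set_vq_cb() */
	if (qinfo->event_cb.callback)
		qinfo->event_cb.callback(qinfo->event_cb.private);

	return IRQ_HANDLED;
}

static int pds_vdpa_request_irqs(struct pds_vdpa_device *pdsv)
{
	struct pci_dev *pdev = pdsv->vdpa_aux->padev->vf_pdev;
	struct device *dev = &pdsv->vdpa_dev.dev;
	int err, irq, qid;

	for (qid = 0; qid < pdsv->num_vqs; ++qid) {
		irq = pci_irq_vector(pdev, qid);
		snprintf(pdsv->vqs[qid].irq_name, sizeof(pdsv->vqs[qid].irq_name),
			 "vdpa-vq%d", qid);	/* name format assumed */

		err = request_irq(irq, pds_vdpa_isr, 0,
				  pdsv->vqs[qid].irq_name, &pdsv->vqs[qid]);
		if (err) {
			dev_err(dev, "%s: no irq for qid %d: %pe\n",
				__func__, qid, ERR_PTR(err));
			goto err_release;
		}
		pdsv->vqs[qid].irq = irq;
	}

	return 0;

err_release:
	while (qid--)
		pds_vdpa_release_irq(pdsv, qid);
	return err;
}
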
429 static void pds_vdpa_release_irqs(struct pds_vdpa_device *pdsv)
431 struct pci_dev *pdev = pdsv->vdpa_aux->padev->vf_pdev;
432 struct pds_vdpa_aux *vdpa_aux = pdsv->vdpa_aux;
438 for (qid = 0; qid < pdsv->num_vqs; qid++)
439 pds_vdpa_release_irq(pdsv, qid);
448 struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
449 struct device *dev = &pdsv->vdpa_dev.dev;
457 if (pds_vdpa_request_irqs(pdsv))
461 pds_vdpa_cmd_set_status(pdsv, status);
467 pds_vdpa_cmd_reset(pdsv);
469 for (i = 0; i < pdsv->num_vqs; i++) {
470 pdsv->vqs[i].avail_idx = 0;
471 pdsv->vqs[i].used_idx = 0;
474 pds_vdpa_cmd_set_mac(pdsv, pdsv->mac);
478 for (i = 0; i < pdsv->num_vqs; i++) {
479 pdsv->vqs[i].notify =
480 vp_modern_map_vq_notify(&pdsv->vdpa_aux->vd_mdev,
481 i, &pdsv->vqs[i].notify_pa);
486 pds_vdpa_release_irqs(pdsv);
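
Lines 448-486 outline set_status: interrupts are requested before DRIVER_OK is passed down, the new status goes to the device via the adminq, and a status of 0 triggers a full reset that clears the cached ring indices, restores the MAC, and re-maps the notify areas. A rough sketch of that ordering; the exact status-bit bookkeeping and error paths are assumptions:

static void pds_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	int i;

	/* Interrupts must be in place before the device sees DRIVER_OK */
	if (status & VIRTIO_CONFIG_S_DRIVER_OK)
		if (pds_vdpa_request_irqs(pdsv))
			status &= ~VIRTIO_CONFIG_S_DRIVER_OK;	/* assumption */

	pds_vdpa_cmd_set_status(pdsv, status);

	if (status == 0) {
		/* Device reset: clear ring-index state, restore the MAC,
		 * and re-map the notify areas for the next start */
		pds_vdpa_cmd_reset(pdsv);

		for (i = 0; i < pdsv->num_vqs; i++) {
			pdsv->vqs[i].avail_idx = 0;
			pdsv->vqs[i].used_idx = 0;
		}

		pds_vdpa_cmd_set_mac(pdsv, pdsv->mac);

		for (i = 0; i < pdsv->num_vqs; i++)
			pdsv->vqs[i].notify =
				vp_modern_map_vq_notify(&pdsv->vdpa_aux->vd_mdev,
							i, &pdsv->vqs[i].notify_pa);
	}

	if (!(status & VIRTIO_CONFIG_S_DRIVER_OK))
		pds_vdpa_release_irqs(pdsv);
}
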
489 static void pds_vdpa_init_vqs_entry(struct pds_vdpa_device *pdsv, int qid,
492 memset(&pdsv->vqs[qid], 0, sizeof(pdsv->vqs[0]));
493 pdsv->vqs[qid].qid = qid;
494 pdsv->vqs[qid].pdsv = pdsv;
495 pdsv->vqs[qid].ready = false;
496 pdsv->vqs[qid].irq = VIRTIO_MSI_NO_VECTOR;
497 pdsv->vqs[qid].notify = notify;
502 struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
508 dev = &pdsv->vdpa_aux->padev->aux_dev.dev;
516 for (i = 0; i < pdsv->num_vqs && !err; i++) {
517 err = pds_vdpa_cmd_reset_vq(pdsv, i, 0, &pdsv->vqs[i]);
528 for (i = 0; i < pdsv->num_vqs && !err; i++)
529 pds_vdpa_init_vqs_entry(pdsv, i, pdsv->vqs[i].notify);
544 struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
552 device = pdsv->vdpa_aux->vd_mdev.device;
560 struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
568 device = pdsv->vdpa_aux->vd_mdev.device;
609 struct pds_vdpa_device *pdsv;
622 if (vdpa_aux->pdsv) {
627 pdsv = vdpa_alloc_device(struct pds_vdpa_device, vdpa_dev,
629 if (IS_ERR(pdsv)) {
630 dev_err(dev, "Failed to allocate vDPA structure: %pe\n", pdsv);
631 return PTR_ERR(pdsv);
634 vdpa_aux->pdsv = pdsv;
635 pdsv->vdpa_aux = vdpa_aux;
639 pdsv->vdpa_dev.dma_dev = dma_dev;
641 pdsv->supported_features = mgmt->supported_features;
645 add_config->device_features & ~pdsv->supported_features;
653 pdsv->supported_features = add_config->device_features;
656 err = pds_vdpa_cmd_reset(pdsv);
662 err = pds_vdpa_init_hw(pdsv);
668 fw_max_vqs = le16_to_cpu(pdsv->vdpa_aux->ident.max_vqs);
675 pdsv->num_vqs = 2 * vq_pairs;
676 if (pdsv->supported_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))
677 pdsv->num_vqs++;
679 if (pdsv->num_vqs > fw_max_vqs) {
681 __func__, pdsv->num_vqs, fw_max_vqs);
686 if (pdsv->num_vqs != fw_max_vqs) {
687 err = pds_vdpa_cmd_set_max_vq_pairs(pdsv, vq_pairs);
700 ether_addr_copy(pdsv->mac, add_config->net.mac);
704 vc = pdsv->vdpa_aux->vd_mdev.device;
705 memcpy_fromio(pdsv->mac, vc->mac, sizeof(pdsv->mac));
706 if (is_zero_ether_addr(pdsv->mac) &&
707 (pdsv->supported_features & BIT_ULL(VIRTIO_NET_F_MAC))) {
708 eth_random_addr(pdsv->mac);
709 dev_info(dev, "setting random mac %pM\n", pdsv->mac);
712 pds_vdpa_cmd_set_mac(pdsv, pdsv->mac);
714 for (i = 0; i < pdsv->num_vqs; i++) {
717 notify = vp_modern_map_vq_notify(&pdsv->vdpa_aux->vd_mdev,
718 i, &pdsv->vqs[i].notify_pa);
719 pds_vdpa_init_vqs_entry(pdsv, i, notify);
722 pdsv->vdpa_dev.mdev = &vdpa_aux->vdpa_mdev;
724 err = pds_vdpa_register_event_handler(pdsv);
735 err = _vdpa_register_device(&pdsv->vdpa_dev, pdsv->num_vqs);
746 pds_vdpa_unregister_event_handler(pdsv);
748 put_device(&pdsv->vdpa_dev.dev);
749 vdpa_aux->pdsv = NULL;
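
The dev_add matches at lines 668-687 size the queue set from the firmware identity data: num_vqs is twice the negotiated queue-pair count, plus one if the control vq feature is offered, and the result is checked against and reported back to the firmware. A hypothetical helper illustrating that arithmetic (the driver does this inline in dev_add; the helper name, the requested_vq_pairs parameter, and the -ENOSPC error value are illustrative only):

static int pds_vdpa_size_vqs(struct pds_vdpa_device *pdsv, u16 requested_vq_pairs)
{
	u16 fw_max_vqs = le16_to_cpu(pdsv->vdpa_aux->ident.max_vqs);
	u16 vq_pairs = min_t(u16, fw_max_vqs / 2, requested_vq_pairs);

	pdsv->num_vqs = 2 * vq_pairs;
	if (pdsv->supported_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))
		pdsv->num_vqs++;		/* one extra queue for the control vq */

	if (pdsv->num_vqs > fw_max_vqs)
		return -ENOSPC;			/* error value assumed */

	if (pdsv->num_vqs != fw_max_vqs)
		return pds_vdpa_cmd_set_max_vq_pairs(pdsv, vq_pairs);

	return 0;
}
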
756 struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
759 pds_vdpa_unregister_event_handler(pdsv);
764 pds_vdpa_cmd_reset(vdpa_aux->pdsv);
767 vdpa_aux->pdsv = NULL;