Lines Matching refs:vd_dev
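
All of the matches below go through struct virtio_vdpa_device, the per-device state that the virtio-vdpa bridge (these lines appear to come from drivers/virtio/virtio_vdpa.c in the Linux kernel) wraps around a vDPA parent device. The following is a minimal sketch of the layout implied by the matched lines, not the authoritative definition; only the fields actually referenced in the listing are shown, and the comments are illustrative.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/virtio.h>
#include <linux/vdpa.h>

/* Sketch only: the real definition lives in the driver source. */
struct virtio_vdpa_device {
	struct virtio_device vdev;	/* embedded virtio device, registered with the virtio core */
	struct vdpa_device *vdpa;	/* backing vDPA device */

	/* protects the virtqueue list below (lines 251-253, 276-278) */
	spinlock_t lock;
	/* per-virtqueue bookkeeping nodes (line 252) */
	struct list_head virtqueues;
};

/* Matches such as to_virtio_vdpa_device(vdev) recover the wrapper from the
 * embedded struct virtio_device via container_of(). */
#define to_virtio_vdpa_device(dev) \
	container_of(dev, struct virtio_vdpa_device, vdev)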

129 struct virtio_vdpa_device *vd_dev = private;
131 virtio_config_changed(&vd_dev->vdev);
148 struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev);
251 spin_lock_irqsave(&vd_dev->lock, flags);
252 list_add(&info->node, &vd_dev->virtqueues);
253 spin_unlock_irqrestore(&vd_dev->lock, flags);
269 struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vq->vdev);
270 struct vdpa_device *vdpa = vd_dev->vdpa;
276 spin_lock_irqsave(&vd_dev->lock, flags);
278 spin_unlock_irqrestore(&vd_dev->lock, flags);
363 struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev);
397 cb.private = vd_dev;
431 struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev);
432 struct vdpa_device *vdpa = vd_dev->vdpa;
440 struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vq->vdev);
441 struct vdpa_device *vdpa = vd_dev->vdpa;
483 struct virtio_vdpa_device *vd_dev =
486 kfree(vd_dev);
492 struct virtio_vdpa_device *vd_dev, *reg_dev = NULL;
495 vd_dev = kzalloc(sizeof(*vd_dev), GFP_KERNEL);
496 if (!vd_dev)
499 vd_dev->vdev.dev.parent = vdpa_get_dma_dev(vdpa);
500 vd_dev->vdev.dev.release = virtio_vdpa_release_dev;
501 vd_dev->vdev.config = &virtio_vdpa_config_ops;
502 vd_dev->vdpa = vdpa;
503 INIT_LIST_HEAD(&vd_dev->virtqueues);
504 spin_lock_init(&vd_dev->lock);
506 vd_dev->vdev.id.device = ops->get_device_id(vdpa);
507 if (vd_dev->vdev.id.device == 0)
510 vd_dev->vdev.id.vendor = ops->get_vendor_id(vdpa);
511 ret = register_virtio_device(&vd_dev->vdev);
512 reg_dev = vd_dev;
516 vdpa_set_drvdata(vdpa, vd_dev);
522 put_device(&vd_dev->vdev.dev);
524 kfree(vd_dev);
530 struct virtio_vdpa_device *vd_dev = vdpa_get_drvdata(vdpa);
532 unregister_virtio_device(&vd_dev->vdev);
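
For the callback matches (lines 129, 131 and 397): vd_dev is handed to the vDPA core as the private pointer of the config-change callback, and the callback forwards the event to the virtio core. A hedged sketch of that wiring follows; it assumes the struct vdpa_callback / set_config_cb interface from include/linux/vdpa.h, and the registration helper is named here only for illustration (in the driver this happens inline in the find_vqs path, cf. line 397).

/* Config-change interrupt: recover vd_dev from the private pointer and
 * notify the virtio core (cf. lines 129 and 131). */
static irqreturn_t virtio_vdpa_config_cb(void *private)
{
	struct virtio_vdpa_device *vd_dev = private;

	virtio_config_changed(&vd_dev->vdev);

	return IRQ_HANDLED;
}

/* Hypothetical helper showing how the callback is registered with the
 * parent vDPA driver; the real driver does this inline (cf. line 397). */
static void virtio_vdpa_register_config_cb(struct virtio_vdpa_device *vd_dev)
{
	struct vdpa_device *vdpa = vd_dev->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_callback cb;

	cb.callback = virtio_vdpa_config_cb;
	cb.private = vd_dev;
	ops->set_config_cb(vdpa, &cb);
}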
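
The lock/list matches (lines 251-253 and 276-278) are the per-virtqueue bookkeeping: the setup path links a small info node into vd_dev->virtqueues under vd_dev->lock, and the teardown path unlinks it under the same lock. The sketch below factors that into two hypothetical helpers; the info structure is reduced to the fields needed here, and in the driver this code sits inline in the setup_vq/del_vq paths.

/* Reduced bookkeeping node; the driver's version also tracks callback state. */
struct virtio_vdpa_vq_info {
	struct virtqueue *vq;
	struct list_head node;
};

static void virtio_vdpa_track_vq(struct virtio_vdpa_device *vd_dev,
				 struct virtio_vdpa_vq_info *info)
{
	unsigned long flags;

	spin_lock_irqsave(&vd_dev->lock, flags);	/* cf. line 251 */
	list_add(&info->node, &vd_dev->virtqueues);	/* cf. line 252 */
	spin_unlock_irqrestore(&vd_dev->lock, flags);	/* cf. line 253 */
}

static void virtio_vdpa_untrack_vq(struct virtio_vdpa_device *vd_dev,
				   struct virtio_vdpa_vq_info *info)
{
	unsigned long flags;

	spin_lock_irqsave(&vd_dev->lock, flags);	/* cf. line 276 */
	list_del(&info->node);
	spin_unlock_irqrestore(&vd_dev->lock, flags);	/* cf. line 278 */
}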
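
Finally, the probe and remove matches (lines 492-524 and 530-532) show the usual two-phase error handling around device registration: before register_virtio_device() has been attempted, the allocation is released with kfree(); once reg_dev has been set, the embedded struct device owns the memory and must be dropped with put_device(), which frees vd_dev through virtio_vdpa_release_dev() (lines 483-486). The reconstruction below is condensed from the matched lines and is a sketch, not the verbatim driver code; the error codes in particular are assumptions.

static int virtio_vdpa_probe(struct vdpa_device *vdpa)
{
	const struct vdpa_config_ops *ops = vdpa->config;
	struct virtio_vdpa_device *vd_dev, *reg_dev = NULL;
	int ret;

	vd_dev = kzalloc(sizeof(*vd_dev), GFP_KERNEL);
	if (!vd_dev)
		return -ENOMEM;

	vd_dev->vdev.dev.parent = vdpa_get_dma_dev(vdpa);
	vd_dev->vdev.dev.release = virtio_vdpa_release_dev;
	vd_dev->vdev.config = &virtio_vdpa_config_ops;
	vd_dev->vdpa = vdpa;
	INIT_LIST_HEAD(&vd_dev->virtqueues);
	spin_lock_init(&vd_dev->lock);

	vd_dev->vdev.id.device = ops->get_device_id(vdpa);
	if (vd_dev->vdev.id.device == 0) {
		ret = -ENODEV;			/* assumed error code */
		goto err;
	}

	vd_dev->vdev.id.vendor = ops->get_vendor_id(vdpa);
	ret = register_virtio_device(&vd_dev->vdev);
	reg_dev = vd_dev;			/* from here on, put_device() owns the free */
	if (ret)
		goto err;

	vdpa_set_drvdata(vdpa, vd_dev);

	return 0;

err:
	if (reg_dev)
		put_device(&vd_dev->vdev.dev);	/* release callback does the kfree (line 486) */
	else
		kfree(vd_dev);			/* not yet registered (line 524) */
	return ret;
}

static void virtio_vdpa_remove(struct vdpa_device *vdpa)
{
	struct virtio_vdpa_device *vd_dev = vdpa_get_drvdata(vdpa);

	unregister_virtio_device(&vd_dev->vdev);
}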