Lines matching refs:viommu

65 struct viommu_dev *viommu;
66 struct mutex mutex; /* protects viommu pointer */
78 struct viommu_dev *viommu;
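
The two members above (lines 65-66 and 78) show that both the domain and the endpoint keep a back-pointer to the viommu device, with the domain's mutex guarding its pointer until the domain is bound. A minimal sketch of that relationship, assuming simplified field sets (the real structures carry more state, and the _sketch names are hypothetical):

#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct viommu_dev;			/* per virtio-iommu device state */

/* Sketch: one domain, shared by endpoints attached to the same viommu */
struct viommu_domain_sketch {
	struct viommu_dev	*viommu;
	struct mutex		mutex;	/* protects viommu pointer */
	unsigned int		id;	/* domain ID negotiated with the device */
	u32			map_flags;
};

/* Sketch: per-endpoint state, set up when the device is probed */
struct viommu_endpoint_sketch {
	struct device			*dev;
	struct viommu_dev		*viommu;
	struct viommu_domain_sketch	*vdomain;
};
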
136 static off_t viommu_get_write_desc_offset(struct viommu_dev *viommu,
143 return len - viommu->probe_size - tail_size;
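
Line 143 computes where the device-writable descriptor begins inside a request buffer: for a probe request the last probe_size + tail bytes are written by the device, for any other request only the tail is. A hedged sketch of that arithmetic (buffer layout inferred from these lines; the _sketch name is hypothetical):

#include <linux/types.h>

/*
 * Sketch: offset of the device-writable part of a request buffer of
 * @len bytes. Probe requests reserve an extra @probe_size bytes for the
 * properties the device fills in, ahead of the common response tail.
 */
static off_t write_desc_offset_sketch(size_t len, size_t probe_size,
				      size_t tail_size, bool is_probe)
{
	if (is_probe)
		return len - probe_size - tail_size;

	return len - tail_size;
}

With illustrative numbers, len = 4096, probe_size = 512 and a 4-byte tail give a write offset of 3580.
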
154 static int __viommu_sync_req(struct viommu_dev *viommu)
159 struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];
161 assert_spin_locked(&viommu->request_lock);
165 while (!list_empty(&viommu->requests)) {
187 static int viommu_sync_req(struct viommu_dev *viommu)
192 spin_lock_irqsave(&viommu->request_lock, flags);
193 ret = __viommu_sync_req(viommu);
195 dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
196 spin_unlock_irqrestore(&viommu->request_lock, flags);
217 static int __viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len,
225 struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];
227 assert_spin_locked(&viommu->request_lock);
229 write_offset = viommu_get_write_desc_offset(viommu, buf, len);
250 if (!__viommu_sync_req(viommu))
256 list_add_tail(&req->list, &viommu->requests);
264 static int viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len)
269 spin_lock_irqsave(&viommu->request_lock, flags);
270 ret = __viommu_add_req(viommu, buf, len, false);
272 dev_dbg(viommu->dev, "could not add request: %d\n", ret);
273 spin_unlock_irqrestore(&viommu->request_lock, flags);
282 static int viommu_send_req_sync(struct viommu_dev *viommu, void *buf,
288 spin_lock_irqsave(&viommu->request_lock, flags);
290 ret = __viommu_add_req(viommu, buf, len, true);
292 dev_dbg(viommu->dev, "could not add request (%d)\n", ret);
296 ret = __viommu_sync_req(viommu);
298 dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
304 spin_unlock_irqrestore(&viommu->request_lock, flags);
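
Lines 154-304 together show the request-queue locking pattern: the public helpers take request_lock with spin_lock_irqsave(), the __-prefixed workers assert the lock, and viommu_send_req_sync() chains an add and a sync inside a single critical section. A minimal sketch of that pattern, with hypothetical _sketch names and error handling trimmed:

#include <linux/spinlock.h>
#include <linux/types.h>

struct req_queue_sketch {
	spinlock_t lock;	/* initialised with spin_lock_init() at probe */
};

static int __add_req_sketch(struct req_queue_sketch *q, void *buf, size_t len)
{
	assert_spin_locked(&q->lock);	/* callers must hold q->lock */
	/* ... place buf on the request virtqueue ... */
	return 0;
}

static int __sync_req_sketch(struct req_queue_sketch *q)
{
	assert_spin_locked(&q->lock);
	/* ... kick the queue and wait until pending requests complete ... */
	return 0;
}

/* Public helper: one critical section covers both the add and the sync */
static int send_req_sync_sketch(struct req_queue_sketch *q, void *buf,
				size_t len)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&q->lock, flags);
	ret = __add_req_sketch(q, buf, len);
	if (!ret)
		ret = __sync_req_sketch(q);
	spin_unlock_irqrestore(&q->lock, flags);

	return ret;
}
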
407 ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
460 static int viommu_probe_endpoint(struct viommu_dev *viommu, struct device *dev)
474 probe_len = sizeof(*probe) + viommu->probe_size +
487 ret = viommu_send_req_sync(viommu, probe, probe_len);
495 cur < viommu->probe_size) {
503 dev_err(dev, "unknown viommu prop 0x%x\n", type);
507 dev_err(dev, "failed to parse viommu prop 0x%x\n", type);
510 if (cur >= viommu->probe_size)
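
Lines 487-510 sketch the probe flow: send the probe request synchronously, then walk the returned property buffer, bounded by probe_size, until a NONE property or the end of the buffer. A hedged sketch of that walk (property layout simplified; _sketch names are hypothetical):

#include <linux/kernel.h>
#include <linux/types.h>

/* Sketch: property header, modelled on the type/length walk above */
struct probe_prop_sketch {
	__le16 type;
	__le16 length;
};

/* Walk at most @probe_size bytes of properties returned by the device */
static void parse_probe_props_sketch(void *props, size_t probe_size)
{
	size_t cur = 0;

	while (cur < probe_size) {
		struct probe_prop_sketch *prop = props + cur;
		u16 type = le16_to_cpu(prop->type);
		u16 len = le16_to_cpu(prop->length);

		if (!type)
			break;	/* a NONE property terminates the list */

		/* ... dispatch on type; unknown types are only reported ... */

		cur += sizeof(*prop) + len;
	}
}
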
522 static int viommu_fault_handler(struct viommu_dev *viommu,
547 dev_err_ratelimited(viommu->dev, "%s fault from EP %u at %#llx [%s%s%s]\n",
553 dev_err_ratelimited(viommu->dev, "%s fault from EP %u\n",
564 struct viommu_dev *viommu = vq->vdev->priv;
568 dev_err(viommu->dev,
572 viommu_fault_handler(viommu, &evt->fault);
578 dev_err(viommu->dev, "could not add event buffer\n");
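
Lines 564-578 are the event-queue callback: pop completed event buffers, hand each fault to viommu_fault_handler(), then return the buffer to the device so the queue never runs dry. A minimal sketch of that recycle loop, assuming a fixed illustrative buffer size (_sketch names are hypothetical):

#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/virtio.h>

#define EVT_BUF_SIZE_SKETCH	32	/* illustrative; sized for one event */

/* Sketch: event-queue callback; decoding of the fault itself is elided */
static void event_handler_sketch(struct virtqueue *vq)
{
	struct scatterlist sg;
	unsigned int len;
	void *evt;

	while ((evt = virtqueue_get_buf(vq, &len)) != NULL) {
		/* ... validate len and report the fault ... */

		/* hand the buffer back to the device for the next event */
		sg_init_one(&sg, evt, EVT_BUF_SIZE_SKETCH);
		if (virtqueue_add_inbuf(vq, &sg, 1, evt, GFP_ATOMIC))
			break;	/* queue full; the driver logs an error here */
	}

	virtqueue_kick(vq);
}
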
615 struct viommu_dev *viommu = vdev->viommu;
618 viommu_page_size = 1UL << __ffs(viommu->pgsize_bitmap);
626 ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
627 viommu->last_domain, GFP_KERNEL);
633 domain->pgsize_bitmap = viommu->pgsize_bitmap;
634 domain->geometry = viommu->geometry;
636 vdomain->map_flags = viommu->map_flags;
637 vdomain->viommu = viommu;
651 if (vdomain->viommu)
652 ida_free(&vdomain->viommu->domain_ids, vdomain->id);
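
Lines 615-652 cover domain setup and teardown: the smallest supported page size comes from the low bit of pgsize_bitmap, and the domain ID is taken from (and later returned to) the per-device IDA, restricted to the range the device advertised. A small sketch of those two pieces (_sketch names are hypothetical):

#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/idr.h>

/* Smallest page size the device supports: the lowest bit of the mask */
static unsigned long min_page_size_sketch(unsigned long pgsize_bitmap)
{
	return 1UL << __ffs(pgsize_bitmap);
}

/* Allocate a domain ID within the device-advertised [first, last] range */
static int domain_id_alloc_sketch(struct ida *domain_ids,
				  unsigned int first, unsigned int last)
{
	return ida_alloc_range(domain_ids, first, last, GFP_KERNEL);
}

/* ... and give it back when the domain is freed */
static void domain_id_free_sketch(struct ida *domain_ids, int id)
{
	ida_free(domain_ids, id);
}
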
667 if (!vdomain->viommu) {
669 * Properly initialize the domain now that we know which viommu
673 } else if (vdomain->viommu != vdev->viommu) {
705 ret = viommu_send_req_sync(vdomain->viommu, &req, sizeof(req));
757 ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
787 ret = viommu_add_req(vdomain->viommu, &unmap, sizeof(unmap));
816 viommu_sync_req(vdomain->viommu);
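
Lines 705-816 hint at the flush strategy: attach and map requests go out synchronously via viommu_send_req_sync(), while unmap requests are only queued with viommu_add_req() and are pushed to the device when the IOTLB sync path calls viommu_sync_req(). A hedged sketch of that pairing, assuming it sits next to the helpers listed above (struct viommu_dev, viommu_add_req(), viommu_sync_req()); the _sketch names are hypothetical:

#include <linux/kernel.h>
#include <uapi/linux/virtio_iommu.h>

/* Sketch: queue an UNMAP request without kicking the device yet */
static int queue_unmap_sketch(struct viommu_dev *viommu, u32 domain_id,
			      u64 iova, u64 size)
{
	struct virtio_iommu_req_unmap unmap = {
		.head.type	= VIRTIO_IOMMU_T_UNMAP,
		.domain		= cpu_to_le32(domain_id),
		.virt_start	= cpu_to_le64(iova),
		.virt_end	= cpu_to_le64(iova + size - 1),
	};

	return viommu_add_req(viommu, &unmap, sizeof(unmap));
}

/* Sketch: the IOTLB sync callback is where the queued unmaps are flushed */
static void iotlb_sync_sketch(struct viommu_dev *viommu)
{
	viommu_sync_req(viommu);
}
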
872 struct viommu_dev *viommu = NULL;
878 viommu = viommu_get_by_fwnode(fwspec->iommu_fwnode);
879 if (!viommu)
887 vdev->viommu = viommu;
891 if (viommu->probe_size) {
893 ret = viommu_probe_endpoint(viommu, dev);
898 return &viommu->iommu;
950 static int viommu_init_vqs(struct viommu_dev *viommu)
952 struct virtio_device *vdev = dev_to_virtio(viommu->dev);
959 return virtio_find_vqs(vdev, VIOMMU_NR_VQS, viommu->vqs, callbacks,
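
Lines 950-959 set up the two virtqueues through virtio_find_vqs(): a request queue, completed synchronously by the sync helpers above, and an event queue serviced by a callback. A minimal sketch of that call, with illustrative queue names and hypothetical _sketch names:

#include <linux/virtio.h>
#include <linux/virtio_config.h>

/* placeholder; the real callback drains the event queue (see above) */
static void event_cb_sketch(struct virtqueue *vq) { }

/*
 * Sketch: one request queue (no callback, polled by the sync path)
 * and one event queue serviced by a callback.
 */
static int init_vqs_sketch(struct virtio_device *vdev, struct virtqueue *vqs[2])
{
	vq_callback_t *callbacks[] = { NULL, event_cb_sketch };
	static const char * const names[] = { "request", "event" };

	return virtio_find_vqs(vdev, 2, vqs, callbacks, names, NULL);
}
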
963 static int viommu_fill_evtq(struct viommu_dev *viommu)
968 struct virtqueue *vq = viommu->vqs[VIOMMU_EVENT_VQ];
971 viommu->evts = evts = devm_kmalloc_array(viommu->dev, nr_evts,
989 struct viommu_dev *viommu = NULL;
999 viommu = devm_kzalloc(dev, sizeof(*viommu), GFP_KERNEL);
1000 if (!viommu)
1003 spin_lock_init(&viommu->request_lock);
1004 ida_init(&viommu->domain_ids);
1005 viommu->dev = dev;
1006 viommu->vdev = vdev;
1007 INIT_LIST_HEAD(&viommu->requests);
1009 ret = viommu_init_vqs(viommu);
1014 &viommu->pgsize_bitmap);
1016 if (!viommu->pgsize_bitmap) {
1021 viommu->map_flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE;
1022 viommu->last_domain = ~0U;
1035 &viommu->first_domain);
1039 &viommu->last_domain);
1043 &viommu->probe_size);
1045 viommu->geometry = (struct iommu_domain_geometry) {
1052 viommu->map_flags |= VIRTIO_IOMMU_MAP_F_MMIO;
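
Lines 1009-1052 read the device configuration: the page size mask unconditionally, then the feature-gated fields (domain range, probe size, input range, MMIO mapping flag) only when the corresponding feature bit was negotiated. A hedged sketch of those reads, assuming the virtio_cread()/virtio_cread_feature() helpers of this kernel era and the virtio_iommu_config field names these lines suggest:

#include <linux/virtio_config.h>
#include <uapi/linux/virtio_iommu.h>

/* Sketch: pull the parameters that the probe path stores in viommu */
static void read_config_sketch(struct virtio_device *vdev,
			       u64 *pgsize_bitmap, u32 *first_domain,
			       u32 *last_domain, u32 *probe_size)
{
	virtio_cread(vdev, struct virtio_iommu_config, page_size_mask,
		     pgsize_bitmap);

	/* feature-gated fields keep their defaults if not advertised */
	virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
			     struct virtio_iommu_config, domain_range.first,
			     first_domain);
	virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
			     struct virtio_iommu_config, domain_range.last,
			     last_domain);
	virtio_cread_feature(vdev, VIRTIO_IOMMU_F_PROBE,
			     struct virtio_iommu_config, probe_size,
			     probe_size);
}
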
1054 viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap;
1059 ret = viommu_fill_evtq(viommu);
1063 ret = iommu_device_sysfs_add(&viommu->iommu, dev, NULL, "%s",
1068 iommu_device_set_ops(&viommu->iommu, &viommu_ops);
1069 iommu_device_set_fwnode(&viommu->iommu, parent_dev->fwnode);
1071 iommu_device_register(&viommu->iommu);
1093 vdev->priv = viommu;
1096 order_base_2(viommu->geometry.aperture_end));
1097 dev_info(dev, "page mask: %#llx\n", viommu->pgsize_bitmap);
1102 iommu_device_sysfs_remove(&viommu->iommu);
1103 iommu_device_unregister(&viommu->iommu);
1112 struct viommu_dev *viommu = vdev->priv;
1114 iommu_device_sysfs_remove(&viommu->iommu);
1115 iommu_device_unregister(&viommu->iommu);