Lines matching references to 'viommu' (Linux virtio-iommu driver, apparently drivers/iommu/virtio-iommu.c). Each hit below is prefixed with its line number in that source file.
64 struct viommu_dev *viommu;
65 struct mutex mutex; /* protects viommu pointer */
78 struct viommu_dev *viommu;
136 static off_t viommu_get_write_desc_offset(struct viommu_dev *viommu,
143 return len - viommu->probe_size - tail_size;
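
Lines 136-143 compute where the device-writable portion of a request buffer begins: everything from that offset on (the status tail, plus the returned properties for a PROBE request) is handed to the device as an "in" descriptor. A minimal sketch of that calculation, assuming the layouts from include/uapi/linux/virtio_iommu.h; only the return statement at line 143 is the file's code, the rest of the body is a guess:

static off_t write_desc_offset_sketch(struct viommu_dev *viommu,
				      void *buf, size_t len)
{
	size_t tail_size = sizeof(struct virtio_iommu_req_tail);
	struct virtio_iommu_req_head *head = buf;

	/* A PROBE reply carries probe_size bytes of properties before the tail */
	if (head->type == VIRTIO_IOMMU_T_PROBE)
		return len - viommu->probe_size - tail_size;

	/* Every other request is only written by the device in its status tail */
	return len - tail_size;
}
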
154 static int __viommu_sync_req(struct viommu_dev *viommu)
159 struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];
161 assert_spin_locked(&viommu->request_lock);
165 while (!list_empty(&viommu->requests)) {
187 static int viommu_sync_req(struct viommu_dev *viommu)
192 spin_lock_irqsave(&viommu->request_lock, flags);
193 ret = __viommu_sync_req(viommu);
195 dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
196 spin_unlock_irqrestore(&viommu->request_lock, flags);
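
Lines 154-196 drain the request virtqueue: __viommu_sync_req() busy-waits under request_lock until every request on viommu->requests has been used by the device, and viommu_sync_req() is the locking wrapper around it. A hedged sketch of the inner loop; struct viommu_request and its list/buf/len fields are assumptions of this sketch, and copying the device-written status tail back to the caller is elided:

static int sync_req_sketch(struct viommu_dev *viommu)
{
	unsigned int len;
	struct viommu_request *req;
	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];

	assert_spin_locked(&viommu->request_lock);

	/* Notify the device about descriptors queued since the last kick */
	virtqueue_kick(vq);

	/* Pop used buffers until nothing we queued is still outstanding */
	while (!list_empty(&viommu->requests)) {
		req = virtqueue_get_buf(vq, &len);
		if (!req)
			continue;

		/* Writeback of the status tail to the caller's buffer elided */
		list_del(&req->list);
		kfree(req);
	}

	return 0;
}
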
217 static int __viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len,
225 struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];
227 assert_spin_locked(&viommu->request_lock);
229 write_offset = viommu_get_write_desc_offset(viommu, buf, len);
250 if (!__viommu_sync_req(viommu))
256 list_add_tail(&req->list, &viommu->requests);
264 static int viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len)
269 spin_lock_irqsave(&viommu->request_lock, flags);
270 ret = __viommu_add_req(viommu, buf, len, false);
272 dev_dbg(viommu->dev, "could not add request: %d\n", ret);
273 spin_unlock_irqrestore(&viommu->request_lock, flags);
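
Lines 217-273 queue a request without waiting for it: the buffer is split at write_offset into a device-readable head and a device-writable tail, added to the request virtqueue as one out- and one in-scatterlist, and parked on viommu->requests until the next sync. A sketch under those assumptions; struct viommu_request (with list, len and a trailing buf[] array) is again assumed, and the writeback flag visible at lines 270/290 is dropped:

static int add_req_sketch(struct viommu_dev *viommu, void *buf, size_t len)
{
	int ret;
	off_t write_offset;
	struct viommu_request *req;
	struct scatterlist top_sg, bottom_sg;
	struct scatterlist *sg[2] = { &top_sg, &bottom_sg };
	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];

	assert_spin_locked(&viommu->request_lock);

	write_offset = viommu_get_write_desc_offset(viommu, buf, len);
	if (write_offset <= 0)
		return -EINVAL;

	req = kzalloc(sizeof(*req) + len, GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	req->len = len;
	memcpy(req->buf, buf, write_offset);

	/* out: the part the device reads; in: the tail it fills with a status */
	sg_init_one(&top_sg, req->buf, write_offset);
	sg_init_one(&bottom_sg, req->buf + write_offset, len - write_offset);

	ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
	if (ret == -ENOSPC && !__viommu_sync_req(viommu))
		/* Queue was full; retry once after reclaiming used buffers */
		ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
	if (ret) {
		kfree(req);
		return ret;
	}

	list_add_tail(&req->list, &viommu->requests);
	return 0;
}
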
282 static int viommu_send_req_sync(struct viommu_dev *viommu, void *buf,
288 spin_lock_irqsave(&viommu->request_lock, flags);
290 ret = __viommu_add_req(viommu, buf, len, true);
292 dev_dbg(viommu->dev, "could not add request (%d)\n", ret);
296 ret = __viommu_sync_req(viommu);
298 dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
304 spin_unlock_irqrestore(&viommu->request_lock, flags);
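
viommu_send_req_sync() (lines 282-304) composes the two primitives under a single lock hold: queue one request with write-back enabled, then wait for it. Reassembled from the lines above, with the error paths abbreviated:

static int send_req_sync_sketch(struct viommu_dev *viommu, void *buf,
				size_t len)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&viommu->request_lock, flags);

	ret = __viommu_add_req(viommu, buf, len, true);
	if (!ret)
		ret = __viommu_sync_req(viommu);

	spin_unlock_irqrestore(&viommu->request_lock, flags);

	return ret;
}
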
454 ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
514 static int viommu_probe_endpoint(struct viommu_dev *viommu, struct device *dev)
528 probe_len = sizeof(*probe) + viommu->probe_size +
541 ret = viommu_send_req_sync(viommu, probe, probe_len);
549 cur < viommu->probe_size) {
557 dev_err(dev, "unknown viommu prop 0x%x\n", type);
561 dev_err(dev, "failed to parse viommu prop 0x%x\n", type);
564 if (cur >= viommu->probe_size)
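
Lines 514-564 issue a PROBE request for one endpoint and then parse the type/length-encoded properties the device wrote back. A hedged reconstruction of that walk, using struct virtio_iommu_req_probe and struct virtio_iommu_probe_property from the UAPI header; the endpoint ID, which normally comes from the device's iommu_fwspec, is passed in directly here to keep the sketch short, and property-specific handling is reduced to a debug print:

static int probe_endpoint_sketch(struct viommu_dev *viommu, struct device *dev,
				 u32 endpoint_id)
{
	int ret;
	u16 type, len;
	size_t cur = 0;
	struct virtio_iommu_req_probe *probe;
	struct virtio_iommu_probe_property *prop;
	size_t probe_len = sizeof(*probe) + viommu->probe_size +
			   sizeof(struct virtio_iommu_req_tail);

	probe = kzalloc(probe_len, GFP_KERNEL);
	if (!probe)
		return -ENOMEM;

	probe->head.type = VIRTIO_IOMMU_T_PROBE;
	probe->endpoint = cpu_to_le32(endpoint_id);

	ret = viommu_send_req_sync(viommu, probe, probe_len);
	if (ret)
		goto out;

	/* Walk the property list until a NONE entry or the end of the buffer */
	prop = (void *)probe->properties;
	type = le16_to_cpu(prop->type) & VIRTIO_IOMMU_PROBE_T_MASK;

	while (type != VIRTIO_IOMMU_PROBE_T_NONE && cur < viommu->probe_size) {
		len = le16_to_cpu(prop->length) + sizeof(*prop);

		/* Property-specific parsing (e.g. reserved regions) elided */
		dev_dbg(dev, "viommu prop 0x%x, %u bytes\n", type, len);

		cur += len;
		if (cur >= viommu->probe_size)
			break;

		prop = (void *)probe->properties + cur;
		type = le16_to_cpu(prop->type) & VIRTIO_IOMMU_PROBE_T_MASK;
	}
out:
	kfree(probe);
	return ret;
}
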
576 static int viommu_fault_handler(struct viommu_dev *viommu,
601 dev_err_ratelimited(viommu->dev, "%s fault from EP %u at %#llx [%s%s%s]\n",
607 dev_err_ratelimited(viommu->dev, "%s fault from EP %u\n",
618 struct viommu_dev *viommu = vq->vdev->priv;
622 dev_err(viommu->dev,
626 viommu_fault_handler(viommu, &evt->fault);
632 dev_err(viommu->dev, "could not add event buffer\n");
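
Lines 576-632 handle the event virtqueue: decode and log a fault report (lines 601/607), then immediately return the buffer to the device so later events are not dropped. A sketch of the completion callback, assuming each event buffer is a plain struct virtio_iommu_fault; the real driver wraps it in a small union and filters reserved event heads before calling viommu_fault_handler():

static void event_handler_sketch(struct virtqueue *vq)
{
	int ret;
	unsigned int len;
	struct scatterlist sg[1];
	struct virtio_iommu_fault *evt;
	struct viommu_dev *viommu = vq->vdev->priv;

	while ((evt = virtqueue_get_buf(vq, &len)) != NULL) {
		if (len > sizeof(*evt))
			dev_err(viommu->dev, "invalid event buffer (len %u)\n",
				len);
		else
			viommu_fault_handler(viommu, evt);

		/* Hand the buffer straight back to the device */
		sg_init_one(sg, evt, sizeof(*evt));
		ret = virtqueue_add_inbuf(vq, sg, 1, evt, GFP_ATOMIC);
		if (ret)
			dev_err(viommu->dev, "could not add event buffer\n");
	}

	virtqueue_kick(vq);
}
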
665 struct viommu_dev *viommu = vdev->viommu;
668 viommu_page_size = 1UL << __ffs(viommu->pgsize_bitmap);
676 ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
677 viommu->last_domain, GFP_KERNEL);
683 domain->pgsize_bitmap = viommu->pgsize_bitmap;
684 domain->geometry = viommu->geometry;
686 vdomain->map_flags = viommu->map_flags;
687 vdomain->viommu = viommu;
690 if (virtio_has_feature(viommu->vdev,
698 ida_free(&viommu->domain_ids, vdomain->id);
699 vdomain->viommu = NULL;
714 if (vdomain->viommu)
715 ida_free(&vdomain->viommu->domain_ids, vdomain->id);
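
Lines 665-715 bind a freshly allocated domain to one viommu: reject devices whose smallest page size exceeds the CPU's PAGE_SIZE, take a domain ID from the [first_domain, last_domain] range advertised by the device, copy the device's capabilities into the core iommu_domain, and give the ID back to the IDA when the domain is freed or finalisation fails. A trimmed sketch; to_viommu_domain() is assumed to be the usual container_of() helper, and the identity/bypass path around lines 690-699 is left out:

static int domain_finalise_sketch(struct viommu_endpoint *vdev,
				  struct iommu_domain *domain)
{
	int ret;
	unsigned long viommu_page_size;
	struct viommu_dev *viommu = vdev->viommu;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	/* Reject devices whose minimum granule is larger than a CPU page */
	viommu_page_size = 1UL << __ffs(viommu->pgsize_bitmap);
	if (viommu_page_size > PAGE_SIZE)
		return -ENODEV;

	ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
			      viommu->last_domain, GFP_KERNEL);
	if (ret < 0)
		return ret;

	vdomain->id = (unsigned int)ret;
	domain->pgsize_bitmap = viommu->pgsize_bitmap;
	domain->geometry = viommu->geometry;
	vdomain->map_flags = viommu->map_flags;
	vdomain->viommu = viommu;

	return 0;
}

/* On free, the domain ID goes back to the allocator (cf. lines 714-715) */
static void domain_free_sketch(struct viommu_domain *vdomain)
{
	if (vdomain->viommu)
		ida_free(&vdomain->viommu->domain_ids, vdomain->id);
	kfree(vdomain);
}
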
730 if (!vdomain->viommu) {
732 * Properly initialize the domain now that we know which viommu
736 } else if (vdomain->viommu != vdev->viommu) {
770 ret = viommu_send_req_sync(vdomain->viommu, &req, sizeof(req));
808 WARN_ON(viommu_send_req_sync(vdev->viommu, &req, sizeof(req)));
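
Lines 730-808 attach and detach endpoints. The heart of attach is one VIRTIO_IOMMU_T_ATTACH request per endpoint ID listed in the device's iommu_fwspec (line 770); detach at line 808 is symmetric, sending a DETACH request whose failure only triggers a WARN_ON. A sketch of the attach loop, with the mutex handling and the lazy domain finalisation from lines 730-736 omitted:

static int attach_dev_sketch(struct iommu_domain *domain, struct device *dev)
{
	int i, ret;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct viommu_domain *vdomain = to_viommu_domain(domain);
	struct virtio_iommu_req_attach req = {
		.head.type	= VIRTIO_IOMMU_T_ATTACH,
		.domain		= cpu_to_le32(vdomain->id),
	};

	/* One ATTACH request per endpoint ID behind this device */
	for (i = 0; i < fwspec->num_ids; i++) {
		req.endpoint = cpu_to_le32(fwspec->ids[i]);

		ret = viommu_send_req_sync(vdomain->viommu, &req, sizeof(req));
		if (ret)
			return ret;
	}

	return 0;
}
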
846 ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
883 ret = viommu_add_req(vdomain->viommu, &unmap, sizeof(unmap));
912 viommu_sync_req(vdomain->viommu);
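
Lines 846-912 show the map/unmap asymmetry: a MAP request is sent synchronously (line 846) because the caller may start DMA as soon as it returns, while UNMAP requests are merely queued (line 883) and flushed in one batch by iotlb_sync (line 912). Hedged sketches of the two request builders; the real functions also update the driver's internal tree of mappings, which is skipped here, and the flags value would be derived from the IOMMU protection bits:

/* MAP: must reach the device before the caller starts DMA */
static int map_sketch(struct viommu_domain *vdomain, unsigned long iova,
		      phys_addr_t paddr, size_t size, u32 flags)
{
	struct virtio_iommu_req_map map = {
		.head.type	= VIRTIO_IOMMU_T_MAP,
		.domain		= cpu_to_le32(vdomain->id),
		.virt_start	= cpu_to_le64(iova),
		.virt_end	= cpu_to_le64(iova + size - 1),
		.phys_start	= cpu_to_le64(paddr),
		.flags		= cpu_to_le32(flags),
	};

	return viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
}

/* UNMAP: only queued here; viommu_sync_req() in iotlb_sync flushes it */
static int unmap_sketch(struct viommu_domain *vdomain, unsigned long iova,
			size_t size)
{
	struct virtio_iommu_req_unmap unmap = {
		.head.type	= VIRTIO_IOMMU_T_UNMAP,
		.domain		= cpu_to_le32(vdomain->id),
		.virt_start	= cpu_to_le64(iova),
		.virt_end	= cpu_to_le64(iova + size - 1),
	};

	return viommu_add_req(vdomain->viommu, &unmap, sizeof(unmap));
}
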
969 struct viommu_dev *viommu = NULL;
975 viommu = viommu_get_by_fwnode(fwspec->iommu_fwnode);
976 if (!viommu)
984 vdev->viommu = viommu;
988 if (viommu->probe_size) {
990 ret = viommu_probe_endpoint(viommu, dev);
995 return &viommu->iommu;
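
Lines 969-995 implement probe_device: look up the viommu that firmware (device tree or ACPI) associated with this endpoint, allocate the per-endpoint structure, optionally issue a PROBE request, and hand the core IOMMU layer the viommu's iommu_device. A sketch, assuming struct viommu_endpoint carries dev/viommu members and is stored as the device's IOMMU private data; the fwspec ops check and the reserved-region list setup are omitted:

static struct iommu_device *probe_device_sketch(struct device *dev)
{
	int ret;
	struct viommu_endpoint *vdev;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct viommu_dev *viommu;

	if (!fwspec)
		return ERR_PTR(-ENODEV);

	/* The fwnode recorded by firmware parsing names the owning viommu */
	viommu = viommu_get_by_fwnode(fwspec->iommu_fwnode);
	if (!viommu)
		return ERR_PTR(-ENODEV);

	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev)
		return ERR_PTR(-ENOMEM);

	vdev->dev = dev;
	vdev->viommu = viommu;
	dev_iommu_priv_set(dev, vdev);

	/* Ask for per-endpoint properties if the device offers PROBE */
	if (viommu->probe_size) {
		ret = viommu_probe_endpoint(viommu, dev);
		if (ret) {
			kfree(vdev);
			return ERR_PTR(ret);
		}
	}

	return &viommu->iommu;
}
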
1065 static int viommu_init_vqs(struct viommu_dev *viommu)
1067 struct virtio_device *vdev = dev_to_virtio(viommu->dev);
1074 return virtio_find_vqs(vdev, VIOMMU_NR_VQS, viommu->vqs, callbacks,
1078 static int viommu_fill_evtq(struct viommu_dev *viommu)
1083 struct virtqueue *vq = viommu->vqs[VIOMMU_EVENT_VQ];
1086 viommu->evts = evts = devm_kmalloc_array(viommu->dev, nr_evts,
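
Lines 1078-1086 pre-fill the event queue at probe time: the device can only report faults through buffers the driver has already posted, so every free descriptor gets one. A sketch, again treating each event buffer as a struct virtio_iommu_fault and letting devm tie the allocation's lifetime to the device; the real code also stashes the array in viommu->evts (line 1086):

static int fill_evtq_sketch(struct viommu_dev *viommu)
{
	int ret;
	unsigned int i;
	struct scatterlist sg[1];
	struct virtio_iommu_fault *evts;
	struct virtqueue *vq = viommu->vqs[VIOMMU_EVENT_VQ];
	size_t nr_evts = vq->num_free;

	evts = devm_kmalloc_array(viommu->dev, nr_evts, sizeof(*evts),
				  GFP_KERNEL);
	if (!evts)
		return -ENOMEM;

	/* Post one device-writable buffer per free descriptor */
	for (i = 0; i < nr_evts; i++) {
		sg_init_one(sg, &evts[i], sizeof(*evts));
		ret = virtqueue_add_inbuf(vq, sg, 1, &evts[i], GFP_KERNEL);
		if (ret)
			return ret;
	}

	return 0;
}
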
1104 struct viommu_dev *viommu = NULL;
1114 viommu = devm_kzalloc(dev, sizeof(*viommu), GFP_KERNEL);
1115 if (!viommu)
1118 spin_lock_init(&viommu->request_lock);
1119 ida_init(&viommu->domain_ids);
1120 viommu->dev = dev;
1121 viommu->vdev = vdev;
1122 INIT_LIST_HEAD(&viommu->requests);
1124 ret = viommu_init_vqs(viommu);
1129 &viommu->pgsize_bitmap);
1131 if (!viommu->pgsize_bitmap) {
1136 viommu->map_flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE;
1137 viommu->last_domain = ~0U;
1150 &viommu->first_domain);
1154 &viommu->last_domain);
1158 &viommu->probe_size);
1160 viommu->geometry = (struct iommu_domain_geometry) {
1167 viommu->map_flags |= VIRTIO_IOMMU_MAP_F_MMIO;
1169 viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap;
1174 ret = viommu_fill_evtq(viommu);
1178 ret = iommu_device_sysfs_add(&viommu->iommu, dev, NULL, "%s",
1183 iommu_device_register(&viommu->iommu, &viommu_ops, parent_dev);
1185 vdev->priv = viommu;
1188 order_base_2(viommu->geometry.aperture_end));
1189 dev_info(dev, "page mask: %#llx\n", viommu->pgsize_bitmap);
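
Lines 1104-1189 are the probe sequence: allocate struct viommu_dev, initialise the request lock, the domain ID allocator and the pending-request list (lines 1118-1122), set up the virtqueues, read the config space, pre-fill the event queue, then register with the IOMMU core and log the aperture and page mask. The config-space step, reconstructed as a hedged sketch: the feature bits and struct virtio_iommu_config members come from the virtio-iommu UAPI header, the field types in struct viommu_dev (u64 pgsize_bitmap, u32 probe_size, unsigned int first_domain/last_domain) are assumptions, the input-range/geometry read around line 1160 is omitted, and the driver may use the _le variants of these config accessors:

static int read_config_sketch(struct virtio_device *vdev,
			      struct viommu_dev *viommu)
{
	viommu->map_flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE;
	viommu->first_domain = 0;
	viommu->last_domain = ~0U;

	/* Mandatory: the page granules the device supports */
	virtio_cread(vdev, struct virtio_iommu_config, page_size_mask,
		     &viommu->pgsize_bitmap);
	if (!viommu->pgsize_bitmap)
		return -EINVAL;

	/* Optional config fields are each gated by a feature bit */
	virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
			     struct virtio_iommu_config, domain_range.start,
			     &viommu->first_domain);
	virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
			     struct virtio_iommu_config, domain_range.end,
			     &viommu->last_domain);
	virtio_cread_feature(vdev, VIRTIO_IOMMU_F_PROBE,
			     struct virtio_iommu_config, probe_size,
			     &viommu->probe_size);

	if (virtio_has_feature(vdev, VIRTIO_IOMMU_F_MMIO))
		viommu->map_flags |= VIRTIO_IOMMU_MAP_F_MMIO;

	return 0;
}
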
1201 struct viommu_dev *viommu = vdev->priv;
1203 iommu_device_sysfs_remove(&viommu->iommu);
1204 iommu_device_unregister(&viommu->iommu);