Lines matching references to vdomain (virtio-iommu driver, drivers/iommu/virtio-iommu.c)
79 struct viommu_domain *vdomain;
313 static int viommu_add_mapping(struct viommu_domain *vdomain, unsigned long iova,
328 spin_lock_irqsave(&vdomain->mappings_lock, irqflags);
329 interval_tree_insert(&mapping->iova, &vdomain->mappings);
330 spin_unlock_irqrestore(&vdomain->mappings_lock, irqflags);
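
The viommu_add_mapping() lines above show each IOVA range being wrapped in an interval-tree node and inserted into vdomain->mappings under mappings_lock. Below is a minimal sketch of how those lines plausibly fit together; the viommu_mapping and viommu_domain layouts are simplified assumptions limited to the fields that appear in this listing, struct viommu_dev is only forward-declared, and the allocation flags and error handling are condensed rather than copied from the real driver.

#include <linux/interval_tree.h>
#include <linux/iommu.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct viommu_dev;	/* per-virtio-device driver state, not shown here */

/* Simplified per-mapping and per-domain state (fields taken from the listing). */
struct viommu_mapping {
	phys_addr_t paddr;
	struct interval_tree_node iova;	/* covers [iova.start, iova.last] */
	u32 flags;
};

struct viommu_domain {
	struct iommu_domain domain;
	struct viommu_dev *viommu;
	struct mutex mutex;		/* serialises domain finalisation */
	unsigned int id;
	u32 map_flags;
	spinlock_t mappings_lock;
	struct rb_root_cached mappings;
	unsigned long nr_endpoints;
};

/* to_viommu_domain(), used throughout the listing, is the usual container_of() wrapper. */
static struct viommu_domain *to_viommu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct viommu_domain, domain);
}

static int viommu_add_mapping(struct viommu_domain *vdomain, unsigned long iova,
			      phys_addr_t paddr, size_t size, u32 flags)
{
	unsigned long irqflags;
	struct viommu_mapping *mapping;

	/* GFP_ATOMIC here is an assumption: map may be called in atomic context. */
	mapping = kzalloc(sizeof(*mapping), GFP_ATOMIC);
	if (!mapping)
		return -ENOMEM;

	mapping->paddr = paddr;
	mapping->iova.start = iova;
	mapping->iova.last = iova + size - 1;
	mapping->flags = flags;

	spin_lock_irqsave(&vdomain->mappings_lock, irqflags);
	interval_tree_insert(&mapping->iova, &vdomain->mappings);
	spin_unlock_irqrestore(&vdomain->mappings_lock, irqflags);

	return 0;
}
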
338 * @vdomain: the domain
345 static size_t viommu_del_mappings(struct viommu_domain *vdomain,
354 spin_lock_irqsave(&vdomain->mappings_lock, flags);
355 next = interval_tree_iter_first(&vdomain->mappings, iova, last);
371 interval_tree_remove(node, &vdomain->mappings);
374 spin_unlock_irqrestore(&vdomain->mappings_lock, flags);
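
The viommu_del_mappings() fragments (kernel-doc stub, lock, iter_first, remove) point at the usual interval-tree walk: find every node intersecting the range, detach it and free the wrapper. A sketch along those lines, reusing the simplified types above; note that the real driver also refuses to split a mapping that only partially overlaps the requested range.

static size_t viommu_del_mappings(struct viommu_domain *vdomain,
				  unsigned long iova, size_t size)
{
	size_t unmapped = 0;
	unsigned long flags;
	unsigned long last = iova + size - 1;
	struct viommu_mapping *mapping;
	struct interval_tree_node *node, *next;

	spin_lock_irqsave(&vdomain->mappings_lock, flags);
	next = interval_tree_iter_first(&vdomain->mappings, iova, last);
	while (next) {
		node = next;
		mapping = container_of(node, struct viommu_mapping, iova);
		next = interval_tree_iter_next(node, iova, last);

		/* The real code bails out here rather than split a mapping. */
		unmapped += node->last - node->start + 1;

		interval_tree_remove(node, &vdomain->mappings);
		kfree(mapping);
	}
	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);

	return unmapped;
}
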
386 static int viommu_replay_mappings(struct viommu_domain *vdomain)
394 spin_lock_irqsave(&vdomain->mappings_lock, flags);
395 node = interval_tree_iter_first(&vdomain->mappings, 0, -1UL);
400 .domain = cpu_to_le32(vdomain->id),
407 ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
413 spin_unlock_irqrestore(&vdomain->mappings_lock, flags);
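
viommu_replay_mappings() walks the same tree and re-issues a VIRTIO_IOMMU_T_MAP request for every recorded range, which is what lets mappings created before attach (see the attach fragments further down) reach the device. A sketch, assuming viommu_send_req_sync() is the driver's synchronous request helper shown in the listing and struct virtio_iommu_req_map comes from include/uapi/linux/virtio_iommu.h:

#include <uapi/linux/virtio_iommu.h>

static int viommu_replay_mappings(struct viommu_domain *vdomain)
{
	int ret = 0;
	unsigned long flags;
	struct viommu_mapping *mapping;
	struct interval_tree_node *node;
	struct virtio_iommu_req_map map;

	spin_lock_irqsave(&vdomain->mappings_lock, flags);
	node = interval_tree_iter_first(&vdomain->mappings, 0, -1UL);
	while (node) {
		mapping = container_of(node, struct viommu_mapping, iova);
		map = (struct virtio_iommu_req_map) {
			.head.type = VIRTIO_IOMMU_T_MAP,
			.domain = cpu_to_le32(vdomain->id),
			.virt_start = cpu_to_le64(mapping->iova.start),
			.virt_end = cpu_to_le64(mapping->iova.last),
			.phys_start = cpu_to_le64(mapping->paddr),
			.flags = cpu_to_le32(mapping->flags),
		};

		ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
		if (ret)
			break;

		node = interval_tree_iter_next(node, 0, -1UL);
	}
	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);

	return ret;
}
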
588 struct viommu_domain *vdomain;
593 vdomain = kzalloc(sizeof(*vdomain), GFP_KERNEL);
594 if (!vdomain)
597 mutex_init(&vdomain->mutex);
598 spin_lock_init(&vdomain->mappings_lock);
599 vdomain->mappings = RB_ROOT_CACHED;
602 iommu_get_dma_cookie(&vdomain->domain)) {
603 kfree(vdomain);
607 return &vdomain->domain;
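
The viommu_domain_alloc() fragments show the domain's bookkeeping being initialised before any device is known: the mutex, the mappings spinlock, an empty cached rbtree, and, for DMA domains in the kernel vintage this listing comes from, an iommu-dma cookie. Rough sketch; the domain-type check at the top is an assumption filled in from how IOMMU drivers of that era look, not from the listing.

#include <linux/dma-iommu.h>	/* iommu_get_dma_cookie(), older kernels */

static struct iommu_domain *viommu_domain_alloc(unsigned type)
{
	struct viommu_domain *vdomain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;

	vdomain = kzalloc(sizeof(*vdomain), GFP_KERNEL);
	if (!vdomain)
		return NULL;

	mutex_init(&vdomain->mutex);
	spin_lock_init(&vdomain->mappings_lock);
	vdomain->mappings = RB_ROOT_CACHED;

	/* DMA domains allocated their own cookie before the core took this over. */
	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&vdomain->domain)) {
		kfree(vdomain);
		return NULL;
	}

	return &vdomain->domain;
}
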
616 struct viommu_domain *vdomain = to_viommu_domain(domain);
631 vdomain->id = (unsigned int)ret;
636 vdomain->map_flags = viommu->map_flags;
637 vdomain->viommu = viommu;
644 struct viommu_domain *vdomain = to_viommu_domain(domain);
649 viommu_del_mappings(vdomain, 0, 0);
651 if (vdomain->viommu)
652 ida_free(&vdomain->viommu->domain_ids, vdomain->id);
654 kfree(vdomain);
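
viommu_domain_finalise() is where the domain gets bound to one vIOMMU instance: a domain ID is taken from the device's IDA and the device's capabilities (map_flags here; page-size bitmap and geometry in the full code) are copied into the domain. viommu_domain_free() undoes it; note that it calls viommu_del_mappings(vdomain, 0, 0), where the size of 0 makes the computed last address wrap to ULONG_MAX so every remaining mapping is dropped. Condensed sketch; the IDA range bounds (first_domain/last_domain) and the other viommu_dev fields are assumptions, only domain_ids and map_flags appear in the listing.

#include <linux/idr.h>

static int viommu_domain_finalise(struct viommu_dev *viommu,
				  struct iommu_domain *domain)
{
	int ret;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	/* Pick a domain ID in the range advertised by the device. */
	ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
			      viommu->last_domain, GFP_KERNEL);
	if (ret < 0)
		return ret;

	vdomain->id = (unsigned int)ret;
	vdomain->map_flags = viommu->map_flags;
	vdomain->viommu = viommu;

	return 0;
}

static void viommu_domain_free(struct iommu_domain *domain)
{
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	/* size 0 wraps around: delete every remaining mapping */
	viommu_del_mappings(vdomain, 0, 0);

	if (vdomain->viommu)
		ida_free(&vdomain->viommu->domain_ids, vdomain->id);

	kfree(vdomain);
}
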
664 struct viommu_domain *vdomain = to_viommu_domain(domain);
666 mutex_lock(&vdomain->mutex);
667 if (!vdomain->viommu) {
673 } else if (vdomain->viommu != vdev->viommu) {
677 mutex_unlock(&vdomain->mutex);
692 * vdev->vdomain is protected by group->mutex
694 if (vdev->vdomain)
695 vdev->vdomain->nr_endpoints--;
699 .domain = cpu_to_le32(vdomain->id),
705 ret = viommu_send_req_sync(vdomain->viommu, &req, sizeof(req));
710 if (!vdomain->nr_endpoints) {
715 ret = viommu_replay_mappings(vdomain);
720 vdomain->nr_endpoints++;
721 vdev->vdomain = vdomain;
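
The attach path ties the pieces together: under vdomain->mutex the domain is finalised against the endpoint's vIOMMU on first use (or rejected if it already belongs to a different one), an ATTACH request is sent for each of the endpoint's IDs, and if this is the domain's first endpoint the recorded mappings are replayed. A condensed sketch; how the per-endpoint viommu_endpoint (vdev) and its iommu_fwspec are looked up varies across kernel versions, so viommu_do_attach() below is a hypothetical helper that simply takes them as parameters.

struct viommu_endpoint {
	struct viommu_dev *viommu;
	struct viommu_domain *vdomain;
	/* ... */
};

/* Hypothetical helper: the real viommu_attach_dev() derives vdev and fwspec
 * from the struct device it is given. */
static int viommu_do_attach(struct viommu_domain *vdomain,
			    struct viommu_endpoint *vdev,
			    struct iommu_fwspec *fwspec)
{
	int i, ret = 0;
	struct virtio_iommu_req_attach req;

	mutex_lock(&vdomain->mutex);
	if (!vdomain->viommu)
		ret = viommu_domain_finalise(vdev->viommu, &vdomain->domain);
	else if (vdomain->viommu != vdev->viommu)
		ret = -EXDEV;	/* domain already owned by another vIOMMU */
	mutex_unlock(&vdomain->mutex);
	if (ret)
		return ret;

	/* Attaching to a new domain implicitly detaches from the old one;
	 * vdev->vdomain is protected by group->mutex. */
	if (vdev->vdomain)
		vdev->vdomain->nr_endpoints--;

	req = (struct virtio_iommu_req_attach) {
		.head.type = VIRTIO_IOMMU_T_ATTACH,
		.domain = cpu_to_le32(vdomain->id),
	};

	for (i = 0; i < fwspec->num_ids; i++) {
		req.endpoint = cpu_to_le32(fwspec->ids[i]);
		ret = viommu_send_req_sync(vdomain->viommu, &req, sizeof(req));
		if (ret)
			return ret;
	}

	if (!vdomain->nr_endpoints) {
		/* First endpoint: push mappings created before attach. */
		ret = viommu_replay_mappings(vdomain);
		if (ret)
			return ret;
	}

	vdomain->nr_endpoints++;
	vdev->vdomain = vdomain;

	return 0;
}
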
732 struct viommu_domain *vdomain = to_viommu_domain(domain);
738 if (flags & ~vdomain->map_flags)
741 ret = viommu_add_mapping(vdomain, iova, paddr, size, flags);
747 .domain = cpu_to_le32(vdomain->id),
754 if (!vdomain->nr_endpoints)
757 ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
759 viommu_del_mappings(vdomain, iova, size);
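
viommu_map() records the mapping locally first (so it can be replayed later), translates IOMMU_* prot bits into VIRTIO_IOMMU_MAP_F_* flags and rejects anything the device did not advertise in map_flags, and only then talks to the device; if nothing is attached yet the request is skipped, and if the request fails the local mapping is rolled back, which is what the viommu_del_mappings() call at line 759 shows. Sketch with a simplified prototype (the exact iommu_ops signature varies across kernel versions):

static int viommu_map(struct iommu_domain *domain, unsigned long iova,
		      phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	u32 flags;
	struct virtio_iommu_req_map map;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	flags = (prot & IOMMU_READ ? VIRTIO_IOMMU_MAP_F_READ : 0) |
		(prot & IOMMU_WRITE ? VIRTIO_IOMMU_MAP_F_WRITE : 0) |
		(prot & IOMMU_MMIO ? VIRTIO_IOMMU_MAP_F_MMIO : 0);

	if (flags & ~vdomain->map_flags)
		return -EINVAL;

	ret = viommu_add_mapping(vdomain, iova, paddr, size, flags);
	if (ret)
		return ret;

	map = (struct virtio_iommu_req_map) {
		.head.type = VIRTIO_IOMMU_T_MAP,
		.domain = cpu_to_le32(vdomain->id),
		.virt_start = cpu_to_le64(iova),
		.virt_end = cpu_to_le64(iova + size - 1),
		.phys_start = cpu_to_le64(paddr),
		.flags = cpu_to_le32(flags),
	};

	/* Nothing attached yet: the mapping will be replayed at attach time. */
	if (!vdomain->nr_endpoints)
		return 0;

	ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
	if (ret)
		viommu_del_mappings(vdomain, iova, size);

	return ret;
}
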
770 struct viommu_domain *vdomain = to_viommu_domain(domain);
772 unmapped = viommu_del_mappings(vdomain, iova, size);
777 if (!vdomain->nr_endpoints)
782 .domain = cpu_to_le32(vdomain->id),
787 ret = viommu_add_req(vdomain->viommu, &unmap, sizeof(unmap));
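
viommu_unmap() is the mirror image: drop the local interval-tree entries first, and only send a VIRTIO_IOMMU_T_UNMAP request if an endpoint is actually attached (after detach the device has already forgotten the mappings). The request is queued with viommu_add_req() rather than sent synchronously, and flushed later, which is why the iotlb_sync fragment at the end of the listing just calls viommu_sync_req(). Sketch, again with a simplified prototype:

static size_t viommu_unmap(struct iommu_domain *domain, unsigned long iova,
			   size_t size)
{
	int ret;
	size_t unmapped;
	struct virtio_iommu_req_unmap unmap;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	unmapped = viommu_del_mappings(vdomain, iova, size);
	if (unmapped < size)
		return 0;

	/* Device already removed all mappings after detach. */
	if (!vdomain->nr_endpoints)
		return unmapped;

	unmap = (struct virtio_iommu_req_unmap) {
		.head.type = VIRTIO_IOMMU_T_UNMAP,
		.domain = cpu_to_le32(vdomain->id),
		.virt_start = cpu_to_le64(iova),
		.virt_end = cpu_to_le64(iova + unmapped - 1),
	};

	/* Queued, not sent: viommu_iotlb_sync() flushes it via viommu_sync_req(). */
	ret = viommu_add_req(vdomain->viommu, &unmap, sizeof(unmap));
	return ret ? 0 : unmapped;
}
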
798 struct viommu_domain *vdomain = to_viommu_domain(domain);
800 spin_lock_irqsave(&vdomain->mappings_lock, flags);
801 node = interval_tree_iter_first(&vdomain->mappings, iova, iova);
806 spin_unlock_irqrestore(&vdomain->mappings_lock, flags);
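
viommu_iova_to_phys() is a pure lookup in the same interval tree: find the node covering the single address and add the offset into its physical range, all under mappings_lock. Sketch, reusing the simplified types from above:

static phys_addr_t viommu_iova_to_phys(struct iommu_domain *domain,
				       dma_addr_t iova)
{
	u64 paddr = 0;
	unsigned long flags;
	struct viommu_mapping *mapping;
	struct interval_tree_node *node;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	spin_lock_irqsave(&vdomain->mappings_lock, flags);
	node = interval_tree_iter_first(&vdomain->mappings, iova, iova);
	if (node) {
		mapping = container_of(node, struct viommu_mapping, iova);
		paddr = mapping->paddr + (iova - mapping->iova.start);
	}
	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);

	return paddr;
}
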
814 struct viommu_domain *vdomain = to_viommu_domain(domain);
816 viommu_sync_req(vdomain->viommu);