Lines matching refs:domain in drivers/iommu/exynos-iommu.c (each match is prefixed with its line number in the source file)

236 	struct iommu_domain *domain;	/* domain this device is attached to */
243 * been attached to this domain and page tables of IO address space defined by
244 * it. It is usually referenced by the 'domain' pointer.
252 struct iommu_domain domain; /* generic domain data structure */
272 struct exynos_iommu_domain *domain; /* domain we belong to */
273 struct list_head domain_node; /* node for domain clients list */
283 return container_of(dom, struct exynos_iommu_domain, domain);
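The matches at 252, 272-273 and 283 together show the driver's embedding pattern: the Exynos-private domain wraps the generic struct iommu_domain, and the pointer handed out to the IOMMU core is converted back with container_of(). A minimal sketch assembled from the fields visible elsewhere in this listing (field order and the comments are assumptions):

    struct exynos_iommu_domain {
            sysmmu_pte_t *pgtable;      /* lv1 page table (see line 757) */
            short *lv2entcnt;           /* free-slot counters, one per lv2 table */
            struct list_head clients;   /* attached sysmmu_drvdata (line 778) */
            spinlock_t lock;            /* protects 'clients' */
            spinlock_t pgtablelock;     /* serializes page-table updates */
            struct iommu_domain domain; /* generic domain data structure */
    };

    static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
    {
            return container_of(dom, struct exynos_iommu_domain, domain);
    }
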
443 if (data->domain)
444 ret = report_iommu_fault(&data->domain->domain,
680 if (data->domain) {
698 if (data->domain) {
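Lines 443-444, 680 and 698 all guard on data->domain before dereferencing it: a SYSMMU instance can fault or be suspended while not attached to any domain. A hedged reconstruction of the fault-report call at 443-444; only the &data->domain->domain argument appears in the listing, the trailing arguments are assumptions about the driver's local names:

    if (data->domain)
            ret = report_iommu_fault(&data->domain->domain, data->master,
                                     fault_addr, itype); /* last three args assumed */
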
739 struct exynos_iommu_domain *domain;
746 domain = kzalloc(sizeof(*domain), GFP_KERNEL);
747 if (!domain)
751 if (iommu_get_dma_cookie(&domain->domain) != 0)
757 domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
758 if (!domain->pgtable)
761 domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
762 if (!domain->lv2entcnt)
767 domain->pgtable[i] = ZERO_LV2LINK;
769 handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE,
772 BUG_ON(handle != virt_to_phys(domain->pgtable));
776 spin_lock_init(&domain->lock);
777 spin_lock_init(&domain->pgtablelock);
778 INIT_LIST_HEAD(&domain->clients);
780 domain->domain.geometry.aperture_start = 0;
781 domain->domain.geometry.aperture_end = ~0UL;
782 domain->domain.geometry.force_aperture = true;
784 return &domain->domain;
787 free_pages((unsigned long)domain->lv2entcnt, 1);
789 free_pages((unsigned long)domain->pgtable, 2);
792 iommu_put_dma_cookie(&domain->domain);
794 kfree(domain);
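Lines 739-794 all fall inside the domain allocator. A hedged reconstruction of the control flow connecting them; the matched lines appear verbatim, while the error-label names, the NUM_LV1ENTRIES bound, the dma_mapping_error() check and the domain-type handling (elided here) are assumptions:

    static struct iommu_domain *exynos_iommu_domain_alloc(unsigned int type)
    {
            struct exynos_iommu_domain *domain;
            dma_addr_t handle;
            int i;

            domain = kzalloc(sizeof(*domain), GFP_KERNEL);
            if (!domain)
                    return NULL;

            if (iommu_get_dma_cookie(&domain->domain) != 0)
                    goto err_free;

            /* order 2: a 16KiB lv1 table covering 4GiB in 1MiB sections */
            domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
            if (!domain->pgtable)
                    goto err_cookie;

            domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
            if (!domain->lv2entcnt)
                    goto err_pgtable;

            for (i = 0; i < NUM_LV1ENTRIES; i++)
                    domain->pgtable[i] = ZERO_LV2LINK;

            /*
             * Map the table once so later cache maintenance can flush CPU
             * writes before the SYSMMU walks it; the driver relies on a
             * 1:1 DMA translation, hence the BUG_ON.
             */
            handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE,
                                    DMA_TO_DEVICE);
            BUG_ON(handle != virt_to_phys(domain->pgtable));
            if (dma_mapping_error(dma_dev, handle))
                    goto err_lv2ent;

            spin_lock_init(&domain->lock);
            spin_lock_init(&domain->pgtablelock);
            INIT_LIST_HEAD(&domain->clients);

            /* the SYSMMU can translate the whole 32-bit IOVA space */
            domain->domain.geometry.aperture_start = 0;
            domain->domain.geometry.aperture_end = ~0UL;
            domain->domain.geometry.force_aperture = true;

            return &domain->domain;

    err_lv2ent:
            free_pages((unsigned long)domain->lv2entcnt, 1);
    err_pgtable:
            free_pages((unsigned long)domain->pgtable, 2);
    err_cookie:
            iommu_put_dma_cookie(&domain->domain);
    err_free:
            kfree(domain);
            return NULL;
    }
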
800 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
805 WARN_ON(!list_empty(&domain->clients));
807 spin_lock_irqsave(&domain->lock, flags);
809 list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
813 data->domain = NULL;
818 spin_unlock_irqrestore(&domain->lock, flags);
823 dma_unmap_single(dma_dev, virt_to_phys(domain->pgtable), LV1TABLE_SIZE,
827 if (lv1ent_page(domain->pgtable + i)) {
828 phys_addr_t base = lv2table_base(domain->pgtable + i);
836 free_pages((unsigned long)domain->pgtable, 2);
837 free_pages((unsigned long)domain->lv2entcnt, 1);
838 kfree(domain);
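Lines 800-838 are the matching teardown: warn if clients remain, force-detach any that do (809-818), unmap the lv1 table (823), then release every lv2 table before freeing the lv1 pages themselves. A hedged sketch of the loop at 827-836; LV2TABLE_SIZE and lv2table_kmem_cache follow the driver's naming but are not among the matches:

    for (i = 0; i < NUM_LV1ENTRIES; i++)
            if (lv1ent_page(domain->pgtable + i)) {
                    phys_addr_t base = lv2table_base(domain->pgtable + i);

                    /* undo the per-table DMA mapping, then return the
                     * lv2 table to its slab cache */
                    dma_unmap_single(dma_dev, base, LV2TABLE_SIZE,
                                     DMA_TO_DEVICE);
                    kmem_cache_free(lv2table_kmem_cache,
                                    phys_to_virt(base));
            }
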
844 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
846 phys_addr_t pagetable = virt_to_phys(domain->pgtable);
850 if (!has_sysmmu(dev) || owner->domain != iommu_domain)
862 spin_lock_irqsave(&domain->lock, flags);
863 list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
866 data->domain = NULL;
870 owner->domain = NULL;
871 spin_unlock_irqrestore(&domain->lock, flags);
882 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
885 phys_addr_t pagetable = virt_to_phys(domain->pgtable);
891 if (owner->domain)
892 exynos_iommu_detach_device(owner->domain, dev);
896 spin_lock_irqsave(&domain->lock, flags);
900 data->domain = domain;
901 list_add_tail(&data->domain_node, &domain->clients);
904 owner->domain = iommu_domain;
905 spin_unlock_irqrestore(&domain->lock, flags);
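Lines 844-905 pair the detach and attach paths; both compute virt_to_phys(domain->pgtable) so each controller can be (re)programmed with the table base. Attach first tears down any previous attachment (891-892), then links every SYSMMU serving the master onto the new domain's client list. A hedged sketch of that second half; the owner->controllers list and its owner_node linkage are assumptions about the driver's per-master bookkeeping, and programming the hardware with 'pagetable' is elided:

    if (owner->domain)
            exynos_iommu_detach_device(owner->domain, dev);

    spin_lock_irqsave(&domain->lock, flags);
    list_for_each_entry(data, &owner->controllers, owner_node) {
            data->domain = domain;                    /* back-pointer, line 900 */
            list_add_tail(&data->domain_node, &domain->clients);
    }
    owner->domain = iommu_domain;                     /* line 904 */
    spin_unlock_irqrestore(&domain->lock, flags);
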
922 static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
970 spin_lock(&domain->lock);
971 list_for_each_entry(data, &domain->clients, domain_node)
973 spin_unlock(&domain->lock);
980 static int lv1set_section(struct exynos_iommu_domain *domain,
1003 spin_lock(&domain->lock);
1010 list_for_each_entry(data, &domain->clients, domain_node)
1013 spin_unlock(&domain->lock);
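alloc_lv2entry() (922) and lv1set_section() (980) both take domain->lock around a walk of the client list (970-973, 1003-1013): when a first-level entry changes, any first-level descriptor cached by an attached SYSMMU goes stale and must be dropped on every controller. A hedged sketch of that fan-out; sysmmu_tlb_invalidate_flpdcache() follows the driver's naming but is not among the matches:

    spin_lock(&domain->lock);
    list_for_each_entry(data, &domain->clients, domain_node)
            sysmmu_tlb_invalidate_flpdcache(data, iova);
    spin_unlock(&domain->lock);
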
1082 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
1088 BUG_ON(domain->pgtable == NULL);
1091 spin_lock_irqsave(&domain->pgtablelock, flags);
1093 entry = section_entry(domain->pgtable, iova);
1096 ret = lv1set_section(domain, entry, iova, paddr, prot,
1097 &domain->lv2entcnt[lv1ent_offset(iova)]);
1101 pent = alloc_lv2entry(domain, entry, iova,
1102 &domain->lv2entcnt[lv1ent_offset(iova)]);
1108 &domain->lv2entcnt[lv1ent_offset(iova)]);
1115 spin_unlock_irqrestore(&domain->pgtablelock, flags);
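exynos_iommu_map() (1082-1115) dispatches on size under pgtablelock: a 1MiB request programs the lv1 entry directly, anything smaller first guarantees an lv2 table via alloc_lv2entry() and then writes 4KiB small-page or 64KiB large-page entries. A hedged sketch of the dispatch around lines 1096-1108; SECT_SIZE and lv2set_page() follow the driver's naming, and lv2set_page()'s exact parameter list is an assumption:

    if (size == SECT_SIZE) {
            ret = lv1set_section(domain, entry, iova, paddr, prot,
                                 &domain->lv2entcnt[lv1ent_offset(iova)]);
    } else {
            sysmmu_pte_t *pent;

            pent = alloc_lv2entry(domain, entry, iova,
                                  &domain->lv2entcnt[lv1ent_offset(iova)]);
            if (IS_ERR(pent))
                    ret = PTR_ERR(pent);
            else
                    ret = lv2set_page(pent, paddr, size, prot,
                                      &domain->lv2entcnt[lv1ent_offset(iova)]);
    }
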
1120 static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain,
1126 spin_lock_irqsave(&domain->lock, flags);
1128 list_for_each_entry(data, &domain->clients, domain_node)
1131 spin_unlock_irqrestore(&domain->lock, flags);
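exynos_iommu_tlb_invalidate_entry() (1120-1131) is the unmap-side counterpart of the fan-out above: the same lock-and-walk over domain->clients, here invalidating the unmapped range on each controller. The per-controller callee name is assumed:

    spin_lock_irqsave(&domain->lock, flags);
    list_for_each_entry(data, &domain->clients, domain_node)
            sysmmu_tlb_invalidate_entry(data, iova, size);
    spin_unlock_irqrestore(&domain->lock, flags);
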
1138 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
1144 BUG_ON(domain->pgtable == NULL);
1146 spin_lock_irqsave(&domain->pgtablelock, flags);
1148 ent = section_entry(domain->pgtable, iova);
1180 domain->lv2entcnt[lv1ent_offset(iova)] += 1;
1198 domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
1200 spin_unlock_irqrestore(&domain->pgtablelock, flags);
1202 exynos_iommu_tlb_invalidate_entry(domain, iova, size);
1206 spin_unlock_irqrestore(&domain->pgtablelock, flags);
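Note the direction of the counter updates at 1180 and 1198: lv2entcnt[] tracks free slots per lv2 table, so unmapping a 4KiB small page returns one slot and unmapping a large page returns SPAGES_PER_LPAGE slots (sixteen, assuming the driver's usual 64KiB/4KiB geometry). The TLB invalidation at 1202 deliberately runs after pgtablelock is dropped; walking the clients needs only domain->lock.
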
1217 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
1222 spin_lock_irqsave(&domain->pgtablelock, flags);
1224 entry = section_entry(domain->pgtable, iova);
1237 spin_unlock_irqrestore(&domain->pgtablelock, flags);
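exynos_iommu_iova_to_phys() (1217-1237) resolves an IOVA with a two-level walk under pgtablelock. A hedged sketch of the walk that starts at line 1224; the lv1ent_*/lv2ent_* predicates and the *_phys/*_offs accessors follow the driver's naming conventions but are not among the matches:

    entry = section_entry(domain->pgtable, iova);

    if (lv1ent_section(entry)) {
            /* 1MiB section mapped directly in the lv1 table */
            phys = section_phys(entry) + section_offs(iova);
    } else if (lv1ent_page(entry)) {
            entry = page_entry(entry, iova);

            if (lv2ent_large(entry))        /* 64KiB large page */
                    phys = lpage_phys(entry) + lpage_offs(iova);
            else if (lv2ent_small(entry))   /* 4KiB small page */
                    phys = spage_phys(entry) + spage_offs(iova);
    }
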
1276 if (owner->domain) {
1280 WARN_ON(owner->domain !=
1282 exynos_iommu_detach_device(owner->domain, dev);
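The final matches (1276-1282) come from the device-release path: if a master disappears while still attached, the driver warns unless the attachment is merely the group's default domain, then detaches it. A hedged reconstruction of the elided middle of lines 1280-1281; iommu_group_get(), iommu_group_put() and iommu_group_default_domain() are core IOMMU APIs, but their use here is inferred from the fragments above:

    if (owner->domain) {
            struct iommu_group *group = iommu_group_get(dev);

            if (group) {
                    WARN_ON(owner->domain !=
                            iommu_group_default_domain(group));
                    exynos_iommu_detach_device(owner->domain, dev);
                    iommu_group_put(group);
            }
    }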