Lines matching refs:rk_domain
592 struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
598 spin_lock_irqsave(&rk_domain->dt_lock, flags);
600 dte = rk_domain->dt[rk_iova_dte_index(iova)];
612 spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
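These fragments track rk_domain through what appears to be the Rockchip IOMMU driver (drivers/iommu/rockchip-iommu.c). Lines 592-612 are the read side of its two-level page table: the lookup takes dt_lock, indexes the directory table (dt) by the top IOVA bits, and follows the resulting directory entry (DTE) to a page-table entry (PTE). A minimal sketch of the index helpers this implies, assuming a 32-bit IOVA split into a 10-bit directory index, a 10-bit table index, and a 12-bit page offset (the mask and shift values are reconstructed, not quoted from the file):

#define RK_IOVA_DTE_MASK	0xffc00000	/* bits 31:22 -> directory index */
#define RK_IOVA_DTE_SHIFT	22
#define RK_IOVA_PTE_MASK	0x003ff000	/* bits 21:12 -> page-table index */
#define RK_IOVA_PTE_SHIFT	12
#define RK_IOVA_PAGE_MASK	0x00000fff	/* bits 11:0  -> offset in page  */

static u32 rk_iova_dte_index(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT;
}

static u32 rk_iova_pte_index(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT;
}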
617 static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
624 spin_lock_irqsave(&rk_domain->iommus_lock, flags);
625 list_for_each(pos, &rk_domain->iommus) {
643 spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
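Lines 617-643: rk_iommu_zap_iova is the IOTLB shootdown path. Under iommus_lock it walks every IOMMU instance currently attached to the domain and invalidates the IOVA range in each one's TLB. A hedged sketch of the loop body; the list_entry() recovery of the iommu and the per-instance zap helper name are assumptions consistent with the list_for_each() call quoted above:

	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_for_each(pos, &rk_domain->iommus) {
		struct rk_iommu *iommu;

		/* each attached IOMMU is threaded onto the list via a node field */
		iommu = list_entry(pos, struct rk_iommu, node);
		rk_iommu_zap_lines(iommu, iova, size);	/* assumed helper name */
	}
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);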
646 static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain,
649 rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);
651 rk_iommu_zap_iova(rk_domain, iova + size - SPAGE_SIZE,
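Lines 646-651 zap only the first and the last 4 KiB page (SPAGE_SIZE) of a range rather than every page, presumably because only the edges of a new mapping can share a cached DTE/PTE line with a pre-existing mapping; the middle pages cannot hold stale translations. The elided line 650 is most likely a size guard, roughly:

	rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);
	if (size > SPAGE_SIZE)	/* assumed guard on the elided line 650 */
		rk_iommu_zap_iova(rk_domain, iova + size - SPAGE_SIZE,
				  SPAGE_SIZE);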
655 static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
663 assert_spin_locked(&rk_domain->dt_lock);
666 dte_addr = &rk_domain->dt[dte_index];
685 rk_table_flush(rk_domain, pt_dma, NUM_PT_ENTRIES);
686 rk_table_flush(rk_domain,
687 rk_domain->dt_dma + dte_index * sizeof(u32), 1);
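Lines 655-687: rk_dte_get_page_table allocates a second-level page table on demand; assert_spin_locked() at 663 documents that the caller must already hold dt_lock. After the new DTE is installed, both the fresh table (all NUM_PT_ENTRIES slots) and the single modified directory word are flushed, because the IOMMU reads these tables straight from memory and is not cache-coherent with the CPU. A hedged sketch of the allocation path between the quoted lines; helper names such as rk_dte_is_pt_valid() and rk_mk_dte() are assumptions matching the driver's naming pattern:

	dte_addr = &rk_domain->dt[dte_index];
	dte = *dte_addr;
	if (rk_dte_is_pt_valid(dte))
		goto done;	/* table already exists, just return it */

	/* GFP_ATOMIC: we are inside the dt_lock spinlock */
	page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
	if (!page_table)
		return ERR_PTR(-ENOMEM);

	pt_dma = dma_map_single(dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, pt_dma)) {
		free_page((unsigned long)page_table);
		return ERR_PTR(-ENOMEM);
	}

	*dte_addr = rk_mk_dte(pt_dma);	/* point the DTE at the new table */

	rk_table_flush(rk_domain, pt_dma, NUM_PT_ENTRIES);
	rk_table_flush(rk_domain,
		       rk_domain->dt_dma + dte_index * sizeof(u32), 1);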
693 static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain,
700 assert_spin_locked(&rk_domain->dt_lock);
710 rk_table_flush(rk_domain, pte_dma, pte_count);
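Lines 693-710: rk_iommu_unmap_iova invalidates PTEs in place, again under the caller's dt_lock, and flushes only the entries it actually touched. A hedged sketch of the body; rk_pte_is_page_valid() and rk_mk_pte_invalid() are assumed names:

	unsigned int pte_count;
	unsigned int pte_total = size / SPAGE_SIZE;

	assert_spin_locked(&rk_domain->dt_lock);

	for (pte_count = 0; pte_count < pte_total; pte_count++) {
		u32 pte = pte_addr[pte_count];

		if (!rk_pte_is_page_valid(pte))
			break;	/* stop at the first hole */

		pte_addr[pte_count] = rk_mk_pte_invalid(pte);
	}

	/* flush only the PTEs actually cleared */
	rk_table_flush(rk_domain, pte_dma, pte_count);

	return pte_count * SPAGE_SIZE;	/* how much was really unmapped */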
715 static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
723 assert_spin_locked(&rk_domain->dt_lock);
736 rk_table_flush(rk_domain, pte_dma, pte_total);
744 rk_iommu_zap_iova_first_last(rk_domain, iova, size);
749 rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma,
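Lines 715-749: rk_iommu_map_iova writes one PTE per 4 KiB page, flushes the run, then calls rk_iommu_zap_iova_first_last() at 744 to evict the stale edge translations noted above. Line 749 is the failure path: if some PTE in the range is already valid, the entries written so far are rolled back through rk_iommu_unmap_iova(). A hedged sketch of that loop and its unwind:

	for (pte_count = 0; pte_count < pte_total; pte_count++) {
		if (rk_pte_is_page_valid(pte_addr[pte_count]))
			goto unwind;	/* collision with an existing mapping */

		pte_addr[pte_count] = rk_mk_pte(paddr, prot);
		paddr += SPAGE_SIZE;
	}

	rk_table_flush(rk_domain, pte_dma, pte_total);
	rk_iommu_zap_iova_first_last(rk_domain, iova, size);

	return 0;

unwind:
	/* undo only the prefix we managed to write */
	rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma,
			    pte_count * SPAGE_SIZE);
	return -EBUSY;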
763 struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
770 spin_lock_irqsave(&rk_domain->dt_lock, flags);
779 page_table = rk_dte_get_page_table(rk_domain, iova);
781 spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
785 dte_index = rk_domain->dt[rk_iova_dte_index(iova)];
789 ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova,
792 spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
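Lines 763-792: the top-level map callback. Everything happens in one dt_lock critical section: fetch or allocate the second-level table (779), then derive both the CPU pointer and the DMA address of the first PTE so that rk_iommu_map_iova() can flush exactly what it writes. Note that line 785 stores the raw DTE value in a variable named dte_index, which is a little misleading. The elided lines between 785 and 789 presumably read:

	pte_index = rk_iova_pte_index(iova);
	pte_addr = &page_table[pte_index];
	/* DMA-side address of the same PTE, for rk_table_flush();
	 * rk_dte_pt_address() is an assumed helper extracting the
	 * table address from the DTE value read at line 785 */
	pte_dma = rk_dte_pt_address(dte_index) + pte_index * sizeof(u32);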
800 struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
808 spin_lock_irqsave(&rk_domain->dt_lock, flags);
817 dte = rk_domain->dt[rk_iova_dte_index(iova)];
820 spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
827 unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size);
829 spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
832 rk_iommu_zap_iova(rk_domain, iova, unmap_size);
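Lines 800-832: the unmap callback mirrors map, with two details worth noting. First, if the DTE for this IOVA was never populated there is nothing to unmap, which is what the early unlock at 820 suggests; a hedged reconstruction of that check follows. Second, the IOTLB shootdown at 832 runs only after dt_lock is dropped at 829, since rk_iommu_zap_iova() takes iommus_lock and touches hardware, and is therefore kept out of the page-table critical section.

	dte = rk_domain->dt[rk_iova_dte_index(iova)];
	/* assumed check: bail out if no page table was ever installed */
	if (!rk_dte_is_pt_valid(dte)) {
		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
		return 0;
	}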
865 struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
882 rk_domain->dt_dma);
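Lines 865-882 appear to be the hardware-enable path: each MMU instance behind the master device is pointed at the domain's directory table by writing dt_dma into its DTE address register, which is why the directory must have a 32-bit DMA address. Line 882 is the tail of that register write; a hedged sketch of the surrounding loop, where iommu->num_mmu, iommu->bases[] and the register names follow the driver's conventions rather than the quoted lines:

	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
			       rk_domain->dt_dma);
		/* assumed: flush any translations cached before this point */
		rk_iommu_write(iommu->bases[i], RK_MMU_COMMAND,
			       RK_MMU_CMD_ZAP_CACHE);
	}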
900 struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
917 spin_lock_irqsave(&rk_domain->iommus_lock, flags);
919 spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
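Lines 900-919: the detach path. The elided line between 917 and 919 is presumably the list removal, so the domain stops shooting down TLBs on an IOMMU that no longer uses its page tables:

	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_del_init(&iommu->node);	/* assumed: drop from the domain's list */
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);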
933 struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
956 spin_lock_irqsave(&rk_domain->iommus_lock, flags);
957 list_add_tail(&iommu->node, &rk_domain->iommus);
958 spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
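Lines 933-958: attach is the mirror image. The ordering matters: list_add_tail() sits near the end of the function, after (judging by the enable path sketched above) the IOMMU's registers have been programmed with dt_dma, so rk_iommu_zap_iova() can never walk an instance whose directory pointer is not yet set.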
975 struct rk_iommu_domain *rk_domain;
983 rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL);
984 if (!rk_domain)
988 iommu_get_dma_cookie(&rk_domain->domain))
996 rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
997 if (!rk_domain->dt)
1000 rk_domain->dt_dma = dma_map_single(dma_dev, rk_domain->dt,
1002 if (dma_mapping_error(dma_dev, rk_domain->dt_dma)) {
1007 rk_table_flush(rk_domain, rk_domain->dt_dma, NUM_DT_ENTRIES);
1009 spin_lock_init(&rk_domain->iommus_lock);
1010 spin_lock_init(&rk_domain->dt_lock);
1011 INIT_LIST_HEAD(&rk_domain->iommus);
1013 rk_domain->domain.geometry.aperture_start = 0;
1014 rk_domain->domain.geometry.aperture_end = DMA_BIT_MASK(32);
1015 rk_domain->domain.geometry.force_aperture = true;
1017 return &rk_domain->domain;
1020 free_page((unsigned long)rk_domain->dt);
1023 iommu_put_dma_cookie(&rk_domain->domain);
1025 kfree(rk_domain);
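Lines 975-1025: domain allocation. The directory table is a single zeroed page allocated with GFP_DMA32 (996), presumably because the hardware register that receives dt_dma is 32 bits wide, so the table must live below 4 GiB. The page is then handed to the device with dma_map_single() (1000) and flushed once in full (1007, NUM_DT_ENTRIES words). The error labels at 1020-1025 unwind in reverse order of construction; if the DMA mapping itself failed (checked at 1002), only the page needs freeing. A hedged reconstruction, with label names assumed:

err_free_dt:
	free_page((unsigned long)rk_domain->dt);
err_put_cookie:
	if (type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(&rk_domain->domain);
err_free_domain:
	kfree(rk_domain);

	return NULL;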
1032 struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
1035 WARN_ON(!list_empty(&rk_domain->iommus));
1038 u32 dte = rk_domain->dt[i];
1048 dma_unmap_single(dma_dev, rk_domain->dt_dma,
1050 free_page((unsigned long)rk_domain->dt);
1053 iommu_put_dma_cookie(&rk_domain->domain);
1054 kfree(rk_domain);
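Lines 1032-1054: domain teardown. After warning if any IOMMU is still attached (1035), the loop at 1038 walks all NUM_DT_ENTRIES directory slots and frees every second-level table that was ever allocated, unmapping each from the device first; the directory page then gets the same treatment (1048-1050) before the cookie and the domain itself are released. A hedged sketch of the loop body, with the validity and address helpers assumed:

	for (i = 0; i < NUM_DT_ENTRIES; i++) {
		u32 dte = rk_domain->dt[i];

		if (rk_dte_is_pt_valid(dte)) {	/* assumed validity check */
			phys_addr_t pt_phys = rk_dte_pt_address(dte);
			u32 *page_table = phys_to_virt(pt_phys);

			dma_unmap_single(dma_dev, pt_phys,
					 SPAGE_SIZE, DMA_TO_DEVICE);
			free_page((unsigned long)page_table);
		}
	}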