Lines matching references to "as" (struct tegra_smmu_as) in drivers/iommu/tegra-smmu.c, Linux kernel; each match is prefixed with its line number in the file.
288 struct tegra_smmu_as *as;
293 as = kzalloc(sizeof(*as), GFP_KERNEL);
294 if (!as)
297 as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;
299 as->pd = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
300 if (!as->pd) {
301 kfree(as);
305 as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
306 if (!as->count) {
307 __free_page(as->pd);
308 kfree(as);
312 as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
313 if (!as->pts) {
314 kfree(as->count);
315 __free_page(as->pd);
316 kfree(as);
320 spin_lock_init(&as->lock);
323 as->domain.geometry.aperture_start = 0;
324 as->domain.geometry.aperture_end = 0xffffffff;
325 as->domain.geometry.force_aperture = true;
327 return &as->domain;
332 struct tegra_smmu_as *as = to_smmu_as(domain);
336 WARN_ON_ONCE(as->use_count);
337 kfree(as->count);
338 kfree(as->pts);
339 kfree(as);
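The fragments at 288-339 come from tegra_smmu_domain_alloc() and tegra_smmu_domain_free(). Below is a hedged reconstruction of the allocation path; the elided return statements and the usual IOMMU_DOMAIN_UNMANAGED type check are assumptions consistent with the fragments. Note how each error branch unwinds by hand, in reverse order, exactly what was allocated before it:

static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
{
        struct tegra_smmu_as *as;

        if (type != IOMMU_DOMAIN_UNMANAGED)
                return NULL;

        as = kzalloc(sizeof(*as), GFP_KERNEL);
        if (!as)
                return NULL;

        as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;

        /* page directory: one zeroed page from the DMA-able region */
        as->pd = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
        if (!as->pd) {
                kfree(as);
                return NULL;
        }

        /* per-PDE use counts and page-table pointers */
        as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
        if (!as->count) {
                __free_page(as->pd);
                kfree(as);
                return NULL;
        }

        as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
        if (!as->pts) {
                kfree(as->count);
                __free_page(as->pd);
                kfree(as);
                return NULL;
        }

        spin_lock_init(&as->lock);

        /* the SMMU translates the full 32-bit IOVA space */
        as->domain.geometry.aperture_start = 0;
        as->domain.geometry.aperture_end = 0xffffffff;
        as->domain.geometry.force_aperture = true;

        return &as->domain;
}

The free path at 332-339 WARNs if the address space is still in use and then releases only the kcalloc()ed arrays; notably the listing shows no __free_page(as->pd) there, which matches the long-standing upstream TODO about freeing the page directory and page tables.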
420 struct tegra_smmu_as *as)
425 if (as->use_count > 0) {
426 as->use_count++;
430 as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
432 if (dma_mapping_error(smmu->dev, as->pd_dma))
436 if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
441 err = tegra_smmu_alloc_asid(smmu, &as->id);
445 smmu_flush_ptc(smmu, as->pd_dma, 0);
446 smmu_flush_tlb_asid(smmu, as->id);
448 smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
449 value = SMMU_PTB_DATA_VALUE(as->pd_dma, as->attr);
453 as->smmu = smmu;
454 as->use_count++;
459 dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
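Lines 420-459 are from tegra_smmu_as_prepare(): an address space is wired to the hardware only on first use and reference-counted after that. A sketch of the likely full body follows; the error label, return values, and the SMMU_PTB_DATA write are assumptions consistent with the fragments:

static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
                                 struct tegra_smmu_as *as)
{
        u32 value;
        int err;

        /* already live on this SMMU: just take another reference */
        if (as->use_count > 0) {
                as->use_count++;
                return 0;
        }

        /* first user: make the page directory visible to the SMMU */
        as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
                                  DMA_TO_DEVICE);
        if (dma_mapping_error(smmu->dev, as->pd_dma))
                return -ENOMEM;

        /* the hardware cannot reach a PD outside its DMA address range */
        if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
                err = -ENOMEM;
                goto err_unmap;
        }

        err = tegra_smmu_alloc_asid(smmu, &as->id);
        if (err < 0)
                goto err_unmap;

        smmu_flush_ptc(smmu, as->pd_dma, 0);
        smmu_flush_tlb_asid(smmu, as->id);

        /* program the page table base for this ASID */
        smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
        value = SMMU_PTB_DATA_VALUE(as->pd_dma, as->attr);
        smmu_writel(smmu, value, SMMU_PTB_DATA);
        smmu_flush(smmu);

        as->smmu = smmu;
        as->use_count++;

        return 0;

err_unmap:
        dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
        return err;
}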
464 struct tegra_smmu_as *as)
466 if (--as->use_count > 0)
469 tegra_smmu_free_asid(smmu, as->id);
471 dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
473 as->smmu = NULL;
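tegra_smmu_as_unprepare() (464-473) is the mirror image: only the last reference releases the ASID and unmaps the page directory. Sketch, with the early return assumed:

static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
                                    struct tegra_smmu_as *as)
{
        /* still referenced by another attached swgroup */
        if (--as->use_count > 0)
                return;

        tegra_smmu_free_asid(smmu, as->id);

        dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);

        as->smmu = NULL;
}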
480 struct tegra_smmu_as *as = to_smmu_as(domain);
497 err = tegra_smmu_as_prepare(smmu, as);
501 tegra_smmu_enable(smmu, swgroup, as->id);
513 struct tegra_smmu_as *as = to_smmu_as(domain);
515 struct tegra_smmu *smmu = as->smmu;
530 tegra_smmu_disable(smmu, swgroup, as->id);
531 tegra_smmu_as_unprepare(smmu, as);
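Lines 480-531 are from the attach/detach callbacks. Every swgroup the device sits behind is enabled for the address space's ASID, and as_prepare()/as_unprepare() keep the per-AS use count balanced across swgroups. The iteration differs across kernel versions (recent ones walk the iommu_fwspec IDs, older ones parse the "iommus" OF property), so treat the loop shape below as an assumption:

static int tegra_smmu_attach_dev(struct iommu_domain *domain,
                                 struct device *dev)
{
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
        struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
        struct tegra_smmu_as *as = to_smmu_as(domain);
        unsigned int index;
        int err;

        if (!fwspec)
                return -ENOENT;

        for (index = 0; index < fwspec->num_ids; index++) {
                err = tegra_smmu_as_prepare(smmu, as);
                if (err)
                        goto disable;

                tegra_smmu_enable(smmu, fwspec->ids[index], as->id);
        }

        return 0;

disable:
        /* unwind the swgroups already enabled */
        while (index--) {
                tegra_smmu_disable(smmu, fwspec->ids[index], as->id);
                tegra_smmu_as_unprepare(smmu, as);
        }

        return err;
}

static void tegra_smmu_detach_dev(struct iommu_domain *domain,
                                  struct device *dev)
{
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
        struct tegra_smmu_as *as = to_smmu_as(domain);
        struct tegra_smmu *smmu = as->smmu;
        unsigned int index;

        if (!fwspec)
                return;

        for (index = 0; index < fwspec->num_ids; index++) {
                tegra_smmu_disable(smmu, fwspec->ids[index], as->id);
                tegra_smmu_as_unprepare(smmu, as);
        }
}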
536 static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
540 struct tegra_smmu *smmu = as->smmu;
541 u32 *pd = page_address(as->pd);
548 dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
552 smmu_flush_ptc(smmu, as->pd_dma, offset);
553 smmu_flush_tlb_section(smmu, as->id, iova);
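tegra_smmu_set_pde() (536-553) updates one page directory entry and pushes it out in three steps: CPU cache to memory, the SMMU's page table cache (PTC), then the TLB entries for the affected section. Sketch; the iova_pd_index() helper and the final smmu_flush() are assumed names consistent with the fragments:

static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
                               u32 value)
{
        unsigned int pd_index = iova_pd_index(iova);   /* assumed helper */
        struct tegra_smmu *smmu = as->smmu;
        u32 *pd = page_address(as->pd);
        unsigned long offset = pd_index * sizeof(*pd);

        /* write the entry first ... */
        pd[pd_index] = value;

        /* ... then make it visible to the device ... */
        dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
                                         sizeof(*pd), DMA_TO_DEVICE);

        /* ... and invalidate stale copies in the SMMU's caches */
        smmu_flush_ptc(smmu, as->pd_dma, offset);
        smmu_flush_tlb_section(smmu, as->id, iova);
        smmu_flush(smmu);
}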
564 static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
568 struct tegra_smmu *smmu = as->smmu;
572 pt_page = as->pts[pd_index];
576 pd = page_address(as->pd);
582 static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
586 struct tegra_smmu *smmu = as->smmu;
588 if (!as->pts[pde]) {
605 as->pts[pde] = page;
607 tegra_smmu_set_pde(as, iova, SMMU_MK_PDE(dma, SMMU_PDE_ATTR |
612 u32 *pd = page_address(as->pd);
617 return tegra_smmu_pte_offset(as->pts[pde], iova);
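Lines 564-617 implement the two-level walk: the top IOVA bits index the page directory, the next bits index a page table that is allocated lazily. tegra_smmu_pte_lookup() is the read-only walk; as_get_pte() installs a new page table (pre-allocated by the caller) when none exists. Sketch; iova_pd_index() and smmu_pde_to_dma() are assumed helper names:

static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
                                  dma_addr_t *dmap)
{
        unsigned int pd_index = iova_pd_index(iova);
        struct tegra_smmu *smmu = as->smmu;
        struct page *pt_page;
        u32 *pd;

        pt_page = as->pts[pd_index];
        if (!pt_page)
                return NULL;    /* no page table for this range yet */

        /* recover the page table's DMA address from its PDE */
        pd = page_address(as->pd);
        *dmap = smmu_pde_to_dma(smmu, pd[pd_index]);

        return tegra_smmu_pte_offset(pt_page, iova);
}

static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
                       dma_addr_t *dmap, struct page *page)
{
        unsigned int pde = iova_pd_index(iova);
        struct tegra_smmu *smmu = as->smmu;

        if (!as->pts[pde]) {
                dma_addr_t dma;

                dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
                                   DMA_TO_DEVICE);
                if (dma_mapping_error(smmu->dev, dma)) {
                        __free_page(page);
                        return NULL;
                }

                if (!smmu_dma_addr_valid(smmu, dma)) {
                        dma_unmap_page(smmu->dev, dma, SMMU_SIZE_PT,
                                       DMA_TO_DEVICE);
                        __free_page(page);
                        return NULL;
                }

                as->pts[pde] = page;

                /* publish the new page table through its PDE */
                tegra_smmu_set_pde(as, iova, SMMU_MK_PDE(dma, SMMU_PDE_ATTR |
                                                              SMMU_PDE_NEXT));

                *dmap = dma;
        } else {
                u32 *pd = page_address(as->pd);

                *dmap = smmu_pde_to_dma(smmu, pd[pde]);
        }

        return tegra_smmu_pte_offset(as->pts[pde], iova);
}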
620 static void tegra_smmu_pte_get_use(struct tegra_smmu_as *as, unsigned long iova)
624 as->count[pd_index]++;
627 static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
630 struct page *page = as->pts[pde];
636 if (--as->count[pde] == 0) {
637 struct tegra_smmu *smmu = as->smmu;
638 u32 *pd = page_address(as->pd);
641 tegra_smmu_set_pde(as, iova, 0);
645 as->pts[pde] = NULL;
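Lines 620-645 keep a per-PDE count of live PTEs so a page table can be returned to the system as soon as its last mapping disappears. Sketch; the dma_unmap_page()/smmu_pde_to_dma() details on the free path are assumptions consistent with the surrounding fragments:

static void tegra_smmu_pte_get_use(struct tegra_smmu_as *as, unsigned long iova)
{
        unsigned int pd_index = iova_pd_index(iova);

        as->count[pd_index]++;
}

static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
{
        unsigned int pde = iova_pd_index(iova);
        struct page *page = as->pts[pde];

        /* last PTE in this page table gone: free the page table itself */
        if (--as->count[pde] == 0) {
                struct tegra_smmu *smmu = as->smmu;
                u32 *pd = page_address(as->pd);
                dma_addr_t pte_dma = smmu_pde_to_dma(smmu, pd[pde]);

                tegra_smmu_set_pde(as, iova, 0);   /* clear the PDE first */

                dma_unmap_page(smmu->dev, pte_dma, SMMU_SIZE_PT,
                               DMA_TO_DEVICE);
                __free_page(page);
                as->pts[pde] = NULL;
        }
}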
649 static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
652 struct tegra_smmu *smmu = as->smmu;
660 smmu_flush_tlb_group(smmu, as->id, iova);
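tegra_smmu_set_pte() (649-660) is the PTE-level counterpart of tegra_smmu_set_pde(): write, sync the single word for the device, flush the PTC line, then invalidate the TLB, here per group rather than per section. Sketch, with the sync/flush calls around the shown line assumed to mirror the PDE path:

static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
                               u32 *pte, dma_addr_t pte_dma, u32 val)
{
        struct tegra_smmu *smmu = as->smmu;
        unsigned long offset = offset_in_page(pte);

        *pte = val;

        dma_sync_single_range_for_device(smmu->dev, pte_dma, offset,
                                         sizeof(*pte), DMA_TO_DEVICE);
        smmu_flush_ptc(smmu, pte_dma, offset);
        smmu_flush_tlb_group(smmu, as->id, iova);
        smmu_flush(smmu);
}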
664 static struct page *as_get_pde_page(struct tegra_smmu_as *as,
669 struct page *page = as->pts[pde];
681 spin_unlock_irqrestore(&as->lock, *flags);
686 spin_lock_irqsave(&as->lock, *flags);
693 if (as->pts[pde]) {
697 page = as->pts[pde];
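as_get_pde_page() (664-697) handles PDE-page allocation under the as->lock spinlock. A blocking allocation is illegal with a spinlock held, so the lock is dropped around alloc_page() and the slot is re-checked afterwards: a concurrent mapper may have installed a page in the meantime, in which case the fresh page is freed and the winner's page is used. Sketch; the blocking-gfp test is written here with gfpflags_allow_blocking(), which upstream has phrased differently over time:

static struct page *as_get_pde_page(struct tegra_smmu_as *as,
                                    unsigned long iova, gfp_t gfp,
                                    unsigned long *flags)
{
        unsigned int pde = iova_pd_index(iova);
        struct page *page = as->pts[pde];

        /* fast path: the page table already exists */
        if (page)
                return page;

        /* drop the lock so a sleeping allocation is legal */
        if (gfpflags_allow_blocking(gfp))
                spin_unlock_irqrestore(&as->lock, *flags);

        page = alloc_page(gfp | __GFP_DMA | __GFP_ZERO);

        if (gfpflags_allow_blocking(gfp))
                spin_lock_irqsave(&as->lock, *flags);

        /* re-check: a concurrent mapping may have won the race */
        if (as->pts[pde]) {
                if (page)
                        __free_page(page);

                page = as->pts[pde];
        }

        return page;
}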
708 struct tegra_smmu_as *as = to_smmu_as(domain);
714 page = as_get_pde_page(as, iova, gfp, flags);
718 pte = as_get_pte(as, iova, &pte_dma, page);
724 tegra_smmu_pte_get_use(as, iova);
734 tegra_smmu_set_pte(as, iova, pte, pte_dma,
744 struct tegra_smmu_as *as = to_smmu_as(domain);
748 pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
752 tegra_smmu_set_pte(as, iova, pte, pte_dma, 0);
753 tegra_smmu_pte_put_use(as, iova);
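Lines 708-753 are the lock-free cores of map and unmap (the entry points below hold as->lock). Map pulls everything together: get the PDE page, get the PTE slot, bump the use count only for a previously empty slot, then write the PTE with attributes derived from the IOMMU prot flags. Sketch; the SMMU_PTE_* attribute handling and SMMU_PHYS_PFN() are filled in per upstream convention and should be read as assumptions:

static int __tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
                            phys_addr_t paddr, size_t size, int prot,
                            gfp_t gfp, unsigned long *flags)
{
        struct tegra_smmu_as *as = to_smmu_as(domain);
        dma_addr_t pte_dma;
        struct page *page;
        u32 pte_attrs;
        u32 *pte;

        page = as_get_pde_page(as, iova, gfp, flags);
        if (!page)
                return -ENOMEM;

        pte = as_get_pte(as, iova, &pte_dma, page);
        if (!pte)
                return -ENOMEM;

        /* only a previously empty slot pins the page table */
        if (*pte == 0)
                tegra_smmu_pte_get_use(as, iova);

        pte_attrs = SMMU_PTE_NONSECURE;
        if (prot & IOMMU_READ)
                pte_attrs |= SMMU_PTE_READABLE;
        if (prot & IOMMU_WRITE)
                pte_attrs |= SMMU_PTE_WRITABLE;

        tegra_smmu_set_pte(as, iova, pte, pte_dma,
                           SMMU_PHYS_PFN(paddr) | pte_attrs);

        return 0;
}

static size_t __tegra_smmu_unmap(struct iommu_domain *domain,
                                 unsigned long iova, size_t size)
{
        struct tegra_smmu_as *as = to_smmu_as(domain);
        dma_addr_t pte_dma;
        u32 *pte;

        pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
        if (!pte || !*pte)
                return 0;

        tegra_smmu_set_pte(as, iova, pte, pte_dma, 0);
        tegra_smmu_pte_put_use(as, iova);

        return size;
}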
761 struct tegra_smmu_as *as = to_smmu_as(domain);
765 spin_lock_irqsave(&as->lock, flags);
767 spin_unlock_irqrestore(&as->lock, flags);
775 struct tegra_smmu_as *as = to_smmu_as(domain);
778 spin_lock_irqsave(&as->lock, flags);
780 spin_unlock_irqrestore(&as->lock, flags);
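Lines 761-780 are the thin iommu_ops entry points: they take as->lock with interrupts disabled and delegate to the cores above. Sketch, with the signatures assumed to match the iommu_ops of the era:

static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
                          phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
        struct tegra_smmu_as *as = to_smmu_as(domain);
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&as->lock, flags);
        ret = __tegra_smmu_map(domain, iova, paddr, size, prot, gfp, &flags);
        spin_unlock_irqrestore(&as->lock, flags);

        return ret;
}

static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
                               size_t size, struct iommu_iotlb_gather *gather)
{
        struct tegra_smmu_as *as = to_smmu_as(domain);
        unsigned long flags;
        size_t unmapped;

        spin_lock_irqsave(&as->lock, flags);
        unmapped = __tegra_smmu_unmap(domain, iova, size);
        spin_unlock_irqrestore(&as->lock, flags);

        return unmapped;
}

Passing flags down by pointer is what lets as_get_pde_page() legally drop and re-take the lock in the middle of a map.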
788 struct tegra_smmu_as *as = to_smmu_as(domain);
793 pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
797 pfn = *pte & as->smmu->pfn_mask;
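Finally, lines 788-797 are from tegra_smmu_iova_to_phys(): a read-only walk that masks the PFN out of the PTE and rebuilds the physical address. Sketch; SMMU_PFN_PHYS() and SMMU_OFFSET_IN_PAGE() are assumed names for the recombination step:

static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
                                           dma_addr_t iova)
{
        struct tegra_smmu_as *as = to_smmu_as(domain);
        unsigned long pfn;
        dma_addr_t pte_dma;
        u32 *pte;

        pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
        if (!pte || !*pte)
                return 0;       /* nothing mapped at this IOVA */

        pfn = *pte & as->smmu->pfn_mask;

        return SMMU_PFN_PHYS(pfn) + SMMU_OFFSET_IN_PAGE(iova);
}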