Lines matching refs:domain (each entry is one matching source line, prefixed with its line number in the file)
212 static u32 ipmmu_ctx_read_root(struct ipmmu_vmsa_domain *domain,
215 return ipmmu_ctx_read(domain->mmu->root, domain->context_id, reg);
218 static void ipmmu_ctx_write_root(struct ipmmu_vmsa_domain *domain,
221 ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
224 static void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain,
227 if (domain->mmu != domain->mmu->root)
228 ipmmu_ctx_write(domain->mmu, domain->context_id, reg, data);
230 ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
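
The fragments at 212-230 appear to come from the Renesas IPMMU-VMSA driver's per-context register accessors, where a leaf IPMMU instance can sit in front of a root instance: reads always go to the root, while writes are mirrored to the leaf first and then to the root whenever the two differ. Below is a minimal, self-contained model of that dispatch; the struct layouts and helper names are illustrative stand-ins, not the driver's real definitions beyond what the matched lines show.

#include <stdint.h>

/* Illustrative stand-ins for the driver's device and domain structures. */
struct mmu_dev {
        struct mmu_dev *root;      /* points at itself when this is the root */
        uint32_t ctx_regs[8][16];  /* per-context register file (model only) */
};

struct vmsa_domain {
        struct mmu_dev *mmu;       /* MMU instance the domain is attached to */
        unsigned int context_id;   /* context slot on the root MMU */
};

static uint32_t ctx_read(struct mmu_dev *mmu, unsigned int ctx, unsigned int reg)
{
        return mmu->ctx_regs[ctx][reg];
}

static void ctx_write(struct mmu_dev *mmu, unsigned int ctx, unsigned int reg,
                      uint32_t data)
{
        mmu->ctx_regs[ctx][reg] = data;
}

/* Reads always target the root instance, as in ipmmu_ctx_read_root(). */
static uint32_t ctx_read_root(struct vmsa_domain *d, unsigned int reg)
{
        return ctx_read(d->mmu->root, d->context_id, reg);
}

/* Writes hit the leaf MMU first when it is distinct from the root, then
 * the root itself, mirroring the ipmmu_ctx_write_all() fragments above. */
static void ctx_write_all(struct vmsa_domain *d, unsigned int reg, uint32_t data)
{
        if (d->mmu != d->mmu->root)
                ctx_write(d->mmu, d->context_id, reg, data);
        ctx_write(d->mmu->root, d->context_id, reg, data);
}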
255 static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
261 false, domain, IMCTR))
262 dev_err_ratelimited(domain->mmu->dev,
266 static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)
270 reg = ipmmu_ctx_read_root(domain, IMCTR);
272 ipmmu_ctx_write_all(domain, IMCTR, reg);
274 ipmmu_tlb_sync(domain);
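
Lines 255-274 cover TLB maintenance: ipmmu_tlb_invalidate() does a read-modify-write of IMCTR to set the flush bit through the write-all helper, then ipmmu_tlb_sync() polls IMCTR until the hardware clears that bit again, logging a ratelimited error on timeout. A hedged sketch of the same pattern follows; it uses a plain counter loop and a fake hardware tick instead of the kernel's poll-timeout helpers, and the register index and bit value are made up.

#include <stdint.h>
#include <stdio.h>

#define IMCTR        0           /* illustrative register index */
#define IMCTR_FLUSH  (1u << 1)   /* illustrative flush bit */

/* A single pretend context register file for the model. */
static uint32_t ctx_regs[16];

static uint32_t ctx_read_root(unsigned int reg)           { return ctx_regs[reg]; }
static void ctx_write_all(unsigned int reg, uint32_t val) { ctx_regs[reg] = val; }

/* Stand-in for the hardware completing the flush asynchronously. */
static void fake_hw_tick(void)
{
        ctx_regs[IMCTR] &= ~IMCTR_FLUSH;
}

/* Poll IMCTR until the flush bit self-clears, reporting a timeout,
 * in the spirit of the ipmmu_tlb_sync() fragments above. */
static void tlb_sync(void)
{
        int tries = 1000;

        while (ctx_read_root(IMCTR) & IMCTR_FLUSH) {
                if (--tries == 0) {
                        fprintf(stderr, "TLB sync timed out\n");
                        return;
                }
                fake_hw_tick();
        }
}

/* Kick a flush by setting the bit on both leaf and root, then wait. */
static void tlb_invalidate(void)
{
        uint32_t reg = ctx_read_root(IMCTR);

        reg |= IMCTR_FLUSH;
        ctx_write_all(IMCTR, reg);
        tlb_sync();
}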
280 static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
283 struct ipmmu_vmsa_device *mmu = domain->mmu;
293 ipmmu_imuctr_write(mmu, utlb, IMUCTR_TTSEL_MMU(domain->context_id) |
295 mmu->utlb_ctx[utlb] = domain->context_id;
300 struct ipmmu_vmsa_domain *domain = cookie;
302 ipmmu_tlb_invalidate(domain);
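
Lines 280-302 show two related pieces: ipmmu_utlb_enable() points one micro-TLB (uTLB) at the domain's hardware context by writing IMUCTR with a TTSEL field and records the assignment in mmu->utlb_ctx[], and the io-pgtable flush callback at 300 recovers the domain from the opaque cookie it was registered with. The sketch below illustrates only the cookie pattern and the bookkeeping; NUM_UTLBS and everything not named in the fragments is invented for the example.

#include <stdio.h>

struct vmsa_domain {
        unsigned int context_id;
};

/* io-pgtable style callback: the page-table library hands back the opaque
 * cookie the driver registered, which is simply its domain pointer. */
static void tlb_flush_all(void *cookie)
{
        struct vmsa_domain *domain = cookie;

        printf("invalidate TLB for context %u\n", domain->context_id);
}

/* Bookkeeping side of ipmmu_utlb_enable(): remember which context each
 * uTLB was pointed at (the real code also writes IMUCTR here). */
#define NUM_UTLBS 48
static unsigned int utlb_ctx[NUM_UTLBS];

static void utlb_enable(struct vmsa_domain *domain, unsigned int utlb)
{
        utlb_ctx[utlb] = domain->context_id;
}

int main(void)
{
        struct vmsa_domain d = { .context_id = 3 };

        utlb_enable(&d, 7);
        tlb_flush_all(&d);      /* as the io-pgtable layer would call it */
        return 0;
}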
321 struct ipmmu_vmsa_domain *domain)
330 mmu->domains[ret] = domain;
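
Lines 321-330 come from the context allocator: a free hardware context slot on the root MMU is reserved and the domain pointer is stored in mmu->domains[ret] so the fault handler can later map a context number back to its domain. A compact sketch of that idea, assuming a simple first-free-bit scan; the real driver uses the kernel's bitmap helpers under a spinlock.

#include <stdint.h>

#define NUM_CTX 8

struct vmsa_domain;

struct mmu_dev {
        uint32_t ctx_bitmap;                  /* bit set => context in use */
        struct vmsa_domain *domains[NUM_CTX]; /* context id -> owning domain */
};

/* Reserve a free context, record the domain, and return the slot index,
 * or -1 when every context is taken (the driver would return -EBUSY). */
static int domain_allocate_context(struct mmu_dev *mmu,
                                   struct vmsa_domain *domain)
{
        for (int i = 0; i < NUM_CTX; i++) {
                if (!(mmu->ctx_bitmap & (1u << i))) {
                        mmu->ctx_bitmap |= 1u << i;
                        mmu->domains[i] = domain;
                        return i;
                }
        }
        return -1;
}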
353 static void ipmmu_domain_setup_context(struct ipmmu_vmsa_domain *domain)
359 ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr;
360 ipmmu_ctx_write_root(domain, IMTTLBR0, ttbr);
361 ipmmu_ctx_write_root(domain, IMTTUBR0, ttbr >> 32);
368 if (domain->mmu->features->twobit_imttbcr_sl0)
373 if (domain->mmu->features->cache_snoop)
377 ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE | tmp);
380 ipmmu_ctx_write_root(domain, IMMAIR0,
381 domain->cfg.arm_lpae_s1_cfg.mair);
384 if (domain->mmu->features->setup_imbuscr)
385 ipmmu_ctx_write_root(domain, IMBUSCR,
386 ipmmu_ctx_read_root(domain, IMBUSCR) &
393 ipmmu_ctx_write_root(domain, IMSTR, ipmmu_ctx_read_root(domain, IMSTR));
402 ipmmu_ctx_write_all(domain, IMCTR,
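
Lines 353-402 program the hardware context: the 64-bit TTBR from the io-pgtable configuration is split across IMTTLBR0/IMTTUBR0, IMTTBCR is written with EAE plus an SL0 field whose encoding depends on the twobit_imttbcr_sl0 feature flag (and a snoop attribute when cache_snoop is set), MAIR lands in IMMAIR0, IMBUSCR is conditionally adjusted, stale faults are acknowledged by writing IMSTR back to itself, and finally IMCTR is written through the write-all helper to enable the context. The sketch below keeps only that ordering; all register indices and bit values are placeholders.

#include <stdint.h>

/* Placeholder register indices and bits; the driver's real encodings differ. */
enum { IMTTLBR0, IMTTUBR0, IMTTBCR, IMMAIR0, IMSTR, IMCTR, NUM_REGS };
#define IMTTBCR_EAE  (1u << 31)
#define IMCTR_INTEN  (1u << 2)
#define IMCTR_FLUSH  (1u << 1)
#define IMCTR_MMUEN  (1u << 0)

/* Single register file model; write-all and write-root behave the same
 * here because the model has no separate leaf instance. */
static uint32_t regs[NUM_REGS];
static uint32_t ctx_read_root(unsigned int r)           { return regs[r]; }
static void ctx_write_root(unsigned int r, uint32_t v)  { regs[r] = v; }
static void ctx_write_all(unsigned int r, uint32_t v)   { regs[r] = v; }

struct pgtbl_cfg {
        uint64_t ttbr;
        uint32_t mair;
};

/* Order of operations mirrored from the fragments above. */
static void domain_setup_context(const struct pgtbl_cfg *cfg, uint32_t sl0)
{
        /* Split the 64-bit table base across the low/high TTBR registers. */
        ctx_write_root(IMTTLBR0, (uint32_t)cfg->ttbr);
        ctx_write_root(IMTTUBR0, (uint32_t)(cfg->ttbr >> 32));

        /* Translation control: extended addressing plus the start level. */
        ctx_write_root(IMTTBCR, IMTTBCR_EAE | sl0);

        /* Memory attributes taken from the io-pgtable configuration. */
        ctx_write_root(IMMAIR0, cfg->mair);

        /* Acknowledge any stale faults by writing IMSTR back to itself. */
        ctx_write_root(IMSTR, ctx_read_root(IMSTR));

        /* Enable the context on both leaf and root instances. */
        ctx_write_all(IMCTR, IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);
}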
406 static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
421 domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
422 domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
423 domain->cfg.ias = 32;
424 domain->cfg.oas = 40;
425 domain->cfg.tlb = &ipmmu_flush_ops;
426 domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
427 domain->io_domain.geometry.force_aperture = true;
432 domain->cfg.coherent_walk = false;
433 domain->cfg.iommu_dev = domain->mmu->root->dev;
438 ret = ipmmu_domain_allocate_context(domain->mmu->root, domain);
442 domain->context_id = ret;
444 domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
445 domain);
446 if (!domain->iop) {
447 ipmmu_domain_free_context(domain->mmu->root,
448 domain->context_id);
452 ipmmu_domain_setup_context(domain);
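
Lines 406-452 set up a new domain: the io_pgtable_cfg is filled in (ARM 32-bit LPAE stage-1 format, 4K/2M/1G pages, 32-bit input and 40-bit output addresses, non-coherent walk, the root MMU's device as iommu_dev), a hardware context is reserved, alloc_io_pgtable_ops() is called with the domain itself as the cookie, and only then is the hardware programmed; if page-table allocation fails, the context slot is released again. A condensed sketch of that control flow, with the io-pgtable and driver helpers replaced by one-line stubs so the example stays self-contained:

#include <stdint.h>
#include <stddef.h>

struct pgtbl_cfg {
        uint64_t pgsize_bitmap;
        unsigned int ias, oas;
        int coherent_walk;
};

struct pgtbl_ops { int unused; };

struct vmsa_domain {
        struct pgtbl_cfg cfg;
        struct pgtbl_ops *iop;
        unsigned int context_id;
};

/* One-line stand-ins for the driver helpers and alloc_io_pgtable_ops();
 * the real signatures differ. */
static int allocate_context(struct vmsa_domain *d) { (void)d; return 0; }
static void free_context(unsigned int ctx)         { (void)ctx; }
static struct pgtbl_ops *alloc_pgtable(struct pgtbl_cfg *c, void *cookie)
{
        static struct pgtbl_ops ops;
        (void)c; (void)cookie;
        return &ops;
}
static void setup_context(struct vmsa_domain *d)   { (void)d; }

static int domain_init_context(struct vmsa_domain *domain)
{
        int ret;

        /* Page-table geometry from the fragments: 4K/2M/1G pages, 32-bit
         * input addresses, 40-bit output addresses, non-coherent walk. */
        domain->cfg.pgsize_bitmap = (1ull << 30) | (1ull << 21) | (1ull << 12);
        domain->cfg.ias = 32;
        domain->cfg.oas = 40;
        domain->cfg.coherent_walk = 0;

        /* Reserve a hardware context on the root MMU first ... */
        ret = allocate_context(domain);
        if (ret < 0)
                return ret;
        domain->context_id = (unsigned int)ret;

        /* ... then build the page tables with the domain as the cookie ... */
        domain->iop = alloc_pgtable(&domain->cfg, domain);
        if (!domain->iop) {
                free_context(domain->context_id);
                return -1;      /* the driver returns -EINVAL here */
        }

        /* ... and only then program the hardware context registers. */
        setup_context(domain);
        return 0;
}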
456 static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
458 if (!domain->mmu)
467 ipmmu_ctx_write_all(domain, IMCTR, IMCTR_FLUSH);
468 ipmmu_tlb_sync(domain);
469 ipmmu_domain_free_context(domain->mmu->root, domain->context_id);
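
Lines 456-469 are the reverse path: a domain that was never attached (domain->mmu still NULL) has nothing to undo; otherwise the context is disabled by writing only IMCTR_FLUSH (dropping the enable bit), the flush is allowed to drain, and the context slot goes back to the root MMU. Sketched with stubbed helpers:

struct mmu_dev;

struct vmsa_domain {
        struct mmu_dev *mmu;       /* NULL if the domain was never attached */
        unsigned int context_id;
};

/* One-line stand-ins so the sketch is self-contained. */
static void ctx_disable_and_flush(struct vmsa_domain *d) { (void)d; }
static void tlb_sync(struct vmsa_domain *d)              { (void)d; }
static void free_context(unsigned int ctx)               { (void)ctx; }

/* Teardown order implied by the fragments: disable the context with only
 * the flush bit set, wait for the flush, then release the slot. */
static void domain_destroy_context(struct vmsa_domain *domain)
{
        if (!domain->mmu)
                return;

        ctx_disable_and_flush(domain);    /* IMCTR := IMCTR_FLUSH */
        tlb_sync(domain);
        free_context(domain->context_id);
}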
476 static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
479 struct ipmmu_vmsa_device *mmu = domain->mmu;
483 status = ipmmu_ctx_read_root(domain, IMSTR);
487 iova = ipmmu_ctx_read_root(domain, IMELAR);
489 iova |= (u64)ipmmu_ctx_read_root(domain, IMEUAR) << 32;
497 ipmmu_ctx_write_root(domain, IMSTR, 0);
516 if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
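
Lines 476-516 form the per-context fault handler: IMSTR is read for the fault status, the faulting IOVA is assembled from IMELAR (low 32 bits) and IMEUAR (high bits), the status is cleared by writing zero back, and report_iommu_fault() gets a chance to resolve the fault before anything is logged. A sketch of the decode, with invented status bits and plain globals standing in for the context registers:

#include <stdint.h>
#include <stdio.h>

/* Invented status bits; the driver's IMSTR_* definitions differ. */
#define STAT_PAGE_FAULT   (1u << 0)
#define STAT_TRANSLATION  (1u << 1)

/* Pretend context registers for the model. */
static uint32_t reg_imstr, reg_imelar, reg_imeuar;

static int handle_domain_irq(void)
{
        uint32_t status = reg_imstr;
        uint64_t iova;

        if (!status)
                return 0;               /* IRQ_NONE: not this context */

        /* The fault address is split across a low and a high register. */
        iova = reg_imelar;
        iova |= (uint64_t)reg_imeuar << 32;

        /* Acknowledge the fault before reporting it. */
        reg_imstr = 0;

        /* The driver tries report_iommu_fault() first and only logs when
         * nobody handled the fault; here we just print. */
        if (status & (STAT_PAGE_FAULT | STAT_TRANSLATION))
                fprintf(stderr, "unhandled fault at 0x%llx (status 0x%x)\n",
                        (unsigned long long)iova, status);

        return 1;                       /* IRQ_HANDLED */
}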
556 struct ipmmu_vmsa_domain *domain;
561 domain = kzalloc(sizeof(*domain), GFP_KERNEL);
562 if (!domain)
565 mutex_init(&domain->mutex);
567 return &domain->io_domain;
572 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
575 * Free the domain resources. We assume that all devices have already
578 ipmmu_domain_destroy_context(domain);
579 free_io_pgtable_ops(domain->iop);
580 kfree(domain);
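
Lines 556-580 are the domain allocation and release hooks: the driver's domain structure is kzalloc()ed, its mutex initialized, and the embedded generic io_domain handed back to the IOMMU core; on free, the hardware context is destroyed and the io-pgtable ops released before the structure itself. The essential idiom is embedding the core's object and recovering the outer structure with container_of()-style arithmetic, illustrated below in user space (to_vmsa_domain() and io_domain are the only names taken from the fragments):

#include <stddef.h>
#include <stdlib.h>

/* Generic object owned by the core layer. */
struct io_domain { unsigned long aperture_end; };

/* Driver-private domain embedding the generic one, as in the driver. */
struct vmsa_domain {
        struct io_domain io_domain;
        unsigned int context_id;
};

/* Equivalent of to_vmsa_domain(): recover the outer structure from the
 * embedded member via offset arithmetic. */
static struct vmsa_domain *to_vmsa_domain(struct io_domain *io)
{
        return (struct vmsa_domain *)((char *)io -
                                      offsetof(struct vmsa_domain, io_domain));
}

static struct io_domain *domain_alloc(void)
{
        struct vmsa_domain *domain = calloc(1, sizeof(*domain));

        if (!domain)
                return NULL;
        /* the driver also does mutex_init(&domain->mutex) here */
        return &domain->io_domain;
}

static void domain_free(struct io_domain *io)
{
        struct vmsa_domain *domain = to_vmsa_domain(io);

        /* the driver destroys the hw context and frees the io-pgtable ops
         * before this point */
        free(domain);
}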
588 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
597 mutex_lock(&domain->mutex);
599 if (!domain->mmu) {
600 /* The domain hasn't been used yet, initialize it. */
601 domain->mmu = mmu;
602 ret = ipmmu_domain_init_context(domain);
605 domain->mmu = NULL;
608 domain->context_id);
610 } else if (domain->mmu != mmu) {
613 * different IOMMUs to the same domain.
617 dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id);
619 mutex_unlock(&domain->mutex);
625 ipmmu_utlb_enable(domain, fwspec->ids[i]);
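
Lines 588-625 are the attach path, taken under the domain mutex: a domain used for the first time adopts the attaching device's MMU and initializes a context (rolling domain->mmu back to NULL on failure), a device behind a different IPMMU instance is rejected because one domain cannot span two IOMMUs, and a repeat attach from the same MMU just notes that the existing context is being reused; afterwards every micro-TLB ID in the device's fwspec is enabled for the context. A sketch of that decision logic with the locking omitted and all helpers stubbed:

#include <stdio.h>

struct mmu_dev { const char *name; };

struct vmsa_domain {
        struct mmu_dev *mmu;        /* NULL until the first attach */
        unsigned int context_id;
};

/* Stand-ins for ipmmu_domain_init_context() and ipmmu_utlb_enable(). */
static int init_context(struct vmsa_domain *d)  { d->context_id = 0; return 0; }
static void utlb_enable(struct vmsa_domain *d, unsigned int utlb)
{
        printf("uTLB %u -> context %u\n", utlb, d->context_id);
}

/* Decision logic mirrored from the attach fragments; locking omitted. */
static int attach_dev(struct vmsa_domain *domain, struct mmu_dev *mmu,
                      const unsigned int *utlb_ids, unsigned int num_utlbs)
{
        if (!domain->mmu) {
                /* First use: bind the domain to this MMU and set it up. */
                domain->mmu = mmu;
                if (init_context(domain)) {
                        domain->mmu = NULL;
                        return -1;
                }
        } else if (domain->mmu != mmu) {
                /* A domain cannot span different IPMMU instances. */
                return -1;
        } else {
                printf("Reusing IPMMU context %u\n", domain->context_id);
        }

        for (unsigned int i = 0; i < num_utlbs; i++)
                utlb_enable(domain, utlb_ids[i]);

        return 0;
}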
634 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
636 return domain->iop->map_pages(domain->iop, iova, paddr, pgsize, pgcount,
644 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
646 return domain->iop->unmap_pages(domain->iop, iova, pgsize, pgcount, gather);
651 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
653 if (domain->mmu)
654 ipmmu_tlb_flush_all(domain);
666 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
670 return domain->iop->iova_to_phys(domain->iop, iova);
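
Lines 634-670 show that the map, unmap, flush and iova_to_phys callbacks are thin wrappers: each converts the generic domain back to the driver's and forwards to the io-pgtable ops (map_pages, unmap_pages, iova_to_phys), and flush_iotlb_all only touches hardware when the domain has actually been attached. A minimal illustration of the forwarding pattern, with a cut-down ops table standing in for struct io_pgtable_ops:

#include <stdint.h>
#include <stddef.h>

/* Minimal stand-in for the io-pgtable ops table the domain holds. */
struct pgtbl_ops {
        int (*map_pages)(struct pgtbl_ops *ops, unsigned long iova,
                         uint64_t paddr, size_t pgsize, size_t pgcount);
        size_t (*unmap_pages)(struct pgtbl_ops *ops, unsigned long iova,
                              size_t pgsize, size_t pgcount);
        uint64_t (*iova_to_phys)(struct pgtbl_ops *ops, unsigned long iova);
};

struct mmu_dev;

struct vmsa_domain {
        struct mmu_dev *mmu;     /* NULL until attached */
        struct pgtbl_ops *iop;
};

/* The IOMMU callbacks simply forward into the page-table library. */
static int domain_map(struct vmsa_domain *d, unsigned long iova,
                      uint64_t paddr, size_t pgsize, size_t pgcount)
{
        return d->iop->map_pages(d->iop, iova, paddr, pgsize, pgcount);
}

static size_t domain_unmap(struct vmsa_domain *d, unsigned long iova,
                           size_t pgsize, size_t pgcount)
{
        return d->iop->unmap_pages(d->iop, iova, pgsize, pgcount);
}

static uint64_t domain_iova_to_phys(struct vmsa_domain *d, unsigned long iova)
{
        return d->iop->iova_to_phys(d->iop, iova);
}

/* Flushing is a no-op for a domain that was never attached to hardware. */
static void domain_flush_all(struct vmsa_domain *d)
{
        if (d->mmu) {
                /* the driver calls ipmmu_tlb_flush_all(domain) here */
        }
}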
759 * VAs. This will allocate a corresponding IOMMU domain.