Lines matching refs:domain (uses of struct ipmmu_vmsa_domain in the Renesas IPMMU VMSA driver, drivers/iommu/ipmmu-vmsa.c)

210 static u32 ipmmu_ctx_read_root(struct ipmmu_vmsa_domain *domain,
213 return ipmmu_ctx_read(domain->mmu->root, domain->context_id, reg);
216 static void ipmmu_ctx_write_root(struct ipmmu_vmsa_domain *domain,
219 ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
222 static void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain,
225 if (domain->mmu != domain->mmu->root)
226 ipmmu_ctx_write(domain->mmu, domain->context_id, reg, data);
228 ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
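
The hits at 210-228 are the per-context register helpers: context registers are always accessed through the root IPMMU instance, and ipmmu_ctx_write_all() additionally mirrors the write to the cache (leaf) instance when the domain sits behind one. A minimal sketch of that pattern, assuming ipmmu_ctx_read()/ipmmu_ctx_write() are the driver's low-level per-context MMIO accessors:

/* Sketch only: context registers live on the root IPMMU; writes are
 * mirrored to the cache (leaf) IPMMU when the domain is behind one. */
static u32 ipmmu_ctx_read_root(struct ipmmu_vmsa_domain *domain,
                               unsigned int reg)
{
        return ipmmu_ctx_read(domain->mmu->root, domain->context_id, reg);
}

static void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain,
                                unsigned int reg, u32 data)
{
        /* Write the cache IPMMU first when it is distinct from the root... */
        if (domain->mmu != domain->mmu->root)
                ipmmu_ctx_write(domain->mmu, domain->context_id, reg, data);

        /* ...then the root IPMMU, which owns the context. */
        ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
}
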
253 static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
257 while (ipmmu_ctx_read_root(domain, IMCTR) & IMCTR_FLUSH) {
260 dev_err_ratelimited(domain->mmu->dev,
268 static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)
272 reg = ipmmu_ctx_read_root(domain, IMCTR);
274 ipmmu_ctx_write_all(domain, IMCTR, reg);
276 ipmmu_tlb_sync(domain);
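
Lines 253-276 are the TLB maintenance pair: invalidate raises IMCTR_FLUSH on every instance, and sync polls the root IPMMU until the hardware clears the bit again. A hedged sketch, with TLB_LOOP_TIMEOUT standing in for whatever retry bound the driver actually uses:

static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
{
        unsigned int count = 0;

        /* Busy-wait until the hardware clears IMCTR_FLUSH on the root. */
        while (ipmmu_ctx_read_root(domain, IMCTR) & IMCTR_FLUSH) {
                cpu_relax();
                if (++count == TLB_LOOP_TIMEOUT) {      /* assumed bound */
                        dev_err_ratelimited(domain->mmu->dev,
                                            "TLB sync timed out, MMU may be stuck\n");
                        return;
                }
                udelay(1);
        }
}

static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)
{
        u32 reg = ipmmu_ctx_read_root(domain, IMCTR);

        /* Request a flush on the root and, if present, the cache IPMMU. */
        reg |= IMCTR_FLUSH;
        ipmmu_ctx_write_all(domain, IMCTR, reg);

        ipmmu_tlb_sync(domain);
}
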
282 static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
285 struct ipmmu_vmsa_device *mmu = domain->mmu;
295 ipmmu_imuctr_write(mmu, utlb, IMUCTR_TTSEL_MMU(domain->context_id) |
297 mmu->utlb_ctx[utlb] = domain->context_id;
303 static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
306 struct ipmmu_vmsa_device *mmu = domain->mmu;
314 struct ipmmu_vmsa_domain *domain = cookie;
316 ipmmu_tlb_invalidate(domain);
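
Lines 282-316 point a micro-TLB at the domain's hardware context and pass the domain through as the io-pgtable flush cookie. Roughly, using the IMUCTR bit names visible in the matches:

static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
                              unsigned int utlb)
{
        struct ipmmu_vmsa_device *mmu = domain->mmu;

        /* Select the context for this micro-TLB, flush it and enable it. */
        ipmmu_imuctr_write(mmu, utlb, IMUCTR_TTSEL_MMU(domain->context_id) |
                                      IMUCTR_FLUSH | IMUCTR_MMUEN);
        mmu->utlb_ctx[utlb] = domain->context_id;
}

/* io-pgtable flush callback: the cookie is the owning domain. */
static void ipmmu_tlb_flush_all(void *cookie)
{
        struct ipmmu_vmsa_domain *domain = cookie;

        ipmmu_tlb_invalidate(domain);
}
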
336 struct ipmmu_vmsa_domain *domain)
345 mmu->domains[ret] = domain;
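
Lines 336-345 belong to hardware-context allocation; only the domains[] assignment shows up in the matches. The surrounding bookkeeping is sketched below under the assumption that the device keeps a context bitmap (mmu->ctx), a context count (mmu->num_ctx) and a spinlock (mmu->lock):

static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu,
                                         struct ipmmu_vmsa_domain *domain)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&mmu->lock, flags);

        /* Hand out the first free context ID and remember which domain
         * owns it, so the IRQ handler can route faults back to it. */
        ret = find_first_zero_bit(mmu->ctx, mmu->num_ctx);
        if (ret != mmu->num_ctx) {
                mmu->domains[ret] = domain;
                set_bit(ret, mmu->ctx);
        } else {
                ret = -EBUSY;
        }

        spin_unlock_irqrestore(&mmu->lock, flags);

        return ret;
}
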
368 static void ipmmu_domain_setup_context(struct ipmmu_vmsa_domain *domain)
374 ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr;
375 ipmmu_ctx_write_root(domain, IMTTLBR0, ttbr);
376 ipmmu_ctx_write_root(domain, IMTTUBR0, ttbr >> 32);
383 if (domain->mmu->features->twobit_imttbcr_sl0)
388 if (domain->mmu->features->cache_snoop)
392 ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE | tmp);
395 ipmmu_ctx_write_root(domain, IMMAIR0,
396 domain->cfg.arm_lpae_s1_cfg.mair);
399 if (domain->mmu->features->setup_imbuscr)
400 ipmmu_ctx_write_root(domain, IMBUSCR,
401 ipmmu_ctx_read_root(domain, IMBUSCR) &
408 ipmmu_ctx_write_root(domain, IMSTR, ipmmu_ctx_read_root(domain, IMSTR));
417 ipmmu_ctx_write_all(domain, IMCTR,
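
Lines 368-417 are the context-setup sequence: program the page-table base and translation attributes on the root IPMMU, clear stale fault status, then enable the context on every instance. A condensed sketch (the optional IMBUSCR fixup at lines 399-401 is elided, and the IMTTBCR bit names are assumptions based on the feature flags in the matches):

static void ipmmu_domain_setup_context(struct ipmmu_vmsa_domain *domain)
{
        u64 ttbr;
        u32 tmp = 0;

        /* TTBR0: the 64-bit table address is split across two registers. */
        ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr;
        ipmmu_ctx_write_root(domain, IMTTLBR0, ttbr);
        ipmmu_ctx_write_root(domain, IMTTUBR0, ttbr >> 32);

        /* TTBCR: start level, plus cacheable walks where supported. */
        if (domain->mmu->features->twobit_imttbcr_sl0)
                tmp |= IMTTBCR_SL0_TWOBIT_LVL_1;
        else
                tmp |= IMTTBCR_SL0_LVL_1;
        if (domain->mmu->features->cache_snoop)
                tmp |= IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
                       IMTTBCR_IRGN0_WB_WA;
        ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE | tmp);

        /* MAIR0: memory attributes from the io-pgtable configuration. */
        ipmmu_ctx_write_root(domain, IMMAIR0,
                             domain->cfg.arm_lpae_s1_cfg.mair);

        /* Clear any stale fault status before enabling the context. */
        ipmmu_ctx_write_root(domain, IMSTR, ipmmu_ctx_read_root(domain, IMSTR));

        /* Enable translation and fault interrupts on all instances. */
        ipmmu_ctx_write_all(domain, IMCTR,
                            IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);
}
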
421 static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
436 domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
437 domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
438 domain->cfg.ias = 32;
439 domain->cfg.oas = 40;
440 domain->cfg.tlb = &ipmmu_flush_ops;
441 domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
442 domain->io_domain.geometry.force_aperture = true;
447 domain->cfg.coherent_walk = false;
448 domain->cfg.iommu_dev = domain->mmu->root->dev;
453 ret = ipmmu_domain_allocate_context(domain->mmu->root, domain);
457 domain->context_id = ret;
459 domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
460 domain);
461 if (!domain->iop) {
462 ipmmu_domain_free_context(domain->mmu->root,
463 domain->context_id);
467 ipmmu_domain_setup_context(domain);
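
Lines 421-467 show lazy context initialisation: build an ARM 32-bit LPAE stage-1 io-pgtable configuration (<linux/io-pgtable.h>), reserve a hardware context on the root IPMMU, then allocate the page-table ops and program the hardware. A sketch along those lines; the -EINVAL fallback is an assumption:

static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
{
        int ret;

        /* 32-bit IOVAs, 40-bit physical addresses, non-secure tables. */
        domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
        domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
        domain->cfg.ias = 32;
        domain->cfg.oas = 40;
        domain->cfg.tlb = &ipmmu_flush_ops;
        domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
        domain->io_domain.geometry.force_aperture = true;

        /* Table walks are not coherent; allocate against the root IPMMU. */
        domain->cfg.coherent_walk = false;
        domain->cfg.iommu_dev = domain->mmu->root->dev;

        ret = ipmmu_domain_allocate_context(domain->mmu->root, domain);
        if (ret < 0)
                return ret;
        domain->context_id = ret;

        domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
                                           domain);
        if (!domain->iop) {
                ipmmu_domain_free_context(domain->mmu->root,
                                          domain->context_id);
                return -EINVAL;
        }

        ipmmu_domain_setup_context(domain);
        return 0;
}
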
471 static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
473 if (!domain->mmu)
482 ipmmu_ctx_write_all(domain, IMCTR, IMCTR_FLUSH);
483 ipmmu_tlb_sync(domain);
484 ipmmu_domain_free_context(domain->mmu->root, domain->context_id);
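
Lines 471-484 undo the above: disable the context (leaving only the flush request), wait for the TLB flush to finish, then release the context ID. Sketch:

static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
{
        if (!domain->mmu)
                return;         /* never attached, nothing to tear down */

        /* Disable the context while flushing the TLB, then wait for it. */
        ipmmu_ctx_write_all(domain, IMCTR, IMCTR_FLUSH);
        ipmmu_tlb_sync(domain);
        ipmmu_domain_free_context(domain->mmu->root, domain->context_id);
}
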
491 static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
494 struct ipmmu_vmsa_device *mmu = domain->mmu;
498 status = ipmmu_ctx_read_root(domain, IMSTR);
502 iova = ipmmu_ctx_read_root(domain, IMELAR);
504 iova |= (u64)ipmmu_ctx_read_root(domain, IMEUAR) << 32;
512 ipmmu_ctx_write_root(domain, IMSTR, 0);
531 if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
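
Lines 491-531 are the per-context fault handler: read IMSTR, assemble the 64-bit fault address from IMELAR/IMEUAR, clear the status, and let the IOMMU core notify any registered fault handler. A condensed sketch; the err_mask bits and the log text are assumptions:

static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
{
        const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF;
        struct ipmmu_vmsa_device *mmu = domain->mmu;
        unsigned long iova;
        u32 status;

        status = ipmmu_ctx_read_root(domain, IMSTR);
        if (!(status & err_mask))
                return IRQ_NONE;

        /* The fault address is split across two 32-bit registers. */
        iova = ipmmu_ctx_read_root(domain, IMELAR);
        if (IS_ENABLED(CONFIG_64BIT))
                iova |= (u64)ipmmu_ctx_read_root(domain, IMEUAR) << 32;

        /* Clear the error flags before reporting. */
        ipmmu_ctx_write_root(domain, IMSTR, 0);

        /* Give the IOMMU core a chance to handle the fault first. */
        if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
                return IRQ_HANDLED;

        dev_err_ratelimited(mmu->dev,
                            "unhandled fault: status 0x%08x iova 0x%lx\n",
                            status, iova);
        return IRQ_HANDLED;
}
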
571 struct ipmmu_vmsa_domain *domain;
573 domain = kzalloc(sizeof(*domain), GFP_KERNEL);
574 if (!domain)
577 mutex_init(&domain->mutex);
579 return &domain->io_domain;
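
Lines 571-579 show that allocating a domain does no hardware work; everything is deferred to the first attach. Sketch (the domain-type check is elided):

static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
{
        struct ipmmu_vmsa_domain *domain;

        domain = kzalloc(sizeof(*domain), GFP_KERNEL);
        if (!domain)
                return NULL;

        /* Serialises lazy context initialisation on first attach. */
        mutex_init(&domain->mutex);

        return &domain->io_domain;
}
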
605 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
608 * Free the domain resources. We assume that all devices have already
612 ipmmu_domain_destroy_context(domain);
613 free_io_pgtable_ops(domain->iop);
614 kfree(domain);
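
Lines 605-614 free the domain; teardown mirrors allocation. Sketch:

static void ipmmu_domain_free(struct iommu_domain *io_domain)
{
        struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

        /*
         * Free the domain resources. We assume that all devices have already
         * been detached.
         */
        ipmmu_domain_destroy_context(domain);
        free_io_pgtable_ops(domain->iop);
        kfree(domain);
}
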
622 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
631 mutex_lock(&domain->mutex);
633 if (!domain->mmu) {
634 /* The domain hasn't been used yet, initialize it. */
635 domain->mmu = mmu;
636 ret = ipmmu_domain_init_context(domain);
639 domain->mmu = NULL;
642 domain->context_id);
644 } else if (domain->mmu != mmu) {
647 * different IOMMUs to the same domain.
649 dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n",
650 dev_name(mmu->dev), dev_name(domain->mmu->dev));
653 dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id);
655 mutex_unlock(&domain->mutex);
661 ipmmu_utlb_enable(domain, fwspec->ids[i]);
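
Lines 622-661 are the attach path: initialise the context lazily under the domain mutex, reject devices that sit behind a different IPMMU instance, then enable every micro-TLB listed in the device's fwspec. A sketch, with to_ipmmu() standing in for the driver's per-device IPMMU lookup:

static int ipmmu_attach_device(struct iommu_domain *io_domain,
                               struct device *dev)
{
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
        struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);  /* assumed helper */
        struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
        unsigned int i;
        int ret = 0;

        mutex_lock(&domain->mutex);

        if (!domain->mmu) {
                /* The domain hasn't been used yet, initialize it. */
                domain->mmu = mmu;
                ret = ipmmu_domain_init_context(domain);
                if (ret < 0) {
                        dev_err(dev, "Unable to initialize IPMMU context\n");
                        domain->mmu = NULL;
                } else {
                        dev_info(dev, "Using IPMMU context %u\n",
                                 domain->context_id);
                }
        } else if (domain->mmu != mmu) {
                /*
                 * Something is wrong, we can't attach two devices using
                 * different IOMMUs to the same domain.
                 */
                dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n",
                        dev_name(mmu->dev), dev_name(domain->mmu->dev));
                ret = -EINVAL;
        } else {
                dev_info(dev, "Reusing IPMMU context %u\n",
                         domain->context_id);
        }

        mutex_unlock(&domain->mutex);

        if (ret < 0)
                return ret;

        /* Point each of the device's micro-TLBs at the context. */
        for (i = 0; i < fwspec->num_ids; ++i)
                ipmmu_utlb_enable(domain, fwspec->ids[i]);

        return 0;
}
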
670 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
674 ipmmu_utlb_disable(domain, fwspec->ids[i]);
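
Lines 670-674 are the detach path, which simply disables the device's micro-TLBs; the hardware context stays allocated until the domain is freed. Sketch:

static void ipmmu_detach_device(struct iommu_domain *io_domain,
                                struct device *dev)
{
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
        struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
        unsigned int i;

        for (i = 0; i < fwspec->num_ids; ++i)
                ipmmu_utlb_disable(domain, fwspec->ids[i]);
}
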
684 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
686 if (!domain)
689 return domain->iop->map(domain->iop, iova, paddr, size, prot, gfp);
695 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
697 return domain->iop->unmap(domain->iop, iova, size, gather);
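
Lines 684-697: map and unmap are thin wrappers that delegate to the ARM LPAE io-pgtable ops built in ipmmu_domain_init_context(). Sketch matching the signatures visible in the matches:

static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
                     phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
        struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

        if (!domain)
                return -ENODEV;

        return domain->iop->map(domain->iop, iova, paddr, size, prot, gfp);
}

static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
                          size_t size, struct iommu_iotlb_gather *gather)
{
        struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

        return domain->iop->unmap(domain->iop, iova, size, gather);
}
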
702 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
704 if (domain->mmu)
705 ipmmu_tlb_flush_all(domain);
717 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
721 return domain->iop->iova_to_phys(domain->iop, iova);
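
Lines 702-721: the full TLB flush is skipped until a hardware context exists, and address translation queries go straight to the io-pgtable walker. Sketch:

static void ipmmu_flush_iotlb_all(struct iommu_domain *io_domain)
{
        struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

        /* Nothing to flush before the first attach creates a context. */
        if (domain->mmu)
                ipmmu_tlb_flush_all(domain);
}

static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
                                      dma_addr_t iova)
{
        struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

        return domain->iop->iova_to_phys(domain->iop, iova);
}
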
815 * VAs. This will allocate a corresponding IOMMU domain.