Lines Matching refs:mmu (drivers/iommu/ipmmu-vmsa.c, the Renesas IPMMU-VMSA driver)
74 struct ipmmu_vmsa_device *mmu;
152 static bool ipmmu_is_root(struct ipmmu_vmsa_device *mmu)
154 return mmu->root == mmu;
159 struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
162 if (ipmmu_is_root(mmu))
163 *rootp = mmu;
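The fragments at 152-163 are the root-instance discovery: an IPMMU is the root when mmu->root points back at itself, and leaf instances find the root by iterating over all probed devices of the driver. A minimal sketch of how these pieces fit together, assuming the callback and lookup names (__ipmmu_check_device, ipmmu_find_root, ipmmu_driver), which do not appear in the matches above:

        static bool ipmmu_is_root(struct ipmmu_vmsa_device *mmu)
        {
                /* The root IPMMU is its own root. */
                return mmu->root == mmu;
        }

        /* Callback: record any probed instance that reports itself as root. */
        static int __ipmmu_check_device(struct device *dev, void *data)
        {
                struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
                struct ipmmu_vmsa_device **rootp = data;

                if (ipmmu_is_root(mmu))
                        *rootp = mmu;

                return 0;
        }

        static struct ipmmu_vmsa_device *ipmmu_find_root(void)
        {
                struct ipmmu_vmsa_device *root = NULL;

                return driver_for_each_device(&ipmmu_driver.driver, NULL,
                                              &root, __ipmmu_check_device) == 0 ?
                       root : NULL;
        }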
180 static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)
182 return ioread32(mmu->base + offset);
185 static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
188 iowrite32(data, mmu->base + offset);
191 static unsigned int ipmmu_ctx_reg(struct ipmmu_vmsa_device *mmu,
194 return mmu->features->ctx_offset_base +
195 context_id * mmu->features->ctx_offset_stride + reg;
198 static u32 ipmmu_ctx_read(struct ipmmu_vmsa_device *mmu,
201 return ipmmu_read(mmu, ipmmu_ctx_reg(mmu, context_id, reg));
204 static void ipmmu_ctx_write(struct ipmmu_vmsa_device *mmu,
207 ipmmu_write(mmu, ipmmu_ctx_reg(mmu, context_id, reg), data);
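Lines 180-207 form the MMIO accessor layer: raw 32-bit reads and writes against mmu->base, plus per-context variants that compute a register's offset from the per-SoC ctx_offset_base and ctx_offset_stride feature fields. Assembled from the fragments above (the full parameter lists are assumptions where the matches truncate them):

        static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)
        {
                return ioread32(mmu->base + offset);
        }

        static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
                                u32 data)
        {
                iowrite32(data, mmu->base + offset);
        }

        /* Per-context registers live at a feature-dependent base + stride. */
        static unsigned int ipmmu_ctx_reg(struct ipmmu_vmsa_device *mmu,
                                          unsigned int context_id, unsigned int reg)
        {
                return mmu->features->ctx_offset_base +
                       context_id * mmu->features->ctx_offset_stride + reg;
        }

        static u32 ipmmu_ctx_read(struct ipmmu_vmsa_device *mmu,
                                  unsigned int context_id, unsigned int reg)
        {
                return ipmmu_read(mmu, ipmmu_ctx_reg(mmu, context_id, reg));
        }

        static void ipmmu_ctx_write(struct ipmmu_vmsa_device *mmu,
                                    unsigned int context_id, unsigned int reg,
                                    u32 data)
        {
                ipmmu_write(mmu, ipmmu_ctx_reg(mmu, context_id, reg), data);
        }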
213 return ipmmu_ctx_read(domain->mmu->root, domain->context_id, reg);
219 ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
225 if (domain->mmu != domain->mmu->root)
226 ipmmu_ctx_write(domain->mmu, domain->context_id, reg, data);
228 ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
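Lines 213-228 show the leaf/root split at the domain level: context reads always target the root instance, while the "write all" path first mirrors the write to the leaf IPMMU the domain is attached to (when that is not the root) and then updates the root. A sketch, with the wrapper names (ipmmu_ctx_read_root, ipmmu_ctx_write_all) assumed from context:

        static u32 ipmmu_ctx_read_root(struct ipmmu_vmsa_domain *domain,
                                       unsigned int reg)
        {
                return ipmmu_ctx_read(domain->mmu->root, domain->context_id, reg);
        }

        static void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain,
                                        unsigned int reg, u32 data)
        {
                /* Mirror the write to the leaf instance when it isn't the root... */
                if (domain->mmu != domain->mmu->root)
                        ipmmu_ctx_write(domain->mmu, domain->context_id, reg, data);

                /* ...and always update the root instance. */
                ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
        }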
231 static u32 ipmmu_utlb_reg(struct ipmmu_vmsa_device *mmu, unsigned int reg)
233 return mmu->features->utlb_offset_base + reg;
236 static void ipmmu_imuasid_write(struct ipmmu_vmsa_device *mmu,
239 ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUASID(utlb)), data);
242 static void ipmmu_imuctr_write(struct ipmmu_vmsa_device *mmu,
245 ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUCTR(utlb)), data);
260 dev_err_ratelimited(domain->mmu->dev,
285 struct ipmmu_vmsa_device *mmu = domain->mmu;
293 ipmmu_imuasid_write(mmu, utlb, 0);
295 ipmmu_imuctr_write(mmu, utlb, IMUCTR_TTSEL_MMU(domain->context_id) |
297 mmu->utlb_ctx[utlb] = domain->context_id;
306 struct ipmmu_vmsa_device *mmu = domain->mmu;
308 ipmmu_imuctr_write(mmu, utlb, 0);
309 mmu->utlb_ctx[utlb] = IPMMU_CTX_INVALID;
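Lines 231-309 cover micro-TLB programming: ipmmu_utlb_reg adds the per-SoC utlb_offset_base, and each micro-TLB is routed to a translation context through IMUCTR's TTSEL field, with mmu->utlb_ctx[] keeping a shadow copy of the assignment for resume. A sketch of the enable/disable pair; the IMUCTR_FLUSH and IMUCTR_MMUEN flag names are assumptions beyond what the matches show:

        static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
                                      unsigned int utlb)
        {
                struct ipmmu_vmsa_device *mmu = domain->mmu;

                /* No ASID tagging: match all requests from this micro-TLB. */
                ipmmu_imuasid_write(mmu, utlb, 0);
                /* Route the micro-TLB to the domain's translation context. */
                ipmmu_imuctr_write(mmu, utlb, IMUCTR_TTSEL_MMU(domain->context_id) |
                                              IMUCTR_FLUSH | IMUCTR_MMUEN);
                mmu->utlb_ctx[utlb] = domain->context_id;
        }

        static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
                                       unsigned int utlb)
        {
                struct ipmmu_vmsa_device *mmu = domain->mmu;

                ipmmu_imuctr_write(mmu, utlb, 0);
                mmu->utlb_ctx[utlb] = IPMMU_CTX_INVALID;
        }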
335 static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu,
341 spin_lock_irqsave(&mmu->lock, flags);
343 ret = find_first_zero_bit(mmu->ctx, mmu->num_ctx);
344 if (ret != mmu->num_ctx) {
345 mmu->domains[ret] = domain;
346 set_bit(ret, mmu->ctx);
350 spin_unlock_irqrestore(&mmu->lock, flags);
355 static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu,
360 spin_lock_irqsave(&mmu->lock, flags);
362 clear_bit(context_id, mmu->ctx);
363 mmu->domains[context_id] = NULL;
365 spin_unlock_irqrestore(&mmu->lock, flags);
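Lines 335-365 implement hardware-context assignment as a bitmap allocator under mmu->lock: find the first free context index, claim the bit, and record the owning domain so the IRQ handler can route faults back to it. A sketch assembled from the fragments; the -EBUSY fallback for a full bitmap is an assumption:

        static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu,
                                                 struct ipmmu_vmsa_domain *domain)
        {
                unsigned long flags;
                int ret;

                spin_lock_irqsave(&mmu->lock, flags);

                ret = find_first_zero_bit(mmu->ctx, mmu->num_ctx);
                if (ret != mmu->num_ctx) {
                        mmu->domains[ret] = domain;
                        set_bit(ret, mmu->ctx);
                } else {
                        ret = -EBUSY;
                }

                spin_unlock_irqrestore(&mmu->lock, flags);

                return ret;
        }

        static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu,
                                              unsigned int context_id)
        {
                unsigned long flags;

                spin_lock_irqsave(&mmu->lock, flags);

                clear_bit(context_id, mmu->ctx);
                mmu->domains[context_id] = NULL;

                spin_unlock_irqrestore(&mmu->lock, flags);
        }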
383 if (domain->mmu->features->twobit_imttbcr_sl0)
388 if (domain->mmu->features->cache_snoop)
399 if (domain->mmu->features->setup_imbuscr)
448 domain->cfg.iommu_dev = domain->mmu->root->dev;
453 ret = ipmmu_domain_allocate_context(domain->mmu->root, domain);
462 ipmmu_domain_free_context(domain->mmu->root,
473 if (!domain->mmu)
484 ipmmu_domain_free_context(domain->mmu->root, domain->context_id);
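Lines 383-484 tie domain initialization to the root IPMMU: the io-pgtable config is bound to the root device (only the root walks page tables), the context is allocated on the root, and it is released again if page-table allocation fails or when the domain is destroyed. A condensed sketch of that flow; the io-pgtable call and format (ARM_32_LPAE_S1) are assumptions consistent with a VMSA stage-1 driver, not shown in the matches:

        static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
        {
                int ret;

                /* Page tables are walked by the root IPMMU. */
                domain->cfg.iommu_dev = domain->mmu->root->dev;

                /* Find an unused hardware context on the root. */
                ret = ipmmu_domain_allocate_context(domain->mmu->root, domain);
                if (ret < 0)
                        return ret;

                domain->context_id = ret;

                domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
                                                   domain);
                if (!domain->iop) {
                        ipmmu_domain_free_context(domain->mmu->root,
                                                  domain->context_id);
                        return -EINVAL;
                }

                ipmmu_domain_setup_context(domain);
                return 0;
        }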
494 struct ipmmu_vmsa_device *mmu = domain->mmu;
516 dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%lx\n",
519 dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%lx\n",
531 if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
534 dev_err_ratelimited(mmu->dev,
543 struct ipmmu_vmsa_device *mmu = dev;
548 spin_lock_irqsave(&mmu->lock, flags);
553 for (i = 0; i < mmu->num_ctx; i++) {
554 if (!mmu->domains[i])
556 if (ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED)
560 spin_unlock_irqrestore(&mmu->lock, flags);
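Lines 494-560 are the fault path: the per-domain handler reports multiple-TLB-hit and page-table-walk-abort conditions (falling back to dev_err_ratelimited when report_iommu_fault has no consumer), while the top-level handler walks every allocated context under mmu->lock to find the domain that faulted. A sketch of the dispatcher:

        static irqreturn_t ipmmu_irq(int irq, void *dev)
        {
                struct ipmmu_vmsa_device *mmu = dev;
                irqreturn_t status = IRQ_NONE;
                unsigned long flags;
                unsigned int i;

                spin_lock_irqsave(&mmu->lock, flags);

                /* Check every allocated context for a pending fault. */
                for (i = 0; i < mmu->num_ctx; i++) {
                        if (!mmu->domains[i])
                                continue;
                        if (ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED)
                                status = IRQ_HANDLED;
                }

                spin_unlock_irqrestore(&mmu->lock, flags);

                return status;
        }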
621 struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
626 if (!mmu) {
633 if (!domain->mmu) {
635 domain->mmu = mmu;
639 domain->mmu = NULL;
644 } else if (domain->mmu != mmu) {
650 dev_name(mmu->dev), dev_name(domain->mmu->dev));
704 if (domain->mmu)
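Lines 621-650 enforce the one-IPMMU-per-domain rule: the first attach latches domain->mmu and initializes the context (rolling back on failure), while an attach from a different IPMMU instance is rejected; the lone check at 704 is the matching guard that skips TLB maintenance on a domain that was never attached. A sketch of the attach core; the domain mutex and the fwspec-driven micro-TLB loop are assumptions beyond the matches:

        static int ipmmu_attach_device(struct iommu_domain *io_domain,
                                       struct device *dev)
        {
                struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
                struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
                struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
                unsigned int i;
                int ret = 0;

                if (!mmu) {
                        dev_err(dev, "Cannot attach to IPMMU\n");
                        return -ENXIO;
                }

                mutex_lock(&domain->mutex);

                if (!domain->mmu) {
                        /* First attach: bind the domain to this IPMMU instance. */
                        domain->mmu = mmu;
                        ret = ipmmu_domain_init_context(domain);
                        if (ret < 0)
                                domain->mmu = NULL;
                } else if (domain->mmu != mmu) {
                        /* Devices behind different IPMMUs can't share a domain. */
                        dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n",
                                dev_name(mmu->dev), dev_name(domain->mmu->dev));
                        ret = -EINVAL;
                }

                mutex_unlock(&domain->mutex);

                if (ret < 0)
                        return ret;

                /* Route each of the device's micro-TLBs to the domain's context. */
                for (i = 0; i < fwspec->num_ids; ++i)
                        ipmmu_utlb_enable(domain, fwspec->ids[i]);

                return 0;
        }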
810 struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
822 if (!mmu->mapping) {
828 dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
833 mmu->mapping = mapping;
837 ret = arm_iommu_attach_device(dev, mmu->mapping);
846 if (mmu->mapping)
847 arm_iommu_release_mapping(mmu->mapping);
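Lines 810-847 handle 32-bit ARM, which lacks default IOMMU-backed DMA ops: one arm_iommu mapping is created lazily per IPMMU instance, every client device is attached to it, and the mapping is released on error. A sketch using ARM's arm_iommu_* helpers; the 1 GiB base / 2 GiB size of the mapping window is an assumption:

        static int ipmmu_init_arm_mapping(struct device *dev)
        {
                struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
                int ret;

                /* Create the shared mapping on first use. */
                if (!mmu->mapping) {
                        struct dma_iommu_mapping *mapping;

                        mapping = arm_iommu_create_mapping(&platform_bus_type,
                                                           SZ_1G, SZ_2G);
                        if (IS_ERR(mapping)) {
                                dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
                                ret = PTR_ERR(mapping);
                                goto error;
                        }

                        mmu->mapping = mapping;
                }

                /* Attach the device to the shared mapping. */
                ret = arm_iommu_attach_device(dev, mmu->mapping);
                if (ret < 0) {
                        dev_err(dev, "Failed to attach device to VA mapping\n");
                        goto error;
                }

                return 0;

        error:
                if (mmu->mapping)
                        arm_iommu_release_mapping(mmu->mapping);

                return ret;
        }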
854 struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
859 if (!mmu)
862 return &mmu->iommu;
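Lines 854-862 are the probe_device hook in full: devices not wired to an IPMMU (to_ipmmu returns NULL) are rejected, everything else gets the instance's iommu_device. A sketch; the ERR_PTR(-ENODEV) return is an assumption matching the core's contract:

        static struct iommu_device *ipmmu_probe_device(struct device *dev)
        {
                struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);

                /* Only devices verified during of_xlate() have a valid mmu. */
                if (!mmu)
                        return ERR_PTR(-ENODEV);

                return &mmu->iommu;
        }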
883 struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
886 if (mmu->group)
887 return iommu_group_ref_get(mmu->group);
891 mmu->group = group;
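Lines 883-891 cache a single iommu_group per IPMMU instance, so every client behind one IPMMU lands in the same group: it is allocated on first request and reference-counted on reuse. A sketch, assuming the function name:

        static struct iommu_group *ipmmu_find_group(struct device *dev)
        {
                struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
                struct iommu_group *group;

                /* Reuse the instance's group once it exists. */
                if (mmu->group)
                        return iommu_group_ref_get(mmu->group);

                group = iommu_group_alloc();
                if (!IS_ERR(group))
                        mmu->group = group;

                return group;
        }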
919 static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu)
924 for (i = 0; i < mmu->num_ctx; ++i)
925 ipmmu_ctx_write(mmu, i, IMCTR, 0);
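Lines 919-925 quiesce the hardware by clearing IMCTR for every context, which disables translation until domains are (re)programmed. The fragments give the function almost whole:

        static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu)
        {
                unsigned int i;

                /* Disable all contexts. */
                for (i = 0; i < mmu->num_ctx; ++i)
                        ipmmu_ctx_write(mmu, i, IMCTR, 0);
        }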
1000 struct ipmmu_vmsa_device *mmu;
1005 mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL);
1006 if (!mmu) {
1011 mmu->dev = &pdev->dev;
1012 spin_lock_init(&mmu->lock);
1013 bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);
1014 mmu->features = of_device_get_match_data(&pdev->dev);
1015 memset(mmu->utlb_ctx, IPMMU_CTX_INVALID, mmu->features->num_utlbs);
1022 mmu->base = devm_ioremap_resource(&pdev->dev, res);
1023 if (IS_ERR(mmu->base))
1024 return PTR_ERR(mmu->base);
1038 if (mmu->features->use_ns_alias_offset)
1039 mmu->base += IM_NS_ALIAS_OFFSET;
1041 mmu->num_ctx = min(IPMMU_CTX_MAX, mmu->features->number_of_contexts);
1047 if (!mmu->features->has_cache_leaf_nodes ||
1049 mmu->root = mmu;
1051 mmu->root = ipmmu_find_root();
1056 if (!mmu->root)
1060 if (ipmmu_is_root(mmu)) {
1066 dev_name(&pdev->dev), mmu);
1072 ipmmu_device_reset(mmu);
1074 if (mmu->features->reserved_context) {
1076 set_bit(0, mmu->ctx);
1085 if (!mmu->features->has_cache_leaf_nodes || !ipmmu_is_root(mmu)) {
1086 ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL,
1091 iommu_device_set_ops(&mmu->iommu, &ipmmu_ops);
1092 iommu_device_set_fwnode(&mmu->iommu,
1095 ret = iommu_device_register(&mmu->iommu);
1111 platform_set_drvdata(pdev, mmu);
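Lines 1000-1111 are the probe path: allocate per-instance state, map the registers (optionally shifted to the non-secure alias), clamp the context count, resolve the root instance (deferring until it has probed), reset and claim the fault IRQ on the root, and register with the IOMMU core only for instances that serve client devices. A condensed sketch of that ordering; the DT property name, IRQ setup details, and error handling are assumptions beyond what the matches show:

        static int ipmmu_probe(struct platform_device *pdev)
        {
                struct ipmmu_vmsa_device *mmu;
                struct resource *res;
                int irq;
                int ret;

                mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL);
                if (!mmu)
                        return -ENOMEM;

                mmu->dev = &pdev->dev;
                spin_lock_init(&mmu->lock);
                bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);
                mmu->features = of_device_get_match_data(&pdev->dev);
                memset(mmu->utlb_ctx, IPMMU_CTX_INVALID, mmu->features->num_utlbs);

                /* Map the IPMMU registers. */
                res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
                mmu->base = devm_ioremap_resource(&pdev->dev, res);
                if (IS_ERR(mmu->base))
                        return PTR_ERR(mmu->base);

                /* Older SoCs need the non-secure alias of the register window. */
                if (mmu->features->use_ns_alias_offset)
                        mmu->base += IM_NS_ALIAS_OFFSET;

                mmu->num_ctx = min(IPMMU_CTX_MAX, mmu->features->number_of_contexts);

                /* Leaf instances defer to a root; roots are their own root. */
                if (!mmu->features->has_cache_leaf_nodes ||
                    !of_find_property(pdev->dev.of_node, "renesas,ipmmu-main", NULL))
                        mmu->root = mmu;
                else
                        mmu->root = ipmmu_find_root();

                /* Wait until the root instance has probed. */
                if (!mmu->root)
                        return -EPROBE_DEFER;

                /* Only the root handles faults and owns the context bitmap. */
                if (ipmmu_is_root(mmu)) {
                        irq = platform_get_irq(pdev, 0);
                        if (irq < 0)
                                return irq;

                        ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0,
                                               dev_name(&pdev->dev), mmu);
                        if (ret < 0)
                                return ret;

                        ipmmu_device_reset(mmu);

                        /* Some SoCs keep context 0 for secure firmware. */
                        if (mmu->features->reserved_context)
                                set_bit(0, mmu->ctx);
                }

                /*
                 * Register with the IOMMU core, except for the root of a
                 * split system, which only walks page tables for its leaves.
                 */
                if (!mmu->features->has_cache_leaf_nodes || !ipmmu_is_root(mmu)) {
                        ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL,
                                                     dev_name(&pdev->dev));
                        if (ret)
                                return ret;

                        iommu_device_set_ops(&mmu->iommu, &ipmmu_ops);
                        iommu_device_set_fwnode(&mmu->iommu,
                                                &pdev->dev.of_node->fwnode);

                        ret = iommu_device_register(&mmu->iommu);
                        if (ret)
                                return ret;
                }

                platform_set_drvdata(pdev, mmu);

                return 0;
        }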
1118 struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev);
1120 iommu_device_sysfs_remove(&mmu->iommu);
1121 iommu_device_unregister(&mmu->iommu);
1123 arm_iommu_release_mapping(mmu->mapping);
1125 ipmmu_device_reset(mmu);
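Lines 1118-1125 undo probe in reverse: unregister from sysfs and the IOMMU core, release the ARM mapping if one was created, and reset the hardware. Assembled from the fragments:

        static int ipmmu_remove(struct platform_device *pdev)
        {
                struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev);

                iommu_device_sysfs_remove(&mmu->iommu);
                iommu_device_unregister(&mmu->iommu);

                arm_iommu_release_mapping(mmu->mapping);

                ipmmu_device_reset(mmu);

                return 0;
        }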
1133 struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
1137 if (ipmmu_is_root(mmu)) {
1138 ipmmu_device_reset(mmu);
1140 for (i = 0; i < mmu->num_ctx; i++) {
1141 if (!mmu->domains[i])
1144 ipmmu_domain_setup_context(mmu->domains[i]);
1149 for (i = 0; i < mmu->features->num_utlbs; i++) {
1150 if (mmu->utlb_ctx[i] == IPMMU_CTX_INVALID)
1153 ipmmu_utlb_enable(mmu->root->domains[mmu->utlb_ctx[i]], i);
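Lines 1133-1153 restore state after a power-losing suspend: the root instance is reset and every live context reprogrammed, then each micro-TLB whose shadow entry in utlb_ctx[] is valid is re-pointed at its saved context. A sketch of the resume callback (the _noirq name is an assumption):

        static int ipmmu_resume_noirq(struct device *dev)
        {
                struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
                unsigned int i;

                /* Reset the root instance and reprogram its active contexts. */
                if (ipmmu_is_root(mmu)) {
                        ipmmu_device_reset(mmu);

                        for (i = 0; i < mmu->num_ctx; i++) {
                                if (!mmu->domains[i])
                                        continue;

                                ipmmu_domain_setup_context(mmu->domains[i]);
                        }
                }

                /* Re-enable each micro-TLB against its saved context. */
                for (i = 0; i < mmu->features->num_utlbs; i++) {
                        if (mmu->utlb_ctx[i] == IPMMU_CTX_INVALID)
                                continue;

                        ipmmu_utlb_enable(mmu->root->domains[mmu->utlb_ctx[i]], i);
                }

                return 0;
        }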