Lines Matching defs:sdev (uses of sdev in the Unisoc sprd-iommu driver, drivers/iommu/sprd-iommu.c)

82 	struct sprd_iommu_device	*sdev;
93 sprd_iommu_write(struct sprd_iommu_device *sdev, unsigned int reg, u32 val)
95 writel_relaxed(val, sdev->base + reg);
99 sprd_iommu_read(struct sprd_iommu_device *sdev, unsigned int reg)
101 return readl_relaxed(sdev->base + reg);
105 sprd_iommu_update_bits(struct sprd_iommu_device *sdev, unsigned int reg,
108 u32 t = sprd_iommu_read(sdev, reg);
111 sprd_iommu_write(sdev, reg, t);
115 sprd_iommu_get_version(struct sprd_iommu_device *sdev)
117 int ver = (sprd_iommu_read(sdev, SPRD_IOMMU_VERSION) &
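
The listing only shows the sdev-bearing lines of the register helpers; below is a minimal sketch of how the read-modify-write helper plausibly fits together. The mask/shift handling is an assumption inferred from the call at line 226, not text taken from the listing.

	static inline void
	sprd_iommu_update_bits(struct sprd_iommu_device *sdev, unsigned int reg,
			       u32 mask, u32 shift, u32 val)
	{
		/* Read-modify-write: clear the field, then install the new value. */
		u32 t = sprd_iommu_read(sdev, reg);

		t = (t & ~(mask << shift)) | ((val & mask) << shift);
		sprd_iommu_write(sdev, reg, t);
	}
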
159 struct sprd_iommu_device *sdev = dom->sdev;
163 if (sdev->ver == SPRD_IOMMU_EX)
169 sprd_iommu_write(sdev, reg, val);
174 struct sprd_iommu_device *sdev = dom->sdev;
178 if (sdev->ver == SPRD_IOMMU_EX)
185 sprd_iommu_write(sdev, reg, val);
191 struct sprd_iommu_device *sdev = dom->sdev;
194 if (sdev->ver == SPRD_IOMMU_EX)
199 sprd_iommu_write(sdev, reg, val);
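
Lines 159-199 repeat one pattern across three helpers: select the register offset by IP version (EX vs VAU), compute the page-frame value, and write it. A hedged sketch of that pattern, using the first-VPN helper as the example; the register name and the geometry-based value are assumptions in the driver's naming style, not lines from the listing.

	static void sprd_iommu_first_vpn(struct sprd_iommu_domain *dom)
	{
		struct sprd_iommu_device *sdev = dom->sdev;
		unsigned int reg;
		u32 val;

		if (sdev->ver == SPRD_IOMMU_EX)
			reg = SPRD_EX_FIRST_VPN;
		else
			reg = SPRD_VAU_FIRST_VPN;

		/* Program the first virtual page number of the mapped range. */
		val = dom->domain.geometry.aperture_start >> SPRD_IOMMU_PAGE_SHIFT;
		sprd_iommu_write(sdev, reg, val);
	}
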
202 static void sprd_iommu_default_ppn(struct sprd_iommu_device *sdev)
204 u32 val = sdev->prot_page_pa >> SPRD_IOMMU_PAGE_SHIFT;
206 if (sdev->ver == SPRD_IOMMU_EX) {
207 sprd_iommu_write(sdev, SPRD_EX_DEFAULT_PPN, val);
208 } else if (sdev->ver == SPRD_IOMMU_VAU) {
209 sprd_iommu_write(sdev, SPRD_VAU_DEFAULT_PPN_RD, val);
210 sprd_iommu_write(sdev, SPRD_VAU_DEFAULT_PPN_WR, val);
214 static void sprd_iommu_hw_en(struct sprd_iommu_device *sdev, bool en)
219 if (sdev->ver == SPRD_IOMMU_EX)
226 sprd_iommu_update_bits(sdev, reg_cfg, mask, 0, val);
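
A sketch of the enable/disable helper around lines 214-226. The update_bits call is taken from line 226; the config register and enable-bit names are assumptions.

	static void sprd_iommu_hw_en(struct sprd_iommu_device *sdev, bool en)
	{
		unsigned int reg_cfg;
		u32 mask, val;

		if (sdev->ver == SPRD_IOMMU_EX)
			reg_cfg = SPRD_EX_CFG;
		else
			reg_cfg = SPRD_VAU_CFG;

		/* Set or clear the enable bits in one read-modify-write. */
		mask = SPRD_IOMMU_EN | SPRD_IOMMU_GATE_EN;
		val = en ? mask : 0;
		sprd_iommu_update_bits(sdev, reg_cfg, mask, 0, val);
	}
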
234 if (!dom->sdev)
238 dma_free_coherent(dom->sdev->dev, pgt_size, dom->pgt_va, dom->pgt_pa);
239 sprd_iommu_hw_en(dom->sdev, false);
240 dom->sdev = NULL;
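
The hardware must be disabled through dom->sdev before the pointer is cleared, so the disable call has to precede the NULL assignment, as in the two lines above. A hedged sketch of the whole teardown path; the function name and the page-table-size helper are assumptions not visible in this listing.

	static void sprd_iommu_cleanup(struct sprd_iommu_domain *dom)
	{
		size_t pgt_size;

		/* Nothing to release if the domain was never attached. */
		if (!dom->sdev)
			return;

		pgt_size = sprd_iommu_pgt_size(&dom->domain);
		dma_free_coherent(dom->sdev->dev, pgt_size, dom->pgt_va, dom->pgt_pa);
		sprd_iommu_hw_en(dom->sdev, false);
		dom->sdev = NULL;
	}
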
254 struct sprd_iommu_device *sdev = dev_iommu_priv_get(dev);
259 if (sdev->dom == dom)
264 dom->pgt_va = dma_alloc_coherent(sdev->dev, pgt_size, &dom->pgt_pa, GFP_KERNEL);
268 dom->sdev = sdev;
271 sdev->dom = dom;
278 sprd_iommu_hw_en(sdev, false);
282 sprd_iommu_default_ppn(sdev);
283 sprd_iommu_hw_en(sdev, true);
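
Lines 254-283 outline the attach path: allocate the page table, record the device/domain pairing, and reprogram the IOMMU with the hardware disabled before re-enabling it. A hedged sketch of that flow; to_sprd_domain(), sprd_iommu_pgt_size() and the vpn/ppn helpers not listed above are assumptions.

	static int sprd_iommu_attach_device(struct iommu_domain *domain,
					    struct device *dev)
	{
		struct sprd_iommu_device *sdev = dev_iommu_priv_get(dev);
		struct sprd_iommu_domain *dom = to_sprd_domain(domain);
		size_t pgt_size = sprd_iommu_pgt_size(domain);

		if (sdev->dom == dom)	/* already attached to this domain */
			return 0;

		dom->pgt_va = dma_alloc_coherent(sdev->dev, pgt_size,
						 &dom->pgt_pa, GFP_KERNEL);
		if (!dom->pgt_va)
			return -ENOMEM;

		dom->sdev = sdev;
		sdev->dom = dom;

		/* Program the translation registers while the IOMMU is disabled. */
		sprd_iommu_hw_en(sdev, false);
		sprd_iommu_first_ppn(dom);
		sprd_iommu_first_vpn(dom);
		sprd_iommu_vpn_range(dom);
		sprd_iommu_default_ppn(sdev);
		sprd_iommu_hw_en(sdev, true);

		return 0;
	}
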
301 if (!dom->sdev) {
307 dev_err(dom->sdev->dev, "(iova(0x%lx) + size(%zx)) are not in the range!\n",
354 if (dom->sdev->ver == SPRD_IOMMU_EX)
360 sprd_iommu_write(dom->sdev, reg, 0xffffffff);
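
Lines 354-360 are the TLB flush after a page-table update: pick the update register for the IP version and write all-ones to invalidate the cached translations. A minimal sketch; the callback signature and register names are assumptions.

	static void sprd_iommu_sync_map(struct iommu_domain *domain,
					unsigned long iova, size_t size)
	{
		struct sprd_iommu_domain *dom = to_sprd_domain(domain);
		unsigned int reg;

		if (dom->sdev->ver == SPRD_IOMMU_EX)
			reg = SPRD_EX_UPDATE;
		else
			reg = SPRD_VAU_UPDATE;

		/* Clear the IOMMU TLB after the page table has been updated. */
		sprd_iommu_write(dom->sdev, reg, 0xffffffff);
	}
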
392 struct sprd_iommu_device *sdev;
397 sdev = dev_iommu_priv_get(dev);
399 return &sdev->iommu;
404 struct sprd_iommu_device *sdev = dev_iommu_priv_get(dev);
406 return iommu_group_ref_get(sdev->group);
452 static int sprd_iommu_clk_enable(struct sprd_iommu_device *sdev)
456 eb = devm_clk_get_optional(sdev->dev, NULL);
463 sdev->eb = eb;
467 static void sprd_iommu_clk_disable(struct sprd_iommu_device *sdev)
469 if (sdev->eb)
470 clk_disable_unprepare(sdev->eb);
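
The clock handling at lines 452-470 treats the enable clock as optional, so a missing clock is not an error. A short sketch of the enable side, assuming the standard clk_prepare_enable() pairing with the clk_disable_unprepare() shown at line 470.

	static int sprd_iommu_clk_enable(struct sprd_iommu_device *sdev)
	{
		struct clk *eb;

		/* The "enable" clock is optional on some SoCs. */
		eb = devm_clk_get_optional(sdev->dev, NULL);
		if (!eb)
			return 0;

		if (IS_ERR(eb))
			return PTR_ERR(eb);

		sdev->eb = eb;
		return clk_prepare_enable(eb);
	}
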
475 struct sprd_iommu_device *sdev;
480 sdev = devm_kzalloc(dev, sizeof(*sdev), GFP_KERNEL);
481 if (!sdev)
489 sdev->base = base;
491 sdev->prot_page_va = dma_alloc_coherent(dev, SPRD_IOMMU_PAGE_SIZE,
492 &sdev->prot_page_pa, GFP_KERNEL);
493 if (!sdev->prot_page_va)
496 platform_set_drvdata(pdev, sdev);
497 sdev->dev = dev;
500 sdev->group = iommu_group_alloc();
501 if (IS_ERR(sdev->group)) {
502 ret = PTR_ERR(sdev->group);
506 ret = iommu_device_sysfs_add(&sdev->iommu, dev, NULL, dev_name(dev));
510 ret = iommu_device_register(&sdev->iommu, &sprd_iommu_ops, dev);
514 ret = sprd_iommu_clk_enable(sdev);
518 ret = sprd_iommu_get_version(sdev);
523 sdev->ver = ret;
528 sprd_iommu_clk_disable(sdev);
530 iommu_device_unregister(&sdev->iommu);
532 iommu_device_sysfs_remove(&sdev->iommu);
534 iommu_group_put(sdev->group);
536 dma_free_coherent(sdev->dev, SPRD_IOMMU_PAGE_SIZE, sdev->prot_page_va, sdev->prot_page_pa);
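
Lines 475-536 trace the probe sequence and its error unwind; the cleanup at lines 528-536 runs in the reverse order of the setup steps. A hedged reconstruction of that flow: the goto labels, the ioremap call and the comments are assumptions, while the ordering follows the listed lines.

	static int sprd_iommu_probe(struct platform_device *pdev)
	{
		struct device *dev = &pdev->dev;
		struct sprd_iommu_device *sdev;
		void __iomem *base;
		int ret;

		sdev = devm_kzalloc(dev, sizeof(*sdev), GFP_KERNEL);
		if (!sdev)
			return -ENOMEM;

		base = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(base))
			return PTR_ERR(base);
		sdev->base = base;

		/* Reserved page used as the default PPN target for faulting accesses. */
		sdev->prot_page_va = dma_alloc_coherent(dev, SPRD_IOMMU_PAGE_SIZE,
							&sdev->prot_page_pa, GFP_KERNEL);
		if (!sdev->prot_page_va)
			return -ENOMEM;

		platform_set_drvdata(pdev, sdev);
		sdev->dev = dev;

		sdev->group = iommu_group_alloc();
		if (IS_ERR(sdev->group)) {
			ret = PTR_ERR(sdev->group);
			goto free_page;
		}

		ret = iommu_device_sysfs_add(&sdev->iommu, dev, NULL, dev_name(dev));
		if (ret)
			goto put_group;

		ret = iommu_device_register(&sdev->iommu, &sprd_iommu_ops, dev);
		if (ret)
			goto remove_sysfs;

		ret = sprd_iommu_clk_enable(sdev);
		if (ret)
			goto unregister_iommu;

		ret = sprd_iommu_get_version(sdev);
		if (ret < 0)
			goto disable_clk;
		sdev->ver = ret;

		return 0;

	disable_clk:
		sprd_iommu_clk_disable(sdev);
	unregister_iommu:
		iommu_device_unregister(&sdev->iommu);
	remove_sysfs:
		iommu_device_sysfs_remove(&sdev->iommu);
	put_group:
		iommu_group_put(sdev->group);
	free_page:
		dma_free_coherent(sdev->dev, SPRD_IOMMU_PAGE_SIZE,
				  sdev->prot_page_va, sdev->prot_page_pa);
		return ret;
	}
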
542 struct sprd_iommu_device *sdev = platform_get_drvdata(pdev);
544 dma_free_coherent(sdev->dev, SPRD_IOMMU_PAGE_SIZE, sdev->prot_page_va, sdev->prot_page_pa);
546 iommu_group_put(sdev->group);
547 sdev->group = NULL;
550 iommu_device_sysfs_remove(&sdev->iommu);
551 iommu_device_unregister(&sdev->iommu);