Lines matching references to "data" in the MediaTek M4U IOMMU driver (drivers/iommu/mtk_iommu.c); each entry is prefixed with its line number in that source file.
161 #define for_each_m4u(data) list_for_each_entry(data, &m4ulist, list)
172 struct mtk_iommu_data *data;
174 for_each_m4u(data)
175 return data;
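
The macro at line 161 and the helper at lines 172-175 establish the pattern the rest of the file leans on: every probed M4U instance sits on a global m4ulist, for_each_m4u() is a thin wrapper over list_for_each_entry(), and the getter returns the first instance by returning from inside the loop. A minimal sketch of that shape, with struct fields trimmed to what the walk needs:

#include <linux/list.h>

static LIST_HEAD(m4ulist);		/* every probed M4U instance is chained here */

struct mtk_iommu_data {
	struct list_head list;		/* link into m4ulist */
	/* hardware state elided */
};

#define for_each_m4u(data)	list_for_each_entry(data, &m4ulist, list)

/* Returning from inside the loop yields the first registered instance;
 * callers run after probe, so the NULL fallback is effectively unreachable. */
static struct mtk_iommu_data *mtk_iommu_get_m4u_data(void)
{
	struct mtk_iommu_data *data;

	for_each_m4u(data)
		return data;
	return NULL;
}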
187 struct mtk_iommu_data *data = cookie;
189 for_each_m4u(data) {
191 data->base + data->plat_data->inv_sel_reg);
192 writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
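
Lines 187-192 are the flush-all path: select the invalidate target through the platform-specific invalidate-select register, then write the all-invalidate command. A condensed sketch; the F_INVLD_EN* select bits and register values are as this driver vintage defines them, but treat them as illustrative:

#define REG_MMU_INVALIDATE	0x020
#define F_ALL_INVLD		0x2

static void mtk_iommu_tlb_flush_all(void *cookie)
{
	struct mtk_iommu_data *data = cookie;

	for_each_m4u(data) {
		/* Select both invalidate engines, then fire the command. */
		writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
			       data->base + data->plat_data->inv_sel_reg);
		writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
		wmb();	/* order the posted MMIO writes before returning */
	}
}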
200 struct mtk_iommu_data *data = cookie;
205 for_each_m4u(data) {
206 spin_lock_irqsave(&data->tlb_lock, flags);
208 data->base + data->plat_data->inv_sel_reg);
210 writel_relaxed(iova, data->base + REG_MMU_INVLD_START_A);
212 data->base + REG_MMU_INVLD_END_A);
214 data->base + REG_MMU_INVALIDATE);
217 ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
220 dev_warn(data->dev,
225 writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
226 spin_unlock_irqrestore(&data->tlb_lock, flags);
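
Lines 200-226 implement the range flush under data->tlb_lock: program the start and end IOVA, kick a range invalidate, and busy-wait on REG_MMU_CPE_DONE with readl_poll_timeout_atomic() (interrupts are off under the spinlock, hence the atomic variant). On timeout the driver degrades gracefully to a full flush. A sketch of the loop body, with F_MMU_INV_RANGE and the 10us/1000us poll parameters taken from this driver vintage:

static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
					   size_t granule, void *cookie)
{
	struct mtk_iommu_data *data = cookie;
	unsigned long flags;
	int ret;
	u32 tmp;

	for_each_m4u(data) {
		spin_lock_irqsave(&data->tlb_lock, flags);
		writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
			       data->base + data->plat_data->inv_sel_reg);

		/* Inclusive [start, end] window, then the range-invalidate cmd. */
		writel_relaxed(iova, data->base + REG_MMU_INVLD_START_A);
		writel_relaxed(iova + size - 1, data->base + REG_MMU_INVLD_END_A);
		writel_relaxed(F_MMU_INV_RANGE, data->base + REG_MMU_INVALIDATE);

		/* Poll atomically: irqs are disabled while we hold tlb_lock. */
		ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
						tmp, tmp != 0, 10, 1000);
		if (ret) {
			dev_warn(data->dev,
				 "Partial TLB flush timed out, falling back to full flush\n");
			mtk_iommu_tlb_flush_all(data);
		}
		/* Clear the completion flag so the next poll starts clean. */
		writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
		spin_unlock_irqrestore(&data->tlb_lock, flags);
	}
}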
234 struct mtk_iommu_data *data = cookie;
235 struct iommu_domain *domain = &data->m4u_dom->domain;
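
Lines 234-235 belong to the per-page nosync hook: no MMIO happens here; the page is only queued into the gather structure so the later iotlb_sync callback can issue one merged range flush. Sketch:

static void mtk_iommu_tlb_flush_page_nosync(struct iommu_iotlb_gather *gather,
					    unsigned long iova, size_t granule,
					    void *cookie)
{
	struct mtk_iommu_data *data = cookie;
	struct iommu_domain *domain = &data->m4u_dom->domain;

	/* No MMIO here: just accumulate; iotlb_sync flushes the merged range. */
	iommu_iotlb_gather_add_page(domain, gather, iova, granule);
}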
249 struct mtk_iommu_data *data = dev_id;
250 struct mtk_iommu_domain *dom = data->m4u_dom;
256 int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST1);
258 regval = readl_relaxed(data->base + REG_MMU0_INT_ID);
259 fault_iova = readl_relaxed(data->base + REG_MMU0_FAULT_VA);
260 fault_pa = readl_relaxed(data->base + REG_MMU0_INVLD_PA);
262 regval = readl_relaxed(data->base + REG_MMU1_INT_ID);
263 fault_iova = readl_relaxed(data->base + REG_MMU1_FAULT_VA);
264 fault_pa = readl_relaxed(data->base + REG_MMU1_INVLD_PA);
269 if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_SUB_COMM)) {
275 fault_larb = data->plat_data->larbid_remap[fault_larb][sub_comm];
277 if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
280 data->dev,
287 regval = readl_relaxed(data->base + REG_MMU_INT_CONTROL0);
289 writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);
291 mtk_iommu_tlb_flush_all(data);
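
The fault ISR at lines 249-291 decodes which translation engine (MMU0 or MMU1) faulted, reports the fault upstream, acks the interrupt by pulsing the clear bit in INT_CONTROL0, and finally flushes the TLB so a stale translation cannot refault immediately. A condensed sketch; F_REG_MMU0_FAULT_MASK, F_MMU_FAULT_VA_WRITE_BIT, and F_INT_CLR_BIT follow the driver's naming, and the larb/port/sub-common decode (lines 269-275) is elided:

static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
{
	struct mtk_iommu_data *data = dev_id;
	struct mtk_iommu_domain *dom = data->m4u_dom;
	u32 int_state, regval, fault_iova, fault_pa;
	bool write;

	/* Which engine faulted decides which register bank to decode. */
	int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST1);
	if (int_state & F_REG_MMU0_FAULT_MASK) {
		regval = readl_relaxed(data->base + REG_MMU0_INT_ID);
		fault_iova = readl_relaxed(data->base + REG_MMU0_FAULT_VA);
		fault_pa = readl_relaxed(data->base + REG_MMU0_INVLD_PA);
	} else {
		regval = readl_relaxed(data->base + REG_MMU1_INT_ID);
		fault_iova = readl_relaxed(data->base + REG_MMU1_FAULT_VA);
		fault_pa = readl_relaxed(data->base + REG_MMU1_INVLD_PA);
	}
	write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT;

	/* Let upper layers handle it; print a fallback diagnostic otherwise. */
	if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
			       write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ))
		dev_err_ratelimited(data->dev,
				    "fault iova=0x%x pa=0x%x int_id=0x%x\n",
				    fault_iova, fault_pa, regval);

	/* Ack: pulse the interrupt-clear bit, then drop stale translations. */
	regval = readl_relaxed(data->base + REG_MMU_INT_CONTROL0);
	regval |= F_INT_CLR_BIT;
	writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);
	mtk_iommu_tlb_flush_all(data);

	return IRQ_HANDLED;
}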
296 static void mtk_iommu_config(struct mtk_iommu_data *data,
307 larb_mmu = &data->larb_imu[larbid];
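
mtk_iommu_config() (lines 296-307) translates a master's fwspec IDs into per-port enable bits in the larb bookkeeping that the SMI larb driver consumes when powering the local arbiter; the attach path passes true (line 401) and detach passes false (line 413). Sketch, with MTK_M4U_TO_LARB/MTK_M4U_TO_PORT as the driver's ID-unpacking macros and MTK_SMI_MMU_EN as the per-port bit:

static void mtk_iommu_config(struct mtk_iommu_data *data,
			     struct device *dev, bool enable)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct mtk_smi_larb_iommu *larb_mmu;
	unsigned int larbid, portid;
	int i;

	for (i = 0; i < fwspec->num_ids; i++) {
		larbid = MTK_M4U_TO_LARB(fwspec->ids[i]);
		portid = MTK_M4U_TO_PORT(fwspec->ids[i]);
		larb_mmu = &data->larb_imu[larbid];

		/* Record whether this port's traffic goes through the M4U. */
		if (enable)
			larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
		else
			larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid);
	}
}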
321 struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
332 .iommu_dev = data->dev,
335 dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data);
337 dev_err(data->dev, "Failed to alloc io pgtable\n");
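
Lines 321-337 sit in the domain-finalise step: the ARM short-descriptor (v7s) io-pgtable format is instantiated with MediaTek's quirks, and its DMA allocations are bound to the M4U device (line 332). A sketch with the cfg fields reduced to the essentials; the exact quirk set and oas value vary by kernel version, so take these as illustrative:

static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom)
{
	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();

	dom->cfg = (struct io_pgtable_cfg) {
		.quirks = IO_PGTABLE_QUIRK_ARM_NS |
			  IO_PGTABLE_QUIRK_NO_PERMS |
			  IO_PGTABLE_QUIRK_ARM_MTK_EXT,
		.pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap,
		.ias = 32,	/* 32-bit IOVA input space */
		.oas = 34,	/* MTK extension widens the PA output space */
		.tlb = &mtk_iommu_flush_ops,
		.iommu_dev = data->dev,	/* line 332: pgtable pages come from here */
	};

	/* ARM v7s short-descriptor format with MediaTek quirks (line 335). */
	dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data);
	if (!dom->iop) {
		dev_err(data->dev, "Failed to alloc io pgtable\n");
		return -EINVAL;
	}
	return 0;
}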
388 struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
391 if (!data)
395 if (!data->m4u_dom) {
396 data->m4u_dom = dom;
398 data->base + REG_MMU_PT_BASE_ADDR);
401 mtk_iommu_config(data, dev, true);
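
Attach (lines 388-401) installs the shared pagetable base into hardware only on the very first attach, then routes the device's larb ports through the M4U. Sketch, treating MMU_PT_ADDR_MASK as the driver's pagetable-address mask:

static int mtk_iommu_attach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);

	if (!data)
		return -ENODEV;

	/* First attach: point the hardware walker at the shared pagetable. */
	if (!data->m4u_dom) {
		data->m4u_dom = dom;
		writel(dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK,
		       data->base + REG_MMU_PT_BASE_ADDR);
	}

	mtk_iommu_config(data, dev, true);	/* route this device's ports */
	return 0;
}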
408 struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
410 if (!data)
413 mtk_iommu_config(data, dev, false);
420 struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
423 if (data->enable_4GB)
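
Lines 420-423 are the 4GB-mode fold on the map path: on SoCs where DRAM is remapped above 4GiB, bit 32 is ORed into the PA before it is handed to the 32-bit v7s format (which the MTK extension quirk widens). Sketch:

static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();

	/* In 4GB mode DRAM lives above 4GiB; fold bit 32 into the PTE's PA. */
	if (data->enable_4GB)
		paddr |= BIT_ULL(32);

	return dom->iop->map(dom->iop, iova, paddr, size, prot, gfp);
}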
447 struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
454 data);
461 struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
465 if (data->enable_4GB && pa >= MTK_IOMMU_4GB_MODE_REMAP_BASE)
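
Lines 461-465 undo that fold for the CPU-facing iova_to_phys(): addresses at or above the remap base get bit 32 cleared again. Sketch, with MTK_IOMMU_4GB_MODE_REMAP_BASE as the driver's remap threshold:

static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
					  dma_addr_t iova)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
	phys_addr_t pa;

	pa = dom->iop->iova_to_phys(dom->iop, iova);
	/* Undo the bit-32 fold from the map path (line 423) for the CPU view. */
	if (data->enable_4GB && pa >= MTK_IOMMU_4GB_MODE_REMAP_BASE)
		pa &= ~BIT_ULL(32);

	return pa;
}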
474 struct mtk_iommu_data *data;
479 data = dev_iommu_priv_get(dev);
481 return &data->iommu;
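
probe_device (lines 474-481) accepts only masters that referenced this IOMMU through their fwspec, then hands back the per-instance iommu_device stored in the device's IOMMU private data (populated during of_xlate). Sketch:

static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct mtk_iommu_data *data;

	/* Only devices that referenced us via #iommu-cells get a context. */
	if (!fwspec || fwspec->ops != &mtk_iommu_ops)
		return ERR_PTR(-ENODEV);

	data = dev_iommu_priv_get(dev);	/* set during of_xlate */
	return &data->iommu;
}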
496 struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
498 if (!data)
502 if (!data->m4u_group) {
503 data->m4u_group = iommu_group_alloc();
504 if (IS_ERR(data->m4u_group))
507 iommu_group_ref_get(data->m4u_group);
509 return data->m4u_group;
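
device_group (lines 496-509) puts every master behind a single shared group, allocated lazily on the first call and reference-counted afterwards, because all M4U masters share one pagetable and domain. Sketch:

static struct iommu_group *mtk_iommu_device_group(struct device *dev)
{
	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();

	if (!data)
		return ERR_PTR(-ENODEV);

	/* One group for the whole M4U: every master shares one domain. */
	if (!data->m4u_group) {
		data->m4u_group = iommu_group_alloc();
		if (IS_ERR(data->m4u_group))
			dev_err(dev, "Failed to allocate M4U IOMMU group\n");
	} else {
		iommu_group_ref_get(data->m4u_group);
	}
	return data->m4u_group;
}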
551 static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
556 ret = clk_prepare_enable(data->bclk);
558 dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret);
562 if (data->plat_data->m4u_plat == M4U_MT8173) {
566 regval = readl_relaxed(data->base + REG_MMU_CTRL_REG);
569 writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);
577 writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);
586 writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL);
588 if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_LEGACY_IVRP_PADDR))
589 regval = (data->protect_base >> 1) | (data->enable_4GB << 31);
591 regval = lower_32_bits(data->protect_base) |
592 upper_32_bits(data->protect_base);
593 writel_relaxed(regval, data->base + REG_MMU_IVRP_PADDR);
595 if (data->enable_4GB &&
596 MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_VLD_PA_RNG)) {
602 writel_relaxed(regval, data->base + REG_MMU_VLD_PA_RNG);
604 writel_relaxed(0, data->base + REG_MMU_DCM_DIS);
605 if (MTK_IOMMU_HAS_FLAG(data->plat_data, WR_THROT_EN)) {
607 regval = readl_relaxed(data->base + REG_MMU_WR_LEN_CTRL);
609 writel_relaxed(regval, data->base + REG_MMU_WR_LEN_CTRL);
612 if (MTK_IOMMU_HAS_FLAG(data->plat_data, RESET_AXI)) {
616 regval = readl_relaxed(data->base + REG_MMU_MISC_CTRL);
618 if (MTK_IOMMU_HAS_FLAG(data->plat_data, OUT_ORDER_WR_EN))
621 writel_relaxed(regval, data->base + REG_MMU_MISC_CTRL);
623 if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
624 dev_name(data->dev), (void *)data)) {
625 writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
626 clk_disable_unprepare(data->bclk);
627 dev_err(data->dev, "Failed @ IRQ-%d Request\n", data->irq);
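
mtk_iommu_hw_init() (lines 551-627) is a straight-line register bring-up. Two details worth pulling out: the two IVRP_PADDR encodings selected by HAS_LEGACY_IVRP_PADDR (lines 588-593), and the unwind order when devm_request_irq() fails (lines 623-627). A condensed sketch that elides the CTRL/INT/DCM/MISC programming:

static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
{
	u32 regval;
	int ret;

	ret = clk_prepare_enable(data->bclk);
	if (ret) {
		dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret);
		return ret;
	}

	/* ... CTRL_REG / INT_CONTROL0 / INT_MAIN_CONTROL setup elided ... */

	/* The protect buffer absorbs in-flight transactions after a fault.
	 * Legacy platforms pack PA>>1 plus the 4GB flag in bit 31; newer
	 * ones split the 64-bit PA into low and high halves. */
	if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_LEGACY_IVRP_PADDR))
		regval = (data->protect_base >> 1) | (data->enable_4GB << 31);
	else
		regval = lower_32_bits(data->protect_base) |
			 upper_32_bits(data->protect_base);
	writel_relaxed(regval, data->base + REG_MMU_IVRP_PADDR);

	if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
			     dev_name(data->dev), (void *)data)) {
		/* Unwind: detach the pagetable base and drop the clock. */
		writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
		clk_disable_unprepare(data->bclk);
		dev_err(data->dev, "Failed @ IRQ-%d Request\n", data->irq);
		return -ENODEV;
	}

	return 0;
}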
641 struct mtk_iommu_data *data;
652 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
653 if (!data)
655 data->dev = dev;
656 data->plat_data = of_device_get_match_data(dev);
662 data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);
664 if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_4GB_MODE)) {
665 switch (data->plat_data->m4u_plat) {
684 data->enable_4GB = !!(val & F_DDR_4GB_SUPPORT_EN);
688 data->base = devm_ioremap_resource(dev, res);
689 if (IS_ERR(data->base))
690 return PTR_ERR(data->base);
693 data->irq = platform_get_irq(pdev, 0);
694 if (data->irq < 0)
695 return data->irq;
697 if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_BCLK)) {
698 data->bclk = devm_clk_get(dev, "bclk");
699 if (IS_ERR(data->bclk))
700 return PTR_ERR(data->bclk);
731 data->larb_imu[id].dev = &plarbdev->dev;
737 platform_set_drvdata(pdev, data);
739 ret = mtk_iommu_hw_init(data);
743 ret = iommu_device_sysfs_add(&data->iommu, dev, NULL,
748 iommu_device_set_ops(&data->iommu, &mtk_iommu_ops);
749 iommu_device_set_fwnode(&data->iommu, &pdev->dev.of_node->fwnode);
751 ret = iommu_device_register(&data->iommu);
755 spin_lock_init(&data->tlb_lock);
756 list_add_tail(&data->list, &m4ulist);
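
probe (lines 641-756) follows the usual platform-driver shape: allocate state, carve out the fault "protect" buffer, map resources, init hardware, register with the IOMMU core, and only then add the instance to m4ulist so for_each_m4u() can see it. A condensed sketch of that ordering (4GB-mode detection, ioremap/IRQ/clock acquisition, larb lookup, and error unwinding elided):

static int mtk_iommu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mtk_iommu_data *data;
	void *protect;
	int ret;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->dev = dev;
	data->plat_data = of_device_get_match_data(dev);

	/* Aligned scratch buffer that absorbs translation-fault writes. */
	protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2, GFP_KERNEL);
	if (!protect)
		return -ENOMEM;
	data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);

	platform_set_drvdata(pdev, data);

	ret = mtk_iommu_hw_init(data);
	if (ret)
		return ret;

	/* Publish the instance, then make it visible to for_each_m4u(). */
	ret = iommu_device_register(&data->iommu);
	if (ret)
		return ret;

	spin_lock_init(&data->tlb_lock);
	list_add_tail(&data->list, &m4ulist);
	return 0;
}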
766 struct mtk_iommu_data *data = platform_get_drvdata(pdev);
768 iommu_device_sysfs_remove(&data->iommu);
769 iommu_device_unregister(&data->iommu);
771 list_del(&data->list);
773 clk_disable_unprepare(data->bclk);
774 devm_free_irq(&pdev->dev, data->irq, data);
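
remove (lines 766-774) tears down in roughly the reverse order, delisting the instance from m4ulist and explicitly freeing the devm-managed IRQ so the handler cannot run against a dying device. Sketch:

static int mtk_iommu_remove(struct platform_device *pdev)
{
	struct mtk_iommu_data *data = platform_get_drvdata(pdev);

	iommu_device_sysfs_remove(&data->iommu);
	iommu_device_unregister(&data->iommu);

	list_del(&data->list);	/* drop out of the for_each_m4u() walk */

	clk_disable_unprepare(data->bclk);
	/* Free the IRQ before devm teardown would otherwise get to it. */
	devm_free_irq(&pdev->dev, data->irq, data);
	return 0;
}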
781 struct mtk_iommu_data *data = dev_get_drvdata(dev);
782 struct mtk_iommu_suspend_reg *reg = &data->reg;
783 void __iomem *base = data->base;
793 clk_disable_unprepare(data->bclk);
799 struct mtk_iommu_data *data = dev_get_drvdata(dev);
800 struct mtk_iommu_suspend_reg *reg = &data->reg;
801 struct mtk_iommu_domain *m4u_dom = data->m4u_dom;
802 void __iomem *base = data->base;
805 ret = clk_prepare_enable(data->bclk);
807 dev_err(data->dev, "Failed to enable clk(%d) in resume\n", ret);
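
Suspend (lines 781-793) snapshots the registers hw_init programmed, since the M4U's power may be cut, and resume (lines 799-807) re-enables the clock, replays the snapshot, and reinstalls the pagetable base if a domain was already live. A sketch that saves and restores only a representative subset of the real mtk_iommu_suspend_reg fields:

static int __maybe_unused mtk_iommu_suspend(struct device *dev)
{
	struct mtk_iommu_data *data = dev_get_drvdata(dev);
	struct mtk_iommu_suspend_reg *reg = &data->reg;
	void __iomem *base = data->base;

	/* Snapshot hardware state; the power domain may go down. */
	reg->misc_ctrl = readl_relaxed(base + REG_MMU_MISC_CTRL);
	reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0);
	reg->ivrp_paddr = readl_relaxed(base + REG_MMU_IVRP_PADDR);
	clk_disable_unprepare(data->bclk);
	return 0;
}

static int __maybe_unused mtk_iommu_resume(struct device *dev)
{
	struct mtk_iommu_data *data = dev_get_drvdata(dev);
	struct mtk_iommu_suspend_reg *reg = &data->reg;
	struct mtk_iommu_domain *m4u_dom = data->m4u_dom;
	void __iomem *base = data->base;
	int ret;

	ret = clk_prepare_enable(data->bclk);
	if (ret) {
		dev_err(data->dev, "Failed to enable clk(%d) in resume\n", ret);
		return ret;
	}

	/* Replay the snapshot, then reinstall the pagetable base if a
	 * domain was live before suspend. */
	writel_relaxed(reg->misc_ctrl, base + REG_MMU_MISC_CTRL);
	writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0);
	writel_relaxed(reg->ivrp_paddr, base + REG_MMU_IVRP_PADDR);
	if (m4u_dom)
		writel(m4u_dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK,
		       base + REG_MMU_PT_BASE_ADDR);
	return 0;
}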
865 { .compatible = "mediatek,mt2712-m4u", .data = &mt2712_data},
866 { .compatible = "mediatek,mt6779-m4u", .data = &mt6779_data},
867 { .compatible = "mediatek,mt8167-m4u", .data = &mt8167_data},
868 { .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data},
869 { .compatible = "mediatek,mt8183-m4u", .data = &mt8183_data},
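
The of_device_id table at lines 865-869 is where the per-SoC mtk_iommu_plat_data enters the driver: each compatible's .data pointer is what of_device_get_match_data() returns at line 656, feeding every MTK_IOMMU_HAS_FLAG() test above. A sketch of one entry; the flag set shown here is illustrative, not the exact mt8173 definition:

static const struct mtk_iommu_plat_data mt8173_data = {
	.m4u_plat    = M4U_MT8173,
	.flags       = HAS_4GB_MODE | HAS_BCLK | RESET_AXI |
		       HAS_LEGACY_IVRP_PADDR,	/* illustrative flag set */
	.inv_sel_reg = REG_MMU_INV_SEL_GEN1,
};

static const struct of_device_id mtk_iommu_of_ids[] = {
	{ .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data },
	{ /* sentinel */ }
};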