Lines Matching refs:data

252 	struct iommu_domain domain; /* generic domain data structure */
256 * This structure holds all data of a single SYSMMU controller; this includes
259 * tree. It is usually referenced by the 'data' pointer.
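Taken together, the field references in the matches below outline the shape of this controller descriptor. A minimal sketch assembled only from names that appear in this listing; field types are inferred, ordering is arbitrary, and anything the real structure contains beyond these members is omitted:

struct sysmmu_drvdata {
	struct device *sysmmu;		/* the SYSMMU controller device itself (line 625) */
	struct device *master;		/* master device owning this controller (line 1324) */
	void __iomem *sfrbase;		/* ioremapped register base (line 581) */
	struct clk *clk, *aclk, *pclk;	/* optional bus/interface clocks */
	struct clk *clk_master;		/* optional gate clock of the master IP */
	spinlock_t lock;		/* protects the state below (line 626) */
	bool active;			/* set by __sysmmu_enable(), cleared by __sysmmu_disable() */
	unsigned int version;		/* MAKE_MMU_VER()-encoded hardware version */
	phys_addr_t pgtable;		/* page table base programmed into the MMU */
	struct exynos_iommu_domain *domain;	/* attached domain, NULL when detached */
	struct list_head domain_node;	/* entry in domain->clients */
	struct list_head owner_node;	/* entry in owner->controllers */
	struct iommu_device iommu;	/* handle registered with the IOMMU core */
	struct device_link *link;	/* device link to the master (line 1256) */
};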
286 static void sysmmu_unblock(struct sysmmu_drvdata *data)
288 writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
291 static bool sysmmu_block(struct sysmmu_drvdata *data)
295 writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
296 while ((i > 0) && !(readl(data->sfrbase + REG_MMU_STATUS) & 1))
299 if (!(readl(data->sfrbase + REG_MMU_STATUS) & 1)) {
300 sysmmu_unblock(data);
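Read together, the sysmmu_block()/sysmmu_unblock() matches above form a handshake used before touching the TLB: the MMU is switched into the blocked state, bit 0 of REG_MMU_STATUS is polled until the hardware confirms it, and on timeout the block is undone. A sketch of the complete pair as these fragments suggest; the retry budget and the return statements are assumptions, since the non-matching lines are not shown here:

static void sysmmu_unblock(struct sysmmu_drvdata *data)
{
	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
}

static bool sysmmu_block(struct sysmmu_drvdata *data)
{
	int i = 120;	/* assumed retry budget; the initializer is not in this listing */

	writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
	while ((i > 0) && !(readl(data->sfrbase + REG_MMU_STATUS) & 1))
		--i;

	/* The hardware never reported the blocked state: undo and tell the caller. */
	if (!(readl(data->sfrbase + REG_MMU_STATUS) & 1)) {
		sysmmu_unblock(data);
		return false;
	}

	return true;
}

Callers such as sysmmu_tlb_invalidate_entry() (lines 558-560 below) flush only when sysmmu_block() succeeds and always re-enable the MMU with sysmmu_unblock() afterwards.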
307 static void __sysmmu_tlb_invalidate(struct sysmmu_drvdata *data)
309 if (MMU_MAJ_VER(data->version) < 5)
310 writel(0x1, data->sfrbase + REG_MMU_FLUSH);
312 writel(0x1, data->sfrbase + REG_V5_MMU_FLUSH_ALL);
315 static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
320 if (MMU_MAJ_VER(data->version) < 5) {
323 data->sfrbase + REG_MMU_FLUSH_ENTRY);
329 data->sfrbase + REG_V5_MMU_FLUSH_ENTRY);
332 data->sfrbase + REG_V5_MMU_FLUSH_START);
334 data->sfrbase + REG_V5_MMU_FLUSH_END);
335 writel(1, data->sfrbase + REG_V5_MMU_FLUSH_RANGE);
340 static void __sysmmu_set_ptbase(struct sysmmu_drvdata *data, phys_addr_t pgd)
342 if (MMU_MAJ_VER(data->version) < 5)
343 writel(pgd, data->sfrbase + REG_PT_BASE_ADDR);
346 data->sfrbase + REG_V5_PT_BASE_PFN);
348 __sysmmu_tlb_invalidate(data);
351 static void __sysmmu_enable_clocks(struct sysmmu_drvdata *data)
353 BUG_ON(clk_prepare_enable(data->clk_master));
354 BUG_ON(clk_prepare_enable(data->clk));
355 BUG_ON(clk_prepare_enable(data->pclk));
356 BUG_ON(clk_prepare_enable(data->aclk));
359 static void __sysmmu_disable_clocks(struct sysmmu_drvdata *data)
361 clk_disable_unprepare(data->aclk);
362 clk_disable_unprepare(data->pclk);
363 clk_disable_unprepare(data->clk);
364 clk_disable_unprepare(data->clk_master);
367 static void __sysmmu_get_version(struct sysmmu_drvdata *data)
371 __sysmmu_enable_clocks(data);
373 ver = readl(data->sfrbase + REG_MMU_VERSION);
377 data->version = MAKE_MMU_VER(1, 0);
379 data->version = MMU_RAW_VER(ver);
381 dev_dbg(data->sysmmu, "hardware version: %d.%d\n",
382 MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version));
384 __sysmmu_disable_clocks(data);
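The __sysmmu_get_version() matches show the version register being read with all clocks temporarily enabled, and two alternative assignments to data->version. A sketch of how these pieces presumably fit together; the condition guarding line 377 is not part of this listing, so the value tested here is an assumption:

static void __sysmmu_get_version(struct sysmmu_drvdata *data)
{
	u32 ver;

	__sysmmu_enable_clocks(data);

	ver = readl(data->sfrbase + REG_MMU_VERSION);

	/* Assumed check: fall back to version 1.0 when the register value is bogus. */
	if (ver == 0x80000001u)
		data->version = MAKE_MMU_VER(1, 0);
	else
		data->version = MMU_RAW_VER(ver);

	dev_dbg(data->sysmmu, "hardware version: %d.%d\n",
		MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version));

	__sysmmu_disable_clocks(data);
}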
387 static void show_fault_information(struct sysmmu_drvdata *data,
393 dev_err(data->sysmmu, "%s: %s FAULT occurred at %#x\n",
394 dev_name(data->master), finfo->name, fault_addr);
395 dev_dbg(data->sysmmu, "Page table base: %pa\n", &data->pgtable);
396 ent = section_entry(phys_to_virt(data->pgtable), fault_addr);
397 dev_dbg(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
400 dev_dbg(data->sysmmu, "\t Lv2 entry: %#x\n", *ent);
407 struct sysmmu_drvdata *data = dev_id;
414 WARN_ON(!data->active);
416 if (MMU_MAJ_VER(data->version) < 5) {
428 spin_lock(&data->lock);
430 clk_enable(data->clk_master);
432 itype = __ffs(readl(data->sfrbase + reg_status));
440 fault_addr = readl(data->sfrbase + finfo->addr_reg);
441 show_fault_information(data, finfo, fault_addr);
443 if (data->domain)
444 ret = report_iommu_fault(&data->domain->domain,
445 data->master, fault_addr, finfo->type);
449 writel(1 << itype, data->sfrbase + reg_clear);
451 sysmmu_unblock(data);
453 clk_disable(data->clk_master);
455 spin_unlock(&data->lock);
460 static void __sysmmu_disable(struct sysmmu_drvdata *data)
464 clk_enable(data->clk_master);
466 spin_lock_irqsave(&data->lock, flags);
467 writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
468 writel(0, data->sfrbase + REG_MMU_CFG);
469 data->active = false;
470 spin_unlock_irqrestore(&data->lock, flags);
472 __sysmmu_disable_clocks(data);
475 static void __sysmmu_init_config(struct sysmmu_drvdata *data)
479 if (data->version <= MAKE_MMU_VER(3, 1))
481 else if (data->version <= MAKE_MMU_VER(3, 2))
488 writel(cfg, data->sfrbase + REG_MMU_CFG);
491 static void __sysmmu_enable(struct sysmmu_drvdata *data)
495 __sysmmu_enable_clocks(data);
497 spin_lock_irqsave(&data->lock, flags);
498 writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
499 __sysmmu_init_config(data);
500 __sysmmu_set_ptbase(data, data->pgtable);
501 writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
502 data->active = true;
503 spin_unlock_irqrestore(&data->lock, flags);
511 clk_disable(data->clk_master);
514 static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
519 spin_lock_irqsave(&data->lock, flags);
520 if (data->active && data->version >= MAKE_MMU_VER(3, 3)) {
521 clk_enable(data->clk_master);
522 if (sysmmu_block(data)) {
523 if (data->version >= MAKE_MMU_VER(5, 0))
524 __sysmmu_tlb_invalidate(data);
526 __sysmmu_tlb_invalidate_entry(data, iova, 1);
527 sysmmu_unblock(data);
529 clk_disable(data->clk_master);
531 spin_unlock_irqrestore(&data->lock, flags);
534 static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
539 spin_lock_irqsave(&data->lock, flags);
540 if (data->active) {
543 clk_enable(data->clk_master);
555 if (MMU_MAJ_VER(data->version) == 2)
558 if (sysmmu_block(data)) {
559 __sysmmu_tlb_invalidate_entry(data, iova, num_inv);
560 sysmmu_unblock(data);
562 clk_disable(data->clk_master);
564 spin_unlock_irqrestore(&data->lock, flags);
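The sysmmu_tlb_invalidate_entry() matches show the canonical nesting used throughout this driver: take data->lock, do nothing unless the controller is active, gate the master clock, and flush only between a successful sysmmu_block() and the matching sysmmu_unblock(). A sketch of that path; the parameter types and the derivation of num_inv in the elided lines between these matches are assumptions:

static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
					sysmmu_iova_t iova, size_t size)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (data->active) {
		unsigned int num_inv = 1;	/* placeholder; really derived from 'size' */

		clk_enable(data->clk_master);

		/*
		 * The elided lines compute num_inv from 'size', with a special
		 * case when MMU_MAJ_VER(data->version) == 2 (line 555).
		 */
		if (sysmmu_block(data)) {
			__sysmmu_tlb_invalidate_entry(data, iova, num_inv);
			sysmmu_unblock(data);
		}
		clk_disable(data->clk_master);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}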
573 struct sysmmu_drvdata *data;
576 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
577 if (!data)
581 data->sfrbase = devm_ioremap_resource(dev, res);
582 if (IS_ERR(data->sfrbase))
583 return PTR_ERR(data->sfrbase);
590 dev_name(dev), data);
596 data->clk = devm_clk_get(dev, "sysmmu");
597 if (PTR_ERR(data->clk) == -ENOENT)
598 data->clk = NULL;
599 else if (IS_ERR(data->clk))
600 return PTR_ERR(data->clk);
602 data->aclk = devm_clk_get(dev, "aclk");
603 if (PTR_ERR(data->aclk) == -ENOENT)
604 data->aclk = NULL;
605 else if (IS_ERR(data->aclk))
606 return PTR_ERR(data->aclk);
608 data->pclk = devm_clk_get(dev, "pclk");
609 if (PTR_ERR(data->pclk) == -ENOENT)
610 data->pclk = NULL;
611 else if (IS_ERR(data->pclk))
612 return PTR_ERR(data->pclk);
614 if (!data->clk && (!data->aclk || !data->pclk)) {
619 data->clk_master = devm_clk_get(dev, "master");
620 if (PTR_ERR(data->clk_master) == -ENOENT)
621 data->clk_master = NULL;
622 else if (IS_ERR(data->clk_master))
623 return PTR_ERR(data->clk_master);
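The probe fragments above repeat one idiom four times, for the "sysmmu", "aclk", "pclk" and "master" clocks: a clock that is simply absent (-ENOENT) is recorded as NULL and treated as optional, while any other error aborts the probe. A hypothetical helper (not part of the driver) naming that idiom; the kernel's devm_clk_get_optional() offers essentially the same NULL-for-missing behaviour:

/* Hypothetical helper, shown only to describe the pattern used above. */
static struct clk *sysmmu_get_optional_clk(struct device *dev, const char *name)
{
	struct clk *clk = devm_clk_get(dev, name);

	if (PTR_ERR(clk) == -ENOENT)
		return NULL;	/* clock not described in DT: treat as optional */

	return clk;		/* a valid clock, or an ERR_PTR for the caller to propagate */
}

The consistency check at line 614 then only has to verify that either the combined "sysmmu" clock or both "aclk" and "pclk" are present.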
625 data->sysmmu = dev;
626 spin_lock_init(&data->lock);
628 ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
629 dev_name(data->sysmmu));
633 iommu_device_set_ops(&data->iommu, &exynos_iommu_ops);
634 iommu_device_set_fwnode(&data->iommu, &dev->of_node->fwnode);
636 ret = iommu_device_register(&data->iommu);
640 platform_set_drvdata(pdev, data);
642 __sysmmu_get_version(data);
644 if (MMU_MAJ_VER(data->version) < 5) {
667 iommu_device_sysfs_remove(&data->iommu);
673 struct sysmmu_drvdata *data = dev_get_drvdata(dev);
674 struct device *master = data->master;
680 if (data->domain) {
681 dev_dbg(data->sysmmu, "saving state\n");
682 __sysmmu_disable(data);
691 struct sysmmu_drvdata *data = dev_get_drvdata(dev);
692 struct device *master = data->master;
698 if (data->domain) {
699 dev_dbg(data->sysmmu, "restoring state\n");
700 __sysmmu_enable(data);
801 struct sysmmu_drvdata *data, *next;
809 list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
810 spin_lock(&data->lock);
811 __sysmmu_disable(data);
812 data->pgtable = 0;
813 data->domain = NULL;
814 list_del_init(&data->domain_node);
815 spin_unlock(&data->lock);
847 struct sysmmu_drvdata *data, *next;
855 list_for_each_entry(data, &owner->controllers, owner_node) {
856 pm_runtime_get_noresume(data->sysmmu);
857 if (pm_runtime_active(data->sysmmu))
858 __sysmmu_disable(data);
859 pm_runtime_put(data->sysmmu);
863 list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
864 spin_lock(&data->lock);
865 data->pgtable = 0;
866 data->domain = NULL;
867 list_del_init(&data->domain_node);
868 spin_unlock(&data->lock);
884 struct sysmmu_drvdata *data;
897 list_for_each_entry(data, &owner->controllers, owner_node) {
898 spin_lock(&data->lock);
899 data->pgtable = pagetable;
900 data->domain = domain;
901 list_add_tail(&data->domain_node, &domain->clients);
902 spin_unlock(&data->lock);
907 list_for_each_entry(data, &owner->controllers, owner_node) {
908 pm_runtime_get_noresume(data->sysmmu);
909 if (pm_runtime_active(data->sysmmu))
910 __sysmmu_enable(data);
911 pm_runtime_put(data->sysmmu);
968 struct sysmmu_drvdata *data;
971 list_for_each_entry(data, &domain->clients, domain_node)
972 sysmmu_tlb_invalidate_flpdcache(data, iova);
1005 struct sysmmu_drvdata *data;
1010 list_for_each_entry(data, &domain->clients, domain_node)
1011 sysmmu_tlb_invalidate_flpdcache(data, iova);
1123 struct sysmmu_drvdata *data;
1128 list_for_each_entry(data, &domain->clients, domain_node)
1129 sysmmu_tlb_invalidate_entry(data, iova, size);
1245 struct sysmmu_drvdata *data;
1250 list_for_each_entry(data, &owner->controllers, owner_node) {
1256 data->link = device_link_add(dev, data->sysmmu,
1262 data = list_first_entry(&owner->controllers,
1265 return &data->iommu;
1271 struct sysmmu_drvdata *data;
1287 list_for_each_entry(data, &owner->controllers, owner_node)
1288 device_link_del(data->link);
1296 struct sysmmu_drvdata *data, *entry;
1301 data = platform_get_drvdata(sysmmu);
1302 if (!data) {
1320 if (entry == data)
1323 list_add_tail(&data->owner_node, &owner->controllers);
1324 data->master = dev;