Lines matching refs: smmu (identifier cross-reference over the Linux Tegra SMMU driver, drivers/iommu/tegra-smmu.c; each line below carries its line number in that file)
24 struct tegra_smmu *smmu;
54 struct tegra_smmu *smmu;
70 static inline void smmu_writel(struct tegra_smmu *smmu, u32 value,
73 writel(value, smmu->regs + offset);
76 static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
78 return readl(smmu->regs + offset);
87 #define SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \
88 ((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)
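The two accessors above (lines 70-78) are complete except for the wrapped parameter and braces; a sketch with the elided pieces restored (nothing here beyond what the listing already implies):

static inline void smmu_writel(struct tegra_smmu *smmu, u32 value,
                               unsigned long offset)
{
        /* All SMMU registers live inside the memory controller's MMIO
         * window, so offsets are relative to smmu->regs. */
        writel(value, smmu->regs + offset);
}

static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
{
        return readl(smmu->regs + offset);
}

SMMU_TLB_CONFIG_ACTIVE_LINES() (lines 87-88) clamps the SoC's TLB line count to the register field captured in smmu->tlb_mask, which is computed at line 1111 below.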
166 static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
169 return (addr & smmu->pfn_mask) == addr;
172 static dma_addr_t smmu_pde_to_dma(struct tegra_smmu *smmu, u32 pde)
174 return (dma_addr_t)(pde & smmu->pfn_mask) << 12;
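The listing drops the first statement of smmu_dma_addr_valid(): in the upstream driver the address is shifted down to a page frame number before the mask check, which is the exact inverse of smmu_pde_to_dma(). A sketch with that line restored:

static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
{
        /* Compare at PFN granularity: pfn_mask (line 1107) covers the
         * PFNs representable with the SoC's address bits. */
        addr >>= 12;
        return (addr & smmu->pfn_mask) == addr;
}

static dma_addr_t smmu_pde_to_dma(struct tegra_smmu *smmu, u32 pde)
{
        /* A PDE stores the page table's PFN in its low bits; shift it
         * back up to a byte-granular DMA address. */
        return (dma_addr_t)(pde & smmu->pfn_mask) << 12;
}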
177 static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
179 smmu_writel(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);
182 static inline void smmu_flush_ptc(struct tegra_smmu *smmu, dma_addr_t dma,
187 offset &= ~(smmu->mc->soc->atom_size - 1);
189 if (smmu->mc->soc->num_address_bits > 32) {
195 smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
199 smmu_writel(smmu, value, SMMU_PTC_FLUSH);
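smmu_flush_ptc() (lines 182-199) is shown with its core logic missing. What the elided lines do: round the offset down to a memory-controller atom, and, on SoCs with more than 32 address bits, latch the upper address bits into SMMU_PTC_FLUSH_HI before triggering the address-match flush. A sketch following the upstream shape (upstream additionally guards the high-bits shift behind CONFIG_ARCH_DMA_ADDR_T_64BIT, writing 0 otherwise; SMMU_PTC_FLUSH_HI_MASK follows the upstream name, treat the exact constant as an assumption):

static inline void smmu_flush_ptc(struct tegra_smmu *smmu, dma_addr_t dma,
                                  unsigned long offset)
{
        u32 value;

        /* PTC flushes operate on MC atoms, not bytes. */
        offset &= ~(smmu->mc->soc->atom_size - 1);

        if (smmu->mc->soc->num_address_bits > 32) {
                value = (dma >> 32) & SMMU_PTC_FLUSH_HI_MASK;
                smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
        }

        value = (dma + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
        smmu_writel(smmu, value, SMMU_PTC_FLUSH);
}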
202 static inline void smmu_flush_tlb(struct tegra_smmu *smmu)
204 smmu_writel(smmu, SMMU_TLB_FLUSH_VA_MATCH_ALL, SMMU_TLB_FLUSH);
207 static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
212 if (smmu->soc->num_asids == 4)
218 smmu_writel(smmu, value, SMMU_TLB_FLUSH);
221 static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
227 if (smmu->soc->num_asids == 4)
233 smmu_writel(smmu, value, SMMU_TLB_FLUSH);
236 static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
242 if (smmu->soc->num_asids == 4)
248 smmu_writel(smmu, value, SMMU_TLB_FLUSH);
251 static inline void smmu_flush(struct tegra_smmu *smmu)
253 smmu_readl(smmu, SMMU_PTB_ASID);
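Lines 207-248 are three variants of the same pattern; the branch the listing keeps (num_asids == 4) selects the field encoding: a 2-bit ASID at bit 29 on SoCs with four ASIDs, a 7-bit ASID at bit 24 otherwise. smmu_flush() (lines 251-253) is simply a read-back of SMMU_PTB_ASID that forces earlier posted register writes to complete. A sketch of the _asid variant (_section and _group differ only in the VA-match bits they OR in):

static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
                                       unsigned long asid)
{
        u32 value;

        /* Field position depends on how many ASIDs the SoC supports. */
        if (smmu->soc->num_asids == 4)
                value = (asid & 0x3) << 29;
        else
                value = (asid & 0x7f) << 24;

        value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_MATCH_ALL;
        smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}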
256 static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
260 id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
261 if (id >= smmu->soc->num_asids)
264 set_bit(id, smmu->asids);
270 static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
272 clear_bit(id, smmu->asids);
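ASID management (lines 256-272) is a plain bitmap. Note that find_first_zero_bit() plus set_bit() is not atomic as a pair; callers serialize through smmu->lock (see line 414). Reconstructed from the lines shown:

static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
{
        unsigned int id;

        id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
        if (id >= smmu->soc->num_asids)
                return -ENOSPC;

        set_bit(id, smmu->asids);
        *idp = id;

        return 0;
}

static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
{
        clear_bit(id, smmu->asids);
}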
332 tegra_smmu_find_swgroup(struct tegra_smmu *smmu, unsigned int swgroup)
337 for (i = 0; i < smmu->soc->num_swgroups; i++) {
338 if (smmu->soc->swgroups[i].swgroup == swgroup) {
339 group = &smmu->soc->swgroups[i];
347 static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
354 group = tegra_smmu_find_swgroup(smmu, swgroup);
356 value = smmu_readl(smmu, group->reg);
360 smmu_writel(smmu, value, group->reg);
368 for (i = 0; i < smmu->soc->num_clients; i++) {
369 const struct tegra_mc_client *client = &smmu->soc->clients[i];
374 value = smmu_readl(smmu, client->regs.smmu.reg);
375 value |= BIT(client->regs.smmu.bit);
376 smmu_writel(smmu, value, client->regs.smmu.reg);
380 static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
387 group = tegra_smmu_find_swgroup(smmu, swgroup);
389 value = smmu_readl(smmu, group->reg);
393 smmu_writel(smmu, value, group->reg);
396 for (i = 0; i < smmu->soc->num_clients; i++) {
397 const struct tegra_mc_client *client = &smmu->soc->clients[i];
402 value = smmu_readl(smmu, client->regs.smmu.reg);
403 value &= ~BIT(client->regs.smmu.bit);
404 smmu_writel(smmu, value, client->regs.smmu.reg);
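tegra_smmu_enable() and tegra_smmu_disable() (lines 347-404) are mirror images: both first hit the swgroup's ASID register, then walk every MC client belonging to that swgroup and flip its per-client translation-enable bit. A sketch of the enable side (SMMU_ASID_MASK and SMMU_ASID_VALUE follow the upstream macro names for the ASID field; treat them as assumptions here):

static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
                              unsigned int asid)
{
        const struct tegra_smmu_swgroup *group;
        unsigned int i;
        u32 value;

        /* Point the swgroup at the address space's ASID... */
        group = tegra_smmu_find_swgroup(smmu, swgroup);
        if (group) {
                value = smmu_readl(smmu, group->reg);
                value &= ~SMMU_ASID_MASK;
                value |= SMMU_ASID_VALUE(asid);
                value |= SMMU_ASID_ENABLE;
                smmu_writel(smmu, value, group->reg);
        }

        /* ...then enable translation for each client in the swgroup. */
        for (i = 0; i < smmu->soc->num_clients; i++) {
                const struct tegra_mc_client *client = &smmu->soc->clients[i];

                if (client->swgroup != swgroup)
                        continue;

                value = smmu_readl(smmu, client->regs.smmu.reg);
                value |= BIT(client->regs.smmu.bit);
                smmu_writel(smmu, value, client->regs.smmu.reg);
        }
}

The disable path clears SMMU_ASID_ENABLE and the per-client bits instead (lines 403-404).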
408 static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
414 mutex_lock(&smmu->lock);
421 as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
423 if (dma_mapping_error(smmu->dev, as->pd_dma)) {
429 if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
434 err = tegra_smmu_alloc_asid(smmu, &as->id);
438 smmu_flush_ptc(smmu, as->pd_dma, 0);
439 smmu_flush_tlb_asid(smmu, as->id);
441 smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
443 smmu_writel(smmu, value, SMMU_PTB_DATA);
444 smmu_flush(smmu);
446 as->smmu = smmu;
449 mutex_unlock(&smmu->lock);
454 dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
456 mutex_unlock(&smmu->lock);
461 static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
464 mutex_lock(&smmu->lock);
467 mutex_unlock(&smmu->lock);
471 tegra_smmu_free_asid(smmu, as->id);
473 dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
475 as->smmu = NULL;
477 mutex_unlock(&smmu->lock);
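tegra_smmu_as_prepare() and tegra_smmu_as_unprepare() (lines 408-477) reference-count an address space: only the first attach maps the page directory, claims an ASID and programs the page-table-base registers, and only the last detach undoes it. A sketch of the prepare skeleton under smmu->lock (as->use_count, as->attr and SMMU_PTB_DATA_VALUE() follow the upstream naming; treat them as assumptions):

static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
                                 struct tegra_smmu_as *as)
{
        u32 value;
        int err = 0;

        mutex_lock(&smmu->lock);

        /* Already live on this SMMU: just bump the reference count. */
        if (as->use_count > 0) {
                as->use_count++;
                goto unlock;
        }

        as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
                                  DMA_TO_DEVICE);
        if (dma_mapping_error(smmu->dev, as->pd_dma)) {
                err = -ENOMEM;
                goto unlock;
        }

        /* The hardware cannot reach a page directory outside its
         * addressable PFN range. */
        if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
                err = -ENOMEM;
                goto err_unmap;
        }

        err = tegra_smmu_alloc_asid(smmu, &as->id);
        if (err < 0)
                goto err_unmap;

        smmu_flush_ptc(smmu, as->pd_dma, 0);
        smmu_flush_tlb_asid(smmu, as->id);

        smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
        value = SMMU_PTB_DATA_VALUE(as->pd_dma, as->attr);
        smmu_writel(smmu, value, SMMU_PTB_DATA);
        smmu_flush(smmu);

        as->smmu = smmu;
        as->use_count++;

unlock:
        mutex_unlock(&smmu->lock);
        return err;

err_unmap:
        dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
        mutex_unlock(&smmu->lock);
        return err;
}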
484 struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
493 err = tegra_smmu_as_prepare(smmu, as);
497 tegra_smmu_enable(smmu, fwspec->ids[index], as->id);
507 tegra_smmu_disable(smmu, fwspec->ids[index], as->id);
508 tegra_smmu_as_unprepare(smmu, as);
519 struct tegra_smmu *smmu = as->smmu;
526 tegra_smmu_disable(smmu, fwspec->ids[index], as->id);
527 tegra_smmu_as_unprepare(smmu, as);
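Attach and detach (lines 484-527) iterate over the device's firmware-assigned stream IDs: every entry in fwspec->ids names a swgroup, and each one gets its own prepare/enable pair, with full unwinding on failure. A sketch of the attach side (to_smmu_as() is the driver's container_of() helper; an assumption here):

static int tegra_smmu_attach_dev(struct iommu_domain *domain,
                                 struct device *dev)
{
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
        struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
        struct tegra_smmu_as *as = to_smmu_as(domain);
        unsigned int index;
        int err;

        if (!fwspec)
                return -ENOENT;

        for (index = 0; index < fwspec->num_ids; index++) {
                err = tegra_smmu_as_prepare(smmu, as);
                if (err)
                        goto disable;

                tegra_smmu_enable(smmu, fwspec->ids[index], as->id);
        }

        if (index == 0)
                return -ENODEV;

        return 0;

disable:
        /* Unwind the swgroups already enabled before the failure. */
        while (index--) {
                tegra_smmu_disable(smmu, fwspec->ids[index], as->id);
                tegra_smmu_as_unprepare(smmu, as);
        }

        return err;
}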
535 struct tegra_smmu *smmu = as->smmu;
543 dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
547 smmu_flush_ptc(smmu, as->pd_dma, offset);
548 smmu_flush_tlb_section(smmu, as->id, iova);
549 smmu_flush(smmu);
563 struct tegra_smmu *smmu = as->smmu;
572 *dmap = smmu_pde_to_dma(smmu, pd[pd_index]);
581 struct tegra_smmu *smmu = as->smmu;
586 dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
588 if (dma_mapping_error(smmu->dev, dma)) {
593 if (!smmu_dma_addr_valid(smmu, dma)) {
594 dma_unmap_page(smmu->dev, dma, SMMU_SIZE_PT,
609 *dmap = smmu_pde_to_dma(smmu, pd[pde]);
632 struct tegra_smmu *smmu = as->smmu;
634 dma_addr_t pte_dma = smmu_pde_to_dma(smmu, pd[pde]);
638 dma_unmap_page(smmu->dev, pte_dma, SMMU_SIZE_PT, DMA_TO_DEVICE);
647 struct tegra_smmu *smmu = as->smmu;
652 dma_sync_single_range_for_device(smmu->dev, pte_dma, offset,
654 smmu_flush_ptc(smmu, pte_dma, offset);
655 smmu_flush_tlb_group(smmu, as->id, iova);
656 smmu_flush(smmu);
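Lines 632-656 cover page-table teardown and the PTE write path. The flush sequence at lines 652-656 is the critical ordering for every PTE update: push the CPU's write out to memory, evict the stale page-table-cache line, drop the TLB entry, then read back so the flushes are known to have landed. A sketch of the writer those lines belong to (tegra_smmu_set_pte() follows the upstream name; the 4-byte sync length is the size of one PTE):

static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
                               u32 *pte, dma_addr_t pte_dma, u32 val)
{
        struct tegra_smmu *smmu = as->smmu;
        unsigned long offset = offset_in_page(pte);

        *pte = val;

        dma_sync_single_range_for_device(smmu->dev, pte_dma, offset,
                                         4, DMA_TO_DEVICE);
        smmu_flush_ptc(smmu, pte_dma, offset);
        smmu_flush_tlb_group(smmu, as->id, iova);
        smmu_flush(smmu);
}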
792 pfn = *pte & as->smmu->pfn_mask;
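Line 792 is the heart of iova_to_phys: a valid PTE is just a PFN (masked by pfn_mask) plus attribute bits. A sketch of the surrounding lookup (tegra_smmu_pte_lookup() is the driver's walker, named per upstream; the in-page offset handling varies between kernel versions, so treat the return expression as an assumption):

static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
                                           dma_addr_t iova)
{
        struct tegra_smmu_as *as = to_smmu_as(domain);
        unsigned long pfn;
        dma_addr_t pte_dma;
        u32 *pte;

        pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
        if (!pte || !*pte)
                return 0;

        pfn = *pte & as->smmu->pfn_mask;

        return PFN_PHYS(pfn) + (iova & ~PAGE_MASK);
}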
812 return mc->smmu;
815 static int tegra_smmu_configure(struct tegra_smmu *smmu, struct device *dev,
818 const struct iommu_ops *ops = smmu->iommu.ops;
840 struct tegra_smmu *smmu = NULL;
847 smmu = tegra_smmu_find(args.np);
848 if (smmu) {
849 err = tegra_smmu_configure(smmu, dev, &args);
861 smmu = dev_iommu_priv_get(dev);
862 if (!smmu)
865 return &smmu->iommu;
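probe_device (lines 840-865) walks the device's "iommus" phandles; specifiers that resolve to a Tegra SMMU are handed to tegra_smmu_configure(), which programs the fwspec and stashes the SMMU pointer in the device's IOMMU private data (line 960). A sketch of the loop:

static struct iommu_device *tegra_smmu_probe_device(struct device *dev)
{
        struct device_node *np = dev->of_node;
        struct tegra_smmu *smmu = NULL;
        struct of_phandle_args args;
        unsigned int index = 0;
        int err;

        while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells",
                                           index, &args)) {
                smmu = tegra_smmu_find(args.np);
                if (smmu) {
                        err = tegra_smmu_configure(smmu, dev, &args);
                        if (err < 0) {
                                of_node_put(args.np);
                                return ERR_PTR(err);
                        }
                }

                of_node_put(args.np);
                index++;
        }

        /* Nothing stashed by tegra_smmu_configure() means no SMMU. */
        smmu = dev_iommu_priv_get(dev);
        if (!smmu)
                return ERR_PTR(-ENODEV);

        return &smmu->iommu;
}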
869 tegra_smmu_find_group(struct tegra_smmu *smmu, unsigned int swgroup)
873 for (i = 0; i < smmu->soc->num_groups; i++)
874 for (j = 0; j < smmu->soc->groups[i].num_swgroups; j++)
875 if (smmu->soc->groups[i].swgroups[j] == swgroup)
876 return &smmu->soc->groups[i];
884 struct tegra_smmu *smmu = group->smmu;
886 mutex_lock(&smmu->lock);
888 mutex_unlock(&smmu->lock);
894 struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
901 soc = tegra_smmu_find_group(smmu, swgroup);
903 mutex_lock(&smmu->lock);
906 list_for_each_entry(group, &smmu->groups, list)
909 mutex_unlock(&smmu->lock);
913 group = devm_kzalloc(smmu->dev, sizeof(*group), GFP_KERNEL);
915 mutex_unlock(&smmu->lock);
921 group->smmu = smmu;
930 devm_kfree(smmu->dev, group);
931 mutex_unlock(&smmu->lock);
938 list_add_tail(&group->list, &smmu->groups);
939 mutex_unlock(&smmu->lock);
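device_group (lines 884-939) caches one iommu_group per swgroup on smmu->groups: clients sharing a swgroup share an ASID register, so they cannot be isolated from one another and must land in the same group. A condensed sketch of the lookup-or-allocate pattern under smmu->lock (error handling and the group release callback are trimmed; iommu_group_ref_get() takes a reference on the cached group):

static struct iommu_group *tegra_smmu_device_group(struct device *dev)
{
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
        struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
        const struct tegra_smmu_group_soc *soc;
        unsigned int swgroup = fwspec->ids[0];
        struct tegra_smmu_group *group;

        soc = tegra_smmu_find_group(smmu, swgroup);

        mutex_lock(&smmu->lock);

        /* Hit: hand out a new reference to the cached group. */
        list_for_each_entry(group, &smmu->groups, list)
                if (group->swgroup == swgroup || (soc && group->soc == soc)) {
                        struct iommu_group *grp =
                                iommu_group_ref_get(group->group);

                        mutex_unlock(&smmu->lock);
                        return grp;
                }

        /* Miss: allocate, remember, and return a fresh group. */
        group = devm_kzalloc(smmu->dev, sizeof(*group), GFP_KERNEL);
        if (!group) {
                mutex_unlock(&smmu->lock);
                return NULL;
        }

        group->swgroup = swgroup;
        group->smmu = smmu;
        group->soc = soc;
        group->group = iommu_group_alloc();
        if (IS_ERR(group->group)) {
                devm_kfree(smmu->dev, group);
                mutex_unlock(&smmu->lock);
                return NULL;
        }

        list_add_tail(&group->list, &smmu->groups);
        mutex_unlock(&smmu->lock);

        return group->group;
}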
960 dev_iommu_priv_set(dev, mc->smmu);
998 struct tegra_smmu *smmu = s->private;
1005 for (i = 0; i < smmu->soc->num_swgroups; i++) {
1006 const struct tegra_smmu_swgroup *group = &smmu->soc->swgroups[i];
1010 value = smmu_readl(smmu, group->reg);
1030 struct tegra_smmu *smmu = s->private;
1037 for (i = 0; i < smmu->soc->num_clients; i++) {
1038 const struct tegra_mc_client *client = &smmu->soc->clients[i];
1041 value = smmu_readl(smmu, client->regs.smmu.reg);
1043 if (value & BIT(client->regs.smmu.bit))
1056 static void tegra_smmu_debugfs_init(struct tegra_smmu *smmu)
1058 smmu->debugfs = debugfs_create_dir("smmu", NULL);
1059 if (!smmu->debugfs)
1062 debugfs_create_file("swgroups", S_IRUGO, smmu->debugfs, smmu,
1064 debugfs_create_file("clients", S_IRUGO, smmu->debugfs, smmu,
1068 static void tegra_smmu_debugfs_exit(struct tegra_smmu *smmu)
1070 debugfs_remove_recursive(smmu->debugfs);
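The debugfs pair (lines 998-1070) re-reads the same registers the enable/disable paths program, so "swgroups" and "clients" show live hardware state rather than driver bookkeeping. A sketch of the swgroups reader (the column layout and the SMMU_ASID_* field macros are assumptions; the show function is wired into the fops passed at lines 1062-1064):

static int tegra_smmu_swgroups_show(struct seq_file *s, void *data)
{
        struct tegra_smmu *smmu = s->private;
        unsigned int i;
        u32 value;

        seq_puts(s, "swgroup    enabled  ASID\n");
        seq_puts(s, "------------------------\n");

        for (i = 0; i < smmu->soc->num_swgroups; i++) {
                const struct tegra_smmu_swgroup *group =
                        &smmu->soc->swgroups[i];

                value = smmu_readl(smmu, group->reg);

                seq_printf(s, "%-9s  %-7s  %#04x\n", group->name,
                           value & SMMU_ASID_ENABLE ? "yes" : "no",
                           value & SMMU_ASID_MASK);
        }

        return 0;
}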
1077 struct tegra_smmu *smmu;
1081 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
1082 if (!smmu)
1093 mc->smmu = smmu;
1095 smmu->asids = devm_bitmap_zalloc(dev, soc->num_asids, GFP_KERNEL);
1096 if (!smmu->asids)
1099 INIT_LIST_HEAD(&smmu->groups);
1100 mutex_init(&smmu->lock);
1102 smmu->regs = mc->regs;
1103 smmu->soc = soc;
1104 smmu->dev = dev;
1105 smmu->mc = mc;
1107 smmu->pfn_mask =
1110 mc->soc->num_address_bits, smmu->pfn_mask);
1111 smmu->tlb_mask = (1 << fls(smmu->soc->num_tlb_lines)) - 1;
1112 dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
1113 smmu->tlb_mask);
1120 smmu_writel(smmu, value, SMMU_PTC_CONFIG);
1123 SMMU_TLB_CONFIG_ACTIVE_LINES(smmu);
1128 smmu_writel(smmu, value, SMMU_TLB_CONFIG);
1130 smmu_flush_ptc_all(smmu);
1131 smmu_flush_tlb(smmu);
1132 smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
1133 smmu_flush(smmu);
1137 err = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, dev_name(dev));
1141 err = iommu_device_register(&smmu->iommu, &tegra_smmu_ops, dev);
1143 iommu_device_sysfs_remove(&smmu->iommu);
1148 tegra_smmu_debugfs_init(smmu);
1150 return smmu;
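tegra_smmu_probe() derives both masks before touching hardware, then brings the SMMU up in a fixed order: program the PTC and TLB configuration, flush both caches while translation is still off, set SMMU_CONFIG_ENABLE, and read back to flush the posted writes (lines 1118-1133). The mask derivation, reconstructed around the fragments at lines 1107-1113:

        /* One bit per addressable PFN: e.g. 34 address bits with 4 KiB
         * pages give (1 << (34 - 12)) - 1 = 0x3fffff. */
        smmu->pfn_mask =
                BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;
        dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
                mc->soc->num_address_bits, smmu->pfn_mask);

        /* Smallest all-ones mask covering num_tlb_lines; consumed by
         * SMMU_TLB_CONFIG_ACTIVE_LINES() at line 87 above. */
        smmu->tlb_mask = (1 << fls(smmu->soc->num_tlb_lines)) - 1;
        dev_dbg(dev, "TLB lines: %u, mask: %#lx\n",
                smmu->soc->num_tlb_lines, smmu->tlb_mask);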
1153 void tegra_smmu_remove(struct tegra_smmu *smmu)
1155 iommu_device_unregister(&smmu->iommu);
1156 iommu_device_sysfs_remove(&smmu->iommu);
1159 tegra_smmu_debugfs_exit(smmu);