Lines matching refs:smmu — cross-reference hits for the identifier "smmu" in the Tegra SMMU driver (tegra-smmu.c). Each number is a line in the source file; intervening lines that do not mention "smmu" are omitted.

23 	struct tegra_smmu *smmu;
53 struct tegra_smmu *smmu;
69 static inline void smmu_writel(struct tegra_smmu *smmu, u32 value,
72 writel(value, smmu->regs + offset);
75 static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
77 return readl(smmu->regs + offset);
86 #define SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \
87 ((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)
165 static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
168 return (addr & smmu->pfn_mask) == addr;
171 static dma_addr_t smmu_pde_to_dma(struct tegra_smmu *smmu, u32 pde)
173 return (dma_addr_t)(pde & smmu->pfn_mask) << 12;
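The two helpers above bound what the hardware can address: smmu_dma_addr_valid() checks that a DMA address fits in the SMMU's PFN range, and smmu_pde_to_dma() turns a page-directory entry back into the DMA address of the page table it points to. Since the listing elides lines that do not mention "smmu", here is a sketch of the likely full validity check, plus a worked example with a hypothetical mask:

    static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
    {
            addr >>= 12;    /* elided line: reduce to a page frame number */
            return (addr & smmu->pfn_mask) == addr;
    }

    /*
     * With a hypothetical pfn_mask of 0x3fffff (34 address bits, 4 KiB
     * pages): 0x12345000 >> 12 = 0x12345, unchanged by the mask -> valid;
     * (1ULL << 34) >> 12 = 1 << 22, cleared by the mask -> rejected.
     * smmu_pde_to_dma() is the inverse direction: it keeps the PFN bits
     * of the PDE and shifts left by 12, dropping any attribute bits.
     */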
176 static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
178 smmu_writel(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);
181 static inline void smmu_flush_ptc(struct tegra_smmu *smmu, dma_addr_t dma,
186 offset &= ~(smmu->mc->soc->atom_size - 1);
188 if (smmu->mc->soc->num_address_bits > 32) {
194 smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
198 smmu_writel(smmu, value, SMMU_PTC_FLUSH);
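Only part of smmu_flush_ptc() survives the filter. A sketch of a plausible full body consistent with the visible hits: align the offset down to the memory controller's atom size, write the upper address bits to SMMU_PTC_FLUSH_HI on SoCs with more than 32 address bits, then trigger the flush by writing the low word together with an address-match type (the HI-register field width is an assumption):

    static inline void smmu_flush_ptc(struct tegra_smmu *smmu, dma_addr_t dma,
                                      unsigned long offset)
    {
            u32 value;

            /* flush whole atoms: round down to the MC atom size */
            offset &= ~(smmu->mc->soc->atom_size - 1);

            if (smmu->mc->soc->num_address_bits > 32) {
                    /* upper bits go out first, via the HI register */
                    value = (dma >> 32) & SMMU_PTC_FLUSH_HI_MASK;
                    smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
            }

            /* the low-word write triggers the address-match flush */
            value = (dma + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
            smmu_writel(smmu, value, SMMU_PTC_FLUSH);
    }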
201 static inline void smmu_flush_tlb(struct tegra_smmu *smmu)
203 smmu_writel(smmu, SMMU_TLB_FLUSH_VA_MATCH_ALL, SMMU_TLB_FLUSH);
206 static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
211 if (smmu->soc->num_asids == 4)
217 smmu_writel(smmu, value, SMMU_TLB_FLUSH);
220 static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
226 if (smmu->soc->num_asids == 4)
232 smmu_writel(smmu, value, SMMU_TLB_FLUSH);
235 static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
241 if (smmu->soc->num_asids == 4)
247 smmu_writel(smmu, value, SMMU_TLB_FLUSH);
250 static inline void smmu_flush(struct tegra_smmu *smmu)
252 smmu_readl(smmu, SMMU_PTB_ASID);
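All three TLB-flush variants share the elided branch visible at their num_asids == 4 hits: SoCs with only four ASIDs use a narrower, differently placed ASID field in SMMU_TLB_FLUSH. A sketch of the ASID-match variant under that reading (the exact bit positions are assumptions; the section and group variants substitute a VA-match encoding for SMMU_TLB_FLUSH_VA_MATCH_ALL). smmu_flush() then simply reads a register back, forcing the preceding posted MMIO writes to complete:

    static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
                                           unsigned long asid)
    {
            u32 value;

            if (smmu->soc->num_asids == 4)
                    value = (asid & 0x3) << 29;     /* assumed 2-bit field */
            else
                    value = (asid & 0x7f) << 24;    /* assumed 7-bit field */

            value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_MATCH_ALL;
            smmu_writel(smmu, value, SMMU_TLB_FLUSH);
    }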
255 static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
259 mutex_lock(&smmu->lock);
261 id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
262 if (id >= smmu->soc->num_asids) {
263 mutex_unlock(&smmu->lock);
267 set_bit(id, smmu->asids);
270 mutex_unlock(&smmu->lock);
274 static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
276 mutex_lock(&smmu->lock);
277 clear_bit(id, smmu->asids);
278 mutex_unlock(&smmu->lock);
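ASID management is a mutex-protected bitmap: find the first clear bit, claim it, and hand back its index; freeing just clears the bit again. The listing drops the failure return and the output assignment; a sketch of the complete allocator (the -ENOSPC error code is an assumption):

    static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
    {
            unsigned long id;

            mutex_lock(&smmu->lock);

            id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
            if (id >= smmu->soc->num_asids) {
                    mutex_unlock(&smmu->lock);
                    return -ENOSPC;         /* every ASID is in use */
            }

            set_bit(id, smmu->asids);
            *idp = id;

            mutex_unlock(&smmu->lock);
            return 0;
    }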
343 tegra_smmu_find_swgroup(struct tegra_smmu *smmu, unsigned int swgroup)
348 for (i = 0; i < smmu->soc->num_swgroups; i++) {
349 if (smmu->soc->swgroups[i].swgroup == swgroup) {
350 group = &smmu->soc->swgroups[i];
358 static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
365 group = tegra_smmu_find_swgroup(smmu, swgroup);
367 value = smmu_readl(smmu, group->reg);
371 smmu_writel(smmu, value, group->reg);
379 for (i = 0; i < smmu->soc->num_clients; i++) {
380 const struct tegra_mc_client *client = &smmu->soc->clients[i];
385 value = smmu_readl(smmu, client->smmu.reg);
386 value |= BIT(client->smmu.bit);
387 smmu_writel(smmu, value, client->smmu.reg);
391 static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
398 group = tegra_smmu_find_swgroup(smmu, swgroup);
400 value = smmu_readl(smmu, group->reg);
404 smmu_writel(smmu, value, group->reg);
407 for (i = 0; i < smmu->soc->num_clients; i++) {
408 const struct tegra_mc_client *client = &smmu->soc->clients[i];
413 value = smmu_readl(smmu, client->smmu.reg);
414 value &= ~BIT(client->smmu.bit);
415 smmu_writel(smmu, value, client->smmu.reg);
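tegra_smmu_enable() and tegra_smmu_disable() are mirror images: after updating the swgroup's ASID register, both walk the SoC client table and read-modify-write one enable bit per client, skipping clients outside the requested swgroup (the skip test is elided above). A sketch of the shared loop, folded together with a hypothetical enable flag for illustration:

    for (i = 0; i < smmu->soc->num_clients; i++) {
            const struct tegra_mc_client *client = &smmu->soc->clients[i];

            if (client->swgroup != swgroup) /* elided: filter by swgroup */
                    continue;

            value = smmu_readl(smmu, client->smmu.reg);
            if (enable)
                    value |= BIT(client->smmu.bit);
            else
                    value &= ~BIT(client->smmu.bit);
            smmu_writel(smmu, value, client->smmu.reg);
    }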
419 static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
430 as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
432 if (dma_mapping_error(smmu->dev, as->pd_dma))
436 if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
441 err = tegra_smmu_alloc_asid(smmu, &as->id);
445 smmu_flush_ptc(smmu, as->pd_dma, 0);
446 smmu_flush_tlb_asid(smmu, as->id);
448 smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
450 smmu_writel(smmu, value, SMMU_PTB_DATA);
451 smmu_flush(smmu);
453 as->smmu = smmu;
459 dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
463 static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
469 tegra_smmu_free_asid(smmu, as->id);
471 dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
473 as->smmu = NULL;
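tegra_smmu_as_prepare() attaches an address space to the hardware in a fixed order: map the page directory for DMA, reject addresses the SMMU cannot reach, allocate an ASID, prime the PTC and TLB for that ASID, then program the page-table base registers and flush. A condensed sketch of that order (use-count handling is omitted and the SMMU_PTB_DATA encoding macro is an assumption):

    as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
                              DMA_TO_DEVICE);
    if (dma_mapping_error(smmu->dev, as->pd_dma))
            return -ENOMEM;

    /* the hardware cannot address pages beyond its PFN range */
    if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
            err = -ENOMEM;
            goto err_unmap;
    }

    err = tegra_smmu_alloc_asid(smmu, &as->id);
    if (err < 0)
            goto err_unmap;

    smmu_flush_ptc(smmu, as->pd_dma, 0);
    smmu_flush_tlb_asid(smmu, as->id);

    smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
    value = SMMU_PTB_DATA_VALUE(as->pd_dma, as->attr); /* assumed macro */
    smmu_writel(smmu, value, SMMU_PTB_DATA);
    smmu_flush(smmu);

    as->smmu = smmu;
    return 0;

    err_unmap:
            dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
            return err;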
479 struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
490 if (args.np != smmu->dev->of_node) {
497 err = tegra_smmu_as_prepare(smmu, as);
501 tegra_smmu_enable(smmu, swgroup, as->id);
515 struct tegra_smmu *smmu = as->smmu;
523 if (args.np != smmu->dev->of_node) {
530 tegra_smmu_disable(smmu, swgroup, as->id);
531 tegra_smmu_as_unprepare(smmu, as);
540 struct tegra_smmu *smmu = as->smmu;
548 dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
552 smmu_flush_ptc(smmu, as->pd_dma, offset);
553 smmu_flush_tlb_section(smmu, as->id, iova);
554 smmu_flush(smmu);
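Updating a page-directory entry takes three ordered steps, all visible in the hits above: write the entry, sync that CPU write to the device (the directory was mapped DMA_TO_DEVICE), then invalidate the hardware's cached copies, first in the page-table cache and then in the TLB section covering the iova. A sketch of the surrounding function under that reading (iova_pd_index() is an assumed helper):

    static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
                                   u32 value)
    {
            unsigned int pd_index = iova_pd_index(iova);
            struct tegra_smmu *smmu = as->smmu;
            u32 *pd = page_address(as->pd);
            unsigned long offset = pd_index * sizeof(*pd);

            pd[pd_index] = value;

            /* push the CPU write past the caches... */
            dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
                                             sizeof(*pd), DMA_TO_DEVICE);

            /* ...then drop stale copies held by the hardware */
            smmu_flush_ptc(smmu, as->pd_dma, offset);
            smmu_flush_tlb_section(smmu, as->id, iova);
            smmu_flush(smmu);
    }

The PTE path at lines 652-661 follows the same sync-then-flush pattern, with smmu_flush_tlb_group() in place of the section flush.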
568 struct tegra_smmu *smmu = as->smmu;
577 *dmap = smmu_pde_to_dma(smmu, pd[pd_index]);
586 struct tegra_smmu *smmu = as->smmu;
591 dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
593 if (dma_mapping_error(smmu->dev, dma)) {
598 if (!smmu_dma_addr_valid(smmu, dma)) {
599 dma_unmap_page(smmu->dev, dma, SMMU_SIZE_PT,
614 *dmap = smmu_pde_to_dma(smmu, pd[pde]);
637 struct tegra_smmu *smmu = as->smmu;
639 dma_addr_t pte_dma = smmu_pde_to_dma(smmu, pd[pde]);
643 dma_unmap_page(smmu->dev, pte_dma, SMMU_SIZE_PT, DMA_TO_DEVICE);
652 struct tegra_smmu *smmu = as->smmu;
657 dma_sync_single_range_for_device(smmu->dev, pte_dma, offset,
659 smmu_flush_ptc(smmu, pte_dma, offset);
660 smmu_flush_tlb_group(smmu, as->id, iova);
661 smmu_flush(smmu);
797 pfn = *pte & as->smmu->pfn_mask;
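The hit at 797 is the core of the iova-to-phys lookup: a PTE keeps a PFN in its low bits (attributes above), so masking with pfn_mask and shifting by the 4 KiB page size recovers the physical address. A sketch (the lookup and cast helpers are assumed names):

    static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
                                               dma_addr_t iova)
    {
            struct tegra_smmu_as *as = to_smmu_as(domain);   /* assumed */
            unsigned long pfn;
            dma_addr_t pte_dma;
            u32 *pte;

            pte = tegra_smmu_pte_lookup(as, iova, &pte_dma); /* assumed */
            if (!pte || !*pte)
                    return 0;

            pfn = *pte & as->smmu->pfn_mask;

            /* PFN back to bytes, plus the offset within the page */
            return ((phys_addr_t)pfn << 12) + (iova & 0xfffu);
    }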
815 return mc->smmu;
818 static int tegra_smmu_configure(struct tegra_smmu *smmu, struct device *dev,
821 const struct iommu_ops *ops = smmu->iommu.ops;
843 struct tegra_smmu *smmu = NULL;
850 smmu = tegra_smmu_find(args.np);
851 if (smmu) {
852 err = tegra_smmu_configure(smmu, dev, &args);
863 dev_iommu_priv_set(dev, smmu);
872 if (!smmu)
875 return &smmu->iommu;
884 tegra_smmu_find_group(struct tegra_smmu *smmu, unsigned int swgroup)
888 for (i = 0; i < smmu->soc->num_groups; i++)
889 for (j = 0; j < smmu->soc->groups[i].num_swgroups; j++)
890 if (smmu->soc->groups[i].swgroups[j] == swgroup)
891 return &smmu->soc->groups[i];
899 struct tegra_smmu *smmu = group->smmu;
901 mutex_lock(&smmu->lock);
903 mutex_unlock(&smmu->lock);
906 static struct iommu_group *tegra_smmu_group_get(struct tegra_smmu *smmu,
914 soc = tegra_smmu_find_group(smmu, swgroup);
916 mutex_lock(&smmu->lock);
919 list_for_each_entry(group, &smmu->groups, list)
922 mutex_unlock(&smmu->lock);
926 group = devm_kzalloc(smmu->dev, sizeof(*group), GFP_KERNEL);
928 mutex_unlock(&smmu->lock);
934 group->smmu = smmu;
939 devm_kfree(smmu->dev, group);
940 mutex_unlock(&smmu->lock);
947 list_add_tail(&group->list, &smmu->groups);
948 mutex_unlock(&smmu->lock);
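tegra_smmu_group_get() caches one iommu_group per swgroup on smmu->groups, everything under smmu->lock: return a reference if a matching entry already exists, otherwise allocate a tracking struct, create the group, and append it to the list, undoing the devm allocation if group creation fails. A condensed sketch of the body (the matching criteria and bookkeeping fields are simplified):

    mutex_lock(&smmu->lock);

    /* reuse the existing group for this swgroup, if any */
    list_for_each_entry(group, &smmu->groups, list)
            if (group->swgroup == swgroup) {
                    grp = iommu_group_ref_get(group->group);
                    mutex_unlock(&smmu->lock);
                    return grp;
            }

    group = devm_kzalloc(smmu->dev, sizeof(*group), GFP_KERNEL);
    if (!group) {
            mutex_unlock(&smmu->lock);
            return NULL;
    }

    group->swgroup = swgroup;
    group->smmu = smmu;

    group->group = iommu_group_alloc();
    if (IS_ERR(group->group)) {
            devm_kfree(smmu->dev, group);   /* roll back the allocation */
            mutex_unlock(&smmu->lock);
            return NULL;
    }

    list_add_tail(&group->list, &smmu->groups);
    mutex_unlock(&smmu->lock);

    return group->group;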
956 struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
959 group = tegra_smmu_group_get(smmu, fwspec->ids[0]);
1007 struct tegra_smmu *smmu = s->private;
1014 for (i = 0; i < smmu->soc->num_swgroups; i++) {
1015 const struct tegra_smmu_swgroup *group = &smmu->soc->swgroups[i];
1019 value = smmu_readl(smmu, group->reg);
1039 struct tegra_smmu *smmu = s->private;
1046 for (i = 0; i < smmu->soc->num_clients; i++) {
1047 const struct tegra_mc_client *client = &smmu->soc->clients[i];
1050 value = smmu_readl(smmu, client->smmu.reg);
1052 if (value & BIT(client->smmu.bit))
1065 static void tegra_smmu_debugfs_init(struct tegra_smmu *smmu)
1067 smmu->debugfs = debugfs_create_dir("smmu", NULL);
1068 if (!smmu->debugfs)
1071 debugfs_create_file("swgroups", S_IRUGO, smmu->debugfs, smmu,
1073 debugfs_create_file("clients", S_IRUGO, smmu->debugfs, smmu,
1077 static void tegra_smmu_debugfs_exit(struct tegra_smmu *smmu)
1079 debugfs_remove_recursive(smmu->debugfs);
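The debugfs hookup is conventional: two seq_file show functions dump the swgroup registers and the per-client enable bits, init publishes them as read-only files under a top-level "smmu" directory, and exit tears the directory down recursively. A minimal sketch of the same wiring using DEFINE_SHOW_ATTRIBUTE() (whether the driver uses that helper or open-codes the fops is not visible here):

    #include <linux/debugfs.h>
    #include <linux/seq_file.h>

    static int tegra_smmu_swgroups_show(struct seq_file *s, void *data)
    {
            struct tegra_smmu *smmu = s->private;

            /* ... walk smmu->soc->swgroups, printing smmu_readl() values ... */
            return 0;
    }
    DEFINE_SHOW_ATTRIBUTE(tegra_smmu_swgroups);

    static void tegra_smmu_debugfs_init(struct tegra_smmu *smmu)
    {
            smmu->debugfs = debugfs_create_dir("smmu", NULL);
            if (!smmu->debugfs)
                    return;

            debugfs_create_file("swgroups", S_IRUGO, smmu->debugfs, smmu,
                                &tegra_smmu_swgroups_fops);
    }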
1086 struct tegra_smmu *smmu;
1091 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
1092 if (!smmu)
1103 mc->smmu = smmu;
1107 smmu->asids = devm_kzalloc(dev, size, GFP_KERNEL);
1108 if (!smmu->asids)
1111 INIT_LIST_HEAD(&smmu->groups);
1112 mutex_init(&smmu->lock);
1114 smmu->regs = mc->regs;
1115 smmu->soc = soc;
1116 smmu->dev = dev;
1117 smmu->mc = mc;
1119 smmu->pfn_mask =
1122 mc->soc->num_address_bits, smmu->pfn_mask);
1123 smmu->tlb_mask = (1 << fls(smmu->soc->num_tlb_lines)) - 1;
1124 dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
1125 smmu->tlb_mask);
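Both masks are computed once at probe time from SoC parameters. The continuation of the pfn_mask assignment is elided by the filter; it presumably derives the mask from the SoC's address width. A sketch with worked (hypothetical) numbers:

    /* plausible elided continuation */
    smmu->pfn_mask =
            BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;

    /*
     * Example: num_address_bits = 34, PAGE_SHIFT = 12
     *   pfn_mask = (1 << 22) - 1 = 0x3fffff
     *
     * tlb_mask rounds the line count up to a power-of-two mask:
     *   num_tlb_lines = 48, fls(48) = 6
     *   tlb_mask = (1 << 6) - 1 = 0x3f
     * so SMMU_TLB_CONFIG_ACTIVE_LINES() yields 48 & 0x3f = 48.
     */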
1132 smmu_writel(smmu, value, SMMU_PTC_CONFIG);
1135 SMMU_TLB_CONFIG_ACTIVE_LINES(smmu);
1140 smmu_writel(smmu, value, SMMU_TLB_CONFIG);
1142 smmu_flush_ptc_all(smmu);
1143 smmu_flush_tlb(smmu);
1144 smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
1145 smmu_flush(smmu);
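Hardware bring-up then happens in a fixed order: configure the page-table cache, configure the TLB (including the active-lines field computed above), invalidate both caches wholesale, and only then set the global enable bit, closing with a read-back so the writes are known to have landed. A sketch of that sequence with the config bit names treated as assumptions:

    value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);
    smmu_writel(smmu, value, SMMU_PTC_CONFIG);

    value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
            SMMU_TLB_CONFIG_ACTIVE_LINES(smmu);
    smmu_writel(smmu, value, SMMU_TLB_CONFIG);

    smmu_flush_ptc_all(smmu);       /* start from empty caches */
    smmu_flush_tlb(smmu);
    smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
    smmu_flush(smmu);               /* posted writes have completed */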
1149 err = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, dev_name(dev));
1153 iommu_device_set_ops(&smmu->iommu, &tegra_smmu_ops);
1154 iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
1156 err = iommu_device_register(&smmu->iommu);
1158 iommu_device_sysfs_remove(&smmu->iommu);
1164 iommu_device_unregister(&smmu->iommu);
1165 iommu_device_sysfs_remove(&smmu->iommu);
1170 tegra_smmu_debugfs_init(smmu);
1172 return smmu;
1175 void tegra_smmu_remove(struct tegra_smmu *smmu)
1177 iommu_device_unregister(&smmu->iommu);
1178 iommu_device_sysfs_remove(&smmu->iommu);
1181 tegra_smmu_debugfs_exit(smmu);