Lines matching references to gart

10 #define dev_fmt(fmt)	"gart: " fmt
58 #define FLUSH_GART_REGS(gart) readl_relaxed((gart)->regs + GART_CONFIG)
60 #define for_each_gart_pte(gart, iova) \
61 for (iova = gart->iovmm_base; \
62 iova < gart->iovmm_end; \
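
These matches appear to come from the Tegra20 GART (Graphics Address Relocation Table) IOMMU driver in the Linux kernel (drivers/iommu/tegra-gart.c). FLUSH_GART_REGS is a plain read-back of GART_CONFIG, used to force earlier posted register writes to complete before continuing. The iteration macro's increment clause is not listed above because that line does not itself reference gart; a plausible completion, assuming the walk advances one GART page per step, is:

#define for_each_gart_pte(gart, iova)				\
	for (iova = gart->iovmm_base;				\
	     iova < gart->iovmm_end;				\
	     iova += GART_PAGE_SIZE)	/* assumed: step by one GART page */
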
65 static inline void gart_set_pte(struct gart_device *gart,
68 writel_relaxed(iova, gart->regs + GART_ENTRY_ADDR);
69 writel_relaxed(pte, gart->regs + GART_ENTRY_DATA);
72 static inline unsigned long gart_read_pte(struct gart_device *gart,
77 writel_relaxed(iova, gart->regs + GART_ENTRY_ADDR);
78 pte = readl_relaxed(gart->regs + GART_ENTRY_DATA);
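
Both PTE accessors drive the same indirect register pair: the entry is selected by writing the IOVA to GART_ENTRY_ADDR, and its contents are then written or read through GART_ENTRY_DATA. A minimal sketch of the two helpers, assuming only the gart_device fields and register offsets already visible above:

static inline void gart_set_pte(struct gart_device *gart,
				unsigned long iova, unsigned long pte)
{
	/* select the entry by IOVA, then write its contents */
	writel_relaxed(iova, gart->regs + GART_ENTRY_ADDR);
	writel_relaxed(pte, gart->regs + GART_ENTRY_DATA);
}

static inline unsigned long gart_read_pte(struct gart_device *gart,
					  unsigned long iova)
{
	unsigned long pte;

	/* same indirection for reads: select the entry, then read it back */
	writel_relaxed(iova, gart->regs + GART_ENTRY_ADDR);
	pte = readl_relaxed(gart->regs + GART_ENTRY_DATA);

	return pte;
}

Because the ADDR/DATA pair is shared hardware state, concurrent callers have to serialize; the map, unmap and translate paths below take gart->pte_lock around these helpers.
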
83 static void do_gart_setup(struct gart_device *gart, const u32 *data)
87 for_each_gart_pte(gart, iova)
88 gart_set_pte(gart, iova, data ? *(data++) : 0);
90 writel_relaxed(1, gart->regs + GART_CONFIG);
91 FLUSH_GART_REGS(gart);
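
do_gart_setup() walks every PTE in the aperture and programs it from the supplied array, or clears it when no data is given, then enables translation and flushes the posted writes with the GART_CONFIG read-back. A sketch under those assumptions:

static void do_gart_setup(struct gart_device *gart, const u32 *data)
{
	unsigned long iova;

	/* restore saved PTEs, or wipe the whole aperture when data is NULL */
	for_each_gart_pte(gart, iova)
		gart_set_pte(gart, iova, data ? *(data++) : 0);

	/* enable translation, then read back to flush the posted writes */
	writel_relaxed(1, gart->regs + GART_CONFIG);
	FLUSH_GART_REGS(gart);
}
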
94 static inline bool gart_iova_range_invalid(struct gart_device *gart,
97 return unlikely(iova < gart->iovmm_base || bytes != GART_PAGE_SIZE ||
98 iova + bytes > gart->iovmm_end);
101 static inline bool gart_pte_valid(struct gart_device *gart, unsigned long iova)
103 return !!(gart_read_pte(gart, iova) & GART_ENTRY_PHYS_ADDR_VALID);
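
The two checks encode the GART's hard constraints: a request must lie inside the [iovmm_base, iovmm_end) aperture and cover exactly one GART page, and a PTE is considered live when its PHYS_ADDR_VALID bit is set. As a consequence, callers map large buffers one page at a time; a hypothetical helper (not part of the driver) that makes the arithmetic explicit:

/* hypothetical helper for illustration only: number of single-page
 * map calls needed for a buffer of 'size' bytes */
static inline unsigned int gart_pages_for(size_t size)
{
	return DIV_ROUND_UP(size, GART_PAGE_SIZE);
}
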
109 struct gart_device *gart = gart_handle;
112 spin_lock(&gart->dom_lock);
114 if (gart->active_domain && gart->active_domain != domain) {
118 gart->active_domain = domain;
119 gart->active_devices++;
122 spin_unlock(&gart->dom_lock);
130 struct gart_device *gart = gart_handle;
132 spin_lock(&gart->dom_lock);
137 if (--gart->active_devices == 0)
138 gart->active_domain = NULL;
141 spin_unlock(&gart->dom_lock);
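
The GART provides a single translation aperture, so the driver allows only one active IOMMU domain at a time: attach rejects a second, different domain, otherwise records the domain and bumps a device count; detach drops the count and clears active_domain when the last device goes away, all under dom_lock. A sketch of that bookkeeping (the callback names and errno are assumptions, and the per-device state hinted at by the gap between the matched lines is left out):

static int gart_iommu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct gart_device *gart = gart_handle;
	int ret = 0;

	spin_lock(&gart->dom_lock);

	if (gart->active_domain && gart->active_domain != domain) {
		/* a different domain already owns the single aperture;
		 * the errno here is illustrative */
		ret = -EBUSY;
	} else {
		gart->active_domain = domain;
		gart->active_devices++;
	}

	spin_unlock(&gart->dom_lock);

	return ret;
}

static void gart_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct gart_device *gart = gart_handle;

	spin_lock(&gart->dom_lock);

	/* last attached device gone: free the aperture for another domain */
	if (--gart->active_devices == 0)
		gart->active_domain = NULL;

	spin_unlock(&gart->dom_lock);
}
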
167 static inline int __gart_iommu_map(struct gart_device *gart, unsigned long iova,
170 if (unlikely(gart_debug && gart_pte_valid(gart, iova))) {
171 dev_err(gart->dev, "Page entry is in-use\n");
175 gart_set_pte(gart, iova, GART_ENTRY_PHYS_ADDR_VALID | pa);
183 struct gart_device *gart = gart_handle;
186 if (gart_iova_range_invalid(gart, iova, bytes))
189 spin_lock(&gart->pte_lock);
190 ret = __gart_iommu_map(gart, iova, (unsigned long)pa);
191 spin_unlock(&gart->pte_lock);
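
Mapping is a single PTE update. The inner helper, when the gart_debug flag is set, refuses to overwrite an entry that is already valid, then writes the physical address with the valid bit set; the outer ->map callback rejects anything that is not a single in-aperture page and serializes the PTE write with pte_lock. A sketch, with the callback's full parameter list abridged since it is not part of the matched lines:

static inline int __gart_iommu_map(struct gart_device *gart, unsigned long iova,
				   unsigned long pa)
{
	if (unlikely(gart_debug && gart_pte_valid(gart, iova))) {
		dev_err(gart->dev, "Page entry is in-use\n");
		return -EINVAL;
	}

	/* point the entry at pa and mark it valid */
	gart_set_pte(gart, iova, GART_ENTRY_PHYS_ADDR_VALID | pa);

	return 0;
}

/* ->map callback; 'prot' and any newer parameters are omitted here */
static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t pa, size_t bytes)
{
	struct gart_device *gart = gart_handle;
	int ret;

	if (gart_iova_range_invalid(gart, iova, bytes))
		return -EINVAL;

	spin_lock(&gart->pte_lock);
	ret = __gart_iommu_map(gart, iova, (unsigned long)pa);
	spin_unlock(&gart->pte_lock);

	return ret;
}
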
196 static inline int __gart_iommu_unmap(struct gart_device *gart,
199 if (unlikely(gart_debug && !gart_pte_valid(gart, iova))) {
200 dev_err(gart->dev, "Page entry is invalid\n");
204 gart_set_pte(gart, iova, 0);
212 struct gart_device *gart = gart_handle;
215 if (gart_iova_range_invalid(gart, iova, bytes))
218 spin_lock(&gart->pte_lock);
219 err = __gart_iommu_unmap(gart, iova);
220 spin_unlock(&gart->pte_lock);
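
Unmapping mirrors the map path: the debug check now complains if the entry was never valid, and clearing the PTE drops the translation along with its valid bit. The outer callback performs the same range check and locking as gart_iommu_map() and, per the IOMMU ->unmap contract, is expected to report the number of bytes actually unmapped. A sketch of the inner helper:

static inline int __gart_iommu_unmap(struct gart_device *gart,
				     unsigned long iova)
{
	if (unlikely(gart_debug && !gart_pte_valid(gart, iova))) {
		dev_err(gart->dev, "Page entry is invalid\n");
		return -EINVAL;
	}

	/* clearing the entry removes the valid bit and the translation */
	gart_set_pte(gart, iova, 0);

	return 0;
}
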
228 struct gart_device *gart = gart_handle;
231 if (gart_iova_range_invalid(gart, iova, GART_PAGE_SIZE))
234 spin_lock(&gart->pte_lock);
235 pte = gart_read_pte(gart, iova);
236 spin_unlock(&gart->pte_lock);
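
Reverse translation reads the entry back under pte_lock and then, presumably, masks off the valid/control bits to recover the page-aligned physical address. A sketch with that assumption (GART_PAGE_MASK is an assumed name for the physical-address bits of a PTE):

static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct gart_device *gart = gart_handle;
	unsigned long pte;

	if (gart_iova_range_invalid(gart, iova, GART_PAGE_SIZE))
		return 0;	/* illustrative failure value */

	spin_lock(&gart->pte_lock);
	pte = gart_read_pte(gart, iova);
	spin_unlock(&gart->pte_lock);

	/* keep only the physical page-frame bits of the entry */
	return pte & GART_PAGE_MASK;
}
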
287 int tegra_gart_suspend(struct gart_device *gart)
289 u32 *data = gart->savedata;
297 writel_relaxed(0, gart->regs + GART_CONFIG);
298 FLUSH_GART_REGS(gart);
300 for_each_gart_pte(gart, iova)
301 *(data++) = gart_read_pte(gart, iova);
306 int tegra_gart_resume(struct gart_device *gart)
308 do_gart_setup(gart, gart->savedata);
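
Suspend disables translation first, so that stray accesses trap instead of translating, then snapshots every PTE into gart->savedata; resume simply replays that snapshot through do_gart_setup(), which also re-enables translation and flushes. A sketch of the pair:

int tegra_gart_suspend(struct gart_device *gart)
{
	u32 *data = gart->savedata;
	unsigned long iova;

	/* turn translation off before touching the table */
	writel_relaxed(0, gart->regs + GART_CONFIG);
	FLUSH_GART_REGS(gart);

	/* save the whole PTE array for tegra_gart_resume() */
	for_each_gart_pte(gart, iova)
		*(data++) = gart_read_pte(gart, iova);

	return 0;
}

int tegra_gart_resume(struct gart_device *gart)
{
	/* replay the saved PTEs and re-enable translation */
	do_gart_setup(gart, gart->savedata);

	return 0;
}
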
315 struct gart_device *gart;
328 gart = kzalloc(sizeof(*gart), GFP_KERNEL);
329 if (!gart)
332 gart_handle = gart;
334 gart->dev = dev;
335 gart->regs = mc->regs + GART_REG_BASE;
336 gart->iovmm_base = res->start;
337 gart->iovmm_end = res->end + 1;
338 spin_lock_init(&gart->pte_lock);
339 spin_lock_init(&gart->dom_lock);
341 do_gart_setup(gart, NULL);
343 err = iommu_device_sysfs_add(&gart->iommu, dev, NULL, "gart");
347 err = iommu_device_register(&gart->iommu, &gart_iommu_ops, dev);
351 gart->savedata = vmalloc(resource_size(res) / GART_PAGE_SIZE *
353 if (!gart->savedata) {
358 return gart;
361 iommu_device_unregister(&gart->iommu);
363 iommu_device_sysfs_remove(&gart->iommu);
365 kfree(gart);
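
Probe wires everything up in a fixed order: allocate the gart_device, point it at the GART registers inside the memory controller block and at the IOVA aperture, wipe and enable the aperture with do_gart_setup(gart, NULL), register with the IOMMU core, and finally allocate the suspend buffer, one u32 per GART page; the error labels unwind in reverse. A sketch of that flow; the function name, the aperture-resource lookup and the label names are inferred rather than shown in the matched lines:

struct gart_device *tegra_gart_probe(struct device *dev, struct tegra_mc *mc)
{
	struct gart_device *gart;
	struct resource *res;
	int err;

	/* assumed: the IOVA aperture comes from a MEM resource of the device */
	res = platform_get_resource(to_platform_device(dev), IORESOURCE_MEM, 1);
	if (!res)
		return ERR_PTR(-ENXIO);

	gart = kzalloc(sizeof(*gart), GFP_KERNEL);
	if (!gart)
		return ERR_PTR(-ENOMEM);

	gart_handle = gart;			/* single system-wide instance */

	gart->dev = dev;
	gart->regs = mc->regs + GART_REG_BASE;	/* GART registers sit inside the MC */
	gart->iovmm_base = res->start;
	gart->iovmm_end = res->end + 1;
	spin_lock_init(&gart->pte_lock);
	spin_lock_init(&gart->dom_lock);

	do_gart_setup(gart, NULL);		/* wipe all PTEs, enable translation */

	err = iommu_device_sysfs_add(&gart->iommu, dev, NULL, "gart");
	if (err)
		goto free_gart;

	err = iommu_device_register(&gart->iommu, &gart_iommu_ops, dev);
	if (err)
		goto remove_sysfs;

	/* one saved word per GART page, consumed by suspend/resume */
	gart->savedata = vmalloc(resource_size(res) / GART_PAGE_SIZE *
				 sizeof(u32));
	if (!gart->savedata) {
		err = -ENOMEM;
		goto unregister_iommu;
	}

	return gart;

unregister_iommu:
	iommu_device_unregister(&gart->iommu);
remove_sysfs:
	iommu_device_sysfs_remove(&gart->iommu);
free_gart:
	kfree(gart);

	return ERR_PTR(err);
}
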