Lines matching refs: mem
60 struct mm_iommu_table_group_mem_t *mem, *mem2;
73 mem = kzalloc(sizeof(*mem), GFP_KERNEL);
74 if (!mem) {
80 mem->pageshift = __ffs(dev_hpa | (entries << PAGE_SHIFT));
81 mem->dev_hpa = dev_hpa;
84 mem->dev_hpa = MM_IOMMU_TABLE_INVALID_HPA;
91 mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT));
92 mem->hpas = vzalloc(array_size(entries, sizeof(mem->hpas[0])));
93 if (!mem->hpas) {
94 kfree(mem);
108 mem->hpages + entry);
125 atomic64_set(&mem->mapped, 1);
126 mem->used = 1;
127 mem->ua = ua;
128 mem->entries = entries;
144 if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) {
152 struct page *page = mem->hpages[i];
154 if ((mem->pageshift > PAGE_SHIFT) && PageHuge(page))
156 mem->pageshift = min(mem->pageshift, pageshift);
161 mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
165 list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);
169 *pmem = mem;
175 unpin_user_pages(mem->hpages, pinned);
177 vfree(mem->hpas);
178 kfree(mem);
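The two pageshift assignments above (lines 80 and 91) use a low-bit trick: __ffs() of the OR of the start address and the region size yields the largest page shift to which both are naturally aligned. The listing appears to come from the powerpc mm_iommu preregistration code (arch/powerpc/mm/book3s64/iommu_api.c in the Linux kernel); the sketch below is a standalone userspace illustration of just that arithmetic, where lowest_set_bit() is a made-up stand-in for the kernel's __ffs() built on the GCC/Clang __builtin_ctzl(), and PAGE_SHIFT of 16 (64K pages) is an assumption.

#include <stdio.h>

#define PAGE_SHIFT 16   /* assumption: 64K base pages, as on typical ppc64 configs */

/* Stand-in for the kernel's __ffs(): index of the lowest set bit (x must be non-zero). */
static unsigned long lowest_set_bit(unsigned long x)
{
        return (unsigned long)__builtin_ctzl(x);
}

int main(void)
{
        unsigned long ua = 0x200000;    /* 2M-aligned start of the registered region */
        unsigned long entries = 32;     /* 32 pages of 64K, i.e. a 2M region */

        /*
         * Both the base address and the size contribute set bits; the lowest
         * of them bounds the largest naturally aligned page size usable here.
         */
        unsigned long pageshift = lowest_set_bit(ua | (entries << PAGE_SHIFT));

        printf("max page shift: %lu (%lu KiB pages)\n",
               pageshift, (1UL << pageshift) / 1024);
        return 0;
}

The PageHuge() loop at lines 152-161 can then only lower that initial estimate, never raise it, which is why it clamps with min().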
202 static void mm_iommu_unpin(struct mm_iommu_table_group_mem_t *mem)
207 if (!mem->hpas)
210 for (i = 0; i < mem->entries; ++i) {
211 if (!mem->hpas[i])
214 page = pfn_to_page(mem->hpas[i] >> PAGE_SHIFT);
218 if (mem->hpas[i] & MM_IOMMU_TABLE_GROUP_PAGE_DIRTY)
223 mem->hpas[i] = 0;
227 static void mm_iommu_do_free(struct mm_iommu_table_group_mem_t *mem)
230 mm_iommu_unpin(mem);
231 vfree(mem->hpas);
232 kfree(mem);
237 struct mm_iommu_table_group_mem_t *mem = container_of(head,
240 mm_iommu_do_free(mem);
243 static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem)
245 list_del_rcu(&mem->next);
246 call_rcu(&mem->rcu, mm_iommu_free);
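Line 237 recovers the enclosing mm_iommu_table_group_mem_t from the rcu_head handed to the RCU callback via container_of(), and mm_iommu_release() at lines 245-246 is what unlinks the entry and queues that callback. The standalone sketch below shows only the container_of() pointer arithmetic; callback_head, demo_mem and demo_free are made-up names, and the real kernel macro also carries a type check that is omitted here.

#include <stdio.h>
#include <stddef.h>

/* Same idea as the kernel macro: member pointer -> enclosing structure. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct callback_head {
        void (*func)(struct callback_head *head);
};

/* Hypothetical, much reduced stand-in for mm_iommu_table_group_mem_t. */
struct demo_mem {
        unsigned long entries;
        struct callback_head rcu;       /* embedded, like mem->rcu */
};

static void demo_free(struct callback_head *head)
{
        struct demo_mem *mem = container_of(head, struct demo_mem, rcu);

        printf("freeing region with %lu entries\n", mem->entries);
}

int main(void)
{
        struct demo_mem m = { .entries = 16 };

        /* The callback only ever sees &m.rcu, yet reaches the whole structure. */
        demo_free(&m.rcu);
        return 0;
}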
249 long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
256 if (mem->used == 0) {
261 --mem->used;
263 if (mem->used)
267 if (atomic64_cmpxchg(&mem->mapped, 1, 0) != 1) {
268 ++mem->used;
273 if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA)
274 unlock_entries = mem->entries;
277 mm_iommu_release(mem);
291 struct mm_iommu_table_group_mem_t *mem, *ret = NULL;
294 list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
295 if ((mem->ua <= ua) &&
296 (ua + size <= mem->ua +
297 (mem->entries << PAGE_SHIFT))) {
298 ret = mem;
311 struct mm_iommu_table_group_mem_t *mem, *ret = NULL;
315 list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next,
317 if ((mem->ua == ua) && (mem->entries == entries)) {
318 ret = mem;
319 ++mem->used;
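mm_iommu_lookup() at lines 294-298 treats a registration as a hit only when the whole requested range [ua, ua + size) lies inside the registered window, whereas mm_iommu_get() at lines 317-319 demands an exact (ua, entries) match and takes an extra usage reference. A small standalone check of the containment condition; window_contains() is a hypothetical name and PAGE_SHIFT of 16 is an assumption.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define PAGE_SHIFT 16   /* assumption: 64K pages */

/* True when [ua, ua + size) lies entirely within the registered window. */
static bool window_contains(uint64_t win_ua, uint64_t win_entries,
                            uint64_t ua, uint64_t size)
{
        uint64_t win_end = win_ua + (win_entries << PAGE_SHIFT);

        return (win_ua <= ua) && (ua + size <= win_end);
}

int main(void)
{
        /* A 4-page window starting at 0x10000000. */
        printf("%d\n", window_contains(0x10000000, 4, 0x10010000, 0x20000)); /* 1: fits */
        printf("%d\n", window_contains(0x10000000, 4, 0x10030000, 0x20000)); /* 0: runs past the end */
        return 0;
}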
330 long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
333 const long entry = (ua - mem->ua) >> PAGE_SHIFT;
336 if (entry >= mem->entries)
339 if (pageshift > mem->pageshift)
342 if (!mem->hpas) {
343 *hpa = mem->dev_hpa + (ua - mem->ua);
347 va = &mem->hpas[entry];
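The translation at lines 333-347 works in two steps: the page index ((ua - mem->ua) >> PAGE_SHIFT) selects an entry in the hpas[] array (or, for device memory without page structs, an offset from dev_hpa), and the in-page offset of ua is then merged back into the page-aligned host physical address. A simplified standalone sketch of the array case, skipping the pageshift check and the device-memory branch; demo_window and demo_ua_to_hpa are made-up names and PAGE_SHIFT of 16 is an assumption.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT      16
#define PAGE_SIZE       (UINT64_C(1) << PAGE_SHIFT)
#define PAGE_MASK       (~(PAGE_SIZE - 1))

/* Hypothetical, much reduced stand-in for the preregistered window. */
struct demo_window {
        uint64_t ua;            /* userspace base of the window */
        uint64_t entries;       /* number of PAGE_SIZE pages */
        uint64_t *hpas;         /* per-page host physical addresses */
};

/* Page index into hpas[], then re-add the offset of ua within its page. */
static int demo_ua_to_hpa(const struct demo_window *w, uint64_t ua, uint64_t *hpa)
{
        uint64_t entry = (ua - w->ua) >> PAGE_SHIFT;

        if (ua < w->ua || entry >= w->entries)
                return -1;

        *hpa = (w->hpas[entry] & PAGE_MASK) | (ua & ~PAGE_MASK);
        return 0;
}

int main(void)
{
        uint64_t hpas[2] = { 0x40000000, 0x80000000 };
        struct demo_window w = { .ua = 0x10000000, .entries = 2, .hpas = hpas };
        uint64_t hpa;

        /* Second page of the window, offset 0x123 into it. */
        if (!demo_ua_to_hpa(&w, 0x10010123, &hpa))
                printf("hpa = 0x%llx\n", (unsigned long long)hpa);
        return 0;
}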
357 struct mm_iommu_table_group_mem_t *mem;
361 list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
362 if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA)
365 end = mem->dev_hpa + (mem->entries << PAGE_SHIFT);
366 if ((mem->dev_hpa <= hpa) && (hpa < end)) {
383 long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem)
385 if (atomic64_inc_not_zero(&mem->mapped))
393 void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem)
395 atomic64_add_unless(&mem->mapped, -1, 1);
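The mapped counter seen at lines 125, 267, 385 and 395 implements a small lifetime protocol: it starts at 1 when the region is registered, every active mapping takes an extra reference with inc-not-zero, and the put path may only tear the region down if it can swing the counter from exactly 1 to 0. Below is a standalone C11-atomics model of that protocol; mapped_inc(), mapped_dec() and try_teardown() are made-up names standing in for the kernel's atomic64_inc_not_zero(), atomic64_add_unless() and atomic64_cmpxchg() uses.

#include <stdio.h>
#include <stdbool.h>
#include <stdatomic.h>

/* Starts at 1 at registration time, like atomic64_set(&mem->mapped, 1). */
static atomic_llong mapped = 1;

/* Like atomic64_inc_not_zero(): refuse new mappings once teardown zeroed the counter. */
static bool mapped_inc(void)
{
        long long old = atomic_load(&mapped);

        while (old != 0) {
                if (atomic_compare_exchange_weak(&mapped, &old, old + 1))
                        return true;
        }
        return false;
}

/* Like atomic64_add_unless(..., -1, 1): never drop the initial reference. */
static void mapped_dec(void)
{
        long long old = atomic_load(&mapped);

        while (old != 1) {
                if (atomic_compare_exchange_weak(&mapped, &old, old - 1))
                        return;
        }
}

/* Like the atomic64_cmpxchg(&mem->mapped, 1, 0) in the put path. */
static bool try_teardown(void)
{
        long long expected = 1;

        return atomic_compare_exchange_strong(&mapped, &expected, 0);
}

int main(void)
{
        printf("inc: %d\n", mapped_inc());                      /* 1 -> 2 */
        printf("teardown while mapped: %d\n", try_teardown());  /* fails: counter is 2 */
        mapped_dec();                                           /* 2 -> 1 */
        printf("teardown after unmap: %d\n", try_teardown());   /* succeeds: 1 -> 0 */
        printf("inc after teardown: %d\n", mapped_inc());       /* fails: counter is 0 */
        return 0;
}

Once try_teardown() has succeeded, mapped_inc() keeps failing, which is what prevents new mappings from racing with unregistration.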