Lines matching refs: mem
60 struct mm_iommu_table_group_mem_t *mem, *mem2;
73 mem = kzalloc(sizeof(*mem), GFP_KERNEL);
74 if (!mem) {
80 mem->pageshift = __ffs(dev_hpa | (entries << PAGE_SHIFT));
81 mem->dev_hpa = dev_hpa;
84 mem->dev_hpa = MM_IOMMU_TABLE_INVALID_HPA;
91 mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT));
92 mem->hpas = vzalloc(array_size(entries, sizeof(mem->hpas[0])));
93 if (!mem->hpas) {
94 kfree(mem);
108 mem->hpages + entry, NULL);
125 atomic64_set(&mem->mapped, 1);
126 mem->used = 1;
127 mem->ua = ua;
128 mem->entries = entries;
143 if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) {
151 struct page *page = mem->hpages[i];
153 if ((mem->pageshift > PAGE_SHIFT) && PageHuge(page))
155 mem->pageshift = min(mem->pageshift, pageshift);
160 mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
164 list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);
168 *pmem = mem;
174 unpin_user_pages(mem->hpages, pinned);
176 vfree(mem->hpas);
177 kfree(mem);
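
The lines above are the registration path: a mm_iommu_table_group_mem_t is allocated, the backing user pages are pinned, pageshift is derived from the alignment of the address and the region size (or from dev_hpa for preregistered device memory), and the descriptor is added to the per-mm list under RCU. A minimal caller-side sketch, assuming the mm_iommu_new()/mm_iommu_put() prototypes exported through asm/mmu_context.h; error handling trimmed:

/*
 * Hedged sketch: register a user range for IOMMU use, then drop it.
 * Prototypes assumed from arch/powerpc/include/asm/mmu_context.h.
 */
#include <linux/mm.h>
#include <asm/mmu_context.h>

static long register_user_range(struct mm_struct *mm, unsigned long ua,
                                unsigned long size)
{
        struct mm_iommu_table_group_mem_t *mem;
        unsigned long entries = size >> PAGE_SHIFT;
        long ret;

        ret = mm_iommu_new(mm, ua, entries, &mem);      /* pins pages, lists the region */
        if (ret)
                return ret;

        /* ... program TCEs against "mem" here ... */

        return mm_iommu_put(mm, mem);                   /* drop the usage count */
}
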
201 static void mm_iommu_unpin(struct mm_iommu_table_group_mem_t *mem)
206 if (!mem->hpas)
209 for (i = 0; i < mem->entries; ++i) {
210 if (!mem->hpas[i])
213 page = pfn_to_page(mem->hpas[i] >> PAGE_SHIFT);
217 if (mem->hpas[i] & MM_IOMMU_TABLE_GROUP_PAGE_DIRTY)
222 mem->hpas[i] = 0;
226 static void mm_iommu_do_free(struct mm_iommu_table_group_mem_t *mem)
229 mm_iommu_unpin(mem);
230 vfree(mem->hpas);
231 kfree(mem);
236 struct mm_iommu_table_group_mem_t *mem = container_of(head,
239 mm_iommu_do_free(mem);
242 static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem)
244 list_del_rcu(&mem->next);
245 call_rcu(&mem->rcu, mm_iommu_free);
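
mm_iommu_release() uses the standard RCU deletion pattern: the descriptor is unlinked with list_del_rcu() and the actual free is deferred with call_rcu(), so lockless readers still walking the list can finish before the memory goes away. A generic sketch of that pattern, not tied to this file's types:

/* Generic RCU deferred-free pattern (illustrative, not this file's code). */
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct item {
        struct list_head next;
        struct rcu_head rcu;
};

static void item_free_rcu(struct rcu_head *head)
{
        kfree(container_of(head, struct item, rcu));
}

static void item_release(struct item *it)
{
        list_del_rcu(&it->next);                /* readers may still see it ...       */
        call_rcu(&it->rcu, item_free_rcu);      /* ... so free only after a grace period */
}
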
248 long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
255 if (mem->used == 0) {
260 --mem->used;
262 if (mem->used)
266 if (atomic_cmpxchg(&mem->mapped, 1, 0) != 1) {
267 ++mem->used;
272 if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA)
273 unlock_entries = mem->entries;
276 mm_iommu_release(mem);
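
mm_iommu_put() decrements mem->used and only tears the region down when the count reaches zero and the mapped counter can be flipped from its idle value (1) back to 0; if the region is still mapped into an IOMMU table, the usage count is restored and the put fails. A minimal caller-side sketch, assuming the mm_iommu_put() prototype from asm/mmu_context.h:

/* Hedged sketch: drop a registration; tolerate "still mapped" failures. */
#include <linux/printk.h>
#include <asm/mmu_context.h>

static long unregister_range(struct mm_struct *mm,
                             struct mm_iommu_table_group_mem_t *mem)
{
        long ret = mm_iommu_put(mm, mem);

        if (ret)        /* region still in use; caller must retry later */
                pr_debug("mm_iommu_put failed: %ld\n", ret);

        return ret;
}
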
290 struct mm_iommu_table_group_mem_t *mem, *ret = NULL;
292 list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
293 if ((mem->ua <= ua) &&
294 (ua + size <= mem->ua +
295 (mem->entries << PAGE_SHIFT))) {
296 ret = mem;
308 struct mm_iommu_table_group_mem_t *mem, *ret = NULL;
310 list_for_each_entry_lockless(mem, &mm->context.iommu_group_mem_list,
312 if ((mem->ua <= ua) &&
313 (ua + size <= mem->ua +
314 (mem->entries << PAGE_SHIFT))) {
315 ret = mem;
326 struct mm_iommu_table_group_mem_t *mem, *ret = NULL;
330 list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
331 if ((mem->ua == ua) && (mem->entries == entries)) {
332 ret = mem;
333 ++mem->used;
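
mm_iommu_lookup() and its lockless real-mode twin mm_iommu_lookup_rm() return the descriptor whose [ua, ua + entries << PAGE_SHIFT) window covers a requested range, while mm_iommu_get() matches an exact (ua, entries) registration and bumps its usage count. A minimal sketch of probing for an existing registration, assuming the prototypes from asm/mmu_context.h:

/*
 * Hedged sketch: check whether exactly (ua, entries) is already registered.
 * Every successful mm_iommu_get() must be balanced by mm_iommu_put().
 */
#include <asm/mmu_context.h>

static bool range_already_registered(struct mm_struct *mm, unsigned long ua,
                                     unsigned long entries)
{
        struct mm_iommu_table_group_mem_t *mem;

        mem = mm_iommu_get(mm, ua, entries);
        if (!mem)
                return false;

        mm_iommu_put(mm, mem);          /* drop the reference taken by get */
        return true;
}
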
344 long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
347 const long entry = (ua - mem->ua) >> PAGE_SHIFT;
350 if (entry >= mem->entries)
353 if (pageshift > mem->pageshift)
356 if (!mem->hpas) {
357 *hpa = mem->dev_hpa + (ua - mem->ua);
361 va = &mem->hpas[entry];
368 long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
371 const long entry = (ua - mem->ua) >> PAGE_SHIFT;
374 if (entry >= mem->entries)
377 if (pageshift > mem->pageshift)
380 if (!mem->hpas) {
381 *hpa = mem->dev_hpa + (ua - mem->ua);
385 pa = (void *) vmalloc_to_phys(&mem->hpas[entry]);
396 struct mm_iommu_table_group_mem_t *mem;
401 mem = mm_iommu_lookup_rm(mm, ua, PAGE_SIZE);
402 if (!mem)
405 if (mem->dev_hpa != MM_IOMMU_TABLE_INVALID_HPA)
408 entry = (ua - mem->ua) >> PAGE_SHIFT;
409 va = &mem->hpas[entry];
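
mm_iommu_ua_to_hpa() and mm_iommu_ua_to_hpa_rm() translate a user address inside a registered region into a host physical address, failing when the requested pageshift exceeds what the backing pages guarantee; for preregistered device memory (no hpas array) the result is simply dev_hpa plus the offset. A minimal sketch of translating one page, assuming the prototype from asm/mmu_context.h:

/* Hedged sketch: translate a user address in a registered region to an HPA. */
#include <asm/mmu_context.h>

static long translate_one_page(struct mm_iommu_table_group_mem_t *mem,
                               unsigned long ua, unsigned long *hpa)
{
        /*
         * PAGE_SHIFT-sized mapping; a larger shift only succeeds if the
         * region is backed by pages of at least that size.
         */
        return mm_iommu_ua_to_hpa(mem, ua, PAGE_SHIFT, hpa);
}
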
421 struct mm_iommu_table_group_mem_t *mem;
424 list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
425 if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA)
428 end = mem->dev_hpa + (mem->entries << PAGE_SHIFT);
429 if ((mem->dev_hpa <= hpa) && (hpa < end)) {
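
mm_iommu_is_devmem() scans the registered regions for one whose preregistered device memory window (starting at dev_hpa) contains a given host physical address, which lets TCE code skip page refcounting for such pages. A minimal sketch of the check, assuming a prototype along the lines of mm_iommu_is_devmem(mm, hpa, pageshift, &size) in asm/mmu_context.h:

/* Hedged sketch: decide whether an HPA lies in preregistered device memory. */
#include <asm/mmu_context.h>

static bool hpa_is_preregistered_devmem(struct mm_struct *mm, unsigned long hpa)
{
        unsigned long size = 0;

        return mm_iommu_is_devmem(mm, hpa, PAGE_SHIFT, &size);
}
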
445 long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem)
447 if (atomic64_inc_not_zero(&mem->mapped))
455 void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem)
457 atomic64_add_unless(&mem->mapped, -1, 1);
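
mm_iommu_mapped_inc()/mm_iommu_mapped_dec() implement the guard that mm_iommu_put() checks: the counter idles at 1, every live IOMMU mapping takes an extra reference via atomic64_inc_not_zero(), and the dec never drops it below 1, so teardown can only win the cmpxchg when nothing is mapped. A minimal sketch of taking the reference around a translation, assuming the prototypes from asm/mmu_context.h:

/* Hedged sketch: hold the "mapped" reference while programming a TCE. */
#include <asm/mmu_context.h>

static long map_one_page(struct mm_iommu_table_group_mem_t *mem,
                         unsigned long ua, unsigned long *hpa)
{
        long ret;

        if (mm_iommu_mapped_inc(mem))           /* region is being torn down */
                return -EBUSY;

        ret = mm_iommu_ua_to_hpa(mem, ua, PAGE_SHIFT, hpa);
        if (ret)
                mm_iommu_mapped_dec(mem);       /* undo on failure; on success the
                                                 * reference is dropped when the
                                                 * TCE is later cleared */
        return ret;
}
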