Lines matching refs:entries (short sketches of the key patterns follow the listing)
34 u64 entries; /* number of entries in hpas/hpages[] */
57 unsigned long entries, unsigned long dev_hpa,
66 ret = account_locked_vm(mm, entries, true);
70 locked_entries = entries;
80 mem->pageshift = __ffs(dev_hpa | (entries << PAGE_SHIFT));
88 * we use @ua and @entries natural alignment to allow IOMMU pages
91 mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT));
92 mem->hpas = vzalloc(array_size(entries, sizeof(mem->hpas[0])));
102 chunk = min(chunk, entries);
103 for (entry = 0; entry < entries; entry += chunk) {
104 unsigned long n = min(entries - entry, chunk);
118 if (pinned != entries) {
128 mem->entries = entries;
134 if ((mem2->ua < (ua + (entries << PAGE_SHIFT))) &&
136 (mem2->entries << PAGE_SHIFT)))) {
150 for (i = 0; i < entries; ++i) {
185 long mm_iommu_new(struct mm_struct *mm, unsigned long ua, unsigned long entries,
188 return mm_iommu_do_alloc(mm, ua, entries, MM_IOMMU_TABLE_INVALID_HPA,
194 unsigned long entries, unsigned long dev_hpa,
197 return mm_iommu_do_alloc(mm, ua, entries, dev_hpa, pmem);
209 for (i = 0; i < mem->entries; ++i) {
273 unlock_entries = mem->entries;
295 (mem->entries << PAGE_SHIFT))) {
314 (mem->entries << PAGE_SHIFT))) {
324 unsigned long ua, unsigned long entries)
331 if ((mem->ua == ua) && (mem->entries == entries)) {
350 if (entry >= mem->entries)
374 if (entry >= mem->entries)
428 end = mem->dev_hpa + (mem->entries << PAGE_SHIFT);
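
The two __ffs() computations in the listing (file lines 80 and 91) derive the largest IOMMU page size the region can support from its natural alignment: OR-ing the base address (dev_hpa or ua) with the region size in bytes and taking the index of the lowest set bit yields the biggest power of two that divides both, which is what the comment at file line 88 means by natural alignment. A minimal userspace sketch, assuming 64K base pages and made-up sample values; __builtin_ctzl() stands in for the kernel's __ffs():

#include <stdio.h>

#define PAGE_SHIFT 16	/* assumption: 64K base pages, common on ppc64 */

int main(void)
{
	unsigned long ua = 0x200000;	/* hypothetical 2M-aligned user address */
	unsigned long entries = 32;	/* hypothetical: 32 * 64K = 2M of entries */

	/* Lowest set bit of (base | size-in-bytes) is the largest power
	 * of two dividing both; __builtin_ctzl() plays the role of the
	 * kernel's __ffs(). */
	unsigned int pageshift =
		(unsigned int)__builtin_ctzl(ua | (entries << PAGE_SHIFT));

	printf("pageshift = %u -> up to %lu-byte IOMMU pages\n",
	       pageshift, 1UL << pageshift);
	return 0;
}

This prints pageshift = 21 (2M pages) for the sample values. In the kernel this is only an upper bound: the per-page loop at file line 150 further clamps mem->pageshift to the size of the pages actually backing the range.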
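
The loop at file lines 102-104 pins the user range in bounded chunks rather than in one call, trimming the final iteration with min(entries - entry, chunk); file line 118 then treats any shortfall in the total pinned count as failure. A sketch of the same chunking pattern, with a hypothetical pin_some() callback standing in for the kernel's page-pinning call:

/* Sketch of the chunked-pinning pattern at file lines 102-118.
 * pin_some() is a hypothetical stand-in for the real pinning call;
 * it returns how many of the n requested items it pinned, or < 0. */
static long pin_all(unsigned long entries, unsigned long chunk,
		    long (*pin_some)(unsigned long entry, unsigned long n))
{
	unsigned long entry, pinned = 0;
	long ret;

	if (chunk > entries)	/* min(chunk, entries) */
		chunk = entries;
	for (entry = 0; entry < entries; entry += chunk) {
		/* Last chunk may be short. */
		unsigned long n = entries - entry < chunk ?
				entries - entry : chunk;

		ret = pin_some(entry, n);
		if (ret <= 0)
			break;
		pinned += ret;
		if ((unsigned long)ret != n)	/* partial pin: stop early */
			break;
	}
	/* As at file line 118: anything short of a full pin is an error
	 * and the caller must undo the partial work. */
	return pinned == entries ? 0 : -1;
}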
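
The condition spanning file lines 134-136 is the standard half-open interval intersection test, used here to reject a new registration that overlaps an existing one: two ranges intersect iff each begins before the other ends. Restated as a hypothetical helper with sizes pre-converted to bytes:

#include <stdbool.h>

/* Mirrors the check at file lines 134-136: half-open byte ranges
 * [a, a + la) and [b, b + lb) overlap iff each starts before the
 * other ends. */
static bool ranges_overlap(unsigned long a, unsigned long la,
			   unsigned long b, unsigned long lb)
{
	return (b < a + la) && (a < b + lb);
}

With la = entries << PAGE_SHIFT for the new region and lb = mem2->entries << PAGE_SHIFT for the existing one, this is exactly the listed condition.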