Lines Matching defs:limit_pfn
25 unsigned long limit_pfn);
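
These matches appear to come from the Linux kernel's IOVA allocator, drivers/iommu/iova.c. Line 25 is the tail of the forward declaration of iova_rcache_get(); assuming that file, the full prototype reads roughly:

    static unsigned long iova_rcache_get(struct iova_domain *iovad,
                                         unsigned long size,
                                         unsigned long limit_pfn);

Throughout the file, limit_pfn is the highest page frame number an allocation may touch. As the matches below show, the public entry points treat it as inclusive and pass limit_pfn + 1 down so the internal helpers can work with an exclusive bound.
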
77 __get_cached_rbnode(struct iova_domain *iovad, unsigned long limit_pfn)
79 if (limit_pfn <= iovad->dma_32bit_pfn)
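
Lines 77-79 are the cached-node optimization: the domain keeps two resume points for the top-down rbtree walk, one for allocations confined to 32-bit DMA space and one for everything else, so a search can start near where the last one ended instead of at the top of the tree. A minimal sketch, assuming the two cached fields on struct iova_domain:

    static struct rb_node *
    __get_cached_rbnode(struct iova_domain *iovad, unsigned long limit_pfn)
    {
            if (limit_pfn <= iovad->dma_32bit_pfn)
                    return iovad->cached32_node;

            return iovad->cached_node;
    }
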
113 static struct rb_node *iova_find_limit(struct iova_domain *iovad, unsigned long limit_pfn)
117 * Ideally what we'd like to judge here is whether limit_pfn is close
124 if (limit_pfn > iovad->dma_32bit_pfn)
128 while (to_iova(node)->pfn_hi < limit_pfn)
132 while (node->rb_left && to_iova(node->rb_left)->pfn_lo >= limit_pfn)
141 if (to_iova(next)->pfn_lo >= limit_pfn) {
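
Lines 113-141 are iova_find_limit(), which computes an exact restart node for a bounded retry: descend right past every range that ends below limit_pfn (line 128), then refine leftwards (lines 132-141) until no lower node still reaches the limit, so that a backwards walk from the result covers everything below limit_pfn. The truncated comment at line 117 concedes this is only worthwhile when limit_pfn sits well below the highest allocation; otherwise the anchor node is returned directly (line 124). A hypothetical standalone model of the same search on a plain BST keyed by pfn_lo (struct range and find_limit are invented names):

    struct range {
            unsigned long pfn_lo, pfn_hi;
            struct range *left, *right;
    };

    /* Lowest range that ends at or above limit_pfn. Ranges never overlap,
     * so pfn_hi is ordered the same way as the pfn_lo key. */
    static struct range *find_limit(struct range *node, unsigned long limit_pfn)
    {
            struct range *best = NULL;

            while (node) {
                    if (node->pfn_hi >= limit_pfn) {
                            best = node;        /* candidate; try lower */
                            node = node->left;
                    } else {
                            node = node->right; /* entirely below the limit */
                    }
            }
            return best;
    }

The kernel does an equivalent descent in place, probing the rightmost chain of each left subtree (the goto search_left loop) rather than keeping a best pointer.
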
179 unsigned long size, unsigned long limit_pfn,
187 unsigned long high_pfn = limit_pfn, low_pfn = iovad->start_pfn;
194 if (limit_pfn <= iovad->dma_32bit_pfn &&
198 curr = __get_cached_rbnode(iovad, limit_pfn);
212 if (low_pfn == iovad->start_pfn && retry_pfn < limit_pfn) {
213 high_pfn = limit_pfn;
215 curr = iova_find_limit(iovad, limit_pfn);
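
Lines 179-215 belong to __alloc_and_insert_iova_range(), the core allocator. Starting from the cached node (line 198) it walks the tree backwards, clamping high_pfn down to each reservation's pfn_lo until a hole of size frames opens above low_pfn. Line 194 fails 32-bit requests early once a size is known not to fit (the max32_alloc_size watermark), and lines 212-215 retry the region above the original cached start, via iova_find_limit(), before giving up. A hypothetical standalone model of the backwards first-fit over an ascending array of reserved ranges (all names invented; the kernel keeps a permanent anchor reservation at the top of the domain, so its walk always has a starting node):

    struct resv { unsigned long pfn_lo, pfn_hi; };

    static unsigned long fit_top_down(const struct resv *r, int n,
                                      unsigned long start_pfn,
                                      unsigned long limit_pfn, /* exclusive */
                                      unsigned long size,
                                      unsigned long align_mask)
    {
            unsigned long high_pfn = limit_pfn;

            for (int i = n - 1; i >= 0; i--) {
                    unsigned long new_pfn;

                    if (r[i].pfn_lo < high_pfn)
                            high_pfn = r[i].pfn_lo; /* must fit below r[i] */
                    if (high_pfn < size)
                            return 0;               /* ran out of space */
                    new_pfn = (high_pfn - size) & align_mask;
                    /* Done once the candidate clears the next range down. */
                    if (new_pfn >= start_pfn &&
                        (i == 0 || new_pfn > r[i - 1].pfn_hi))
                            return new_pfn;
            }
            return 0;
    }
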
306 * @limit_pfn: - max limit address
308 * This function allocates an iova in the range iovad->start_pfn to limit_pfn,
309 * searching top-down from limit_pfn to iovad->start_pfn. If the size_aligned
315 unsigned long limit_pfn,
325 ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn + 1,
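
Lines 306-325 cover the kerneldoc and body of alloc_iova(), the slow-path entry point. Note the limit_pfn + 1 at line 325: the public API takes an inclusive maximum PFN, while __alloc_and_insert_iova_range() expects the exclusive bound. A hedged caller sketch in the style of the dma-iommu users (iovad, nr_pages and the enclosing mapping function are assumed context):

    struct iova *iova;
    dma_addr_t dma_addr;
    /* Highest usable PFN for a 32-bit-addressable buffer, inclusive. */
    unsigned long limit = DMA_BIT_MASK(32) >> PAGE_SHIFT;

    iova = alloc_iova(iovad, nr_pages, limit, true);
    if (!iova)
            return DMA_MAPPING_ERROR;
    dma_addr = (dma_addr_t)iova->pfn_lo << PAGE_SHIFT;
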
432 * @limit_pfn: - max limit address
440 unsigned long limit_pfn, bool flush_rcache)
454 iova_pfn = iova_rcache_get(iovad, size, limit_pfn + 1);
459 new_iova = alloc_iova(iovad, size, limit_pfn, true);
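
Lines 432-459 are alloc_iova_fast(): line 454 first tries the per-CPU range caches (again widening the inclusive limit to an exclusive one), and only on a miss falls back to the rbtree path at line 459; if that fails too and flush_rcache is set, the caches are flushed back into the tree and the allocation retried. Between those lines the function also rounds cacheable sizes up, so that a later free lands in the matching power-of-two bucket; assuming the upstream constant, that check is roughly:

    /* Keep rcache-eligible sizes a power of two; odd sizes would poison
     * the size-class buckets when the range is freed back. */
    if (size < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
            size = roundup_pow_of_two(size);
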
698 unsigned long limit_pfn)
704 for (i = mag->size - 1; mag->pfns[i] > limit_pfn; i--)
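
Lines 698-704 are iova_magazine_pop(): scan the magazine from the top for an entry at or below limit_pfn, giving up when the scan hits slot 0. A hypothetical self-contained model (struct magazine and mag_pop are invented names; the kernel swaps the last slot into the hole rather than shifting entries):

    struct magazine {
            unsigned long size;      /* entries currently held */
            unsigned long pfns[127]; /* cached pfn_lo values */
    };

    static unsigned long mag_pop(struct magazine *mag, unsigned long limit_pfn)
    {
            unsigned long pfn;
            int i;

            if (!mag->size)          /* the kernel checks this in the caller */
                    return 0;
            for (i = mag->size - 1; mag->pfns[i] > limit_pfn; i--)
                    if (i == 0)
                            return 0; /* nothing satisfies the limit */

            pfn = mag->pfns[i];
            mag->pfns[i] = mag->pfns[--mag->size]; /* swap-to-pop */
            return pfn;
    }
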
840 unsigned long limit_pfn)
866 iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn);
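
Lines 840-866 sit inside __iova_rcache_get(), the per-CPU fast path: under the CPU's lock, line 866 pops from the loaded magazine, after first swapping in the previous magazine, or a full one from the shared depot, when loaded is empty. The selection has roughly this shape (locking and the depot refill elided):

    if (!iova_magazine_empty(cpu_rcache->loaded)) {
            has_pfn = true;
    } else if (!iova_magazine_empty(cpu_rcache->prev)) {
            swap(cpu_rcache->prev, cpu_rcache->loaded);
            has_pfn = true;
    } else {
            /* try to take a full magazine from the depot */
    }
    if (has_pfn)
            iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn);
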
880 unsigned long limit_pfn)
887 return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn - size);
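
Lines 880-887 are iova_rcache_get(), which picks the power-of-two size class and, at line 887, rewrites the bound for the cache. The magazines store pfn_lo values, and a cached range of size frames starting at pfn_lo satisfies an inclusive limit L exactly when pfn_lo + size - 1 <= L, i.e. pfn_lo <= (L + 1) - size; since line 454 already passed L + 1 down, limit_pfn - size is precisely that threshold. Assuming the upstream rcache layout, the whole function is roughly:

    static unsigned long iova_rcache_get(struct iova_domain *iovad,
                                         unsigned long size,
                                         unsigned long limit_pfn)
    {
            unsigned int log_size = order_base_2(size); /* size-class index */

            if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE || !iovad->rcaches)
                    return 0;

            return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn - size);
    }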