Lines matching the identifier iovad in drivers/iommu/iova.c (Linux kernel); the number at the start of each matched line is its line number in that file.
20 static bool iova_rcache_insert(struct iova_domain *iovad,
23 static unsigned long iova_rcache_get(struct iova_domain *iovad,
26 static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
27 static void free_iova_rcaches(struct iova_domain *iovad);
36 struct iova_domain *iovad;
38 iovad = hlist_entry_safe(node, struct iova_domain, cpuhp_dead);
40 free_cpu_cached_iovas(cpu, iovad);
44 static void free_global_cached_iovas(struct iova_domain *iovad);
52 init_iova_domain(struct iova_domain *iovad, unsigned long granule,
62 spin_lock_init(&iovad->iova_rbtree_lock);
63 iovad->rbroot = RB_ROOT;
64 iovad->cached_node = &iovad->anchor.node;
65 iovad->cached32_node = &iovad->anchor.node;
66 iovad->granule = granule;
67 iovad->start_pfn = start_pfn;
68 iovad->dma_32bit_pfn = 1UL << (32 - iova_shift(iovad));
69 iovad->max32_alloc_size = iovad->dma_32bit_pfn;
70 iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
71 rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
72 rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
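The lines above (52-72) are init_iova_domain(): it seeds the rbtree with a permanent anchor node and derives dma_32bit_pfn from the granule. A minimal setup sketch follows; my_iovad and my_domain_setup() are hypothetical names, and the later sketches reuse my_iovad:

#include <linux/iova.h>
#include <linux/sizes.h>

static struct iova_domain my_iovad;	/* hypothetical example domain */

static void my_domain_setup(void)
{
	/*
	 * The granule must be a power of two; it is the size of one pfn
	 * in this domain. start_pfn = 1 keeps pfn 0 unallocatable, so a
	 * zero dma_addr_t can serve as a failure value in later sketches.
	 */
	init_iova_domain(&my_iovad, SZ_4K, 1);
}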
77 __get_cached_rbnode(struct iova_domain *iovad, unsigned long limit_pfn)
79 if (limit_pfn <= iovad->dma_32bit_pfn)
80 return iovad->cached32_node;
82 return iovad->cached_node;
86 __cached_rbnode_insert_update(struct iova_domain *iovad, struct iova *new)
88 if (new->pfn_hi < iovad->dma_32bit_pfn)
89 iovad->cached32_node = &new->node;
91 iovad->cached_node = &new->node;
95 __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
99 cached_iova = to_iova(iovad->cached32_node);
101 (free->pfn_hi < iovad->dma_32bit_pfn &&
103 iovad->cached32_node = rb_next(&free->node);
105 if (free->pfn_lo < iovad->dma_32bit_pfn)
106 iovad->max32_alloc_size = iovad->dma_32bit_pfn;
108 cached_iova = to_iova(iovad->cached_node);
110 iovad->cached_node = rb_next(&free->node);
113 static struct rb_node *iova_find_limit(struct iova_domain *iovad, unsigned long limit_pfn)
124 if (limit_pfn > iovad->dma_32bit_pfn)
125 return &iovad->anchor.node;
127 node = iovad->rbroot.rb_node;
178 static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
187 unsigned long high_pfn = limit_pfn, low_pfn = iovad->start_pfn;
193 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
194 if (limit_pfn <= iovad->dma_32bit_pfn &&
195 size >= iovad->max32_alloc_size)
198 curr = __get_cached_rbnode(iovad, limit_pfn);
212 if (low_pfn == iovad->start_pfn && retry_pfn < limit_pfn) {
215 curr = iova_find_limit(iovad, limit_pfn);
219 iovad->max32_alloc_size = size;
228 iova_insert_rbtree(&iovad->rbroot, new, prev);
229 __cached_rbnode_insert_update(iovad, new);
231 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
235 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
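__alloc_and_insert_iova_range() (178-235) walks the tree top-down from limit_pfn, retrying once from the 32-bit boundary (212-215) and recording a failed size in max32_alloc_size (219) so later sub-4 GiB requests of that size fail fast. The dma_32bit_pfn threshold it compares against is plain arithmetic; a hypothetical helper mirroring the computation on line 68, assuming iova_shift() == __ffs(granule) as defined in include/linux/iova.h:

#include <linux/bitops.h>

static unsigned long dma_32bit_pfn_for(unsigned long granule)
{
	/* first pfn *above* the 32-bit range, as set on line 68 */
	return 1UL << (32 - __ffs(granule));
}

/*
 * dma_32bit_pfn_for(SZ_4K) == 1UL << 20 == 0x100000: pfn 0xfffff is
 * the last 4 KiB granule that lies entirely below 4 GiB.
 */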
304 * @iovad: - iova domain in question
308 * This function allocates an iova in the range iovad->start_pfn to limit_pfn,
309 * searching top-down from limit_pfn to iovad->start_pfn. If the size_aligned
314 alloc_iova(struct iova_domain *iovad, unsigned long size,
325 ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn + 1,
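alloc_iova() (314-325) treats limit_pfn as inclusive (note the limit_pfn + 1 on line 325) and returns NULL on failure. A hedged usage sketch for a device limited to 32-bit DMA; my_alloc_range() is illustrative and assumes size is already granule-aligned:

#include <linux/dma-mapping.h>

static dma_addr_t my_alloc_range(size_t size)
{
	unsigned long shift = iova_shift(&my_iovad);
	struct iova *iova;

	/* size in granule units, size-aligned, below the 4 GiB boundary */
	iova = alloc_iova(&my_iovad, size >> shift,
			  DMA_BIT_MASK(32) >> shift, true);
	if (!iova)
		return 0;	/* NULL means the requested range is exhausted */

	return iova_dma_addr(&my_iovad, iova);
}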
338 private_find_iova(struct iova_domain *iovad, unsigned long pfn)
340 struct rb_node *node = iovad->rbroot.rb_node;
342 assert_spin_locked(&iovad->iova_rbtree_lock);
358 static void remove_iova(struct iova_domain *iovad, struct iova *iova)
360 assert_spin_locked(&iovad->iova_rbtree_lock);
361 __cached_rbnode_delete_update(iovad, iova);
362 rb_erase(&iova->node, &iovad->rbroot);
367 * @iovad: - iova domain in question.
372 struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
378 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
379 iova = private_find_iova(iovad, pfn);
380 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
387 * @iovad: iova domain in question.
392 __free_iova(struct iova_domain *iovad, struct iova *iova)
396 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
397 remove_iova(iovad, iova);
398 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
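find_iova() (372-380) returns the node whose [pfn_lo, pfn_hi] range contains pfn, and __free_iova() (392-398) removes a node the caller already holds. Pairing them is fine when nothing else can free the same node concurrently; a sketch with a hypothetical my_release_pfn():

static void my_release_pfn(unsigned long pfn)
{
	struct iova *iova = find_iova(&my_iovad, pfn);

	if (WARN_ON(!iova))
		return;		/* no allocation covers this pfn */

	__free_iova(&my_iovad, iova);
}

Note that free_iova() below (411-423) collapses the two steps under a single hold of iova_rbtree_lock, so it is the safer choice when concurrent frees are possible.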
405 * @iovad: - iova domain in question.
411 free_iova(struct iova_domain *iovad, unsigned long pfn)
416 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
417 iova = private_find_iova(iovad, pfn);
419 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
422 remove_iova(iovad, iova);
423 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
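Callers usually hold a dma_addr_t rather than a pfn; converting with iova_pfn() from include/linux/iova.h before calling free_iova() is the common idiom. A sketch:

static void my_release_dma(dma_addr_t dma)
{
	/* iova_pfn() is simply dma >> iova_shift(&my_iovad) */
	free_iova(&my_iovad, iova_pfn(&my_iovad, dma));
}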
430 * @iovad: - iova domain in question
439 alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
454 iova_pfn = iova_rcache_get(iovad, size, limit_pfn + 1);
459 new_iova = alloc_iova(iovad, size, limit_pfn, true);
469 free_cpu_cached_iovas(cpu, iovad);
470 free_global_cached_iovas(iovad);
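alloc_iova_fast() (439-470) tries the per-CPU rcaches first (454) and falls back to alloc_iova(); when that also fails and flush_rcache is true, lines 469-470 flush every CPU's cache plus the global depot and retry. It returns a bare pfn, 0 on failure. A sketch loosely modelled on the caller in drivers/iommu/dma-iommu.c; dma_limit is an assumed per-device DMA mask:

static unsigned long my_alloc_fast(size_t size, u64 dma_limit)
{
	unsigned long shift = iova_shift(&my_iovad);

	/* returns a pfn, or 0 when even the post-flush retry fails */
	return alloc_iova_fast(&my_iovad, size >> shift,
			       dma_limit >> shift, true);
}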
480 * @iovad: - iova domain in question.
487 free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
489 if (iova_rcache_insert(iovad, pfn, size))
492 free_iova(iovad, pfn);
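free_iova_fast() (487-492) stashes the range in the rcache when iova_rcache_insert() accepts it and only falls back to the rbtree path (492) otherwise; size must match what alloc_iova_fast() was given. A sketch:

static void my_free_fast(unsigned long pfn, size_t size)
{
	/* same granule-unit size used at allocation time */
	free_iova_fast(&my_iovad, pfn, size >> iova_shift(&my_iovad));
}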
496 static void iova_domain_free_rcaches(struct iova_domain *iovad)
499 &iovad->cpuhp_dead);
500 free_iova_rcaches(iovad);
505 * @iovad: - iova domain in question.
508 void put_iova_domain(struct iova_domain *iovad)
512 if (iovad->rcaches)
513 iova_domain_free_rcaches(iovad);
515 rbtree_postorder_for_each_entry_safe(iova, tmp, &iovad->rbroot, node)
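put_iova_domain() (508-515) tears down the rcaches if they were initialized (512-513), which also unregisters the CPU-hotplug dead callback, then frees every remaining node in a postorder walk. A matching teardown for the earlier sketches:

static void my_domain_teardown(void)
{
	put_iova_domain(&my_iovad);	/* no separate rcache cleanup needed */
}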
546 __insert_new_range(struct iova_domain *iovad,
553 iova_insert_rbtree(&iovad->rbroot, iova, NULL);
570 * @iovad: - iova domain pointer
577 reserve_iova(struct iova_domain *iovad,
586 if (WARN_ON((pfn_hi | pfn_lo) > (ULLONG_MAX >> iova_shift(iovad))))
589 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
590 for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
606 iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
609 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
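reserve_iova() (577-609) inserts a range the allocator must never hand out, typically for MSI windows or firmware-reserved regions; both bounds are inclusive pfns and NULL signals failure. A sketch reserving a hypothetical hole at a known bus-address window:

static int my_reserve_hole(dma_addr_t base, size_t size)
{
	unsigned long lo = iova_pfn(&my_iovad, base);
	unsigned long hi = iova_pfn(&my_iovad, base + size - 1);

	return reserve_iova(&my_iovad, lo, hi) ? 0 : -ENOMEM;
}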
665 iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad)
670 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
673 struct iova *iova = private_find_iova(iovad, mag->pfns[i]);
678 remove_iova(iovad, iova);
682 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
720 int iova_domain_init_rcaches(struct iova_domain *iovad)
725 iovad->rcaches = kcalloc(IOVA_RANGE_CACHE_MAX_SIZE,
728 if (!iovad->rcaches)
735 rcache = &iovad->rcaches[i];
758 &iovad->cpuhp_dead);
764 free_iova_rcaches(iovad);
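iova_domain_init_rcaches() (720-764) allocates the per-CPU magazine caches and registers the hotplug-dead callback (758); on allocation failure it frees the rcaches itself (764) and returns an error. In kernels where it is exported, fast-path setup pairs it with init_iova_domain(); a sketch:

static int my_domain_setup_fast(void)
{
	int ret;

	init_iova_domain(&my_iovad, SZ_4K, 1);

	/* on error the rcaches were already torn down internally */
	ret = iova_domain_init_rcaches(&my_iovad);
	if (ret)
		return ret;

	return 0;
}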
775 static bool __iova_rcache_insert(struct iova_domain *iovad,
816 iova_magazine_free_pfns(mag_to_free, iovad);
823 static bool iova_rcache_insert(struct iova_domain *iovad, unsigned long pfn,
831 return __iova_rcache_insert(iovad, &iovad->rcaches[log_size], pfn);
878 static unsigned long iova_rcache_get(struct iova_domain *iovad,
887 return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn - size);
893 static void free_iova_rcaches(struct iova_domain *iovad)
901 rcache = &iovad->rcaches[i];
914 kfree(iovad->rcaches);
915 iovad->rcaches = NULL;
921 static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad)
929 rcache = &iovad->rcaches[i];
932 iova_magazine_free_pfns(cpu_rcache->loaded, iovad);
933 iova_magazine_free_pfns(cpu_rcache->prev, iovad);
941 static void free_global_cached_iovas(struct iova_domain *iovad)
948 rcache = &iovad->rcaches[i];
951 iova_magazine_free_pfns(rcache->depot[j], iovad);