Lines matching refs:vma (cross-reference hits for vma; the fragments below are from the Linux i915 DRM driver's i915_vma.c, and the leading number on each line is its line number in that source file)
51 void i915_vma_free(struct i915_vma *vma)
53 return kmem_cache_free(global.slab_vmas, vma);
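A note on the free above: the vma goes back to a dedicated slab cache (global.slab_vmas) rather than the general-purpose allocator, the usual treatment for small, high-churn structs. A minimal sketch of that idiom, with a hypothetical struct foo and foo_cache standing in for the i915 names:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>

struct foo { int payload; };

static struct kmem_cache *foo_cache;    /* plays the role of global.slab_vmas */

static int __init foo_cache_init(void)
{
        /* KMEM_CACHE() derives name, size and alignment from the type */
        foo_cache = KMEM_CACHE(foo, SLAB_HWCACHE_ALIGN);
        return foo_cache ? 0 : -ENOMEM;
}

static struct foo *foo_alloc(void)
{
        return kmem_cache_zalloc(foo_cache, GFP_KERNEL);
}

static void foo_free(struct foo *f)
{
        kmem_cache_free(foo_cache, f);
}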
60 static void vma_print_allocator(struct i915_vma *vma, const char *reason)
66 if (!vma->node.stack) {
67 DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
68 vma->node.start, vma->node.size, reason);
72 nr_entries = stack_depot_fetch(vma->node.stack, &entries);
74 DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
75 vma->node.start, vma->node.size, reason, buf);
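The print above works because whoever inserted the node stored a backtrace handle in vma->node.stack. A compressed sketch of the stack-depot save/fetch pairing it relies on (assumes CONFIG_STACKDEPOT; all names other than the stackdepot/stacktrace calls are hypothetical):

#include <linux/kernel.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

static depot_stack_handle_t record_backtrace(void)
{
        unsigned long entries[16];
        unsigned int n;

        n = stack_trace_save(entries, ARRAY_SIZE(entries), 1 /* skip self */);
        return stack_depot_save(entries, n, GFP_NOWAIT);
}

static void print_backtrace(depot_stack_handle_t handle)
{
        unsigned long *entries;
        unsigned int n;
        char buf[512];

        n = stack_depot_fetch(handle, &entries);
        stack_trace_snprint(buf, sizeof(buf), entries, n, 0);
        pr_info("inserted at:\n%s", buf);
}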
80 static void vma_print_allocator(struct i915_vma *vma, const char *reason)
108 struct i915_vma *vma;
114 vma = i915_vma_alloc();
115 if (vma == NULL)
118 kref_init(&vma->ref);
119 mutex_init(&vma->pages_mutex);
120 vma->vm = i915_vm_get(vm);
121 vma->ops = &vm->vma_ops;
122 vma->obj = obj;
123 vma->resv = obj->base.resv;
124 vma->size = obj->base.size;
125 vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
127 i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire);
132 might_lock(&vma->active.mutex);
136 INIT_LIST_HEAD(&vma->closed_link);
139 vma->ggtt_view = *view;
145 vma->size = view->partial.size;
146 vma->size <<= PAGE_SHIFT;
147 GEM_BUG_ON(vma->size > obj->base.size);
149 vma->size = intel_rotation_info_size(&view->rotated);
150 vma->size <<= PAGE_SHIFT;
152 vma->size = intel_remapped_info_size(&view->remapped);
153 vma->size <<= PAGE_SHIFT;
157 if (unlikely(vma->size > vm->total))
160 GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));
162 spin_lock(&obj->vma.lock);
165 if (unlikely(overflows_type(vma->size, u32)))
168 vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
171 if (unlikely(vma->fence_size < vma->size || /* overflow */
172 vma->fence_size > vm->total))
175 GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));
177 vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
180 GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));
182 __set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
186 p = &obj->vma.tree.rb_node;
195 * already created a matching vma, so return the older instance
206 rb_link_node(&vma->obj_node, rb, p);
207 rb_insert_color(&vma->obj_node, &obj->vma.tree);
209 if (i915_vma_is_ggtt(vma))
211 * We put the GGTT vma at the start of the vma-list, followed
212 * by the ppGGTT vma. This allows us to break early when
213 * iterating over only the GGTT vma for an object, see
216 list_add(&vma->obj_link, &obj->vma.list);
218 list_add_tail(&vma->obj_link, &obj->vma.list);
220 spin_unlock(&obj->vma.lock);
222 return vma;
225 spin_unlock(&obj->vma.lock);
228 i915_vma_free(vma);
239 rb = obj->vma.tree.rb_node;
241 struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
244 cmp = i915_vma_compare(vma, vm, view);
246 return vma;
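Both the insertion walk in vma_create (lines 186-207) and the descend in vma_lookup above are the stock <linux/rbtree.h> idiom, with i915_vma_compare() supplying the ordering. The same shape over a plain u64 key, with a hypothetical struct item:

#include <linux/rbtree.h>
#include <linux/types.h>

struct item {
        struct rb_node node;
        u64 key;
};

static struct item *item_lookup(struct rb_root *root, u64 key)
{
        struct rb_node *rb = root->rb_node;

        while (rb) {
                struct item *it = rb_entry(rb, struct item, node);

                if (key < it->key)
                        rb = rb->rb_left;
                else if (key > it->key)
                        rb = rb->rb_right;
                else
                        return it;
        }
        return NULL;
}

static struct item *item_insert(struct rb_root *root, struct item *it)
{
        struct rb_node **p = &root->rb_node, *parent = NULL;

        while (*p) {
                struct item *pos = rb_entry(*p, struct item, node);

                parent = *p;
                if (it->key < pos->key)
                        p = &(*p)->rb_left;
                else if (it->key > pos->key)
                        p = &(*p)->rb_right;
                else
                        return pos;     /* duplicate: hand back the older one */
        }
        rb_link_node(&it->node, parent, p);
        rb_insert_color(&it->node, root);
        return it;
}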
268 * Returns the vma, or an error pointer.
275 struct i915_vma *vma;
280 spin_lock(&obj->vma.lock);
281 vma = vma_lookup(obj, vm, view);
282 spin_unlock(&obj->vma.lock);
284 /* vma_create() will resolve the race if another creates the vma */
285 if (unlikely(!vma))
286 vma = vma_create(obj, vm, view);
288 GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
289 return vma;
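The race comment at line 284 is the whole design: the first lookup is optimistic, and when two threads miss at once both call vma_create(), which re-walks the tree under obj->vma.lock so the first to link wins and the loser frees its candidate. A generic sketch of that shape, reusing item_lookup()/item_insert() from the previous sketch (struct cache is hypothetical):

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct cache {
        spinlock_t lock;
        struct rb_root root;
};

static struct item *item_instance(struct cache *c, u64 key)
{
        struct item *it, *old;

        spin_lock(&c->lock);
        it = item_lookup(&c->root, key);        /* optimistic fast path */
        spin_unlock(&c->lock);
        if (it)
                return it;

        it = kzalloc(sizeof(*it), GFP_KERNEL);
        if (!it)
                return ERR_PTR(-ENOMEM);
        it->key = key;

        spin_lock(&c->lock);
        old = item_insert(&c->root, it);        /* re-check under the lock */
        spin_unlock(&c->lock);
        if (old != it) {
                kfree(it);              /* lost the race: adopt the winner */
                it = old;
        }
        return it;
}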
296 struct i915_vma *vma;
306 struct i915_vma *vma = vw->vma;
308 vma->ops->bind_vma(vw->vm, &vw->stash,
309 vma, vw->cache_level, vw->flags);
346 int i915_vma_wait_for_bind(struct i915_vma *vma)
350 if (rcu_access_pointer(vma->active.excl.fence)) {
354 fence = dma_fence_get_rcu_safe(&vma->active.excl.fence);
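i915_vma_wait_for_bind peeks at the exclusive bind fence without any lock and only takes a reference if the fence is still current; dma_fence_get_rcu_safe() internally retries if the slot is swapped or the fence is already at zero refcount. The idiom, sketched around a bare fence slot (the wrapper name is hypothetical):

#include <linux/dma-fence.h>
#include <linux/rcupdate.h>

static int wait_for_exclusive(struct dma_fence __rcu **slot)
{
        int err = 0;

        if (rcu_access_pointer(*slot)) {        /* cheap unlocked peek */
                struct dma_fence *fence;

                rcu_read_lock();
                fence = dma_fence_get_rcu_safe(slot);
                rcu_read_unlock();
                if (fence) {
                        err = dma_fence_wait(fence, true /* interruptible */);
                        dma_fence_put(fence);
                }
        }
        return err;
}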
367 * @vma: VMA to map
376 int i915_vma_bind(struct i915_vma *vma,
384 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
385 GEM_BUG_ON(vma->size > vma->node.size);
387 if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
388 vma->node.size,
389 vma->vm->total)))
398 vma_flags = atomic_read(&vma->flags);
405 GEM_BUG_ON(!vma->pages);
407 trace_i915_vma_bind(vma, bind_flags);
408 if (work && bind_flags & vma->vm->bind_async_flags) {
411 work->vma = vma;
420 * Also note that we do not want to track the async vma as
424 prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
434 if (vma->obj) {
435 __i915_gem_object_pin_pages(vma->obj);
436 work->pinned = i915_gem_object_get(vma->obj);
439 vma->ops->bind_vma(vma->vm, NULL, vma, cache_level, bind_flags);
442 if (vma->obj)
443 set_bit(I915_BO_WAS_BOUND_BIT, &vma->obj->flags);
445 atomic_or(bind_flags, &vma->flags);
449 void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
454 if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
459 GEM_BUG_ON(!i915_vma_is_ggtt(vma));
460 GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));
462 ptr = READ_ONCE(vma->iomap);
464 ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
465 vma->node.start,
466 vma->node.size);
472 if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
474 ptr = vma->iomap;
478 __i915_vma_pin(vma);
480 err = i915_vma_pin_fence(vma);
484 i915_vma_set_ggtt_write(vma);
490 __i915_vma_unpin(vma);
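The cmpxchg at line 472 is a publish-once pattern: the io mapping is created with no lock held, whichever thread installs its pointer first wins, and the loser unmaps its copy and adopts the published one. The bare idiom, with hypothetical make_mapping()/destroy_mapping() helpers:

#include <linux/atomic.h>
#include <linux/compiler.h>

struct mapped {                         /* hypothetical container */
        void __iomem *iomap;
};

static void __iomem *make_mapping(struct mapped *m);    /* hypothetical, cf. io_mapping_map_wc() */
static void destroy_mapping(void __iomem *ptr);         /* hypothetical, cf. io_mapping_unmap() */

static void __iomem *get_mapping(struct mapped *m)
{
        void __iomem *ptr;

        ptr = READ_ONCE(m->iomap);      /* fast path: already published */
        if (ptr)
                return ptr;

        ptr = make_mapping(m);
        if (!ptr)
                return NULL;

        /* publish; if someone beat us, discard ours and take theirs */
        if (unlikely(cmpxchg(&m->iomap, NULL, ptr))) {
                destroy_mapping(ptr);
                ptr = m->iomap;
        }
        return ptr;
}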
495 void i915_vma_flush_writes(struct i915_vma *vma)
497 if (i915_vma_unset_ggtt_write(vma))
498 intel_gt_flush_ggtt_writes(vma->vm->gt);
501 void i915_vma_unpin_iomap(struct i915_vma *vma)
503 GEM_BUG_ON(vma->iomap == NULL);
505 i915_vma_flush_writes(vma);
507 i915_vma_unpin_fence(vma);
508 i915_vma_unpin(vma);
513 struct i915_vma *vma;
516 vma = fetch_and_zero(p_vma);
517 if (!vma)
520 obj = vma->obj;
523 i915_vma_unpin(vma);
531 bool i915_vma_misplaced(const struct i915_vma *vma,
534 if (!drm_mm_node_allocated(&vma->node))
537 if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
540 if (vma->node.size < size)
544 if (alignment && !IS_ALIGNED(vma->node.start, alignment))
547 if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
551 vma->node.start < (flags & PIN_OFFSET_MASK))
555 vma->node.start != (flags & PIN_OFFSET_MASK))
561 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
565 GEM_BUG_ON(!i915_vma_is_ggtt(vma));
566 GEM_BUG_ON(!vma->fence_size);
568 fenceable = (vma->node.size >= vma->fence_size &&
569 IS_ALIGNED(vma->node.start, vma->fence_alignment));
571 mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;
574 set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
576 clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
579 bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
581 struct drm_mm_node *node = &vma->node;
591 if (!i915_vm_has_cache_coloring(vma->vm))
594 /* Only valid to be called on an already inserted vma */
612 * i915_vma_insert - finds a slot for the vma in its address space
613 * @vma: the vma
626 i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
632 GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
633 GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
635 size = max(size, vma->size);
636 alignment = max(alignment, vma->display_alignment);
638 size = max_t(typeof(size), size, vma->fence_size);
640 alignment, vma->fence_alignment);
650 end = vma->vm->total;
652 end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
669 if (vma->obj && i915_vm_has_cache_coloring(vma->vm))
670 color = vma->obj->cache_level;
678 ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
693 vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
701 rounddown_pow_of_two(vma->page_sizes.sg |
709 GEM_BUG_ON(i915_vma_is_ggtt(vma));
713 if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
717 ret = i915_gem_gtt_insert(vma->vm, &vma->node,
723 GEM_BUG_ON(vma->node.start < start);
724 GEM_BUG_ON(vma->node.start + vma->node.size > end);
726 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
727 GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));
729 list_add_tail(&vma->vm_link, &vma->vm->bound_list);
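Behind i915_gem_gtt_reserve() (line 678) and i915_gem_gtt_insert() (line 717) sits the drm_mm range allocator: a fixed placement maps to drm_mm_reserve_node(), a search maps to drm_mm_insert_node_in_range() over [start, end). A sketch of those two underlying calls (the wrapper itself is hypothetical):

#include <drm/drm_mm.h>

static int place_node(struct drm_mm *mm, struct drm_mm_node *node,
                      u64 size, u64 alignment, unsigned long color,
                      u64 start, u64 end, u64 offset, bool fixed)
{
        if (fixed) {
                node->start = offset;   /* caller-chosen address */
                node->size = size;
                node->color = color;
                return drm_mm_reserve_node(mm, node);   /* -ENOSPC if occupied */
        }

        return drm_mm_insert_node_in_range(mm, node, size, alignment, color,
                                           start, end, DRM_MM_INSERT_BEST);
}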
735 i915_vma_detach(struct i915_vma *vma)
737 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
738 GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
742 * vma, we can drop its hold on the backing storage and allow
745 list_del(&vma->vm_link);
748 static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
753 bound = atomic_read(&vma->flags);
765 } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
774 mutex_lock(&vma->vm->mutex);
785 } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
786 mutex_unlock(&vma->vm->mutex);
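try_qad_pin grabs an extra pin without taking vm->mutex when the vma is already bound as required: it bumps the flags word speculatively with atomic_try_cmpxchg(), which on failure refreshes the expected value so the loop simply retries. The loop shape, with a hypothetical BOUND bit sitting above the pin-count field:

#include <linux/atomic.h>
#include <linux/bits.h>
#include <linux/types.h>

#define OBJ_BOUND       BIT(20)         /* hypothetical flag above the pin count */

static bool try_pin_if_bound(atomic_t *flags)
{
        int old = atomic_read(flags);

        do {
                if (!(old & OBJ_BOUND))
                        return false;   /* not bound: caller takes the slow path */
        } while (!atomic_try_cmpxchg(flags, &old, old + 1));

        return true;                    /* pin count lives in the low bits */
}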
791 static int vma_get_pages(struct i915_vma *vma)
795 if (atomic_add_unless(&vma->pages_count, 1, 0))
799 if (mutex_lock_interruptible(&vma->pages_mutex))
802 if (!atomic_read(&vma->pages_count)) {
803 if (vma->obj) {
804 err = i915_gem_object_pin_pages(vma->obj);
809 err = vma->ops->set_pages(vma);
811 if (vma->obj)
812 i915_gem_object_unpin_pages(vma->obj);
816 atomic_inc(&vma->pages_count);
819 mutex_unlock(&vma->pages_mutex);
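vma_get_pages is the standard two-tier acquire: atomic_add_unless(&count, 1, 0) takes a reference only if one already exists, and the first user falls back to the mutex to populate the pages exactly once. The shape, with a hypothetical struct res and alloc hook:

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/mutex.h>

struct res {
        atomic_t count;
        struct mutex lock;
        int (*alloc)(struct res *);     /* hypothetical populate hook */
};

static int res_get(struct res *r)
{
        int err = 0;

        /* fast path: bump the count unless it is currently 0 */
        if (atomic_add_unless(&r->count, 1, 0))
                return 0;

        if (mutex_lock_interruptible(&r->lock))
                return -EINTR;
        if (!atomic_read(&r->count)) {
                err = r->alloc(r);      /* first user populates */
                if (err)
                        goto unlock;
        }
        atomic_inc(&r->count);
unlock:
        mutex_unlock(&r->lock);
        return err;
}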
824 static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
827 mutex_lock_nested(&vma->pages_mutex, SINGLE_DEPTH_NESTING);
828 GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
829 if (atomic_sub_return(count, &vma->pages_count) == 0) {
830 vma->ops->clear_pages(vma);
831 GEM_BUG_ON(vma->pages);
832 if (vma->obj)
833 i915_gem_object_unpin_pages(vma->obj);
835 mutex_unlock(&vma->pages_mutex);
838 static void vma_put_pages(struct i915_vma *vma)
840 if (atomic_add_unless(&vma->pages_count, -1, 1))
843 __vma_put_pages(vma, 1);
846 static void vma_unbind_pages(struct i915_vma *vma)
850 lockdep_assert_held(&vma->vm->mutex);
853 count = atomic_read(&vma->pages_count);
857 __vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
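The odd-looking count | count << I915_VMA_PAGES_BIAS at line 857 works because pages_count packs two counters into one atomic word: bits below the bias are get/put holds, bits above it count bindings, and each binding was taken together with one paired hold. A single subtraction can therefore retire every binding and its hold at once. A sketch of the encoding (the bias value here is illustrative; the real one lives in i915_vma.h):

/* low bits: get/put holds; high bits: bindings */
#define PAGES_BIAS      24
#define PAGES_ACTIVE    (BIT(PAGES_BIAS) | 1)   /* one binding + its paired hold */

/* bind:   atomic_add(PAGES_ACTIVE, &pages_count);        */
/* unbind: n = atomic_read(&pages_count) >> PAGES_BIAS;   */
/*         atomic_sub(n | n << PAGES_BIAS, &pages_count); */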
860 int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
869 if (debug_locks && lockdep_is_held(&vma->vm->i915->drm.struct_mutex))
878 /* First try and grab the pin without rebinding the vma */
879 if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK))
882 err = vma_get_pages(vma);
887 wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
889 if (flags & vma->vm->bind_async_flags) {
896 work->vm = i915_vm_get(vma->vm);
899 if (vma->vm->allocate_va_range) {
900 err = i915_vm_alloc_pt_stash(vma->vm,
902 vma->size);
906 err = i915_vm_pin_pt_stash(vma->vm,
914 * Differentiate between user/kernel vma inside the aliasing-ppgtt.
916 * We conflate the Global GTT with the user's vma when using the
930 err = mutex_lock_interruptible_nested(&vma->vm->mutex,
937 if (unlikely(i915_vma_is_closed(vma))) {
942 bound = atomic_read(&vma->flags);
954 __i915_vma_pin(vma);
958 err = i915_active_acquire(&vma->active);
963 err = i915_vma_insert(vma, size, alignment, flags);
967 if (i915_is_ggtt(vma->vm))
968 __i915_vma_set_map_and_fenceable(vma);
971 GEM_BUG_ON(!vma->pages);
972 err = i915_vma_bind(vma,
973 vma->obj ? vma->obj->cache_level : 0,
980 atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
981 list_move_tail(&vma->vm_link, &vma->vm->bound_list);
983 __i915_vma_pin(vma);
984 GEM_BUG_ON(!i915_vma_is_pinned(vma));
985 GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
986 GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
989 if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
990 i915_vma_detach(vma);
991 drm_mm_remove_node(&vma->node);
994 i915_active_release(&vma->active);
996 mutex_unlock(&vma->vm->mutex);
1002 intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
1003 vma_put_pages(vma);
1018 int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
1021 struct i915_address_space *vm = vma->vm;
1024 GEM_BUG_ON(!i915_vma_is_ggtt(vma));
1027 err = i915_vma_pin_ww(vma, ww, 0, align, flags | PIN_GLOBAL);
1030 err = i915_vma_wait_for_bind(vma);
1032 i915_vma_unpin(vma);
1046 static void __vma_close(struct i915_vma *vma, struct intel_gt *gt)
1060 GEM_BUG_ON(i915_vma_is_closed(vma));
1061 list_add(&vma->closed_link, >->closed_vma);
1064 void i915_vma_close(struct i915_vma *vma)
1066 struct intel_gt *gt = vma->vm->gt;
1069 if (i915_vma_is_ggtt(vma))
1072 GEM_BUG_ON(!atomic_read(&vma->open_count));
1073 if (atomic_dec_and_lock_irqsave(&vma->open_count,
1076 __vma_close(vma, gt);
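i915_vma_close wants gt->closed_lock only on the final reference drop, which is exactly what atomic_dec_and_lock_irqsave() provides: it returns true, with the lock held and interrupts saved, only when the counter reaches zero. The pattern, with hypothetical names:

#include <linux/list.h>
#include <linux/spinlock.h>

static void obj_close(atomic_t *open_count, struct list_head *link,
                      struct list_head *closed, spinlock_t *lock)
{
        unsigned long flags;

        /* the lock is taken only on the 1 -> 0 transition */
        if (atomic_dec_and_lock_irqsave(open_count, lock, flags)) {
                list_add(link, closed);
                spin_unlock_irqrestore(lock, flags);
        }
}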
1081 static void __i915_vma_remove_closed(struct i915_vma *vma)
1083 struct intel_gt *gt = vma->vm->gt;
1086 list_del_init(&vma->closed_link);
1090 void i915_vma_reopen(struct i915_vma *vma)
1092 if (i915_vma_is_closed(vma))
1093 __i915_vma_remove_closed(vma);
1098 struct i915_vma *vma = container_of(ref, typeof(*vma), ref);
1100 if (drm_mm_node_allocated(&vma->node)) {
1101 mutex_lock(&vma->vm->mutex);
1102 atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
1103 WARN_ON(__i915_vma_unbind(vma));
1104 mutex_unlock(&vma->vm->mutex);
1105 GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
1107 GEM_BUG_ON(i915_vma_is_active(vma));
1109 if (vma->obj) {
1110 struct drm_i915_gem_object *obj = vma->obj;
1112 spin_lock(&obj->vma.lock);
1113 list_del(&vma->obj_link);
1114 if (!RB_EMPTY_NODE(&vma->obj_node))
1115 rb_erase(&vma->obj_node, &obj->vma.tree);
1116 spin_unlock(&obj->vma.lock);
1119 __i915_vma_remove_closed(vma);
1120 i915_vm_put(vma->vm);
1122 i915_active_fini(&vma->active);
1123 i915_vma_free(vma);
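This release function is the kref half of the vma lifetime: kref_init() at creation (line 118) pairs with a kref_put() in __i915_vma_put() that funnels into i915_vma_release(), which recovers the vma with container_of(). The boilerplate, for a hypothetical struct widget:

#include <linux/kref.h>
#include <linux/slab.h>

struct widget {
        struct kref ref;
        /* ... payload ... */
};

static void widget_release(struct kref *ref)
{
        struct widget *w = container_of(ref, typeof(*w), ref);

        /* i915_vma_release additionally unbinds, unlinks and finalizes */
        kfree(w);
}

static inline void widget_put(struct widget *w)
{
        kref_put(&w->ref, widget_release);
}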
1128 struct i915_vma *vma, *next;
1132 list_for_each_entry_safe(vma, next, >->closed_vma, closed_link) {
1133 struct drm_i915_gem_object *obj = vma->obj;
1134 struct i915_address_space *vm = vma->vm;
1146 list_move(&vma->closed_link, &closed);
1150 /* As the GT is held idle, no vma can be reopened as we destroy them */
1151 list_for_each_entry_safe(vma, next, &closed, closed_link) {
1152 struct drm_i915_gem_object *obj = vma->obj;
1153 struct i915_address_space *vm = vma->vm;
1155 INIT_LIST_HEAD(&vma->closed_link);
1156 __i915_vma_put(vma);
1163 static void __i915_vma_iounmap(struct i915_vma *vma)
1165 GEM_BUG_ON(i915_vma_is_pinned(vma));
1167 if (vma->iomap == NULL)
1170 io_mapping_unmap(vma->iomap);
1171 vma->iomap = NULL;
1174 void i915_vma_revoke_mmap(struct i915_vma *vma)
1179 if (!i915_vma_has_userfault(vma))
1182 GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
1183 GEM_BUG_ON(!vma->obj->userfault_count);
1185 node = &vma->mmo->vma_node;
1186 vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
1187 unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
1189 vma->size,
1192 i915_vma_unset_userfault(vma);
1193 if (!--vma->obj->userfault_count)
1194 list_del(&vma->obj->userfault_link);
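Revoking the mmap means zapping the user PTEs so the next CPU access faults back into the driver, which can then revalidate or refuse the binding. The span handed to unmap_mapping_range() is the node's mmap offset plus any partial-view offset, for vma->size bytes. A thin sketch of that call (the wrapper is hypothetical):

#include <drm/drm_vma_manager.h>
#include <linux/mm.h>

static void revoke_user_ptes(struct address_space *mapping,
                             struct drm_vma_offset_node *node,
                             u64 view_offset, u64 size)
{
        /* even_cows=1: also zap private COW copies of the range */
        unmap_mapping_range(mapping,
                            drm_vma_node_offset_addr(node) + view_offset,
                            size, 1);
}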
1198 __i915_request_await_bind(struct i915_request *rq, struct i915_vma *vma)
1200 return __i915_request_await_exclusive(rq, &vma->active);
1203 int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
1207 GEM_BUG_ON(!i915_vma_is_pinned(vma));
1209 /* Wait for the vma to be bound before we start! */
1210 err = __i915_request_await_bind(rq, vma);
1214 return i915_active_add_request(&vma->active, rq);
1217 int i915_vma_move_to_active(struct i915_vma *vma,
1221 struct drm_i915_gem_object *obj = vma->obj;
1226 err = __i915_vma_move_to_active(vma, rq);
1240 dma_resv_add_excl_fence(vma->resv, &rq->fence);
1244 err = dma_resv_reserve_shared(vma->resv, 1);
1248 dma_resv_add_shared_fence(vma->resv, &rq->fence);
1252 if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
1253 i915_active_add_request(&vma->fence->active, rq);
1258 GEM_BUG_ON(!i915_vma_is_active(vma));
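The reservation bookkeeping above distinguishes writers from readers: a write installs the request's fence as the exclusive fence, while a read first reserves a shared slot (the only step that can fail, with -ENOMEM) and then adds the fence, which by contract cannot fail. Sketched against the dma_resv API this code uses; the caller must hold the dma_resv lock:

#include <linux/dma-resv.h>

static int publish_fence(struct dma_resv *resv, struct dma_fence *fence,
                         bool write)
{
        int err;

        if (write) {
                dma_resv_add_excl_fence(resv, fence);   /* supersedes readers */
                return 0;
        }

        err = dma_resv_reserve_shared(resv, 1);         /* may allocate */
        if (err)
                return err;

        dma_resv_add_shared_fence(resv, fence);         /* cannot fail now */
        return 0;
}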
1262 void __i915_vma_evict(struct i915_vma *vma)
1264 GEM_BUG_ON(i915_vma_is_pinned(vma));
1266 if (i915_vma_is_map_and_fenceable(vma)) {
1268 i915_vma_revoke_mmap(vma);
1277 * bit from set-domain, as we mark all GGTT vma associated
1278 * with an object. We know this is for another vma, as we
1279 * are currently unbinding this one -- so if this vma will be
1283 i915_vma_flush_writes(vma);
1286 i915_vma_revoke_fence(vma);
1288 __i915_vma_iounmap(vma);
1289 clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
1291 GEM_BUG_ON(vma->fence);
1292 GEM_BUG_ON(i915_vma_has_userfault(vma));
1294 if (likely(atomic_read(&vma->vm->open))) {
1295 trace_i915_vma_unbind(vma);
1296 vma->ops->unbind_vma(vma->vm, vma);
1299 &vma->flags);
1301 i915_vma_detach(vma);
1302 vma_unbind_pages(vma);
1305 int __i915_vma_unbind(struct i915_vma *vma)
1309 lockdep_assert_held(&vma->vm->mutex);
1311 if (!drm_mm_node_allocated(&vma->node))
1314 if (i915_vma_is_pinned(vma)) {
1315 vma_print_allocator(vma, "is pinned");
1320 * After confirming that no one else is pinning this vma, wait for
1324 ret = i915_vma_sync(vma);
1328 GEM_BUG_ON(i915_vma_is_active(vma));
1329 __i915_vma_evict(vma);
1331 drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
1335 int i915_vma_unbind(struct i915_vma *vma)
1337 struct i915_address_space *vm = vma->vm;
1342 err = i915_vma_sync(vma);
1346 if (!drm_mm_node_allocated(&vma->node))
1349 if (i915_vma_is_pinned(vma)) {
1350 vma_print_allocator(vma, "is pinned");
1354 if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
1358 err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref);
1362 err = __i915_vma_unbind(vma);
1371 struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
1373 i915_gem_object_make_unshrinkable(vma->obj);
1374 return vma;
1377 void i915_vma_make_shrinkable(struct i915_vma *vma)
1379 i915_gem_object_make_shrinkable(vma->obj);
1382 void i915_vma_make_purgeable(struct i915_vma *vma)
1384 i915_gem_object_make_purgeable(vma->obj);