Lines matching refs: vma. The hits below all appear to come from the i915 driver's i915_vma.c (drivers/gpu/drm/i915); the leading number on each line is the source line in that file.

46 static inline void assert_vma_held_evict(const struct i915_vma *vma)
53 if (kref_read(&vma->vm->ref))
54 assert_object_held_shared(vma->obj);
64 static void i915_vma_free(struct i915_vma *vma)
66 return kmem_cache_free(slab_vmas, vma);
73 static void vma_print_allocator(struct i915_vma *vma, const char *reason)
77 if (!vma->node.stack) {
78 drm_dbg(vma->obj->base.dev,
79 "vma.node [%08llx + %08llx] %s: unknown owner\n",
80 vma->node.start, vma->node.size, reason);
84 stack_depot_snprint(vma->node.stack, buf, sizeof(buf), 0);
85 drm_dbg(vma->obj->base.dev,
86 "vma.node [%08llx + %08llx] %s: inserted at %s\n",
87 vma->node.start, vma->node.size, reason, buf);
92 static void vma_print_allocator(struct i915_vma *vma, const char *reason)
119 struct i915_vma *vma;
126 vma = i915_vma_alloc();
127 if (vma == NULL)
130 vma->ops = &vm->vma_ops;
131 vma->obj = obj;
132 vma->size = obj->base.size;
133 vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
135 i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire, 0);
140 might_lock(&vma->active.mutex);
144 INIT_LIST_HEAD(&vma->closed_link);
145 INIT_LIST_HEAD(&vma->obj_link);
146 RB_CLEAR_NODE(&vma->obj_node);
149 vma->gtt_view = *view;
155 vma->size = view->partial.size;
156 vma->size <<= PAGE_SHIFT;
157 GEM_BUG_ON(vma->size > obj->base.size);
159 vma->size = intel_rotation_info_size(&view->rotated);
160 vma->size <<= PAGE_SHIFT;
162 vma->size = intel_remapped_info_size(&view->remapped);
163 vma->size <<= PAGE_SHIFT;
167 if (unlikely(vma->size > vm->total))
170 GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));
178 vma->vm = vm;
179 list_add_tail(&vma->vm_link, &vm->unbound_list);
181 spin_lock(&obj->vma.lock);
183 if (unlikely(overflows_type(vma->size, u32)))
186 vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
189 if (unlikely(vma->fence_size < vma->size || /* overflow */
190 vma->fence_size > vm->total))
193 GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));
195 vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
198 GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));
200 __set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
204 p = &obj->vma.tree.rb_node;
213 * already created a matching vma, so return the older instance
224 rb_link_node(&vma->obj_node, rb, p);
225 rb_insert_color(&vma->obj_node, &obj->vma.tree);
227 if (i915_vma_is_ggtt(vma))
229 * We put the GGTT vma at the start of the vma-list, followed
230 * by the ppGGTT vma. This allows us to break early when
231 * iterating over only the GGTT vma for an object, see
234 list_add(&vma->obj_link, &obj->vma.list);
236 list_add_tail(&vma->obj_link, &obj->vma.list);
238 spin_unlock(&obj->vma.lock);
241 return vma;
244 spin_unlock(&obj->vma.lock);
245 list_del_init(&vma->vm_link);
248 i915_vma_free(vma);
259 rb = obj->vma.tree.rb_node;
261 struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
264 cmp = i915_vma_compare(vma, vm, view);
266 return vma;
288 * Returns the vma, or an error pointer.
295 struct i915_vma *vma;
300 spin_lock(&obj->vma.lock);
301 vma = i915_vma_lookup(obj, vm, view);
302 spin_unlock(&obj->vma.lock);
304 /* vma_create() will resolve the race if another creates the vma */
305 if (unlikely(!vma))
306 vma = vma_create(obj, vm, view);
308 GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
309 return vma;
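
The matches above are the lookup/create path behind i915_vma_instance(), which returns either the vma or an ERR_PTR. A minimal usage sketch, not taken from the listing: i915_vma_pin(), the PIN_USER flag and the error handling are assumed from typical i915 call sites, and obj/vm are whatever object and address space the caller already holds.

	struct i915_vma *vma;
	int err;

	vma = i915_vma_instance(obj, vm, NULL);	/* NULL view = normal mapping */
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	err = i915_vma_pin(vma, 0, 0, PIN_USER);	/* 0 size/alignment = defaults */
	if (err)
		return err;
	/* ... binding is usable here ... */
	i915_vma_unpin(vma);
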
373 int i915_vma_wait_for_bind(struct i915_vma *vma)
377 if (rcu_access_pointer(vma->active.excl.fence)) {
381 fence = dma_fence_get_rcu_safe(&vma->active.excl.fence);
393 static int i915_vma_verify_bind_complete(struct i915_vma *vma)
395 struct dma_fence *fence = i915_active_fence_get(&vma->active.excl);
416 struct i915_vma *vma)
418 struct drm_i915_gem_object *obj = vma->obj;
420 i915_vma_resource_init(vma_res, vma->vm, vma->pages, &vma->page_sizes,
423 vma->ops, vma->private, __i915_vma_offset(vma),
424 __i915_vma_size(vma), vma->size, vma->guard);
429 * @vma: VMA to map
433 * @vma_res: pointer to a preallocated vma resource. The resource is either
440 int i915_vma_bind(struct i915_vma *vma,
450 lockdep_assert_held(&vma->vm->mutex);
451 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
452 GEM_BUG_ON(vma->size > i915_vma_size(vma));
454 if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
455 vma->node.size,
456 vma->vm->total))) {
469 vma_flags = atomic_read(&vma->flags);
478 GEM_BUG_ON(!atomic_read(&vma->pages_count));
481 if (work && bind_flags & vma->vm->bind_async_flags)
482 ret = i915_vma_resource_bind_dep_await(vma->vm,
484 vma->node.start,
485 vma->node.size,
491 ret = i915_vma_resource_bind_dep_sync(vma->vm, vma->node.start,
492 vma->node.size, true);
498 if (vma->resource || !vma_res) {
503 i915_vma_resource_init_from_vma(vma_res, vma);
504 vma->resource = vma_res;
506 trace_i915_vma_bind(vma, bind_flags);
507 if (work && bind_flags & vma->vm->bind_async_flags) {
510 work->vma_res = i915_vma_resource_get(vma->resource);
519 * Also note that we do not want to track the async vma as
523 prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
532 work->obj = i915_gem_object_get(vma->obj);
534 ret = i915_gem_object_wait_moving_fence(vma->obj, true);
536 i915_vma_resource_free(vma->resource);
537 vma->resource = NULL;
541 vma->ops->bind_vma(vma->vm, NULL, vma->resource, pat_index,
545 atomic_or(bind_flags, &vma->flags);
549 void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
554 if (WARN_ON_ONCE(vma->obj->flags & I915_BO_ALLOC_GPU_ONLY))
557 GEM_BUG_ON(!i915_vma_is_ggtt(vma));
558 GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));
559 GEM_BUG_ON(i915_vma_verify_bind_complete(vma));
561 ptr = READ_ONCE(vma->iomap);
569 if (i915_gem_object_is_lmem(vma->obj)) {
570 ptr = i915_gem_object_lmem_io_map(vma->obj, 0,
571 vma->obj->base.size);
572 } else if (i915_vma_is_map_and_fenceable(vma)) {
573 ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
574 i915_vma_offset(vma),
575 i915_vma_size(vma));
578 i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
591 if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
593 __i915_gem_object_release_map(vma->obj);
596 ptr = vma->iomap;
600 __i915_vma_pin(vma);
602 err = i915_vma_pin_fence(vma);
606 i915_vma_set_ggtt_write(vma);
612 __i915_vma_unpin(vma);
617 void i915_vma_flush_writes(struct i915_vma *vma)
619 if (i915_vma_unset_ggtt_write(vma))
620 intel_gt_flush_ggtt_writes(vma->vm->gt);
623 void i915_vma_unpin_iomap(struct i915_vma *vma)
625 GEM_BUG_ON(vma->iomap == NULL);
629 i915_vma_flush_writes(vma);
631 i915_vma_unpin_fence(vma);
632 i915_vma_unpin(vma);
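
The i915_vma_pin_iomap()/i915_vma_unpin_iomap() matches above form a CPU-access pair for a vma that already holds a GGTT binding. A minimal sketch assuming placeholder offset/value names; iowrite32() is assumed from the generic io accessors, not from the listing:

	void __iomem *vaddr;

	vaddr = i915_vma_pin_iomap(vma);	/* vma must already be bound in the GGTT */
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	iowrite32(value, vaddr + offset);

	i915_vma_unpin_iomap(vma);		/* flushes GGTT writes, drops fence + pin */
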
637 struct i915_vma *vma;
640 vma = fetch_and_zero(p_vma);
641 if (!vma)
644 obj = vma->obj;
647 i915_vma_unpin(vma);
655 bool i915_vma_misplaced(const struct i915_vma *vma,
658 if (!drm_mm_node_allocated(&vma->node))
661 if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
664 if (i915_vma_size(vma) < size)
668 if (alignment && !IS_ALIGNED(i915_vma_offset(vma), alignment))
671 if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
675 i915_vma_offset(vma) < (flags & PIN_OFFSET_MASK))
679 i915_vma_offset(vma) != (flags & PIN_OFFSET_MASK))
683 vma->guard < (flags & PIN_OFFSET_MASK))
689 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
693 GEM_BUG_ON(!i915_vma_is_ggtt(vma));
694 GEM_BUG_ON(!vma->fence_size);
696 fenceable = (i915_vma_size(vma) >= vma->fence_size &&
697 IS_ALIGNED(i915_vma_offset(vma), vma->fence_alignment));
699 mappable = i915_ggtt_offset(vma) + vma->fence_size <=
700 i915_vm_to_ggtt(vma->vm)->mappable_end;
703 set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
705 clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
708 bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
710 struct drm_mm_node *node = &vma->node;
720 if (!i915_vm_has_cache_coloring(vma->vm))
723 /* Only valid to be called on an already inserted vma */
741 * i915_vma_insert - finds a slot for the vma in its address space
742 * @vma: the vma
756 i915_vma_insert(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
763 GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
764 GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
767 size = max(size, vma->size);
768 alignment = max_t(typeof(alignment), alignment, vma->display_alignment);
770 size = max_t(typeof(size), size, vma->fence_size);
772 alignment, vma->fence_alignment);
779 guard = vma->guard; /* retain guard across rebinds */
794 end = vma->vm->total;
796 end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
801 alignment = max(alignment, i915_vm_obj_min_alignment(vma->vm, vma->obj));
809 drm_dbg(vma->obj->base.dev,
817 if (i915_vm_has_cache_coloring(vma->vm))
818 color = vma->obj->pat_index;
829 * of the vma->node due to the guard pages.
834 ret = i915_gem_gtt_reserve(vma->vm, ww, &vma->node,
851 vma->page_sizes.sg > I915_GTT_PAGE_SIZE &&
852 !HAS_64K_PAGES(vma->vm->i915)) {
860 rounddown_pow_of_two(vma->page_sizes.sg |
868 GEM_BUG_ON(i915_vma_is_ggtt(vma));
872 if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
876 ret = i915_gem_gtt_insert(vma->vm, ww, &vma->node,
882 GEM_BUG_ON(vma->node.start < start);
883 GEM_BUG_ON(vma->node.start + vma->node.size > end);
885 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
886 GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));
888 list_move_tail(&vma->vm_link, &vma->vm->bound_list);
889 vma->guard = guard;
895 i915_vma_detach(struct i915_vma *vma)
897 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
898 GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
902 * vma, we can drop its hold on the backing storage and allow
905 list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
908 static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
912 bound = atomic_read(&vma->flags);
930 } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
1268 __i915_vma_get_pages(struct i915_vma *vma)
1273 * The vma->pages are only valid within the lifespan of the borrowed
1275 * must be the vma->pages. A simple rule is that vma->pages must only
1278 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
1280 switch (vma->gtt_view.type) {
1282 GEM_BUG_ON(vma->gtt_view.type);
1285 pages = vma->obj->mm.pages;
1290 intel_rotate_pages(&vma->gtt_view.rotated, vma->obj);
1295 intel_remap_pages(&vma->gtt_view.remapped, vma->obj);
1299 pages = intel_partial_pages(&vma->gtt_view, vma->obj);
1304 drm_err(&vma->vm->i915->drm,
1306 vma->gtt_view.type, PTR_ERR(pages));
1310 vma->pages = pages;
1315 I915_SELFTEST_EXPORT int i915_vma_get_pages(struct i915_vma *vma)
1319 if (atomic_add_unless(&vma->pages_count, 1, 0))
1322 err = i915_gem_object_pin_pages(vma->obj);
1326 err = __i915_vma_get_pages(vma);
1330 vma->page_sizes = vma->obj->mm.page_sizes;
1331 atomic_inc(&vma->pages_count);
1336 __i915_gem_object_unpin_pages(vma->obj);
1350 * Before we release the pages that were bound by this vma, we
1362 static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
1365 GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
1367 if (atomic_sub_return(count, &vma->pages_count) == 0) {
1368 if (vma->pages != vma->obj->mm.pages) {
1369 sg_free_table(vma->pages);
1370 kfree(vma->pages);
1372 vma->pages = NULL;
1374 i915_gem_object_unpin_pages(vma->obj);
1378 I915_SELFTEST_EXPORT void i915_vma_put_pages(struct i915_vma *vma)
1380 if (atomic_add_unless(&vma->pages_count, -1, 1))
1383 __vma_put_pages(vma, 1);
1386 static void vma_unbind_pages(struct i915_vma *vma)
1390 lockdep_assert_held(&vma->vm->mutex);
1393 count = atomic_read(&vma->pages_count);
1397 __vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
1400 int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
1410 assert_vma_held(vma);
1418 /* First try and grab the pin without rebinding the vma */
1419 if (try_qad_pin(vma, flags))
1422 err = i915_vma_get_pages(vma);
1427 wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
1429 if (flags & vma->vm->bind_async_flags) {
1431 err = i915_vm_lock_objects(vma->vm, ww);
1441 work->vm = vma->vm;
1443 err = i915_gem_object_get_moving_fence(vma->obj, &moving);
1450 if (vma->vm->allocate_va_range) {
1451 err = i915_vm_alloc_pt_stash(vma->vm,
1453 vma->size);
1457 err = i915_vm_map_pt_stash(vma->vm, &work->stash);
1470 * Differentiate between user/kernel vma inside the aliasing-ppgtt.
1472 * We conflate the Global GTT with the user's vma when using the
1486 err = mutex_lock_interruptible_nested(&vma->vm->mutex,
1493 if (unlikely(i915_vma_is_closed(vma))) {
1498 bound = atomic_read(&vma->flags);
1511 __i915_vma_pin(vma);
1515 err = i915_active_acquire(&vma->active);
1520 err = i915_vma_insert(vma, ww, size, alignment, flags);
1524 if (i915_is_ggtt(vma->vm))
1525 __i915_vma_set_map_and_fenceable(vma);
1528 GEM_BUG_ON(!vma->pages);
1529 err = i915_vma_bind(vma,
1530 vma->obj->pat_index,
1538 atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
1539 list_move_tail(&vma->vm_link, &vma->vm->bound_list);
1542 __i915_vma_pin(vma);
1543 GEM_BUG_ON(!i915_vma_is_pinned(vma));
1545 GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
1546 GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
1549 if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
1550 i915_vma_detach(vma);
1551 drm_mm_remove_node(&vma->node);
1554 i915_active_release(&vma->active);
1556 mutex_unlock(&vma->vm->mutex);
1564 intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
1569 i915_vma_put_pages(vma);
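
i915_vma_pin_ww() above expects a ww acquire context and the object lock. A hedged sketch of how call sites typically drive it; the for_i915_gem_ww() helper and PIN_USER flag are assumed here and do not appear in the listing:

	struct i915_gem_ww_ctx ww;
	int err;

	for_i915_gem_ww(&ww, err, true) {
		err = i915_gem_object_lock(vma->obj, &ww);
		if (err)
			continue;	/* the loop handles -EDEADLK backoff */

		err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
	}
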
1584 static int __i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
1587 struct i915_address_space *vm = vma->vm;
1593 err = i915_vma_pin_ww(vma, ww, 0, align, flags | PIN_GLOBAL);
1597 err = i915_vma_wait_for_bind(vma);
1599 i915_vma_unpin(vma);
1619 int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
1625 GEM_BUG_ON(!i915_vma_is_ggtt(vma));
1628 return __i915_ggtt_pin(vma, ww, align, flags);
1630 lockdep_assert_not_held(&vma->obj->base.resv->lock.base);
1633 err = i915_gem_object_lock(vma->obj, &_ww);
1635 err = __i915_ggtt_pin(vma, &_ww, align, flags);
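
As the fallback path above shows, i915_ggtt_pin() accepts a NULL ww context and then runs its own object-lock loop around __i915_ggtt_pin(). A small sketch; PIN_HIGH is assumed from common call sites rather than the listing:

	err = i915_ggtt_pin(vma, NULL, 0, PIN_HIGH);
	if (err)
		return err;
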
1651 struct i915_vma *vma;
1653 spin_lock(&obj->vma.lock);
1654 for_each_ggtt_vma(vma, obj) {
1655 i915_vma_clear_scanout(vma);
1656 vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
1658 spin_unlock(&obj->vma.lock);
1661 static void __vma_close(struct i915_vma *vma, struct intel_gt *gt)
1675 GEM_BUG_ON(i915_vma_is_closed(vma));
1676 list_add(&vma->closed_link, &gt->closed_vma);
1679 void i915_vma_close(struct i915_vma *vma)
1681 struct intel_gt *gt = vma->vm->gt;
1684 if (i915_vma_is_ggtt(vma))
1687 GEM_BUG_ON(!atomic_read(&vma->open_count));
1688 if (atomic_dec_and_lock_irqsave(&vma->open_count,
1691 __vma_close(vma, gt);
1696 static void __i915_vma_remove_closed(struct i915_vma *vma)
1698 list_del_init(&vma->closed_link);
1701 void i915_vma_reopen(struct i915_vma *vma)
1703 struct intel_gt *gt = vma->vm->gt;
1706 if (i915_vma_is_closed(vma))
1707 __i915_vma_remove_closed(vma);
1711 static void force_unbind(struct i915_vma *vma)
1713 if (!drm_mm_node_allocated(&vma->node))
1716 atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
1717 WARN_ON(__i915_vma_unbind(vma));
1718 GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
1721 static void release_references(struct i915_vma *vma, struct intel_gt *gt,
1724 struct drm_i915_gem_object *obj = vma->obj;
1726 GEM_BUG_ON(i915_vma_is_active(vma));
1728 spin_lock(&obj->vma.lock);
1729 list_del(&vma->obj_link);
1730 if (!RB_EMPTY_NODE(&vma->obj_node))
1731 rb_erase(&vma->obj_node, &obj->vma.tree);
1733 spin_unlock(&obj->vma.lock);
1736 __i915_vma_remove_closed(vma);
1740 i915_vm_resv_put(vma->vm);
1743 i915_active_wait(&vma->active);
1744 i915_active_fini(&vma->active);
1745 GEM_WARN_ON(vma->resource);
1746 i915_vma_free(vma);
1750 * i915_vma_destroy_locked - Remove all weak reference to the vma and put
1753 * This function should be called when it's decided the vma isn't needed
1766 * vma freeing from __i915_gem_object_pages_fini().
1768 * Because of locks taken during destruction, a vma is also guaranteed to
1772 * - obj->vma.lock
1775 void i915_vma_destroy_locked(struct i915_vma *vma)
1777 lockdep_assert_held(&vma->vm->mutex);
1779 force_unbind(vma);
1780 list_del_init(&vma->vm_link);
1781 release_references(vma, vma->vm->gt, false);
1784 void i915_vma_destroy(struct i915_vma *vma)
1789 mutex_lock(&vma->vm->mutex);
1790 force_unbind(vma);
1791 list_del_init(&vma->vm_link);
1792 vm_ddestroy = vma->vm_ddestroy;
1793 vma->vm_ddestroy = false;
1795 /* vma->vm may be freed when releasing vma->vm->mutex. */
1796 gt = vma->vm->gt;
1797 mutex_unlock(&vma->vm->mutex);
1798 release_references(vma, gt, vm_ddestroy);
1803 struct i915_vma *vma, *next;
1807 list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
1808 struct drm_i915_gem_object *obj = vma->obj;
1809 struct i915_address_space *vm = vma->vm;
1821 list_move(&vma->closed_link, &closed);
1825 /* As the GT is held idle, no vma can be reopened as we destroy them */
1826 list_for_each_entry_safe(vma, next, &closed, closed_link) {
1827 struct drm_i915_gem_object *obj = vma->obj;
1828 struct i915_address_space *vm = vma->vm;
1831 INIT_LIST_HEAD(&vma->closed_link);
1832 i915_vma_destroy(vma);
1837 list_add(&vma->closed_link, &gt->closed_vma);
1846 static void __i915_vma_iounmap(struct i915_vma *vma)
1848 GEM_BUG_ON(i915_vma_is_pinned(vma));
1850 if (vma->iomap == NULL)
1853 if (page_unmask_bits(vma->iomap))
1854 __i915_gem_object_release_map(vma->obj);
1856 io_mapping_unmap(vma->iomap);
1857 vma->iomap = NULL;
1860 void i915_vma_revoke_mmap(struct i915_vma *vma)
1865 if (!i915_vma_has_userfault(vma))
1868 GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
1869 GEM_BUG_ON(!vma->obj->userfault_count);
1871 node = &vma->mmo->vma_node;
1872 vma_offset = vma->gtt_view.partial.offset << PAGE_SHIFT;
1873 unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
1875 vma->size,
1878 i915_vma_unset_userfault(vma);
1879 if (!--vma->obj->userfault_count)
1880 list_del(&vma->obj->userfault_link);
1884 __i915_request_await_bind(struct i915_request *rq, struct i915_vma *vma)
1886 return __i915_request_await_exclusive(rq, &vma->active);
1889 static int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
1893 /* Wait for the vma to be bound before we start! */
1894 err = __i915_request_await_bind(rq, vma);
1898 return i915_active_add_request(&vma->active, rq);
1901 int _i915_vma_move_to_active(struct i915_vma *vma,
1906 struct drm_i915_gem_object *obj = vma->obj;
1911 GEM_BUG_ON(!vma->pages);
1914 err = i915_request_await_object(rq, vma->obj, flags & EXEC_OBJECT_WRITE);
1918 err = __i915_vma_move_to_active(vma, rq);
1932 err = dma_resv_reserve_fences(vma->obj->base.resv, idx);
1963 dma_resv_add_fence(vma->obj->base.resv, curr, usage);
1966 if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
1967 i915_active_add_request(&vma->fence->active, rq);
1972 GEM_BUG_ON(!i915_vma_is_active(vma));
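
_i915_vma_move_to_active() above publishes a pinned vma's activity to a request during submission. A hedged sketch of the usual entry point; the i915_vma_move_to_active() wrapper and EXEC_OBJECT_WRITE are assumed (from i915_vma.h and the execbuf uapi) and are not shown in the listing. The caller is expected to hold the object lock with the vma pinned and rq under construction:

	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	if (err)
		return err;
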
1976 struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
1978 struct i915_vma_resource *vma_res = vma->resource;
1981 GEM_BUG_ON(i915_vma_is_pinned(vma));
1982 assert_vma_held_evict(vma);
1984 if (i915_vma_is_map_and_fenceable(vma)) {
1986 i915_vma_revoke_mmap(vma);
1995 * bit from set-domain, as we mark all GGTT vma associated
1996 * with an object. We know this is for another vma, as we
1997 * are currently unbinding this one -- so if this vma will be
2001 i915_vma_flush_writes(vma);
2004 i915_vma_revoke_fence(vma);
2006 clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
2009 __i915_vma_iounmap(vma);
2011 GEM_BUG_ON(vma->fence);
2012 GEM_BUG_ON(i915_vma_has_userfault(vma));
2015 GEM_WARN_ON(async && !vma->resource->bi.pages_rsgt);
2018 vma_res->needs_wakeref = i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND) &&
2019 kref_read(&vma->vm->ref);
2020 vma_res->skip_pte_rewrite = !kref_read(&vma->vm->ref) ||
2021 vma->vm->skip_pte_rewrite;
2022 trace_i915_vma_unbind(vma);
2026 vma->obj->mm.tlb);
2030 vma->resource = NULL;
2033 &vma->flags);
2035 i915_vma_detach(vma);
2043 vma_invalidate_tlb(vma->vm, vma->obj->mm.tlb);
2052 vma_unbind_pages(vma);
2056 int __i915_vma_unbind(struct i915_vma *vma)
2060 lockdep_assert_held(&vma->vm->mutex);
2061 assert_vma_held_evict(vma);
2063 if (!drm_mm_node_allocated(&vma->node))
2066 if (i915_vma_is_pinned(vma)) {
2067 vma_print_allocator(vma, "is pinned");
2072 * After confirming that no one else is pinning this vma, wait for
2076 ret = i915_vma_sync(vma);
2080 GEM_BUG_ON(i915_vma_is_active(vma));
2081 __i915_vma_evict(vma, false);
2083 drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
2087 static struct dma_fence *__i915_vma_unbind_async(struct i915_vma *vma)
2091 lockdep_assert_held(&vma->vm->mutex);
2093 if (!drm_mm_node_allocated(&vma->node))
2096 if (i915_vma_is_pinned(vma) ||
2097 &vma->obj->mm.rsgt->table != vma->resource->bi.pages)
2102 * object's dma_resv when the vma active goes away. When doing that
2105 * the next vma from the object, in case there are many, will
2109 if (i915_sw_fence_await_active(&vma->resource->chain, &vma->active,
2115 fence = __i915_vma_evict(vma, true);
2117 drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
2122 int i915_vma_unbind(struct i915_vma *vma)
2124 struct i915_address_space *vm = vma->vm;
2128 assert_object_held_shared(vma->obj);
2131 err = i915_vma_sync(vma);
2135 if (!drm_mm_node_allocated(&vma->node))
2138 if (i915_vma_is_pinned(vma)) {
2139 vma_print_allocator(vma, "is pinned");
2143 if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
2147 err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref);
2151 err = __i915_vma_unbind(vma);
2160 int i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm)
2162 struct drm_i915_gem_object *obj = vma->obj;
2163 struct i915_address_space *vm = vma->vm;
2174 if (!drm_mm_node_allocated(&vma->node))
2177 if (i915_vma_is_pinned(vma)) {
2178 vma_print_allocator(vma, "is pinned");
2194 if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
2206 fence = __i915_vma_unbind_async(vma);
2222 int i915_vma_unbind_unlocked(struct i915_vma *vma)
2226 i915_gem_object_lock(vma->obj, NULL);
2227 err = i915_vma_unbind(vma);
2228 i915_gem_object_unlock(vma->obj);
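
The unbind matches above split by locking context. A rough guide using only names from the listing: i915_vma_unbind_unlocked() takes the object lock itself, i915_vma_unbind() expects the object lock already held (at least shared), and __i915_vma_unbind() is the variant called under vm->mutex.

	/* no locks held: helper takes the object lock, then vm->mutex */
	err = i915_vma_unbind_unlocked(vma);

	/* object lock already held by the caller */
	err = i915_vma_unbind(vma);
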
2233 struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
2235 i915_gem_object_make_unshrinkable(vma->obj);
2236 return vma;
2239 void i915_vma_make_shrinkable(struct i915_vma *vma)
2241 i915_gem_object_make_shrinkable(vma->obj);
2244 void i915_vma_make_purgeable(struct i915_vma *vma)
2246 i915_gem_object_make_purgeable(vma->obj);