/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include "gem/i915_gem_context.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_trace.h"

I915_SELFTEST_DECLARE(static struct igt_evict_ctl {
	bool fail_if_busy:1;
} igt_evict_ctl;)

static int ggtt_flush(struct intel_gt *gt)
{
	/*
	 * Not everything in the GGTT is tracked via vma (otherwise we
	 * could evict as required with minimal stalling) so we are forced
	 * to idle the GPU and explicitly retire outstanding requests in
	 * the hopes that we can then remove contexts and the like only
	 * bound by their active reference.
	 */
	return intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
}
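
/*
 * Illustrative sketch (not part of this file): the eviction helpers below
 * are fallback paths. A caller already holding vm->mutex first tries to
 * allocate directly and only evicts on -ENOSPC, along the lines of:
 *
 *	err = drm_mm_insert_node_in_range(&vm->mm, node, size, align,
 *					  color, start, end, mode);
 *	if (err == -ENOSPC) {
 *		err = i915_gem_evict_something(vm, size, align, color,
 *					       start, end, flags);
 *		if (err == 0)
 *			err = drm_mm_insert_node_in_range(&vm->mm, node,
 *							  size, align, color,
 *							  start, end, mode);
 *	}
 */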

static bool
mark_free(struct drm_mm_scan *scan,
	  struct i915_vma *vma,
	  unsigned int flags,
	  struct list_head *unwind)
{
	if (i915_vma_is_pinned(vma))
		return false;

	list_add(&vma->evict_link, unwind);
	return drm_mm_scan_add_block(scan, &vma->node);
}

/**
 * i915_gem_evict_something - Evict vmas to make room for binding a new one
 * @vm: address space to evict from
 * @min_size: size of the desired free space
 * @alignment: alignment constraint of the desired free space
 * @color: color for the desired space
 * @start: start (inclusive) of the range from which to evict objects
 * @end: end (exclusive) of the range from which to evict objects
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas until a free space satisfying the
 * requirements is found. Callers must check first whether any such hole exists
 * already before calling this function.
 *
 * This function is used by the object/vma binding code.
 *
 * Since this function is only used to free up virtual address space it only
 * ignores pinned vmas, and not objects where the backing storage itself is
 * pinned. Hence obj->pages_pin_count does not protect against eviction.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int
i915_gem_evict_something(struct i915_address_space *vm,
			 u64 min_size, u64 alignment,
			 unsigned long color,
			 u64 start, u64 end,
			 unsigned flags)
{
	struct drm_mm_scan scan;
	struct list_head eviction_list;
	struct i915_vma *vma, *next;
	struct drm_mm_node *node;
	enum drm_mm_insert_mode mode;
	struct i915_vma *active;
	int ret;

	lockdep_assert_held(&vm->mutex);
	trace_i915_gem_evict(vm, min_size, alignment, flags);

	/*
	 * The goal is to evict objects and amalgamate space in rough LRU order.
	 * Since both active and inactive objects reside on the same list,
	 * in a mix of creation and last scanned order, as we process the list
	 * we sort it into inactive/active, which keeps the active portion
	 * in a rough MRU order.
	 *
	 * The retirement sequence is thus:
	 *   1. Inactive objects (already retired, random order)
	 *   2. Active objects (will stall on unbinding, oldest scanned first)
	 */
	mode = DRM_MM_INSERT_BEST;
	if (flags & PIN_HIGH)
		mode = DRM_MM_INSERT_HIGH;
	if (flags & PIN_MAPPABLE)
		mode = DRM_MM_INSERT_LOW;
	drm_mm_scan_init_with_range(&scan, &vm->mm,
				    min_size, alignment, color,
				    start, end, mode);

	intel_gt_retire_requests(vm->gt);
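
	/*
	 * The scan below may run twice: if no hole is found in the GGTT
	 * and the caller allows blocking, we idle the GPU, retire the
	 * newly idle objects and retry once with PIN_NONBLOCK set (see
	 * the bail-out path after the loop).
	 */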
search_again:
	active = NULL;
	INIT_LIST_HEAD(&eviction_list);
	list_for_each_entry_safe(vma, next, &vm->bound_list, vm_link) {
		if (vma == active) { /* now seen this vma twice */
			if (flags & PIN_NONBLOCK)
				break;

			active = ERR_PTR(-EAGAIN);
		}

		/*
		 * We keep this list in a rough least-recently scanned order
		 * of active elements (inactive elements are cheap to reap).
		 * New entries are added to the end, and we move anything we
		 * scan to the end. The assumption is that the working set
		 * of applications is either steady state (and thanks to the
		 * userspace bo cache it almost always is) or volatile and
		 * frequently replaced after a frame, which are self-evicting!
		 * Given that assumption, the MRU order of the scan list is
		 * fairly static, and keeping it in least-recently scanned
		 * order is suitable.
		 *
		 * To notice when we complete one full cycle, we record the
		 * first active element seen, before moving it to the tail.
		 */
		if (active != ERR_PTR(-EAGAIN) && i915_vma_is_active(vma)) {
			if (!active)
				active = vma;

			list_move_tail(&vma->vm_link, &vm->bound_list);
			continue;
		}

		if (mark_free(&scan, vma, flags, &eviction_list))
			goto found;
	}

	/* Nothing found, clean up and bail out! */
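	/*
	 * Note that drm_mm requires every node added to a scan via
	 * drm_mm_scan_add_block() to be removed again with
	 * drm_mm_scan_remove_block() before the drm_mm can be used for
	 * anything else, hence the unconditional unwind here.
	 */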
	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		ret = drm_mm_scan_remove_block(&scan, &vma->node);
		BUG_ON(ret);
	}

	/*
	 * Can we unpin some objects such as idle hw contents,
	 * or pending flips? But since only the GGTT has global entries
	 * such as scanouts, ringbuffers and contexts, we can skip the
	 * purge when inspecting per-process local address spaces.
	 */
	if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
		return -ENOSPC;

	/*
	 * Not everything in the GGTT is tracked via VMA using
	 * i915_vma_move_to_active(), otherwise we could evict as required
	 * with minimal stalling. Instead we are forced to idle the GPU and
	 * explicitly retire outstanding requests, which will then remove
	 * the pinning for active objects such as contexts and rings,
	 * enabling us to evict them on the next iteration.
	 *
	 * To ensure that all user contexts are evictable, we perform
	 * a switch to the perma-pinned kernel context. This also gives
	 * us a termination condition: when the last retired context is
	 * the kernel's, there is nothing more we can evict.
	 */
	if (I915_SELFTEST_ONLY(igt_evict_ctl.fail_if_busy))
		return -EBUSY;

	ret = ggtt_flush(vm->gt);
	if (ret)
		return ret;

	cond_resched();

	flags |= PIN_NONBLOCK;
	goto search_again;

found:
	/*
	 * drm_mm doesn't allow any other operations while scanning,
	 * therefore store to-be-evicted objects on a temporary list and
	 * take a reference for all before calling unbind (which may
	 * remove the active reference of any of our objects, thus
	 * corrupting the list).
	 */
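	/*
	 * drm_mm_scan_remove_block() reports true only for the blocks
	 * that make up the hole chosen by the scan; those we pin for
	 * eviction, while any speculatively added vma that is not part
	 * of the hole is dropped from the eviction list untouched.
	 */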
	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		if (drm_mm_scan_remove_block(&scan, &vma->node))
			__i915_vma_pin(vma);
		else
			list_del(&vma->evict_link);
	}

	/* Unbinding will emit any required flushes */
	ret = 0;
	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		__i915_vma_unpin(vma);
		if (ret == 0)
			ret = __i915_vma_unbind(vma);
	}

	while (ret == 0 && (node = drm_mm_scan_color_evict(&scan))) {
		vma = container_of(node, struct i915_vma, node);

		/* If we find any non-objects (!vma), we cannot evict them */
		if (vma->node.color != I915_COLOR_UNEVICTABLE)
			ret = __i915_vma_unbind(vma);
		else
			ret = -ENOSPC; /* XXX search failed, try again? */
	}

	return ret;
}
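
/*
 * Illustrative sketch (not part of this file): i915_gem_evict_for_node()
 * below serves fixed-offset placements. A caller would reserve a node at
 * a given offset and clear just the overlapping range on failure, along
 * the lines of:
 *
 *	node->start = offset;
 *	node->size = size;
 *	node->color = color;
 *	err = drm_mm_reserve_node(&vm->mm, node);
 *	if (err == -ENOSPC) {
 *		err = i915_gem_evict_for_node(vm, node, flags);
 *		if (err == 0)
 *			err = drm_mm_reserve_node(&vm->mm, node);
 *	}
 */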

/**
 * i915_gem_evict_for_node - Evict vmas to make room for binding a new one
 * @vm: address space to evict from
 * @target: range (and color) to evict for
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas that overlap the target node.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int i915_gem_evict_for_node(struct i915_address_space *vm,
			    struct drm_mm_node *target,
			    unsigned int flags)
{
	LIST_HEAD(eviction_list);
	struct drm_mm_node *node;
	u64 start = target->start;
	u64 end = start + target->size;
	struct i915_vma *vma, *next;
	int ret = 0;

	lockdep_assert_held(&vm->mutex);
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	trace_i915_gem_evict_node(vm, target, flags);

	/*
	 * Retire before we search the active list. Although we have
	 * reasonable accuracy in our retirement lists, we may have
	 * a stray pin (preventing eviction) that can only be resolved by
	 * retiring.
	 */
	intel_gt_retire_requests(vm->gt);

	if (i915_vm_has_cache_coloring(vm)) {
		/* Expand search to cover neighbouring guard pages (or lack!) */
		if (start)
			start -= I915_GTT_PAGE_SIZE;

		/* Always look at the page afterwards to avoid the end-of-GTT */
		end += I915_GTT_PAGE_SIZE;
	}
	GEM_BUG_ON(start >= end);

	drm_mm_for_each_node_in_range(node, &vm->mm, start, end) {
		/* If we find any non-objects (!vma), we cannot evict them */
		if (node->color == I915_COLOR_UNEVICTABLE) {
			ret = -ENOSPC;
			break;
		}

		GEM_BUG_ON(!drm_mm_node_allocated(node));
		vma = container_of(node, typeof(*vma), node);

		/*
		 * If we are using coloring to insert guard pages between
		 * different cache domains within the address space, we have
		 * to check whether the objects on either side of our range
		 * abut and conflict. If they are in conflict, then we evict
		 * those as well to make room for our guard pages.
		 */
		if (i915_vm_has_cache_coloring(vm)) {
			if (node->start + node->size == target->start) {
				if (node->color == target->color)
					continue;
			}
			if (node->start == target->start + target->size) {
				if (node->color == target->color)
					continue;
			}
		}

		if (i915_vma_is_pinned(vma)) {
			ret = -ENOSPC;
			break;
		}

		if (flags & PIN_NONBLOCK && i915_vma_is_active(vma)) {
			ret = -ENOSPC;
			break;
		}

		/*
		 * Never show fear in the face of dragons!
		 *
		 * We cannot directly remove this node from within this
		 * iterator and as with i915_gem_evict_something() we employ
		 * the vma pin_count in order to prevent the action of
		 * unbinding one vma from freeing (by dropping its active
		 * reference) another in our eviction list.
		 */
		__i915_vma_pin(vma);
		list_add(&vma->evict_link, &eviction_list);
	}
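
	/*
	 * Unbind in a second pass, after the iterator has completed: the
	 * temporary pin taken above keeps each vma alive until we reach
	 * it, and is dropped immediately before its unbind.
	 */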
	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		__i915_vma_unpin(vma);
		if (ret == 0)
			ret = __i915_vma_unbind(vma);
	}

	return ret;
}

/**
 * i915_gem_evict_vm - Evict all idle vmas from a vm
 * @vm: Address space to cleanse
 *
 * This function evicts all vmas from a vm.
 *
 * This is used by the execbuf code as a last-ditch effort to defragment the
 * address space.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int i915_gem_evict_vm(struct i915_address_space *vm)
{
	int ret = 0;

	lockdep_assert_held(&vm->mutex);
	trace_i915_gem_evict_vm(vm);

	/*
	 * Switch back to the default context in order to unpin
	 * the existing context objects. However, such objects only
	 * pin themselves inside the global GTT and performing the
	 * switch otherwise is ineffective.
	 */
	if (i915_is_ggtt(vm)) {
		ret = ggtt_flush(vm->gt);
		if (ret)
			return ret;
	}

	do {
		struct i915_vma *vma, *vn;
		LIST_HEAD(eviction_list);

		list_for_each_entry(vma, &vm->bound_list, vm_link) {
			if (i915_vma_is_pinned(vma))
				continue;

			__i915_vma_pin(vma);
			list_add(&vma->evict_link, &eviction_list);
		}
		if (list_empty(&eviction_list))
			break;

		ret = 0;
		list_for_each_entry_safe(vma, vn, &eviction_list, evict_link) {
			__i915_vma_unpin(vma);
			if (ret == 0)
				ret = __i915_vma_unbind(vma);
			if (ret != -EINTR) /* "Get me out of here!" */
				ret = 0;
		}
	} while (ret == 0);

	return ret;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_evict.c"
#endif