// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * Copyright (c) 2012 David Airlie <airlied@linux.ie>
 * Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include <drm/drm_mm.h>
#include <drm/drm_vma_manager.h>

/**
 * DOC: vma offset manager
 *
 * The vma-manager is responsible for mapping arbitrary driver-dependent memory
 * regions into the linear user address-space. It provides offsets to the
 * caller, which can then be used on the address_space of the drm-device. It
 * takes care not to overlap regions, to size them appropriately and not to
 * confuse mm-core with inconsistent fake vm_pgoff fields.
 * Drivers shouldn't use this for object placement in VMEM. This manager should
 * only be used to manage mappings into linear user-space VMs.
 *
 * We use drm_mm as the backend to manage object allocations. But it is highly
 * optimized for alloc/free calls, not lookups. Hence, we use an rb-tree to
 * speed up offset lookups.
 *
 * You must not use multiple offset managers on a single address_space.
 * Otherwise, mm-core will be unable to tear down memory mappings as the VM
 * will no longer be linear.
 *
 * This offset manager works on page-based addresses. That is, every argument
 * and return code (with the exception of drm_vma_node_offset_addr()) is given
 * in number of pages, not number of bytes. That means object sizes and offsets
 * must always be page-aligned (as usual).
 * If you want to get a valid byte-based user-space address for a given offset,
 * please see drm_vma_node_offset_addr().
 *
 * In addition to offset management, the vma offset manager also handles access
 * management. For every open-file context that is allowed to access a given
 * node, you must call drm_vma_node_allow(). Otherwise, an mmap() call on this
 * open-file with the offset of the node will fail with -EACCES. To revoke
 * access again, use drm_vma_node_revoke(). However, the caller is responsible
 * for destroying already existing mappings, if required.
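 *
 * A rough sketch of the per-object life-cycle follows. All driver-side names
 * (mydev, obj, file_priv, pages) are hypothetical placeholders; GEM-based
 * drivers get most of this wired up by the DRM core instead:
 *
 * ::
 *
 *     // make the object reachable through a fake offset
 *     drm_vma_offset_add(&mydev->vma_mgr, &obj->vma_node, pages);
 *
 *     // grant an open-file access, e.g. when it receives a handle
 *     drm_vma_node_allow(&obj->vma_node, file_priv);
 *
 *     // hand the byte-based offset to user-space for mmap(2)
 *     offset = drm_vma_node_offset_addr(&obj->vma_node);
 *
 *     // tear down in reverse order once the object goes away
 *     drm_vma_node_revoke(&obj->vma_node, file_priv);
 *     drm_vma_offset_remove(&mydev->vma_mgr, &obj->vma_node);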
 */

/**
 * drm_vma_offset_manager_init - Initialize new offset-manager
 * @mgr: Manager object
 * @page_offset: Offset of available memory area (page-based)
 * @size: Size of available address space range (page-based)
 *
 * Initialize a new offset-manager. The offset and area size available for the
 * manager are given as @page_offset and @size. Both are interpreted as
 * page-numbers, not bytes.
 *
 * Adding/removing nodes from the manager is locked internally and protected
 * against concurrent access. However, node allocation and destruction is left
 * to the caller. While calling into the vma-manager, a given node must
 * always be guaranteed to be referenced.
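 *
 * A minimal sketch, assuming a hypothetical driver that embeds its own manager
 * in a driver-private device structure and reuses the fake-offset range the
 * DRM core uses for GEM objects (the DRM_FILE_PAGE_OFFSET_* constants):
 *
 * ::
 *
 *     // at driver load
 *     drm_vma_offset_manager_init(&mydev->vma_mgr,
 *                                 DRM_FILE_PAGE_OFFSET_START,
 *                                 DRM_FILE_PAGE_OFFSET_SIZE);
 *
 *     // at driver unload, after all nodes have been removed again
 *     drm_vma_offset_manager_destroy(&mydev->vma_mgr);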
 */
void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
				 unsigned long page_offset, unsigned long size)
{
	rwlock_init(&mgr->vm_lock);
	drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size);
}
EXPORT_SYMBOL(drm_vma_offset_manager_init);

/**
 * drm_vma_offset_manager_destroy() - Destroy offset manager
 * @mgr: Manager object
 *
 * Destroy an offset manager which was previously created via
 * drm_vma_offset_manager_init(). The caller must remove all allocated nodes
 * before destroying the manager. Otherwise, drm_mm will refuse to free the
 * requested resources.
 *
 * The manager must not be accessed after this function is called.
 */
void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr)
{
	drm_mm_takedown(&mgr->vm_addr_space_mm);
}
EXPORT_SYMBOL(drm_vma_offset_manager_destroy);

/**
 * drm_vma_offset_lookup_locked() - Find node in offset space
 * @mgr: Manager object
 * @start: Start address for object (page-based)
 * @pages: Size of object (page-based)
 *
 * Find a node given a start address and object size. This returns the _best_
 * match for the given node. That is, @start may point somewhere into a valid
 * region and the given node will be returned, as long as the node spans the
 * whole requested area (given the size in number of pages as @pages).
 *
 * Note that before lookup the vma offset manager lookup lock must be acquired
 * with drm_vma_offset_lock_lookup(). See there for an example. This can then be
 * used to implement weakly referenced lookups using kref_get_unless_zero().
 *
 * Example:
 *
 * ::
 *
 *     drm_vma_offset_lock_lookup(mgr);
 *     node = drm_vma_offset_lookup_locked(mgr, start, pages);
 *     if (node)
 *         kref_get_unless_zero(container_of(node, sth, entr));
 *     drm_vma_offset_unlock_lookup(mgr);
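 *
 * The reference taken this way must be dropped again once the caller is done
 * with the object. A minimal sketch of that release side, assuming the
 * placeholder container "sth" from the example above embeds a struct kref
 * named "ref" and provides a hypothetical sth_free() release callback:
 *
 * ::
 *
 *     kref_put(&container_of(node, sth, entr)->ref, sth_free);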
 *
 * RETURNS:
 * Returns NULL if no suitable node can be found. Otherwise, the best match
 * is returned. It's the caller's responsibility to make sure the node doesn't
 * get destroyed before the caller can access it.
 */
struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
							  unsigned long start,
							  unsigned long pages)
{
	struct drm_mm_node *node, *best;
	struct rb_node *iter;
	unsigned long offset;

	iter = mgr->vm_addr_space_mm.interval_tree.rb_root.rb_node;
	best = NULL;

	while (likely(iter)) {
		node = rb_entry(iter, struct drm_mm_node, rb);
		offset = node->start;
		if (start >= offset) {
			iter = iter->rb_right;
			best = node;
			if (start == offset)
				break;
		} else {
			iter = iter->rb_left;
		}
	}

	/* verify that the node spans the requested area */
	if (best) {
		offset = best->start + best->size;
		if (offset < start + pages)
			best = NULL;
	}

	if (!best)
		return NULL;

	return container_of(best, struct drm_vma_offset_node, vm_node);
}
EXPORT_SYMBOL(drm_vma_offset_lookup_locked);

/**
 * drm_vma_offset_add() - Add offset node to manager
 * @mgr: Manager object
 * @node: Node to be added
 * @pages: Allocation size visible to user-space (in number of pages)
 *
 * Add a node to the offset-manager. If the node was already added, this does
 * nothing and returns 0. @pages is the size of the object given in number of
 * pages.
 * After this call succeeds, you can access the offset of the node until it
 * is removed again.
 *
 * If this call fails, it is safe to retry the operation or call
 * drm_vma_offset_remove() anyway. However, no cleanup is required in that
 * case.
 *
 * @pages is not required to be the same size as the underlying memory object
 * that you want to map. It only limits the size that user-space can map into
 * its address space.
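 *
 * A minimal sketch of how a driver might publish an mmap offset for one of
 * its buffer objects (mydev, obj and args are hypothetical driver-side names;
 * GEM drivers get this wired up by the DRM core instead):
 *
 * ::
 *
 *     ret = drm_vma_offset_add(&mydev->vma_mgr, &obj->vma_node,
 *                              obj->size >> PAGE_SHIFT);
 *     if (ret)
 *         return ret;
 *
 *     // byte-based offset that user-space passes to mmap(2)
 *     args->mmap_offset = drm_vma_node_offset_addr(&obj->vma_node);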
 *
 * RETURNS:
 * 0 on success, negative error code on failure.
 */
int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
		       struct drm_vma_offset_node *node, unsigned long pages)
{
	int ret = 0;

	write_lock(&mgr->vm_lock);

	if (!drm_mm_node_allocated(&node->vm_node))
		ret = drm_mm_insert_node(&mgr->vm_addr_space_mm,
					 &node->vm_node, pages);

	write_unlock(&mgr->vm_lock);

	return ret;
}
EXPORT_SYMBOL(drm_vma_offset_add);

/**
 * drm_vma_offset_remove() - Remove offset node from manager
 * @mgr: Manager object
 * @node: Node to be removed
 *
 * Remove a node from the offset manager. If the node wasn't added before, this
 * does nothing. After this call returns, the offset and size will be 0 until a
 * new offset is allocated via drm_vma_offset_add() again. Helper functions like
 * drm_vma_node_start() and drm_vma_node_offset_addr() will return 0 if no
 * offset is allocated.
 */
void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
			   struct drm_vma_offset_node *node)
{
	write_lock(&mgr->vm_lock);

	if (drm_mm_node_allocated(&node->vm_node)) {
		drm_mm_remove_node(&node->vm_node);
		memset(&node->vm_node, 0, sizeof(node->vm_node));
	}

	write_unlock(&mgr->vm_lock);
}
EXPORT_SYMBOL(drm_vma_offset_remove);

/**
 * drm_vma_node_allow - Add open-file to list of allowed users
 * @node: Node to modify
 * @tag: Tag of file to add
 *
 * Add @tag to the list of allowed open-files for this node. If @tag is
 * already on this list, the ref-count is incremented.
 *
 * The list of allowed-users is preserved across drm_vma_offset_add() and
 * drm_vma_offset_remove() calls. You may even call it if the node is currently
 * not added to any offset-manager.
 *
 * You must remove all open-files the same number of times as you added them
 * before destroying the node. Otherwise, you will leak memory.
 *
 * This is locked against concurrent access internally.
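 *
 * A minimal sketch of granting access when an open-file gains a handle to the
 * object (obj is hypothetical, file_priv is the struct drm_file of the
 * caller; the surrounding handle bookkeeping is driver specific and omitted):
 *
 * ::
 *
 *     ret = drm_vma_node_allow(&obj->vma_node, file_priv);
 *     if (ret)
 *         return ret;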
 *
 * RETURNS:
 * 0 on success, negative error code on internal failure (out-of-mem)
 */
int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag)
{
	struct rb_node **iter;
	struct rb_node *parent = NULL;
	struct drm_vma_offset_file *new, *entry;
	int ret = 0;

	/* Preallocate entry to avoid atomic allocations below. It is quite
	 * unlikely that an open-file is added twice to a single node so we
	 * don't optimize for this case. OOM is checked below only if the entry
	 * is actually used. */
	new = kmalloc(sizeof(*entry), GFP_KERNEL);

	write_lock(&node->vm_lock);

	iter = &node->vm_files.rb_node;

	while (likely(*iter)) {
		parent = *iter;
		entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb);

		if (tag == entry->vm_tag) {
			entry->vm_count++;
			goto unlock;
		} else if (tag > entry->vm_tag) {
			iter = &(*iter)->rb_right;
		} else {
			iter = &(*iter)->rb_left;
		}
	}

	if (!new) {
		ret = -ENOMEM;
		goto unlock;
	}

	new->vm_tag = tag;
	new->vm_count = 1;
	rb_link_node(&new->vm_rb, parent, iter);
	rb_insert_color(&new->vm_rb, &node->vm_files);
	new = NULL;

unlock:
	write_unlock(&node->vm_lock);
	kfree(new);
	return ret;
}
EXPORT_SYMBOL(drm_vma_node_allow);

/**
 * drm_vma_node_revoke - Remove open-file from list of allowed users
 * @node: Node to modify
 * @tag: Tag of file to remove
 *
 * Decrement the ref-count of @tag in the list of allowed open-files on @node.
 * If the ref-count drops to zero, remove @tag from the list. You must call
 * this once for every drm_vma_node_allow() on @tag.
 *
 * This is locked against concurrent access internally.
 *
 * If @tag is not on the list, nothing is done.
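 *
 * A minimal sketch of the matching revoke when the open-file drops its handle
 * again (same hypothetical names as in the drm_vma_node_allow() example);
 * note that any mappings user-space already created stay alive and must be
 * torn down by the caller if required:
 *
 * ::
 *
 *     drm_vma_node_revoke(&obj->vma_node, file_priv);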
 */
void drm_vma_node_revoke(struct drm_vma_offset_node *node,
			 struct drm_file *tag)
{
	struct drm_vma_offset_file *entry;
	struct rb_node *iter;

	write_lock(&node->vm_lock);

	iter = node->vm_files.rb_node;
	while (likely(iter)) {
		entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
		if (tag == entry->vm_tag) {
			if (!--entry->vm_count) {
				rb_erase(&entry->vm_rb, &node->vm_files);
				kfree(entry);
			}
			break;
		} else if (tag > entry->vm_tag) {
			iter = iter->rb_right;
		} else {
			iter = iter->rb_left;
		}
	}

	write_unlock(&node->vm_lock);
}
EXPORT_SYMBOL(drm_vma_node_revoke);

/**
 * drm_vma_node_is_allowed - Check whether an open-file is granted access
 * @node: Node to check
 * @tag: Tag of file to check
 *
 * Search the list in @node whether @tag is currently on the list of allowed
 * open-files (see drm_vma_node_allow()).
 *
 * This is locked against concurrent access internally.
 *
 * RETURNS:
 * true iff @tag is on the list
 */
bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
			     struct drm_file *tag)
{
	struct drm_vma_offset_file *entry;
	struct rb_node *iter;

	read_lock(&node->vm_lock);

	iter = node->vm_files.rb_node;
	while (likely(iter)) {
		entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
		if (tag == entry->vm_tag)
			break;
		else if (tag > entry->vm_tag)
			iter = iter->rb_right;
		else
			iter = iter->rb_left;
	}

	read_unlock(&node->vm_lock);

	return iter;
}
EXPORT_SYMBOL(drm_vma_node_is_allowed);