/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>

#include <drm/drm_drv.h>
#include <drm/drm_managed.h>

static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
				       struct vm_fault *vmf)
{
	long err = 0;

	/*
	 * Quick non-stalling check for idle.
	 */
	if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_KERNEL))
		return 0;

	/*
	 * If possible, avoid waiting for GPU with mmap_lock
	 * held. We only do this if the fault allows retry and this
	 * is the first attempt.
	 */
	if (fault_flag_allow_retry_first(vmf->flags)) {
		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
			return VM_FAULT_RETRY;

		ttm_bo_get(bo);
		mmap_read_unlock(vmf->vma->vm_mm);
		(void)dma_resv_wait_timeout(bo->base.resv,
					    DMA_RESV_USAGE_KERNEL, true,
					    MAX_SCHEDULE_TIMEOUT);
		dma_resv_unlock(bo->base.resv);
		ttm_bo_put(bo);
		return VM_FAULT_RETRY;
	}

	/*
	 * Ordinary wait.
	 */
	err = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_KERNEL, true,
				    MAX_SCHEDULE_TIMEOUT);
	if (unlikely(err < 0)) {
		return (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
			VM_FAULT_NOPAGE;
	}

	return 0;
}

static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
				       unsigned long page_offset)
{
	struct ttm_device *bdev = bo->bdev;

	if (bdev->funcs->io_mem_pfn)
		return bdev->funcs->io_mem_pfn(bo, page_offset);

	return (bo->resource->bus.offset >> PAGE_SHIFT) + page_offset;
}

/**
 * ttm_bo_vm_reserve - Reserve a buffer object in a retryable vm callback
 * @bo: The buffer object
 * @vmf: The fault structure handed to the callback
 *
 * vm callbacks like fault() and *_mkwrite() allow for the mmap_lock to be dropped
 * during long waits, and after the wait the callback will be restarted. This
 * is to allow other threads using the same virtual memory space concurrent
 * access to map() and unmap() completely unrelated buffer objects. TTM buffer
 * object reservations sometimes wait for GPU and should therefore be
 * considered long waits. This function reserves the buffer object interruptibly,
 * taking this into account. Starvation is avoided by the vm system not
 * allowing too many repeated restarts.
 * This function is intended to be used in customized fault() and _mkwrite()
 * handlers.
 *
 * Return:
 *    0 on success and the bo was reserved.
 *    VM_FAULT_RETRY if blocking wait.
 *    VM_FAULT_NOPAGE if blocking wait and retrying was not allowed.
 */
vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
			     struct vm_fault *vmf)
{
	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_lock and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after waiting
	 * for the buffer to become unreserved.
	 */
	if (unlikely(!dma_resv_trylock(bo->base.resv))) {
		/*
		 * If the fault allows retry and this is the first
		 * fault attempt, we try to release the mmap_lock
		 * before waiting
		 */
		if (fault_flag_allow_retry_first(vmf->flags)) {
			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
				ttm_bo_get(bo);
				mmap_read_unlock(vmf->vma->vm_mm);
				if (!dma_resv_lock_interruptible(bo->base.resv,
								 NULL))
					dma_resv_unlock(bo->base.resv);
				ttm_bo_put(bo);
			}

			return VM_FAULT_RETRY;
		}

		if (dma_resv_lock_interruptible(bo->base.resv, NULL))
			return VM_FAULT_NOPAGE;
	}

	/*
	 * Refuse to fault imported pages. This should be handled
	 * (if at all) by redirecting mmap to the exporter.
	 */
	if (bo->ttm && (bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
		if (!(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)) {
			dma_resv_unlock(bo->base.resv);
			return VM_FAULT_SIGBUS;
		}
	}

	return 0;
}
EXPORT_SYMBOL(ttm_bo_vm_reserve);
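
/*
 * Illustrative sketch (editor's example, not part of the original file):
 * the kerneldoc above says ttm_bo_vm_reserve() is meant for customized
 * fault() handlers. Such a handler would typically pair it with
 * ttm_bo_vm_fault_reserved(), mirroring ttm_bo_vm_fault() further down in
 * this file. The "example_" name is hypothetical.
 */
#if 0	/* example only, never built */
static vm_fault_t example_driver_vm_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
	vm_fault_t ret;

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	/* Driver-specific work (e.g. migration or clearing) would go here. */

	ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
				       TTM_BO_VM_NUM_PREFAULT);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

	dma_resv_unlock(bo->base.resv);

	return ret;
}
#endif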

/**
 * ttm_bo_vm_fault_reserved - TTM fault helper
 * @vmf: The struct vm_fault given as argument to the fault callback
 * @prot: The page protection to be used for this memory area.
 * @num_prefault: Maximum number of prefault pages. The caller may want to
 * specify this based on madvise settings and the size of the GPU object
 * backed by the memory.
 *
 * This function inserts one or more page table entries pointing to the
 * memory backing the buffer object, and then returns a return code
 * instructing the caller to retry the page access.
 *
 * Return:
 *   VM_FAULT_NOPAGE on success or pending signal
 *   VM_FAULT_SIGBUS on unspecified error
 *   VM_FAULT_OOM on out-of-memory
 *   VM_FAULT_RETRY if retryable wait
 */
vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
				    pgprot_t prot,
				    pgoff_t num_prefault)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	struct ttm_device *bdev = bo->bdev;
	unsigned long page_offset;
	unsigned long page_last;
	unsigned long pfn;
	struct ttm_tt *ttm = NULL;
	struct page *page;
	int err;
	pgoff_t i;
	vm_fault_t ret = VM_FAULT_NOPAGE;
	unsigned long address = vmf->address;

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */
	ret = ttm_bo_vm_fault_idle(bo, vmf);
	if (unlikely(ret != 0))
		return ret;

	err = ttm_mem_io_reserve(bdev, bo->resource);
	if (unlikely(err != 0))
		return VM_FAULT_SIGBUS;

	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
		vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node);
	page_last = vma_pages(vma) + vma->vm_pgoff -
		drm_vma_node_start(&bo->base.vma_node);

	if (unlikely(page_offset >= PFN_UP(bo->base.size)))
		return VM_FAULT_SIGBUS;

	prot = ttm_io_prot(bo, bo->resource, prot);
	if (!bo->resource->bus.is_iomem) {
		struct ttm_operation_ctx ctx = {
			.interruptible = true,
			.no_wait_gpu = false,
			.force_alloc = true
		};

		ttm = bo->ttm;
		err = ttm_tt_populate(bdev, bo->ttm, &ctx);
		if (err) {
			if (err == -EINTR || err == -ERESTARTSYS ||
			    err == -EAGAIN)
				return VM_FAULT_NOPAGE;

			pr_debug("TTM fault hit %pe.\n", ERR_PTR(err));
			return VM_FAULT_SIGBUS;
		}
	} else {
		/* Iomem should not be marked encrypted */
		prot = pgprot_decrypted(prot);
	}

	/*
	 * Speculatively prefault a number of pages. Only error on
	 * first page.
	 */
	for (i = 0; i < num_prefault; ++i) {
		if (bo->resource->bus.is_iomem) {
			pfn = ttm_bo_io_mem_pfn(bo, page_offset);
		} else {
			page = ttm->pages[page_offset];
			if (unlikely(!page && i == 0)) {
				return VM_FAULT_OOM;
			} else if (unlikely(!page)) {
				break;
			}
			pfn = page_to_pfn(page);
		}

		/*
		 * Note that the value of @prot at this point may differ from
		 * the value of @vma->vm_page_prot in the caching- and
		 * encryption bits. This is because the exact location of the
		 * data may not be known at mmap() time and may also change
		 * at arbitrary times while the data is mmap'ed.
		 * See vmf_insert_pfn_prot() for a discussion.
		 */
		ret = vmf_insert_pfn_prot(vma, address, pfn, prot);

		/* Never error on prefaulted PTEs */
		if (unlikely((ret & VM_FAULT_ERROR))) {
			if (i == 0)
				return VM_FAULT_NOPAGE;
			else
				break;
		}

		address += PAGE_SIZE;
		if (unlikely(++page_offset >= page_last))
			break;
	}
	return ret;
}
EXPORT_SYMBOL(ttm_bo_vm_fault_reserved);

static void ttm_bo_release_dummy_page(struct drm_device *dev, void *res)
{
	struct page *dummy_page = (struct page *)res;

	__free_page(dummy_page);
}

vm_fault_t ttm_bo_vm_dummy_page(struct vm_fault *vmf, pgprot_t prot)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	struct drm_device *ddev = bo->base.dev;
	vm_fault_t ret = VM_FAULT_NOPAGE;
	unsigned long address;
	unsigned long pfn;
	struct page *page;

	/* Allocate a new dummy page to map the entire VA range of this VMA to it */
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return VM_FAULT_OOM;

	/* Set the page to be freed using a drmm release action */
	if (drmm_add_action_or_reset(ddev, ttm_bo_release_dummy_page, page))
		return VM_FAULT_OOM;

	pfn = page_to_pfn(page);

	/* Prefault the entire VMA range right away to avoid further faults */
	for (address = vma->vm_start; address < vma->vm_end;
	     address += PAGE_SIZE)
		ret = vmf_insert_pfn_prot(vma, address, pfn, prot);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_vm_dummy_page);

vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	pgprot_t prot;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	struct drm_device *ddev = bo->base.dev;
	vm_fault_t ret;
	int idx;

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	prot = vma->vm_page_prot;
	if (drm_dev_enter(ddev, &idx)) {
		ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
		drm_dev_exit(idx);
	} else {
		ret = ttm_bo_vm_dummy_page(vmf, prot);
	}
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

	dma_resv_unlock(bo->base.resv);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_vm_fault);

void ttm_bo_vm_open(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo = vma->vm_private_data;

	WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);

	ttm_bo_get(bo);
}
EXPORT_SYMBOL(ttm_bo_vm_open);

void ttm_bo_vm_close(struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo = vma->vm_private_data;

	ttm_bo_put(bo);
	vma->vm_private_data = NULL;
}
EXPORT_SYMBOL(ttm_bo_vm_close);

static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
				 unsigned long offset,
				 uint8_t *buf, int len, int write)
{
	unsigned long page = offset >> PAGE_SHIFT;
	unsigned long bytes_left = len;
	int ret;

	/* Copy a page at a time, that way no extra virtual address
	 * mapping is needed
	 */
	offset -= page << PAGE_SHIFT;
	do {
		unsigned long bytes = min(bytes_left, PAGE_SIZE - offset);
		struct ttm_bo_kmap_obj map;
		void *ptr;
		bool is_iomem;

		ret = ttm_bo_kmap(bo, page, 1, &map);
		if (ret)
			return ret;

		ptr = (uint8_t *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset;
		WARN_ON_ONCE(is_iomem);
		if (write)
			memcpy(ptr, buf, bytes);
		else
			memcpy(buf, ptr, bytes);
		ttm_bo_kunmap(&map);

		page++;
		buf += bytes;
		bytes_left -= bytes;
		offset = 0;
	} while (bytes_left);

	return len;
}

int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
		     void *buf, int len, int write)
{
	struct ttm_buffer_object *bo = vma->vm_private_data;
	unsigned long offset = (addr) - vma->vm_start +
		((vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node))
		 << PAGE_SHIFT);
	int ret;

	if (len < 1 || (offset + len) > bo->base.size)
		return -EIO;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (ret)
		return ret;

	switch (bo->resource->mem_type) {
	case TTM_PL_SYSTEM:
		fallthrough;
	case TTM_PL_TT:
		ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write);
		break;
	default:
		if (bo->bdev->funcs->access_memory)
			ret = bo->bdev->funcs->access_memory(
				bo, offset, buf, len, write);
		else
			ret = -EIO;
	}

	ttm_bo_unreserve(bo);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_vm_access);

static const struct vm_operations_struct ttm_bo_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access,
};
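
/*
 * Illustrative sketch (editor's example, not part of the original file):
 * drivers that install their own vm_operations_struct usually reuse the
 * exported helpers for everything but ->fault, analogous to ttm_bo_vm_ops
 * above. The "example_" names are hypothetical; example_driver_vm_fault
 * refers to the fault-handler sketch earlier in this file.
 */
#if 0	/* example only, never built */
static const struct vm_operations_struct example_driver_vm_ops = {
	.fault = example_driver_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access,
};
#endif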

/**
 * ttm_bo_mmap_obj - mmap memory backed by a ttm buffer object.
 *
 * @vma: vma as input from the fbdev mmap method.
 * @bo: The bo backing the address space.
 *
 * Maps a buffer object.
 */
int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	/* Enforce no COW, since COW would have really strange behavior with it. */
	if (is_cow_mapping(vma->vm_flags))
		return -EINVAL;

	ttm_bo_get(bo);

	/*
	 * Drivers may want to override the vm_ops field. Otherwise we
	 * use TTM's default callbacks.
	 */
	if (!vma->vm_ops)
		vma->vm_ops = &ttm_bo_vm_ops;

	/*
	 * Note: We're transferring the bo reference to
	 * vma->vm_private_data here.
	 */

	vma->vm_private_data = bo;

	vm_flags_set(vma, VM_PFNMAP | VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
	return 0;
}
EXPORT_SYMBOL(ttm_bo_mmap_obj);
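
/*
 * Illustrative sketch (editor's example, not part of the original file):
 * a driver's file_operations ->mmap would resolve vma->vm_pgoff to a
 * buffer object in its own address space and then hand the vma over to
 * ttm_bo_mmap_obj(). The lookup helper and the "example_" names are
 * hypothetical.
 */
#if 0	/* example only, never built */
static int example_driver_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct ttm_buffer_object *bo;
	int ret;

	/* Hypothetical helper: look up the bo for vma->vm_pgoff and take a reference. */
	bo = example_driver_lookup_bo(filp, vma);
	if (!bo)
		return -EINVAL;

	ret = ttm_bo_mmap_obj(vma, bo);

	/*
	 * On success ttm_bo_mmap_obj() took its own reference for
	 * vma->vm_private_data, so the lookup reference can be dropped
	 * again in either case.
	 */
	ttm_bo_put(bo);

	return ret;
}
#endif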