// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2019-2023 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"

/*
 * Different methods for tracking dirty:
 * VMW_BO_DIRTY_PAGETABLE - Scan the pagetable for hardware dirty bits
 * VMW_BO_DIRTY_MKWRITE - Write-protect page table entries and record write-
 * accesses in the VM mkwrite() callback
 */
enum vmw_bo_dirty_method {
	VMW_BO_DIRTY_PAGETABLE,
	VMW_BO_DIRTY_MKWRITE,
};

/*
 * No dirtied pages at scan trigger a transition to the _MKWRITE method,
 * similarly a certain percentage of dirty pages trigger a transition to
 * the _PAGETABLE method. How many triggers should we wait for before
 * changing method?
 */
#define VMW_DIRTY_NUM_CHANGE_TRIGGERS 2

/* Percentage to trigger a transition to the _PAGETABLE method */
#define VMW_DIRTY_PERCENTAGE 10

/**
 * struct vmw_bo_dirty - Dirty information for buffer objects
 * @start: First currently dirty bit
 * @end: Last currently dirty bit + 1
 * @method: The currently used dirty method
 * @change_count: Number of consecutive method change triggers
 * @ref_count: Reference count for this structure
 * @bitmap_size: The size of the bitmap in bits. Typically equal to the
 * number of pages in the bo.
 * @bitmap: A bitmap where each bit represents a page. A set bit means a
 * dirty page.
 */
struct vmw_bo_dirty {
	unsigned long start;
	unsigned long end;
	enum vmw_bo_dirty_method method;
	unsigned int change_count;
	unsigned int ref_count;
	unsigned long bitmap_size;
	/* Flexible array member; allocated together with the struct. */
	unsigned long bitmap[];
};
/**
 * vmw_bo_dirty_scan_pagetable - Perform a pagetable scan for dirty bits
 * @vbo: The buffer object to scan
 *
 * Scans the pagetable for dirty bits. Clear those bits and modify the
 * dirty structure with the results. This function may change the
 * dirty-tracking method.
 */
static void vmw_bo_dirty_scan_pagetable(struct vmw_bo *vbo)
{
	struct vmw_bo_dirty *dirty = vbo->dirty;
	pgoff_t offset = drm_vma_node_start(&vbo->tbo.base.vma_node);
	struct address_space *mapping = vbo->tbo.bdev->dev_mapping;
	pgoff_t num_marked;

	/* Harvest hardware dirty bits into the bitmap and [start, end). */
	num_marked = clean_record_shared_mapping_range
		(mapping,
		 offset, dirty->bitmap_size,
		 offset, &dirty->bitmap[0],
		 &dirty->start, &dirty->end);
	/*
	 * A scan that found nothing counts towards switching to the
	 * cheaper _MKWRITE method; any dirty page resets the counter.
	 */
	if (num_marked == 0)
		dirty->change_count++;
	else
		dirty->change_count = 0;

	if (dirty->change_count > VMW_DIRTY_NUM_CHANGE_TRIGGERS) {
		dirty->change_count = 0;
		dirty->method = VMW_BO_DIRTY_MKWRITE;
		/*
		 * Write-protect first so future writes fault into mkwrite,
		 * then pick up anything dirtied before the protection took
		 * effect.
		 */
		wp_shared_mapping_range(mapping,
					offset, dirty->bitmap_size);
		clean_record_shared_mapping_range(mapping,
						  offset, dirty->bitmap_size,
						  offset, &dirty->bitmap[0],
						  &dirty->start, &dirty->end);
	}
}
/**
 * vmw_bo_dirty_scan_mkwrite - Reset the mkwrite dirty-tracking method
 * @vbo: The buffer object to scan
 *
 * Write-protect pages written to so that consecutive write accesses will
 * trigger a call to mkwrite.
 *
 * This function may change the dirty-tracking method.
 */
static void vmw_bo_dirty_scan_mkwrite(struct vmw_bo *vbo)
{
	struct vmw_bo_dirty *dirty = vbo->dirty;
	unsigned long offset = drm_vma_node_start(&vbo->tbo.base.vma_node);
	struct address_space *mapping = vbo->tbo.bdev->dev_mapping;
	pgoff_t num_marked;

	/* Nothing recorded since the last scan: nothing to re-protect. */
	if (dirty->end <= dirty->start)
		return;

	num_marked = wp_shared_mapping_range(vbo->tbo.bdev->dev_mapping,
					     dirty->start + offset,
					     dirty->end - dirty->start);

	/*
	 * A large fraction of the bo dirty counts towards switching back
	 * to the _PAGETABLE scan method.
	 */
	if (100UL * num_marked / dirty->bitmap_size >
	    VMW_DIRTY_PERCENTAGE)
		dirty->change_count++;
	else
		dirty->change_count = 0;

	if (dirty->change_count > VMW_DIRTY_NUM_CHANGE_TRIGGERS) {
		pgoff_t start = 0;
		pgoff_t end = dirty->bitmap_size;

		dirty->method = VMW_BO_DIRTY_PAGETABLE;
		/*
		 * Flush any hardware-recorded dirty bits into throwaway
		 * locals, then rebuild the bitmap so it reflects exactly
		 * the software-tracked [start, end) range.
		 */
		clean_record_shared_mapping_range(mapping, offset, end, offset,
						  &dirty->bitmap[0],
						  &start, &end);
		bitmap_clear(&dirty->bitmap[0], 0, dirty->bitmap_size);
		if (dirty->start < dirty->end)
			bitmap_set(&dirty->bitmap[0], dirty->start,
				   dirty->end - dirty->start);
		dirty->change_count = 0;
	}
}

/**
 * vmw_bo_dirty_scan - Scan for dirty pages and add them to the dirty
 * tracking structure
 * @vbo: The buffer object to scan
 *
 * This function may change the dirty tracking method.
 */
void vmw_bo_dirty_scan(struct vmw_bo *vbo)
{
	struct vmw_bo_dirty *dirty = vbo->dirty;

	/* Dispatch on the currently active tracking method. */
	if (dirty->method == VMW_BO_DIRTY_PAGETABLE)
		vmw_bo_dirty_scan_pagetable(vbo);
	else
		vmw_bo_dirty_scan_mkwrite(vbo);
}
/**
 * vmw_bo_dirty_pre_unmap - write-protect and pick up dirty pages before
 * an unmap_mapping_range operation.
 * @vbo: The buffer object,
 * @start: First page of the range within the buffer object.
 * @end: Last page of the range within the buffer object + 1.
 *
 * If we're using the _PAGETABLE scan method, we may leak dirty pages
 * when calling unmap_mapping_range(). This function makes sure we pick
 * up all dirty pages.
 */
static void vmw_bo_dirty_pre_unmap(struct vmw_bo *vbo,
				   pgoff_t start, pgoff_t end)
{
	struct vmw_bo_dirty *dirty = vbo->dirty;
	unsigned long offset = drm_vma_node_start(&vbo->tbo.base.vma_node);
	struct address_space *mapping = vbo->tbo.bdev->dev_mapping;

	/* Only the _PAGETABLE method can lose dirty bits on unmap. */
	if (dirty->method != VMW_BO_DIRTY_PAGETABLE || start >= end)
		return;

	/* Write-protect, then record whatever is dirty in the range. */
	wp_shared_mapping_range(mapping, start + offset, end - start);
	clean_record_shared_mapping_range(mapping, start + offset,
					  end - start, offset,
					  &dirty->bitmap[0], &dirty->start,
					  &dirty->end);
}
/**
 * vmw_bo_dirty_unmap - Clear all ptes pointing to a range within a bo
 * @vbo: The buffer object,
 * @start: First page of the range within the buffer object.
 * @end: Last page of the range within the buffer object + 1.
 *
 * This is similar to ttm_bo_unmap_virtual() except it takes a subrange.
 */
void vmw_bo_dirty_unmap(struct vmw_bo *vbo,
			pgoff_t start, pgoff_t end)
{
	unsigned long offset = drm_vma_node_start(&vbo->tbo.base.vma_node);
	struct address_space *mapping = vbo->tbo.bdev->dev_mapping;

	/* Capture dirty state that would otherwise be lost by the unmap. */
	vmw_bo_dirty_pre_unmap(vbo, start, end);
	/* unmap_shared_mapping_range() takes byte offsets/lengths. */
	unmap_shared_mapping_range(mapping, (offset + start) << PAGE_SHIFT,
				   (loff_t) (end - start) << PAGE_SHIFT);
}
22962306a36Sopenharmony_ci */ 23062306a36Sopenharmony_ciint vmw_bo_dirty_add(struct vmw_bo *vbo) 23162306a36Sopenharmony_ci{ 23262306a36Sopenharmony_ci struct vmw_bo_dirty *dirty = vbo->dirty; 23362306a36Sopenharmony_ci pgoff_t num_pages = PFN_UP(vbo->tbo.resource->size); 23462306a36Sopenharmony_ci size_t size; 23562306a36Sopenharmony_ci int ret; 23662306a36Sopenharmony_ci 23762306a36Sopenharmony_ci if (dirty) { 23862306a36Sopenharmony_ci dirty->ref_count++; 23962306a36Sopenharmony_ci return 0; 24062306a36Sopenharmony_ci } 24162306a36Sopenharmony_ci 24262306a36Sopenharmony_ci size = sizeof(*dirty) + BITS_TO_LONGS(num_pages) * sizeof(long); 24362306a36Sopenharmony_ci dirty = kvzalloc(size, GFP_KERNEL); 24462306a36Sopenharmony_ci if (!dirty) { 24562306a36Sopenharmony_ci ret = -ENOMEM; 24662306a36Sopenharmony_ci goto out_no_dirty; 24762306a36Sopenharmony_ci } 24862306a36Sopenharmony_ci 24962306a36Sopenharmony_ci dirty->bitmap_size = num_pages; 25062306a36Sopenharmony_ci dirty->start = dirty->bitmap_size; 25162306a36Sopenharmony_ci dirty->end = 0; 25262306a36Sopenharmony_ci dirty->ref_count = 1; 25362306a36Sopenharmony_ci if (num_pages < PAGE_SIZE / sizeof(pte_t)) { 25462306a36Sopenharmony_ci dirty->method = VMW_BO_DIRTY_PAGETABLE; 25562306a36Sopenharmony_ci } else { 25662306a36Sopenharmony_ci struct address_space *mapping = vbo->tbo.bdev->dev_mapping; 25762306a36Sopenharmony_ci pgoff_t offset = drm_vma_node_start(&vbo->tbo.base.vma_node); 25862306a36Sopenharmony_ci 25962306a36Sopenharmony_ci dirty->method = VMW_BO_DIRTY_MKWRITE; 26062306a36Sopenharmony_ci 26162306a36Sopenharmony_ci /* Write-protect and then pick up already dirty bits */ 26262306a36Sopenharmony_ci wp_shared_mapping_range(mapping, offset, num_pages); 26362306a36Sopenharmony_ci clean_record_shared_mapping_range(mapping, offset, num_pages, 26462306a36Sopenharmony_ci offset, 26562306a36Sopenharmony_ci &dirty->bitmap[0], 26662306a36Sopenharmony_ci &dirty->start, &dirty->end); 26762306a36Sopenharmony_ci } 
26862306a36Sopenharmony_ci 26962306a36Sopenharmony_ci vbo->dirty = dirty; 27062306a36Sopenharmony_ci 27162306a36Sopenharmony_ci return 0; 27262306a36Sopenharmony_ci 27362306a36Sopenharmony_ciout_no_dirty: 27462306a36Sopenharmony_ci return ret; 27562306a36Sopenharmony_ci} 27662306a36Sopenharmony_ci 27762306a36Sopenharmony_ci/** 27862306a36Sopenharmony_ci * vmw_bo_dirty_release - Release a dirty-tracking user from a buffer object 27962306a36Sopenharmony_ci * @vbo: The buffer object 28062306a36Sopenharmony_ci * 28162306a36Sopenharmony_ci * This function releases a dirty-tracking user from a buffer object. 28262306a36Sopenharmony_ci * If the reference count reaches zero, then the dirty-tracking object is 28362306a36Sopenharmony_ci * freed and the pointer to it cleared. 28462306a36Sopenharmony_ci * 28562306a36Sopenharmony_ci * Return: Zero on success, -ENOMEM on memory allocation failure. 28662306a36Sopenharmony_ci */ 28762306a36Sopenharmony_civoid vmw_bo_dirty_release(struct vmw_bo *vbo) 28862306a36Sopenharmony_ci{ 28962306a36Sopenharmony_ci struct vmw_bo_dirty *dirty = vbo->dirty; 29062306a36Sopenharmony_ci 29162306a36Sopenharmony_ci if (dirty && --dirty->ref_count == 0) { 29262306a36Sopenharmony_ci kvfree(dirty); 29362306a36Sopenharmony_ci vbo->dirty = NULL; 29462306a36Sopenharmony_ci } 29562306a36Sopenharmony_ci} 29662306a36Sopenharmony_ci 29762306a36Sopenharmony_ci/** 29862306a36Sopenharmony_ci * vmw_bo_dirty_transfer_to_res - Pick up a resource's dirty region from 29962306a36Sopenharmony_ci * its backing mob. 30062306a36Sopenharmony_ci * @res: The resource 30162306a36Sopenharmony_ci * 30262306a36Sopenharmony_ci * This function will pick up all dirty ranges affecting the resource from 30362306a36Sopenharmony_ci * it's backup mob, and call vmw_resource_dirty_update() once for each 30462306a36Sopenharmony_ci * range. The transferred ranges will be cleared from the backing mob's 30562306a36Sopenharmony_ci * dirty tracking. 
/**
 * vmw_bo_dirty_transfer_to_res - Pick up a resource's dirty region from
 * its backing mob.
 * @res: The resource
 *
 * This function will pick up all dirty ranges affecting the resource from
 * its backup mob, and call vmw_resource_dirty_update() once for each
 * range. The transferred ranges will be cleared from the backing mob's
 * dirty tracking.
 */
void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res)
{
	struct vmw_bo *vbo = res->guest_memory_bo;
	struct vmw_bo_dirty *dirty = vbo->dirty;
	pgoff_t start, cur, end;
	unsigned long res_start = res->guest_memory_offset;
	unsigned long res_end = res->guest_memory_offset + res->guest_memory_size;

	/* Resource offsets are expected to be page-aligned. */
	WARN_ON_ONCE(res_start & ~PAGE_MASK);
	res_start >>= PAGE_SHIFT;
	res_end = DIV_ROUND_UP(res_end, PAGE_SIZE);

	/* No overlap between the resource and the tracked dirty range. */
	if (res_start >= dirty->end || res_end <= dirty->start)
		return;

	cur = max(res_start, dirty->start);
	/*
	 * NOTE(review): max() here extends the scan past the resource end
	 * whenever dirty->end is larger, while the symmetric clamp in
	 * vmw_bo_dirty_clear_res() uses min(). Confirm max() is intended.
	 */
	res_end = max(res_end, dirty->end);
	/* Walk runs of set bits, clearing each and reporting it upwards. */
	while (cur < res_end) {
		unsigned long num;

		start = find_next_bit(&dirty->bitmap[0], res_end, cur);
		if (start >= res_end)
			break;

		end = find_next_zero_bit(&dirty->bitmap[0], res_end, start + 1);
		cur = end + 1;
		num = end - start;
		bitmap_clear(&dirty->bitmap[0], start, num);
		vmw_resource_dirty_update(res, start, end);
	}

	/* Shrink the tracked dirty window where the resource covered an edge. */
	if (res_start <= dirty->start && res_end > dirty->start)
		dirty->start = res_end;
	if (res_start < dirty->end && res_end >= dirty->end)
		dirty->end = res_start;
}

/**
 * vmw_bo_dirty_clear_res - Clear a resource's dirty region from
 * its backing mob.
 * @res: The resource
 *
 * This function will clear all dirty ranges affecting the resource from
 * its backup mob's dirty tracking.
 */
void vmw_bo_dirty_clear_res(struct vmw_resource *res)
{
	unsigned long res_start = res->guest_memory_offset;
	unsigned long res_end = res->guest_memory_offset + res->guest_memory_size;
	struct vmw_bo *vbo = res->guest_memory_bo;
	struct vmw_bo_dirty *dirty = vbo->dirty;

	res_start >>= PAGE_SHIFT;
	res_end = DIV_ROUND_UP(res_end, PAGE_SIZE);

	/* No overlap between the resource and the tracked dirty range. */
	if (res_start >= dirty->end || res_end <= dirty->start)
		return;

	/* Clamp to the intersection and clear it from the bitmap. */
	res_start = max(res_start, dirty->start);
	res_end = min(res_end, dirty->end);
	bitmap_clear(&dirty->bitmap[0], res_start, res_end - res_start);

	/* Shrink the tracked dirty window where the resource covered an edge. */
	if (res_start <= dirty->start && res_end > dirty->start)
		dirty->start = res_end;
	if (res_start < dirty->end && res_end >= dirty->end)
		dirty->end = res_start;
}

/**
 * vmw_bo_vm_mkwrite - VM mkwrite() callback recording write accesses
 * @vmf: The fault descriptor.
 *
 * Records the written page in the dirty bitmap when the _MKWRITE method
 * is active.
 *
 * Return: VM_FAULT_* code from reservation, or VM_FAULT_SIGBUS for an
 * out-of-range page offset.
 */
vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	vm_fault_t ret;
	unsigned long page_offset;
	unsigned int save_flags;
	struct vmw_bo *vbo = to_vmw_bo(&bo->base);

	/*
	 * mkwrite() doesn't handle the VM_FAULT_RETRY return value correctly.
	 * So make sure the TTM helpers are aware.
	 */
	save_flags = vmf->flags;
	vmf->flags &= ~FAULT_FLAG_ALLOW_RETRY;
	ret = ttm_bo_vm_reserve(bo, vmf);
	vmf->flags = save_flags;
	if (ret)
		return ret;

	/* Page offset within the bo, derived from the vma fault offset. */
	page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node);
	if (unlikely(page_offset >= PFN_UP(bo->resource->size))) {
		ret = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	/* Record the write and widen the tracked [start, end) window. */
	if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE &&
	    !test_bit(page_offset, &vbo->dirty->bitmap[0])) {
		struct vmw_bo_dirty *dirty = vbo->dirty;

		__set_bit(page_offset, &dirty->bitmap[0]);
		dirty->start = min(dirty->start, page_offset);
		dirty->end = max(dirty->end, page_offset + 1);
	}

out_unlock:
	dma_resv_unlock(bo->base.resv);
	return ret;
}
/**
 * vmw_bo_vm_fault - VM fault callback for vmwgfx buffer objects
 * @vmf: The fault descriptor.
 *
 * Reserves the bo, limits prefaulting when dirty tracking is active and
 * selects a page protection matching the active dirty-tracking method
 * before delegating to the TTM fault helper.
 *
 * Return: VM_FAULT_* code.
 */
vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	struct vmw_bo *vbo = to_vmw_bo(&bo->base);
	pgoff_t num_prefault;
	pgprot_t prot;
	vm_fault_t ret;

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	/* Random-access hint: prefaulting extra pages would be wasted work. */
	num_prefault = (vma->vm_flags & VM_RAND_READ) ? 1 :
		TTM_BO_VM_NUM_PREFAULT;

	if (vbo->dirty) {
		pgoff_t allowed_prefault;
		unsigned long page_offset;

		page_offset = vmf->pgoff -
			drm_vma_node_start(&bo->base.vma_node);
		if (page_offset >= PFN_UP(bo->resource->size) ||
		    vmw_resources_clean(vbo, page_offset,
					page_offset + PAGE_SIZE,
					&allowed_prefault)) {
			ret = VM_FAULT_SIGBUS;
			goto out_unlock;
		}

		/* Don't prefault past what vmw_resources_clean() allows. */
		num_prefault = min(num_prefault, allowed_prefault);
	}

	/*
	 * If we don't track dirty using the MKWRITE method, make sure
	 * the page protection is write-enabled so we don't get
	 * a lot of unnecessary write faults.
	 */
	if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE)
		prot = vm_get_page_prot(vma->vm_flags & ~VM_SHARED);
	else
		prot = vm_get_page_prot(vma->vm_flags);

	ret = ttm_bo_vm_fault_reserved(vmf, prot, num_prefault);
	/*
	 * NOTE(review): in the RETRY-without-NOWAIT case the TTM helper is
	 * expected to have already dropped the reservation, hence no unlock
	 * here — confirm against ttm_bo_vm_fault_reserved().
	 */
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

out_unlock:
	dma_resv_unlock(bo->base.resv);

	return ret;
}