// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2019 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_drv.h"

/*
 * Different methods for tracking dirty:
 * VMW_BO_DIRTY_PAGETABLE - Scan the pagetable for hardware dirty bits
 * VMW_BO_DIRTY_MKWRITE - Write-protect page table entries and record write-
 * accesses in the VM mkwrite() callback
 */
enum vmw_bo_dirty_method {
	VMW_BO_DIRTY_PAGETABLE,
	VMW_BO_DIRTY_MKWRITE,
};

/*
 * A scan that finds no dirtied pages triggers a transition to the _MKWRITE
 * method; similarly, a scan that finds more than a certain percentage of
 * dirty pages triggers a transition to the _PAGETABLE method. How many
 * consecutive triggers should we wait for before changing method?
 */
#define VMW_DIRTY_NUM_CHANGE_TRIGGERS 2

/* Percentage to trigger a transition to the _PAGETABLE method */
#define VMW_DIRTY_PERCENTAGE 10
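
/*
 * Worked example of the heuristic above (illustrative numbers only): for a
 * 1000-page buffer object, an mkwrite scan that had to write-protect more
 * than 100 pages (10%) counts as one trigger, while a pagetable scan that
 * found no dirty bits counts as one trigger. Since the transition test is
 * "change_count > VMW_DIRTY_NUM_CHANGE_TRIGGERS", the method only changes
 * on the third consecutive triggering scan.
 */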

/**
 * struct vmw_bo_dirty - Dirty information for buffer objects
 * @start: First currently dirty bit
 * @end: Last currently dirty bit + 1
 * @method: The currently used dirty method
 * @change_count: Number of consecutive method change triggers
 * @ref_count: Reference count for this structure
 * @bitmap_size: The size of the bitmap in bits. Typically equal to the
 * number of pages in the bo.
 * @size: The accounting size for this struct.
 * @bitmap: A bitmap where each bit represents a page. A set bit means a
 * dirty page.
 */
struct vmw_bo_dirty {
	unsigned long start;
	unsigned long end;
	enum vmw_bo_dirty_method method;
	unsigned int change_count;
	unsigned int ref_count;
	unsigned long bitmap_size;
	size_t size;
	unsigned long bitmap[];
};
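
/*
 * Note on the @start/@end convention (explanatory sketch, not upstream
 * documentation): the pair describes a half-open page range [start, end)
 * bounding all set bits in @bitmap. An empty range is encoded as
 * start == bitmap_size and end == 0, which is how vmw_bo_dirty_add()
 * initializes it; min()/max() updates then grow the range towards any
 * newly dirtied page.
 */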

/**
 * vmw_bo_dirty_scan_pagetable - Perform a pagetable scan for dirty bits
 * @vbo: The buffer object to scan
 *
 * Scans the pagetable for dirty bits. Clears those bits and modifies the
 * dirty structure with the results. This function may change the
 * dirty-tracking method.
 */
static void vmw_bo_dirty_scan_pagetable(struct vmw_buffer_object *vbo)
{
	struct vmw_bo_dirty *dirty = vbo->dirty;
	pgoff_t offset = drm_vma_node_start(&vbo->base.base.vma_node);
	struct address_space *mapping = vbo->base.bdev->dev_mapping;
	pgoff_t num_marked;

	num_marked = clean_record_shared_mapping_range(mapping,
						       offset,
						       dirty->bitmap_size,
						       offset,
						       &dirty->bitmap[0],
						       &dirty->start,
						       &dirty->end);
	if (num_marked == 0)
		dirty->change_count++;
	else
		dirty->change_count = 0;

	if (dirty->change_count > VMW_DIRTY_NUM_CHANGE_TRIGGERS) {
		dirty->change_count = 0;
		dirty->method = VMW_BO_DIRTY_MKWRITE;
		wp_shared_mapping_range(mapping,
					offset, dirty->bitmap_size);
		clean_record_shared_mapping_range(mapping,
						  offset, dirty->bitmap_size,
						  offset, &dirty->bitmap[0],
						  &dirty->start, &dirty->end);
	}
}

/**
 * vmw_bo_dirty_scan_mkwrite - Reset the mkwrite dirty-tracking method
 * @vbo: The buffer object to scan
 *
 * Write-protect pages written to so that subsequent write accesses will
 * trigger a call to mkwrite.
 *
 * This function may change the dirty-tracking method.
 */
static void vmw_bo_dirty_scan_mkwrite(struct vmw_buffer_object *vbo)
{
	struct vmw_bo_dirty *dirty = vbo->dirty;
	unsigned long offset = drm_vma_node_start(&vbo->base.base.vma_node);
	struct address_space *mapping = vbo->base.bdev->dev_mapping;
	pgoff_t num_marked;

	if (dirty->end <= dirty->start)
		return;

	num_marked = wp_shared_mapping_range(mapping,
					     dirty->start + offset,
					     dirty->end - dirty->start);

	if (100UL * num_marked / dirty->bitmap_size >
	    VMW_DIRTY_PERCENTAGE) {
		dirty->change_count++;
	} else {
		dirty->change_count = 0;
	}

	if (dirty->change_count > VMW_DIRTY_NUM_CHANGE_TRIGGERS) {
		pgoff_t start = 0;
		pgoff_t end = dirty->bitmap_size;

		dirty->method = VMW_BO_DIRTY_PAGETABLE;
		clean_record_shared_mapping_range(mapping, offset, end, offset,
						  &dirty->bitmap[0],
						  &start, &end);
		bitmap_clear(&dirty->bitmap[0], 0, dirty->bitmap_size);
		if (dirty->start < dirty->end)
			bitmap_set(&dirty->bitmap[0], dirty->start,
				   dirty->end - dirty->start);
		dirty->change_count = 0;
	}
}

/**
 * vmw_bo_dirty_scan - Scan for dirty pages and add them to the dirty
 * tracking structure
 * @vbo: The buffer object to scan
 *
 * This function may change the dirty tracking method.
 */
void vmw_bo_dirty_scan(struct vmw_buffer_object *vbo)
{
	struct vmw_bo_dirty *dirty = vbo->dirty;

	if (dirty->method == VMW_BO_DIRTY_PAGETABLE)
		vmw_bo_dirty_scan_pagetable(vbo);
	else
		vmw_bo_dirty_scan_mkwrite(vbo);
}
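
/*
 * Typical lifecycle of the tracking state above (illustrative sketch only;
 * the real call sites live elsewhere in vmwgfx, e.g. the resource code):
 *
 *	vmw_bo_dirty_add(vbo);              - start tracking, take a reference
 *	...
 *	vmw_bo_dirty_scan(vbo);             - periodically collect dirty bits
 *	vmw_bo_dirty_transfer_to_res(res);  - hand dirty ranges to the resource
 *	...
 *	vmw_bo_dirty_release(vbo);          - drop the user, free at zero refs
 */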

/**
 * vmw_bo_dirty_pre_unmap - write-protect and pick up dirty pages before
 * an unmap_mapping_range operation.
 * @vbo: The buffer object.
 * @start: First page of the range within the buffer object.
 * @end: Last page of the range within the buffer object + 1.
 *
 * If we're using the _PAGETABLE scan method, we may leak dirty pages
 * when calling unmap_mapping_range(). This function makes sure we pick
 * up all dirty pages.
 */
static void vmw_bo_dirty_pre_unmap(struct vmw_buffer_object *vbo,
				   pgoff_t start, pgoff_t end)
{
	struct vmw_bo_dirty *dirty = vbo->dirty;
	unsigned long offset = drm_vma_node_start(&vbo->base.base.vma_node);
	struct address_space *mapping = vbo->base.bdev->dev_mapping;

	if (dirty->method != VMW_BO_DIRTY_PAGETABLE || start >= end)
		return;

	wp_shared_mapping_range(mapping, start + offset, end - start);
	clean_record_shared_mapping_range(mapping, start + offset,
					  end - start, offset,
					  &dirty->bitmap[0], &dirty->start,
					  &dirty->end);
}

/**
 * vmw_bo_dirty_unmap - Clear all ptes pointing to a range within a bo
 * @vbo: The buffer object.
 * @start: First page of the range within the buffer object.
 * @end: Last page of the range within the buffer object + 1.
 *
 * This is similar to ttm_bo_unmap_virtual_locked() except it takes a subrange.
 */
void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
			pgoff_t start, pgoff_t end)
{
	unsigned long offset = drm_vma_node_start(&vbo->base.base.vma_node);
	struct address_space *mapping = vbo->base.bdev->dev_mapping;

	vmw_bo_dirty_pre_unmap(vbo, start, end);
	unmap_shared_mapping_range(mapping, (offset + start) << PAGE_SHIFT,
				   (loff_t) (end - start) << PAGE_SHIFT);
}
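
/*
 * Offset math sketch for the call above (assuming 4 KiB pages, so
 * PAGE_SHIFT == 12): unmap_shared_mapping_range() takes a byte offset and
 * a byte length, while @start/@end and drm_vma_node_start() are in pages.
 * With a vma node starting at page 0x10000 and start = 2, end = 5, the
 * call unmaps bytes [0x10002000, 0x10005000) of the address space, i.e.
 * pages 2, 3 and 4 of the buffer object.
 */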

/**
 * vmw_bo_dirty_add - Add a dirty-tracking user to a buffer object
 * @vbo: The buffer object
 *
 * This function registers a dirty-tracking user with a buffer object.
 * A user can be for example a resource or a vma in a special user-space
 * mapping.
 *
 * Return: Zero on success, -ENOMEM on memory allocation failure.
 */
int vmw_bo_dirty_add(struct vmw_buffer_object *vbo)
{
	struct vmw_bo_dirty *dirty = vbo->dirty;
	pgoff_t num_pages = vbo->base.num_pages;
	size_t size, acc_size;
	int ret;
	static struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};

	if (dirty) {
		dirty->ref_count++;
		return 0;
	}

	size = sizeof(*dirty) + BITS_TO_LONGS(num_pages) * sizeof(long);
	acc_size = ttm_round_pot(size);
	ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx);
	if (ret) {
		VMW_DEBUG_USER("Out of graphics memory for buffer object "
			       "dirty tracker.\n");
		return ret;
	}
	dirty = kvzalloc(size, GFP_KERNEL);
	if (!dirty) {
		ret = -ENOMEM;
		goto out_no_dirty;
	}

	dirty->size = acc_size;
	dirty->bitmap_size = num_pages;
	dirty->start = dirty->bitmap_size;
	dirty->end = 0;
	dirty->ref_count = 1;
	if (num_pages < PAGE_SIZE / sizeof(pte_t)) {
		dirty->method = VMW_BO_DIRTY_PAGETABLE;
	} else {
		struct address_space *mapping = vbo->base.bdev->dev_mapping;
		pgoff_t offset = drm_vma_node_start(&vbo->base.base.vma_node);

		dirty->method = VMW_BO_DIRTY_MKWRITE;

		/* Write-protect and then pick up already dirty bits */
		wp_shared_mapping_range(mapping, offset, num_pages);
		clean_record_shared_mapping_range(mapping, offset, num_pages,
						  offset,
						  &dirty->bitmap[0],
						  &dirty->start, &dirty->end);
	}

	vbo->dirty = dirty;

	return 0;

out_no_dirty:
	ttm_mem_global_free(&ttm_mem_glob, acc_size);
	return ret;
}
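
/*
 * Sizing note for the initial method choice above (illustrative, assuming
 * a typical x86-64 configuration with PAGE_SIZE == 4096 and an 8-byte
 * pte_t): PAGE_SIZE / sizeof(pte_t) == 512, so buffer objects smaller
 * than 512 pages (2 MiB) start out with the cheap _PAGETABLE scan, while
 * larger ones start out write-protected in _MKWRITE mode.
 */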

/**
 * vmw_bo_dirty_release - Release a dirty-tracking user from a buffer object
 * @vbo: The buffer object
 *
 * This function releases a dirty-tracking user from a buffer object.
 * If the reference count reaches zero, then the dirty-tracking object is
 * freed and the pointer to it cleared.
 */
void vmw_bo_dirty_release(struct vmw_buffer_object *vbo)
{
	struct vmw_bo_dirty *dirty = vbo->dirty;

	if (dirty && --dirty->ref_count == 0) {
		size_t acc_size = dirty->size;

		kvfree(dirty);
		ttm_mem_global_free(&ttm_mem_glob, acc_size);
		vbo->dirty = NULL;
	}
}

/**
 * vmw_bo_dirty_transfer_to_res - Pick up a resource's dirty region from
 * its backing mob.
 * @res: The resource
 *
 * This function will pick up all dirty ranges affecting the resource from
 * its backup mob, and call vmw_resource_dirty_update() once for each
 * range. The transferred ranges will be cleared from the backing mob's
 * dirty tracking.
 */
void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res)
{
	struct vmw_buffer_object *vbo = res->backup;
	struct vmw_bo_dirty *dirty = vbo->dirty;
	pgoff_t start, cur, end;
	unsigned long res_start = res->backup_offset;
	unsigned long res_end = res->backup_offset + res->backup_size;

	WARN_ON_ONCE(res_start & ~PAGE_MASK);
	res_start >>= PAGE_SHIFT;
	res_end = DIV_ROUND_UP(res_end, PAGE_SIZE);

	if (res_start >= dirty->end || res_end <= dirty->start)
		return;

	cur = max(res_start, dirty->start);
	res_end = max(res_end, dirty->end);
	while (cur < res_end) {
		unsigned long num;

		start = find_next_bit(&dirty->bitmap[0], res_end, cur);
		if (start >= res_end)
			break;

		end = find_next_zero_bit(&dirty->bitmap[0], res_end, start + 1);
		cur = end + 1;
		num = end - start;
		bitmap_clear(&dirty->bitmap[0], start, num);
		vmw_resource_dirty_update(res, start, end);
	}

	if (res_start <= dirty->start && res_end > dirty->start)
		dirty->start = res_end;
	if (res_start < dirty->end && res_end >= dirty->end)
		dirty->end = res_start;
}
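
/*
 * Worked example for the transfer loop above (illustrative numbers only):
 * take a 16-page backup bo with dirty bits {3, 4, 5, 9} set, so
 * dirty->start == 3 and dirty->end == 10, backing a resource that spans
 * the whole bo. The loop calls vmw_resource_dirty_update(res, 3, 6) and
 * vmw_resource_dirty_update(res, 9, 10), clears those bits, and the final
 * bookkeeping leaves the bo's dirty range empty
 * (dirty->start == 16, dirty->end == 0).
 */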

/**
 * vmw_bo_dirty_clear_res - Clear a resource's dirty region from
 * its backing mob.
 * @res: The resource
 *
 * This function will clear all dirty ranges affecting the resource from
 * its backup mob's dirty tracking.
 */
void vmw_bo_dirty_clear_res(struct vmw_resource *res)
{
	unsigned long res_start = res->backup_offset;
	unsigned long res_end = res->backup_offset + res->backup_size;
	struct vmw_buffer_object *vbo = res->backup;
	struct vmw_bo_dirty *dirty = vbo->dirty;

	res_start >>= PAGE_SHIFT;
	res_end = DIV_ROUND_UP(res_end, PAGE_SIZE);

	if (res_start >= dirty->end || res_end <= dirty->start)
		return;

	res_start = max(res_start, dirty->start);
	res_end = min(res_end, dirty->end);
	bitmap_clear(&dirty->bitmap[0], res_start, res_end - res_start);

	if (res_start <= dirty->start && res_end > dirty->start)
		dirty->start = res_end;
	if (res_start < dirty->end && res_end >= dirty->end)
		dirty->end = res_start;
}

vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	vm_fault_t ret;
	unsigned long page_offset;
	unsigned int save_flags;
	struct vmw_buffer_object *vbo =
		container_of(bo, typeof(*vbo), base);

	/*
	 * mkwrite() doesn't handle the VM_FAULT_RETRY return value correctly.
	 * So make sure the TTM helpers are aware.
	 */
	save_flags = vmf->flags;
	vmf->flags &= ~FAULT_FLAG_ALLOW_RETRY;
	ret = ttm_bo_vm_reserve(bo, vmf);
	vmf->flags = save_flags;
	if (ret)
		return ret;

	page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node);
	if (unlikely(page_offset >= bo->num_pages)) {
		ret = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE &&
	    !test_bit(page_offset, &vbo->dirty->bitmap[0])) {
		struct vmw_bo_dirty *dirty = vbo->dirty;

		__set_bit(page_offset, &dirty->bitmap[0]);
		dirty->start = min(dirty->start, page_offset);
		dirty->end = max(dirty->end, page_offset + 1);
	}

out_unlock:
	dma_resv_unlock(bo->base.resv);
	return ret;
}

vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	struct vmw_buffer_object *vbo =
		container_of(bo, struct vmw_buffer_object, base);
	pgoff_t num_prefault;
	pgprot_t prot;
	vm_fault_t ret;

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	num_prefault = (vma->vm_flags & VM_RAND_READ) ? 1 :
		TTM_BO_VM_NUM_PREFAULT;

	if (vbo->dirty) {
		pgoff_t allowed_prefault;
		unsigned long page_offset;

		page_offset = vmf->pgoff -
			drm_vma_node_start(&bo->base.vma_node);
		if (page_offset >= bo->num_pages ||
		    vmw_resources_clean(vbo, page_offset,
					page_offset + PAGE_SIZE,
					&allowed_prefault)) {
			ret = VM_FAULT_SIGBUS;
			goto out_unlock;
		}

		num_prefault = min(num_prefault, allowed_prefault);
	}

	/*
	 * If we don't track dirty using the MKWRITE method, make sure
	 * the page protection is write-enabled so we don't get a lot of
	 * unnecessary write faults.
	 */
	if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE)
		prot = vm_get_page_prot(vma->vm_flags & ~VM_SHARED);
	else
		prot = vm_get_page_prot(vma->vm_flags);

	ret = ttm_bo_vm_fault_reserved(vmf, prot, num_prefault, 1);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

out_unlock:
	dma_resv_unlock(bo->base.resv);

	return ret;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf,
				enum page_entry_size pe_size)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	struct vmw_buffer_object *vbo =
		container_of(bo, struct vmw_buffer_object, base);
	pgprot_t prot;
	vm_fault_t ret;
	pgoff_t fault_page_size;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	bool is_cow_mapping =
		(vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	switch (pe_size) {
	case PE_SIZE_PMD:
		fault_page_size = HPAGE_PMD_SIZE >> PAGE_SHIFT;
		break;
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
	case PE_SIZE_PUD:
		fault_page_size = HPAGE_PUD_SIZE >> PAGE_SHIFT;
		break;
#endif
	default:
		WARN_ON_ONCE(1);
		return VM_FAULT_FALLBACK;
	}

	/* Always do write dirty-tracking and COW on PTE level. */
	if (write && (READ_ONCE(vbo->dirty) || is_cow_mapping))
		return VM_FAULT_FALLBACK;

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	if (vbo->dirty) {
		pgoff_t allowed_prefault;
		unsigned long page_offset;

		page_offset = vmf->pgoff -
			drm_vma_node_start(&bo->base.vma_node);
		if (page_offset >= bo->num_pages ||
		    vmw_resources_clean(vbo, page_offset,
					page_offset + PAGE_SIZE,
					&allowed_prefault)) {
			ret = VM_FAULT_SIGBUS;
			goto out_unlock;
		}

		/*
		 * Write protect, so we get a new fault on write, and can
		 * split.
		 */
		prot = vm_get_page_prot(vma->vm_flags & ~VM_SHARED);
	} else {
		prot = vm_get_page_prot(vma->vm_flags);
	}

	ret = ttm_bo_vm_fault_reserved(vmf, prot, 1, fault_page_size);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

out_unlock:
	dma_resv_unlock(bo->base.resv);

	return ret;
}
#endif