// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Huge page-table-entry support for IO memory.
 *
 * Copyright (C) 2007-2019 VMware, Inc. All rights reserved.
 */
#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

/**
 * struct vmw_thp_manager - Range manager implementing huge page alignment
 *
 * @manager: TTM resource manager.
 * @mm: The underlying range manager. Protected by @lock.
 * @lock: Manager lock.
 */
struct vmw_thp_manager {
	struct ttm_resource_manager manager;
	struct drm_mm mm;
	spinlock_t lock;
};

static struct vmw_thp_manager *to_thp_manager(struct ttm_resource_manager *man)
{
	return container_of(man, struct vmw_thp_manager, manager);
}

static const struct ttm_resource_manager_func vmw_thp_func;

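/**
 * vmw_thp_insert_aligned - Attempt a huge-page-aligned range allocation
 * @mm: The range manager to allocate from.
 * @node: The node that will receive the allocated range.
 * @align_pages: The huge-page alignment to attempt, in pages.
 * @place: The requested placement.
 * @mem: The resource being allocated for.
 * @lpfn: Last page frame number of the allowed range.
 * @mode: The drm_mm insertion mode.
 *
 * Return: 0 on success, -ENOSPC if the aligned insertion was not
 * attempted or did not succeed.
 */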
static int vmw_thp_insert_aligned(struct drm_mm *mm, struct drm_mm_node *node,
				  unsigned long align_pages,
				  const struct ttm_place *place,
				  struct ttm_resource *mem,
				  unsigned long lpfn,
				  enum drm_mm_insert_mode mode)
{
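	/*
	 * Only use the huge-page alignment if it is at least as strict as,
	 * and a multiple of, the alignment the resource itself requested.
	 */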
	if (align_pages >= mem->page_alignment &&
	    (!mem->page_alignment || align_pages % mem->page_alignment == 0)) {
		return drm_mm_insert_node_in_range(mm, node,
						   mem->num_pages,
						   align_pages, 0,
						   place->fpfn, lpfn, mode);
	}

	return -ENOSPC;
}

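/**
 * vmw_thp_get_node - Allocate a range, preferring huge-page alignment
 * @man: The resource manager.
 * @bo: The buffer object the allocation is for.
 * @place: The requested placement.
 * @mem: The resource to fill in with the allocated range.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, or a negative
 * error code propagated from drm_mm_insert_node_in_range().
 */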
static int vmw_thp_get_node(struct ttm_resource_manager *man,
			    struct ttm_buffer_object *bo,
			    const struct ttm_place *place,
			    struct ttm_resource *mem)
{
	struct vmw_thp_manager *rman = to_thp_manager(man);
	struct drm_mm *mm = &rman->mm;
	struct drm_mm_node *node;
	unsigned long align_pages;
	unsigned long lpfn;
	enum drm_mm_insert_mode mode = DRM_MM_INSERT_BEST;
	int ret;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	lpfn = place->lpfn;
	if (!lpfn)
		lpfn = man->size;

	if (place->flags & TTM_PL_FLAG_TOPDOWN)
		mode = DRM_MM_INSERT_HIGH;

	spin_lock(&rman->lock);
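	/*
	 * For sufficiently large allocations, try PUD-size alignment first
	 * (where the architecture supports PUD-size transparent huge pages),
	 * then PMD-size alignment, before falling back to the resource's
	 * default alignment.
	 */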
	if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) {
		align_pages = (HPAGE_PUD_SIZE >> PAGE_SHIFT);
		if (mem->num_pages >= align_pages) {
			ret = vmw_thp_insert_aligned(mm, node, align_pages,
						     place, mem, lpfn, mode);
			if (!ret)
				goto found_unlock;
		}
	}

	align_pages = (HPAGE_PMD_SIZE >> PAGE_SHIFT);
	if (mem->num_pages >= align_pages) {
		ret = vmw_thp_insert_aligned(mm, node, align_pages, place, mem,
					     lpfn, mode);
		if (!ret)
			goto found_unlock;
	}

	ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
					  mem->page_alignment, 0,
					  place->fpfn, lpfn, mode);
found_unlock:
	spin_unlock(&rman->lock);

	if (unlikely(ret)) {
		kfree(node);
	} else {
		mem->mm_node = node;
		mem->start = node->start;
	}

	return ret;
}

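/**
 * vmw_thp_put_node - Free a range allocated by vmw_thp_get_node
 * @man: The resource manager.
 * @mem: The resource whose range is to be freed.
 */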
static void vmw_thp_put_node(struct ttm_resource_manager *man,
			     struct ttm_resource *mem)
{
	struct vmw_thp_manager *rman = to_thp_manager(man);

	if (mem->mm_node) {
		spin_lock(&rman->lock);
		drm_mm_remove_node(mem->mm_node);
		spin_unlock(&rman->lock);

		kfree(mem->mm_node);
		mem->mm_node = NULL;
	}
}

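/**
 * vmw_thp_init - Set up the huge-page-aware VRAM range manager
 * @dev_priv: Pointer to the device private structure.
 *
 * Registers the manager as the TTM_PL_VRAM resource manager, sized to
 * the device's VRAM.
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */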
int vmw_thp_init(struct vmw_private *dev_priv)
{
	struct vmw_thp_manager *rman;

	rman = kzalloc(sizeof(*rman), GFP_KERNEL);
	if (!rman)
		return -ENOMEM;

	ttm_resource_manager_init(&rman->manager,
				  dev_priv->vram_size >> PAGE_SHIFT);

	rman->manager.func = &vmw_thp_func;
	drm_mm_init(&rman->mm, 0, rman->manager.size);
	spin_lock_init(&rman->lock);

	ttm_set_driver_manager(&dev_priv->bdev, TTM_PL_VRAM, &rman->manager);
	ttm_resource_manager_set_used(&rman->manager, true);
	return 0;
}

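/**
 * vmw_thp_fini - Tear down the huge-page-aware VRAM range manager
 * @dev_priv: Pointer to the device private structure.
 *
 * Evicts all buffers from the manager and releases it. Bails out
 * without freeing if the eviction fails.
 */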
void vmw_thp_fini(struct vmw_private *dev_priv)
{
	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
	struct vmw_thp_manager *rman = to_thp_manager(man);
	struct drm_mm *mm = &rman->mm;
	int ret;

	ttm_resource_manager_set_used(man, false);

	ret = ttm_resource_manager_force_list_clean(&dev_priv->bdev, man);
	if (ret)
		return;
	spin_lock(&rman->lock);
	drm_mm_clean(mm);
	drm_mm_takedown(mm);
	spin_unlock(&rman->lock);
	ttm_resource_manager_cleanup(man);
	ttm_set_driver_manager(&dev_priv->bdev, TTM_PL_VRAM, NULL);
	kfree(rman);
}

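/**
 * vmw_thp_debug - Print the state of the underlying range manager
 * @man: The resource manager.
 * @printer: The drm printer to output to.
 */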
static void vmw_thp_debug(struct ttm_resource_manager *man,
			  struct drm_printer *printer)
{
	struct vmw_thp_manager *rman = to_thp_manager(man);

	spin_lock(&rman->lock);
	drm_mm_print(&rman->mm, printer);
	spin_unlock(&rman->lock);
}

static const struct ttm_resource_manager_func vmw_thp_func = {
	.alloc = vmw_thp_get_node,
	.free = vmw_thp_put_node,
	.debug = vmw_thp_debug
};