// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nouveau_drv.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_ttm.h"

#include <drm/drm_legacy.h>

#include <core/tegra.h>

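/* Common ->free() callback shared by all three resource managers below. */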
static void
nouveau_manager_del(struct ttm_resource_manager *man, struct ttm_resource *reg)
{
	nouveau_mem_del(reg);
}

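/*
 * Allocate a VRAM-backed resource.  The memory object carries the buffer's
 * kind/compression tags, and nouveau_mem_vram() performs the actual VRAM
 * allocation (contiguous if the BO requires it).
 */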
static int
nouveau_vram_manager_new(struct ttm_resource_manager *man,
			 struct ttm_buffer_object *bo,
			 const struct ttm_place *place,
			 struct ttm_resource *reg)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	int ret;

	if (drm->client.device.info.ram_size == 0)
		return -ENOMEM;

	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
	if (ret)
		return ret;

	ret = nouveau_mem_vram(reg, nvbo->contig, nvbo->page);
	if (ret) {
		nouveau_mem_del(reg);
		return ret;
	}

	return 0;
}

const struct ttm_resource_manager_func nouveau_vram_manager = {
	.alloc = nouveau_vram_manager_new,
	.free = nouveau_manager_del,
};

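/*
 * Allocate a GART resource on NV50+.  The GPU virtual address is assigned
 * when the buffer is mapped into a VMM, so no fixed offset exists here and
 * reg->start is simply zeroed.
 */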
static int
nouveau_gart_manager_new(struct ttm_resource_manager *man,
			 struct ttm_buffer_object *bo,
			 const struct ttm_place *place,
			 struct ttm_resource *reg)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	int ret;

	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
	if (ret)
		return ret;

	reg->start = 0;
	return 0;
}

const struct ttm_resource_manager_func nouveau_gart_manager = {
	.alloc = nouveau_gart_manager_new,
	.free = nouveau_manager_del,
};

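/*
 * On NV04-class hardware the GART aperture is carved out of the client VMM
 * up front: grab a virtual address range covering the resource and use it
 * as the resource's start offset.
 */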
static int
nv04_gart_manager_new(struct ttm_resource_manager *man,
		      struct ttm_buffer_object *bo,
		      const struct ttm_place *place,
		      struct ttm_resource *reg)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_mem *mem;
	int ret;

	ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
	if (ret)
		return ret;

	mem = nouveau_mem(reg);

	ret = nvif_vmm_get(&mem->cli->vmm.vmm, PTES, false, 12, 0,
			   reg->num_pages << PAGE_SHIFT, &mem->vma[0]);
	if (ret) {
		nouveau_mem_del(reg);
		return ret;
	}

	reg->start = mem->vma[0].addr >> PAGE_SHIFT;
	return 0;
}

const struct ttm_resource_manager_func nv04_gart_manager = {
	.alloc = nv04_gart_manager_new,
	.free = nouveau_manager_del,
};

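/*
 * Fault handler.  The BO is taken off the io_reserve LRU while the fault is
 * handled, so its I/O mapping can't be evicted underneath us, and is put
 * back afterwards.  On VM_FAULT_RETRY without FAULT_FLAG_RETRY_NOWAIT the
 * reservation has already been dropped inside ttm_bo_vm_fault_reserved(),
 * so we must return without unlocking again.
 */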
static vm_fault_t nouveau_ttm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	pgprot_t prot;
	vm_fault_t ret;

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	nouveau_bo_del_io_reserve_lru(bo);

	prot = vm_get_page_prot(vma->vm_flags);
	ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1);

	/* Re-add before the early return so the BO isn't leaked off the LRU
	 * on the retry path. */
	nouveau_bo_add_io_reserve_lru(bo);

	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

	dma_resv_unlock(bo->base.resv);

	return ret;
}

static const struct vm_operations_struct nouveau_ttm_vm_ops = {
	.fault = nouveau_ttm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access
};

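/* Let TTM set up the mapping, then substitute our own vm_ops so faults go
 * through nouveau_ttm_fault() above. */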
int
nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = filp->private_data;
	struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);
	int ret;

	ret = ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
	if (ret)
		return ret;

	vma->vm_ops = &nouveau_ttm_vm_ops;
	return 0;
}

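/*
 * Look up the MMU memory types used for host (system) memory, both coherent
 * and non-coherent, for the given kind.  The results are cached in
 * drm->ttm.type_host[] / type_ncoh[], indexed by whether a kind was given.
 */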
static int
nouveau_ttm_init_host(struct nouveau_drm *drm, u8 kind)
{
	struct nvif_mmu *mmu = &drm->client.mmu;
	int typei;

	typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE |
				   kind | NVIF_MEM_COHERENT);
	if (typei < 0)
		return -ENOSYS;

	drm->ttm.type_host[!!kind] = typei;

	typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE | kind);
	if (typei < 0)
		return -ENOSYS;

	drm->ttm.type_ncoh[!!kind] = typei;
	return 0;
}

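/*
 * On NV50+ VRAM is handled by our own resource manager, so allocations can
 * carry kind/compression metadata; earlier chips just use TTM's range
 * manager.
 */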
static int
nouveau_ttm_init_vram(struct nouveau_drm *drm)
{
	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
		struct ttm_resource_manager *man = kzalloc(sizeof(*man), GFP_KERNEL);

		if (!man)
			return -ENOMEM;

		man->func = &nouveau_vram_manager;

		ttm_resource_manager_init(man,
					  drm->gem.vram_available >> PAGE_SHIFT);
		ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_VRAM, man);
		ttm_resource_manager_set_used(man, true);
		return 0;
	} else {
		return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_VRAM, false,
					  drm->gem.vram_available >> PAGE_SHIFT);
	}
}

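/*
 * Tear down the VRAM manager: mark it unused, evict anything still
 * resident, then clean up and free the manager itself.
 */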
static void
nouveau_ttm_fini_vram(struct nouveau_drm *drm)
{
	struct ttm_resource_manager *man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM);

	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
		ttm_resource_manager_set_used(man, false);
		ttm_resource_manager_force_list_clean(&drm->ttm.bdev, man);
		ttm_resource_manager_cleanup(man);
		ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_VRAM, NULL);
		kfree(man);
	} else {
		ttm_range_man_fini(&drm->ttm.bdev, TTM_PL_VRAM);
	}
}

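/*
 * GART setup: NV50+ uses the per-VMM manager, pre-NV50 without AGP uses the
 * NV04 manager, and AGP systems fall back to TTM's range manager over the
 * AGP aperture.
 */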
static int
nouveau_ttm_init_gtt(struct nouveau_drm *drm)
{
	struct ttm_resource_manager *man;
	unsigned long size_pages = drm->gem.gart_available >> PAGE_SHIFT;
	const struct ttm_resource_manager_func *func = NULL;

	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
		func = &nouveau_gart_manager;
	else if (!drm->agp.bridge)
		func = &nv04_gart_manager;
	else
		return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_TT, true,
					  size_pages);

	man = kzalloc(sizeof(*man), GFP_KERNEL);
	if (!man)
		return -ENOMEM;

	man->func = func;
	man->use_tt = true;
	ttm_resource_manager_init(man, size_pages);
	ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_TT, man);
	ttm_resource_manager_set_used(man, true);
	return 0;
}

static void
nouveau_ttm_fini_gtt(struct nouveau_drm *drm)
{
	struct ttm_resource_manager *man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_TT);

	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA &&
	    drm->agp.bridge) {
		ttm_range_man_fini(&drm->ttm.bdev, TTM_PL_TT);
	} else {
		ttm_resource_manager_set_used(man, false);
		ttm_resource_manager_force_list_clean(&drm->ttm.bdev, man);
		ttm_resource_manager_cleanup(man);
		ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_TT, NULL);
		kfree(man);
	}
}

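/*
 * Main TTM bring-up: resolve memory types, take over any AGP aperture,
 * initialise the BO device, and set up the VRAM and GART managers.  The
 * VRAM aperture (BAR1) is also mapped write-combined for CPU access.
 */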
int
nouveau_ttm_init(struct nouveau_drm *drm)
{
	struct nvkm_device *device = nvxx_device(&drm->client.device);
	struct nvkm_pci *pci = device->pci;
	struct nvif_mmu *mmu = &drm->client.mmu;
	struct drm_device *dev = drm->dev;
	int typei, ret;

	ret = nouveau_ttm_init_host(drm, 0);
	if (ret)
		return ret;

	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
	    drm->client.device.info.chipset != 0x50) {
		ret = nouveau_ttm_init_host(drm, NVIF_MEM_KIND);
		if (ret)
			return ret;
	}

	if (drm->client.device.info.platform != NV_DEVICE_INFO_V0_SOC &&
	    drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
		typei = nvif_mmu_type(mmu, NVIF_MEM_VRAM | NVIF_MEM_MAPPABLE |
					   NVIF_MEM_KIND |
					   NVIF_MEM_COMP |
					   NVIF_MEM_DISP);
		if (typei < 0)
			return -ENOSYS;

		drm->ttm.type_vram = typei;
	} else {
		drm->ttm.type_vram = -1;
	}

	if (pci && pci->agp.bridge) {
		drm->agp.bridge = pci->agp.bridge;
		drm->agp.base = pci->agp.base;
		drm->agp.size = pci->agp.size;
		drm->agp.cma = pci->agp.cma;
	}

	ret = ttm_bo_device_init(&drm->ttm.bdev,
				 &nouveau_bo_driver,
				 dev->anon_inode->i_mapping,
				 dev->vma_offset_manager,
				 drm->client.mmu.dmabits <= 32);
	if (ret) {
		NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
		return ret;
	}

	/* VRAM init */
	drm->gem.vram_available = drm->client.device.info.ram_user;

	arch_io_reserve_memtype_wc(device->func->resource_addr(device, 1),
				   device->func->resource_size(device, 1));

	ret = nouveau_ttm_init_vram(drm);
	if (ret) {
		NV_ERROR(drm, "VRAM mm init failed, %d\n", ret);
		return ret;
	}

	drm->ttm.mtrr = arch_phys_wc_add(device->func->resource_addr(device, 1),
					 device->func->resource_size(device, 1));

	/* GART init */
	if (!drm->agp.bridge)
		drm->gem.gart_available = drm->client.vmm.vmm.limit;
	else
		drm->gem.gart_available = drm->agp.size;

	ret = nouveau_ttm_init_gtt(drm);
	if (ret) {
		NV_ERROR(drm, "GART mm init failed, %d\n", ret);
		return ret;
	}

	mutex_init(&drm->ttm.io_reserve_mutex);
	INIT_LIST_HEAD(&drm->ttm.io_reserve_lru);

	NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
	NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
	return 0;
}

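/* Tear down the managers and BO device created by nouveau_ttm_init(), and
 * release the write-combine mapping of the VRAM aperture. */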
void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
	struct nvkm_device *device = nvxx_device(&drm->client.device);

	nouveau_ttm_fini_vram(drm);
	nouveau_ttm_fini_gtt(drm);

	ttm_bo_device_release(&drm->ttm.bdev);

	arch_phys_wc_del(drm->ttm.mtrr);
	drm->ttm.mtrr = 0;
	arch_io_free_memtype_wc(device->func->resource_addr(device, 1),
				device->func->resource_size(device, 1));
}