/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drm_exec.h>

#include "amdgpu_mes.h"
#include "amdgpu.h"
#include "soc15_common.h"
#include "amdgpu_mes_ctx.h"

#define AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
#define AMDGPU_ONE_DOORBELL_SIZE 8

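/*
 * Size of the per-process doorbell slice: one 8-byte doorbell per queue
 * slot, rounded up to page granularity.
 */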
int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
{
	return roundup(AMDGPU_ONE_DOORBELL_SIZE *
		       AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
		       PAGE_SIZE);
}

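/*
 * Allocate one kernel doorbell from the MES doorbell bitmap. SDMA queues
 * start searching at the first SDMA engine doorbell offset, everything
 * else at offset 0. The absolute dword offset on the doorbell BAR is
 * returned through @doorbell_index.
 */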
static int amdgpu_mes_kernel_doorbell_get(struct amdgpu_device *adev,
					  struct amdgpu_mes_process *process,
					  int ip_type, uint64_t *doorbell_index)
{
	unsigned int offset, found;
	struct amdgpu_mes *mes = &adev->mes;

	if (ip_type == AMDGPU_RING_TYPE_SDMA)
		offset = adev->doorbell_index.sdma_engine[0];
	else
		offset = 0;

	found = find_next_zero_bit(mes->doorbell_bitmap, mes->num_mes_dbs, offset);
	if (found >= mes->num_mes_dbs) {
		DRM_WARN("No doorbell available\n");
		return -ENOSPC;
	}

	set_bit(found, mes->doorbell_bitmap);

	/* Get the absolute doorbell index on BAR */
	*doorbell_index = mes->db_start_dw_offset + found * 2;
	return 0;
}

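/*
 * Release a kernel doorbell handed out by amdgpu_mes_kernel_doorbell_get();
 * the absolute dword offset is converted back to a bitmap index before
 * the bit is cleared.
 */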
static void amdgpu_mes_kernel_doorbell_free(struct amdgpu_device *adev,
					    struct amdgpu_mes_process *process,
					    uint32_t doorbell_index)
{
	unsigned int old, rel_index;
	struct amdgpu_mes *mes = &adev->mes;

	/* Find the relative index of the doorbell in this object */
	rel_index = (doorbell_index - mes->db_start_dw_offset) / 2;
	old = test_and_clear_bit(rel_index, mes->doorbell_bitmap);
	WARN_ON(!old);
}

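/*
 * Set up the kernel doorbell bitmap and reserve the first slots for the
 * per-priority aggregated doorbells used by the MES scheduler.
 */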
static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_mes *mes = &adev->mes;

	/* Bitmap for dynamic allocation of kernel doorbells */
	mes->doorbell_bitmap = bitmap_zalloc(PAGE_SIZE / sizeof(u32), GFP_KERNEL);
	if (!mes->doorbell_bitmap) {
		DRM_ERROR("Failed to allocate MES doorbell bitmap\n");
		return -ENOMEM;
	}

	mes->num_mes_dbs = PAGE_SIZE / AMDGPU_ONE_DOORBELL_SIZE;
	for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++) {
		adev->mes.aggregated_doorbells[i] = mes->db_start_dw_offset + i * 2;
		set_bit(i, mes->doorbell_bitmap);
	}

	return 0;
}

static void amdgpu_mes_doorbell_free(struct amdgpu_device *adev)
{
	bitmap_free(adev->mes.doorbell_bitmap);
}

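/*
 * Common MES software init: ID allocators and locks, HQD masks for the
 * gfx/compute/sdma pipes that MES may schedule on, writeback slots for
 * the scheduler context, the query-status fence and register reads, and
 * the kernel doorbell bitmap.
 */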
int amdgpu_mes_init(struct amdgpu_device *adev)
{
	int i, r;

	adev->mes.adev = adev;

	idr_init(&adev->mes.pasid_idr);
	idr_init(&adev->mes.gang_id_idr);
	idr_init(&adev->mes.queue_id_idr);
	ida_init(&adev->mes.doorbell_ida);
	spin_lock_init(&adev->mes.queue_id_lock);
	spin_lock_init(&adev->mes.ring_lock);
	mutex_init(&adev->mes.mutex_hidden);

	adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
	adev->mes.vmid_mask_mmhub = 0xffffff00;
	adev->mes.vmid_mask_gfxhub = 0xffffff00;

	for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
		/* use only 1st MEC pipes */
		if (i >= 4)
			continue;
		adev->mes.compute_hqd_mask[i] = 0xc;
	}

	for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
		adev->mes.gfx_hqd_mask[i] = i ? 0 : 0xfffffffe;

	for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
		if (adev->ip_versions[SDMA0_HWIP][0] < IP_VERSION(6, 0, 0))
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0x3fc;
		/* zero sdma_hqd_mask for non-existent engine */
		else if (adev->sdma.num_instances == 1)
			adev->mes.sdma_hqd_mask[i] = i ? 0 : 0xfc;
		else
			adev->mes.sdma_hqd_mask[i] = 0xfc;
	}

	r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs);
	if (r) {
		dev_err(adev->dev,
			"(%d) sch_ctx_offs wb alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.sch_ctx_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.sch_ctx_offs * 4);
	adev->mes.sch_ctx_ptr =
		(uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs];

	r = amdgpu_device_wb_get(adev, &adev->mes.query_status_fence_offs);
	if (r) {
		amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
		dev_err(adev->dev,
			"(%d) query_status_fence_offs wb alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.query_status_fence_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.query_status_fence_offs * 4);
	adev->mes.query_status_fence_ptr =
		(uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs];

	r = amdgpu_device_wb_get(adev, &adev->mes.read_val_offs);
	if (r) {
		amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
		amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
		dev_err(adev->dev,
			"(%d) read_val_offs alloc failed\n", r);
		goto error_ids;
	}
	adev->mes.read_val_gpu_addr =
		adev->wb.gpu_addr + (adev->mes.read_val_offs * 4);
	adev->mes.read_val_ptr =
		(uint32_t *)&adev->wb.wb[adev->mes.read_val_offs];

	r = amdgpu_mes_doorbell_init(adev);
	if (r)
		goto error;

	return 0;

error:
	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
error_ids:
	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
	return r;
}

void amdgpu_mes_fini(struct amdgpu_device *adev)
{
	amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
	amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
	amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
	amdgpu_mes_doorbell_free(adev);

	idr_destroy(&adev->mes.pasid_idr);
	idr_destroy(&adev->mes.gang_id_idr);
	idr_destroy(&adev->mes.queue_id_idr);
	ida_destroy(&adev->mes.doorbell_ida);
	mutex_destroy(&adev->mes.mutex_hidden);
}

static void amdgpu_mes_queue_free_mqd(struct amdgpu_mes_queue *q)
{
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
}

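/*
 * Create a MES process for @pasid: allocate and clear the process
 * context BO, then publish the process in the pasid idr under the MES
 * lock. The VM page directory address is cached for later queue mapping.
 */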
int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
			      struct amdgpu_vm *vm)
{
	struct amdgpu_mes_process *process;
	int r;

	/* allocate the mes process buffer */
	process = kzalloc(sizeof(struct amdgpu_mes_process), GFP_KERNEL);
	if (!process) {
		DRM_ERROR("no more memory to create mes process\n");
		return -ENOMEM;
	}

	/* allocate the process context bo and map it */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_PROC_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &process->proc_ctx_bo,
				    &process->proc_ctx_gpu_addr,
				    &process->proc_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate process context bo\n");
		goto clean_up_memory;
	}
	memset(process->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* add the mes process to idr list */
	r = idr_alloc(&adev->mes.pasid_idr, process, pasid, pasid + 1,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to allocate idr for pasid=%d\n", pasid);
		goto clean_up_ctx;
	}

	INIT_LIST_HEAD(&process->gang_list);
	process->vm = vm;
	process->pasid = pasid;
	process->process_quantum = adev->mes.default_process_quantum;
	process->pd_gpu_addr = amdgpu_bo_gpu_offset(vm->root.bo);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
clean_up_memory:
	kfree(process);
	return r;
}

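/*
 * Tear down a MES process: remove every queue of every gang from the
 * hardware while holding the MES lock, drop all idr entries, then free
 * the MQDs, context BOs and bookkeeping outside the lock.
 */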
void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang, *tmp1;
	struct amdgpu_mes_queue *queue, *tmp2;
	struct mes_remove_queue_input queue_input;
	unsigned long flags;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_WARN("pasid %d doesn't exist\n", pasid);
		amdgpu_mes_unlock(&adev->mes);
		return;
	}

	/* Remove all queues from hardware */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
			idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
			spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

			queue_input.doorbell_offset = queue->doorbell_off;
			queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

			r = adev->mes.funcs->remove_hw_queue(&adev->mes,
							     &queue_input);
			if (r)
				DRM_WARN("failed to remove hardware queue\n");
		}

		idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	}

	idr_remove(&adev->mes.pasid_idr, pasid);
	amdgpu_mes_unlock(&adev->mes);

	/* free all memory allocated by the process */
	list_for_each_entry_safe(gang, tmp1, &process->gang_list, list) {
		/* free all queues in the gang */
		list_for_each_entry_safe(queue, tmp2, &gang->queue_list, list) {
			amdgpu_mes_queue_free_mqd(queue);
			list_del(&queue->list);
			kfree(queue);
		}
		amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
				      &gang->gang_ctx_gpu_addr,
				      &gang->gang_ctx_cpu_ptr);
		list_del(&gang->list);
		kfree(gang);
	}
	amdgpu_bo_free_kernel(&process->proc_ctx_bo,
			      &process->proc_ctx_gpu_addr,
			      &process->proc_ctx_cpu_ptr);
	kfree(process);
}

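/*
 * Create a gang inside an existing process: allocate and clear the gang
 * context BO, pick a gang id from the idr, and inherit quantum and
 * priority settings from @gprops (falling back to the default gang
 * quantum when none is given).
 */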
int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
			struct amdgpu_mes_gang_properties *gprops,
			int *gang_id)
{
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	int r;

	/* allocate the mes gang buffer */
	gang = kzalloc(sizeof(struct amdgpu_mes_gang), GFP_KERNEL);
	if (!gang)
		return -ENOMEM;

	/* allocate the gang context bo and map it to cpu space */
	r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_GANG_CTX_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &gang->gang_ctx_bo,
				    &gang->gang_ctx_gpu_addr,
				    &gang->gang_ctx_cpu_ptr);
	if (r) {
		DRM_ERROR("failed to allocate gang context bo\n");
		goto clean_up_mem;
	}
	memset(gang->gang_ctx_cpu_ptr, 0, AMDGPU_MES_GANG_CTX_SIZE);

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	process = idr_find(&adev->mes.pasid_idr, pasid);
	if (!process) {
		DRM_ERROR("pasid %d doesn't exist\n", pasid);
		r = -EINVAL;
		goto clean_up_ctx;
	}

	/* add the mes gang to idr list */
	r = idr_alloc(&adev->mes.gang_id_idr, gang, 1, 0,
		      GFP_KERNEL);
	if (r < 0) {
		DRM_ERROR("failed to allocate idr for gang\n");
		goto clean_up_ctx;
	}

	gang->gang_id = r;
	*gang_id = r;

	INIT_LIST_HEAD(&gang->queue_list);
	gang->process = process;
	gang->priority = gprops->priority;
	gang->gang_quantum = gprops->gang_quantum ?
		gprops->gang_quantum : adev->mes.default_gang_quantum;
	gang->global_priority_level = gprops->global_priority_level;
	gang->inprocess_gang_priority = gprops->inprocess_gang_priority;
	list_add_tail(&gang->list, &process->gang_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_ctx:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);
clean_up_mem:
	kfree(gang);
	return r;
}

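/*
 * Remove an empty gang. Fails with -EBUSY while hardware queues are
 * still attached to it.
 */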
int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id)
{
	struct amdgpu_mes_gang *gang;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}

	if (!list_empty(&gang->queue_list)) {
		DRM_ERROR("queue list is not empty\n");
		amdgpu_mes_unlock(&adev->mes);
		return -EBUSY;
	}

	idr_remove(&adev->mes.gang_id_idr, gang->gang_id);
	list_del(&gang->list);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_bo_free_kernel(&gang->gang_ctx_bo,
			      &gang->gang_ctx_gpu_addr,
			      &gang->gang_ctx_cpu_ptr);

	kfree(gang);

	return 0;
}

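/*
 * Suspend/resume walk every gang of every process and ask the MES
 * firmware to suspend or resume it. Failures are only reported, the
 * walk continues.
 */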
int amdgpu_mes_suspend(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	struct mes_suspend_gang_input input;
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to suspend pasid %d gangid %d",
					  pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}

int amdgpu_mes_resume(struct amdgpu_device *adev)
{
	struct idr *idp;
	struct amdgpu_mes_process *process;
	struct amdgpu_mes_gang *gang;
	struct mes_resume_gang_input input;
	int r, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	idp = &adev->mes.pasid_idr;

	idr_for_each_entry(idp, process, pasid) {
		list_for_each_entry(gang, &process->gang_list, list) {
			r = adev->mes.funcs->resume_gang(&adev->mes, &input);
			if (r)
				DRM_ERROR("failed to resume pasid %d gangid %d",
					  pasid, gang->gang_id);
		}
	}

	amdgpu_mes_unlock(&adev->mes);
	return 0;
}

static int amdgpu_mes_queue_alloc_mqd(struct amdgpu_device *adev,
				      struct amdgpu_mes_queue *q,
				      struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	u32 mqd_size = mqd_mgr->mqd_size;
	int r;

	r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &q->mqd_obj,
				    &q->mqd_gpu_addr, &q->mqd_cpu_ptr);
	if (r) {
		dev_warn(adev->dev, "failed to create queue mqd bo (%d)", r);
		return r;
	}
	memset(q->mqd_cpu_ptr, 0, mqd_size);

	r = amdgpu_bo_reserve(q->mqd_obj, false);
	if (unlikely(r != 0))
		goto clean_up;

	return 0;

clean_up:
	amdgpu_bo_free_kernel(&q->mqd_obj,
			      &q->mqd_gpu_addr,
			      &q->mqd_cpu_ptr);
	return r;
}

static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
				      struct amdgpu_mes_queue *q,
				      struct amdgpu_mes_queue_properties *p)
{
	struct amdgpu_mqd *mqd_mgr = &adev->mqds[p->queue_type];
	struct amdgpu_mqd_prop mqd_prop = {0};

	mqd_prop.mqd_gpu_addr = q->mqd_gpu_addr;
	mqd_prop.hqd_base_gpu_addr = p->hqd_base_gpu_addr;
	mqd_prop.rptr_gpu_addr = p->rptr_gpu_addr;
	mqd_prop.wptr_gpu_addr = p->wptr_gpu_addr;
	mqd_prop.queue_size = p->queue_size;
	mqd_prop.use_doorbell = true;
	mqd_prop.doorbell_index = p->doorbell_off;
	mqd_prop.eop_gpu_addr = p->eop_gpu_addr;
	mqd_prop.hqd_pipe_priority = p->hqd_pipe_priority;
	mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
	mqd_prop.hqd_active = false;

	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		mutex_lock(&adev->srbm_mutex);
		amdgpu_gfx_select_me_pipe_q(adev, p->ring->me, p->ring->pipe, 0, 0, 0);
	}

	mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);

	if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
	    p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	}

	amdgpu_bo_unreserve(q->mqd_obj);
}

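/*
 * Add a hardware queue to an existing gang: allocate and initialize the
 * MQD, pick a queue id and a kernel doorbell, then hand the queue
 * description (page table base, VA range, quanta, priorities, MQD and
 * wptr addresses) to the MES firmware via add_hw_queue.
 */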
int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
			    struct amdgpu_mes_queue_properties *qprops,
			    int *queue_id)
{
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_add_queue_input queue_input;
	unsigned long flags;
	int r;

	memset(&queue_input, 0, sizeof(struct mes_add_queue_input));

	/* allocate the mes queue buffer */
	queue = kzalloc(sizeof(struct amdgpu_mes_queue), GFP_KERNEL);
	if (!queue) {
		DRM_ERROR("Failed to allocate memory for queue\n");
		return -ENOMEM;
	}

	/* Allocate the queue mqd */
	r = amdgpu_mes_queue_alloc_mqd(adev, queue, qprops);
	if (r)
		goto clean_up_memory;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		r = -EINVAL;
		goto clean_up_mqd;
	}

	/* add the mes queue to idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	r = idr_alloc(&adev->mes.queue_id_idr, queue, 1, 0,
		      GFP_ATOMIC);
	if (r < 0) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		goto clean_up_mqd;
	}
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
	*queue_id = queue->queue_id = r;

	/* allocate a doorbell index for the queue */
	r = amdgpu_mes_kernel_doorbell_get(adev, gang->process,
					   qprops->queue_type,
					   &qprops->doorbell_off);
	if (r)
		goto clean_up_queue_id;

	/* initialize the queue mqd */
	amdgpu_mes_queue_init_mqd(adev, queue, qprops);

	/* add hw queue to mes */
	queue_input.process_id = gang->process->pasid;

	queue_input.page_table_base_addr =
		adev->vm_manager.vram_base_offset + gang->process->pd_gpu_addr -
		adev->gmc.vram_start;

	queue_input.process_va_start = 0;
	queue_input.process_va_end =
		(adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;
	queue_input.process_quantum = gang->process->process_quantum;
	queue_input.process_context_addr = gang->process->proc_ctx_gpu_addr;
	queue_input.gang_quantum = gang->gang_quantum;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;
	queue_input.inprocess_gang_priority = gang->inprocess_gang_priority;
	queue_input.gang_global_priority_level = gang->global_priority_level;
	queue_input.doorbell_offset = qprops->doorbell_off;
	queue_input.mqd_addr = queue->mqd_gpu_addr;
	queue_input.wptr_addr = qprops->wptr_gpu_addr;
	queue_input.wptr_mc_addr = qprops->wptr_mc_addr;
	queue_input.queue_type = qprops->queue_type;
	queue_input.paging = qprops->paging;
	queue_input.is_kfd_process = 0;

	r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
	if (r) {
		DRM_ERROR("failed to add hardware queue to MES, doorbell=0x%llx\n",
			  qprops->doorbell_off);
		goto clean_up_doorbell;
	}

	DRM_DEBUG("MES hw queue was added, pasid=%d, gang id=%d, "
		  "queue type=%d, doorbell=0x%llx\n",
		  gang->process->pasid, gang_id, qprops->queue_type,
		  qprops->doorbell_off);

	queue->ring = qprops->ring;
	queue->doorbell_off = qprops->doorbell_off;
	queue->wptr_gpu_addr = qprops->wptr_gpu_addr;
	queue->queue_type = qprops->queue_type;
	queue->paging = qprops->paging;
	queue->gang = gang;
	queue->ring->mqd_ptr = queue->mqd_cpu_ptr;
	list_add_tail(&queue->list, &gang->queue_list);

	amdgpu_mes_unlock(&adev->mes);
	return 0;

clean_up_doorbell:
	amdgpu_mes_kernel_doorbell_free(adev, gang->process,
					qprops->doorbell_off);
clean_up_queue_id:
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);
	idr_remove(&adev->mes.queue_id_idr, queue->queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
clean_up_mqd:
	amdgpu_mes_unlock(&adev->mes);
	amdgpu_mes_queue_free_mqd(queue);
clean_up_memory:
	kfree(queue);
	return r;
}

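/*
 * Remove a hardware queue: drop it from the queue idr, ask the MES
 * firmware to remove it, then release its doorbell and MQD.
 */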
int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id)
{
	unsigned long flags;
	struct amdgpu_mes_queue *queue;
	struct amdgpu_mes_gang *gang;
	struct mes_remove_queue_input queue_input;
	int r;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);

	/* remove the mes queue from idr list */
	spin_lock_irqsave(&adev->mes.queue_id_lock, flags);

	queue = idr_find(&adev->mes.queue_id_idr, queue_id);
	if (!queue) {
		spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);
		amdgpu_mes_unlock(&adev->mes);
		DRM_ERROR("queue id %d doesn't exist\n", queue_id);
		return -EINVAL;
	}

	idr_remove(&adev->mes.queue_id_idr, queue_id);
	spin_unlock_irqrestore(&adev->mes.queue_id_lock, flags);

	DRM_DEBUG("try to remove queue, doorbell off = 0x%llx\n",
		  queue->doorbell_off);

	gang = queue->gang;
	queue_input.doorbell_offset = queue->doorbell_off;
	queue_input.gang_context_addr = gang->gang_ctx_gpu_addr;

	r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to remove hardware queue, queue id = %d\n",
			  queue_id);

	list_del(&queue->list);
	amdgpu_mes_kernel_doorbell_free(adev, gang->process,
					queue->doorbell_off);
	amdgpu_mes_unlock(&adev->mes);

	amdgpu_mes_queue_free_mqd(queue);
	kfree(queue);
	return 0;
}

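/*
 * Preempt/unmap a queue that is not managed by MES. The trailing fence
 * at @gpu_addr is written with @seq by the firmware once the unmap has
 * completed.
 */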
int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  enum amdgpu_unmap_queues_action action,
				  u64 gpu_addr, u64 seq)
{
	struct mes_unmap_legacy_queue_input queue_input;
	int r;

	queue_input.action = action;
	queue_input.queue_type = ring->funcs->type;
	queue_input.doorbell_offset = ring->doorbell_index;
	queue_input.pipe_id = ring->pipe;
	queue_input.queue_id = ring->queue;
	queue_input.trail_fence_addr = gpu_addr;
	queue_input.trail_fence_data = seq;

	r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
	if (r)
		DRM_ERROR("failed to unmap legacy queue\n");

	return r;
}

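/*
 * Register access helpers routed through the MES firmware misc op
 * interface, for cases where the driver cannot access the register
 * directly (e.g. under SR-IOV). Reads land in the read_val writeback
 * slot set up in amdgpu_mes_init().
 */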
uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	struct mes_misc_op_input op_input;
	int r, val = 0;

	op_input.op = MES_MISC_OP_READ_REG;
	op_input.read_reg.reg_offset = reg;
	op_input.read_reg.buffer_addr = adev->mes.read_val_gpu_addr;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes rreg is not supported!\n");
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to read reg (0x%x)\n", reg);
	else
		val = *(adev->mes.read_val_ptr);

error:
	return val;
}

int amdgpu_mes_wreg(struct amdgpu_device *adev,
		    uint32_t reg, uint32_t val)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRITE_REG;
	op_input.write_reg.reg_offset = reg;
	op_input.write_reg.reg_value = val;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes wreg is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to write reg (0x%x)\n", reg);

error:
	return r;
}

int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
				  uint32_t reg0, uint32_t reg1,
				  uint32_t ref, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WR_WAIT;
	op_input.wrm_reg.reg0 = reg0;
	op_input.wrm_reg.reg1 = reg1;
	op_input.wrm_reg.ref = ref;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg_write_reg_wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_write_reg_wait\n");

error:
	return r;
}

int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
			uint32_t val, uint32_t mask)
{
	struct mes_misc_op_input op_input;
	int r;

	op_input.op = MES_MISC_OP_WRM_REG_WAIT;
	op_input.wrm_reg.reg0 = reg;
	op_input.wrm_reg.ref = val;
	op_input.wrm_reg.mask = mask;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes reg wait is not supported!\n");
		r = -EINVAL;
		goto error;
	}

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to reg_wait\n");

error:
	return r;
}

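/*
 * Configure the shader debugger for a process via the MES misc op.
 * A process context flush must go through
 * amdgpu_mes_flush_shader_debugger() instead.
 */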
int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
				   uint64_t process_context_addr,
				   uint32_t spi_gdbg_per_vmid_cntl,
				   const uint32_t *tcp_watch_cntl,
				   uint32_t flags,
				   bool trap_en)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes set shader debugger is not supported!\n");
		return -EINVAL;
	}

	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
	op_input.set_shader_debugger.process_context_addr = process_context_addr;
	op_input.set_shader_debugger.flags.u32all = flags;

	/* use amdgpu_mes_flush_shader_debugger instead */
	if (op_input.set_shader_debugger.flags.process_ctx_flush)
		return -EINVAL;

	op_input.set_shader_debugger.spi_gdbg_per_vmid_cntl = spi_gdbg_per_vmid_cntl;
	memcpy(op_input.set_shader_debugger.tcp_watch_cntl, tcp_watch_cntl,
	       sizeof(op_input.set_shader_debugger.tcp_watch_cntl));

	if (((adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) >>
			AMDGPU_MES_API_VERSION_SHIFT) >= 14)
		op_input.set_shader_debugger.trap_en = trap_en;

	amdgpu_mes_lock(&adev->mes);

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to set_shader_debugger\n");

	amdgpu_mes_unlock(&adev->mes);

	return r;
}

int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
				     uint64_t process_context_addr)
{
	struct mes_misc_op_input op_input = {0};
	int r;

	if (!adev->mes.funcs->misc_op) {
		DRM_ERROR("mes flush shader debugger is not supported!\n");
		return -EINVAL;
	}

	op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
	op_input.set_shader_debugger.process_context_addr = process_context_addr;
	op_input.set_shader_debugger.flags.process_ctx_flush = true;

	amdgpu_mes_lock(&adev->mes);

	r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
	if (r)
		DRM_ERROR("failed to set_shader_debugger\n");

	amdgpu_mes_unlock(&adev->mes);

	return r;
}

static void
amdgpu_mes_ring_to_queue_props(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring,
			       struct amdgpu_mes_queue_properties *props)
{
	props->queue_type = ring->funcs->type;
	props->hqd_base_gpu_addr = ring->gpu_addr;
	props->rptr_gpu_addr = ring->rptr_gpu_addr;
	props->wptr_gpu_addr = ring->wptr_gpu_addr;
	props->wptr_mc_addr =
		ring->mes_ctx->meta_data_mc_addr + ring->wptr_offs;
	props->queue_size = ring->ring_size;
	props->eop_gpu_addr = ring->eop_gpu_addr;
	props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
	props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
	props->paging = false;
	props->ring = ring;
}

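/*
 * Translate a per-ring slot id into a byte offset inside struct
 * amdgpu_mes_ctx_meta_data for the given engine array; used by
 * amdgpu_mes_ctx_get_offs() below.
 */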
#define DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(_eng)			\
do {									\
	if (id_offs < AMDGPU_MES_CTX_MAX_OFFS)				\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].slots[id_offs]);	\
	else if (id_offs == AMDGPU_MES_CTX_RING_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ring);			\
	else if (id_offs == AMDGPU_MES_CTX_IB_OFFS)			\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].ib);			\
	else if (id_offs == AMDGPU_MES_CTX_PADDING_OFFS)		\
		return offsetof(struct amdgpu_mes_ctx_meta_data,	\
				_eng[ring->idx].padding);		\
} while (0)

int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs)
{
	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(gfx);
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(compute);
		break;
	case AMDGPU_RING_TYPE_SDMA:
		DEFINE_AMDGPU_MES_CTX_GET_OFFS_ENG(sdma);
		break;
	default:
		break;
	}

	WARN_ON(1);
	return -EINVAL;
}

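/*
 * Create a driver-owned ring on top of a MES hardware queue: mark it as
 * a MES queue backed by the ctx meta data BO, borrow the ring funcs of
 * the first gfx/compute/sdma ring, and register it with the MES
 * firmware through amdgpu_mes_add_hw_queue().
 */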
int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
			int queue_type, int idx,
			struct amdgpu_mes_ctx_data *ctx_data,
			struct amdgpu_ring **out)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang *gang;
	struct amdgpu_mes_queue_properties qprops = {0};
	int r, queue_id, pasid;

	/*
	 * Avoid taking any other locks under MES lock to avoid circular
	 * lock dependencies.
	 */
	amdgpu_mes_lock(&adev->mes);
	gang = idr_find(&adev->mes.gang_id_idr, gang_id);
	if (!gang) {
		DRM_ERROR("gang id %d doesn't exist\n", gang_id);
		amdgpu_mes_unlock(&adev->mes);
		return -EINVAL;
	}
	pasid = gang->process->pasid;

	ring = kzalloc(sizeof(struct amdgpu_ring), GFP_KERNEL);
	if (!ring) {
		amdgpu_mes_unlock(&adev->mes);
		return -ENOMEM;
	}

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->is_mes_queue = true;
	ring->mes_ctx = ctx_data;
	ring->idx = idx;
	ring->no_scheduler = true;

	if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		int offset = offsetof(struct amdgpu_mes_ctx_meta_data,
				      compute[ring->idx].mec_hpd);
		ring->eop_gpu_addr =
			amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
	}

	switch (queue_type) {
	case AMDGPU_RING_TYPE_GFX:
		ring->funcs = adev->gfx.gfx_ring[0].funcs;
		ring->me = adev->gfx.gfx_ring[0].me;
		ring->pipe = adev->gfx.gfx_ring[0].pipe;
		break;
	case AMDGPU_RING_TYPE_COMPUTE:
		ring->funcs = adev->gfx.compute_ring[0].funcs;
		ring->me = adev->gfx.compute_ring[0].me;
		ring->pipe = adev->gfx.compute_ring[0].pipe;
		break;
	case AMDGPU_RING_TYPE_SDMA:
		ring->funcs = adev->sdma.instance[0].ring.funcs;
		break;
	default:
		BUG();
	}

	r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		goto clean_up_memory;

	amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);

	dma_fence_wait(gang->process->vm->last_update, false);
	dma_fence_wait(ctx_data->meta_data_va->last_pt_update, false);
	amdgpu_mes_unlock(&adev->mes);

	r = amdgpu_mes_add_hw_queue(adev, gang_id, &qprops, &queue_id);
	if (r)
		goto clean_up_ring;

	ring->hw_queue_id = queue_id;
	ring->doorbell_index = qprops.doorbell_off;

	if (queue_type == AMDGPU_RING_TYPE_GFX)
		sprintf(ring->name, "gfx_%d.%d.%d", pasid, gang_id, queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
		sprintf(ring->name, "compute_%d.%d.%d", pasid, gang_id,
			queue_id);
	else if (queue_type == AMDGPU_RING_TYPE_SDMA)
		sprintf(ring->name, "sdma_%d.%d.%d", pasid, gang_id,
			queue_id);
	else
		BUG();

	*out = ring;
	return 0;

clean_up_ring:
	/* the MES lock was already dropped before add_hw_queue */
	amdgpu_ring_fini(ring);
	kfree(ring);
	return r;

clean_up_memory:
	kfree(ring);
	amdgpu_mes_unlock(&adev->mes);
	return r;
}

void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
			    struct amdgpu_ring *ring)
{
	if (!ring)
		return;

	amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
	amdgpu_ring_fini(ring);
	kfree(ring);
}

uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
						  enum amdgpu_mes_priority_level prio)
{
	return adev->mes.aggregated_doorbells[prio];
}

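/*
 * Allocate and clear the ctx meta data BO that backs the ring, IB and
 * padding slots of a MES context.
 */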
int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	int r;

	r = amdgpu_bo_create_kernel(adev,
			    sizeof(struct amdgpu_mes_ctx_meta_data),
			    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
			    &ctx_data->meta_data_obj,
			    &ctx_data->meta_data_mc_addr,
			    &ctx_data->meta_data_ptr);
	if (r) {
		dev_warn(adev->dev, "(%d) create CTX bo failed\n", r);
		return r;
	}

	if (!ctx_data->meta_data_obj)
		return -ENOMEM;

	memset(ctx_data->meta_data_ptr, 0,
	       sizeof(struct amdgpu_mes_ctx_meta_data));

	return 0;
}

void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data)
{
	if (ctx_data->meta_data_obj)
		amdgpu_bo_free_kernel(&ctx_data->meta_data_obj,
				      &ctx_data->meta_data_mc_addr,
				      &ctx_data->meta_data_ptr);
}

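/*
 * Map the ctx meta data BO into a process VM at the GPU address chosen
 * in @ctx_data, waiting for the page table updates to land before the
 * mapping is used.
 */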
int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm,
				 struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_sync sync;
	struct drm_exec exec;
	int r;

	amdgpu_sync_create(&sync);

	drm_exec_init(&exec, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec,
				      &ctx_data->meta_data_obj->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error_fini_exec;
	}

	bo_va = amdgpu_vm_bo_add(adev, vm, ctx_data->meta_data_obj);
	if (!bo_va) {
		DRM_ERROR("failed to create bo_va for meta data BO\n");
		r = -ENOMEM;
		goto error_fini_exec;
	}

	r = amdgpu_vm_bo_map(adev, bo_va, ctx_data->meta_data_gpu_addr, 0,
			     sizeof(struct amdgpu_mes_ctx_meta_data),
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);

	if (r) {
		DRM_ERROR("failed to do bo_map on meta data, err=%d\n", r);
		goto error_del_bo_va;
	}

	r = amdgpu_vm_bo_update(adev, bo_va, false);
	if (r) {
		DRM_ERROR("failed to do vm_bo_update on meta data\n");
		goto error_del_bo_va;
	}
	amdgpu_sync_fence(&sync, bo_va->last_pt_update);

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r) {
		DRM_ERROR("failed to update pdes on meta data\n");
		goto error_del_bo_va;
	}
	amdgpu_sync_fence(&sync, vm->last_update);

	amdgpu_sync_wait(&sync, false);
	drm_exec_fini(&exec);

	amdgpu_sync_free(&sync);
	ctx_data->meta_data_va = bo_va;
	return 0;

error_del_bo_va:
	amdgpu_vm_bo_del(adev, bo_va);

error_fini_exec:
	drm_exec_fini(&exec);
	amdgpu_sync_free(&sync);
	return r;
}

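/*
 * Unmap the ctx meta data BO from the VM and fence the BO against the
 * page table clears before it can be freed.
 */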
int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
				   struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_bo_va *bo_va = ctx_data->meta_data_va;
	struct amdgpu_bo *bo = ctx_data->meta_data_obj;
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct dma_fence *fence;
	struct drm_exec exec;
	long r;

	drm_exec_init(&exec, 0);
	drm_exec_until_all_locked(&exec) {
		r = drm_exec_lock_obj(&exec,
				      &ctx_data->meta_data_obj->tbo.base);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;

		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out_unlock;
	}

	amdgpu_vm_bo_del(adev, bo_va);
	if (!amdgpu_vm_ready(vm))
		goto out_unlock;

	r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
				   &fence);
	if (r)
		goto out_unlock;
	if (fence) {
		amdgpu_bo_fence(bo, fence, true);
		fence = NULL;
	}

	r = amdgpu_vm_clear_freed(adev, vm, &fence);
	if (r || !fence)
		goto out_unlock;

	dma_fence_wait(fence, false);
	amdgpu_bo_fence(bo, fence, true);
	dma_fence_put(fence);

out_unlock:
	if (unlikely(r < 0))
		dev_err(adev->dev, "failed to clear page tables (%ld)\n", r);
	drm_exec_fini(&exec);

	return r;
}

static int amdgpu_mes_test_create_gang_and_queues(struct amdgpu_device *adev,
					  int pasid, int *gang_id,
					  int queue_type, int num_queue,
					  struct amdgpu_ring **added_rings,
					  struct amdgpu_mes_ctx_data *ctx_data)
{
	struct amdgpu_ring *ring;
	struct amdgpu_mes_gang_properties gprops = {0};
	int r, j;

	/* create a gang for the process */
	gprops.priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.gang_quantum = adev->mes.default_gang_quantum;
	gprops.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
	gprops.global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;

	r = amdgpu_mes_add_gang(adev, pasid, &gprops, gang_id);
	if (r) {
		DRM_ERROR("failed to add gang\n");
		return r;
	}

	/* create queues for the gang */
	for (j = 0; j < num_queue; j++) {
		r = amdgpu_mes_add_ring(adev, *gang_id, queue_type, j,
					ctx_data, &ring);
		if (r) {
			DRM_ERROR("failed to add ring\n");
			break;
		}

		DRM_INFO("ring %s was added\n", ring->name);
		added_rings[j] = ring;
	}

	return 0;
}

static int amdgpu_mes_test_queues(struct amdgpu_ring **added_rings)
{
	struct amdgpu_ring *ring;
	int i, r;

	for (i = 0; i < AMDGPU_MES_CTX_MAX_RINGS; i++) {
		ring = added_rings[i];
		if (!ring)
			continue;

		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;

		r = amdgpu_ring_test_ib(ring, 1000 * 10);
		if (r) {
			DRM_DEV_ERROR(ring->adev->dev,
				      "ring %s ib test failed (%d)\n",
				      ring->name, r);
			return r;
		} else {
			DRM_INFO("ring %s ib test pass\n", ring->name);
		}
	}

	return 0;
}

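/*
 * Self test: create a temporary VM and MES process, spawn one gang per
 * queue type with a single queue each, run ring and IB tests on them,
 * then tear everything down again.
 */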
int amdgpu_mes_self_test(struct amdgpu_device *adev)
{
	struct amdgpu_vm *vm = NULL;
	struct amdgpu_mes_ctx_data ctx_data = {0};
	struct amdgpu_ring *added_rings[AMDGPU_MES_CTX_MAX_RINGS] = { NULL };
	int gang_ids[3] = {0};
	int queue_types[][2] = { { AMDGPU_RING_TYPE_GFX, 1 },
				 { AMDGPU_RING_TYPE_COMPUTE, 1 },
				 { AMDGPU_RING_TYPE_SDMA, 1 } };
	int i, r, pasid, k = 0;

	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!");
		pasid = 0;
	}

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm) {
		r = -ENOMEM;
		goto error_pasid;
	}

	r = amdgpu_vm_init(adev, vm, -1);
	if (r) {
		DRM_ERROR("failed to initialize vm\n");
		goto error_pasid;
	}

	r = amdgpu_mes_ctx_alloc_meta_data(adev, &ctx_data);
	if (r) {
		DRM_ERROR("failed to alloc ctx meta data\n");
		goto error_fini;
	}

	ctx_data.meta_data_gpu_addr = AMDGPU_VA_RESERVED_SIZE;
	r = amdgpu_mes_ctx_map_meta_data(adev, vm, &ctx_data);
	if (r) {
		DRM_ERROR("failed to map ctx meta data\n");
		goto error_vm;
	}

	r = amdgpu_mes_create_process(adev, pasid, vm);
	if (r) {
		DRM_ERROR("failed to create MES process\n");
		goto error_vm;
	}

	for (i = 0; i < ARRAY_SIZE(queue_types); i++) {
		/* On GFX v10.3, the firmware doesn't support mapping SDMA queues yet. */
		if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0) &&
		    adev->ip_versions[GC_HWIP][0] < IP_VERSION(11, 0, 0) &&
		    queue_types[i][0] == AMDGPU_RING_TYPE_SDMA)
			continue;

		r = amdgpu_mes_test_create_gang_and_queues(adev, pasid,
							   &gang_ids[i],
							   queue_types[i][0],
							   queue_types[i][1],
							   &added_rings[k],
							   &ctx_data);
		if (r)
			goto error_queues;

		k += queue_types[i][1];
	}

	/* start ring test and ib test for MES queues */
	amdgpu_mes_test_queues(added_rings);

error_queues:
	/* remove all queues */
	for (i = 0; i < ARRAY_SIZE(added_rings); i++) {
		if (!added_rings[i])
			continue;
		amdgpu_mes_remove_ring(adev, added_rings[i]);
	}

	for (i = 0; i < ARRAY_SIZE(gang_ids); i++) {
		if (!gang_ids[i])
			continue;
		amdgpu_mes_remove_gang(adev, gang_ids[i]);
	}

	amdgpu_mes_destroy_process(adev, pasid);

error_vm:
	amdgpu_mes_ctx_unmap_meta_data(adev, &ctx_data);

error_fini:
	amdgpu_vm_fini(adev, vm);

error_pasid:
	if (pasid)
		amdgpu_pasid_free(pasid);

	amdgpu_mes_ctx_free_meta_data(&ctx_data);
	kfree(vm);
	return 0;
}

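/*
 * Request the MES firmware image for @pipe and, for PSP front-door
 * loading, register its ucode and data sections. GC 11+ first tries the
 * "<prefix>_mes_2.bin"/"<prefix>_mes1.bin" naming and falls back to
 * "<prefix>_mes.bin" for the scheduler pipe.
 */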
int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
{
	const struct mes_firmware_header_v1_0 *mes_hdr;
	struct amdgpu_firmware_info *info;
	char ucode_prefix[30];
	char fw_name[40];
	bool need_retry = false;
	int r;

	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix,
				       sizeof(ucode_prefix));
	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0)) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "_2" : "1");
		need_retry = true;
	} else {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
			 ucode_prefix,
			 pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1");
	}

	r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], fw_name);
	if (r && need_retry && pipe == AMDGPU_MES_SCHED_PIPE) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes.bin",
			 ucode_prefix);
		DRM_INFO("try to fall back to %s\n", fw_name);
		r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe],
					 fw_name);
	}

	if (r)
		goto out;

	mes_hdr = (const struct mes_firmware_header_v1_0 *)
		adev->mes.fw[pipe]->data;
	adev->mes.uc_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
	adev->mes.data_start_addr[pipe] =
		le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
		((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		int ucode, ucode_data;

		if (pipe == AMDGPU_MES_SCHED_PIPE) {
			ucode = AMDGPU_UCODE_ID_CP_MES;
			ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
		} else {
			ucode = AMDGPU_UCODE_ID_CP_MES1;
			ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
		}

		info = &adev->firmware.ucode[ucode];
		info->ucode_id = ucode;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
			      PAGE_SIZE);

		info = &adev->firmware.ucode[ucode_data];
		info->ucode_id = ucode_data;
		info->fw = adev->mes.fw[pipe];
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
			      PAGE_SIZE);
	}

	return 0;
out:
	amdgpu_ucode_release(&adev->mes.fw[pipe]);
	return r;
}