/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu_ids.h"

#include <linux/idr.h>
#include <linux/dma-fence-array.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"
/*
 * PASID manager
 *
 * PASIDs are global address space identifiers that can be shared
 * between the GPU, an IOMMU and the driver. VMs on different devices
 * may use the same PASID if they share the same address
 * space. Therefore PASIDs are allocated using a global IDA. VMs are
 * looked up from the PASID per amdgpu_device.
 */
static DEFINE_IDA(amdgpu_pasid_ida);

/* Helper to free pasid from a fence callback */
struct amdgpu_pasid_cb {
	struct dma_fence_cb cb;
	u32 pasid;
};

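/*
 * Illustrative PASID life cycle (a sketch, not lifted from a real caller;
 * the 16-bit width is just an example value):
 *
 *	int pasid = amdgpu_pasid_alloc(16);
 *
 *	if (pasid < 0)
 *		return pasid;
 *	...
 *	amdgpu_pasid_free(pasid);
 */
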
/**
 * amdgpu_pasid_alloc - Allocate a PASID
 * @bits: Maximum width of the PASID in bits, must be at least 1
 *
 * Allocates a PASID of the given width while keeping smaller PASIDs
 * available if possible.
 *
 * Returns a positive integer on success. Returns %-EINVAL if bits==0.
 * Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on
 * memory allocation failure.
 */
int amdgpu_pasid_alloc(unsigned int bits)
{
	int pasid = -EINVAL;

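	/* Try the range [1 << (bits - 1), 1 << bits) first and fall back to
	 * progressively narrower ranges on -ENOSPC, so that small PASIDs
	 * stay available for devices that support fewer PASID bits.
	 */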
	for (bits = min(bits, 31U); bits > 0; bits--) {
		pasid = ida_simple_get(&amdgpu_pasid_ida,
				       1U << (bits - 1), 1U << bits,
				       GFP_KERNEL);
		if (pasid != -ENOSPC)
			break;
	}

	if (pasid >= 0)
		trace_amdgpu_pasid_allocated(pasid);

	return pasid;
}

/**
 * amdgpu_pasid_free - Free a PASID
 * @pasid: PASID to free
 */
void amdgpu_pasid_free(u32 pasid)
{
	trace_amdgpu_pasid_freed(pasid);
	ida_simple_remove(&amdgpu_pasid_ida, pasid);
}

static void amdgpu_pasid_free_cb(struct dma_fence *fence,
				 struct dma_fence_cb *_cb)
{
	struct amdgpu_pasid_cb *cb =
		container_of(_cb, struct amdgpu_pasid_cb, cb);

	amdgpu_pasid_free(cb->pasid);
	dma_fence_put(fence);
	kfree(cb);
}

/**
 * amdgpu_pasid_free_delayed - free pasid when fences signal
 *
 * @resv: reservation object with the fences to wait for
 * @pasid: pasid to free
 *
 * Free the pasid only after all the fences in resv are signaled.
 */
void amdgpu_pasid_free_delayed(struct dma_resv *resv,
			       u32 pasid)
{
	struct amdgpu_pasid_cb *cb;
	struct dma_fence *fence;
	int r;

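	/* Collapse all bookkeeping fences of the reservation object into a
	 * single fence that the free callback can be attached to.
	 */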
	r = dma_resv_get_singleton(resv, DMA_RESV_USAGE_BOOKKEEP, &fence);
	if (r)
		goto fallback;

	if (!fence) {
		amdgpu_pasid_free(pasid);
		return;
	}

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb) {
		/* Last resort when we are OOM */
		dma_fence_wait(fence, false);
		dma_fence_put(fence);
		amdgpu_pasid_free(pasid);
	} else {
		cb->pasid = pasid;
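		/* dma_fence_add_callback() returns -ENOENT when the fence has
		 * already signaled; in that case run the callback directly to
		 * free the PASID right away.
		 */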
		if (dma_fence_add_callback(fence, &cb->cb,
					   amdgpu_pasid_free_cb))
			amdgpu_pasid_free_cb(fence, &cb->cb);
	}

	return;

fallback:
	/* Not enough memory for the delayed delete, as last resort
	 * block for all the fences to complete.
	 */
	dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
			      false, MAX_SCHEDULE_TIMEOUT);
	amdgpu_pasid_free(pasid);
}

/*
 * VMID manager
 *
 * VMIDs are per-VMHUB identifiers for page table handling.
 */

/**
 * amdgpu_vmid_had_gpu_reset - check if reset occurred since last use
 *
 * @adev: amdgpu_device pointer
 * @id: VMID structure
 *
 * Check if a GPU reset occurred since the last use of the VMID.
 */
bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
			       struct amdgpu_vmid *id)
{
	return id->current_gpu_reset_count !=
		atomic_read(&adev->gpu_reset_counter);
}

/* Check if we need to switch to another set of resources */
static bool amdgpu_vmid_gds_switch_needed(struct amdgpu_vmid *id,
					  struct amdgpu_job *job)
{
	return id->gds_base != job->gds_base ||
		id->gds_size != job->gds_size ||
		id->gws_base != job->gws_base ||
		id->gws_size != job->gws_size ||
		id->oa_base != job->oa_base ||
		id->oa_size != job->oa_size;
}

/* Check if the id is compatible with the job */
static bool amdgpu_vmid_compatible(struct amdgpu_vmid *id,
				   struct amdgpu_job *job)
{
	return id->pd_gpu_addr == job->vm_pd_addr &&
		!amdgpu_vmid_gds_switch_needed(id, job);
}

/**
 * amdgpu_vmid_grab_idle - grab idle VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @idle: resulting idle VMID
 * @fence: fence to wait for if no id could be grabbed
 *
 * Try to find an idle VMID; if none is idle, return a fence in @fence to
 * wait for before trying again. Returns -ENOMEM when we are out of memory.
 */
static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
				 struct amdgpu_ring *ring,
				 struct amdgpu_vmid **idle,
				 struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->vm_hub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct dma_fence **fences;
	unsigned i;

	if (!dma_fence_is_signaled(ring->vmid_wait)) {
		*fence = dma_fence_get(ring->vmid_wait);
		return 0;
	}

	fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL);
	if (!fences)
		return -ENOMEM;

	/* Check if we have an idle VMID */
	i = 0;
	list_for_each_entry((*idle), &id_mgr->ids_lru, list) {
		/* Don't use per engine and per process VMID at the same time */
		struct amdgpu_ring *r = adev->vm_manager.concurrent_flush ?
			NULL : ring;

		fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, r);
		if (!fences[i])
			break;
		++i;
	}

	/* If we can't find an idle VMID to use, wait till one becomes available */
	if (&(*idle)->list == &id_mgr->ids_lru) {
		u64 fence_context = adev->vm_manager.fence_context + ring->idx;
		unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
		struct dma_fence_array *array;
		unsigned j;

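		/* Combine the blocking fences into a fence array that signals
		 * as soon as any one of them does (signal_on_any), i.e. as
		 * soon as the first VMID becomes idle again.
		 */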
		*idle = NULL;
		for (j = 0; j < i; ++j)
			dma_fence_get(fences[j]);

		array = dma_fence_array_create(i, fences, fence_context,
					       seqno, true);
		if (!array) {
			for (j = 0; j < i; ++j)
				dma_fence_put(fences[j]);
			kfree(fences);
			return -ENOMEM;
		}

		*fence = dma_fence_get(&array->base);
		dma_fence_put(ring->vmid_wait);
		ring->vmid_wait = &array->base;
		return 0;
	}
	kfree(fences);

	return 0;
}

/**
 * amdgpu_vmid_grab_reserved - try to assign reserved VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @job: job who wants to use the VMID
 * @id: resulting VMID
 * @fence: fence to wait for if no id could be grabbed
 *
 * Try to assign a reserved VMID.
 */
static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
				     struct amdgpu_ring *ring,
				     struct amdgpu_job *job,
				     struct amdgpu_vmid **id,
				     struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->vm_hub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	uint64_t fence_context = adev->fence_context + ring->idx;
	bool needs_flush = vm->use_cpu_for_update;
	uint64_t updates = amdgpu_vm_tlb_seq(vm);
	int r;

	*id = id_mgr->reserved;
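	/* The reserved VMID can be reused without a flush only if it was last
	 * owned by this VM, matches the job's page directory and GDS state,
	 * has seen all TLB updates, and its last flush either came from this
	 * ring's context or has already signaled.
	 */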
	if ((*id)->owner != vm->immediate.fence_context ||
	    !amdgpu_vmid_compatible(*id, job) ||
	    (*id)->flushed_updates < updates ||
	    !(*id)->last_flush ||
	    ((*id)->last_flush->context != fence_context &&
	     !dma_fence_is_signaled((*id)->last_flush))) {
		struct dma_fence *tmp;

		/* Don't use per engine and per process VMID at the same time */
		if (adev->vm_manager.concurrent_flush)
			ring = NULL;

		/* to prevent one context from being starved by another context */
		(*id)->pd_gpu_addr = 0;
		tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
		if (tmp) {
			*id = NULL;
			*fence = dma_fence_get(tmp);
			return 0;
		}
		needs_flush = true;
	}

	/* Good, we can use this VMID. Remember this submission as
	 * user of the VMID.
	 */
	r = amdgpu_sync_fence(&(*id)->active, &job->base.s_fence->finished);
	if (r)
		return r;

	job->vm_needs_flush = needs_flush;
	job->spm_update_needed = true;
	return 0;
}

/**
 * amdgpu_vmid_grab_used - try to reuse a VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @job: job who wants to use the VMID
 * @id: resulting VMID
 * @fence: fence to wait for if no id could be grabbed
 *
 * Try to reuse a VMID for this submission.
 */
static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
				 struct amdgpu_ring *ring,
				 struct amdgpu_job *job,
				 struct amdgpu_vmid **id,
				 struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->vm_hub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	uint64_t fence_context = adev->fence_context + ring->idx;
	uint64_t updates = amdgpu_vm_tlb_seq(vm);
	int r;

	job->vm_needs_flush = vm->use_cpu_for_update;

	/* Check if we can use a VMID already assigned to this VM, trying the
	 * most recently used ones first.
	 */
	list_for_each_entry_reverse((*id), &id_mgr->ids_lru, list) {
		bool needs_flush = vm->use_cpu_for_update;

		/* Check all the prerequisites to using this VMID */
		if ((*id)->owner != vm->immediate.fence_context)
			continue;

		if (!amdgpu_vmid_compatible(*id, job))
			continue;

		if (!(*id)->last_flush ||
		    ((*id)->last_flush->context != fence_context &&
		     !dma_fence_is_signaled((*id)->last_flush)))
			needs_flush = true;

		if ((*id)->flushed_updates < updates)
			needs_flush = true;

		if (needs_flush && !adev->vm_manager.concurrent_flush)
			continue;

		/* Good, we can use this VMID. Remember this submission as
		 * user of the VMID.
		 */
		r = amdgpu_sync_fence(&(*id)->active,
				      &job->base.s_fence->finished);
		if (r)
			return r;

		job->vm_needs_flush |= needs_flush;
		return 0;
	}

	*id = NULL;
	return 0;
}

/**
 * amdgpu_vmid_grab - allocate the next free VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @job: job who wants to use the VMID
 * @fence: fence to wait for if no id could be grabbed
 *
 * Allocate an id for the vm, adding fences to the sync obj as necessary.
 */
int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		     struct amdgpu_job *job, struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->vm_hub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *idle = NULL;
	struct amdgpu_vmid *id = NULL;
	int r = 0;

	mutex_lock(&id_mgr->lock);
	r = amdgpu_vmid_grab_idle(vm, ring, &idle, fence);
	if (r || !idle)
		goto error;

	if (vm->reserved_vmid[vmhub] ||
	    (enforce_isolation && (vmhub == AMDGPU_GFXHUB(0)))) {
		r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence);
		if (r || !id)
			goto error;
	} else {
		r = amdgpu_vmid_grab_used(vm, ring, job, &id, fence);
		if (r)
			goto error;

		if (!id) {
			/* Still no ID to use? Then use the idle one found earlier */
			id = idle;

			/* Remember this submission as user of the VMID */
			r = amdgpu_sync_fence(&id->active,
					      &job->base.s_fence->finished);
			if (r)
				goto error;

			job->vm_needs_flush = true;
		}

		list_move_tail(&id->list, &id_mgr->ids_lru);
	}

	job->gds_switch_needed = amdgpu_vmid_gds_switch_needed(id, job);
	if (job->vm_needs_flush) {
		id->flushed_updates = amdgpu_vm_tlb_seq(vm);
		dma_fence_put(id->last_flush);
		id->last_flush = NULL;
	}
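	/* The VMID number is simply the index of the id in the manager's
	 * ids array.
	 */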
	job->vmid = id - id_mgr->ids;
	job->pasid = vm->pasid;

	id->gds_base = job->gds_base;
	id->gds_size = job->gds_size;
	id->gws_base = job->gws_base;
	id->gws_size = job->gws_size;
	id->oa_base = job->oa_base;
	id->oa_size = job->oa_size;
	id->pd_gpu_addr = job->vm_pd_addr;
	id->owner = vm->immediate.fence_context;

	trace_amdgpu_vm_grab_id(vm, ring, job);

error:
	mutex_unlock(&id_mgr->lock);
	return r;
}

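/**
 * amdgpu_vmid_alloc_reserved - reserve a VMID for exclusive use
 *
 * @adev: amdgpu_device pointer
 * @vmhub: vmhub to reserve the VMID on
 *
 * Take a VMID out of the normal round robin handling and reserve it. The
 * reservation is reference counted, so only the first caller actually
 * removes an ID from the LRU.
 */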
int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
			       unsigned vmhub)
{
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];

	mutex_lock(&id_mgr->lock);

	++id_mgr->reserved_use_count;
	if (!id_mgr->reserved) {
		struct amdgpu_vmid *id;

		id = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid,
				      list);
		/* Remove from normal round robin handling */
		list_del_init(&id->list);
		id_mgr->reserved = id;
	}

	mutex_unlock(&id_mgr->lock);
	return 0;
}

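/**
 * amdgpu_vmid_free_reserved - drop a VMID reservation
 *
 * @adev: amdgpu_device pointer
 * @vmhub: vmhub the VMID was reserved on
 *
 * Drop one reference to the reserved VMID; when the last reference is
 * dropped the VMID returns to the normal round robin handling.
 */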
void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
			       unsigned vmhub)
{
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];

	mutex_lock(&id_mgr->lock);
	if (!--id_mgr->reserved_use_count) {
		/* give the reserved ID back to normal round robin */
		list_add(&id_mgr->reserved->list, &id_mgr->ids_lru);
		id_mgr->reserved = NULL;
	}

	mutex_unlock(&id_mgr->lock);
}

/**
 * amdgpu_vmid_reset - reset VMID to zero
 *
 * @adev: amdgpu device structure
 * @vmhub: vmhub type
 * @vmid: vmid number to use
 *
 * Reset saved GDS, GWS and OA to force a switch on next flush.
 */
void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
		       unsigned vmid)
{
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *id = &id_mgr->ids[vmid];

	mutex_lock(&id_mgr->lock);
	id->owner = 0;
	id->gds_base = 0;
	id->gds_size = 0;
	id->gws_base = 0;
	id->gws_size = 0;
	id->oa_base = 0;
	id->oa_size = 0;
	mutex_unlock(&id_mgr->lock);
}

/**
 * amdgpu_vmid_reset_all - reset all VMIDs to zero
 *
 * @adev: amdgpu device structure
 *
 * Reset all VMIDs to force a flush on next use.
 */
void amdgpu_vmid_reset_all(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		for (j = 1; j < id_mgr->num_ids; ++j)
			amdgpu_vmid_reset(adev, i, j);
	}
}

/**
 * amdgpu_vmid_mgr_init - init the VMID manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VM manager structures
 */
void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		mutex_init(&id_mgr->lock);
		INIT_LIST_HEAD(&id_mgr->ids_lru);
		id_mgr->reserved_use_count = 0;

		/* manage only VMIDs not used by KFD */
		id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;

		/* skip over VMID 0, since it is the system VM */
		for (j = 1; j < id_mgr->num_ids; ++j) {
			amdgpu_vmid_reset(adev, i, j);
			amdgpu_sync_create(&id_mgr->ids[j].active);
			list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
		}
	}
	/* alloc a default reserved vmid to enforce isolation */
	if (enforce_isolation)
		amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(0));
}

/**
 * amdgpu_vmid_mgr_fini - cleanup VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Cleanup the VM manager and free resources.
 */
void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		mutex_destroy(&id_mgr->lock);
		for (j = 0; j < AMDGPU_NUM_VMID; ++j) {
			struct amdgpu_vmid *id = &id_mgr->ids[j];

			amdgpu_sync_free(&id->active);
			dma_fence_put(id->last_flush);
			dma_fence_put(id->pasid_mapping);
		}
	}
}