/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu_ids.h"

#include <linux/idr.h>
#include <linux/dma-fence-array.h>


#include "amdgpu.h"
#include "amdgpu_trace.h"
/*
 * PASID manager
 *
 * PASIDs are global address space identifiers that can be shared
 * between the GPU, an IOMMU and the driver. VMs on different devices
 * may use the same PASID if they share the same address
 * space. Therefore PASIDs are allocated using a global IDA. VMs are
 * looked up from the PASID per amdgpu_device.
 */
static DEFINE_IDA(amdgpu_pasid_ida);

/* Helper to free pasid from a fence callback */
struct amdgpu_pasid_cb {
	struct dma_fence_cb cb;
	u32 pasid;
};

/**
 * amdgpu_pasid_alloc - Allocate a PASID
 * @bits: Maximum width of the PASID in bits, must be at least 1
 *
 * Allocates a PASID of the given width while keeping smaller PASIDs
 * available if possible.
 *
 * Returns a positive integer on success. Returns %-EINVAL if bits==0.
 * Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on
 * memory allocation failure.
 */
int amdgpu_pasid_alloc(unsigned int bits)
{
	int pasid = -EINVAL;

	for (bits = min(bits, 31U); bits > 0; bits--) {
		pasid = ida_simple_get(&amdgpu_pasid_ida,
				       1U << (bits - 1), 1U << bits,
				       GFP_KERNEL);
		if (pasid != -ENOSPC)
			break;
	}

	if (pasid >= 0)
		trace_amdgpu_pasid_allocated(pasid);

	return pasid;
}
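
/* A minimal usage sketch (not taken from a specific caller, the width of 16
 * bits is only an example value): a client that needs its own address space
 * would typically pair the two helpers above like this:
 *
 *	int pasid = amdgpu_pasid_alloc(16);
 *
 *	if (pasid < 0)
 *		return pasid;
 *	...
 *	amdgpu_pasid_free(pasid);
 */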

/**
 * amdgpu_pasid_free - Free a PASID
 * @pasid: PASID to free
 */
void amdgpu_pasid_free(u32 pasid)
{
	trace_amdgpu_pasid_freed(pasid);
	ida_simple_remove(&amdgpu_pasid_ida, pasid);
}

static void amdgpu_pasid_free_cb(struct dma_fence *fence,
				 struct dma_fence_cb *_cb)
{
	struct amdgpu_pasid_cb *cb =
		container_of(_cb, struct amdgpu_pasid_cb, cb);

	amdgpu_pasid_free(cb->pasid);
	dma_fence_put(fence);
	kfree(cb);
}

/**
 * amdgpu_pasid_free_delayed - free pasid when fences signal
 *
 * @resv: reservation object with the fences to wait for
 * @pasid: pasid to free
 *
 * Free the pasid only after all the fences in resv are signaled.
 */
void amdgpu_pasid_free_delayed(struct dma_resv *resv,
			       u32 pasid)
{
	struct amdgpu_pasid_cb *cb;
	struct dma_fence *fence;
	int r;

	r = dma_resv_get_singleton(resv, DMA_RESV_USAGE_BOOKKEEP, &fence);
	if (r)
		goto fallback;

	if (!fence) {
		amdgpu_pasid_free(pasid);
		return;
	}

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb) {
		/* Last resort when we are OOM */
		dma_fence_wait(fence, false);
		dma_fence_put(fence);
		amdgpu_pasid_free(pasid);
	} else {
		cb->pasid = pasid;
		if (dma_fence_add_callback(fence, &cb->cb,
					   amdgpu_pasid_free_cb))
			amdgpu_pasid_free_cb(fence, &cb->cb);
	}

	return;

fallback:
	/* Not enough memory for the delayed delete; as a last resort,
	 * block for all the fences to complete.
	 */
	dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
			      false, MAX_SCHEDULE_TIMEOUT);
	amdgpu_pasid_free(pasid);
}
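
/* Note (assumption about the callers, which are not part of this file):
 * the reservation object passed in is expected to cover all work that may
 * still use the PASID, for example the root page-directory BO's resv when a
 * VM is torn down, so the ID is not recycled while in-flight work could
 * still reference it.
 */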

/*
 * VMID manager
 *
 * VMIDs are per-VMHUB identifiers that select which set of page tables the
 * hardware uses for a submission.
 */
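
/* VMID assignment below proceeds in three steps: amdgpu_vmid_grab_idle()
 * finds an ID with no remaining users (or a fence to wait for),
 * amdgpu_vmid_grab_reserved() handles VMs with a reserved ID, and
 * amdgpu_vmid_grab_used() tries to reuse an ID this VM already owns before
 * amdgpu_vmid_grab() falls back to the idle one.
 */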

/**
 * amdgpu_vmid_had_gpu_reset - check if reset occurred since last use
 *
 * @adev: amdgpu_device pointer
 * @id: VMID structure
 *
 * Check if a GPU reset occurred since the last use of the VMID.
 */
bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
			       struct amdgpu_vmid *id)
{
	return id->current_gpu_reset_count !=
		atomic_read(&adev->gpu_reset_counter);
}

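/* GDS (global data share), GWS (global wave sync) and OA (ordered append)
 * are shared on-chip resources whose ranges are programmed per VMID, so
 * reusing an ID with a different partitioning requires a switch on the next
 * flush.
 */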
/* Check if we need to switch to another set of resources */
static bool amdgpu_vmid_gds_switch_needed(struct amdgpu_vmid *id,
					  struct amdgpu_job *job)
{
	return id->gds_base != job->gds_base ||
		id->gds_size != job->gds_size ||
		id->gws_base != job->gws_base ||
		id->gws_size != job->gws_size ||
		id->oa_base != job->oa_base ||
		id->oa_size != job->oa_size;
}

/* Check if the id is compatible with the job */
static bool amdgpu_vmid_compatible(struct amdgpu_vmid *id,
				   struct amdgpu_job *job)
{
	return  id->pd_gpu_addr == job->vm_pd_addr &&
		!amdgpu_vmid_gds_switch_needed(id, job);
}

/**
 * amdgpu_vmid_grab_idle - grab idle VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @idle: resulting idle VMID
 * @fence: fence to wait for if no id could be grabbed
 *
 * Try to find an idle VMID. If none is idle, set @fence to a fence the
 * caller has to wait for before retrying. Returns -ENOMEM when we are out
 * of memory.
 */
static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
				 struct amdgpu_ring *ring,
				 struct amdgpu_vmid **idle,
				 struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->vm_hub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct dma_fence **fences;
	unsigned i;

	if (!dma_fence_is_signaled(ring->vmid_wait)) {
		*fence = dma_fence_get(ring->vmid_wait);
		return 0;
	}

	fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL);
	if (!fences)
		return -ENOMEM;

	/* Check if we have an idle VMID */
	i = 0;
	list_for_each_entry((*idle), &id_mgr->ids_lru, list) {
		/* Don't use per engine and per process VMID at the same time */
		struct amdgpu_ring *r = adev->vm_manager.concurrent_flush ?
			NULL : ring;

		fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, r);
		if (!fences[i])
			break;
		++i;
	}

	/* If we can't find an idle VMID to use, wait till one becomes available */
	if (&(*idle)->list == &id_mgr->ids_lru) {
		u64 fence_context = adev->vm_manager.fence_context + ring->idx;
		unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
		struct dma_fence_array *array;
		unsigned j;

		*idle = NULL;
		for (j = 0; j < i; ++j)
			dma_fence_get(fences[j]);

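		/* dma_fence_array_create() takes ownership of the fences
		 * array and of the references taken above, so only free
		 * them here if creation fails.
		 */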
		array = dma_fence_array_create(i, fences, fence_context,
					       seqno, true);
		if (!array) {
			for (j = 0; j < i; ++j)
				dma_fence_put(fences[j]);
			kfree(fences);
			return -ENOMEM;
		}

		*fence = dma_fence_get(&array->base);
		dma_fence_put(ring->vmid_wait);
		ring->vmid_wait = &array->base;
		return 0;
	}
	kfree(fences);

	return 0;
}

/**
 * amdgpu_vmid_grab_reserved - try to assign reserved VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @job: job that wants to use the VMID
 * @id: resulting VMID
 * @fence: fence to wait for if no id could be grabbed
 *
 * Try to assign a reserved VMID.
 */
static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
				     struct amdgpu_ring *ring,
				     struct amdgpu_job *job,
				     struct amdgpu_vmid **id,
				     struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->vm_hub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	uint64_t fence_context = adev->fence_context + ring->idx;
	bool needs_flush = vm->use_cpu_for_update;
	uint64_t updates = amdgpu_vm_tlb_seq(vm);
	int r;

	*id = id_mgr->reserved;
	if ((*id)->owner != vm->immediate.fence_context ||
	    !amdgpu_vmid_compatible(*id, job) ||
	    (*id)->flushed_updates < updates ||
	    !(*id)->last_flush ||
	    ((*id)->last_flush->context != fence_context &&
	     !dma_fence_is_signaled((*id)->last_flush))) {
		struct dma_fence *tmp;

		/* Don't use per engine and per process VMID at the same time */
		if (adev->vm_manager.concurrent_flush)
			ring = NULL;

		/* to prevent one context from being starved by another context */
		(*id)->pd_gpu_addr = 0;
		tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
		if (tmp) {
			*id = NULL;
			*fence = dma_fence_get(tmp);
			return 0;
		}
		needs_flush = true;
	}

	/* Good, we can use this VMID. Remember this submission as
	 * user of the VMID.
	 */
	r = amdgpu_sync_fence(&(*id)->active, &job->base.s_fence->finished);
	if (r)
		return r;

	job->vm_needs_flush = needs_flush;
	job->spm_update_needed = true;
	return 0;
}

/**
 * amdgpu_vmid_grab_used - try to reuse a VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @job: job that wants to use the VMID
 * @id: resulting VMID
 * @fence: fence to wait for if no id could be grabbed
 *
 * Try to reuse a VMID for this submission.
 */
static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
				 struct amdgpu_ring *ring,
				 struct amdgpu_job *job,
				 struct amdgpu_vmid **id,
				 struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->vm_hub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	uint64_t fence_context = adev->fence_context + ring->idx;
	uint64_t updates = amdgpu_vm_tlb_seq(vm);
	int r;

	job->vm_needs_flush = vm->use_cpu_for_update;

	/* Check if we can use a VMID already assigned to this VM */
	list_for_each_entry_reverse((*id), &id_mgr->ids_lru, list) {
		bool needs_flush = vm->use_cpu_for_update;

		/* Check all the prerequisites to using this VMID */
		if ((*id)->owner != vm->immediate.fence_context)
			continue;

		if (!amdgpu_vmid_compatible(*id, job))
			continue;

		if (!(*id)->last_flush ||
		    ((*id)->last_flush->context != fence_context &&
		     !dma_fence_is_signaled((*id)->last_flush)))
			needs_flush = true;

		if ((*id)->flushed_updates < updates)
			needs_flush = true;

		if (needs_flush && !adev->vm_manager.concurrent_flush)
			continue;

		/* Good, we can use this VMID. Remember this submission as
		 * user of the VMID.
		 */
		r = amdgpu_sync_fence(&(*id)->active,
				      &job->base.s_fence->finished);
		if (r)
			return r;

		job->vm_needs_flush |= needs_flush;
		return 0;
	}

	*id = NULL;
	return 0;
}

/**
 * amdgpu_vmid_grab - allocate the next free VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @job: job that wants to use the VMID
 * @fence: fence to wait for if no id could be grabbed
 *
 * Allocate an id for the vm; if none is available, set @fence to a fence the
 * caller has to wait for before retrying.
 */
int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		     struct amdgpu_job *job, struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->vm_hub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *idle = NULL;
	struct amdgpu_vmid *id = NULL;
	int r = 0;

	mutex_lock(&id_mgr->lock);
	r = amdgpu_vmid_grab_idle(vm, ring, &idle, fence);
	if (r || !idle)
		goto error;

	if (vm->reserved_vmid[vmhub] || (enforce_isolation && (vmhub == AMDGPU_GFXHUB(0)))) {
		r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence);
		if (r || !id)
			goto error;
	} else {
		r = amdgpu_vmid_grab_used(vm, ring, job, &id, fence);
		if (r)
			goto error;

		if (!id) {
			/* Still no ID to use? Then use the idle one found earlier */
			id = idle;

			/* Remember this submission as user of the VMID */
			r = amdgpu_sync_fence(&id->active,
					      &job->base.s_fence->finished);
			if (r)
				goto error;

			job->vm_needs_flush = true;
		}

		list_move_tail(&id->list, &id_mgr->ids_lru);
	}

	job->gds_switch_needed = amdgpu_vmid_gds_switch_needed(id, job);
	if (job->vm_needs_flush) {
		id->flushed_updates = amdgpu_vm_tlb_seq(vm);
		dma_fence_put(id->last_flush);
		id->last_flush = NULL;
	}
	job->vmid = id - id_mgr->ids;
	job->pasid = vm->pasid;

	id->gds_base = job->gds_base;
	id->gds_size = job->gds_size;
	id->gws_base = job->gws_base;
	id->gws_size = job->gws_size;
	id->oa_base = job->oa_base;
	id->oa_size = job->oa_size;
	id->pd_gpu_addr = job->vm_pd_addr;
	id->owner = vm->immediate.fence_context;

	trace_amdgpu_vm_grab_id(vm, ring, job);

error:
	mutex_unlock(&id_mgr->lock);
	return r;
}
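
/* A rough sketch of the expected calling pattern (the real caller lives in
 * the job/scheduler code and is not reproduced here): grab an ID right
 * before the job runs and, if a fence is returned instead, wait for it and
 * try again. With fence initialised to NULL:
 *
 *	r = amdgpu_vmid_grab(vm, ring, job, &fence);
 *	if (!r && fence) {
 *		dma_fence_wait(fence, false);
 *		dma_fence_put(fence);
 *		// retry the grab afterwards
 *	}
 */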

int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
			       unsigned vmhub)
{
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];

	mutex_lock(&id_mgr->lock);

	++id_mgr->reserved_use_count;
	if (!id_mgr->reserved) {
		struct amdgpu_vmid *id;

		id = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid,
				      list);
		/* Remove from normal round robin handling */
		list_del_init(&id->list);
		id_mgr->reserved = id;
	}

	mutex_unlock(&id_mgr->lock);
	return 0;
}

void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
			       unsigned vmhub)
{
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];

	mutex_lock(&id_mgr->lock);
	if (!--id_mgr->reserved_use_count) {
		/* give the reserved ID back to normal round robin */
		list_add(&id_mgr->reserved->list, &id_mgr->ids_lru);
		id_mgr->reserved = NULL;
	}

	mutex_unlock(&id_mgr->lock);
}
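
/* The two functions above are use-counted: every amdgpu_vmid_alloc_reserved()
 * call must be balanced by an amdgpu_vmid_free_reserved() call for the same
 * vmhub before the reserved ID returns to the LRU. A hedged sketch of the
 * pairing (the real callers, such as a reserve-VMID request path, are
 * omitted):
 *
 *	amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(0));
 *	...
 *	amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(0));
 */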

/**
 * amdgpu_vmid_reset - reset VMID to zero
 *
 * @adev: amdgpu device structure
 * @vmhub: vmhub type
 * @vmid: vmid number to use
 *
 * Reset the saved GDS, GWS and OA settings to force a switch on the next flush.
 */
void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
		       unsigned vmid)
{
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *id = &id_mgr->ids[vmid];

	mutex_lock(&id_mgr->lock);
	id->owner = 0;
	id->gds_base = 0;
	id->gds_size = 0;
	id->gws_base = 0;
	id->gws_size = 0;
	id->oa_base = 0;
	id->oa_size = 0;
	mutex_unlock(&id_mgr->lock);
}

/**
 * amdgpu_vmid_reset_all - reset all VMIDs to zero
 *
 * @adev: amdgpu device structure
 *
 * Reset all VMIDs to force a flush on next use.
 */
void amdgpu_vmid_reset_all(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		for (j = 1; j < id_mgr->num_ids; ++j)
			amdgpu_vmid_reset(adev, i, j);
	}
}

/**
 * amdgpu_vmid_mgr_init - init the VMID manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VM manager structures
 */
void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		mutex_init(&id_mgr->lock);
		INIT_LIST_HEAD(&id_mgr->ids_lru);
		id_mgr->reserved_use_count = 0;

		/* manage only VMIDs not used by KFD */
		id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;

		/* skip over VMID 0, since it is the system VM */
		for (j = 1; j < id_mgr->num_ids; ++j) {
			amdgpu_vmid_reset(adev, i, j);
			amdgpu_sync_create(&id_mgr->ids[j].active);
			list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
		}
	}
	/* alloc a default reserved vmid to enforce isolation */
	if (enforce_isolation)
		amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(0));
}

/**
 * amdgpu_vmid_mgr_fini - cleanup VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Cleanup the VM manager and free resources.
 */
void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		mutex_destroy(&id_mgr->lock);
		for (j = 0; j < AMDGPU_NUM_VMID; ++j) {
			struct amdgpu_vmid *id = &id_mgr->ids[j];

			amdgpu_sync_free(&id->active);
			dma_fence_put(id->last_flush);
			dma_fence_put(id->pasid_mapping);
		}
	}
}