162306a36Sopenharmony_ci/*
262306a36Sopenharmony_ci * Copyright 2014 Advanced Micro Devices, Inc.
362306a36Sopenharmony_ci * Copyright 2008 Red Hat Inc.
462306a36Sopenharmony_ci * Copyright 2009 Jerome Glisse.
562306a36Sopenharmony_ci *
662306a36Sopenharmony_ci * Permission is hereby granted, free of charge, to any person obtaining a
762306a36Sopenharmony_ci * copy of this software and associated documentation files (the "Software"),
862306a36Sopenharmony_ci * to deal in the Software without restriction, including without limitation
962306a36Sopenharmony_ci * the rights to use, copy, modify, merge, publish, distribute, sublicense,
1062306a36Sopenharmony_ci * and/or sell copies of the Software, and to permit persons to whom the
1162306a36Sopenharmony_ci * Software is furnished to do so, subject to the following conditions:
1262306a36Sopenharmony_ci *
1362306a36Sopenharmony_ci * The above copyright notice and this permission notice shall be included in
1462306a36Sopenharmony_ci * all copies or substantial portions of the Software.
1562306a36Sopenharmony_ci *
1662306a36Sopenharmony_ci * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
1762306a36Sopenharmony_ci * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
1862306a36Sopenharmony_ci * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
1962306a36Sopenharmony_ci * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
2062306a36Sopenharmony_ci * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
2162306a36Sopenharmony_ci * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
2262306a36Sopenharmony_ci * OTHER DEALINGS IN THE SOFTWARE.
2362306a36Sopenharmony_ci *
2462306a36Sopenharmony_ci */
2562306a36Sopenharmony_ci
2662306a36Sopenharmony_ci#include <linux/firmware.h>
2762306a36Sopenharmony_ci#include "amdgpu.h"
2862306a36Sopenharmony_ci#include "amdgpu_gfx.h"
2962306a36Sopenharmony_ci#include "amdgpu_rlc.h"
3062306a36Sopenharmony_ci#include "amdgpu_ras.h"
3162306a36Sopenharmony_ci#include "amdgpu_xcp.h"
3262306a36Sopenharmony_ci
3362306a36Sopenharmony_ci/* delay 0.1 second to enable gfx off feature */
3462306a36Sopenharmony_ci#define GFX_OFF_DELAY_ENABLE         msecs_to_jiffies(100)
3562306a36Sopenharmony_ci
3662306a36Sopenharmony_ci#define GFX_OFF_NO_DELAY 0
3762306a36Sopenharmony_ci
3862306a36Sopenharmony_ci/*
3962306a36Sopenharmony_ci * GPU GFX IP block helpers function.
4062306a36Sopenharmony_ci */
4162306a36Sopenharmony_ci
4262306a36Sopenharmony_ciint amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec,
4362306a36Sopenharmony_ci				int pipe, int queue)
4462306a36Sopenharmony_ci{
4562306a36Sopenharmony_ci	int bit = 0;
4662306a36Sopenharmony_ci
4762306a36Sopenharmony_ci	bit += mec * adev->gfx.mec.num_pipe_per_mec
4862306a36Sopenharmony_ci		* adev->gfx.mec.num_queue_per_pipe;
4962306a36Sopenharmony_ci	bit += pipe * adev->gfx.mec.num_queue_per_pipe;
5062306a36Sopenharmony_ci	bit += queue;
5162306a36Sopenharmony_ci
5262306a36Sopenharmony_ci	return bit;
5362306a36Sopenharmony_ci}
5462306a36Sopenharmony_ci
5562306a36Sopenharmony_civoid amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
5662306a36Sopenharmony_ci				 int *mec, int *pipe, int *queue)
5762306a36Sopenharmony_ci{
5862306a36Sopenharmony_ci	*queue = bit % adev->gfx.mec.num_queue_per_pipe;
5962306a36Sopenharmony_ci	*pipe = (bit / adev->gfx.mec.num_queue_per_pipe)
6062306a36Sopenharmony_ci		% adev->gfx.mec.num_pipe_per_mec;
6162306a36Sopenharmony_ci	*mec = (bit / adev->gfx.mec.num_queue_per_pipe)
6262306a36Sopenharmony_ci	       / adev->gfx.mec.num_pipe_per_mec;
6362306a36Sopenharmony_ci
6462306a36Sopenharmony_ci}
6562306a36Sopenharmony_ci
/* Return true if the given MEC compute queue on XCC @xcc_id is owned by
 * amdgpu, i.e. its bit was set in the per-XCC queue bitmap (see
 * amdgpu_gfx_compute_queue_acquire()).
 */
bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev,
				     int xcc_id, int mec, int pipe, int queue)
{
	return test_bit(amdgpu_gfx_mec_queue_to_bit(adev, mec, pipe, queue),
			adev->gfx.mec_bitmap[xcc_id].queue_bitmap);
}
7262306a36Sopenharmony_ci
7362306a36Sopenharmony_ciint amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev,
7462306a36Sopenharmony_ci			       int me, int pipe, int queue)
7562306a36Sopenharmony_ci{
7662306a36Sopenharmony_ci	int bit = 0;
7762306a36Sopenharmony_ci
7862306a36Sopenharmony_ci	bit += me * adev->gfx.me.num_pipe_per_me
7962306a36Sopenharmony_ci		* adev->gfx.me.num_queue_per_pipe;
8062306a36Sopenharmony_ci	bit += pipe * adev->gfx.me.num_queue_per_pipe;
8162306a36Sopenharmony_ci	bit += queue;
8262306a36Sopenharmony_ci
8362306a36Sopenharmony_ci	return bit;
8462306a36Sopenharmony_ci}
8562306a36Sopenharmony_ci
8662306a36Sopenharmony_civoid amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,
8762306a36Sopenharmony_ci				int *me, int *pipe, int *queue)
8862306a36Sopenharmony_ci{
8962306a36Sopenharmony_ci	*queue = bit % adev->gfx.me.num_queue_per_pipe;
9062306a36Sopenharmony_ci	*pipe = (bit / adev->gfx.me.num_queue_per_pipe)
9162306a36Sopenharmony_ci		% adev->gfx.me.num_pipe_per_me;
9262306a36Sopenharmony_ci	*me = (bit / adev->gfx.me.num_queue_per_pipe)
9362306a36Sopenharmony_ci		/ adev->gfx.me.num_pipe_per_me;
9462306a36Sopenharmony_ci}
9562306a36Sopenharmony_ci
/* Return true if the given ME graphics queue is owned by amdgpu, i.e. its
 * bit was set in the ME queue bitmap (see
 * amdgpu_gfx_graphics_queue_acquire()).
 */
bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev,
				    int me, int pipe, int queue)
{
	return test_bit(amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue),
			adev->gfx.me.queue_bitmap);
}
10262306a36Sopenharmony_ci
10362306a36Sopenharmony_ci/**
10462306a36Sopenharmony_ci * amdgpu_gfx_parse_disable_cu - Parse the disable_cu module parameter
10562306a36Sopenharmony_ci *
10662306a36Sopenharmony_ci * @mask: array in which the per-shader array disable masks will be stored
10762306a36Sopenharmony_ci * @max_se: number of SEs
10862306a36Sopenharmony_ci * @max_sh: number of SHs
10962306a36Sopenharmony_ci *
11062306a36Sopenharmony_ci * The bitmask of CUs to be disabled in the shader array determined by se and
11162306a36Sopenharmony_ci * sh is stored in mask[se * max_sh + sh].
11262306a36Sopenharmony_ci */
/**
 * amdgpu_gfx_parse_disable_cu - Parse the disable_cu module parameter
 *
 * @mask: array in which the per-shader array disable masks will be stored
 * @max_se: number of SEs
 * @max_sh: number of SHs
 *
 * The bitmask of CUs to be disabled in the shader array determined by se and
 * sh is stored in mask[se * max_sh + sh].
 *
 * The parameter is a comma-separated list of "se.sh.cu" triples, e.g.
 * "0.0.4,1.0.7"; a malformed entry aborts parsing, an out-of-range entry
 * is skipped with an error.
 */
void amdgpu_gfx_parse_disable_cu(unsigned int *mask, unsigned int max_se, unsigned int max_sh)
{
	unsigned int se, sh, cu;
	const char *p;

	/* Start from all-zero masks; absent or empty parameter disables nothing. */
	memset(mask, 0, sizeof(*mask) * max_se * max_sh);

	if (!amdgpu_disable_cu || !*amdgpu_disable_cu)
		return;

	p = amdgpu_disable_cu;
	for (;;) {
		char *next;
		int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu);

		/* All three fields are required; stop on the first bad entry. */
		if (ret < 3) {
			DRM_ERROR("amdgpu: could not parse disable_cu\n");
			return;
		}

		/* cu is a bit position in a 16-bit-wide per-SH CU mask. */
		if (se < max_se && sh < max_sh && cu < 16) {
			DRM_INFO("amdgpu: disabling CU %u.%u.%u\n", se, sh, cu);
			mask[se * max_sh + sh] |= 1u << cu;
		} else {
			DRM_ERROR("amdgpu: disable_cu %u.%u.%u is out of range\n",
				  se, sh, cu);
		}

		/* Advance past the next comma; no comma means last entry. */
		next = strchr(p, ',');
		if (!next)
			break;
		p = next + 1;
	}
}
14762306a36Sopenharmony_ci
14862306a36Sopenharmony_cistatic bool amdgpu_gfx_is_graphics_multipipe_capable(struct amdgpu_device *adev)
14962306a36Sopenharmony_ci{
15062306a36Sopenharmony_ci	return amdgpu_async_gfx_ring && adev->gfx.me.num_pipe_per_me > 1;
15162306a36Sopenharmony_ci}
15262306a36Sopenharmony_ci
15362306a36Sopenharmony_cistatic bool amdgpu_gfx_is_compute_multipipe_capable(struct amdgpu_device *adev)
15462306a36Sopenharmony_ci{
15562306a36Sopenharmony_ci	if (amdgpu_compute_multipipe != -1) {
15662306a36Sopenharmony_ci		DRM_INFO("amdgpu: forcing compute pipe policy %d\n",
15762306a36Sopenharmony_ci			 amdgpu_compute_multipipe);
15862306a36Sopenharmony_ci		return amdgpu_compute_multipipe == 1;
15962306a36Sopenharmony_ci	}
16062306a36Sopenharmony_ci
16162306a36Sopenharmony_ci	if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))
16262306a36Sopenharmony_ci		return true;
16362306a36Sopenharmony_ci
16462306a36Sopenharmony_ci	/* FIXME: spreading the queues across pipes causes perf regressions
16562306a36Sopenharmony_ci	 * on POLARIS11 compute workloads */
16662306a36Sopenharmony_ci	if (adev->asic_type == CHIP_POLARIS11)
16762306a36Sopenharmony_ci		return false;
16862306a36Sopenharmony_ci
16962306a36Sopenharmony_ci	return adev->gfx.mec.num_mec > 1;
17062306a36Sopenharmony_ci}
17162306a36Sopenharmony_ci
17262306a36Sopenharmony_cibool amdgpu_gfx_is_high_priority_graphics_queue(struct amdgpu_device *adev,
17362306a36Sopenharmony_ci						struct amdgpu_ring *ring)
17462306a36Sopenharmony_ci{
17562306a36Sopenharmony_ci	int queue = ring->queue;
17662306a36Sopenharmony_ci	int pipe = ring->pipe;
17762306a36Sopenharmony_ci
17862306a36Sopenharmony_ci	/* Policy: use pipe1 queue0 as high priority graphics queue if we
17962306a36Sopenharmony_ci	 * have more than one gfx pipe.
18062306a36Sopenharmony_ci	 */
18162306a36Sopenharmony_ci	if (amdgpu_gfx_is_graphics_multipipe_capable(adev) &&
18262306a36Sopenharmony_ci	    adev->gfx.num_gfx_rings > 1 && pipe == 1 && queue == 0) {
18362306a36Sopenharmony_ci		int me = ring->me;
18462306a36Sopenharmony_ci		int bit;
18562306a36Sopenharmony_ci
18662306a36Sopenharmony_ci		bit = amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue);
18762306a36Sopenharmony_ci		if (ring == &adev->gfx.gfx_ring[bit])
18862306a36Sopenharmony_ci			return true;
18962306a36Sopenharmony_ci	}
19062306a36Sopenharmony_ci
19162306a36Sopenharmony_ci	return false;
19262306a36Sopenharmony_ci}
19362306a36Sopenharmony_ci
19462306a36Sopenharmony_cibool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
19562306a36Sopenharmony_ci					       struct amdgpu_ring *ring)
19662306a36Sopenharmony_ci{
19762306a36Sopenharmony_ci	/* Policy: use 1st queue as high priority compute queue if we
19862306a36Sopenharmony_ci	 * have more than one compute queue.
19962306a36Sopenharmony_ci	 */
20062306a36Sopenharmony_ci	if (adev->gfx.num_compute_rings > 1 &&
20162306a36Sopenharmony_ci	    ring == &adev->gfx.compute_ring[0])
20262306a36Sopenharmony_ci		return true;
20362306a36Sopenharmony_ci
20462306a36Sopenharmony_ci	return false;
20562306a36Sopenharmony_ci}
20662306a36Sopenharmony_ci
/* Reserve compute (MEC) queues for amdgpu by setting bits in the per-XCC
 * mec_bitmap.  Queues not marked here are left for other users (e.g. KFD).
 */
void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
{
	int i, j, queue, pipe;
	bool multipipe_policy = amdgpu_gfx_is_compute_multipipe_capable(adev);
	/* never claim more queues than there are compute rings to back them */
	int max_queues_per_mec = min(adev->gfx.mec.num_pipe_per_mec *
				     adev->gfx.mec.num_queue_per_pipe,
				     adev->gfx.num_compute_rings);
	/* xcc_mask == 0 means no partitioning support: treat as a single XCC */
	int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;

	if (multipipe_policy) {
		/* policy: make queues evenly cross all pipes on MEC1 only
		 * for multiple xcc, just use the original policy for simplicity */
		for (j = 0; j < num_xcc; j++) {
			for (i = 0; i < max_queues_per_mec; i++) {
				/* round-robin: pipe varies fastest, then queue */
				pipe = i % adev->gfx.mec.num_pipe_per_mec;
				queue = (i / adev->gfx.mec.num_pipe_per_mec) %
					 adev->gfx.mec.num_queue_per_pipe;

				set_bit(pipe * adev->gfx.mec.num_queue_per_pipe + queue,
					adev->gfx.mec_bitmap[j].queue_bitmap);
			}
		}
	} else {
		/* policy: amdgpu owns all queues in the given pipe */
		for (j = 0; j < num_xcc; j++) {
			for (i = 0; i < max_queues_per_mec; ++i)
				set_bit(i, adev->gfx.mec_bitmap[j].queue_bitmap);
		}
	}

	for (j = 0; j < num_xcc; j++) {
		dev_dbg(adev->dev, "mec queue bitmap weight=%d\n",
			bitmap_weight(adev->gfx.mec_bitmap[j].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES));
	}
}
24262306a36Sopenharmony_ci
/* Reserve graphics (ME) queues for amdgpu by setting bits in the ME queue
 * bitmap, then resize num_gfx_rings to the number of queues acquired.
 */
void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
{
	int i, queue, pipe;
	bool multipipe_policy = amdgpu_gfx_is_graphics_multipipe_capable(adev);
	int max_queues_per_me = adev->gfx.me.num_pipe_per_me *
					adev->gfx.me.num_queue_per_pipe;

	if (multipipe_policy) {
		/* policy: amdgpu owns the first queue per pipe at this stage
		 * will extend to mulitple queues per pipe later */
		for (i = 0; i < max_queues_per_me; i++) {
			/* round-robin: pipe varies fastest, then queue */
			pipe = i % adev->gfx.me.num_pipe_per_me;
			queue = (i / adev->gfx.me.num_pipe_per_me) %
				adev->gfx.me.num_queue_per_pipe;

			set_bit(pipe * adev->gfx.me.num_queue_per_pipe + queue,
				adev->gfx.me.queue_bitmap);
		}
	} else {
		/* single-pipe: simply claim the first max_queues_per_me queues */
		for (i = 0; i < max_queues_per_me; ++i)
			set_bit(i, adev->gfx.me.queue_bitmap);
	}

	/* update the number of active graphics rings */
	adev->gfx.num_gfx_rings =
		bitmap_weight(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
}
27062306a36Sopenharmony_ci
/* Pick a compute queue for the KIQ on XCC @xcc_id: scan from the highest
 * queue index down for a queue NOT already owned by amdgpu, subject to the
 * hardware constraints below.  On success fills ring->me/pipe/queue
 * (ring->me is 1-based) and returns 0; returns -EINVAL if no queue fits.
 */
static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring, int xcc_id)
{
	int queue_bit;
	int mec, pipe, queue;

	/* start one past the last valid bit; the loop pre-decrements */
	queue_bit = adev->gfx.mec.num_mec
		    * adev->gfx.mec.num_pipe_per_mec
		    * adev->gfx.mec.num_queue_per_pipe;

	while (--queue_bit >= 0) {
		/* a set bit means amdgpu already owns that queue for KCQs */
		if (test_bit(queue_bit, adev->gfx.mec_bitmap[xcc_id].queue_bitmap))
			continue;

		amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);

		/*
		 * 1. Using pipes 2/3 from MEC 2 seems cause problems.
		 * 2. It must use queue id 0, because CGPG_IDLE/SAVE/LOAD/RUN
		 * only can be issued on queue 0.
		 * (mec here is 0-based, so mec == 1 is the second MEC.)
		 */
		if ((mec == 1 && pipe > 1) || queue != 0)
			continue;

		/* ring->me is 1-based in the ring interface */
		ring->me = mec + 1;
		ring->pipe = pipe;
		ring->queue = queue;

		return 0;
	}

	dev_err(adev->dev, "Failed to find a queue for KIQ\n");
	return -EINVAL;
}
30562306a36Sopenharmony_ci
/* Initialize the KIQ ring for XCC @xcc_id: set up doorbell/EOP addresses,
 * acquire a free MEC queue for it and register it with the ring layer.
 * Returns 0 on success or a negative error code.
 */
int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
			     struct amdgpu_ring *ring,
			     struct amdgpu_irq_src *irq, int xcc_id)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
	int r = 0;

	spin_lock_init(&kiq->ring_lock);

	ring->adev = NULL;
	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->xcc_id = xcc_id;
	ring->vm_hub = AMDGPU_GFXHUB(xcc_id);
	/* per-XCC doorbell; << 1 because doorbell indices are 64-bit slots */
	ring->doorbell_index =
		(adev->doorbell_index.kiq +
		 xcc_id * adev->doorbell_index.xcc_doorbell_range)
		<< 1;

	r = amdgpu_gfx_kiq_acquire(adev, ring, xcc_id);
	if (r)
		return r;

	ring->eop_gpu_addr = kiq->eop_gpu_addr;
	/* KIQ is driven directly by the driver, never by the GPU scheduler */
	ring->no_scheduler = true;
	sprintf(ring->name, "kiq_%d.%d.%d.%d", xcc_id, ring->me, ring->pipe, ring->queue);
	r = amdgpu_ring_init(adev, ring, 1024, irq, AMDGPU_CP_KIQ_IRQ_DRIVER0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r);

	return r;
}
33962306a36Sopenharmony_ci
/* Tear down a KIQ ring previously set up by amdgpu_gfx_kiq_init_ring(). */
void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring)
{
	amdgpu_ring_fini(ring);
}
34462306a36Sopenharmony_ci
/* Free the KIQ EOP buffer object for XCC @xcc_id (counterpart of
 * amdgpu_gfx_kiq_init()).
 */
void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev, int xcc_id)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];

	amdgpu_bo_free_kernel(&kiq->eop_obj, &kiq->eop_gpu_addr, NULL);
}
35162306a36Sopenharmony_ci
/* Allocate and zero the KIQ EOP (end-of-pipe) buffer of @hpd_size bytes in
 * GTT for XCC @xcc_id.  The buffer is unmapped again after clearing; only
 * its GPU address is kept.  Returns 0 on success or a negative error code.
 */
int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
			unsigned int hpd_size, int xcc_id)
{
	int r;
	u32 *hpd;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];

	r = amdgpu_bo_create_kernel(adev, hpd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT, &kiq->eop_obj,
				    &kiq->eop_gpu_addr, (void **)&hpd);
	if (r) {
		dev_warn(adev->dev, "failed to create KIQ bo (%d).\n", r);
		return r;
	}

	memset(hpd, 0, hpd_size);

	/* reserve only to unmap; a failed reserve is non-fatal here */
	r = amdgpu_bo_reserve(kiq->eop_obj, true);
	if (unlikely(r != 0))
		dev_warn(adev->dev, "(%d) reserve kiq eop bo failed\n", r);
	amdgpu_bo_kunmap(kiq->eop_obj);
	amdgpu_bo_unreserve(kiq->eop_obj);

	return 0;
}
37762306a36Sopenharmony_ci
/* create MQD for each compute/gfx queue */
/* Allocate MQD (memory queue descriptor) buffer objects and CPU-side backup
 * copies for the KIQ, all gfx rings (on NAVI10+ with async gfx) and all
 * compute rings of XCC @xcc_id.  Returns 0 on success or a negative error
 * code; already-allocated objects are left for amdgpu_gfx_mqd_sw_fini().
 */
int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
			   unsigned int mqd_size, int xcc_id)
{
	int r, i, j;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
	struct amdgpu_ring *ring = &kiq->ring;
	u32 domain = AMDGPU_GEM_DOMAIN_GTT;

#if !defined(CONFIG_ARM) && !defined(CONFIG_ARM64)
	/* Only enable on gfx10 and 11 for now to avoid changing behavior on older chips */
	if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 0, 0))
		domain |= AMDGPU_GEM_DOMAIN_VRAM;
#endif

	/* create MQD for KIQ */
	if (!adev->enable_mes_kiq && !ring->mqd_obj) {
		/* Originally the KIQ MQD was put in the GTT domain, but for SRIOV
		 * the VRAM domain is a must, otherwise the hypervisor triggers a
		 * SAVE_VF failure after the driver is unloaded (the MQD is
		 * deallocated and gart_unbind has run).  To avoid divergence we
		 * use the VRAM domain for the KIQ MQD on both SRIOV and
		 * bare-metal.
		 */
		r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_VRAM |
					    AMDGPU_GEM_DOMAIN_GTT,
					    &ring->mqd_obj,
					    &ring->mqd_gpu_addr,
					    &ring->mqd_ptr);
		if (r) {
			dev_warn(adev->dev, "failed to create ring mqd ob (%d)", r);
			return r;
		}

		/* prepare MQD backup */
		kiq->mqd_backup = kmalloc(mqd_size, GFP_KERNEL);
		if (!kiq->mqd_backup) {
			dev_warn(adev->dev,
				 "no memory to create MQD backup for ring %s\n", ring->name);
			return -ENOMEM;
		}
	}

	if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
		/* create MQD for each KGQ */
		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
			ring = &adev->gfx.gfx_ring[i];
			if (!ring->mqd_obj) {
				r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
							    domain, &ring->mqd_obj,
							    &ring->mqd_gpu_addr, &ring->mqd_ptr);
				if (r) {
					dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
					return r;
				}

				ring->mqd_size = mqd_size;
				/* prepare MQD backup */
				adev->gfx.me.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
				if (!adev->gfx.me.mqd_backup[i]) {
					dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
					return -ENOMEM;
				}
			}
		}
	}

	/* create MQD for each KCQ */
	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		/* compute_ring[] is laid out xcc-major: rings of XCC k start
		 * at k * num_compute_rings */
		j = i + xcc_id * adev->gfx.num_compute_rings;
		ring = &adev->gfx.compute_ring[j];
		if (!ring->mqd_obj) {
			r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
						    domain, &ring->mqd_obj,
						    &ring->mqd_gpu_addr, &ring->mqd_ptr);
			if (r) {
				dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
				return r;
			}

			ring->mqd_size = mqd_size;
			/* prepare MQD backup */
			adev->gfx.mec.mqd_backup[j] = kmalloc(mqd_size, GFP_KERNEL);
			if (!adev->gfx.mec.mqd_backup[j]) {
				dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
				return -ENOMEM;
			}
		}
	}

	return 0;
}
46962306a36Sopenharmony_ci
/* Free all MQD buffer objects and backups allocated by
 * amdgpu_gfx_mqd_sw_init() for XCC @xcc_id (gfx rings, compute rings,
 * then the KIQ).
 */
void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev, int xcc_id)
{
	struct amdgpu_ring *ring = NULL;
	int i, j;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];

	if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
			ring = &adev->gfx.gfx_ring[i];
			kfree(adev->gfx.me.mqd_backup[i]);
			amdgpu_bo_free_kernel(&ring->mqd_obj,
					      &ring->mqd_gpu_addr,
					      &ring->mqd_ptr);
		}
	}

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		/* same xcc-major compute_ring[] indexing as in sw_init */
		j = i + xcc_id * adev->gfx.num_compute_rings;
		ring = &adev->gfx.compute_ring[j];
		kfree(adev->gfx.mec.mqd_backup[j]);
		amdgpu_bo_free_kernel(&ring->mqd_obj,
				      &ring->mqd_gpu_addr,
				      &ring->mqd_ptr);
	}

	ring = &kiq->ring;
	kfree(kiq->mqd_backup);
	amdgpu_bo_free_kernel(&ring->mqd_obj,
			      &ring->mqd_gpu_addr,
			      &ring->mqd_ptr);
}
50162306a36Sopenharmony_ci
/* Unmap all compute (KCQ) queues of XCC @xcc_id through the KIQ and wait
 * for the KIQ to process the packets.  Returns 0 on success or a negative
 * error code.
 */
int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
	struct amdgpu_ring *kiq_ring = &kiq->ring;
	int i, r = 0;
	int j;

	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
		return -EINVAL;

	/* serialize all packet submission on this KIQ ring */
	spin_lock(&kiq->ring_lock);
	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
					adev->gfx.num_compute_rings)) {
		spin_unlock(&kiq->ring_lock);
		return -ENOMEM;
	}

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		/* compute_ring[] is xcc-major; see amdgpu_gfx_mqd_sw_init() */
		j = i + xcc_id * adev->gfx.num_compute_rings;
		kiq->pmf->kiq_unmap_queues(kiq_ring,
					   &adev->gfx.compute_ring[j],
					   RESET_QUEUES, 0, 0);
	}

	/* skip the ring test while a job-hang reset is in progress */
	if (kiq_ring->sched.ready && !adev->job_hang)
		r = amdgpu_ring_test_helper(kiq_ring);
	spin_unlock(&kiq->ring_lock);

	return r;
}
53262306a36Sopenharmony_ci
53362306a36Sopenharmony_ciint amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id)
53462306a36Sopenharmony_ci{
53562306a36Sopenharmony_ci	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
53662306a36Sopenharmony_ci	struct amdgpu_ring *kiq_ring = &kiq->ring;
53762306a36Sopenharmony_ci	int i, r = 0;
53862306a36Sopenharmony_ci	int j;
53962306a36Sopenharmony_ci
54062306a36Sopenharmony_ci	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
54162306a36Sopenharmony_ci		return -EINVAL;
54262306a36Sopenharmony_ci
54362306a36Sopenharmony_ci	spin_lock(&kiq->ring_lock);
54462306a36Sopenharmony_ci	if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
54562306a36Sopenharmony_ci		if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
54662306a36Sopenharmony_ci						adev->gfx.num_gfx_rings)) {
54762306a36Sopenharmony_ci			spin_unlock(&kiq->ring_lock);
54862306a36Sopenharmony_ci			return -ENOMEM;
54962306a36Sopenharmony_ci		}
55062306a36Sopenharmony_ci
55162306a36Sopenharmony_ci		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
55262306a36Sopenharmony_ci			j = i + xcc_id * adev->gfx.num_gfx_rings;
55362306a36Sopenharmony_ci			kiq->pmf->kiq_unmap_queues(kiq_ring,
55462306a36Sopenharmony_ci						   &adev->gfx.gfx_ring[j],
55562306a36Sopenharmony_ci						   PREEMPT_QUEUES, 0, 0);
55662306a36Sopenharmony_ci		}
55762306a36Sopenharmony_ci	}
55862306a36Sopenharmony_ci
55962306a36Sopenharmony_ci	if (adev->gfx.kiq[0].ring.sched.ready && !adev->job_hang)
56062306a36Sopenharmony_ci		r = amdgpu_ring_test_helper(kiq_ring);
56162306a36Sopenharmony_ci	spin_unlock(&kiq->ring_lock);
56262306a36Sopenharmony_ci
56362306a36Sopenharmony_ci	return r;
56462306a36Sopenharmony_ci}
56562306a36Sopenharmony_ci
/* Translate a driver queue-bitmap bit into the bit position used by the
 * KIQ SET_RESOURCES packet, whose queue mask assumes a fixed layout of
 * 4 pipes x 8 queues per MEC regardless of the driver-side counts.
 */
int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
					int queue_bit)
{
	int mec, pipe, queue;

	amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);

	return queue + 8 * (pipe + 4 * mec);
}
57862306a36Sopenharmony_ci
57962306a36Sopenharmony_ciint amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
58062306a36Sopenharmony_ci{
58162306a36Sopenharmony_ci	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
58262306a36Sopenharmony_ci	struct amdgpu_ring *kiq_ring = &kiq->ring;
58362306a36Sopenharmony_ci	uint64_t queue_mask = 0;
58462306a36Sopenharmony_ci	int r, i, j;
58562306a36Sopenharmony_ci
58662306a36Sopenharmony_ci	if (!kiq->pmf || !kiq->pmf->kiq_map_queues || !kiq->pmf->kiq_set_resources)
58762306a36Sopenharmony_ci		return -EINVAL;
58862306a36Sopenharmony_ci
58962306a36Sopenharmony_ci	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
59062306a36Sopenharmony_ci		if (!test_bit(i, adev->gfx.mec_bitmap[xcc_id].queue_bitmap))
59162306a36Sopenharmony_ci			continue;
59262306a36Sopenharmony_ci
59362306a36Sopenharmony_ci		/* This situation may be hit in the future if a new HW
59462306a36Sopenharmony_ci		 * generation exposes more than 64 queues. If so, the
59562306a36Sopenharmony_ci		 * definition of queue_mask needs updating */
59662306a36Sopenharmony_ci		if (WARN_ON(i > (sizeof(queue_mask)*8))) {
59762306a36Sopenharmony_ci			DRM_ERROR("Invalid KCQ enabled: %d\n", i);
59862306a36Sopenharmony_ci			break;
59962306a36Sopenharmony_ci		}
60062306a36Sopenharmony_ci
60162306a36Sopenharmony_ci		queue_mask |= (1ull << amdgpu_queue_mask_bit_to_set_resource_bit(adev, i));
60262306a36Sopenharmony_ci	}
60362306a36Sopenharmony_ci
60462306a36Sopenharmony_ci	DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
60562306a36Sopenharmony_ci							kiq_ring->queue);
60662306a36Sopenharmony_ci	amdgpu_device_flush_hdp(adev, NULL);
60762306a36Sopenharmony_ci
60862306a36Sopenharmony_ci	spin_lock(&kiq->ring_lock);
60962306a36Sopenharmony_ci	r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
61062306a36Sopenharmony_ci					adev->gfx.num_compute_rings +
61162306a36Sopenharmony_ci					kiq->pmf->set_resources_size);
61262306a36Sopenharmony_ci	if (r) {
61362306a36Sopenharmony_ci		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
61462306a36Sopenharmony_ci		spin_unlock(&kiq->ring_lock);
61562306a36Sopenharmony_ci		return r;
61662306a36Sopenharmony_ci	}
61762306a36Sopenharmony_ci
61862306a36Sopenharmony_ci	if (adev->enable_mes)
61962306a36Sopenharmony_ci		queue_mask = ~0ULL;
62062306a36Sopenharmony_ci
62162306a36Sopenharmony_ci	kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
62262306a36Sopenharmony_ci	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
62362306a36Sopenharmony_ci		j = i + xcc_id * adev->gfx.num_compute_rings;
62462306a36Sopenharmony_ci			kiq->pmf->kiq_map_queues(kiq_ring,
62562306a36Sopenharmony_ci						 &adev->gfx.compute_ring[j]);
62662306a36Sopenharmony_ci	}
62762306a36Sopenharmony_ci
62862306a36Sopenharmony_ci	r = amdgpu_ring_test_helper(kiq_ring);
62962306a36Sopenharmony_ci	spin_unlock(&kiq->ring_lock);
63062306a36Sopenharmony_ci	if (r)
63162306a36Sopenharmony_ci		DRM_ERROR("KCQ enable failed\n");
63262306a36Sopenharmony_ci
63362306a36Sopenharmony_ci	return r;
63462306a36Sopenharmony_ci}
63562306a36Sopenharmony_ci
63662306a36Sopenharmony_ciint amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
63762306a36Sopenharmony_ci{
63862306a36Sopenharmony_ci	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
63962306a36Sopenharmony_ci	struct amdgpu_ring *kiq_ring = &kiq->ring;
64062306a36Sopenharmony_ci	int r, i, j;
64162306a36Sopenharmony_ci
64262306a36Sopenharmony_ci	if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
64362306a36Sopenharmony_ci		return -EINVAL;
64462306a36Sopenharmony_ci
64562306a36Sopenharmony_ci	amdgpu_device_flush_hdp(adev, NULL);
64662306a36Sopenharmony_ci
64762306a36Sopenharmony_ci	spin_lock(&kiq->ring_lock);
64862306a36Sopenharmony_ci	/* No need to map kcq on the slave */
64962306a36Sopenharmony_ci	if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
65062306a36Sopenharmony_ci		r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
65162306a36Sopenharmony_ci						adev->gfx.num_gfx_rings);
65262306a36Sopenharmony_ci		if (r) {
65362306a36Sopenharmony_ci			DRM_ERROR("Failed to lock KIQ (%d).\n", r);
65462306a36Sopenharmony_ci			spin_unlock(&kiq->ring_lock);
65562306a36Sopenharmony_ci			return r;
65662306a36Sopenharmony_ci		}
65762306a36Sopenharmony_ci
65862306a36Sopenharmony_ci		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
65962306a36Sopenharmony_ci			j = i + xcc_id * adev->gfx.num_gfx_rings;
66062306a36Sopenharmony_ci			kiq->pmf->kiq_map_queues(kiq_ring,
66162306a36Sopenharmony_ci						 &adev->gfx.gfx_ring[j]);
66262306a36Sopenharmony_ci		}
66362306a36Sopenharmony_ci	}
66462306a36Sopenharmony_ci
66562306a36Sopenharmony_ci	r = amdgpu_ring_test_helper(kiq_ring);
66662306a36Sopenharmony_ci	spin_unlock(&kiq->ring_lock);
66762306a36Sopenharmony_ci	if (r)
66862306a36Sopenharmony_ci		DRM_ERROR("KCQ enable failed\n");
66962306a36Sopenharmony_ci
67062306a36Sopenharmony_ci	return r;
67162306a36Sopenharmony_ci}
67262306a36Sopenharmony_ci
67362306a36Sopenharmony_ci/* amdgpu_gfx_off_ctrl - Handle gfx off feature enable/disable
67462306a36Sopenharmony_ci *
67562306a36Sopenharmony_ci * @adev: amdgpu_device pointer
67662306a36Sopenharmony_ci * @bool enable true: enable gfx off feature, false: disable gfx off feature
67762306a36Sopenharmony_ci *
67862306a36Sopenharmony_ci * 1. gfx off feature will be enabled by gfx ip after gfx cg gp enabled.
67962306a36Sopenharmony_ci * 2. other client can send request to disable gfx off feature, the request should be honored.
68062306a36Sopenharmony_ci * 3. other client can cancel their request of disable gfx off feature
68162306a36Sopenharmony_ci * 4. other client should not send request to enable gfx off feature before disable gfx off feature.
68262306a36Sopenharmony_ci */
68362306a36Sopenharmony_ci
68462306a36Sopenharmony_civoid amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
68562306a36Sopenharmony_ci{
68662306a36Sopenharmony_ci	unsigned long delay = GFX_OFF_DELAY_ENABLE;
68762306a36Sopenharmony_ci
68862306a36Sopenharmony_ci	if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
68962306a36Sopenharmony_ci		return;
69062306a36Sopenharmony_ci
69162306a36Sopenharmony_ci	mutex_lock(&adev->gfx.gfx_off_mutex);
69262306a36Sopenharmony_ci
69362306a36Sopenharmony_ci	if (enable) {
69462306a36Sopenharmony_ci		/* If the count is already 0, it means there's an imbalance bug somewhere.
69562306a36Sopenharmony_ci		 * Note that the bug may be in a different caller than the one which triggers the
69662306a36Sopenharmony_ci		 * WARN_ON_ONCE.
69762306a36Sopenharmony_ci		 */
69862306a36Sopenharmony_ci		if (WARN_ON_ONCE(adev->gfx.gfx_off_req_count == 0))
69962306a36Sopenharmony_ci			goto unlock;
70062306a36Sopenharmony_ci
70162306a36Sopenharmony_ci		adev->gfx.gfx_off_req_count--;
70262306a36Sopenharmony_ci
70362306a36Sopenharmony_ci		if (adev->gfx.gfx_off_req_count == 0 &&
70462306a36Sopenharmony_ci		    !adev->gfx.gfx_off_state) {
70562306a36Sopenharmony_ci			/* If going to s2idle, no need to wait */
70662306a36Sopenharmony_ci			if (adev->in_s0ix) {
70762306a36Sopenharmony_ci				if (!amdgpu_dpm_set_powergating_by_smu(adev,
70862306a36Sopenharmony_ci						AMD_IP_BLOCK_TYPE_GFX, true))
70962306a36Sopenharmony_ci					adev->gfx.gfx_off_state = true;
71062306a36Sopenharmony_ci			} else {
71162306a36Sopenharmony_ci				schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
71262306a36Sopenharmony_ci					      delay);
71362306a36Sopenharmony_ci			}
71462306a36Sopenharmony_ci		}
71562306a36Sopenharmony_ci	} else {
71662306a36Sopenharmony_ci		if (adev->gfx.gfx_off_req_count == 0) {
71762306a36Sopenharmony_ci			cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
71862306a36Sopenharmony_ci
71962306a36Sopenharmony_ci			if (adev->gfx.gfx_off_state &&
72062306a36Sopenharmony_ci			    !amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) {
72162306a36Sopenharmony_ci				adev->gfx.gfx_off_state = false;
72262306a36Sopenharmony_ci
72362306a36Sopenharmony_ci				if (adev->gfx.funcs->init_spm_golden) {
72462306a36Sopenharmony_ci					dev_dbg(adev->dev,
72562306a36Sopenharmony_ci						"GFXOFF is disabled, re-init SPM golden settings\n");
72662306a36Sopenharmony_ci					amdgpu_gfx_init_spm_golden(adev);
72762306a36Sopenharmony_ci				}
72862306a36Sopenharmony_ci			}
72962306a36Sopenharmony_ci		}
73062306a36Sopenharmony_ci
73162306a36Sopenharmony_ci		adev->gfx.gfx_off_req_count++;
73262306a36Sopenharmony_ci	}
73362306a36Sopenharmony_ci
73462306a36Sopenharmony_ciunlock:
73562306a36Sopenharmony_ci	mutex_unlock(&adev->gfx.gfx_off_mutex);
73662306a36Sopenharmony_ci}
73762306a36Sopenharmony_ci
73862306a36Sopenharmony_ciint amdgpu_set_gfx_off_residency(struct amdgpu_device *adev, bool value)
73962306a36Sopenharmony_ci{
74062306a36Sopenharmony_ci	int r = 0;
74162306a36Sopenharmony_ci
74262306a36Sopenharmony_ci	mutex_lock(&adev->gfx.gfx_off_mutex);
74362306a36Sopenharmony_ci
74462306a36Sopenharmony_ci	r = amdgpu_dpm_set_residency_gfxoff(adev, value);
74562306a36Sopenharmony_ci
74662306a36Sopenharmony_ci	mutex_unlock(&adev->gfx.gfx_off_mutex);
74762306a36Sopenharmony_ci
74862306a36Sopenharmony_ci	return r;
74962306a36Sopenharmony_ci}
75062306a36Sopenharmony_ci
75162306a36Sopenharmony_ciint amdgpu_get_gfx_off_residency(struct amdgpu_device *adev, u32 *value)
75262306a36Sopenharmony_ci{
75362306a36Sopenharmony_ci	int r = 0;
75462306a36Sopenharmony_ci
75562306a36Sopenharmony_ci	mutex_lock(&adev->gfx.gfx_off_mutex);
75662306a36Sopenharmony_ci
75762306a36Sopenharmony_ci	r = amdgpu_dpm_get_residency_gfxoff(adev, value);
75862306a36Sopenharmony_ci
75962306a36Sopenharmony_ci	mutex_unlock(&adev->gfx.gfx_off_mutex);
76062306a36Sopenharmony_ci
76162306a36Sopenharmony_ci	return r;
76262306a36Sopenharmony_ci}
76362306a36Sopenharmony_ci
76462306a36Sopenharmony_ciint amdgpu_get_gfx_off_entrycount(struct amdgpu_device *adev, u64 *value)
76562306a36Sopenharmony_ci{
76662306a36Sopenharmony_ci	int r = 0;
76762306a36Sopenharmony_ci
76862306a36Sopenharmony_ci	mutex_lock(&adev->gfx.gfx_off_mutex);
76962306a36Sopenharmony_ci
77062306a36Sopenharmony_ci	r = amdgpu_dpm_get_entrycount_gfxoff(adev, value);
77162306a36Sopenharmony_ci
77262306a36Sopenharmony_ci	mutex_unlock(&adev->gfx.gfx_off_mutex);
77362306a36Sopenharmony_ci
77462306a36Sopenharmony_ci	return r;
77562306a36Sopenharmony_ci}
77662306a36Sopenharmony_ci
77762306a36Sopenharmony_ciint amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value)
77862306a36Sopenharmony_ci{
77962306a36Sopenharmony_ci
78062306a36Sopenharmony_ci	int r = 0;
78162306a36Sopenharmony_ci
78262306a36Sopenharmony_ci	mutex_lock(&adev->gfx.gfx_off_mutex);
78362306a36Sopenharmony_ci
78462306a36Sopenharmony_ci	r = amdgpu_dpm_get_status_gfxoff(adev, value);
78562306a36Sopenharmony_ci
78662306a36Sopenharmony_ci	mutex_unlock(&adev->gfx.gfx_off_mutex);
78762306a36Sopenharmony_ci
78862306a36Sopenharmony_ci	return r;
78962306a36Sopenharmony_ci}
79062306a36Sopenharmony_ci
79162306a36Sopenharmony_ciint amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
79262306a36Sopenharmony_ci{
79362306a36Sopenharmony_ci	int r;
79462306a36Sopenharmony_ci
79562306a36Sopenharmony_ci	if (amdgpu_ras_is_supported(adev, ras_block->block)) {
79662306a36Sopenharmony_ci		if (!amdgpu_persistent_edc_harvesting_supported(adev))
79762306a36Sopenharmony_ci			amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX);
79862306a36Sopenharmony_ci
79962306a36Sopenharmony_ci		r = amdgpu_ras_block_late_init(adev, ras_block);
80062306a36Sopenharmony_ci		if (r)
80162306a36Sopenharmony_ci			return r;
80262306a36Sopenharmony_ci
80362306a36Sopenharmony_ci		if (adev->gfx.cp_ecc_error_irq.funcs) {
80462306a36Sopenharmony_ci			r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
80562306a36Sopenharmony_ci			if (r)
80662306a36Sopenharmony_ci				goto late_fini;
80762306a36Sopenharmony_ci		}
80862306a36Sopenharmony_ci	} else {
80962306a36Sopenharmony_ci		amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
81062306a36Sopenharmony_ci	}
81162306a36Sopenharmony_ci
81262306a36Sopenharmony_ci	return 0;
81362306a36Sopenharmony_cilate_fini:
81462306a36Sopenharmony_ci	amdgpu_ras_block_late_fini(adev, ras_block);
81562306a36Sopenharmony_ci	return r;
81662306a36Sopenharmony_ci}
81762306a36Sopenharmony_ci
81862306a36Sopenharmony_ciint amdgpu_gfx_ras_sw_init(struct amdgpu_device *adev)
81962306a36Sopenharmony_ci{
82062306a36Sopenharmony_ci	int err = 0;
82162306a36Sopenharmony_ci	struct amdgpu_gfx_ras *ras = NULL;
82262306a36Sopenharmony_ci
82362306a36Sopenharmony_ci	/* adev->gfx.ras is NULL, which means gfx does not
82462306a36Sopenharmony_ci	 * support ras function, then do nothing here.
82562306a36Sopenharmony_ci	 */
82662306a36Sopenharmony_ci	if (!adev->gfx.ras)
82762306a36Sopenharmony_ci		return 0;
82862306a36Sopenharmony_ci
82962306a36Sopenharmony_ci	ras = adev->gfx.ras;
83062306a36Sopenharmony_ci
83162306a36Sopenharmony_ci	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
83262306a36Sopenharmony_ci	if (err) {
83362306a36Sopenharmony_ci		dev_err(adev->dev, "Failed to register gfx ras block!\n");
83462306a36Sopenharmony_ci		return err;
83562306a36Sopenharmony_ci	}
83662306a36Sopenharmony_ci
83762306a36Sopenharmony_ci	strcpy(ras->ras_block.ras_comm.name, "gfx");
83862306a36Sopenharmony_ci	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__GFX;
83962306a36Sopenharmony_ci	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
84062306a36Sopenharmony_ci	adev->gfx.ras_if = &ras->ras_block.ras_comm;
84162306a36Sopenharmony_ci
84262306a36Sopenharmony_ci	/* If not define special ras_late_init function, use gfx default ras_late_init */
84362306a36Sopenharmony_ci	if (!ras->ras_block.ras_late_init)
84462306a36Sopenharmony_ci		ras->ras_block.ras_late_init = amdgpu_gfx_ras_late_init;
84562306a36Sopenharmony_ci
84662306a36Sopenharmony_ci	/* If not defined special ras_cb function, use default ras_cb */
84762306a36Sopenharmony_ci	if (!ras->ras_block.ras_cb)
84862306a36Sopenharmony_ci		ras->ras_block.ras_cb = amdgpu_gfx_process_ras_data_cb;
84962306a36Sopenharmony_ci
85062306a36Sopenharmony_ci	return 0;
85162306a36Sopenharmony_ci}
85262306a36Sopenharmony_ci
85362306a36Sopenharmony_ciint amdgpu_gfx_poison_consumption_handler(struct amdgpu_device *adev,
85462306a36Sopenharmony_ci						struct amdgpu_iv_entry *entry)
85562306a36Sopenharmony_ci{
85662306a36Sopenharmony_ci	if (adev->gfx.ras && adev->gfx.ras->poison_consumption_handler)
85762306a36Sopenharmony_ci		return adev->gfx.ras->poison_consumption_handler(adev, entry);
85862306a36Sopenharmony_ci
85962306a36Sopenharmony_ci	return 0;
86062306a36Sopenharmony_ci}
86162306a36Sopenharmony_ci
86262306a36Sopenharmony_ciint amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
86362306a36Sopenharmony_ci		void *err_data,
86462306a36Sopenharmony_ci		struct amdgpu_iv_entry *entry)
86562306a36Sopenharmony_ci{
86662306a36Sopenharmony_ci	/* TODO ue will trigger an interrupt.
86762306a36Sopenharmony_ci	 *
86862306a36Sopenharmony_ci	 * When “Full RAS” is enabled, the per-IP interrupt sources should
86962306a36Sopenharmony_ci	 * be disabled and the driver should only look for the aggregated
87062306a36Sopenharmony_ci	 * interrupt via sync flood
87162306a36Sopenharmony_ci	 */
87262306a36Sopenharmony_ci	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
87362306a36Sopenharmony_ci		kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
87462306a36Sopenharmony_ci		if (adev->gfx.ras && adev->gfx.ras->ras_block.hw_ops &&
87562306a36Sopenharmony_ci		    adev->gfx.ras->ras_block.hw_ops->query_ras_error_count)
87662306a36Sopenharmony_ci			adev->gfx.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);
87762306a36Sopenharmony_ci		amdgpu_ras_reset_gpu(adev);
87862306a36Sopenharmony_ci	}
87962306a36Sopenharmony_ci	return AMDGPU_RAS_SUCCESS;
88062306a36Sopenharmony_ci}
88162306a36Sopenharmony_ci
88262306a36Sopenharmony_ciint amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
88362306a36Sopenharmony_ci				  struct amdgpu_irq_src *source,
88462306a36Sopenharmony_ci				  struct amdgpu_iv_entry *entry)
88562306a36Sopenharmony_ci{
88662306a36Sopenharmony_ci	struct ras_common_if *ras_if = adev->gfx.ras_if;
88762306a36Sopenharmony_ci	struct ras_dispatch_if ih_data = {
88862306a36Sopenharmony_ci		.entry = entry,
88962306a36Sopenharmony_ci	};
89062306a36Sopenharmony_ci
89162306a36Sopenharmony_ci	if (!ras_if)
89262306a36Sopenharmony_ci		return 0;
89362306a36Sopenharmony_ci
89462306a36Sopenharmony_ci	ih_data.head = *ras_if;
89562306a36Sopenharmony_ci
89662306a36Sopenharmony_ci	DRM_ERROR("CP ECC ERROR IRQ\n");
89762306a36Sopenharmony_ci	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
89862306a36Sopenharmony_ci	return 0;
89962306a36Sopenharmony_ci}
90062306a36Sopenharmony_ci
90162306a36Sopenharmony_civoid amdgpu_gfx_ras_error_func(struct amdgpu_device *adev,
90262306a36Sopenharmony_ci		void *ras_error_status,
90362306a36Sopenharmony_ci		void (*func)(struct amdgpu_device *adev, void *ras_error_status,
90462306a36Sopenharmony_ci				int xcc_id))
90562306a36Sopenharmony_ci{
90662306a36Sopenharmony_ci	int i;
90762306a36Sopenharmony_ci	int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
90862306a36Sopenharmony_ci	uint32_t xcc_mask = GENMASK(num_xcc - 1, 0);
90962306a36Sopenharmony_ci	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
91062306a36Sopenharmony_ci
91162306a36Sopenharmony_ci	if (err_data) {
91262306a36Sopenharmony_ci		err_data->ue_count = 0;
91362306a36Sopenharmony_ci		err_data->ce_count = 0;
91462306a36Sopenharmony_ci	}
91562306a36Sopenharmony_ci
91662306a36Sopenharmony_ci	for_each_inst(i, xcc_mask)
91762306a36Sopenharmony_ci		func(adev, ras_error_status, i);
91862306a36Sopenharmony_ci}
91962306a36Sopenharmony_ci
92062306a36Sopenharmony_ciuint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
92162306a36Sopenharmony_ci{
92262306a36Sopenharmony_ci	signed long r, cnt = 0;
92362306a36Sopenharmony_ci	unsigned long flags;
92462306a36Sopenharmony_ci	uint32_t seq, reg_val_offs = 0, value = 0;
92562306a36Sopenharmony_ci	struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
92662306a36Sopenharmony_ci	struct amdgpu_ring *ring = &kiq->ring;
92762306a36Sopenharmony_ci
92862306a36Sopenharmony_ci	if (amdgpu_device_skip_hw_access(adev))
92962306a36Sopenharmony_ci		return 0;
93062306a36Sopenharmony_ci
93162306a36Sopenharmony_ci	if (adev->mes.ring.sched.ready)
93262306a36Sopenharmony_ci		return amdgpu_mes_rreg(adev, reg);
93362306a36Sopenharmony_ci
93462306a36Sopenharmony_ci	BUG_ON(!ring->funcs->emit_rreg);
93562306a36Sopenharmony_ci
93662306a36Sopenharmony_ci	spin_lock_irqsave(&kiq->ring_lock, flags);
93762306a36Sopenharmony_ci	if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
93862306a36Sopenharmony_ci		pr_err("critical bug! too many kiq readers\n");
93962306a36Sopenharmony_ci		goto failed_unlock;
94062306a36Sopenharmony_ci	}
94162306a36Sopenharmony_ci	amdgpu_ring_alloc(ring, 32);
94262306a36Sopenharmony_ci	amdgpu_ring_emit_rreg(ring, reg, reg_val_offs);
94362306a36Sopenharmony_ci	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
94462306a36Sopenharmony_ci	if (r)
94562306a36Sopenharmony_ci		goto failed_undo;
94662306a36Sopenharmony_ci
94762306a36Sopenharmony_ci	amdgpu_ring_commit(ring);
94862306a36Sopenharmony_ci	spin_unlock_irqrestore(&kiq->ring_lock, flags);
94962306a36Sopenharmony_ci
95062306a36Sopenharmony_ci	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
95162306a36Sopenharmony_ci
95262306a36Sopenharmony_ci	/* don't wait anymore for gpu reset case because this way may
95362306a36Sopenharmony_ci	 * block gpu_recover() routine forever, e.g. this virt_kiq_rreg
95462306a36Sopenharmony_ci	 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
95562306a36Sopenharmony_ci	 * never return if we keep waiting in virt_kiq_rreg, which cause
95662306a36Sopenharmony_ci	 * gpu_recover() hang there.
95762306a36Sopenharmony_ci	 *
95862306a36Sopenharmony_ci	 * also don't wait anymore for IRQ context
95962306a36Sopenharmony_ci	 * */
96062306a36Sopenharmony_ci	if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
96162306a36Sopenharmony_ci		goto failed_kiq_read;
96262306a36Sopenharmony_ci
96362306a36Sopenharmony_ci	might_sleep();
96462306a36Sopenharmony_ci	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
96562306a36Sopenharmony_ci		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
96662306a36Sopenharmony_ci		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
96762306a36Sopenharmony_ci	}
96862306a36Sopenharmony_ci
96962306a36Sopenharmony_ci	if (cnt > MAX_KIQ_REG_TRY)
97062306a36Sopenharmony_ci		goto failed_kiq_read;
97162306a36Sopenharmony_ci
97262306a36Sopenharmony_ci	mb();
97362306a36Sopenharmony_ci	value = adev->wb.wb[reg_val_offs];
97462306a36Sopenharmony_ci	amdgpu_device_wb_free(adev, reg_val_offs);
97562306a36Sopenharmony_ci	return value;
97662306a36Sopenharmony_ci
97762306a36Sopenharmony_cifailed_undo:
97862306a36Sopenharmony_ci	amdgpu_ring_undo(ring);
97962306a36Sopenharmony_cifailed_unlock:
98062306a36Sopenharmony_ci	spin_unlock_irqrestore(&kiq->ring_lock, flags);
98162306a36Sopenharmony_cifailed_kiq_read:
98262306a36Sopenharmony_ci	if (reg_val_offs)
98362306a36Sopenharmony_ci		amdgpu_device_wb_free(adev, reg_val_offs);
98462306a36Sopenharmony_ci	dev_err(adev->dev, "failed to read reg:%x\n", reg);
98562306a36Sopenharmony_ci	return ~0;
98662306a36Sopenharmony_ci}
98762306a36Sopenharmony_ci
98862306a36Sopenharmony_civoid amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
98962306a36Sopenharmony_ci{
99062306a36Sopenharmony_ci	signed long r, cnt = 0;
99162306a36Sopenharmony_ci	unsigned long flags;
99262306a36Sopenharmony_ci	uint32_t seq;
99362306a36Sopenharmony_ci	struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
99462306a36Sopenharmony_ci	struct amdgpu_ring *ring = &kiq->ring;
99562306a36Sopenharmony_ci
99662306a36Sopenharmony_ci	BUG_ON(!ring->funcs->emit_wreg);
99762306a36Sopenharmony_ci
99862306a36Sopenharmony_ci	if (amdgpu_device_skip_hw_access(adev))
99962306a36Sopenharmony_ci		return;
100062306a36Sopenharmony_ci
100162306a36Sopenharmony_ci	if (adev->mes.ring.sched.ready) {
100262306a36Sopenharmony_ci		amdgpu_mes_wreg(adev, reg, v);
100362306a36Sopenharmony_ci		return;
100462306a36Sopenharmony_ci	}
100562306a36Sopenharmony_ci
100662306a36Sopenharmony_ci	spin_lock_irqsave(&kiq->ring_lock, flags);
100762306a36Sopenharmony_ci	amdgpu_ring_alloc(ring, 32);
100862306a36Sopenharmony_ci	amdgpu_ring_emit_wreg(ring, reg, v);
100962306a36Sopenharmony_ci	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
101062306a36Sopenharmony_ci	if (r)
101162306a36Sopenharmony_ci		goto failed_undo;
101262306a36Sopenharmony_ci
101362306a36Sopenharmony_ci	amdgpu_ring_commit(ring);
101462306a36Sopenharmony_ci	spin_unlock_irqrestore(&kiq->ring_lock, flags);
101562306a36Sopenharmony_ci
101662306a36Sopenharmony_ci	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
101762306a36Sopenharmony_ci
101862306a36Sopenharmony_ci	/* don't wait anymore for gpu reset case because this way may
101962306a36Sopenharmony_ci	 * block gpu_recover() routine forever, e.g. this virt_kiq_rreg
102062306a36Sopenharmony_ci	 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
102162306a36Sopenharmony_ci	 * never return if we keep waiting in virt_kiq_rreg, which cause
102262306a36Sopenharmony_ci	 * gpu_recover() hang there.
102362306a36Sopenharmony_ci	 *
102462306a36Sopenharmony_ci	 * also don't wait anymore for IRQ context
102562306a36Sopenharmony_ci	 * */
102662306a36Sopenharmony_ci	if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
102762306a36Sopenharmony_ci		goto failed_kiq_write;
102862306a36Sopenharmony_ci
102962306a36Sopenharmony_ci	might_sleep();
103062306a36Sopenharmony_ci	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
103162306a36Sopenharmony_ci
103262306a36Sopenharmony_ci		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
103362306a36Sopenharmony_ci		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
103462306a36Sopenharmony_ci	}
103562306a36Sopenharmony_ci
103662306a36Sopenharmony_ci	if (cnt > MAX_KIQ_REG_TRY)
103762306a36Sopenharmony_ci		goto failed_kiq_write;
103862306a36Sopenharmony_ci
103962306a36Sopenharmony_ci	return;
104062306a36Sopenharmony_ci
104162306a36Sopenharmony_cifailed_undo:
104262306a36Sopenharmony_ci	amdgpu_ring_undo(ring);
104362306a36Sopenharmony_ci	spin_unlock_irqrestore(&kiq->ring_lock, flags);
104462306a36Sopenharmony_cifailed_kiq_write:
104562306a36Sopenharmony_ci	dev_err(adev->dev, "failed to write reg:%x\n", reg);
104662306a36Sopenharmony_ci}
104762306a36Sopenharmony_ci
104862306a36Sopenharmony_ciint amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev)
104962306a36Sopenharmony_ci{
105062306a36Sopenharmony_ci	if (amdgpu_num_kcq == -1) {
105162306a36Sopenharmony_ci		return 8;
105262306a36Sopenharmony_ci	} else if (amdgpu_num_kcq > 8 || amdgpu_num_kcq < 0) {
105362306a36Sopenharmony_ci		dev_warn(adev->dev, "set kernel compute queue number to 8 due to invalid parameter provided by user\n");
105462306a36Sopenharmony_ci		return 8;
105562306a36Sopenharmony_ci	}
105662306a36Sopenharmony_ci	return amdgpu_num_kcq;
105762306a36Sopenharmony_ci}
105862306a36Sopenharmony_ci
105962306a36Sopenharmony_civoid amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev,
106062306a36Sopenharmony_ci				  uint32_t ucode_id)
106162306a36Sopenharmony_ci{
106262306a36Sopenharmony_ci	const struct gfx_firmware_header_v1_0 *cp_hdr;
106362306a36Sopenharmony_ci	const struct gfx_firmware_header_v2_0 *cp_hdr_v2_0;
106462306a36Sopenharmony_ci	struct amdgpu_firmware_info *info = NULL;
106562306a36Sopenharmony_ci	const struct firmware *ucode_fw;
106662306a36Sopenharmony_ci	unsigned int fw_size;
106762306a36Sopenharmony_ci
106862306a36Sopenharmony_ci	switch (ucode_id) {
106962306a36Sopenharmony_ci	case AMDGPU_UCODE_ID_CP_PFP:
107062306a36Sopenharmony_ci		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
107162306a36Sopenharmony_ci			adev->gfx.pfp_fw->data;
107262306a36Sopenharmony_ci		adev->gfx.pfp_fw_version =
107362306a36Sopenharmony_ci			le32_to_cpu(cp_hdr->header.ucode_version);
107462306a36Sopenharmony_ci		adev->gfx.pfp_feature_version =
107562306a36Sopenharmony_ci			le32_to_cpu(cp_hdr->ucode_feature_version);
107662306a36Sopenharmony_ci		ucode_fw = adev->gfx.pfp_fw;
107762306a36Sopenharmony_ci		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
107862306a36Sopenharmony_ci		break;
107962306a36Sopenharmony_ci	case AMDGPU_UCODE_ID_CP_RS64_PFP:
108062306a36Sopenharmony_ci		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
108162306a36Sopenharmony_ci			adev->gfx.pfp_fw->data;
108262306a36Sopenharmony_ci		adev->gfx.pfp_fw_version =
108362306a36Sopenharmony_ci			le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
108462306a36Sopenharmony_ci		adev->gfx.pfp_feature_version =
108562306a36Sopenharmony_ci			le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
108662306a36Sopenharmony_ci		ucode_fw = adev->gfx.pfp_fw;
108762306a36Sopenharmony_ci		fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
108862306a36Sopenharmony_ci		break;
108962306a36Sopenharmony_ci	case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
109062306a36Sopenharmony_ci	case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
109162306a36Sopenharmony_ci		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
109262306a36Sopenharmony_ci			adev->gfx.pfp_fw->data;
109362306a36Sopenharmony_ci		ucode_fw = adev->gfx.pfp_fw;
109462306a36Sopenharmony_ci		fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
109562306a36Sopenharmony_ci		break;
109662306a36Sopenharmony_ci	case AMDGPU_UCODE_ID_CP_ME:
109762306a36Sopenharmony_ci		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
109862306a36Sopenharmony_ci			adev->gfx.me_fw->data;
109962306a36Sopenharmony_ci		adev->gfx.me_fw_version =
110062306a36Sopenharmony_ci			le32_to_cpu(cp_hdr->header.ucode_version);
110162306a36Sopenharmony_ci		adev->gfx.me_feature_version =
110262306a36Sopenharmony_ci			le32_to_cpu(cp_hdr->ucode_feature_version);
110362306a36Sopenharmony_ci		ucode_fw = adev->gfx.me_fw;
110462306a36Sopenharmony_ci		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
110562306a36Sopenharmony_ci		break;
110662306a36Sopenharmony_ci	case AMDGPU_UCODE_ID_CP_RS64_ME:
110762306a36Sopenharmony_ci		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
110862306a36Sopenharmony_ci			adev->gfx.me_fw->data;
110962306a36Sopenharmony_ci		adev->gfx.me_fw_version =
111062306a36Sopenharmony_ci			le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
111162306a36Sopenharmony_ci		adev->gfx.me_feature_version =
111262306a36Sopenharmony_ci			le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
111362306a36Sopenharmony_ci		ucode_fw = adev->gfx.me_fw;
111462306a36Sopenharmony_ci		fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
111562306a36Sopenharmony_ci		break;
111662306a36Sopenharmony_ci	case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
111762306a36Sopenharmony_ci	case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
111862306a36Sopenharmony_ci		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
111962306a36Sopenharmony_ci			adev->gfx.me_fw->data;
112062306a36Sopenharmony_ci		ucode_fw = adev->gfx.me_fw;
112162306a36Sopenharmony_ci		fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
112262306a36Sopenharmony_ci		break;
112362306a36Sopenharmony_ci	case AMDGPU_UCODE_ID_CP_CE:
112462306a36Sopenharmony_ci		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
112562306a36Sopenharmony_ci			adev->gfx.ce_fw->data;
112662306a36Sopenharmony_ci		adev->gfx.ce_fw_version =
112762306a36Sopenharmony_ci			le32_to_cpu(cp_hdr->header.ucode_version);
112862306a36Sopenharmony_ci		adev->gfx.ce_feature_version =
112962306a36Sopenharmony_ci			le32_to_cpu(cp_hdr->ucode_feature_version);
113062306a36Sopenharmony_ci		ucode_fw = adev->gfx.ce_fw;
113162306a36Sopenharmony_ci		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
113262306a36Sopenharmony_ci		break;
113362306a36Sopenharmony_ci	case AMDGPU_UCODE_ID_CP_MEC1:
113462306a36Sopenharmony_ci		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
113562306a36Sopenharmony_ci			adev->gfx.mec_fw->data;
113662306a36Sopenharmony_ci		adev->gfx.mec_fw_version =
113762306a36Sopenharmony_ci			le32_to_cpu(cp_hdr->header.ucode_version);
113862306a36Sopenharmony_ci		adev->gfx.mec_feature_version =
113962306a36Sopenharmony_ci			le32_to_cpu(cp_hdr->ucode_feature_version);
114062306a36Sopenharmony_ci		ucode_fw = adev->gfx.mec_fw;
114162306a36Sopenharmony_ci		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
114262306a36Sopenharmony_ci			  le32_to_cpu(cp_hdr->jt_size) * 4;
114362306a36Sopenharmony_ci		break;
114462306a36Sopenharmony_ci	case AMDGPU_UCODE_ID_CP_MEC1_JT:
114562306a36Sopenharmony_ci		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
114662306a36Sopenharmony_ci			adev->gfx.mec_fw->data;
114762306a36Sopenharmony_ci		ucode_fw = adev->gfx.mec_fw;
114862306a36Sopenharmony_ci		fw_size = le32_to_cpu(cp_hdr->jt_size) * 4;
114962306a36Sopenharmony_ci		break;
115062306a36Sopenharmony_ci	case AMDGPU_UCODE_ID_CP_MEC2:
115162306a36Sopenharmony_ci		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
115262306a36Sopenharmony_ci			adev->gfx.mec2_fw->data;
115362306a36Sopenharmony_ci		adev->gfx.mec2_fw_version =
115462306a36Sopenharmony_ci			le32_to_cpu(cp_hdr->header.ucode_version);
115562306a36Sopenharmony_ci		adev->gfx.mec2_feature_version =
115662306a36Sopenharmony_ci			le32_to_cpu(cp_hdr->ucode_feature_version);
115762306a36Sopenharmony_ci		ucode_fw = adev->gfx.mec2_fw;
115862306a36Sopenharmony_ci		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
115962306a36Sopenharmony_ci			  le32_to_cpu(cp_hdr->jt_size) * 4;
116062306a36Sopenharmony_ci		break;
116162306a36Sopenharmony_ci	case AMDGPU_UCODE_ID_CP_MEC2_JT:
116262306a36Sopenharmony_ci		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
116362306a36Sopenharmony_ci			adev->gfx.mec2_fw->data;
116462306a36Sopenharmony_ci		ucode_fw = adev->gfx.mec2_fw;
116562306a36Sopenharmony_ci		fw_size = le32_to_cpu(cp_hdr->jt_size) * 4;
116662306a36Sopenharmony_ci		break;
116762306a36Sopenharmony_ci	case AMDGPU_UCODE_ID_CP_RS64_MEC:
116862306a36Sopenharmony_ci		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
116962306a36Sopenharmony_ci			adev->gfx.mec_fw->data;
117062306a36Sopenharmony_ci		adev->gfx.mec_fw_version =
117162306a36Sopenharmony_ci			le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
117262306a36Sopenharmony_ci		adev->gfx.mec_feature_version =
117362306a36Sopenharmony_ci			le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
117462306a36Sopenharmony_ci		ucode_fw = adev->gfx.mec_fw;
117562306a36Sopenharmony_ci		fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
117662306a36Sopenharmony_ci		break;
117762306a36Sopenharmony_ci	case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
117862306a36Sopenharmony_ci	case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
117962306a36Sopenharmony_ci	case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
118062306a36Sopenharmony_ci	case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
118162306a36Sopenharmony_ci		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
118262306a36Sopenharmony_ci			adev->gfx.mec_fw->data;
118362306a36Sopenharmony_ci		ucode_fw = adev->gfx.mec_fw;
118462306a36Sopenharmony_ci		fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
118562306a36Sopenharmony_ci		break;
118662306a36Sopenharmony_ci	default:
118762306a36Sopenharmony_ci		break;
118862306a36Sopenharmony_ci	}
118962306a36Sopenharmony_ci
119062306a36Sopenharmony_ci	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
119162306a36Sopenharmony_ci		info = &adev->firmware.ucode[ucode_id];
119262306a36Sopenharmony_ci		info->ucode_id = ucode_id;
119362306a36Sopenharmony_ci		info->fw = ucode_fw;
119462306a36Sopenharmony_ci		adev->firmware.fw_size += ALIGN(fw_size, PAGE_SIZE);
119562306a36Sopenharmony_ci	}
119662306a36Sopenharmony_ci}
119762306a36Sopenharmony_ci
119862306a36Sopenharmony_cibool amdgpu_gfx_is_master_xcc(struct amdgpu_device *adev, int xcc_id)
119962306a36Sopenharmony_ci{
120062306a36Sopenharmony_ci	return !(xcc_id % (adev->gfx.num_xcc_per_xcp ?
120162306a36Sopenharmony_ci			adev->gfx.num_xcc_per_xcp : 1));
120262306a36Sopenharmony_ci}
120362306a36Sopenharmony_ci
120462306a36Sopenharmony_cistatic ssize_t amdgpu_gfx_get_current_compute_partition(struct device *dev,
120562306a36Sopenharmony_ci						struct device_attribute *addr,
120662306a36Sopenharmony_ci						char *buf)
120762306a36Sopenharmony_ci{
120862306a36Sopenharmony_ci	struct drm_device *ddev = dev_get_drvdata(dev);
120962306a36Sopenharmony_ci	struct amdgpu_device *adev = drm_to_adev(ddev);
121062306a36Sopenharmony_ci	int mode;
121162306a36Sopenharmony_ci
121262306a36Sopenharmony_ci	mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
121362306a36Sopenharmony_ci					       AMDGPU_XCP_FL_NONE);
121462306a36Sopenharmony_ci
121562306a36Sopenharmony_ci	return sysfs_emit(buf, "%s\n", amdgpu_gfx_compute_mode_desc(mode));
121662306a36Sopenharmony_ci}
121762306a36Sopenharmony_ci
121862306a36Sopenharmony_cistatic ssize_t amdgpu_gfx_set_compute_partition(struct device *dev,
121962306a36Sopenharmony_ci						struct device_attribute *addr,
122062306a36Sopenharmony_ci						const char *buf, size_t count)
122162306a36Sopenharmony_ci{
122262306a36Sopenharmony_ci	struct drm_device *ddev = dev_get_drvdata(dev);
122362306a36Sopenharmony_ci	struct amdgpu_device *adev = drm_to_adev(ddev);
122462306a36Sopenharmony_ci	enum amdgpu_gfx_partition mode;
122562306a36Sopenharmony_ci	int ret = 0, num_xcc;
122662306a36Sopenharmony_ci
122762306a36Sopenharmony_ci	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
122862306a36Sopenharmony_ci	if (num_xcc % 2 != 0)
122962306a36Sopenharmony_ci		return -EINVAL;
123062306a36Sopenharmony_ci
123162306a36Sopenharmony_ci	if (!strncasecmp("SPX", buf, strlen("SPX"))) {
123262306a36Sopenharmony_ci		mode = AMDGPU_SPX_PARTITION_MODE;
123362306a36Sopenharmony_ci	} else if (!strncasecmp("DPX", buf, strlen("DPX"))) {
123462306a36Sopenharmony_ci		/*
123562306a36Sopenharmony_ci		 * DPX mode needs AIDs to be in multiple of 2.
123662306a36Sopenharmony_ci		 * Each AID connects 2 XCCs.
123762306a36Sopenharmony_ci		 */
123862306a36Sopenharmony_ci		if (num_xcc%4)
123962306a36Sopenharmony_ci			return -EINVAL;
124062306a36Sopenharmony_ci		mode = AMDGPU_DPX_PARTITION_MODE;
124162306a36Sopenharmony_ci	} else if (!strncasecmp("TPX", buf, strlen("TPX"))) {
124262306a36Sopenharmony_ci		if (num_xcc != 6)
124362306a36Sopenharmony_ci			return -EINVAL;
124462306a36Sopenharmony_ci		mode = AMDGPU_TPX_PARTITION_MODE;
124562306a36Sopenharmony_ci	} else if (!strncasecmp("QPX", buf, strlen("QPX"))) {
124662306a36Sopenharmony_ci		if (num_xcc != 8)
124762306a36Sopenharmony_ci			return -EINVAL;
124862306a36Sopenharmony_ci		mode = AMDGPU_QPX_PARTITION_MODE;
124962306a36Sopenharmony_ci	} else if (!strncasecmp("CPX", buf, strlen("CPX"))) {
125062306a36Sopenharmony_ci		mode = AMDGPU_CPX_PARTITION_MODE;
125162306a36Sopenharmony_ci	} else {
125262306a36Sopenharmony_ci		return -EINVAL;
125362306a36Sopenharmony_ci	}
125462306a36Sopenharmony_ci
125562306a36Sopenharmony_ci	ret = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr, mode);
125662306a36Sopenharmony_ci
125762306a36Sopenharmony_ci	if (ret)
125862306a36Sopenharmony_ci		return ret;
125962306a36Sopenharmony_ci
126062306a36Sopenharmony_ci	return count;
126162306a36Sopenharmony_ci}
126262306a36Sopenharmony_ci
126362306a36Sopenharmony_cistatic ssize_t amdgpu_gfx_get_available_compute_partition(struct device *dev,
126462306a36Sopenharmony_ci						struct device_attribute *addr,
126562306a36Sopenharmony_ci						char *buf)
126662306a36Sopenharmony_ci{
126762306a36Sopenharmony_ci	struct drm_device *ddev = dev_get_drvdata(dev);
126862306a36Sopenharmony_ci	struct amdgpu_device *adev = drm_to_adev(ddev);
126962306a36Sopenharmony_ci	char *supported_partition;
127062306a36Sopenharmony_ci
127162306a36Sopenharmony_ci	/* TBD */
127262306a36Sopenharmony_ci	switch (NUM_XCC(adev->gfx.xcc_mask)) {
127362306a36Sopenharmony_ci	case 8:
127462306a36Sopenharmony_ci		supported_partition = "SPX, DPX, QPX, CPX";
127562306a36Sopenharmony_ci		break;
127662306a36Sopenharmony_ci	case 6:
127762306a36Sopenharmony_ci		supported_partition = "SPX, TPX, CPX";
127862306a36Sopenharmony_ci		break;
127962306a36Sopenharmony_ci	case 4:
128062306a36Sopenharmony_ci		supported_partition = "SPX, DPX, CPX";
128162306a36Sopenharmony_ci		break;
128262306a36Sopenharmony_ci	/* this seems only existing in emulation phase */
128362306a36Sopenharmony_ci	case 2:
128462306a36Sopenharmony_ci		supported_partition = "SPX, CPX";
128562306a36Sopenharmony_ci		break;
128662306a36Sopenharmony_ci	default:
128762306a36Sopenharmony_ci		supported_partition = "Not supported";
128862306a36Sopenharmony_ci		break;
128962306a36Sopenharmony_ci	}
129062306a36Sopenharmony_ci
129162306a36Sopenharmony_ci	return sysfs_emit(buf, "%s\n", supported_partition);
129262306a36Sopenharmony_ci}
129362306a36Sopenharmony_ci
/*
 * sysfs attribute "current_compute_partition" (rw): read returns the active
 * partition mode name; write requests a switch to a new mode.
 */
static DEVICE_ATTR(current_compute_partition, 0644,
		   amdgpu_gfx_get_current_compute_partition,
		   amdgpu_gfx_set_compute_partition);

/*
 * sysfs attribute "available_compute_partition" (ro): lists the partition
 * modes supported by this device's XCC configuration.
 */
static DEVICE_ATTR(available_compute_partition, 0444,
		   amdgpu_gfx_get_available_compute_partition, NULL);
130062306a36Sopenharmony_ci
130162306a36Sopenharmony_ciint amdgpu_gfx_sysfs_init(struct amdgpu_device *adev)
130262306a36Sopenharmony_ci{
130362306a36Sopenharmony_ci	int r;
130462306a36Sopenharmony_ci
130562306a36Sopenharmony_ci	r = device_create_file(adev->dev, &dev_attr_current_compute_partition);
130662306a36Sopenharmony_ci	if (r)
130762306a36Sopenharmony_ci		return r;
130862306a36Sopenharmony_ci
130962306a36Sopenharmony_ci	r = device_create_file(adev->dev, &dev_attr_available_compute_partition);
131062306a36Sopenharmony_ci
131162306a36Sopenharmony_ci	return r;
131262306a36Sopenharmony_ci}
131362306a36Sopenharmony_ci
/*
 * amdgpu_gfx_sysfs_fini - remove the compute-partition sysfs attributes
 * @adev: amdgpu device handle
 *
 * Counterpart of amdgpu_gfx_sysfs_init(); removes both attribute files.
 * device_remove_file() tolerates attributes that were never created.
 */
void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev)
{
	device_remove_file(adev->dev, &dev_attr_current_compute_partition);
	device_remove_file(adev->dev, &dev_attr_available_compute_partition);
}
1319