1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
3
4#include <linux/clk.h>
5#include <linux/interconnect.h>
6#include <linux/of_platform.h>
7#include <linux/platform_device.h>
8#include <linux/pm_domain.h>
9#include <linux/pm_opp.h>
10#include <soc/qcom/cmd-db.h>
11#include <drm/drm_gem.h>
12
13#include "a6xx_gpu.h"
14#include "a6xx_gmu.xml.h"
15#include "msm_gem.h"
16#include "msm_gpu_trace.h"
17#include "msm_mmu.h"
18
19static void a6xx_gmu_fault(struct a6xx_gmu *gmu)
20{
21	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
22	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
23	struct msm_gpu *gpu = &adreno_gpu->base;
24
25	/* FIXME: add a banner here */
26	gmu->hung = true;
27
28	/* Turn off the hangcheck timer while we are resetting */
29	del_timer(&gpu->hangcheck_timer);
30
31	/* Queue the GPU handler because we need to treat this as a recovery */
32	kthread_queue_work(gpu->worker, &gpu->recover_work);
33}
34
35static irqreturn_t a6xx_gmu_irq(int irq, void *data)
36{
37	struct a6xx_gmu *gmu = data;
38	u32 status;
39
40	status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS);
41	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status);
42
43	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE) {
44		dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");
45
46		a6xx_gmu_fault(gmu);
47	}
48
	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR)
50		dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n");
51
52	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
53		dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n",
54			gmu_read(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS));
55
56	return IRQ_HANDLED;
57}
58
59static irqreturn_t a6xx_hfi_irq(int irq, void *data)
60{
61	struct a6xx_gmu *gmu = data;
62	u32 status;
63
64	status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO);
65	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status);
66
67	if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) {
68		dev_err_ratelimited(gmu->dev, "GMU firmware fault\n");
69
70		a6xx_gmu_fault(gmu);
71	}
72
73	return IRQ_HANDLED;
74}
75
76bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu)
77{
78	u32 val;
79
80	/* This can be called from gpu state code so make sure GMU is valid */
81	if (!gmu->initialized)
82		return false;
83
84	val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
85
86	return !(val &
87		(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF |
88		A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SP_CLOCK_OFF));
89}
90
91/* Check to see if the GX rail is still powered */
92bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
93{
94	u32 val;
95
96	/* This can be called from gpu state code so make sure GMU is valid */
97	if (!gmu->initialized)
98		return false;
99
100	val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
101
102	return !(val &
103		(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
104		A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
105}
106
107void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp,
108		       bool suspended)
109{
110	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
111	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
112	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
113	u32 perf_index;
114	unsigned long gpu_freq;
115	int ret = 0;
116
117	gpu_freq = dev_pm_opp_get_freq(opp);
118
119	if (gpu_freq == gmu->freq)
120		return;
121
122	for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++)
123		if (gpu_freq == gmu->gpu_freqs[perf_index])
124			break;
125
126	gmu->current_perf_index = perf_index;
127	gmu->freq = gmu->gpu_freqs[perf_index];
128
129	trace_msm_gmu_freq_change(gmu->freq, perf_index);
130
131	/*
132	 * This can get called from devfreq while the hardware is idle. Don't
133	 * bring up the power if it isn't already active. All we're doing here
134	 * is updating the frequency so that when we come back online we're at
135	 * the right rate.
136	 */
137	if (suspended)
138		return;
139
140	if (!gmu->legacy) {
141		a6xx_hfi_set_freq(gmu, perf_index);
142		dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
143		return;
144	}
145
146	gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);
147
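	/*
	 * Request the new performance index: the low bits index the GPU
	 * frequency table, while the top nibble appears to be a clock-set
	 * option field expected by the legacy firmware.
	 */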
148	gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING,
149			((3 & 0xf) << 28) | perf_index);
150
151	/*
152	 * Send an invalid index as a vote for the bus bandwidth and let the
153	 * firmware decide on the right vote
154	 */
155	gmu_write(gmu, REG_A6XX_GMU_DCVS_BW_SETTING, 0xff);
156
157	/* Set and clear the OOB for DCVS to trigger the GMU */
158	a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET);
159	a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET);
160
161	ret = gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN);
162	if (ret)
163		dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);
164
165	dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
166}
167
168unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu)
169{
170	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
171	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
172	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
173
	return gmu->freq;
175}
176
177static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu)
178{
179	u32 val;
180	int local = gmu->idle_level;
181
182	/* SPTP and IFPC both report as IFPC */
183	if (gmu->idle_level == GMU_IDLE_STATE_SPTP)
184		local = GMU_IDLE_STATE_IFPC;
185
186	val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
187
188	if (val == local) {
189		if (gmu->idle_level != GMU_IDLE_STATE_IFPC ||
190			!a6xx_gmu_gx_is_on(gmu))
191			return true;
192	}
193
194	return false;
195}
196
197/* Wait for the GMU to get to its most idle state */
198int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu)
199{
200	return spin_until(a6xx_gmu_check_idle_level(gmu));
201}
202
203static int a6xx_gmu_start(struct a6xx_gmu *gmu)
204{
205	int ret;
206	u32 val;
207	u32 mask, reset_val;
208
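	/*
	 * The word at the end of DTCM appears to hold the GMU firmware
	 * version: older firmware (<= 0x20010004) signals boot completion with
	 * the 0xbabeface magic, newer firmware only sets the low status bits.
	 */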
209	val = gmu_read(gmu, REG_A6XX_GMU_CM3_DTCM_START + 0xff8);
210	if (val <= 0x20010004) {
211		mask = 0xffffffff;
212		reset_val = 0xbabeface;
213	} else {
214		mask = 0x1ff;
215		reset_val = 0x100;
216	}
217
218	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
219
220	/* Set the log wptr index
221	 * note: downstream saves the value in poweroff and restores it here
222	 */
223	gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP, 0);
224
225	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);
226
227	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val,
228		(val & mask) == reset_val, 100, 10000);
229
230	if (ret)
231		DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n");
232
233	return ret;
234}
235
236static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
237{
238	u32 val;
239	int ret;
240
241	gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1);
242
243	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
244		val & 1, 100, 10000);
245	if (ret)
246		DRM_DEV_ERROR(gmu->dev, "Unable to start the HFI queues\n");
247
248	return ret;
249}
250
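/*
 * Bit positions in the HOST2GMU_INTR_SET / GMU2HOST_INTR_INFO registers for
 * each OOB target. Newer (non-legacy) GMU firmware moved some of these, so
 * each entry carries both the legacy positions and the "_new" ones.
 */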
251struct a6xx_gmu_oob_bits {
252	int set, ack, set_new, ack_new, clear, clear_new;
253	const char *name;
254};
255
/* These are the interrupt / ack bits for each OOB request that are set
 * in a6xx_gmu_set_oob and a6xx_gmu_clear_oob
 */
259static const struct a6xx_gmu_oob_bits a6xx_gmu_oob_bits[] = {
260	[GMU_OOB_GPU_SET] = {
261		.name = "GPU_SET",
262		.set = 16,
263		.ack = 24,
264		.set_new = 30,
265		.ack_new = 31,
266		.clear = 24,
267		.clear_new = 31,
268	},
269
270	[GMU_OOB_PERFCOUNTER_SET] = {
271		.name = "PERFCOUNTER",
272		.set = 17,
273		.ack = 25,
274		.set_new = 28,
275		.ack_new = 30,
276		.clear = 25,
277		.clear_new = 29,
278	},
279
280	[GMU_OOB_BOOT_SLUMBER] = {
281		.name = "BOOT_SLUMBER",
282		.set = 22,
283		.ack = 30,
284		.clear = 30,
285	},
286
287	[GMU_OOB_DCVS_SET] = {
288		.name = "GPU_DCVS",
289		.set = 23,
290		.ack = 31,
291		.clear = 31,
292	},
293};
294
/* Trigger an OOB (out of band) request to the GMU */
296int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
297{
298	int ret;
299	u32 val;
300	int request, ack;
301
302	WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));
303
304	if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
305		return -EINVAL;
306
307	if (gmu->legacy) {
308		request = a6xx_gmu_oob_bits[state].set;
309		ack = a6xx_gmu_oob_bits[state].ack;
310	} else {
311		request = a6xx_gmu_oob_bits[state].set_new;
312		ack = a6xx_gmu_oob_bits[state].ack_new;
313		if (!request || !ack) {
314			DRM_DEV_ERROR(gmu->dev,
315				      "Invalid non-legacy GMU request %s\n",
316				      a6xx_gmu_oob_bits[state].name);
317			return -EINVAL;
318		}
319	}
320
	/* Trigger the requested OOB operation */
322	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request);
323
324	/* Wait for the acknowledge interrupt */
325	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
326		val & (1 << ack), 100, 10000);
327
328	if (ret)
329		DRM_DEV_ERROR(gmu->dev,
330			"Timeout waiting for GMU OOB set %s: 0x%x\n",
331				a6xx_gmu_oob_bits[state].name,
332				gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));
333
334	/* Clear the acknowledge interrupt */
335	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack);
336
337	return ret;
338}
339
340/* Clear a pending OOB state in the GMU */
341void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
342{
343	int bit;
344
345	WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));
346
347	if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
348		return;
349
350	if (gmu->legacy)
351		bit = a6xx_gmu_oob_bits[state].clear;
352	else
353		bit = a6xx_gmu_oob_bits[state].clear_new;
354
355	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << bit);
356}
357
/* Enable CPU control of SPTP power collapse */
359int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
360{
361	int ret;
362	u32 val;
363
364	if (!gmu->legacy)
365		return 0;
366
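	/*
	 * Request SPTPRAC power-up and poll the power/clock status bits until
	 * the block reports that it is on.
	 */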
367	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000);
368
369	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
370		(val & 0x38) == 0x28, 1, 100);
371
372	if (ret) {
373		DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
374			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
375	}
376
	return ret;
378}
379
/* Disable CPU control of SPTP power collapse */
381void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
382{
383	u32 val;
384	int ret;
385
386	if (!gmu->legacy)
387		return;
388
389	/* Make sure retention is on */
390	gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11));
391
392	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778001);
393
394	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
395		(val & 0x04), 100, 10000);
396
397	if (ret)
398		DRM_DEV_ERROR(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
399			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
400}
401
402/* Let the GMU know we are starting a boot sequence */
403static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu)
404{
405	u32 vote;
406
407	/* Let the GMU know we are getting ready for boot */
408	gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 0);
409
410	/* Choose the "default" power level as the highest available */
411	vote = gmu->gx_arc_votes[gmu->nr_gpu_freqs - 1];
412
413	gmu_write(gmu, REG_A6XX_GMU_GX_VOTE_IDX, vote & 0xff);
414	gmu_write(gmu, REG_A6XX_GMU_MX_VOTE_IDX, (vote >> 8) & 0xff);
415
416	/* Let the GMU know the boot sequence has started */
417	return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
418}
419
420/* Let the GMU know that we are about to go into slumber */
421static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
422{
423	int ret;
424
425	/* Disable the power counter so the GMU isn't busy */
426	gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);
427
428	/* Disable SPTP_PC if the CPU is responsible for it */
429	if (gmu->idle_level < GMU_IDLE_STATE_SPTP)
430		a6xx_sptprac_disable(gmu);
431
432	if (!gmu->legacy) {
433		ret = a6xx_hfi_send_prep_slumber(gmu);
434		goto out;
435	}
436
437	/* Tell the GMU to get ready to slumber */
438	gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1);
439
440	ret = a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
441	a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);
442
443	if (!ret) {
444		/* Check to see if the GMU really did slumber */
445		if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE)
446			!= 0x0f) {
447			DRM_DEV_ERROR(gmu->dev, "The GMU did not go into slumber\n");
448			ret = -ETIMEDOUT;
449		}
450	}
451
452out:
453	/* Put fence into allow mode */
454	gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
455	return ret;
456}
457
458static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
459{
460	int ret;
461	u32 val;
462
463	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1 << 1);
464	/* Wait for the register to finish posting */
465	wmb();
466
467	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
468		val & (1 << 1), 100, 10000);
469	if (ret) {
470		DRM_DEV_ERROR(gmu->dev, "Unable to power on the GPU RSC\n");
471		return ret;
472	}
473
474	ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
475		!val, 100, 10000);
476
477	if (ret) {
478		DRM_DEV_ERROR(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
479		return ret;
480	}
481
482	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
483
484	return 0;
485}
486
487static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
488{
489	int ret;
490	u32 val;
491
492	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);
493
494	ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
495		val, val & (1 << 16), 100, 10000);
496	if (ret)
497		DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n");
498
499	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
500}
501
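/* PDC registers are indexed by dword, so scale the offset to a byte offset */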
502static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value)
503{
504	msm_writel(value, ptr + (offset << 2));
505}
506
507static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
508		const char *name);
509
510static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
511{
512	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
513	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
514	struct platform_device *pdev = to_platform_device(gmu->dev);
515	void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
516	void __iomem *seqptr = NULL;
517	uint32_t pdc_address_offset;
518	bool pdc_in_aop = false;
519
520	if (IS_ERR(pdcptr))
521		goto err;
522
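	/*
	 * On a650/a660-family parts the AOP firmware programs the PDC, so the
	 * PDC sequencer/TCS setup below is skipped; the other parts also need
	 * a per-SoC PDC address offset.
	 */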
523	if (adreno_is_a650(adreno_gpu) || adreno_is_a660_family(adreno_gpu))
524		pdc_in_aop = true;
525	else if (adreno_is_a618(adreno_gpu) || adreno_is_a640_family(adreno_gpu))
526		pdc_address_offset = 0x30090;
527	else if (adreno_is_a619(adreno_gpu))
528		pdc_address_offset = 0x300a0;
529	else
530		pdc_address_offset = 0x30080;
531
532	if (!pdc_in_aop) {
533		seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq");
534		if (IS_ERR(seqptr))
535			goto err;
536	}
537
538	/* Disable SDE clock gating */
539	gmu_write_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));
540
541	/* Setup RSC PDC handshake for sleep and wakeup */
542	gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
543	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
544	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
545	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0);
546	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0);
547	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000);
548	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0);
549	gmu_write_rscc(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0);
550	gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
551	gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
552	gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);
553
554	/* Load RSC sequencer uCode for sleep and wakeup */
555	if (adreno_is_a650_family(adreno_gpu)) {
556		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xeaaae5a0);
557		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xe1a1ebab);
558		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e0a581);
559		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xecac82e2);
560		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020edad);
561	} else {
562		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0);
563		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7);
564		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1);
565		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2);
566		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8);
567	}
568
569	if (pdc_in_aop)
570		goto setup_pdc;
571
572	/* Load PDC sequencer uCode for power up and power down sequence */
573	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1);
574	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2);
575	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 2, 0x8382a6e0);
576	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 3, 0xbce3e284);
577	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 4, 0x002081fc);
578
579	/* Set TCS commands used by PDC sequence for low power modes */
580	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK, 7);
581	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0);
582	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CONTROL, 0);
583	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID, 0x10108);
584	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR, 0x30010);
585	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA, 1);
586	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108);
587	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000);
588	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);
589
590	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
591	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, pdc_address_offset);
592	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);
593
594	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
595	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
596	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0);
597	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
598	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
599	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2);
600
601	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
602	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
603	if (adreno_is_a618(adreno_gpu) || adreno_is_a619(adreno_gpu) ||
604			adreno_is_a650_family(adreno_gpu))
605		pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x2);
606	else
607		pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
608	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
609	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, pdc_address_offset);
610	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);
611
612	/* Setup GPU PDC */
613setup_pdc:
614	pdc_write(pdcptr, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0);
615	pdc_write(pdcptr, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001);
616
617	/* ensure no writes happen before the uCode is fully written */
618	wmb();
619
620	a6xx_rpmh_stop(gmu);
621
622err:
623	if (!IS_ERR_OR_NULL(pdcptr))
624		iounmap(pdcptr);
625	if (!IS_ERR_OR_NULL(seqptr))
626		iounmap(seqptr);
627}
628
629/*
630 * The lowest 16 bits of this value are the number of XO clock cycles for main
 * hysteresis, which is set at 0x1680 cycles (300 us). The upper 16 bits are
 * for the shorter hysteresis that follows the main one - this is 0xa (0.5 us)
633 */
634
635#define GMU_PWR_COL_HYST 0x000a1680
636
637/* Set up the idle state for the GMU */
638static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
639{
640	/* Disable GMU WB/RB buffer */
641	gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1);
642	gmu_write(gmu, REG_A6XX_GMU_ICACHE_CONFIG, 0x1);
643	gmu_write(gmu, REG_A6XX_GMU_DCACHE_CONFIG, 0x1);
644
645	gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400);
646
647	switch (gmu->idle_level) {
648	case GMU_IDLE_STATE_IFPC:
649		gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
650			GMU_PWR_COL_HYST);
651		gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
652			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
653			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE);
654		fallthrough;
655	case GMU_IDLE_STATE_SPTP:
656		gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST,
657			GMU_PWR_COL_HYST);
658		gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
659			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
660			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_SPTPRAC_POWER_CONTROL_ENABLE);
661	}
662
663	/* Enable RPMh GPU client */
664	gmu_rmw(gmu, REG_A6XX_GMU_RPMH_CTRL, 0,
665		A6XX_GMU_RPMH_CTRL_RPMH_INTERFACE_ENABLE |
666		A6XX_GMU_RPMH_CTRL_LLC_VOTE_ENABLE |
667		A6XX_GMU_RPMH_CTRL_DDR_VOTE_ENABLE |
668		A6XX_GMU_RPMH_CTRL_MX_VOTE_ENABLE |
669		A6XX_GMU_RPMH_CTRL_CX_VOTE_ENABLE |
670		A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE);
671}
672
673struct block_header {
674	u32 addr;
675	u32 size;
676	u32 type;
677	u32 value;
678	u32 data[];
679};
680
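/* Copy a firmware block into the preallocated GMU BO that covers its address */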
681static bool fw_block_mem(struct a6xx_gmu_bo *bo, const struct block_header *blk)
682{
683	if (!in_range(blk->addr, bo->iova, bo->size))
684		return false;
685
686	memcpy(bo->virt + blk->addr - bo->iova, blk->data, blk->size);
687	return true;
688}
689
690static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
691{
692	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
693	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
694	const struct firmware *fw_image = adreno_gpu->fw[ADRENO_FW_GMU];
695	const struct block_header *blk;
696	u32 reg_offset;
697
698	u32 itcm_base = 0x00000000;
699	u32 dtcm_base = 0x00040000;
700
701	if (adreno_is_a650_family(adreno_gpu))
702		dtcm_base = 0x10004000;
703
704	if (gmu->legacy) {
705		/* Sanity check the size of the firmware that was loaded */
706		if (fw_image->size > 0x8000) {
707			DRM_DEV_ERROR(gmu->dev,
708				"GMU firmware is bigger than the available region\n");
709			return -EINVAL;
710		}
711
712		gmu_write_bulk(gmu, REG_A6XX_GMU_CM3_ITCM_START,
713			       (u32*) fw_image->data, fw_image->size);
714		return 0;
715	}
716
717
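	/*
	 * Non-legacy GMU firmware is a stream of block headers: load each
	 * block into ITCM/DTCM directly, or into whichever preallocated GMU
	 * BO covers its target address.
	 */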
718	for (blk = (const struct block_header *) fw_image->data;
719	     (const u8*) blk < fw_image->data + fw_image->size;
720	     blk = (const struct block_header *) &blk->data[blk->size >> 2]) {
721		if (blk->size == 0)
722			continue;
723
724		if (in_range(blk->addr, itcm_base, SZ_16K)) {
725			reg_offset = (blk->addr - itcm_base) >> 2;
726			gmu_write_bulk(gmu,
727				REG_A6XX_GMU_CM3_ITCM_START + reg_offset,
728				blk->data, blk->size);
729		} else if (in_range(blk->addr, dtcm_base, SZ_16K)) {
730			reg_offset = (blk->addr - dtcm_base) >> 2;
731			gmu_write_bulk(gmu,
732				REG_A6XX_GMU_CM3_DTCM_START + reg_offset,
733				blk->data, blk->size);
734		} else if (!fw_block_mem(&gmu->icache, blk) &&
735			   !fw_block_mem(&gmu->dcache, blk) &&
736			   !fw_block_mem(&gmu->dummy, blk)) {
737			DRM_DEV_ERROR(gmu->dev,
738				"failed to match fw block (addr=%.8x size=%d data[0]=%.8x)\n",
739				blk->addr, blk->size, blk->data[0]);
740		}
741	}
742
743	return 0;
744}
745
746static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
747{
748	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
749	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
750	int ret;
751	u32 chipid;
752
753	if (adreno_is_a650_family(adreno_gpu)) {
754		gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, 1);
755		gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 1);
756	}
757
758	if (state == GMU_WARM_BOOT) {
759		ret = a6xx_rpmh_start(gmu);
760		if (ret)
761			return ret;
762	} else {
763		if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU],
764			"GMU firmware is not loaded\n"))
765			return -ENOENT;
766
767		/* Turn on register retention */
768		gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);
769
770		ret = a6xx_rpmh_start(gmu);
771		if (ret)
772			return ret;
773
774		ret = a6xx_gmu_fw_load(gmu);
775		if (ret)
776			return ret;
777	}
778
779	gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0);
780	gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02);
781
782	/* Write the iova of the HFI table */
783	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi.iova);
784	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1);
785
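	/*
	 * Program the AHB fence range; presumably bit 31 enables the fence and
	 * the remaining fields describe the address range it protects.
	 */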
786	gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0,
787		(1 << 31) | (0xa << 18) | (0xa0));
788
789	/*
790	 * Snapshots toggle the NMI bit which will result in a jump to the NMI
791	 * handler instead of __main. Set the M3 config value to avoid that.
792	 */
793	gmu_write(gmu, REG_A6XX_GMU_CM3_CFG, 0x4052);
794
795	/*
796	 * Note that the GMU has a slightly different layout for
797	 * chip_id, for whatever reason, so a bit of massaging
798	 * is needed.  The upper 16b are the same, but minor and
799	 * patchid are packed in four bits each with the lower
800	 * 8b unused:
801	 */
802	chipid  = adreno_gpu->chip_id & 0xffff0000;
803	chipid |= (adreno_gpu->chip_id << 4) & 0xf000; /* minor */
804	chipid |= (adreno_gpu->chip_id << 8) & 0x0f00; /* patchid */
805
806	gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid);
807
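	/*
	 * Tell the GMU where the log buffer is: the iova with the size (in 4K
	 * pages, minus one) packed into the low bits.
	 */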
808	gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG,
809		  gmu->log.iova | (gmu->log.size / SZ_4K - 1));
810
811	/* Set up the lowest idle level on the GMU */
812	a6xx_gmu_power_config(gmu);
813
814	ret = a6xx_gmu_start(gmu);
815	if (ret)
816		return ret;
817
818	if (gmu->legacy) {
819		ret = a6xx_gmu_gfx_rail_on(gmu);
820		if (ret)
821			return ret;
822	}
823
824	/* Enable SPTP_PC if the CPU is responsible for it */
825	if (gmu->idle_level < GMU_IDLE_STATE_SPTP) {
826		ret = a6xx_sptprac_enable(gmu);
827		if (ret)
828			return ret;
829	}
830
831	ret = a6xx_gmu_hfi_start(gmu);
832	if (ret)
833		return ret;
834
835	/* FIXME: Do we need this wmb() here? */
836	wmb();
837
838	return 0;
839}
840
841#define A6XX_HFI_IRQ_MASK \
842	(A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT)
843
844#define A6XX_GMU_IRQ_MASK \
845	(A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE | \
846	 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR | \
847	 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
848
849static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu)
850{
851	disable_irq(gmu->gmu_irq);
852	disable_irq(gmu->hfi_irq);
853
854	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~0);
855	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0);
856}
857
858static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
859{
860	u32 val;
861
862	/* Make sure there are no outstanding RPMh votes */
863	gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val,
864		(val & 1), 100, 10000);
865	gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val,
866		(val & 1), 100, 10000);
867	gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val,
868		(val & 1), 100, 10000);
869	gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val,
870		(val & 1), 100, 1000);
871}
872
873/* Force the GMU off in case it isn't responsive */
874static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
875{
876	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
877	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
878	struct msm_gpu *gpu = &adreno_gpu->base;
879
880	/*
881	 * Turn off keep alive that might have been enabled by the hang
882	 * interrupt
883	 */
884	gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 0);
885
886	/* Flush all the queues */
887	a6xx_hfi_stop(gmu);
888
889	/* Stop the interrupts */
890	a6xx_gmu_irq_disable(gmu);
891
892	/* Force off SPTP in case the GMU is managing it */
893	a6xx_sptprac_disable(gmu);
894
895	/* Make sure there are no outstanding RPMh votes */
896	a6xx_gmu_rpmh_off(gmu);
897
898	/* Clear the WRITEDROPPED fields and put fence into allow mode */
899	gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS_CLR, 0x7);
900	gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
901
902	/* Make sure the above writes go through */
903	wmb();
904
	/* Halt the GMU CM3 core */
906	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
907
908	a6xx_bus_clear_pending_transactions(adreno_gpu, true);
909
910	/* Reset GPU core blocks */
911	a6xx_gpu_sw_reset(gpu, true);
912}
913
914static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
915{
916	struct dev_pm_opp *gpu_opp;
917	unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index];
918
919	gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true);
920	if (IS_ERR(gpu_opp))
921		return;
922
923	gmu->freq = 0; /* so a6xx_gmu_set_freq() doesn't exit early */
924	a6xx_gmu_set_freq(gpu, gpu_opp, false);
925	dev_pm_opp_put(gpu_opp);
926}
927
928static void a6xx_gmu_set_initial_bw(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
929{
930	struct dev_pm_opp *gpu_opp;
931	unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index];
932
933	gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true);
934	if (IS_ERR(gpu_opp))
935		return;
936
937	dev_pm_opp_set_opp(&gpu->pdev->dev, gpu_opp);
938	dev_pm_opp_put(gpu_opp);
939}
940
941int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
942{
943	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
944	struct msm_gpu *gpu = &adreno_gpu->base;
945	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
946	int status, ret;
947
948	if (WARN(!gmu->initialized, "The GMU is not set up yet\n"))
949		return -EINVAL;
950
951	gmu->hung = false;
952
953	/* Turn on the resources */
954	pm_runtime_get_sync(gmu->dev);
955
	/*
	 * "Enable" the GX power domain. This doesn't actually do anything yet,
	 * but it keeps the refcounting correct in case we need to bring down
	 * GX after a GMU failure.
	 */
961	if (!IS_ERR_OR_NULL(gmu->gxpd))
962		pm_runtime_get_sync(gmu->gxpd);
963
964	/* Use a known rate to bring up the GMU */
965	clk_set_rate(gmu->core_clk, 200000000);
966	clk_set_rate(gmu->hub_clk, 150000000);
967	ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
968	if (ret) {
969		pm_runtime_put(gmu->gxpd);
970		pm_runtime_put(gmu->dev);
971		return ret;
972	}
973
974	/* Set the bus quota to a reasonable value for boot */
975	a6xx_gmu_set_initial_bw(gpu, gmu);
976
977	/* Enable the GMU interrupt */
978	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0);
979	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~A6XX_GMU_IRQ_MASK);
980	enable_irq(gmu->gmu_irq);
981
982	/* Check to see if we are doing a cold or warm boot */
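	/*
	 * GENERAL_7 is written to 1 during a cold boot in a6xx_gmu_fw_start()
	 * ("register retention"); if it still reads 1 here the GMU state
	 * survived the power cycle and a warm boot is possible.
	 */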
983	status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
984		GMU_WARM_BOOT : GMU_COLD_BOOT;
985
986	/*
987	 * Warm boot path does not work on newer GPUs
988	 * Presumably this is because icache/dcache regions must be restored
989	 */
990	if (!gmu->legacy)
991		status = GMU_COLD_BOOT;
992
993	ret = a6xx_gmu_fw_start(gmu, status);
994	if (ret)
995		goto out;
996
997	ret = a6xx_hfi_start(gmu, status);
998	if (ret)
999		goto out;
1000
1001	/*
1002	 * Turn on the GMU firmware fault interrupt after we know the boot
1003	 * sequence is successful
1004	 */
1005	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0);
1006	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~A6XX_HFI_IRQ_MASK);
1007	enable_irq(gmu->hfi_irq);
1008
1009	/* Set the GPU to the current freq */
1010	a6xx_gmu_set_initial_freq(gpu, gmu);
1011
1012out:
1013	/* On failure, shut down the GMU to leave it in a good state */
1014	if (ret) {
1015		disable_irq(gmu->gmu_irq);
1016		a6xx_rpmh_stop(gmu);
1017		pm_runtime_put(gmu->gxpd);
1018		pm_runtime_put(gmu->dev);
1019	}
1020
1021	return ret;
1022}
1023
1024bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
1025{
1026	u32 reg;
1027
1028	if (!gmu->initialized)
1029		return true;
1030
1031	reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS);
1032
	if (reg & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB)
1034		return false;
1035
1036	return true;
1037}
1038
1039/* Gracefully try to shut down the GMU and by extension the GPU */
1040static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
1041{
1042	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
1043	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
1044	u32 val;
1045
	/*
	 * The GMU may still be in slumber if the GPU never started, so check
	 * and skip putting it back into slumber in that case (0xf is the RPMh
	 * power state reported while slumbering, see a6xx_gmu_notify_slumber())
	 */
1050	val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
1051
1052	if (val != 0xf) {
1053		int ret = a6xx_gmu_wait_for_idle(gmu);
1054
1055		/* If the GMU isn't responding assume it is hung */
1056		if (ret) {
1057			a6xx_gmu_force_off(gmu);
1058			return;
1059		}
1060
1061		a6xx_bus_clear_pending_transactions(adreno_gpu, a6xx_gpu->hung);
1062
1063		/* tell the GMU we want to slumber */
1064		ret = a6xx_gmu_notify_slumber(gmu);
1065		if (ret) {
1066			a6xx_gmu_force_off(gmu);
1067			return;
1068		}
1069
1070		ret = gmu_poll_timeout(gmu,
1071			REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
1072			!(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
1073			100, 10000);
1074
1075		/*
1076		 * Let the user know we failed to slumber but don't worry too
1077		 * much because we are powering down anyway
1078		 */
1079
1080		if (ret)
1081			DRM_DEV_ERROR(gmu->dev,
1082				"Unable to slumber GMU: status = 0%x/0%x\n",
1083				gmu_read(gmu,
1084					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
1085				gmu_read(gmu,
1086					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
1087	}
1088
1089	/* Turn off HFI */
1090	a6xx_hfi_stop(gmu);
1091
1092	/* Stop the interrupts and mask the hardware */
1093	a6xx_gmu_irq_disable(gmu);
1094
1095	/* Tell RPMh to power off the GPU */
1096	a6xx_rpmh_stop(gmu);
1097}
1098
1099
1100int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
1101{
1102	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
1103	struct msm_gpu *gpu = &a6xx_gpu->base.base;
1104
1105	if (!pm_runtime_active(gmu->dev))
1106		return 0;
1107
1108	/*
1109	 * Force the GMU off if we detected a hang, otherwise try to shut it
1110	 * down gracefully
1111	 */
1112	if (gmu->hung)
1113		a6xx_gmu_force_off(gmu);
1114	else
1115		a6xx_gmu_shutdown(gmu);
1116
1117	/* Remove the bus vote */
1118	dev_pm_opp_set_opp(&gpu->pdev->dev, NULL);
1119
1120	/*
1121	 * Make sure the GX domain is off before turning off the GMU (CX)
1122	 * domain. Usually the GMU does this but only if the shutdown sequence
1123	 * was successful
1124	 */
1125	if (!IS_ERR_OR_NULL(gmu->gxpd))
1126		pm_runtime_put_sync(gmu->gxpd);
1127
1128	clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);
1129
1130	pm_runtime_put_sync(gmu->dev);
1131
1132	return 0;
1133}
1134
1135static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu)
1136{
1137	msm_gem_kernel_put(gmu->hfi.obj, gmu->aspace);
1138	msm_gem_kernel_put(gmu->debug.obj, gmu->aspace);
1139	msm_gem_kernel_put(gmu->icache.obj, gmu->aspace);
1140	msm_gem_kernel_put(gmu->dcache.obj, gmu->aspace);
1141	msm_gem_kernel_put(gmu->dummy.obj, gmu->aspace);
1142	msm_gem_kernel_put(gmu->log.obj, gmu->aspace);
1143
1144	gmu->aspace->mmu->funcs->detach(gmu->aspace->mmu);
1145	msm_gem_address_space_put(gmu->aspace);
1146}
1147
1148static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
1149		size_t size, u64 iova, const char *name)
1150{
1151	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
1152	struct drm_device *dev = a6xx_gpu->base.base.dev;
1153	uint32_t flags = MSM_BO_WC;
1154	u64 range_start, range_end;
1155	int ret;
1156
1157	size = PAGE_ALIGN(size);
1158	if (!iova) {
1159		/* no fixed address - use GMU's uncached range */
1160		range_start = 0x60000000 + PAGE_SIZE; /* skip dummy page */
1161		range_end = 0x80000000;
1162	} else {
1163		/* range for fixed address */
1164		range_start = iova;
1165		range_end = iova + size;
1166		/* use IOMMU_PRIV for icache/dcache */
1167		flags |= MSM_BO_MAP_PRIV;
1168	}
1169
1170	bo->obj = msm_gem_new(dev, size, flags);
1171	if (IS_ERR(bo->obj))
1172		return PTR_ERR(bo->obj);
1173
1174	ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->aspace, &bo->iova,
1175					     range_start, range_end);
1176	if (ret) {
1177		drm_gem_object_put(bo->obj);
1178		return ret;
1179	}
1180
1181	bo->virt = msm_gem_get_vaddr(bo->obj);
1182	bo->size = size;
1183
1184	msm_gem_object_set_name(bo->obj, name);
1185
1186	return 0;
1187}
1188
1189static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
1190{
1191	struct msm_mmu *mmu;
1192
1193	mmu = msm_iommu_new(gmu->dev, 0);
1194	if (!mmu)
1195		return -ENODEV;
1196	if (IS_ERR(mmu))
1197		return PTR_ERR(mmu);
1198
1199	gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x80000000);
1200	if (IS_ERR(gmu->aspace))
1201		return PTR_ERR(gmu->aspace);
1202
1203	return 0;
1204}
1205
1206/* Return the 'arc-level' for the given frequency */
1207static unsigned int a6xx_gmu_get_arc_level(struct device *dev,
1208					   unsigned long freq)
1209{
1210	struct dev_pm_opp *opp;
1211	unsigned int val;
1212
1213	if (!freq)
1214		return 0;
1215
1216	opp = dev_pm_opp_find_freq_exact(dev, freq, true);
1217	if (IS_ERR(opp))
1218		return 0;
1219
1220	val = dev_pm_opp_get_level(opp);
1221
1222	dev_pm_opp_put(opp);
1223
1224	return val;
1225}
1226
1227static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
1228		unsigned long *freqs, int freqs_count, const char *id)
1229{
1230	int i, j;
1231	const u16 *pri, *sec;
1232	size_t pri_count, sec_count;
1233
1234	pri = cmd_db_read_aux_data(id, &pri_count);
1235	if (IS_ERR(pri))
1236		return PTR_ERR(pri);
1237	/*
1238	 * The data comes back as an array of unsigned shorts so adjust the
1239	 * count accordingly
1240	 */
1241	pri_count >>= 1;
1242	if (!pri_count)
1243		return -EINVAL;
1244
1245	sec = cmd_db_read_aux_data("mx.lvl", &sec_count);
1246	if (IS_ERR(sec))
1247		return PTR_ERR(sec);
1248
1249	sec_count >>= 1;
1250	if (!sec_count)
1251		return -EINVAL;
1252
1253	/* Construct a vote for each frequency */
1254	for (i = 0; i < freqs_count; i++) {
1255		u8 pindex = 0, sindex = 0;
1256		unsigned int level = a6xx_gmu_get_arc_level(dev, freqs[i]);
1257
1258		/* Get the primary index that matches the arc level */
1259		for (j = 0; j < pri_count; j++) {
1260			if (pri[j] >= level) {
1261				pindex = j;
1262				break;
1263			}
1264		}
1265
1266		if (j == pri_count) {
1267			DRM_DEV_ERROR(dev,
1268				      "Level %u not found in the RPMh list\n",
1269				      level);
1270			DRM_DEV_ERROR(dev, "Available levels:\n");
1271			for (j = 0; j < pri_count; j++)
1272				DRM_DEV_ERROR(dev, "  %u\n", pri[j]);
1273
1274			return -EINVAL;
1275		}
1276
		/*
		 * Look for a level in the secondary list that matches. If
		 * nothing fits, use the maximum non-zero vote
		 */
1281
1282		for (j = 0; j < sec_count; j++) {
1283			if (sec[j] >= level) {
1284				sindex = j;
1285				break;
1286			} else if (sec[j]) {
1287				sindex = j;
1288			}
1289		}
1290
		/*
		 * Construct the vote: primary arc level value in the upper 16
		 * bits, secondary and primary table indices in the low bytes.
		 */
1292		votes[i] = ((pri[pindex] & 0xffff) << 16) |
1293			(sindex << 8) | pindex;
1294	}
1295
1296	return 0;
1297}
1298
1299/*
1300 * The GMU votes with the RPMh for itself and on behalf of the GPU but we need
1301 * to construct the list of votes on the CPU and send it over. Query the RPMh
1302 * voltage levels and build the votes
1303 */
1304
1305static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu)
1306{
1307	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
1308	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
1309	struct msm_gpu *gpu = &adreno_gpu->base;
1310	int ret;
1311
1312	/* Build the GX votes */
1313	ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes,
1314		gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl");
1315
1316	/* Build the CX votes */
1317	ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes,
1318		gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl");
1319
1320	return ret;
1321}
1322
1323static int a6xx_gmu_build_freq_table(struct device *dev, unsigned long *freqs,
1324		u32 size)
1325{
1326	int count = dev_pm_opp_get_opp_count(dev);
1327	struct dev_pm_opp *opp;
1328	int i, index = 0;
1329	unsigned long freq = 1;
1330
1331	/*
1332	 * The OPP table doesn't contain the "off" frequency level so we need to
1333	 * add 1 to the table size to account for it
1334	 */
1335
1336	if (WARN(count + 1 > size,
1337		"The GMU frequency table is being truncated\n"))
1338		count = size - 1;
1339
1340	/* Set the "off" frequency */
1341	freqs[index++] = 0;
1342
1343	for (i = 0; i < count; i++) {
1344		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
1345		if (IS_ERR(opp))
1346			break;
1347
1348		dev_pm_opp_put(opp);
1349		freqs[index++] = freq++;
1350	}
1351
1352	return index;
1353}
1354
1355static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
1356{
1357	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
1358	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
1359	struct msm_gpu *gpu = &adreno_gpu->base;
1360
1361	int ret = 0;
1362
1363	/*
1364	 * The GMU handles its own frequency switching so build a list of
1365	 * available frequencies to send during initialization
1366	 */
1367	ret = devm_pm_opp_of_add_table(gmu->dev);
1368	if (ret) {
1369		DRM_DEV_ERROR(gmu->dev, "Unable to set the OPP table for the GMU\n");
1370		return ret;
1371	}
1372
1373	gmu->nr_gmu_freqs = a6xx_gmu_build_freq_table(gmu->dev,
1374		gmu->gmu_freqs, ARRAY_SIZE(gmu->gmu_freqs));
1375
1376	/*
1377	 * The GMU also handles GPU frequency switching so build a list
1378	 * from the GPU OPP table
1379	 */
1380	gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev,
1381		gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs));
1382
1383	gmu->current_perf_index = gmu->nr_gpu_freqs - 1;
1384
1385	/* Build the list of RPMh votes that we'll send to the GMU */
1386	return a6xx_gmu_rpmh_votes_init(gmu);
1387}
1388
1389static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
1390{
1391	int ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks);
1392
1393	if (ret < 1)
1394		return ret;
1395
1396	gmu->nr_clocks = ret;
1397
1398	gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks,
1399		gmu->nr_clocks, "gmu");
1400
1401	gmu->hub_clk = msm_clk_bulk_get_clock(gmu->clocks,
1402		gmu->nr_clocks, "hub");
1403
1404	return 0;
1405}
1406
1407static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
1408		const char *name)
1409{
1410	void __iomem *ret;
1411	struct resource *res = platform_get_resource_byname(pdev,
1412			IORESOURCE_MEM, name);
1413
1414	if (!res) {
1415		DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name);
1416		return ERR_PTR(-EINVAL);
1417	}
1418
1419	ret = ioremap(res->start, resource_size(res));
1420	if (!ret) {
1421		DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name);
1422		return ERR_PTR(-EINVAL);
1423	}
1424
1425	return ret;
1426}
1427
1428static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
1429		const char *name, irq_handler_t handler)
1430{
1431	int irq, ret;
1432
1433	irq = platform_get_irq_byname(pdev, name);
1434
1435	ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH, name, gmu);
1436	if (ret) {
1437		DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s %d\n",
1438			      name, ret);
1439		return ret;
1440	}
1441
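	/* Leave the IRQ disabled; a6xx_gmu_resume() enables it once the GMU is up */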
1442	disable_irq(irq);
1443
1444	return irq;
1445}
1446
1447void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
1448{
1449	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
1450	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
1451	struct platform_device *pdev = to_platform_device(gmu->dev);
1452
1453	mutex_lock(&gmu->lock);
1454	if (!gmu->initialized) {
1455		mutex_unlock(&gmu->lock);
1456		return;
1457	}
1458
1459	gmu->initialized = false;
1460
1461	mutex_unlock(&gmu->lock);
1462
1463	pm_runtime_force_suspend(gmu->dev);
1464
	/*
	 * Since cxpd is a virtual device, the device link with the GMU device
	 * will be removed automatically when we detach
	 */
1469	dev_pm_domain_detach(gmu->cxpd, false);
1470
1471	if (!IS_ERR_OR_NULL(gmu->gxpd)) {
1472		pm_runtime_disable(gmu->gxpd);
1473		dev_pm_domain_detach(gmu->gxpd, false);
1474	}
1475
1476	iounmap(gmu->mmio);
1477	if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
1478		iounmap(gmu->rscc);
1479	gmu->mmio = NULL;
1480	gmu->rscc = NULL;
1481
1482	if (!adreno_has_gmu_wrapper(adreno_gpu)) {
1483		a6xx_gmu_memory_free(gmu);
1484
1485		free_irq(gmu->gmu_irq, gmu);
1486		free_irq(gmu->hfi_irq, gmu);
1487	}
1488
1489	/* Drop reference taken in of_find_device_by_node */
1490	put_device(gmu->dev);
1491}
1492
1493static int cxpd_notifier_cb(struct notifier_block *nb,
1494			unsigned long action, void *data)
1495{
1496	struct a6xx_gmu *gmu = container_of(nb, struct a6xx_gmu, pd_nb);
1497
1498	if (action == GENPD_NOTIFY_OFF)
1499		complete_all(&gmu->pd_gate);
1500
1501	return 0;
1502}
1503
1504int a6xx_gmu_wrapper_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
1505{
1506	struct platform_device *pdev = of_find_device_by_node(node);
1507	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
1508	int ret;
1509
1510	if (!pdev)
1511		return -ENODEV;
1512
1513	gmu->dev = &pdev->dev;
1514
1515	of_dma_configure(gmu->dev, node, true);
1516
1517	pm_runtime_enable(gmu->dev);
1518
1519	/* Mark legacy for manual SPTPRAC control */
1520	gmu->legacy = true;
1521
1522	/* Map the GMU registers */
1523	gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
1524	if (IS_ERR(gmu->mmio)) {
1525		ret = PTR_ERR(gmu->mmio);
1526		goto err_mmio;
1527	}
1528
1529	gmu->cxpd = dev_pm_domain_attach_by_name(gmu->dev, "cx");
1530	if (IS_ERR(gmu->cxpd)) {
1531		ret = PTR_ERR(gmu->cxpd);
1532		goto err_mmio;
1533	}
1534
1535	if (!device_link_add(gmu->dev, gmu->cxpd, DL_FLAG_PM_RUNTIME)) {
1536		ret = -ENODEV;
1537		goto detach_cxpd;
1538	}
1539
1540	init_completion(&gmu->pd_gate);
1541	complete_all(&gmu->pd_gate);
1542	gmu->pd_nb.notifier_call = cxpd_notifier_cb;
1543
1544	/* Get a link to the GX power domain to reset the GPU */
1545	gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx");
1546	if (IS_ERR(gmu->gxpd)) {
1547		ret = PTR_ERR(gmu->gxpd);
1548		goto err_mmio;
1549	}
1550
1551	gmu->initialized = true;
1552
1553	return 0;
1554
1555detach_cxpd:
1556	dev_pm_domain_detach(gmu->cxpd, false);
1557
1558err_mmio:
1559	iounmap(gmu->mmio);
1560
1561	/* Drop reference taken in of_find_device_by_node */
1562	put_device(gmu->dev);
1563
1564	return ret;
1565}
1566
1567int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
1568{
1569	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
1570	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
1571	struct platform_device *pdev = of_find_device_by_node(node);
1572	int ret;
1573
1574	if (!pdev)
1575		return -ENODEV;
1576
1577	gmu->dev = &pdev->dev;
1578
1579	of_dma_configure(gmu->dev, node, true);
1580
	/* For now, don't do anything fancy until we get our feet under us */
1582	gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
1583
1584	pm_runtime_enable(gmu->dev);
1585
1586	/* Get the list of clocks */
1587	ret = a6xx_gmu_clocks_probe(gmu);
1588	if (ret)
1589		goto err_put_device;
1590
1591	ret = a6xx_gmu_memory_probe(gmu);
1592	if (ret)
1593		goto err_put_device;
1594
1595
1596	/* A660 now requires handling "prealloc requests" in GMU firmware
1597	 * For now just hardcode allocations based on the known firmware.
1598	 * note: there is no indication that these correspond to "dummy" or
1599	 * "debug" regions, but this "guess" allows reusing these BOs which
1600	 * are otherwise unused by a660.
1601	 */
1602	gmu->dummy.size = SZ_4K;
1603	if (adreno_is_a660_family(adreno_gpu)) {
1604		ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7,
1605					    0x60400000, "debug");
1606		if (ret)
1607			goto err_memory;
1608
1609		gmu->dummy.size = SZ_8K;
1610	}
1611
1612	/* Allocate memory for the GMU dummy page */
1613	ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, gmu->dummy.size,
1614				    0x60000000, "dummy");
1615	if (ret)
1616		goto err_memory;
1617
1618	/* Note that a650 family also includes a660 family: */
1619	if (adreno_is_a650_family(adreno_gpu)) {
1620		ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
1621			SZ_16M - SZ_16K, 0x04000, "icache");
1622		if (ret)
1623			goto err_memory;
1624	/*
1625	 * NOTE: when porting legacy ("pre-650-family") GPUs you may be tempted to add a condition
1626	 * to allocate icache/dcache here, as per downstream code flow, but it may not actually be
1627	 * necessary. If you omit this step and you don't get random pagefaults, you are likely
1628	 * good to go without this!
1629	 */
1630	} else if (adreno_is_a640_family(adreno_gpu)) {
1631		ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
1632			SZ_256K - SZ_16K, 0x04000, "icache");
1633		if (ret)
1634			goto err_memory;
1635
1636		ret = a6xx_gmu_memory_alloc(gmu, &gmu->dcache,
1637			SZ_256K - SZ_16K, 0x44000, "dcache");
1638		if (ret)
1639			goto err_memory;
1640	} else if (adreno_is_a630_family(adreno_gpu)) {
1641		/* HFI v1, has sptprac */
1642		gmu->legacy = true;
1643
1644		/* Allocate memory for the GMU debug region */
1645		ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0, "debug");
1646		if (ret)
1647			goto err_memory;
1648	}
1649
1650	/* Allocate memory for the GMU log region */
1651	ret = a6xx_gmu_memory_alloc(gmu, &gmu->log, SZ_16K, 0, "log");
1652	if (ret)
1653		goto err_memory;
1654
	/* Allocate memory for the HFI queues */
1656	ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0, "hfi");
1657	if (ret)
1658		goto err_memory;
1659
1660	/* Map the GMU registers */
1661	gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
1662	if (IS_ERR(gmu->mmio)) {
1663		ret = PTR_ERR(gmu->mmio);
1664		goto err_memory;
1665	}
1666
1667	if (adreno_is_a650_family(adreno_gpu)) {
1668		gmu->rscc = a6xx_gmu_get_mmio(pdev, "rscc");
1669		if (IS_ERR(gmu->rscc)) {
1670			ret = -ENODEV;
1671			goto err_mmio;
1672		}
1673	} else {
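		/* Older parts expose the RSCC block at a fixed offset inside the GMU region */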
1674		gmu->rscc = gmu->mmio + 0x23000;
1675	}
1676
1677	/* Get the HFI and GMU interrupts */
1678	gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
1679	gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq);
1680
1681	if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0) {
1682		ret = -ENODEV;
1683		goto err_mmio;
1684	}
1685
1686	gmu->cxpd = dev_pm_domain_attach_by_name(gmu->dev, "cx");
1687	if (IS_ERR(gmu->cxpd)) {
1688		ret = PTR_ERR(gmu->cxpd);
1689		goto err_mmio;
1690	}
1691
1692	if (!device_link_add(gmu->dev, gmu->cxpd,
1693					DL_FLAG_PM_RUNTIME)) {
1694		ret = -ENODEV;
1695		goto detach_cxpd;
1696	}
1697
1698	init_completion(&gmu->pd_gate);
1699	complete_all(&gmu->pd_gate);
1700	gmu->pd_nb.notifier_call = cxpd_notifier_cb;
1701
1702	/*
1703	 * Get a link to the GX power domain to reset the GPU in case of GMU
1704	 * crash
1705	 */
1706	gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx");
1707
1708	/* Get the power levels for the GMU and GPU */
1709	a6xx_gmu_pwrlevels_probe(gmu);
1710
1711	/* Set up the HFI queues */
1712	a6xx_hfi_init(gmu);
1713
1714	/* Initialize RPMh */
1715	a6xx_gmu_rpmh_init(gmu);
1716
1717	gmu->initialized = true;
1718
1719	return 0;
1720
1721detach_cxpd:
1722	dev_pm_domain_detach(gmu->cxpd, false);
1723
1724err_mmio:
1725	iounmap(gmu->mmio);
1726	if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
1727		iounmap(gmu->rscc);
1728	free_irq(gmu->gmu_irq, gmu);
1729	free_irq(gmu->hfi_irq, gmu);
1730
1731err_memory:
1732	a6xx_gmu_memory_free(gmu);
1733err_put_device:
1734	/* Drop reference taken in of_find_device_by_node */
1735	put_device(gmu->dev);
1736
1737	return ret;
1738}
1739