/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "atom.h"
#include "amd_pcie.h"

#include "uvd/uvd_7_0_offset.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "sdma0/sdma0_4_0_offset.h"
#include "sdma1/sdma1_4_0_offset.h"
#include "hdp/hdp_4_0_offset.h"
#include "hdp/hdp_4_0_sh_mask.h"
#include "smuio/smuio_9_0_offset.h"
#include "smuio/smuio_9_0_sh_mask.h"
#include "nbio/nbio_7_0_default.h"
#include "nbio/nbio_7_0_offset.h"
#include "nbio/nbio_7_0_sh_mask.h"
#include "nbio/nbio_7_0_smn.h"
#include "mp/mp_9_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "gfx_v9_0.h"
#include "gmc_v9_0.h"
#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "df_v1_7.h"
#include "df_v3_6.h"
#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
#include "nbio_v7_4.h"
#include "vega10_ih.h"
#include "sdma_v4_0.h"
#include "uvd_v7_0.h"
#include "vce_v4_0.h"
#include "vcn_v1_0.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "vcn_v2_5.h"
#include "jpeg_v2_5.h"
#include "dce_virtual.h"
#include "mxgpu_ai.h"
#include "amdgpu_smu.h"
#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"
#include <uapi/linux/kfd_ioctl.h>

#define mmMP0_MISC_CGTT_CTRL0                                                                   0x01b9
#define mmMP0_MISC_CGTT_CTRL0_BASE_IDX                                                          0
#define mmMP0_MISC_LIGHT_SLEEP_CTRL                                                             0x01ba
#define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX                                                    0

/* for Vega20 register name change */
#define mmHDP_MEM_POWER_CTRL	0x00d4
#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK	0x00000001L
#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK	0x00000002L
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK	0x00010000L
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK		0x00020000L
#define mmHDP_MEM_POWER_CTRL_BASE_IDX	0

/* for Vega20/Arcturus register offset change */
#define	mmROM_INDEX_VG20				0x00e4
#define	mmROM_INDEX_VG20_BASE_IDX			0
#define	mmROM_DATA_VG20					0x00e5
#define	mmROM_DATA_VG20_BASE_IDX			0

/*
 * Indirect register accessors
 */
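/*
 * All of the accessors below follow the same index/data pattern: the
 * register offset is written to an index register, and the value is then
 * read from (or written to) the paired data register, roughly:
 *
 *	WREG32(index_reg, reg);
 *	val = RREG32(data_reg);
 *
 * (A sketch of the idea only. The PCIE variants go through the
 * amdgpu_device_indirect_*() helpers, which handle the locking
 * internally; the index/data offsets belong to the NBIO block and are
 * therefore queried through adev->nbio.funcs.)
 */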
static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	return amdgpu_device_indirect_rreg(adev, address, data, reg);
}

static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	amdgpu_device_indirect_wreg(adev, address, data, reg, v);
}

static u64 soc15_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
{
	unsigned long address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	return amdgpu_device_indirect_rreg64(adev, address, data, reg);
}

static void soc15_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
{
	unsigned long address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	amdgpu_device_indirect_wreg64(adev, address, data, reg, v);
}

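/*
 * The UVD_CTX accessors below share a single index/data register pair, so
 * a spinlock (irqsave, since these may be called from interrupt context)
 * guards the index write and the data access as one transaction. The
 * index is masked to 0x1ff, which presumably reflects the size of the
 * UVD context register window.
 */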
static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(address, ((reg) & 0x1ff));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void soc15_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(address, ((reg) & 0x1ff));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 soc15_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 soc15_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
	r = RREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}

static void soc15_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}

static u32 soc15_se_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
	r = RREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
	return r;
}

static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
}

static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_memsize(adev);
}

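/*
 * Note: clock values in amdgpu appear to be expressed in 10 kHz units, so
 * the fixed value returned for Renoir below corresponds to a 100 MHz
 * reference clock; Raven derives its crystal clock by dividing the SPLL
 * reference frequency by 4.
 */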
static u32 soc15_get_xclk(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;

	if (adev->asic_type == CHIP_RENOIR)
		return 10000;
	if (adev->asic_type == CHIP_RAVEN)
		return reference_clock / 4;

	return reference_clock;
}

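/*
 * soc15_grbm_select() programs GRBM_GFX_CNTL so that subsequent GFX
 * register accesses target the given ME/pipe/queue/VMID. Callers are
 * expected to serialize this (in practice via adev->srbm_mutex) and to
 * restore the selection to 0 when done, e.g.:
 *
 *	mutex_lock(&adev->srbm_mutex);
 *	soc15_grbm_select(adev, me, pipe, queue, vmid);
 *	... access per-queue registers ...
 *	soc15_grbm_select(adev, 0, 0, 0, 0);
 *	mutex_unlock(&adev->srbm_mutex);
 */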
void soc15_grbm_select(struct amdgpu_device *adev,
		     u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 grbm_gfx_cntl = 0;

	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_CNTL, grbm_gfx_cntl);
}

static void soc15_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool soc15_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}

static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
				     u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	u32 i, length_dw;
	uint32_t rom_index_offset;
	uint32_t rom_data_offset;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* the APU VBIOS image is part of the system BIOS image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;

	switch (adev->asic_type) {
	case CHIP_VEGA20:
	case CHIP_ARCTURUS:
		rom_index_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX_VG20);
		rom_data_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA_VG20);
		break;
	default:
		rom_index_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX);
		rom_data_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA);
		break;
	}

	/* set the ROM index to 0 */
	WREG32(rom_index_offset, 0);
	/* read out the ROM data */
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(rom_data_offset);

	return true;
}

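/*
 * Whitelist of registers that userspace is allowed to read through the
 * read_register ASIC callback (e.g. via the AMDGPU INFO ioctl); anything
 * not listed here is rejected with -EINVAL by soc15_read_register().
 */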
static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
	{ SOC15_REG_ENTRY(GC, 0, mmDB_DEBUG2)},
};

static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

static uint32_t soc15_get_register_value(struct amdgpu_device *adev,
					 bool indexed, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		return soc15_read_indexed_register(adev, se_num, sh_num, reg_offset);
	} else {
		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
			return adev->gfx.config.gb_addr_config;
		else if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2))
			return adev->gfx.config.db_debug2;
		return RREG32(reg_offset);
	}
}

static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;
	struct soc15_allowed_register_entry *en;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
		en = &soc15_allowed_read_registers[i];
		if (!adev->reg_offset[en->hwip][en->inst])
			continue;
		else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
					+ en->reg_offset))
			continue;

		*value = soc15_get_register_value(adev,
						  soc15_allowed_read_registers[i].grbm_indexed,
						  se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}

/**
 * soc15_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @regs: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void soc15_program_register_sequence(struct amdgpu_device *adev,
					     const struct soc15_reg_golden *regs,
					     const u32 array_size)
{
	const struct soc15_reg_golden *entry;
	u32 tmp, reg;
	int i;

	for (i = 0; i < array_size; ++i) {
		entry = &regs[i];
		reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;

		if (entry->and_mask == 0xffffffff) {
			tmp = entry->or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~(entry->and_mask);
			tmp |= (entry->or_mask & entry->and_mask);
		}

		if (reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3) ||
			reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE) ||
			reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1) ||
			reg == SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG))
			WREG32_RLC(reg, tmp);
		else
			WREG32(reg, tmp);
	}
}
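/*
 * Typical usage (hypothetical values): golden-register tables are built
 * with SOC15_REG_GOLDEN_VALUE(ip, inst, reg, and_mask, or_mask) and
 * applied during IP init, e.g.:
 *
 *	static const struct soc15_reg_golden golden_settings[] = {
 *		SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
 *	};
 *
 *	soc15_program_register_sequence(adev, golden_settings,
 *					ARRAY_SIZE(golden_settings));
 */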
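/*
 * Mode1 reset resets the whole ASIC through the PSP. Bus mastering is
 * disabled and the PCI config space is cached beforehand because the
 * reset clobbers it; completion is detected by polling the memsize
 * register until it reads back something other than 0xffffffff.
 */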
static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	dev_info(adev->dev, "GPU mode1 reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);

	amdgpu_device_cache_pci_state(adev->pdev);

	ret = psp_gpu_reset(adev);
	if (ret)
		dev_err(adev->dev, "GPU mode1 reset failed\n");

	amdgpu_device_load_pci_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio.funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return ret;
}

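/*
 * BACO (Bus Active, Chip Off) reset powers the chip down while keeping
 * the bus interface alive; the actual entry/exit sequence is driven by
 * the SMU through amdgpu_dpm_baco_reset().
 */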
static int soc15_asic_baco_reset(struct amdgpu_device *adev)
{
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	int ret = 0;

	/* avoid NBIF getting stuck when doing RAS recovery in BACO reset */
	if (ras && ras->supported)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);

	ret = amdgpu_dpm_baco_reset(adev);
	if (ret)
		return ret;

	/* re-enable doorbell interrupt after BACO exit */
	if (ras && ras->supported)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);

	return 0;
}

static enum amd_reset_method
soc15_asic_reset_method(struct amdgpu_device *adev)
{
	bool baco_reset = false;
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
	    amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
	    amdgpu_reset_method == AMD_RESET_METHOD_BACO)
		return amdgpu_reset_method;

	if (amdgpu_reset_method != -1)
		dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
			 amdgpu_reset_method);

	switch (adev->asic_type) {
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		return AMD_RESET_METHOD_MODE2;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_ARCTURUS:
		baco_reset = amdgpu_dpm_is_baco_supported(adev);
		break;
	case CHIP_VEGA20:
		if (adev->psp.sos_fw_version >= 0x80067)
			baco_reset = amdgpu_dpm_is_baco_supported(adev);

		/*
		 * 1. PMFW version > 0x284300: all cases use baco
		 * 2. PMFW version <= 0x284300: only sGPU w/o RAS use baco
		 */
		if ((ras && ras->supported) && adev->pm.fw_version <= 0x283400)
			baco_reset = false;
		break;
	default:
		break;
	}

	if (baco_reset)
		return AMD_RESET_METHOD_BACO;
	else
		return AMD_RESET_METHOD_MODE1;
}

static int soc15_asic_reset(struct amdgpu_device *adev)
{
	/* original raven doesn't have full asic reset */
	if ((adev->apu_flags & AMD_APU_IS_RAVEN) &&
	    !(adev->apu_flags & AMD_APU_IS_RAVEN2))
		return 0;

	switch (soc15_asic_reset_method(adev)) {
	case AMD_RESET_METHOD_BACO:
		dev_info(adev->dev, "BACO reset\n");
		return soc15_asic_baco_reset(adev);
	case AMD_RESET_METHOD_MODE2:
		dev_info(adev->dev, "MODE2 reset\n");
		return amdgpu_dpm_mode2_reset(adev);
	default:
		dev_info(adev->dev, "MODE1 reset\n");
		return soc15_asic_mode1_reset(adev);
	}
}

static bool soc15_supports_baco(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_ARCTURUS:
		return amdgpu_dpm_is_baco_supported(adev);
	case CHIP_VEGA20:
		if (adev->psp.sos_fw_version >= 0x80067)
			return amdgpu_dpm_is_baco_supported(adev);
		return false;
	default:
		return false;
	}
}

/*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			u32 cntl_reg, u32 status_reg)
{
	return 0;
}*/

static int soc15_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/*int r;

	r = soc15_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
	if (r)
		return r;

	r = soc15_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
	*/
	return 0;
}

static int soc15_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */

	return 0;
}

static void soc15_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void soc15_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
					   bool enable)
{
	adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}

static const struct amdgpu_ip_block_version vega10_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 2,
	.minor = 0,
	.rev = 0,
	.funcs = &soc15_common_ip_funcs,
};

static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_rev_id(adev);
}

static void soc15_reg_base_init(struct amdgpu_device *adev)
{
	int r;

	/* Set IP register base before any HW register access */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_RAVEN:
		vega10_reg_base_init(adev);
		break;
	case CHIP_RENOIR:
		/* It's safe to do IP discovery here for Renoir,
		 * since it doesn't support SR-IOV. */
		if (amdgpu_discovery) {
			r = amdgpu_discovery_reg_base_init(adev);
			if (r == 0)
				break;
			DRM_WARN("failed to init reg base from ip discovery table, "
				 "fallback to legacy init method\n");
		}
		vega10_reg_base_init(adev);
		break;
	case CHIP_VEGA20:
		vega20_reg_base_init(adev);
		break;
	case CHIP_ARCTURUS:
		arct_reg_base_init(adev);
		break;
	default:
		DRM_ERROR("Unsupported asic type: %d!\n", adev->asic_type);
		break;
	}
}

void soc15_set_virt_ops(struct amdgpu_device *adev)
{
	adev->virt.ops = &xgpu_ai_virt_ops;

	/* init soc15 reg base early enough so we can
	 * request full access for SR-IOV before
	 * set_ip_blocks. */
	soc15_reg_base_init(adev);
}

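/*
 * Build the list of IP blocks for this ASIC. Ordering matters here: the
 * blocks are initialized in the order they are added, so the common and
 * GMC blocks come first, PSP/IH ordering depends on whether the device
 * runs under SR-IOV (PSP must initialize before IH for Vega10 SR-IOV),
 * and the display and multimedia blocks come after GFX/SDMA/SMU.
 */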
int soc15_set_ip_blocks(struct amdgpu_device *adev)
{
	/* for bare metal case */
	if (!amdgpu_sriov_vf(adev))
		soc15_reg_base_init(adev);

	if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
		adev->gmc.xgmi.supported = true;

	if (adev->flags & AMD_IS_APU) {
		adev->nbio.funcs = &nbio_v7_0_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
	} else if (adev->asic_type == CHIP_VEGA20 ||
		   adev->asic_type == CHIP_ARCTURUS) {
		adev->nbio.funcs = &nbio_v7_4_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
	} else {
		adev->nbio.funcs = &nbio_v6_1_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
	}

	if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
		adev->df.funcs = &df_v3_6_funcs;
	else
		adev->df.funcs = &df_v1_7_funcs;

	adev->rev_id = soc15_get_rev_id(adev);

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);

		/* for Vega10 SR-IOV, PSP needs to be initialized before IH */
		if (amdgpu_sriov_vf(adev)) {
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
				if (adev->asic_type == CHIP_VEGA20)
					amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
				else
					amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
			}
			amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		} else {
			amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
				if (adev->asic_type == CHIP_VEGA20)
					amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
				else
					amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
			}
		}
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		if (is_support_sw_smu(adev)) {
			if (!amdgpu_sriov_vf(adev))
				amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		} else {
			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		}
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) {
			amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
		}
		break;
	case CHIP_RAVEN:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
		break;
	case CHIP_ARCTURUS:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);

		if (amdgpu_sriov_vf(adev)) {
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
				amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		} else {
			amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
				amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		}

		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);

		if (amdgpu_sriov_vf(adev)) {
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
				amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
		} else {
			amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
		}
		if (!amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
		break;
	case CHIP_RENOIR:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
		amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	adev->nbio.funcs->hdp_flush(adev, ring);
}

static void soc15_invalidate_hdp(struct amdgpu_device *adev,
				 struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg)
		WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
	else
		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
			HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
}

static bool soc15_need_full_reset(struct amdgpu_device *adev)
{
	/* change this when we implement soft reset */
	return true;
}

static void vega20_reset_hdp_ras_error_count(struct amdgpu_device *adev)
{
	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP))
		return;
	/* read back the HDP RAS counter to reset it to 0 */
	RREG32_SOC15(HDP, 0, mmHDP_EDC_CNT);
}

static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
				 uint64_t *count1)
{
	uint32_t perfctr = 0;
	uint64_t cnt0_of, cnt1_of;
	int tmp;

	/* This reports 0 on APUs, so return to avoid writing/reading registers
	 * that may or may not be different from their GPU counterparts
	 */
	if (adev->flags & AMD_IS_APU)
		return;

	/* Set the 2 events that we wish to watch, defined above */
	/* Reg 40 is # received msgs */
	/* Reg 104 is # of posted requests sent */
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);

	/* Write to enable desired perf counters */
	WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK, perfctr);
	/* Zero out and enable the perf counters
	 * Write 0x5:
	 * Bit 0 = Start all counters(1)
	 * Bit 2 = Global counter reset enable(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);

	msleep(1000);

	/* Load the shadow and disable the perf counters
	 * Write 0x2:
	 * Bit 0 = Stop counters(0)
	 * Bit 1 = Load the shadow counters(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);

	/* Read register values to get any >32bit overflow */
	tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK);
	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);

	/* Get the values and add the overflow */
	*count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
	*count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
}

static void vega20_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
				  uint64_t *count1)
{
	uint32_t perfctr = 0;
	uint64_t cnt0_of, cnt1_of;
	int tmp;

	/* This reports 0 on APUs, so return to avoid writing/reading registers
	 * that may or may not be different from their GPU counterparts
	 */
	if (adev->flags & AMD_IS_APU)
		return;

	/* Set the 2 events that we wish to watch, defined above */
	/* Reg 40 is # received msgs */
	/* Reg 108 is # of posted requests sent on VG20 */
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
				EVENT0_SEL, 40);
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
				EVENT1_SEL, 108);

	/* Write to enable desired perf counters */
	WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3, perfctr);
	/* Zero out and enable the perf counters
	 * Write 0x5:
	 * Bit 0 = Start all counters(1)
	 * Bit 2 = Global counter reset enable(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);

	msleep(1000);

	/* Load the shadow and disable the perf counters
	 * Write 0x2:
	 * Bit 0 = Stop counters(0)
	 * Bit 1 = Load the shadow counters(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);

	/* Read register values to get any >32bit overflow */
	tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3);
	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER0_UPPER);
	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER1_UPPER);

	/* Get the values and add the overflow */
	*count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK3) | (cnt0_of << 32);
	*count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK3) | (cnt1_of << 32);
}

static bool soc15_need_reset_on_init(struct amdgpu_device *adev)
{
	u32 sol_reg;

	/* Just return false for soc15 GPUs.  Reset does not seem to
	 * be necessary.
	 */
	if (!amdgpu_passthrough(adev))
		return false;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* Check the sOS sign-of-life register to confirm that the system
	 * driver and sOS have already been loaded.
	 */
	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
	if (sol_reg)
		return true;

	return false;
}

static uint64_t soc15_get_pcie_replay_count(struct amdgpu_device *adev)
{
	uint64_t nak_r, nak_g;

	/* Get the number of NAKs received and generated */
	nak_r = RREG32_PCIE(smnPCIE_RX_NUM_NAK);
	nak_g = RREG32_PCIE(smnPCIE_RX_NUM_NAK_GENERATED);

	/* Add the total number of NAKs, i.e. the number of replays */
	return (nak_r + nak_g);
}

static void soc15_pre_asic_init(struct amdgpu_device *adev)
{
	gmc_v9_0_restore_registers(adev);
}

static const struct amdgpu_asic_funcs soc15_asic_funcs =
{
	.read_disabled_bios = &soc15_read_disabled_bios,
	.read_bios_from_rom = &soc15_read_bios_from_rom,
	.read_register = &soc15_read_register,
	.reset = &soc15_asic_reset,
	.reset_method = &soc15_asic_reset_method,
	.set_vga_state = &soc15_vga_set_state,
	.get_xclk = &soc15_get_xclk,
	.set_uvd_clocks = &soc15_set_uvd_clocks,
	.set_vce_clocks = &soc15_set_vce_clocks,
	.get_config_memsize = &soc15_get_config_memsize,
	.flush_hdp = &soc15_flush_hdp,
	.invalidate_hdp = &soc15_invalidate_hdp,
	.need_full_reset = &soc15_need_full_reset,
	.init_doorbell_index = &vega10_doorbell_index_init,
	.get_pcie_usage = &soc15_get_pcie_usage,
	.need_reset_on_init = &soc15_need_reset_on_init,
	.get_pcie_replay_count = &soc15_get_pcie_replay_count,
	.supports_baco = &soc15_supports_baco,
	.pre_asic_init = &soc15_pre_asic_init,
};

static const struct amdgpu_asic_funcs vega20_asic_funcs =
{
	.read_disabled_bios = &soc15_read_disabled_bios,
	.read_bios_from_rom = &soc15_read_bios_from_rom,
	.read_register = &soc15_read_register,
	.reset = &soc15_asic_reset,
	.reset_method = &soc15_asic_reset_method,
	.set_vga_state = &soc15_vga_set_state,
	.get_xclk = &soc15_get_xclk,
	.set_uvd_clocks = &soc15_set_uvd_clocks,
	.set_vce_clocks = &soc15_set_vce_clocks,
	.get_config_memsize = &soc15_get_config_memsize,
	.flush_hdp = &soc15_flush_hdp,
	.invalidate_hdp = &soc15_invalidate_hdp,
	.reset_hdp_ras_error_count = &vega20_reset_hdp_ras_error_count,
	.need_full_reset = &soc15_need_full_reset,
	.init_doorbell_index = &vega20_doorbell_index_init,
	.get_pcie_usage = &vega20_get_pcie_usage,
	.need_reset_on_init = &soc15_need_reset_on_init,
	.get_pcie_replay_count = &soc15_get_pcie_replay_count,
	.supports_baco = &soc15_supports_baco,
	.pre_asic_init = &soc15_pre_asic_init,
};

static int soc15_common_early_init(void *handle)
{
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
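/* The offsets of a few HDP registers (e.g. the flush controls) are
 * remapped into this page-sized hole just below the 512KB mark of the
 * MMIO space, so that they can be exposed to user mode; see the
 * remap_hdp_registers() call in soc15_common_hw_init() and the KFD
 * interface pulled in via kfd_ioctl.h.
 */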
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
	adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &soc15_pcie_rreg;
	adev->pcie_wreg = &soc15_pcie_wreg;
	adev->pcie_rreg64 = &soc15_pcie_rreg64;
	adev->pcie_wreg64 = &soc15_pcie_wreg64;
	adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
	adev->didt_rreg = &soc15_didt_rreg;
	adev->didt_wreg = &soc15_didt_wreg;
	adev->gc_cac_rreg = &soc15_gc_cac_rreg;
	adev->gc_cac_wreg = &soc15_gc_cac_wreg;
	adev->se_cac_rreg = &soc15_se_cac_rreg;
	adev->se_cac_wreg = &soc15_se_cac_wreg;

	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		adev->asic_funcs = &soc15_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_DRM_MGCG |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_DF_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_VEGA12:
		adev->asic_funcs = &soc15_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_VEGA20:
		adev->asic_funcs = &vega20_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x28;
		break;
	case CHIP_RAVEN:
		adev->asic_funcs = &soc15_asic_funcs;
		if (adev->pdev->device == 0x15dd)
			adev->apu_flags |= AMD_APU_IS_RAVEN;
		if (adev->pdev->device == 0x15d8)
			adev->apu_flags |= AMD_APU_IS_PICASSO;
		if (adev->rev_id >= 0x8)
			adev->apu_flags |= AMD_APU_IS_RAVEN2;

		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			adev->external_rev_id = adev->rev_id + 0x79;
		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
			adev->external_rev_id = adev->rev_id + 0x41;
		else if (adev->rev_id == 1)
			adev->external_rev_id = adev->rev_id + 0x20;
		else
			adev->external_rev_id = adev->rev_id + 0x01;

		if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_CP_LS |
				AMD_CG_SUPPORT_GFX_3D_CGCG |
				AMD_CG_SUPPORT_GFX_3D_CGLS |
				AMD_CG_SUPPORT_GFX_CGCG |
				AMD_CG_SUPPORT_GFX_CGLS |
				AMD_CG_SUPPORT_BIF_LS |
				AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_ROM_MGCG |
				AMD_CG_SUPPORT_MC_MGCG |
				AMD_CG_SUPPORT_MC_LS |
				AMD_CG_SUPPORT_SDMA_MGCG |
				AMD_CG_SUPPORT_SDMA_LS |
				AMD_CG_SUPPORT_VCN_MGCG;

			adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
		} else if (adev->apu_flags & AMD_APU_IS_PICASSO) {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_CP_LS |
				AMD_CG_SUPPORT_GFX_3D_CGLS |
				AMD_CG_SUPPORT_GFX_CGCG |
				AMD_CG_SUPPORT_GFX_CGLS |
				AMD_CG_SUPPORT_BIF_LS |
				AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_ROM_MGCG |
				AMD_CG_SUPPORT_MC_MGCG |
				AMD_CG_SUPPORT_MC_LS |
				AMD_CG_SUPPORT_SDMA_MGCG |
				AMD_CG_SUPPORT_SDMA_LS;

			/*
			 * MMHUB PG needs to be disabled for Picasso for
			 * stability reasons.
			 */
			adev->pg_flags = AMD_PG_SUPPORT_SDMA |
				AMD_PG_SUPPORT_VCN;
		} else {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_RLC_LS |
				AMD_CG_SUPPORT_GFX_CP_LS |
				AMD_CG_SUPPORT_GFX_3D_CGLS |
				AMD_CG_SUPPORT_GFX_CGCG |
				AMD_CG_SUPPORT_GFX_CGLS |
				AMD_CG_SUPPORT_BIF_MGCG |
				AMD_CG_SUPPORT_BIF_LS |
				AMD_CG_SUPPORT_HDP_MGCG |
				AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_DRM_MGCG |
				AMD_CG_SUPPORT_DRM_LS |
				AMD_CG_SUPPORT_ROM_MGCG |
				AMD_CG_SUPPORT_MC_MGCG |
				AMD_CG_SUPPORT_MC_LS |
				AMD_CG_SUPPORT_SDMA_MGCG |
				AMD_CG_SUPPORT_SDMA_LS |
				AMD_CG_SUPPORT_VCN_MGCG;

			adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
		}
		break;
	case CHIP_ARCTURUS:
		adev->asic_funcs = &vega20_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG;
		adev->external_rev_id = adev->rev_id + 0x32;
		break;
	case CHIP_RENOIR:
		adev->asic_funcs = &soc15_asic_funcs;
		if ((adev->pdev->device == 0x1636) ||
		    (adev->pdev->device == 0x164c))
			adev->apu_flags |= AMD_APU_IS_RENOIR;
		else
			adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;

		if (adev->apu_flags & AMD_APU_IS_RENOIR)
			adev->external_rev_id = adev->rev_id + 0x91;
		else
			adev->external_rev_id = adev->rev_id + 0xa1;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				 AMD_CG_SUPPORT_GFX_MGLS |
				 AMD_CG_SUPPORT_GFX_3D_CGCG |
				 AMD_CG_SUPPORT_GFX_3D_CGLS |
				 AMD_CG_SUPPORT_GFX_CGCG |
				 AMD_CG_SUPPORT_GFX_CGLS |
				 AMD_CG_SUPPORT_GFX_CP_LS |
				 AMD_CG_SUPPORT_MC_MGCG |
				 AMD_CG_SUPPORT_MC_LS |
				 AMD_CG_SUPPORT_SDMA_MGCG |
				 AMD_CG_SUPPORT_SDMA_LS |
				 AMD_CG_SUPPORT_BIF_LS |
				 AMD_CG_SUPPORT_HDP_LS |
				 AMD_CG_SUPPORT_ROM_MGCG |
				 AMD_CG_SUPPORT_VCN_MGCG |
				 AMD_CG_SUPPORT_JPEG_MGCG |
				 AMD_CG_SUPPORT_IH_CG |
				 AMD_CG_SUPPORT_ATHUB_LS |
				 AMD_CG_SUPPORT_ATHUB_MGCG |
				 AMD_CG_SUPPORT_DF_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_SDMA |
				 AMD_PG_SUPPORT_VCN |
				 AMD_PG_SUPPORT_JPEG |
				 AMD_PG_SUPPORT_VCN_DPG;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_ai_mailbox_set_irq_funcs(adev);
	}

	return 0;
}

static int soc15_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r = 0;

	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_get_irq(adev);

	if (adev->asic_funcs &&
	    adev->asic_funcs->reset_hdp_ras_error_count)
		adev->asic_funcs->reset_hdp_ras_error_count(adev);

	if (adev->nbio.funcs->ras_late_init)
		r = adev->nbio.funcs->ras_late_init(adev);

	return r;
}

static int soc15_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_add_irq_id(adev);

	adev->df.funcs->sw_init(adev);

	return 0;
}

static int soc15_common_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_nbio_ras_fini(adev);
	adev->df.funcs->sw_fini(adev);
	return 0;
}

static void soc15_doorbell_range_init(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_ring *ring;

	/* SDMA/IH doorbell ranges are programmed by the hypervisor */
	if (!amdgpu_sriov_vf(adev)) {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			ring = &adev->sdma.instance[i].ring;
			adev->nbio.funcs->sdma_doorbell_range(adev, i,
				ring->use_doorbell, ring->doorbell_index,
				adev->doorbell_index.sdma_doorbell_range);
		}

		adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
						adev->irq.ih.doorbell_index);
	}
}

static int soc15_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable PCIE gen2/3 link */
	soc15_pcie_gen3_enable(adev);
	/* enable ASPM */
	soc15_program_aspm(adev);
	/* setup NBIO registers */
	adev->nbio.funcs->init_registers(adev);
	/* remap HDP registers to a hole in MMIO space,
	 * in order to expose those registers to process space
	 */
	if (adev->nbio.funcs->remap_hdp_registers)
		adev->nbio.funcs->remap_hdp_registers(adev);

	/* enable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, true);
	/* HW doorbell routing policy: doorbell writes not in the
	 * SDMA/IH/MM/ACV ranges will be routed to CP. So we need to init
	 * the SDMA/IH/MM/ACV doorbell ranges prior to CP IP block init
	 * and ring test.
	 */
	soc15_doorbell_range_init(adev);

	return 0;
}

static int soc15_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, false);
	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_put_irq(adev);

	if (adev->nbio.ras_if &&
	    amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) {
		if (adev->nbio.funcs->init_ras_controller_interrupt)
			amdgpu_irq_put(adev, &adev->nbio.ras_controller_irq, 0);
		if (adev->nbio.funcs->init_ras_err_event_athub_interrupt)
			amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0);
	}

	return 0;
}

static int soc15_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc15_common_hw_fini(adev);
}

static int soc15_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc15_common_hw_init(adev);
}

static bool soc15_common_is_idle(void *handle)
{
	return true;
}

static int soc15_common_wait_for_idle(void *handle)
{
	return 0;
}

static int soc15_common_soft_reset(void *handle)
{
	return 0;
}

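/*
 * The clock/power-gating helpers below all follow the same
 * read-modify-write pattern: read the current register value into both
 * 'def' and 'data', update 'data' according to the requested state, and
 * only write the register back if the value actually changed, avoiding
 * needless register traffic.
 */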
static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	if (adev->asic_type == CHIP_VEGA20 ||
		adev->asic_type == CHIP_ARCTURUS ||
		adev->asic_type == CHIP_RENOIR) {
		def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL));

		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
			data |= HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
				HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
				HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
				HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK;
		else
			data &= ~(HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
				HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
				HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
				HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK);

		if (def != data)
			WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL), data);
	} else {
		def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));

		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
			data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
		else
			data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

		if (def != data)
			WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
	}
}

static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));

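	/* Raw masks: bits 24-31 are presumably the per-client soft-override
	 * fields of MP0_MISC_CGTT_CTRL0, for which no sh_mask defines exist.
	 */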
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_MGCG))
		data &= ~(0x01000000 |
			  0x02000000 |
			  0x04000000 |
			  0x08000000 |
			  0x10000000 |
			  0x20000000 |
			  0x40000000 |
			  0x80000000);
	else
		data |= (0x01000000 |
			 0x02000000 |
			 0x04000000 |
			 0x08000000 |
			 0x10000000 |
			 0x20000000 |
			 0x40000000 |
			 0x80000000);

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0), data);
}

static void soc15_update_drm_light_sleep(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
		data |= 1;
	else
		data &= ~1;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL), data);
}

static void soc15_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
						       bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0), data);
}

static int soc15_common_set_clockgating_state(void *handle,
					    enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->df.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case CHIP_ARCTURUS:
		soc15_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	adev->nbio.funcs->get_clockgating_state(adev, flags);

	/* AMD_CG_SUPPORT_HDP_LS */
	data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;

	/* AMD_CG_SUPPORT_DRM_MGCG */
	data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
	if (!(data & 0x01000000))
		*flags |= AMD_CG_SUPPORT_DRM_MGCG;

	/* AMD_CG_SUPPORT_DRM_LS */
	data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
	if (data & 0x1)
		*flags |= AMD_CG_SUPPORT_DRM_LS;

	/* AMD_CG_SUPPORT_ROM_MGCG */
	data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));
	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
		*flags |= AMD_CG_SUPPORT_ROM_MGCG;

	adev->df.funcs->get_clockgating_state(adev, flags);
}

static int soc15_common_set_powergating_state(void *handle,
					    enum amd_powergating_state state)
{
	/* todo */
	return 0;
}

const struct amd_ip_funcs soc15_common_ip_funcs = {
	.name = "soc15_common",
	.early_init = soc15_common_early_init,
	.late_init = soc15_common_late_init,
	.sw_init = soc15_common_sw_init,
	.sw_fini = soc15_common_sw_fini,
	.hw_init = soc15_common_hw_init,
	.hw_fini = soc15_common_hw_fini,
	.suspend = soc15_common_suspend,
	.resume = soc15_common_resume,
	.is_idle = soc15_common_is_idle,
	.wait_for_idle = soc15_common_wait_for_idle,
	.soft_reset = soc15_common_soft_reset,
	.set_clockgating_state = soc15_common_set_clockgating_state,
	.set_powergating_state = soc15_common_set_powergating_state,
	.get_clockgating_state = soc15_common_get_clockgating_state,
};