1 /*
2  * Copyright 2014 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include <linux/pci.h>
25 #include <linux/slab.h>
26 
27 #include "amdgpu.h"
28 #include "amdgpu_atombios.h"
29 #include "amdgpu_ih.h"
30 #include "amdgpu_uvd.h"
31 #include "amdgpu_vce.h"
32 #include "amdgpu_ucode.h"
33 #include "atom.h"
34 #include "amd_pcie.h"
35 
36 #include "gmc/gmc_8_1_d.h"
37 #include "gmc/gmc_8_1_sh_mask.h"
38 
39 #include "oss/oss_3_0_d.h"
40 #include "oss/oss_3_0_sh_mask.h"
41 
42 #include "bif/bif_5_0_d.h"
43 #include "bif/bif_5_0_sh_mask.h"
44 
45 #include "gca/gfx_8_0_d.h"
46 #include "gca/gfx_8_0_sh_mask.h"
47 
48 #include "smu/smu_7_1_1_d.h"
49 #include "smu/smu_7_1_1_sh_mask.h"
50 
51 #include "uvd/uvd_5_0_d.h"
52 #include "uvd/uvd_5_0_sh_mask.h"
53 
54 #include "vce/vce_3_0_d.h"
55 #include "vce/vce_3_0_sh_mask.h"
56 
57 #include "dce/dce_10_0_d.h"
58 #include "dce/dce_10_0_sh_mask.h"
59 
60 #include "vid.h"
61 #include "vi.h"
62 #include "gmc_v8_0.h"
63 #include "gmc_v7_0.h"
64 #include "gfx_v8_0.h"
65 #include "sdma_v2_4.h"
66 #include "sdma_v3_0.h"
67 #include "dce_v10_0.h"
68 #include "dce_v11_0.h"
69 #include "iceland_ih.h"
70 #include "tonga_ih.h"
71 #include "cz_ih.h"
72 #include "uvd_v5_0.h"
73 #include "uvd_v6_0.h"
74 #include "vce_v3_0.h"
75 #if defined(CONFIG_DRM_AMD_ACP)
76 #include "amdgpu_acp.h"
77 #endif
78 #include "dce_virtual.h"
79 #include "mxgpu_vi.h"
80 #include "amdgpu_dm.h"
81 
82 /*
83  * Indirect registers accessor
84  */
85 static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
86 {
87 	unsigned long flags;
88 	u32 r;
89 
90 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
91 	WREG32_NO_KIQ(mmPCIE_INDEX, reg);
92 	(void)RREG32_NO_KIQ(mmPCIE_INDEX);
93 	r = RREG32_NO_KIQ(mmPCIE_DATA);
94 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
95 	return r;
96 }
97 
98 static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
99 {
100 	unsigned long flags;
101 
102 	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
103 	WREG32_NO_KIQ(mmPCIE_INDEX, reg);
104 	(void)RREG32_NO_KIQ(mmPCIE_INDEX);
105 	WREG32_NO_KIQ(mmPCIE_DATA, v);
106 	(void)RREG32_NO_KIQ(mmPCIE_DATA);
107 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
108 }
109 
110 static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
111 {
112 	unsigned long flags;
113 	u32 r;
114 
115 	spin_lock_irqsave(&adev->smc_idx_lock, flags);
116 	WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
117 	r = RREG32_NO_KIQ(mmSMC_IND_DATA_11);
118 	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
119 	return r;
120 }
121 
122 static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
123 {
124 	unsigned long flags;
125 
126 	spin_lock_irqsave(&adev->smc_idx_lock, flags);
127 	WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
128 	WREG32_NO_KIQ(mmSMC_IND_DATA_11, (v));
129 	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
130 }
131 
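/* On the APUs handled here (Carrizo/Stoney) the SMC is reached through the
 * MP0PUB indirect index/data pair below rather than the SMC_IND_INDEX_11/
 * SMC_IND_DATA_11 pair used by the dGPU accessors above; the appropriate
 * pair is selected in vi_common_early_init().
 */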
132 /* smu_8_0_d.h */
133 #define mmMP0PUB_IND_INDEX                                                      0x180
134 #define mmMP0PUB_IND_DATA                                                       0x181
135 
136 static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
137 {
138 	unsigned long flags;
139 	u32 r;
140 
141 	spin_lock_irqsave(&adev->smc_idx_lock, flags);
142 	WREG32(mmMP0PUB_IND_INDEX, (reg));
143 	r = RREG32(mmMP0PUB_IND_DATA);
144 	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
145 	return r;
146 }
147 
148 static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
149 {
150 	unsigned long flags;
151 
152 	spin_lock_irqsave(&adev->smc_idx_lock, flags);
153 	WREG32(mmMP0PUB_IND_INDEX, (reg));
154 	WREG32(mmMP0PUB_IND_DATA, (v));
155 	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
156 }
157 
158 static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
159 {
160 	unsigned long flags;
161 	u32 r;
162 
163 	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
164 	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
165 	r = RREG32(mmUVD_CTX_DATA);
166 	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
167 	return r;
168 }
169 
170 static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
171 {
172 	unsigned long flags;
173 
174 	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
175 	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
176 	WREG32(mmUVD_CTX_DATA, (v));
177 	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
178 }
179 
180 static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
181 {
182 	unsigned long flags;
183 	u32 r;
184 
185 	spin_lock_irqsave(&adev->didt_idx_lock, flags);
186 	WREG32(mmDIDT_IND_INDEX, (reg));
187 	r = RREG32(mmDIDT_IND_DATA);
188 	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
189 	return r;
190 }
191 
192 static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
193 {
194 	unsigned long flags;
195 
196 	spin_lock_irqsave(&adev->didt_idx_lock, flags);
197 	WREG32(mmDIDT_IND_INDEX, (reg));
198 	WREG32(mmDIDT_IND_DATA, (v));
199 	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
200 }
201 
202 static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
203 {
204 	unsigned long flags;
205 	u32 r;
206 
207 	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
208 	WREG32(mmGC_CAC_IND_INDEX, (reg));
209 	r = RREG32(mmGC_CAC_IND_DATA);
210 	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
211 	return r;
212 }
213 
214 static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
215 {
216 	unsigned long flags;
217 
218 	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
219 	WREG32(mmGC_CAC_IND_INDEX, (reg));
220 	WREG32(mmGC_CAC_IND_DATA, (v));
221 	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
222 }
223 
224 
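/* Golden register tables: flat lists of (register, mask, value) triples
 * applied through amdgpu_device_program_register_sequence() in
 * vi_init_golden_registers() below.
 */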
225 static const u32 tonga_mgcg_cgcg_init[] =
226 {
227 	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
228 	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
229 	mmPCIE_DATA, 0x000f0000, 0x00000000,
230 	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
231 	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
232 	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
233 	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
234 };
235 
236 static const u32 fiji_mgcg_cgcg_init[] =
237 {
238 	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
239 	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
240 	mmPCIE_DATA, 0x000f0000, 0x00000000,
241 	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
242 	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
243 	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
244 	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
245 };
246 
247 static const u32 iceland_mgcg_cgcg_init[] =
248 {
249 	mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
250 	mmPCIE_DATA, 0x000f0000, 0x00000000,
251 	mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
252 	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
253 	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
254 };
255 
256 static const u32 cz_mgcg_cgcg_init[] =
257 {
258 	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
259 	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
260 	mmPCIE_DATA, 0x000f0000, 0x00000000,
261 	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
262 	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
263 };
264 
265 static const u32 stoney_mgcg_cgcg_init[] =
266 {
267 	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
268 	mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
269 	mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
270 };
271 
272 static void vi_init_golden_registers(struct amdgpu_device *adev)
273 {
274 	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
275 	mutex_lock(&adev->grbm_idx_mutex);
276 
277 	if (amdgpu_sriov_vf(adev)) {
278 		xgpu_vi_init_golden_registers(adev);
279 		mutex_unlock(&adev->grbm_idx_mutex);
280 		return;
281 	}
282 
283 	switch (adev->asic_type) {
284 	case CHIP_TOPAZ:
285 		amdgpu_device_program_register_sequence(adev,
286 							iceland_mgcg_cgcg_init,
287 							ARRAY_SIZE(iceland_mgcg_cgcg_init));
288 		break;
289 	case CHIP_FIJI:
290 		amdgpu_device_program_register_sequence(adev,
291 							fiji_mgcg_cgcg_init,
292 							ARRAY_SIZE(fiji_mgcg_cgcg_init));
293 		break;
294 	case CHIP_TONGA:
295 		amdgpu_device_program_register_sequence(adev,
296 							tonga_mgcg_cgcg_init,
297 							ARRAY_SIZE(tonga_mgcg_cgcg_init));
298 		break;
299 	case CHIP_CARRIZO:
300 		amdgpu_device_program_register_sequence(adev,
301 							cz_mgcg_cgcg_init,
302 							ARRAY_SIZE(cz_mgcg_cgcg_init));
303 		break;
304 	case CHIP_STONEY:
305 		amdgpu_device_program_register_sequence(adev,
306 							stoney_mgcg_cgcg_init,
307 							ARRAY_SIZE(stoney_mgcg_cgcg_init));
308 		break;
309 	case CHIP_POLARIS10:
310 	case CHIP_POLARIS11:
311 	case CHIP_POLARIS12:
312 	case CHIP_VEGAM:
313 	default:
314 		break;
315 	}
316 	mutex_unlock(&adev->grbm_idx_mutex);
317 }
318 
319 /**
320  * vi_get_xclk - get the xclk
321  *
322  * @adev: amdgpu_device pointer
323  *
324  * Returns the reference clock used by the gfx engine
325  * (VI).
326  */
327 static u32 vi_get_xclk(struct amdgpu_device *adev)
328 {
329 	u32 reference_clock = adev->clock.spll.reference_freq;
330 	u32 tmp;
331 
332 	if (adev->flags & AMD_IS_APU) {
333 		switch (adev->asic_type) {
334 		case CHIP_STONEY:
335 			/* vbios says 48 MHz, but the actual freq is 100 MHz */
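			/* clock values in this function are in 10 kHz units, so 10000 == 100 MHz */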
336 			return 10000;
337 		default:
338 			return reference_clock;
339 		}
340 	}
341 
342 	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
343 	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
344 		return 1000;
345 
346 	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
347 	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
348 		return reference_clock / 4;
349 
350 	return reference_clock;
351 }
352 
353 /**
354  * vi_srbm_select - select specific register instances
355  *
356  * @adev: amdgpu_device pointer
357  * @me: selected ME (micro engine)
358  * @pipe: pipe
359  * @queue: queue
360  * @vmid: VMID
361  *
362  * Switches the currently active register instances.  Some
363  * registers are instanced per VMID, others are instanced per
364  * me/pipe/queue combination.
365  */
366 void vi_srbm_select(struct amdgpu_device *adev,
367 		     u32 me, u32 pipe, u32 queue, u32 vmid)
368 {
369 	u32 srbm_gfx_cntl = 0;
370 	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
371 	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
372 	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
373 	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
374 	WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
375 }
376 
377 static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
378 {
379 	/* todo */
380 }
381 
382 static bool vi_read_disabled_bios(struct amdgpu_device *adev)
383 {
384 	u32 bus_cntl;
385 	u32 d1vga_control = 0;
386 	u32 d2vga_control = 0;
387 	u32 vga_render_control = 0;
388 	u32 rom_cntl;
389 	bool r;
390 
391 	bus_cntl = RREG32(mmBUS_CNTL);
392 	if (adev->mode_info.num_crtc) {
393 		d1vga_control = RREG32(mmD1VGA_CONTROL);
394 		d2vga_control = RREG32(mmD2VGA_CONTROL);
395 		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
396 	}
397 	rom_cntl = RREG32_SMC(ixROM_CNTL);
398 
399 	/* enable the rom */
400 	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
401 	if (adev->mode_info.num_crtc) {
402 		/* Disable VGA mode */
403 		WREG32(mmD1VGA_CONTROL,
404 		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
405 					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
406 		WREG32(mmD2VGA_CONTROL,
407 		       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
408 					  D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
409 		WREG32(mmVGA_RENDER_CONTROL,
410 		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
411 	}
412 	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);
413 
414 	r = amdgpu_read_bios(adev);
415 
416 	/* restore regs */
417 	WREG32(mmBUS_CNTL, bus_cntl);
418 	if (adev->mode_info.num_crtc) {
419 		WREG32(mmD1VGA_CONTROL, d1vga_control);
420 		WREG32(mmD2VGA_CONTROL, d2vga_control);
421 		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
422 	}
423 	WREG32_SMC(ixROM_CNTL, rom_cntl);
424 	return r;
425 }
426 
427 static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
428 				  u8 *bios, u32 length_bytes)
429 {
430 	u32 *dw_ptr;
431 	unsigned long flags;
432 	u32 i, length_dw;
433 
434 	if (bios == NULL)
435 		return false;
436 	if (length_bytes == 0)
437 		return false;
438 	/* APU vbios image is part of sbios image */
439 	if (adev->flags & AMD_IS_APU)
440 		return false;
441 
442 	dw_ptr = (u32 *)bios;
443 	length_dw = ALIGN(length_bytes, 4) / 4;
444 	/* take the smc lock since we are using the smc index */
445 	spin_lock_irqsave(&adev->smc_idx_lock, flags);
446 	/* set rom index to 0 */
447 	WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX);
448 	WREG32(mmSMC_IND_DATA_11, 0);
449 	/* set index to data for continuous read */
450 	WREG32(mmSMC_IND_INDEX_11, ixROM_DATA);
451 	for (i = 0; i < length_dw; i++)
452 		dw_ptr[i] = RREG32(mmSMC_IND_DATA_11);
453 	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
454 
455 	return true;
456 }
457 
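/* White-list of registers that vi_read_register() will service.  Entries
 * flagged true are GRBM-indexed; vi_get_register_value() returns those from
 * the cached gfx config rather than issuing a raw MMIO read.
 */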
458 static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
459 	{mmGRBM_STATUS},
460 	{mmGRBM_STATUS2},
461 	{mmGRBM_STATUS_SE0},
462 	{mmGRBM_STATUS_SE1},
463 	{mmGRBM_STATUS_SE2},
464 	{mmGRBM_STATUS_SE3},
465 	{mmSRBM_STATUS},
466 	{mmSRBM_STATUS2},
467 	{mmSRBM_STATUS3},
468 	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET},
469 	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET},
470 	{mmCP_STAT},
471 	{mmCP_STALLED_STAT1},
472 	{mmCP_STALLED_STAT2},
473 	{mmCP_STALLED_STAT3},
474 	{mmCP_CPF_BUSY_STAT},
475 	{mmCP_CPF_STALLED_STAT1},
476 	{mmCP_CPF_STATUS},
477 	{mmCP_CPC_BUSY_STAT},
478 	{mmCP_CPC_STALLED_STAT1},
479 	{mmCP_CPC_STATUS},
480 	{mmGB_ADDR_CONFIG},
481 	{mmMC_ARB_RAMCFG},
482 	{mmGB_TILE_MODE0},
483 	{mmGB_TILE_MODE1},
484 	{mmGB_TILE_MODE2},
485 	{mmGB_TILE_MODE3},
486 	{mmGB_TILE_MODE4},
487 	{mmGB_TILE_MODE5},
488 	{mmGB_TILE_MODE6},
489 	{mmGB_TILE_MODE7},
490 	{mmGB_TILE_MODE8},
491 	{mmGB_TILE_MODE9},
492 	{mmGB_TILE_MODE10},
493 	{mmGB_TILE_MODE11},
494 	{mmGB_TILE_MODE12},
495 	{mmGB_TILE_MODE13},
496 	{mmGB_TILE_MODE14},
497 	{mmGB_TILE_MODE15},
498 	{mmGB_TILE_MODE16},
499 	{mmGB_TILE_MODE17},
500 	{mmGB_TILE_MODE18},
501 	{mmGB_TILE_MODE19},
502 	{mmGB_TILE_MODE20},
503 	{mmGB_TILE_MODE21},
504 	{mmGB_TILE_MODE22},
505 	{mmGB_TILE_MODE23},
506 	{mmGB_TILE_MODE24},
507 	{mmGB_TILE_MODE25},
508 	{mmGB_TILE_MODE26},
509 	{mmGB_TILE_MODE27},
510 	{mmGB_TILE_MODE28},
511 	{mmGB_TILE_MODE29},
512 	{mmGB_TILE_MODE30},
513 	{mmGB_TILE_MODE31},
514 	{mmGB_MACROTILE_MODE0},
515 	{mmGB_MACROTILE_MODE1},
516 	{mmGB_MACROTILE_MODE2},
517 	{mmGB_MACROTILE_MODE3},
518 	{mmGB_MACROTILE_MODE4},
519 	{mmGB_MACROTILE_MODE5},
520 	{mmGB_MACROTILE_MODE6},
521 	{mmGB_MACROTILE_MODE7},
522 	{mmGB_MACROTILE_MODE8},
523 	{mmGB_MACROTILE_MODE9},
524 	{mmGB_MACROTILE_MODE10},
525 	{mmGB_MACROTILE_MODE11},
526 	{mmGB_MACROTILE_MODE12},
527 	{mmGB_MACROTILE_MODE13},
528 	{mmGB_MACROTILE_MODE14},
529 	{mmGB_MACROTILE_MODE15},
530 	{mmCC_RB_BACKEND_DISABLE, true},
531 	{mmGC_USER_RB_BACKEND_DISABLE, true},
532 	{mmGB_BACKEND_MAP, false},
533 	{mmPA_SC_RASTER_CONFIG, true},
534 	{mmPA_SC_RASTER_CONFIG_1, true},
535 };
536 
537 static uint32_t vi_get_register_value(struct amdgpu_device *adev,
538 				      bool indexed, u32 se_num,
539 				      u32 sh_num, u32 reg_offset)
540 {
541 	if (indexed) {
542 		uint32_t val;
543 		unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
544 		unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;
545 
546 		switch (reg_offset) {
547 		case mmCC_RB_BACKEND_DISABLE:
548 			return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
549 		case mmGC_USER_RB_BACKEND_DISABLE:
550 			return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
551 		case mmPA_SC_RASTER_CONFIG:
552 			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
553 		case mmPA_SC_RASTER_CONFIG_1:
554 			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
555 		}
556 
557 		mutex_lock(&adev->grbm_idx_mutex);
558 		if (se_num != 0xffffffff || sh_num != 0xffffffff)
559 			amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
560 
561 		val = RREG32(reg_offset);
562 
563 		if (se_num != 0xffffffff || sh_num != 0xffffffff)
564 			amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
565 		mutex_unlock(&adev->grbm_idx_mutex);
566 		return val;
567 	} else {
568 		unsigned idx;
569 
570 		switch (reg_offset) {
571 		case mmGB_ADDR_CONFIG:
572 			return adev->gfx.config.gb_addr_config;
573 		case mmMC_ARB_RAMCFG:
574 			return adev->gfx.config.mc_arb_ramcfg;
575 		case mmGB_TILE_MODE0:
576 		case mmGB_TILE_MODE1:
577 		case mmGB_TILE_MODE2:
578 		case mmGB_TILE_MODE3:
579 		case mmGB_TILE_MODE4:
580 		case mmGB_TILE_MODE5:
581 		case mmGB_TILE_MODE6:
582 		case mmGB_TILE_MODE7:
583 		case mmGB_TILE_MODE8:
584 		case mmGB_TILE_MODE9:
585 		case mmGB_TILE_MODE10:
586 		case mmGB_TILE_MODE11:
587 		case mmGB_TILE_MODE12:
588 		case mmGB_TILE_MODE13:
589 		case mmGB_TILE_MODE14:
590 		case mmGB_TILE_MODE15:
591 		case mmGB_TILE_MODE16:
592 		case mmGB_TILE_MODE17:
593 		case mmGB_TILE_MODE18:
594 		case mmGB_TILE_MODE19:
595 		case mmGB_TILE_MODE20:
596 		case mmGB_TILE_MODE21:
597 		case mmGB_TILE_MODE22:
598 		case mmGB_TILE_MODE23:
599 		case mmGB_TILE_MODE24:
600 		case mmGB_TILE_MODE25:
601 		case mmGB_TILE_MODE26:
602 		case mmGB_TILE_MODE27:
603 		case mmGB_TILE_MODE28:
604 		case mmGB_TILE_MODE29:
605 		case mmGB_TILE_MODE30:
606 		case mmGB_TILE_MODE31:
607 			idx = (reg_offset - mmGB_TILE_MODE0);
608 			return adev->gfx.config.tile_mode_array[idx];
609 		case mmGB_MACROTILE_MODE0:
610 		case mmGB_MACROTILE_MODE1:
611 		case mmGB_MACROTILE_MODE2:
612 		case mmGB_MACROTILE_MODE3:
613 		case mmGB_MACROTILE_MODE4:
614 		case mmGB_MACROTILE_MODE5:
615 		case mmGB_MACROTILE_MODE6:
616 		case mmGB_MACROTILE_MODE7:
617 		case mmGB_MACROTILE_MODE8:
618 		case mmGB_MACROTILE_MODE9:
619 		case mmGB_MACROTILE_MODE10:
620 		case mmGB_MACROTILE_MODE11:
621 		case mmGB_MACROTILE_MODE12:
622 		case mmGB_MACROTILE_MODE13:
623 		case mmGB_MACROTILE_MODE14:
624 		case mmGB_MACROTILE_MODE15:
625 			idx = (reg_offset - mmGB_MACROTILE_MODE0);
626 			return adev->gfx.config.macrotile_mode_array[idx];
627 		default:
628 			return RREG32(reg_offset);
629 		}
630 	}
631 }
632 
633 static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
634 			    u32 sh_num, u32 reg_offset, u32 *value)
635 {
636 	uint32_t i;
637 
638 	*value = 0;
639 	for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
640 		bool indexed = vi_allowed_read_registers[i].grbm_indexed;
641 
642 		if (reg_offset != vi_allowed_read_registers[i].reg_offset)
643 			continue;
644 
645 		*value = vi_get_register_value(adev, indexed, se_num, sh_num,
646 					       reg_offset);
647 		return 0;
648 	}
649 	return -EINVAL;
650 }
651 
652 static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
653 {
654 	u32 i;
655 
656 	dev_info(adev->dev, "GPU pci config reset\n");
657 
658 	/* disable BM */
659 	pci_clear_master(adev->pdev);
660 	/* reset */
661 	amdgpu_device_pci_config_reset(adev);
662 
663 	udelay(100);
664 
665 	/* wait for asic to come out of reset */
666 	for (i = 0; i < adev->usec_timeout; i++) {
667 		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
668 			/* enable BM */
669 			pci_set_master(adev->pdev);
670 			adev->has_hw_reset = true;
671 			return 0;
672 		}
673 		udelay(1);
674 	}
675 	return -EINVAL;
676 }
677 
678 /**
679  * vi_asic_pci_config_reset - soft reset GPU
680  *
681  * @adev: amdgpu_device pointer
682  *
683  * Use PCI Config method to reset the GPU.
684  *
685  * Returns 0 for success.
686  */
687 static int vi_asic_pci_config_reset(struct amdgpu_device *adev)
688 {
689 	int r;
690 
691 	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
692 
693 	r = vi_gpu_pci_config_reset(adev);
694 
695 	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
696 
697 	return r;
698 }
699 
700 static bool vi_asic_supports_baco(struct amdgpu_device *adev)
701 {
702 	switch (adev->asic_type) {
703 	case CHIP_FIJI:
704 	case CHIP_TONGA:
705 	case CHIP_POLARIS10:
706 	case CHIP_POLARIS11:
707 	case CHIP_POLARIS12:
708 	case CHIP_TOPAZ:
709 		return amdgpu_dpm_is_baco_supported(adev);
710 	default:
711 		return false;
712 	}
713 }
714 
715 static enum amd_reset_method
716 vi_asic_reset_method(struct amdgpu_device *adev)
717 {
718 	bool baco_reset;
719 
720 	if (amdgpu_reset_method == AMD_RESET_METHOD_LEGACY ||
721 	    amdgpu_reset_method == AMD_RESET_METHOD_BACO)
722 		return amdgpu_reset_method;
723 
724 	if (amdgpu_reset_method != -1)
725 		dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
726 				  amdgpu_reset_method);
727 
728 	switch (adev->asic_type) {
729 	case CHIP_FIJI:
730 	case CHIP_TONGA:
731 	case CHIP_POLARIS10:
732 	case CHIP_POLARIS11:
733 	case CHIP_POLARIS12:
734 	case CHIP_TOPAZ:
735 		baco_reset = amdgpu_dpm_is_baco_supported(adev);
736 		break;
737 	default:
738 		baco_reset = false;
739 		break;
740 	}
741 
742 	if (baco_reset)
743 		return AMD_RESET_METHOD_BACO;
744 	else
745 		return AMD_RESET_METHOD_LEGACY;
746 }
747 
748 /**
749  * vi_asic_reset - soft reset GPU
750  *
751  * @adev: amdgpu_device pointer
752  *
753  * Look up which blocks are hung and attempt
754  * to reset them.
755  * Returns 0 for success.
756  */
757 static int vi_asic_reset(struct amdgpu_device *adev)
758 {
759 	int r;
760 
761 	if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
762 		dev_info(adev->dev, "BACO reset\n");
763 		r = amdgpu_dpm_baco_reset(adev);
764 	} else {
765 		dev_info(adev->dev, "PCI CONFIG reset\n");
766 		r = vi_asic_pci_config_reset(adev);
767 	}
768 
769 	return r;
770 }
771 
772 static u32 vi_get_config_memsize(struct amdgpu_device *adev)
773 {
774 	return RREG32(mmCONFIG_MEMSIZE);
775 }
776 
777 static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
778 			u32 cntl_reg, u32 status_reg)
779 {
780 	int r, i;
781 	struct atom_clock_dividers dividers;
782 	uint32_t tmp;
783 
784 	r = amdgpu_atombios_get_clock_dividers(adev,
785 					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
786 					       clock, false, &dividers);
787 	if (r)
788 		return r;
789 
790 	tmp = RREG32_SMC(cntl_reg);
791 
792 	if (adev->flags & AMD_IS_APU)
793 		tmp &= ~CG_DCLK_CNTL__DCLK_DIVIDER_MASK;
794 	else
795 		tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
796 				CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
797 	tmp |= dividers.post_divider;
798 	WREG32_SMC(cntl_reg, tmp);
799 
800 	for (i = 0; i < 100; i++) {
801 		tmp = RREG32_SMC(status_reg);
802 		if (adev->flags & AMD_IS_APU) {
803 			if (tmp & 0x10000)
804 				break;
805 		} else {
806 			if (tmp & CG_DCLK_STATUS__DCLK_STATUS_MASK)
807 				break;
808 		}
809 		mdelay(10);
810 	}
811 	if (i == 100)
812 		return -ETIMEDOUT;
813 	return 0;
814 }
815 
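/* GNB DFS control/status registers used for UVD/VCE clock programming on
 * APUs, in place of the CG_VCLK/CG_DCLK/CG_ECLK registers used on dGPUs.
 */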
816 #define ixGNB_CLK1_DFS_CNTL 0xD82200F0
817 #define ixGNB_CLK1_STATUS   0xD822010C
818 #define ixGNB_CLK2_DFS_CNTL 0xD8220110
819 #define ixGNB_CLK2_STATUS   0xD822012C
820 #define ixGNB_CLK3_DFS_CNTL 0xD8220130
821 #define ixGNB_CLK3_STATUS   0xD822014C
822 
823 static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
824 {
825 	int r;
826 
827 	if (adev->flags & AMD_IS_APU) {
828 		r = vi_set_uvd_clock(adev, vclk, ixGNB_CLK2_DFS_CNTL, ixGNB_CLK2_STATUS);
829 		if (r)
830 			return r;
831 
832 		r = vi_set_uvd_clock(adev, dclk, ixGNB_CLK1_DFS_CNTL, ixGNB_CLK1_STATUS);
833 		if (r)
834 			return r;
835 	} else {
836 		r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
837 		if (r)
838 			return r;
839 
840 		r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
841 		if (r)
842 			return r;
843 	}
844 
845 	return 0;
846 }
847 
848 static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
849 {
850 	int r, i;
851 	struct atom_clock_dividers dividers;
852 	u32 tmp;
853 	u32 reg_ctrl;
854 	u32 reg_status;
855 	u32 status_mask;
856 	u32 reg_mask;
857 
858 	if (adev->flags & AMD_IS_APU) {
859 		reg_ctrl = ixGNB_CLK3_DFS_CNTL;
860 		reg_status = ixGNB_CLK3_STATUS;
861 		status_mask = 0x00010000;
862 		reg_mask = CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
863 	} else {
864 		reg_ctrl = ixCG_ECLK_CNTL;
865 		reg_status = ixCG_ECLK_STATUS;
866 		status_mask = CG_ECLK_STATUS__ECLK_STATUS_MASK;
867 		reg_mask = CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK | CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
868 	}
869 
870 	r = amdgpu_atombios_get_clock_dividers(adev,
871 					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
872 					       ecclk, false, &dividers);
873 	if (r)
874 		return r;
875 
876 	for (i = 0; i < 100; i++) {
877 		if (RREG32_SMC(reg_status) & status_mask)
878 			break;
879 		mdelay(10);
880 	}
881 
882 	if (i == 100)
883 		return -ETIMEDOUT;
884 
885 	tmp = RREG32_SMC(reg_ctrl);
886 	tmp &= ~reg_mask;
887 	tmp |= dividers.post_divider;
888 	WREG32_SMC(reg_ctrl, tmp);
889 
890 	for (i = 0; i < 100; i++) {
891 		if (RREG32_SMC(reg_status) & status_mask)
892 			break;
893 		mdelay(10);
894 	}
895 
896 	if (i == 100)
897 		return -ETIMEDOUT;
898 
899 	return 0;
900 }
901 
902 static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
903 {
904 	if (pci_is_root_bus(adev->pdev->bus))
905 		return;
906 
907 	if (amdgpu_pcie_gen2 == 0)
908 		return;
909 
910 	if (adev->flags & AMD_IS_APU)
911 		return;
912 
913 	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
914 					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
915 		return;
916 
917 	/* todo */
918 }
919 
920 static void vi_program_aspm(struct amdgpu_device *adev)
921 {
922 
923 	if (amdgpu_aspm == 0)
924 		return;
925 
926 	/* todo */
927 }
928 
929 static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
930 					bool enable)
931 {
932 	u32 tmp;
933 
934 	/* not necessary on CZ */
935 	if (adev->flags & AMD_IS_APU)
936 		return;
937 
938 	tmp = RREG32(mmBIF_DOORBELL_APER_EN);
939 	if (enable)
940 		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
941 	else
942 		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);
943 
944 	WREG32(mmBIF_DOORBELL_APER_EN, tmp);
945 }
946 
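/* The ATI revision id comes from an SMC fuse on APUs and from a PCIE strap
 * register on dGPUs; see vi_get_rev_id() below.
 */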
947 #define ATI_REV_ID_FUSE_MACRO__ADDRESS      0xC0014044
948 #define ATI_REV_ID_FUSE_MACRO__SHIFT        9
949 #define ATI_REV_ID_FUSE_MACRO__MASK         0x00001E00
950 
951 static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
952 {
953 	if (adev->flags & AMD_IS_APU)
954 		return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
955 			>> ATI_REV_ID_FUSE_MACRO__SHIFT;
956 	else
957 		return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
958 			>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
959 }
960 
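/* Flush the HDP write cache, either with a direct MMIO write or, when a ring
 * is supplied, by emitting the register write on that ring so the flush is
 * ordered with the ring's other commands.
 */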
961 static void vi_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
962 {
963 	if (!ring || !ring->funcs->emit_wreg) {
964 		WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
965 		RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
966 	} else {
967 		amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
968 	}
969 }
970 
971 static void vi_invalidate_hdp(struct amdgpu_device *adev,
972 			      struct amdgpu_ring *ring)
973 {
974 	if (!ring || !ring->funcs->emit_wreg) {
975 		WREG32(mmHDP_DEBUG0, 1);
976 		RREG32(mmHDP_DEBUG0);
977 	} else {
978 		amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
979 	}
980 }
981 
982 static bool vi_need_full_reset(struct amdgpu_device *adev)
983 {
984 	switch (adev->asic_type) {
985 	case CHIP_CARRIZO:
986 	case CHIP_STONEY:
987 		/* CZ has hang issues with full reset at the moment */
988 		return false;
989 	case CHIP_FIJI:
990 	case CHIP_TONGA:
991 		/* XXX: soft reset should work on fiji and tonga */
992 		return true;
993 	case CHIP_POLARIS10:
994 	case CHIP_POLARIS11:
995 	case CHIP_POLARIS12:
996 	case CHIP_TOPAZ:
997 	default:
998 		/* change this when we support soft reset */
999 		return true;
1000 	}
1001 }
1002 
1003 static void vi_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
1004 			      uint64_t *count1)
1005 {
1006 	uint32_t perfctr = 0;
1007 	uint64_t cnt0_of, cnt1_of;
1008 	int tmp;
1009 
1010 	/* This reports 0 on APUs, so return to avoid writing/reading registers
1011 	 * that may or may not be different from their GPU counterparts
1012 	 */
1013 	if (adev->flags & AMD_IS_APU)
1014 		return;
1015 
1016 	/* Set the 2 events that we wish to watch, defined above */
1017 	/* Reg 40 is # received msgs, Reg 104 is # of posted requests sent */
1018 	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
1019 	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);
1020 
1021 	/* Write to enable desired perf counters */
1022 	WREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK, perfctr);
1023 	/* Zero out and enable the perf counters
1024 	 * Write 0x5:
1025 	 * Bit 0 = Start all counters(1)
1026 	 * Bit 2 = Global counter reset enable(1)
1027 	 */
1028 	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000005);
1029 
1030 	msleep(1000);
1031 
1032 	/* Load the shadow and disable the perf counters
1033 	 * Write 0x2:
1034 	 * Bit 0 = Stop counters(0)
1035 	 * Bit 1 = Load the shadow counters(1)
1036 	 */
1037 	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000002);
1038 
1039 	/* Read register values to get any >32bit overflow */
1040 	tmp = RREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK);
1041 	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
1042 	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);
1043 
1044 	/* Get the values and add the overflow */
1045 	*count0 = RREG32_PCIE(ixPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
1046 	*count1 = RREG32_PCIE(ixPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
1047 }
1048 
1049 static uint64_t vi_get_pcie_replay_count(struct amdgpu_device *adev)
1050 {
1051 	uint64_t nak_r, nak_g;
1052 
1053 	/* Get the number of NAKs received and generated */
1054 	nak_r = RREG32_PCIE(ixPCIE_RX_NUM_NAK);
1055 	nak_g = RREG32_PCIE(ixPCIE_RX_NUM_NAK_GENERATED);
1056 
1057 	/* Add the total number of NAKs, i.e. the number of replays */
1058 	return (nak_r + nak_g);
1059 }
1060 
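/* Heuristic: if the SMC is already running at driver load (its clock is not
 * disabled and its program counter has advanced), the ASIC was most likely
 * left initialized by a previous driver instance, so request a reset first.
 */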
1061 static bool vi_need_reset_on_init(struct amdgpu_device *adev)
1062 {
1063 	u32 clock_cntl, pc;
1064 
1065 	if (adev->flags & AMD_IS_APU)
1066 		return false;
1067 
1068 	/* check if the SMC is already running */
1069 	clock_cntl = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
1070 	pc = RREG32_SMC(ixSMC_PC_C);
1071 	if ((0 == REG_GET_FIELD(clock_cntl, SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) &&
1072 	    (0x20100 <= pc))
1073 		return true;
1074 
1075 	return false;
1076 }
1077 
1078 static void vi_pre_asic_init(struct amdgpu_device *adev)
1079 {
1080 }
1081 
1082 static const struct amdgpu_asic_funcs vi_asic_funcs =
1083 {
1084 	.read_disabled_bios = &vi_read_disabled_bios,
1085 	.read_bios_from_rom = &vi_read_bios_from_rom,
1086 	.read_register = &vi_read_register,
1087 	.reset = &vi_asic_reset,
1088 	.reset_method = &vi_asic_reset_method,
1089 	.set_vga_state = &vi_vga_set_state,
1090 	.get_xclk = &vi_get_xclk,
1091 	.set_uvd_clocks = &vi_set_uvd_clocks,
1092 	.set_vce_clocks = &vi_set_vce_clocks,
1093 	.get_config_memsize = &vi_get_config_memsize,
1094 	.flush_hdp = &vi_flush_hdp,
1095 	.invalidate_hdp = &vi_invalidate_hdp,
1096 	.need_full_reset = &vi_need_full_reset,
1097 	.init_doorbell_index = &legacy_doorbell_index_init,
1098 	.get_pcie_usage = &vi_get_pcie_usage,
1099 	.need_reset_on_init = &vi_need_reset_on_init,
1100 	.get_pcie_replay_count = &vi_get_pcie_replay_count,
1101 	.supports_baco = &vi_asic_supports_baco,
1102 	.pre_asic_init = &vi_pre_asic_init,
1103 };
1104 
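/* PCI revision ids in these ranges identify Bristol Ridge parts; used in
 * vi_common_early_init() to enable power gating that plain rev-0 Carrizo
 * hardware cannot support.
 */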
1105 #define CZ_REV_BRISTOL(rev)	 \
1106 	((rev >= 0xC8 && rev <= 0xCE) || (rev >= 0xE1 && rev <= 0xE6))
1107 
1108 static int vi_common_early_init(void *handle)
1109 {
1110 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1111 
1112 	if (adev->flags & AMD_IS_APU) {
1113 		adev->smc_rreg = &cz_smc_rreg;
1114 		adev->smc_wreg = &cz_smc_wreg;
1115 	} else {
1116 		adev->smc_rreg = &vi_smc_rreg;
1117 		adev->smc_wreg = &vi_smc_wreg;
1118 	}
1119 	adev->pcie_rreg = &vi_pcie_rreg;
1120 	adev->pcie_wreg = &vi_pcie_wreg;
1121 	adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
1122 	adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
1123 	adev->didt_rreg = &vi_didt_rreg;
1124 	adev->didt_wreg = &vi_didt_wreg;
1125 	adev->gc_cac_rreg = &vi_gc_cac_rreg;
1126 	adev->gc_cac_wreg = &vi_gc_cac_wreg;
1127 
1128 	adev->asic_funcs = &vi_asic_funcs;
1129 
1130 	adev->rev_id = vi_get_rev_id(adev);
1131 	adev->external_rev_id = 0xFF;
1132 	switch (adev->asic_type) {
1133 	case CHIP_TOPAZ:
1134 		adev->cg_flags = 0;
1135 		adev->pg_flags = 0;
1136 		adev->external_rev_id = 0x1;
1137 		break;
1138 	case CHIP_FIJI:
1139 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1140 			AMD_CG_SUPPORT_GFX_MGLS |
1141 			AMD_CG_SUPPORT_GFX_RLC_LS |
1142 			AMD_CG_SUPPORT_GFX_CP_LS |
1143 			AMD_CG_SUPPORT_GFX_CGTS |
1144 			AMD_CG_SUPPORT_GFX_CGTS_LS |
1145 			AMD_CG_SUPPORT_GFX_CGCG |
1146 			AMD_CG_SUPPORT_GFX_CGLS |
1147 			AMD_CG_SUPPORT_SDMA_MGCG |
1148 			AMD_CG_SUPPORT_SDMA_LS |
1149 			AMD_CG_SUPPORT_BIF_LS |
1150 			AMD_CG_SUPPORT_HDP_MGCG |
1151 			AMD_CG_SUPPORT_HDP_LS |
1152 			AMD_CG_SUPPORT_ROM_MGCG |
1153 			AMD_CG_SUPPORT_MC_MGCG |
1154 			AMD_CG_SUPPORT_MC_LS |
1155 			AMD_CG_SUPPORT_UVD_MGCG;
1156 		adev->pg_flags = 0;
1157 		adev->external_rev_id = adev->rev_id + 0x3c;
1158 		break;
1159 	case CHIP_TONGA:
1160 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1161 			AMD_CG_SUPPORT_GFX_CGCG |
1162 			AMD_CG_SUPPORT_GFX_CGLS |
1163 			AMD_CG_SUPPORT_SDMA_MGCG |
1164 			AMD_CG_SUPPORT_SDMA_LS |
1165 			AMD_CG_SUPPORT_BIF_LS |
1166 			AMD_CG_SUPPORT_HDP_MGCG |
1167 			AMD_CG_SUPPORT_HDP_LS |
1168 			AMD_CG_SUPPORT_ROM_MGCG |
1169 			AMD_CG_SUPPORT_MC_MGCG |
1170 			AMD_CG_SUPPORT_MC_LS |
1171 			AMD_CG_SUPPORT_DRM_LS |
1172 			AMD_CG_SUPPORT_UVD_MGCG;
1173 		adev->pg_flags = 0;
1174 		adev->external_rev_id = adev->rev_id + 0x14;
1175 		break;
1176 	case CHIP_POLARIS11:
1177 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1178 			AMD_CG_SUPPORT_GFX_RLC_LS |
1179 			AMD_CG_SUPPORT_GFX_CP_LS |
1180 			AMD_CG_SUPPORT_GFX_CGCG |
1181 			AMD_CG_SUPPORT_GFX_CGLS |
1182 			AMD_CG_SUPPORT_GFX_3D_CGCG |
1183 			AMD_CG_SUPPORT_GFX_3D_CGLS |
1184 			AMD_CG_SUPPORT_SDMA_MGCG |
1185 			AMD_CG_SUPPORT_SDMA_LS |
1186 			AMD_CG_SUPPORT_BIF_MGCG |
1187 			AMD_CG_SUPPORT_BIF_LS |
1188 			AMD_CG_SUPPORT_HDP_MGCG |
1189 			AMD_CG_SUPPORT_HDP_LS |
1190 			AMD_CG_SUPPORT_ROM_MGCG |
1191 			AMD_CG_SUPPORT_MC_MGCG |
1192 			AMD_CG_SUPPORT_MC_LS |
1193 			AMD_CG_SUPPORT_DRM_LS |
1194 			AMD_CG_SUPPORT_UVD_MGCG |
1195 			AMD_CG_SUPPORT_VCE_MGCG;
1196 		adev->pg_flags = 0;
1197 		adev->external_rev_id = adev->rev_id + 0x5A;
1198 		break;
1199 	case CHIP_POLARIS10:
1200 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1201 			AMD_CG_SUPPORT_GFX_RLC_LS |
1202 			AMD_CG_SUPPORT_GFX_CP_LS |
1203 			AMD_CG_SUPPORT_GFX_CGCG |
1204 			AMD_CG_SUPPORT_GFX_CGLS |
1205 			AMD_CG_SUPPORT_GFX_3D_CGCG |
1206 			AMD_CG_SUPPORT_GFX_3D_CGLS |
1207 			AMD_CG_SUPPORT_SDMA_MGCG |
1208 			AMD_CG_SUPPORT_SDMA_LS |
1209 			AMD_CG_SUPPORT_BIF_MGCG |
1210 			AMD_CG_SUPPORT_BIF_LS |
1211 			AMD_CG_SUPPORT_HDP_MGCG |
1212 			AMD_CG_SUPPORT_HDP_LS |
1213 			AMD_CG_SUPPORT_ROM_MGCG |
1214 			AMD_CG_SUPPORT_MC_MGCG |
1215 			AMD_CG_SUPPORT_MC_LS |
1216 			AMD_CG_SUPPORT_DRM_LS |
1217 			AMD_CG_SUPPORT_UVD_MGCG |
1218 			AMD_CG_SUPPORT_VCE_MGCG;
1219 		adev->pg_flags = 0;
1220 		adev->external_rev_id = adev->rev_id + 0x50;
1221 		break;
1222 	case CHIP_POLARIS12:
1223 		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1224 			AMD_CG_SUPPORT_GFX_RLC_LS |
1225 			AMD_CG_SUPPORT_GFX_CP_LS |
1226 			AMD_CG_SUPPORT_GFX_CGCG |
1227 			AMD_CG_SUPPORT_GFX_CGLS |
1228 			AMD_CG_SUPPORT_GFX_3D_CGCG |
1229 			AMD_CG_SUPPORT_GFX_3D_CGLS |
1230 			AMD_CG_SUPPORT_SDMA_MGCG |
1231 			AMD_CG_SUPPORT_SDMA_LS |
1232 			AMD_CG_SUPPORT_BIF_MGCG |
1233 			AMD_CG_SUPPORT_BIF_LS |
1234 			AMD_CG_SUPPORT_HDP_MGCG |
1235 			AMD_CG_SUPPORT_HDP_LS |
1236 			AMD_CG_SUPPORT_ROM_MGCG |
1237 			AMD_CG_SUPPORT_MC_MGCG |
1238 			AMD_CG_SUPPORT_MC_LS |
1239 			AMD_CG_SUPPORT_DRM_LS |
1240 			AMD_CG_SUPPORT_UVD_MGCG |
1241 			AMD_CG_SUPPORT_VCE_MGCG;
1242 		adev->pg_flags = 0;
1243 		adev->external_rev_id = adev->rev_id + 0x64;
1244 		break;
1245 	case CHIP_VEGAM:
1246 		adev->cg_flags = 0;
1247 			/*AMD_CG_SUPPORT_GFX_MGCG |
1248 			AMD_CG_SUPPORT_GFX_RLC_LS |
1249 			AMD_CG_SUPPORT_GFX_CP_LS |
1250 			AMD_CG_SUPPORT_GFX_CGCG |
1251 			AMD_CG_SUPPORT_GFX_CGLS |
1252 			AMD_CG_SUPPORT_GFX_3D_CGCG |
1253 			AMD_CG_SUPPORT_GFX_3D_CGLS |
1254 			AMD_CG_SUPPORT_SDMA_MGCG |
1255 			AMD_CG_SUPPORT_SDMA_LS |
1256 			AMD_CG_SUPPORT_BIF_MGCG |
1257 			AMD_CG_SUPPORT_BIF_LS |
1258 			AMD_CG_SUPPORT_HDP_MGCG |
1259 			AMD_CG_SUPPORT_HDP_LS |
1260 			AMD_CG_SUPPORT_ROM_MGCG |
1261 			AMD_CG_SUPPORT_MC_MGCG |
1262 			AMD_CG_SUPPORT_MC_LS |
1263 			AMD_CG_SUPPORT_DRM_LS |
1264 			AMD_CG_SUPPORT_UVD_MGCG |
1265 			AMD_CG_SUPPORT_VCE_MGCG;*/
1266 		adev->pg_flags = 0;
1267 		adev->external_rev_id = adev->rev_id + 0x6E;
1268 		break;
1269 	case CHIP_CARRIZO:
1270 		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
1271 			AMD_CG_SUPPORT_GFX_MGCG |
1272 			AMD_CG_SUPPORT_GFX_MGLS |
1273 			AMD_CG_SUPPORT_GFX_RLC_LS |
1274 			AMD_CG_SUPPORT_GFX_CP_LS |
1275 			AMD_CG_SUPPORT_GFX_CGTS |
1276 			AMD_CG_SUPPORT_GFX_CGTS_LS |
1277 			AMD_CG_SUPPORT_GFX_CGCG |
1278 			AMD_CG_SUPPORT_GFX_CGLS |
1279 			AMD_CG_SUPPORT_BIF_LS |
1280 			AMD_CG_SUPPORT_HDP_MGCG |
1281 			AMD_CG_SUPPORT_HDP_LS |
1282 			AMD_CG_SUPPORT_SDMA_MGCG |
1283 			AMD_CG_SUPPORT_SDMA_LS |
1284 			AMD_CG_SUPPORT_VCE_MGCG;
1285 		/* rev0 hardware requires workarounds to support PG */
1286 		adev->pg_flags = 0;
1287 		if (adev->rev_id != 0x00 || CZ_REV_BRISTOL(adev->pdev->revision)) {
1288 			adev->pg_flags |= AMD_PG_SUPPORT_GFX_SMG |
1289 				AMD_PG_SUPPORT_GFX_PIPELINE |
1290 				AMD_PG_SUPPORT_CP |
1291 				AMD_PG_SUPPORT_UVD |
1292 				AMD_PG_SUPPORT_VCE;
1293 		}
1294 		adev->external_rev_id = adev->rev_id + 0x1;
1295 		break;
1296 	case CHIP_STONEY:
1297 		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
1298 			AMD_CG_SUPPORT_GFX_MGCG |
1299 			AMD_CG_SUPPORT_GFX_MGLS |
1300 			AMD_CG_SUPPORT_GFX_RLC_LS |
1301 			AMD_CG_SUPPORT_GFX_CP_LS |
1302 			AMD_CG_SUPPORT_GFX_CGTS |
1303 			AMD_CG_SUPPORT_GFX_CGTS_LS |
1304 			AMD_CG_SUPPORT_GFX_CGLS |
1305 			AMD_CG_SUPPORT_BIF_LS |
1306 			AMD_CG_SUPPORT_HDP_MGCG |
1307 			AMD_CG_SUPPORT_HDP_LS |
1308 			AMD_CG_SUPPORT_SDMA_MGCG |
1309 			AMD_CG_SUPPORT_SDMA_LS |
1310 			AMD_CG_SUPPORT_VCE_MGCG;
1311 		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
1312 			AMD_PG_SUPPORT_GFX_SMG |
1313 			AMD_PG_SUPPORT_GFX_PIPELINE |
1314 			AMD_PG_SUPPORT_CP |
1315 			AMD_PG_SUPPORT_UVD |
1316 			AMD_PG_SUPPORT_VCE;
1317 		adev->external_rev_id = adev->rev_id + 0x61;
1318 		break;
1319 	default:
1320 		/* FIXME: not supported yet */
1321 		return -EINVAL;
1322 	}
1323 
1324 	if (amdgpu_sriov_vf(adev)) {
1325 		amdgpu_virt_init_setting(adev);
1326 		xgpu_vi_mailbox_set_irq_funcs(adev);
1327 	}
1328 
1329 	return 0;
1330 }
1331 
1332 static int vi_common_late_init(void *handle)
1333 {
1334 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1335 
1336 	if (amdgpu_sriov_vf(adev))
1337 		xgpu_vi_mailbox_get_irq(adev);
1338 
1339 	return 0;
1340 }
1341 
1342 static int vi_common_sw_init(void *handle)
1343 {
1344 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1345 
1346 	if (amdgpu_sriov_vf(adev))
1347 		xgpu_vi_mailbox_add_irq_id(adev);
1348 
1349 	return 0;
1350 }
1351 
1352 static int vi_common_sw_fini(void *handle)
1353 {
1354 	return 0;
1355 }
1356 
1357 static int vi_common_hw_init(void *handle)
1358 {
1359 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1360 
1361 	/* move the golden regs per IP block */
1362 	vi_init_golden_registers(adev);
1363 	/* enable pcie gen2/3 link */
1364 	vi_pcie_gen3_enable(adev);
1365 	/* enable aspm */
1366 	vi_program_aspm(adev);
1367 	/* enable the doorbell aperture */
1368 	vi_enable_doorbell_aperture(adev, true);
1369 
1370 	return 0;
1371 }
1372 
1373 static int vi_common_hw_fini(void *handle)
1374 {
1375 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1376 
1377 	/* enable the doorbell aperture */
1378 	vi_enable_doorbell_aperture(adev, false);
1379 
1380 	if (amdgpu_sriov_vf(adev))
1381 		xgpu_vi_mailbox_put_irq(adev);
1382 
1383 	return 0;
1384 }
1385 
1386 static int vi_common_suspend(void *handle)
1387 {
1388 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1389 
1390 	return vi_common_hw_fini(adev);
1391 }
1392 
1393 static int vi_common_resume(void *handle)
1394 {
1395 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1396 
1397 	return vi_common_hw_init(adev);
1398 }
1399 
1400 static bool vi_common_is_idle(void *handle)
1401 {
1402 	return true;
1403 }
1404 
1405 static int vi_common_wait_for_idle(void *handle)
1406 {
1407 	return 0;
1408 }
1409 
1410 static int vi_common_soft_reset(void *handle)
1411 {
1412 	return 0;
1413 }
1414 
1415 static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
1416 						   bool enable)
1417 {
1418 	uint32_t temp, data;
1419 
1420 	temp = data = RREG32_PCIE(ixPCIE_CNTL2);
1421 
1422 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
1423 		data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
1424 				PCIE_CNTL2__MST_MEM_LS_EN_MASK |
1425 				PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
1426 	else
1427 		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
1428 				PCIE_CNTL2__MST_MEM_LS_EN_MASK |
1429 				PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
1430 
1431 	if (temp != data)
1432 		WREG32_PCIE(ixPCIE_CNTL2, data);
1433 }
1434 
1435 static void vi_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
1436 						    bool enable)
1437 {
1438 	uint32_t temp, data;
1439 
1440 	temp = data = RREG32(mmHDP_HOST_PATH_CNTL);
1441 
1442 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
1443 		data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
1444 	else
1445 		data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
1446 
1447 	if (temp != data)
1448 		WREG32(mmHDP_HOST_PATH_CNTL, data);
1449 }
1450 
1451 static void vi_update_hdp_light_sleep(struct amdgpu_device *adev,
1452 				      bool enable)
1453 {
1454 	uint32_t temp, data;
1455 
1456 	temp = data = RREG32(mmHDP_MEM_POWER_LS);
1457 
1458 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
1459 		data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
1460 	else
1461 		data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
1462 
1463 	if (temp != data)
1464 		WREG32(mmHDP_MEM_POWER_LS, data);
1465 }
1466 
1467 static void vi_update_drm_light_sleep(struct amdgpu_device *adev,
1468 				      bool enable)
1469 {
1470 	uint32_t temp, data;
1471 
1472 	temp = data = RREG32(0x157a);
1473 
1474 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
1475 		data |= 1;
1476 	else
1477 		data &= ~1;
1478 
1479 	if (temp != data)
1480 		WREG32(0x157a, data);
1481 }
1482 
1483 
1484 static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
1485 						    bool enable)
1486 {
1487 	uint32_t temp, data;
1488 
1489 	temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
1490 
1491 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
1492 		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
1493 				CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
1494 	else
1495 		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
1496 				CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;
1497 
1498 	if (temp != data)
1499 		WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
1500 }
1501 
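/* For Tonga/Polaris the SMU handles clock gating of the system blocks, so
 * gating is requested per block through PP_CG_MSG_ID messages rather than by
 * programming the registers directly; see vi_common_set_clockgating_state().
 */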
1502 static int vi_common_set_clockgating_state_by_smu(void *handle,
1503 					   enum amd_clockgating_state state)
1504 {
1505 	uint32_t msg_id, pp_state = 0;
1506 	uint32_t pp_support_state = 0;
1507 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1508 
1509 	if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
1510 		if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
1511 			pp_support_state = PP_STATE_SUPPORT_LS;
1512 			pp_state = PP_STATE_LS;
1513 		}
1514 		if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
1515 			pp_support_state |= PP_STATE_SUPPORT_CG;
1516 			pp_state |= PP_STATE_CG;
1517 		}
1518 		if (state == AMD_CG_STATE_UNGATE)
1519 			pp_state = 0;
1520 		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1521 			       PP_BLOCK_SYS_MC,
1522 			       pp_support_state,
1523 			       pp_state);
1524 		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1525 	}
1526 
1527 	if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
1528 		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) {
1529 			pp_support_state = PP_STATE_SUPPORT_LS;
1530 			pp_state = PP_STATE_LS;
1531 		}
1532 		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) {
1533 			pp_support_state |= PP_STATE_SUPPORT_CG;
1534 			pp_state |= PP_STATE_CG;
1535 		}
1536 		if (state == AMD_CG_STATE_UNGATE)
1537 			pp_state = 0;
1538 		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1539 			       PP_BLOCK_SYS_SDMA,
1540 			       pp_support_state,
1541 			       pp_state);
1542 		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1543 	}
1544 
1545 	if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
1546 		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
1547 			pp_support_state = PP_STATE_SUPPORT_LS;
1548 			pp_state = PP_STATE_LS;
1549 		}
1550 		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) {
1551 			pp_support_state |= PP_STATE_SUPPORT_CG;
1552 			pp_state |= PP_STATE_CG;
1553 		}
1554 		if (state == AMD_CG_STATE_UNGATE)
1555 			pp_state = 0;
1556 		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1557 			       PP_BLOCK_SYS_HDP,
1558 			       pp_support_state,
1559 			       pp_state);
1560 		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1561 	}
1562 
1563 
1564 	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS) {
1565 		if (state == AMD_CG_STATE_UNGATE)
1566 			pp_state = 0;
1567 		else
1568 			pp_state = PP_STATE_LS;
1569 
1570 		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1571 			       PP_BLOCK_SYS_BIF,
1572 			       PP_STATE_SUPPORT_LS,
1573 			        pp_state);
1574 		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1575 	}
1576 	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) {
1577 		if (state == AMD_CG_STATE_UNGATE)
1578 			pp_state = 0;
1579 		else
1580 			pp_state = PP_STATE_CG;
1581 
1582 		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1583 			       PP_BLOCK_SYS_BIF,
1584 			       PP_STATE_SUPPORT_CG,
1585 			       pp_state);
1586 		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1587 	}
1588 
1589 	if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) {
1590 
1591 		if (state == AMD_CG_STATE_UNGATE)
1592 			pp_state = 0;
1593 		else
1594 			pp_state = PP_STATE_LS;
1595 
1596 		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1597 			       PP_BLOCK_SYS_DRM,
1598 			       PP_STATE_SUPPORT_LS,
1599 			       pp_state);
1600 		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1601 	}
1602 
1603 	if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) {
1604 
1605 		if (state == AMD_CG_STATE_UNGATE)
1606 			pp_state = 0;
1607 		else
1608 			pp_state = PP_STATE_CG;
1609 
1610 		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1611 			       PP_BLOCK_SYS_ROM,
1612 			       PP_STATE_SUPPORT_CG,
1613 			       pp_state);
1614 		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1615 	}
1616 	return 0;
1617 }
1618 
1619 static int vi_common_set_clockgating_state(void *handle,
1620 					   enum amd_clockgating_state state)
1621 {
1622 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1623 
1624 	if (amdgpu_sriov_vf(adev))
1625 		return 0;
1626 
1627 	switch (adev->asic_type) {
1628 	case CHIP_FIJI:
1629 		vi_update_bif_medium_grain_light_sleep(adev,
1630 				state == AMD_CG_STATE_GATE);
1631 		vi_update_hdp_medium_grain_clock_gating(adev,
1632 				state == AMD_CG_STATE_GATE);
1633 		vi_update_hdp_light_sleep(adev,
1634 				state == AMD_CG_STATE_GATE);
1635 		vi_update_rom_medium_grain_clock_gating(adev,
1636 				state == AMD_CG_STATE_GATE);
1637 		break;
1638 	case CHIP_CARRIZO:
1639 	case CHIP_STONEY:
1640 		vi_update_bif_medium_grain_light_sleep(adev,
1641 				state == AMD_CG_STATE_GATE);
1642 		vi_update_hdp_medium_grain_clock_gating(adev,
1643 				state == AMD_CG_STATE_GATE);
1644 		vi_update_hdp_light_sleep(adev,
1645 				state == AMD_CG_STATE_GATE);
1646 		vi_update_drm_light_sleep(adev,
1647 				state == AMD_CG_STATE_GATE);
1648 		break;
1649 	case CHIP_TONGA:
1650 	case CHIP_POLARIS10:
1651 	case CHIP_POLARIS11:
1652 	case CHIP_POLARIS12:
1653 	case CHIP_VEGAM:
1654 		vi_common_set_clockgating_state_by_smu(adev, state);
1655 	default:
1656 		break;
1657 	}
1658 	return 0;
1659 }
1660 
1661 static int vi_common_set_powergating_state(void *handle,
1662 					    enum amd_powergating_state state)
1663 {
1664 	return 0;
1665 }
1666 
1667 static void vi_common_get_clockgating_state(void *handle, u32 *flags)
1668 {
1669 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1670 	int data;
1671 
1672 	if (amdgpu_sriov_vf(adev))
1673 		*flags = 0;
1674 
1675 	/* AMD_CG_SUPPORT_BIF_LS */
1676 	data = RREG32_PCIE(ixPCIE_CNTL2);
1677 	if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
1678 		*flags |= AMD_CG_SUPPORT_BIF_LS;
1679 
1680 	/* AMD_CG_SUPPORT_HDP_LS */
1681 	data = RREG32(mmHDP_MEM_POWER_LS);
1682 	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
1683 		*flags |= AMD_CG_SUPPORT_HDP_LS;
1684 
1685 	/* AMD_CG_SUPPORT_HDP_MGCG */
1686 	data = RREG32(mmHDP_HOST_PATH_CNTL);
1687 	if (!(data & HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK))
1688 		*flags |= AMD_CG_SUPPORT_HDP_MGCG;
1689 
1690 	/* AMD_CG_SUPPORT_ROM_MGCG */
1691 	data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
1692 	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
1693 		*flags |= AMD_CG_SUPPORT_ROM_MGCG;
1694 }
1695 
1696 static const struct amd_ip_funcs vi_common_ip_funcs = {
1697 	.name = "vi_common",
1698 	.early_init = vi_common_early_init,
1699 	.late_init = vi_common_late_init,
1700 	.sw_init = vi_common_sw_init,
1701 	.sw_fini = vi_common_sw_fini,
1702 	.hw_init = vi_common_hw_init,
1703 	.hw_fini = vi_common_hw_fini,
1704 	.suspend = vi_common_suspend,
1705 	.resume = vi_common_resume,
1706 	.is_idle = vi_common_is_idle,
1707 	.wait_for_idle = vi_common_wait_for_idle,
1708 	.soft_reset = vi_common_soft_reset,
1709 	.set_clockgating_state = vi_common_set_clockgating_state,
1710 	.set_powergating_state = vi_common_set_powergating_state,
1711 	.get_clockgating_state = vi_common_get_clockgating_state,
1712 };
1713 
1714 static const struct amdgpu_ip_block_version vi_common_ip_block =
1715 {
1716 	.type = AMD_IP_BLOCK_TYPE_COMMON,
1717 	.major = 1,
1718 	.minor = 0,
1719 	.rev = 0,
1720 	.funcs = &vi_common_ip_funcs,
1721 };
1722 
1723 void vi_set_virt_ops(struct amdgpu_device *adev)
1724 {
1725 	adev->virt.ops = &xgpu_vi_virt_ops;
1726 }
1727 
1728 int vi_set_ip_blocks(struct amdgpu_device *adev)
1729 {
1730 	switch (adev->asic_type) {
1731 	case CHIP_TOPAZ:
1732 		/* topaz has no DCE, UVD, VCE */
1733 		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
1734 		amdgpu_device_ip_block_add(adev, &gmc_v7_4_ip_block);
1735 		amdgpu_device_ip_block_add(adev, &iceland_ih_ip_block);
1736 		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
1737 		amdgpu_device_ip_block_add(adev, &sdma_v2_4_ip_block);
1738 		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1739 		if (adev->enable_virtual_display)
1740 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1741 		break;
1742 	case CHIP_FIJI:
1743 		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
1744 		amdgpu_device_ip_block_add(adev, &gmc_v8_5_ip_block);
1745 		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
1746 		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
1747 		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
1748 		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1749 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
1750 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1751 #if defined(CONFIG_DRM_AMD_DC)
1752 		else if (amdgpu_device_has_dc_support(adev))
1753 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
1754 #endif
1755 		else
1756 			amdgpu_device_ip_block_add(adev, &dce_v10_1_ip_block);
1757 		if (!amdgpu_sriov_vf(adev)) {
1758 			amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
1759 			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
1760 		}
1761 		break;
1762 	case CHIP_TONGA:
1763 		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
1764 		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
1765 		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
1766 		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
1767 		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
1768 		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1769 		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
1770 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1771 #if defined(CONFIG_DRM_AMD_DC)
1772 		else if (amdgpu_device_has_dc_support(adev))
1773 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
1774 #endif
1775 		else
1776 			amdgpu_device_ip_block_add(adev, &dce_v10_0_ip_block);
1777 		if (!amdgpu_sriov_vf(adev)) {
1778 			amdgpu_device_ip_block_add(adev, &uvd_v5_0_ip_block);
1779 			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
1780 		}
1781 		break;
1782 	case CHIP_POLARIS10:
1783 	case CHIP_POLARIS11:
1784 	case CHIP_POLARIS12:
1785 	case CHIP_VEGAM:
1786 		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
1787 		amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block);
1788 		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
1789 		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
1790 		amdgpu_device_ip_block_add(adev, &sdma_v3_1_ip_block);
1791 		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1792 		if (adev->enable_virtual_display)
1793 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1794 #if defined(CONFIG_DRM_AMD_DC)
1795 		else if (amdgpu_device_has_dc_support(adev))
1796 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
1797 #endif
1798 		else
1799 			amdgpu_device_ip_block_add(adev, &dce_v11_2_ip_block);
1800 		amdgpu_device_ip_block_add(adev, &uvd_v6_3_ip_block);
1801 		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
1802 		break;
1803 	case CHIP_CARRIZO:
1804 		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
1805 		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
1806 		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
1807 		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
1808 		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
1809 		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1810 		if (adev->enable_virtual_display)
1811 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1812 #if defined(CONFIG_DRM_AMD_DC)
1813 		else if (amdgpu_device_has_dc_support(adev))
1814 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
1815 #endif
1816 		else
1817 			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
1818 		amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
1819 		amdgpu_device_ip_block_add(adev, &vce_v3_1_ip_block);
1820 #if defined(CONFIG_DRM_AMD_ACP)
1821 		amdgpu_device_ip_block_add(adev, &acp_ip_block);
1822 #endif
1823 		break;
1824 	case CHIP_STONEY:
1825 		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
1826 		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
1827 		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
1828 		amdgpu_device_ip_block_add(adev, &gfx_v8_1_ip_block);
1829 		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
1830 		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
1831 		if (adev->enable_virtual_display)
1832 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
1833 #if defined(CONFIG_DRM_AMD_DC)
1834 		else if (amdgpu_device_has_dc_support(adev))
1835 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
1836 #endif
1837 		else
1838 			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
1839 		amdgpu_device_ip_block_add(adev, &uvd_v6_2_ip_block);
1840 		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
1841 #if defined(CONFIG_DRM_AMD_ACP)
1842 		amdgpu_device_ip_block_add(adev, &acp_ip_block);
1843 #endif
1844 		break;
1845 	default:
1846 		/* FIXME: not supported yet */
1847 		return -EINVAL;
1848 	}
1849 
1850 	return 0;
1851 }
1852 
1853 void legacy_doorbell_index_init(struct amdgpu_device *adev)
1854 {
1855 	adev->doorbell_index.kiq = AMDGPU_DOORBELL_KIQ;
1856 	adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_MEC_RING0;
1857 	adev->doorbell_index.mec_ring1 = AMDGPU_DOORBELL_MEC_RING1;
1858 	adev->doorbell_index.mec_ring2 = AMDGPU_DOORBELL_MEC_RING2;
1859 	adev->doorbell_index.mec_ring3 = AMDGPU_DOORBELL_MEC_RING3;
1860 	adev->doorbell_index.mec_ring4 = AMDGPU_DOORBELL_MEC_RING4;
1861 	adev->doorbell_index.mec_ring5 = AMDGPU_DOORBELL_MEC_RING5;
1862 	adev->doorbell_index.mec_ring6 = AMDGPU_DOORBELL_MEC_RING6;
1863 	adev->doorbell_index.mec_ring7 = AMDGPU_DOORBELL_MEC_RING7;
1864 	adev->doorbell_index.gfx_ring0 = AMDGPU_DOORBELL_GFX_RING0;
1865 	adev->doorbell_index.sdma_engine[0] = AMDGPU_DOORBELL_sDMA_ENGINE0;
1866 	adev->doorbell_index.sdma_engine[1] = AMDGPU_DOORBELL_sDMA_ENGINE1;
1867 	adev->doorbell_index.ih = AMDGPU_DOORBELL_IH;
1868 	adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_MAX_ASSIGNMENT;
1869 }
1870