1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/firmware.h>
25#include <linux/pci.h>
26#include <linux/seq_file.h>
27
28#include "atom.h"
29#include "ci_dpm.h"
30#include "cikd.h"
31#include "r600_dpm.h"
32#include "radeon.h"
33#include "radeon_asic.h"
34#include "radeon_ucode.h"
35
36#define MC_CG_ARB_FREQ_F0           0x0a
37#define MC_CG_ARB_FREQ_F1           0x0b
38#define MC_CG_ARB_FREQ_F2           0x0c
39#define MC_CG_ARB_FREQ_F3           0x0d
40
41#define SMC_RAM_END 0x40000
42
43#define VOLTAGE_SCALE               4
44#define VOLTAGE_VID_OFFSET_SCALE1    625
45#define VOLTAGE_VID_OFFSET_SCALE2    100
46
47static const struct ci_pt_defaults defaults_hawaii_xt =
48{
49	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
50	{ 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
51	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
52};
53
54static const struct ci_pt_defaults defaults_hawaii_pro =
55{
56	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
57	{ 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
58	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
59};
60
61static const struct ci_pt_defaults defaults_bonaire_xt =
62{
63	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
64	{ 0x79,  0x253, 0x25D, 0xAE,  0x72,  0x80,  0x83,  0x86,  0x6F,  0xC8,  0xC9,  0xC9,  0x2F,  0x4D,  0x61  },
65	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
66};
67
68static const struct ci_pt_defaults defaults_saturn_xt =
69{
70	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
71	{ 0x8C,  0x247, 0x249, 0xA6,  0x80,  0x81,  0x8B,  0x89,  0x86,  0xC9,  0xCA,  0xC9,  0x4D,  0x4D,  0x4D  },
72	{ 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
73};
74
75static const struct ci_pt_config_reg didt_config_ci[] =
76{
77	{ 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
78	{ 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
79	{ 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
80	{ 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
81	{ 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
82	{ 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
83	{ 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
84	{ 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
85	{ 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
86	{ 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
87	{ 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
88	{ 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
89	{ 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
90	{ 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
91	{ 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
92	{ 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
93	{ 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
94	{ 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
95	{ 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
96	{ 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
97	{ 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
98	{ 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
99	{ 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
100	{ 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
101	{ 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
102	{ 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
103	{ 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
104	{ 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
105	{ 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
106	{ 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
107	{ 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
108	{ 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
109	{ 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
110	{ 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
111	{ 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
112	{ 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
113	{ 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
114	{ 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
115	{ 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
116	{ 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
117	{ 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
118	{ 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
119	{ 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
120	{ 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
121	{ 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
122	{ 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
123	{ 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
124	{ 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
125	{ 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
126	{ 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
127	{ 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
128	{ 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
129	{ 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
130	{ 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
131	{ 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
132	{ 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
133	{ 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
134	{ 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
135	{ 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
136	{ 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
137	{ 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
138	{ 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
139	{ 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
140	{ 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
141	{ 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
142	{ 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
143	{ 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
144	{ 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
145	{ 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
146	{ 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
147	{ 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
148	{ 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
149	{ 0xFFFFFFFF }
150};
151
152extern u8 rv770_get_memory_module_index(struct radeon_device *rdev);
153extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
154				       u32 arb_freq_src, u32 arb_freq_dest);
155extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock);
156extern u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode);
157extern void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
158						     u32 max_voltage_steps,
159						     struct atom_voltage_table *voltage_table);
160extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
161extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
162extern int ci_mc_load_microcode(struct radeon_device *rdev);
163extern void cik_update_cg(struct radeon_device *rdev,
164			  u32 block, bool enable);
165
166static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
167					 struct atom_voltage_table_entry *voltage_table,
168					 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
169static int ci_set_power_limit(struct radeon_device *rdev, u32 n);
170static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
171				       u32 target_tdp);
172static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate);
173
174static PPSMC_Result ci_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg);
175static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
176						      PPSMC_Msg msg, u32 parameter);
177
178static void ci_thermal_start_smc_fan_control(struct radeon_device *rdev);
179static void ci_fan_ctrl_set_default_mode(struct radeon_device *rdev);
180
181static struct ci_power_info *ci_get_pi(struct radeon_device *rdev)
182{
183	struct ci_power_info *pi = rdev->pm.dpm.priv;
184
185	return pi;
186}
187
188static struct ci_ps *ci_get_ps(struct radeon_ps *rps)
189{
190	struct ci_ps *ps = rps->ps_priv;
191
192	return ps;
193}
194
195static void ci_initialize_powertune_defaults(struct radeon_device *rdev)
196{
197	struct ci_power_info *pi = ci_get_pi(rdev);
198
199	switch (rdev->pdev->device) {
200	case 0x6649:
201	case 0x6650:
202	case 0x6651:
203	case 0x6658:
204	case 0x665C:
205	case 0x665D:
206	default:
207		pi->powertune_defaults = &defaults_bonaire_xt;
208		break;
209	case 0x6640:
210	case 0x6641:
211	case 0x6646:
212	case 0x6647:
213		pi->powertune_defaults = &defaults_saturn_xt;
214		break;
215	case 0x67B8:
216	case 0x67B0:
217		pi->powertune_defaults = &defaults_hawaii_xt;
218		break;
219	case 0x67BA:
220	case 0x67B1:
221		pi->powertune_defaults = &defaults_hawaii_pro;
222		break;
223	case 0x67A0:
224	case 0x67A1:
225	case 0x67A2:
226	case 0x67A8:
227	case 0x67A9:
228	case 0x67AA:
229	case 0x67B9:
230	case 0x67BE:
231		pi->powertune_defaults = &defaults_bonaire_xt;
232		break;
233	}
234
235	pi->dte_tj_offset = 0;
236
237	pi->caps_power_containment = true;
238	pi->caps_cac = false;
239	pi->caps_sq_ramping = false;
240	pi->caps_db_ramping = false;
241	pi->caps_td_ramping = false;
242	pi->caps_tcp_ramping = false;
243
244	if (pi->caps_power_containment) {
245		pi->caps_cac = true;
246		if (rdev->family == CHIP_HAWAII)
247			pi->enable_bapm_feature = false;
248		else
249			pi->enable_bapm_feature = true;
250		pi->enable_tdc_limit_feature = true;
251		pi->enable_pkg_pwr_tracking_feature = true;
252	}
253}
254
255static u8 ci_convert_to_vid(u16 vddc)
256{
257	return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
258}
259
260static int ci_populate_bapm_vddc_vid_sidd(struct radeon_device *rdev)
261{
262	struct ci_power_info *pi = ci_get_pi(rdev);
263	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
264	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
265	u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
266	u32 i;
267
268	if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
269		return -EINVAL;
270	if (rdev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
271		return -EINVAL;
272	if (rdev->pm.dpm.dyn_state.cac_leakage_table.count !=
273	    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
274		return -EINVAL;
275
276	for (i = 0; i < rdev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
277		if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
278			lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
279			hi_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
280			hi2_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
281		} else {
282			lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
283			hi_vid[i] = ci_convert_to_vid((u16)rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
284		}
285	}
286	return 0;
287}
288
289static int ci_populate_vddc_vid(struct radeon_device *rdev)
290{
291	struct ci_power_info *pi = ci_get_pi(rdev);
292	u8 *vid = pi->smc_powertune_table.VddCVid;
293	u32 i;
294
295	if (pi->vddc_voltage_table.count > 8)
296		return -EINVAL;
297
298	for (i = 0; i < pi->vddc_voltage_table.count; i++)
299		vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);
300
301	return 0;
302}
303
304static int ci_populate_svi_load_line(struct radeon_device *rdev)
305{
306	struct ci_power_info *pi = ci_get_pi(rdev);
307	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
308
309	pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
310	pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
311	pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
312	pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;
313
314	return 0;
315}
316
317static int ci_populate_tdc_limit(struct radeon_device *rdev)
318{
319	struct ci_power_info *pi = ci_get_pi(rdev);
320	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
321	u16 tdc_limit;
322
323	tdc_limit = rdev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
324	pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
325	pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
326		pt_defaults->tdc_vddc_throttle_release_limit_perc;
327	pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;
328
329	return 0;
330}
331
332static int ci_populate_dw8(struct radeon_device *rdev)
333{
334	struct ci_power_info *pi = ci_get_pi(rdev);
335	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
336	int ret;
337
338	ret = ci_read_smc_sram_dword(rdev,
339				     SMU7_FIRMWARE_HEADER_LOCATION +
340				     offsetof(SMU7_Firmware_Header, PmFuseTable) +
341				     offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
342				     (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
343				     pi->sram_end);
344	if (ret)
345		return -EINVAL;
346	else
347		pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;
348
349	return 0;
350}
351
352static int ci_populate_fuzzy_fan(struct radeon_device *rdev)
353{
354	struct ci_power_info *pi = ci_get_pi(rdev);
355
356	if ((rdev->pm.dpm.fan.fan_output_sensitivity & (1 << 15)) ||
357	    (rdev->pm.dpm.fan.fan_output_sensitivity == 0))
358		rdev->pm.dpm.fan.fan_output_sensitivity =
359			rdev->pm.dpm.fan.default_fan_output_sensitivity;
360
361	pi->smc_powertune_table.FuzzyFan_PwmSetDelta =
362		cpu_to_be16(rdev->pm.dpm.fan.fan_output_sensitivity);
363
364	return 0;
365}
366
367static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct radeon_device *rdev)
368{
369	struct ci_power_info *pi = ci_get_pi(rdev);
370	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
371	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
372	int i, min, max;
373
374	min = max = hi_vid[0];
375	for (i = 0; i < 8; i++) {
376		if (0 != hi_vid[i]) {
377			if (min > hi_vid[i])
378				min = hi_vid[i];
379			if (max < hi_vid[i])
380				max = hi_vid[i];
381		}
382
383		if (0 != lo_vid[i]) {
384			if (min > lo_vid[i])
385				min = lo_vid[i];
386			if (max < lo_vid[i])
387				max = lo_vid[i];
388		}
389	}
390
391	if ((min == 0) || (max == 0))
392		return -EINVAL;
393	pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
394	pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;
395
396	return 0;
397}
398
399static int ci_populate_bapm_vddc_base_leakage_sidd(struct radeon_device *rdev)
400{
401	struct ci_power_info *pi = ci_get_pi(rdev);
402	u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd;
403	u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd;
404	struct radeon_cac_tdp_table *cac_tdp_table =
405		rdev->pm.dpm.dyn_state.cac_tdp_table;
406
407	hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
408	lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;
409
410	pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
411	pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);
412
413	return 0;
414}
415
416static int ci_populate_bapm_parameters_in_dpm_table(struct radeon_device *rdev)
417{
418	struct ci_power_info *pi = ci_get_pi(rdev);
419	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
420	SMU7_Discrete_DpmTable  *dpm_table = &pi->smc_state_table;
421	struct radeon_cac_tdp_table *cac_tdp_table =
422		rdev->pm.dpm.dyn_state.cac_tdp_table;
423	struct radeon_ppm_table *ppm = rdev->pm.dpm.dyn_state.ppm_table;
424	int i, j, k;
425	const u16 *def1;
426	const u16 *def2;
427
428	dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
429	dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;
430
431	dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
432	dpm_table->GpuTjMax =
433		(u8)(pi->thermal_temp_setting.temperature_high / 1000);
434	dpm_table->GpuTjHyst = 8;
435
436	dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;
437
438	if (ppm) {
439		dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
440		dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
441	} else {
442		dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
443		dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
444	}
445
446	dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
447	def1 = pt_defaults->bapmti_r;
448	def2 = pt_defaults->bapmti_rc;
449
450	for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
451		for (j = 0; j < SMU7_DTE_SOURCES; j++) {
452			for (k = 0; k < SMU7_DTE_SINKS; k++) {
453				dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
454				dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
455				def1++;
456				def2++;
457			}
458		}
459	}
460
461	return 0;
462}
463
464static int ci_populate_pm_base(struct radeon_device *rdev)
465{
466	struct ci_power_info *pi = ci_get_pi(rdev);
467	u32 pm_fuse_table_offset;
468	int ret;
469
470	if (pi->caps_power_containment) {
471		ret = ci_read_smc_sram_dword(rdev,
472					     SMU7_FIRMWARE_HEADER_LOCATION +
473					     offsetof(SMU7_Firmware_Header, PmFuseTable),
474					     &pm_fuse_table_offset, pi->sram_end);
475		if (ret)
476			return ret;
477		ret = ci_populate_bapm_vddc_vid_sidd(rdev);
478		if (ret)
479			return ret;
480		ret = ci_populate_vddc_vid(rdev);
481		if (ret)
482			return ret;
483		ret = ci_populate_svi_load_line(rdev);
484		if (ret)
485			return ret;
486		ret = ci_populate_tdc_limit(rdev);
487		if (ret)
488			return ret;
489		ret = ci_populate_dw8(rdev);
490		if (ret)
491			return ret;
492		ret = ci_populate_fuzzy_fan(rdev);
493		if (ret)
494			return ret;
495		ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(rdev);
496		if (ret)
497			return ret;
498		ret = ci_populate_bapm_vddc_base_leakage_sidd(rdev);
499		if (ret)
500			return ret;
501		ret = ci_copy_bytes_to_smc(rdev, pm_fuse_table_offset,
502					   (u8 *)&pi->smc_powertune_table,
503					   sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
504		if (ret)
505			return ret;
506	}
507
508	return 0;
509}
510
511static void ci_do_enable_didt(struct radeon_device *rdev, const bool enable)
512{
513	struct ci_power_info *pi = ci_get_pi(rdev);
514	u32 data;
515
516	if (pi->caps_sq_ramping) {
517		data = RREG32_DIDT(DIDT_SQ_CTRL0);
518		if (enable)
519			data |= DIDT_CTRL_EN;
520		else
521			data &= ~DIDT_CTRL_EN;
522		WREG32_DIDT(DIDT_SQ_CTRL0, data);
523	}
524
525	if (pi->caps_db_ramping) {
526		data = RREG32_DIDT(DIDT_DB_CTRL0);
527		if (enable)
528			data |= DIDT_CTRL_EN;
529		else
530			data &= ~DIDT_CTRL_EN;
531		WREG32_DIDT(DIDT_DB_CTRL0, data);
532	}
533
534	if (pi->caps_td_ramping) {
535		data = RREG32_DIDT(DIDT_TD_CTRL0);
536		if (enable)
537			data |= DIDT_CTRL_EN;
538		else
539			data &= ~DIDT_CTRL_EN;
540		WREG32_DIDT(DIDT_TD_CTRL0, data);
541	}
542
543	if (pi->caps_tcp_ramping) {
544		data = RREG32_DIDT(DIDT_TCP_CTRL0);
545		if (enable)
546			data |= DIDT_CTRL_EN;
547		else
548			data &= ~DIDT_CTRL_EN;
549		WREG32_DIDT(DIDT_TCP_CTRL0, data);
550	}
551}
552
553static int ci_program_pt_config_registers(struct radeon_device *rdev,
554					  const struct ci_pt_config_reg *cac_config_regs)
555{
556	const struct ci_pt_config_reg *config_regs = cac_config_regs;
557	u32 data;
558	u32 cache = 0;
559
560	if (config_regs == NULL)
561		return -EINVAL;
562
563	while (config_regs->offset != 0xFFFFFFFF) {
564		if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
565			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
566		} else {
567			switch (config_regs->type) {
568			case CISLANDS_CONFIGREG_SMC_IND:
569				data = RREG32_SMC(config_regs->offset);
570				break;
571			case CISLANDS_CONFIGREG_DIDT_IND:
572				data = RREG32_DIDT(config_regs->offset);
573				break;
574			default:
575				data = RREG32(config_regs->offset << 2);
576				break;
577			}
578
579			data &= ~config_regs->mask;
580			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
581			data |= cache;
582
583			switch (config_regs->type) {
584			case CISLANDS_CONFIGREG_SMC_IND:
585				WREG32_SMC(config_regs->offset, data);
586				break;
587			case CISLANDS_CONFIGREG_DIDT_IND:
588				WREG32_DIDT(config_regs->offset, data);
589				break;
590			default:
591				WREG32(config_regs->offset << 2, data);
592				break;
593			}
594			cache = 0;
595		}
596		config_regs++;
597	}
598	return 0;
599}
600
601static int ci_enable_didt(struct radeon_device *rdev, bool enable)
602{
603	struct ci_power_info *pi = ci_get_pi(rdev);
604	int ret;
605
606	if (pi->caps_sq_ramping || pi->caps_db_ramping ||
607	    pi->caps_td_ramping || pi->caps_tcp_ramping) {
608		cik_enter_rlc_safe_mode(rdev);
609
610		if (enable) {
611			ret = ci_program_pt_config_registers(rdev, didt_config_ci);
612			if (ret) {
613				cik_exit_rlc_safe_mode(rdev);
614				return ret;
615			}
616		}
617
618		ci_do_enable_didt(rdev, enable);
619
620		cik_exit_rlc_safe_mode(rdev);
621	}
622
623	return 0;
624}
625
626static int ci_enable_power_containment(struct radeon_device *rdev, bool enable)
627{
628	struct ci_power_info *pi = ci_get_pi(rdev);
629	PPSMC_Result smc_result;
630	int ret = 0;
631
632	if (enable) {
633		pi->power_containment_features = 0;
634		if (pi->caps_power_containment) {
635			if (pi->enable_bapm_feature) {
636				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableDTE);
637				if (smc_result != PPSMC_Result_OK)
638					ret = -EINVAL;
639				else
640					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
641			}
642
643			if (pi->enable_tdc_limit_feature) {
644				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitEnable);
645				if (smc_result != PPSMC_Result_OK)
646					ret = -EINVAL;
647				else
648					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
649			}
650
651			if (pi->enable_pkg_pwr_tracking_feature) {
652				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitEnable);
653				if (smc_result != PPSMC_Result_OK) {
654					ret = -EINVAL;
655				} else {
656					struct radeon_cac_tdp_table *cac_tdp_table =
657						rdev->pm.dpm.dyn_state.cac_tdp_table;
658					u32 default_pwr_limit =
659						(u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
660
661					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;
662
663					ci_set_power_limit(rdev, default_pwr_limit);
664				}
665			}
666		}
667	} else {
668		if (pi->caps_power_containment && pi->power_containment_features) {
669			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
670				ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitDisable);
671
672			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
673				ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableDTE);
674
675			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
676				ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitDisable);
677			pi->power_containment_features = 0;
678		}
679	}
680
681	return ret;
682}
683
684static int ci_enable_smc_cac(struct radeon_device *rdev, bool enable)
685{
686	struct ci_power_info *pi = ci_get_pi(rdev);
687	PPSMC_Result smc_result;
688	int ret = 0;
689
690	if (pi->caps_cac) {
691		if (enable) {
692			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableCac);
693			if (smc_result != PPSMC_Result_OK) {
694				ret = -EINVAL;
695				pi->cac_enabled = false;
696			} else {
697				pi->cac_enabled = true;
698			}
699		} else if (pi->cac_enabled) {
700			ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableCac);
701			pi->cac_enabled = false;
702		}
703	}
704
705	return ret;
706}
707
708static int ci_enable_thermal_based_sclk_dpm(struct radeon_device *rdev,
709					    bool enable)
710{
711	struct ci_power_info *pi = ci_get_pi(rdev);
712	PPSMC_Result smc_result = PPSMC_Result_OK;
713
714	if (pi->thermal_sclk_dpm_enabled) {
715		if (enable)
716			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_ENABLE_THERMAL_DPM);
717		else
718			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DISABLE_THERMAL_DPM);
719	}
720
721	if (smc_result == PPSMC_Result_OK)
722		return 0;
723	else
724		return -EINVAL;
725}
726
727static int ci_power_control_set_level(struct radeon_device *rdev)
728{
729	struct ci_power_info *pi = ci_get_pi(rdev);
730	struct radeon_cac_tdp_table *cac_tdp_table =
731		rdev->pm.dpm.dyn_state.cac_tdp_table;
732	s32 adjust_percent;
733	s32 target_tdp;
734	int ret = 0;
735	bool adjust_polarity = false; /* ??? */
736
737	if (pi->caps_power_containment) {
738		adjust_percent = adjust_polarity ?
739			rdev->pm.dpm.tdp_adjustment : (-1 * rdev->pm.dpm.tdp_adjustment);
740		target_tdp = ((100 + adjust_percent) *
741			      (s32)cac_tdp_table->configurable_tdp) / 100;
742
743		ret = ci_set_overdrive_target_tdp(rdev, (u32)target_tdp);
744	}
745
746	return ret;
747}
748
749void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
750{
751	struct ci_power_info *pi = ci_get_pi(rdev);
752
753	if (pi->uvd_power_gated == gate)
754		return;
755
756	pi->uvd_power_gated = gate;
757
758	ci_update_uvd_dpm(rdev, gate);
759}
760
761bool ci_dpm_vblank_too_short(struct radeon_device *rdev)
762{
763	struct ci_power_info *pi = ci_get_pi(rdev);
764	u32 vblank_time = r600_dpm_get_vblank_time(rdev);
765	u32 switch_limit = pi->mem_gddr5 ? 450 : 300;
766
767	/* disable mclk switching if the refresh is >120Hz, even if the
768        * blanking period would allow it
769        */
770	if (r600_dpm_get_vrefresh(rdev) > 120)
771		return true;
772
773	if (vblank_time < switch_limit)
774		return true;
775	else
776		return false;
777
778}
779
780static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
781					struct radeon_ps *rps)
782{
783	struct ci_ps *ps = ci_get_ps(rps);
784	struct ci_power_info *pi = ci_get_pi(rdev);
785	struct radeon_clock_and_voltage_limits *max_limits;
786	bool disable_mclk_switching;
787	u32 sclk, mclk;
788	int i;
789
790	if (rps->vce_active) {
791		rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
792		rps->ecclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].ecclk;
793	} else {
794		rps->evclk = 0;
795		rps->ecclk = 0;
796	}
797
798	if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
799	    ci_dpm_vblank_too_short(rdev))
800		disable_mclk_switching = true;
801	else
802		disable_mclk_switching = false;
803
804	if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
805		pi->battery_state = true;
806	else
807		pi->battery_state = false;
808
809	if (rdev->pm.dpm.ac_power)
810		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
811	else
812		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
813
814	if (rdev->pm.dpm.ac_power == false) {
815		for (i = 0; i < ps->performance_level_count; i++) {
816			if (ps->performance_levels[i].mclk > max_limits->mclk)
817				ps->performance_levels[i].mclk = max_limits->mclk;
818			if (ps->performance_levels[i].sclk > max_limits->sclk)
819				ps->performance_levels[i].sclk = max_limits->sclk;
820		}
821	}
822
823	/* XXX validate the min clocks required for display */
824
825	if (disable_mclk_switching) {
826		mclk  = ps->performance_levels[ps->performance_level_count - 1].mclk;
827		sclk = ps->performance_levels[0].sclk;
828	} else {
829		mclk = ps->performance_levels[0].mclk;
830		sclk = ps->performance_levels[0].sclk;
831	}
832
833	if (rps->vce_active) {
834		if (sclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk)
835			sclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk;
836		if (mclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk)
837			mclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk;
838	}
839
840	ps->performance_levels[0].sclk = sclk;
841	ps->performance_levels[0].mclk = mclk;
842
843	if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
844		ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;
845
846	if (disable_mclk_switching) {
847		if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
848			ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
849	} else {
850		if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
851			ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
852	}
853}
854
855static int ci_thermal_set_temperature_range(struct radeon_device *rdev,
856					    int min_temp, int max_temp)
857{
858	int low_temp = 0 * 1000;
859	int high_temp = 255 * 1000;
860	u32 tmp;
861
862	if (low_temp < min_temp)
863		low_temp = min_temp;
864	if (high_temp > max_temp)
865		high_temp = max_temp;
866	if (high_temp < low_temp) {
867		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
868		return -EINVAL;
869	}
870
871	tmp = RREG32_SMC(CG_THERMAL_INT);
872	tmp &= ~(CI_DIG_THERM_INTH_MASK | CI_DIG_THERM_INTL_MASK);
873	tmp |= CI_DIG_THERM_INTH(high_temp / 1000) |
874		CI_DIG_THERM_INTL(low_temp / 1000);
875	WREG32_SMC(CG_THERMAL_INT, tmp);
876
877#if 0
878	/* XXX: need to figure out how to handle this properly */
879	tmp = RREG32_SMC(CG_THERMAL_CTRL);
880	tmp &= DIG_THERM_DPM_MASK;
881	tmp |= DIG_THERM_DPM(high_temp / 1000);
882	WREG32_SMC(CG_THERMAL_CTRL, tmp);
883#endif
884
885	rdev->pm.dpm.thermal.min_temp = low_temp;
886	rdev->pm.dpm.thermal.max_temp = high_temp;
887
888	return 0;
889}
890
891static int ci_thermal_enable_alert(struct radeon_device *rdev,
892				   bool enable)
893{
894	u32 thermal_int = RREG32_SMC(CG_THERMAL_INT);
895	PPSMC_Result result;
896
897	if (enable) {
898		thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
899		WREG32_SMC(CG_THERMAL_INT, thermal_int);
900		rdev->irq.dpm_thermal = false;
901		result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Thermal_Cntl_Enable);
902		if (result != PPSMC_Result_OK) {
903			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
904			return -EINVAL;
905		}
906	} else {
907		thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
908		WREG32_SMC(CG_THERMAL_INT, thermal_int);
909		rdev->irq.dpm_thermal = true;
910		result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Thermal_Cntl_Disable);
911		if (result != PPSMC_Result_OK) {
912			DRM_DEBUG_KMS("Could not disable thermal interrupts.\n");
913			return -EINVAL;
914		}
915	}
916
917	return 0;
918}
919
920static void ci_fan_ctrl_set_static_mode(struct radeon_device *rdev, u32 mode)
921{
922	struct ci_power_info *pi = ci_get_pi(rdev);
923	u32 tmp;
924
925	if (pi->fan_ctrl_is_in_default_mode) {
926		tmp = (RREG32_SMC(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT;
927		pi->fan_ctrl_default_mode = tmp;
928		tmp = (RREG32_SMC(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT;
929		pi->t_min = tmp;
930		pi->fan_ctrl_is_in_default_mode = false;
931	}
932
933	tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK;
934	tmp |= TMIN(0);
935	WREG32_SMC(CG_FDO_CTRL2, tmp);
936
937	tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
938	tmp |= FDO_PWM_MODE(mode);
939	WREG32_SMC(CG_FDO_CTRL2, tmp);
940}
941
942static int ci_thermal_setup_fan_table(struct radeon_device *rdev)
943{
944	struct ci_power_info *pi = ci_get_pi(rdev);
945	SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
946	u32 duty100;
947	u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
948	u16 fdo_min, slope1, slope2;
949	u32 reference_clock, tmp;
950	int ret;
951	u64 tmp64;
952
953	if (!pi->fan_table_start) {
954		rdev->pm.dpm.fan.ucode_fan_control = false;
955		return 0;
956	}
957
958	duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
959
960	if (duty100 == 0) {
961		rdev->pm.dpm.fan.ucode_fan_control = false;
962		return 0;
963	}
964
965	tmp64 = (u64)rdev->pm.dpm.fan.pwm_min * duty100;
966	do_div(tmp64, 10000);
967	fdo_min = (u16)tmp64;
968
969	t_diff1 = rdev->pm.dpm.fan.t_med - rdev->pm.dpm.fan.t_min;
970	t_diff2 = rdev->pm.dpm.fan.t_high - rdev->pm.dpm.fan.t_med;
971
972	pwm_diff1 = rdev->pm.dpm.fan.pwm_med - rdev->pm.dpm.fan.pwm_min;
973	pwm_diff2 = rdev->pm.dpm.fan.pwm_high - rdev->pm.dpm.fan.pwm_med;
974
975	slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
976	slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
977
978	fan_table.TempMin = cpu_to_be16((50 + rdev->pm.dpm.fan.t_min) / 100);
979	fan_table.TempMed = cpu_to_be16((50 + rdev->pm.dpm.fan.t_med) / 100);
980	fan_table.TempMax = cpu_to_be16((50 + rdev->pm.dpm.fan.t_max) / 100);
981
982	fan_table.Slope1 = cpu_to_be16(slope1);
983	fan_table.Slope2 = cpu_to_be16(slope2);
984
985	fan_table.FdoMin = cpu_to_be16(fdo_min);
986
987	fan_table.HystDown = cpu_to_be16(rdev->pm.dpm.fan.t_hyst);
988
989	fan_table.HystUp = cpu_to_be16(1);
990
991	fan_table.HystSlope = cpu_to_be16(1);
992
993	fan_table.TempRespLim = cpu_to_be16(5);
994
995	reference_clock = radeon_get_xclk(rdev);
996
997	fan_table.RefreshPeriod = cpu_to_be32((rdev->pm.dpm.fan.cycle_delay *
998					       reference_clock) / 1600);
999
1000	fan_table.FdoMax = cpu_to_be16((u16)duty100);
1001
1002	tmp = (RREG32_SMC(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT;
1003	fan_table.TempSrc = (uint8_t)tmp;
1004
1005	ret = ci_copy_bytes_to_smc(rdev,
1006				   pi->fan_table_start,
1007				   (u8 *)(&fan_table),
1008				   sizeof(fan_table),
1009				   pi->sram_end);
1010
1011	if (ret) {
1012		DRM_ERROR("Failed to load fan table to the SMC.");
1013		rdev->pm.dpm.fan.ucode_fan_control = false;
1014	}
1015
1016	return 0;
1017}
1018
1019static int ci_fan_ctrl_start_smc_fan_control(struct radeon_device *rdev)
1020{
1021	struct ci_power_info *pi = ci_get_pi(rdev);
1022	PPSMC_Result ret;
1023
1024	if (pi->caps_od_fuzzy_fan_control_support) {
1025		ret = ci_send_msg_to_smc_with_parameter(rdev,
1026							PPSMC_StartFanControl,
1027							FAN_CONTROL_FUZZY);
1028		if (ret != PPSMC_Result_OK)
1029			return -EINVAL;
1030		ret = ci_send_msg_to_smc_with_parameter(rdev,
1031							PPSMC_MSG_SetFanPwmMax,
1032							rdev->pm.dpm.fan.default_max_fan_pwm);
1033		if (ret != PPSMC_Result_OK)
1034			return -EINVAL;
1035	} else {
1036		ret = ci_send_msg_to_smc_with_parameter(rdev,
1037							PPSMC_StartFanControl,
1038							FAN_CONTROL_TABLE);
1039		if (ret != PPSMC_Result_OK)
1040			return -EINVAL;
1041	}
1042
1043	pi->fan_is_controlled_by_smc = true;
1044	return 0;
1045}
1046
1047static int ci_fan_ctrl_stop_smc_fan_control(struct radeon_device *rdev)
1048{
1049	PPSMC_Result ret;
1050	struct ci_power_info *pi = ci_get_pi(rdev);
1051
1052	ret = ci_send_msg_to_smc(rdev, PPSMC_StopFanControl);
1053	if (ret == PPSMC_Result_OK) {
1054		pi->fan_is_controlled_by_smc = false;
1055		return 0;
1056	} else
1057		return -EINVAL;
1058}
1059
1060int ci_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev,
1061					     u32 *speed)
1062{
1063	u32 duty, duty100;
1064	u64 tmp64;
1065
1066	if (rdev->pm.no_fan)
1067		return -ENOENT;
1068
1069	duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
1070	duty = (RREG32_SMC(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT;
1071
1072	if (duty100 == 0)
1073		return -EINVAL;
1074
1075	tmp64 = (u64)duty * 100;
1076	do_div(tmp64, duty100);
1077	*speed = (u32)tmp64;
1078
1079	if (*speed > 100)
1080		*speed = 100;
1081
1082	return 0;
1083}
1084
1085int ci_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev,
1086					     u32 speed)
1087{
1088	u32 tmp;
1089	u32 duty, duty100;
1090	u64 tmp64;
1091	struct ci_power_info *pi = ci_get_pi(rdev);
1092
1093	if (rdev->pm.no_fan)
1094		return -ENOENT;
1095
1096	if (pi->fan_is_controlled_by_smc)
1097		return -EINVAL;
1098
1099	if (speed > 100)
1100		return -EINVAL;
1101
1102	duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
1103
1104	if (duty100 == 0)
1105		return -EINVAL;
1106
1107	tmp64 = (u64)speed * duty100;
1108	do_div(tmp64, 100);
1109	duty = (u32)tmp64;
1110
1111	tmp = RREG32_SMC(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK;
1112	tmp |= FDO_STATIC_DUTY(duty);
1113	WREG32_SMC(CG_FDO_CTRL0, tmp);
1114
1115	return 0;
1116}
1117
1118void ci_fan_ctrl_set_mode(struct radeon_device *rdev, u32 mode)
1119{
1120	if (mode) {
1121		/* stop auto-manage */
1122		if (rdev->pm.dpm.fan.ucode_fan_control)
1123			ci_fan_ctrl_stop_smc_fan_control(rdev);
1124		ci_fan_ctrl_set_static_mode(rdev, mode);
1125	} else {
1126		/* restart auto-manage */
1127		if (rdev->pm.dpm.fan.ucode_fan_control)
1128			ci_thermal_start_smc_fan_control(rdev);
1129		else
1130			ci_fan_ctrl_set_default_mode(rdev);
1131	}
1132}
1133
1134u32 ci_fan_ctrl_get_mode(struct radeon_device *rdev)
1135{
1136	struct ci_power_info *pi = ci_get_pi(rdev);
1137	u32 tmp;
1138
1139	if (pi->fan_is_controlled_by_smc)
1140		return 0;
1141
1142	tmp = RREG32_SMC(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK;
1143	return (tmp >> FDO_PWM_MODE_SHIFT);
1144}
1145
1146#if 0
1147static int ci_fan_ctrl_get_fan_speed_rpm(struct radeon_device *rdev,
1148					 u32 *speed)
1149{
1150	u32 tach_period;
1151	u32 xclk = radeon_get_xclk(rdev);
1152
1153	if (rdev->pm.no_fan)
1154		return -ENOENT;
1155
1156	if (rdev->pm.fan_pulses_per_revolution == 0)
1157		return -ENOENT;
1158
1159	tach_period = (RREG32_SMC(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT;
1160	if (tach_period == 0)
1161		return -ENOENT;
1162
1163	*speed = 60 * xclk * 10000 / tach_period;
1164
1165	return 0;
1166}
1167
1168static int ci_fan_ctrl_set_fan_speed_rpm(struct radeon_device *rdev,
1169					 u32 speed)
1170{
1171	u32 tach_period, tmp;
1172	u32 xclk = radeon_get_xclk(rdev);
1173
1174	if (rdev->pm.no_fan)
1175		return -ENOENT;
1176
1177	if (rdev->pm.fan_pulses_per_revolution == 0)
1178		return -ENOENT;
1179
1180	if ((speed < rdev->pm.fan_min_rpm) ||
1181	    (speed > rdev->pm.fan_max_rpm))
1182		return -EINVAL;
1183
1184	if (rdev->pm.dpm.fan.ucode_fan_control)
1185		ci_fan_ctrl_stop_smc_fan_control(rdev);
1186
1187	tach_period = 60 * xclk * 10000 / (8 * speed);
1188	tmp = RREG32_SMC(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK;
1189	tmp |= TARGET_PERIOD(tach_period);
1190	WREG32_SMC(CG_TACH_CTRL, tmp);
1191
1192	ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC_RPM);
1193
1194	return 0;
1195}
1196#endif
1197
1198static void ci_fan_ctrl_set_default_mode(struct radeon_device *rdev)
1199{
1200	struct ci_power_info *pi = ci_get_pi(rdev);
1201	u32 tmp;
1202
1203	if (!pi->fan_ctrl_is_in_default_mode) {
1204		tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
1205		tmp |= FDO_PWM_MODE(pi->fan_ctrl_default_mode);
1206		WREG32_SMC(CG_FDO_CTRL2, tmp);
1207
1208		tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK;
1209		tmp |= TMIN(pi->t_min);
1210		WREG32_SMC(CG_FDO_CTRL2, tmp);
1211		pi->fan_ctrl_is_in_default_mode = true;
1212	}
1213}
1214
1215static void ci_thermal_start_smc_fan_control(struct radeon_device *rdev)
1216{
1217	if (rdev->pm.dpm.fan.ucode_fan_control) {
1218		ci_fan_ctrl_start_smc_fan_control(rdev);
1219		ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC);
1220	}
1221}
1222
1223static void ci_thermal_initialize(struct radeon_device *rdev)
1224{
1225	u32 tmp;
1226
1227	if (rdev->pm.fan_pulses_per_revolution) {
1228		tmp = RREG32_SMC(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK;
1229		tmp |= EDGE_PER_REV(rdev->pm.fan_pulses_per_revolution -1);
1230		WREG32_SMC(CG_TACH_CTRL, tmp);
1231	}
1232
1233	tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK;
1234	tmp |= TACH_PWM_RESP_RATE(0x28);
1235	WREG32_SMC(CG_FDO_CTRL2, tmp);
1236}
1237
1238static int ci_thermal_start_thermal_controller(struct radeon_device *rdev)
1239{
1240	int ret;
1241
1242	ci_thermal_initialize(rdev);
1243	ret = ci_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
1244	if (ret)
1245		return ret;
1246	ret = ci_thermal_enable_alert(rdev, true);
1247	if (ret)
1248		return ret;
1249	if (rdev->pm.dpm.fan.ucode_fan_control) {
1250		ret = ci_thermal_setup_fan_table(rdev);
1251		if (ret)
1252			return ret;
1253		ci_thermal_start_smc_fan_control(rdev);
1254	}
1255
1256	return 0;
1257}
1258
1259static void ci_thermal_stop_thermal_controller(struct radeon_device *rdev)
1260{
1261	if (!rdev->pm.no_fan)
1262		ci_fan_ctrl_set_default_mode(rdev);
1263}
1264
1265#if 0
1266static int ci_read_smc_soft_register(struct radeon_device *rdev,
1267				     u16 reg_offset, u32 *value)
1268{
1269	struct ci_power_info *pi = ci_get_pi(rdev);
1270
1271	return ci_read_smc_sram_dword(rdev,
1272				      pi->soft_regs_start + reg_offset,
1273				      value, pi->sram_end);
1274}
1275#endif
1276
1277static int ci_write_smc_soft_register(struct radeon_device *rdev,
1278				      u16 reg_offset, u32 value)
1279{
1280	struct ci_power_info *pi = ci_get_pi(rdev);
1281
1282	return ci_write_smc_sram_dword(rdev,
1283				       pi->soft_regs_start + reg_offset,
1284				       value, pi->sram_end);
1285}
1286
1287static void ci_init_fps_limits(struct radeon_device *rdev)
1288{
1289	struct ci_power_info *pi = ci_get_pi(rdev);
1290	SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
1291
1292	if (pi->caps_fps) {
1293		u16 tmp;
1294
1295		tmp = 45;
1296		table->FpsHighT = cpu_to_be16(tmp);
1297
1298		tmp = 30;
1299		table->FpsLowT = cpu_to_be16(tmp);
1300	}
1301}
1302
1303static int ci_update_sclk_t(struct radeon_device *rdev)
1304{
1305	struct ci_power_info *pi = ci_get_pi(rdev);
1306	int ret = 0;
1307	u32 low_sclk_interrupt_t = 0;
1308
1309	if (pi->caps_sclk_throttle_low_notification) {
1310		low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
1311
1312		ret = ci_copy_bytes_to_smc(rdev,
1313					   pi->dpm_table_start +
1314					   offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
1315					   (u8 *)&low_sclk_interrupt_t,
1316					   sizeof(u32), pi->sram_end);
1317
1318	}
1319
1320	return ret;
1321}
1322
1323static void ci_get_leakage_voltages(struct radeon_device *rdev)
1324{
1325	struct ci_power_info *pi = ci_get_pi(rdev);
1326	u16 leakage_id, virtual_voltage_id;
1327	u16 vddc, vddci;
1328	int i;
1329
1330	pi->vddc_leakage.count = 0;
1331	pi->vddci_leakage.count = 0;
1332
1333	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
1334		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
1335			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
1336			if (radeon_atom_get_voltage_evv(rdev, virtual_voltage_id, &vddc) != 0)
1337				continue;
1338			if (vddc != 0 && vddc != virtual_voltage_id) {
1339				pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
1340				pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
1341				pi->vddc_leakage.count++;
1342			}
1343		}
1344	} else if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) {
1345		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
1346			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
1347			if (radeon_atom_get_leakage_vddc_based_on_leakage_params(rdev, &vddc, &vddci,
1348										 virtual_voltage_id,
1349										 leakage_id) == 0) {
1350				if (vddc != 0 && vddc != virtual_voltage_id) {
1351					pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
1352					pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
1353					pi->vddc_leakage.count++;
1354				}
1355				if (vddci != 0 && vddci != virtual_voltage_id) {
1356					pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
1357					pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
1358					pi->vddci_leakage.count++;
1359				}
1360			}
1361		}
1362	}
1363}
1364
1365static void ci_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
1366{
1367	struct ci_power_info *pi = ci_get_pi(rdev);
1368	bool want_thermal_protection;
1369	enum radeon_dpm_event_src dpm_event_src;
1370	u32 tmp;
1371
1372	switch (sources) {
1373	case 0:
1374	default:
1375		want_thermal_protection = false;
1376		break;
1377	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
1378		want_thermal_protection = true;
1379		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
1380		break;
1381	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
1382		want_thermal_protection = true;
1383		dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL;
1384		break;
1385	case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
1386	      (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)):
1387		want_thermal_protection = true;
1388		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
1389		break;
1390	}
1391
1392	if (want_thermal_protection) {
1393#if 0
1394		/* XXX: need to figure out how to handle this properly */
1395		tmp = RREG32_SMC(CG_THERMAL_CTRL);
1396		tmp &= DPM_EVENT_SRC_MASK;
1397		tmp |= DPM_EVENT_SRC(dpm_event_src);
1398		WREG32_SMC(CG_THERMAL_CTRL, tmp);
1399#endif
1400
1401		tmp = RREG32_SMC(GENERAL_PWRMGT);
1402		if (pi->thermal_protection)
1403			tmp &= ~THERMAL_PROTECTION_DIS;
1404		else
1405			tmp |= THERMAL_PROTECTION_DIS;
1406		WREG32_SMC(GENERAL_PWRMGT, tmp);
1407	} else {
1408		tmp = RREG32_SMC(GENERAL_PWRMGT);
1409		tmp |= THERMAL_PROTECTION_DIS;
1410		WREG32_SMC(GENERAL_PWRMGT, tmp);
1411	}
1412}
1413
1414static void ci_enable_auto_throttle_source(struct radeon_device *rdev,
1415					   enum radeon_dpm_auto_throttle_src source,
1416					   bool enable)
1417{
1418	struct ci_power_info *pi = ci_get_pi(rdev);
1419
1420	if (enable) {
1421		if (!(pi->active_auto_throttle_sources & (1 << source))) {
1422			pi->active_auto_throttle_sources |= 1 << source;
1423			ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
1424		}
1425	} else {
1426		if (pi->active_auto_throttle_sources & (1 << source)) {
1427			pi->active_auto_throttle_sources &= ~(1 << source);
1428			ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
1429		}
1430	}
1431}
1432
1433static void ci_enable_vr_hot_gpio_interrupt(struct radeon_device *rdev)
1434{
1435	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
1436		ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
1437}
1438
1439static int ci_unfreeze_sclk_mclk_dpm(struct radeon_device *rdev)
1440{
1441	struct ci_power_info *pi = ci_get_pi(rdev);
1442	PPSMC_Result smc_result;
1443
1444	if (!pi->need_update_smu7_dpm_table)
1445		return 0;
1446
1447	if ((!pi->sclk_dpm_key_disabled) &&
1448	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
1449		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
1450		if (smc_result != PPSMC_Result_OK)
1451			return -EINVAL;
1452	}
1453
1454	if ((!pi->mclk_dpm_key_disabled) &&
1455	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
1456		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
1457		if (smc_result != PPSMC_Result_OK)
1458			return -EINVAL;
1459	}
1460
1461	pi->need_update_smu7_dpm_table = 0;
1462	return 0;
1463}
1464
1465static int ci_enable_sclk_mclk_dpm(struct radeon_device *rdev, bool enable)
1466{
1467	struct ci_power_info *pi = ci_get_pi(rdev);
1468	PPSMC_Result smc_result;
1469
1470	if (enable) {
1471		if (!pi->sclk_dpm_key_disabled) {
1472			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Enable);
1473			if (smc_result != PPSMC_Result_OK)
1474				return -EINVAL;
1475		}
1476
1477		if (!pi->mclk_dpm_key_disabled) {
1478			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Enable);
1479			if (smc_result != PPSMC_Result_OK)
1480				return -EINVAL;
1481
1482			WREG32_P(MC_SEQ_CNTL_3, CAC_EN, ~CAC_EN);
1483
1484			WREG32_SMC(LCAC_MC0_CNTL, 0x05);
1485			WREG32_SMC(LCAC_MC1_CNTL, 0x05);
1486			WREG32_SMC(LCAC_CPL_CNTL, 0x100005);
1487
1488			udelay(10);
1489
1490			WREG32_SMC(LCAC_MC0_CNTL, 0x400005);
1491			WREG32_SMC(LCAC_MC1_CNTL, 0x400005);
1492			WREG32_SMC(LCAC_CPL_CNTL, 0x500005);
1493		}
1494	} else {
1495		if (!pi->sclk_dpm_key_disabled) {
1496			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Disable);
1497			if (smc_result != PPSMC_Result_OK)
1498				return -EINVAL;
1499		}
1500
1501		if (!pi->mclk_dpm_key_disabled) {
1502			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Disable);
1503			if (smc_result != PPSMC_Result_OK)
1504				return -EINVAL;
1505		}
1506	}
1507
1508	return 0;
1509}
1510
1511static int ci_start_dpm(struct radeon_device *rdev)
1512{
1513	struct ci_power_info *pi = ci_get_pi(rdev);
1514	PPSMC_Result smc_result;
1515	int ret;
1516	u32 tmp;
1517
1518	tmp = RREG32_SMC(GENERAL_PWRMGT);
1519	tmp |= GLOBAL_PWRMGT_EN;
1520	WREG32_SMC(GENERAL_PWRMGT, tmp);
1521
1522	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
1523	tmp |= DYNAMIC_PM_EN;
1524	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
1525
1526	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);
1527
1528	WREG32_P(BIF_LNCNT_RESET, 0, ~RESET_LNCNT_EN);
1529
1530	smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Enable);
1531	if (smc_result != PPSMC_Result_OK)
1532		return -EINVAL;
1533
1534	ret = ci_enable_sclk_mclk_dpm(rdev, true);
1535	if (ret)
1536		return ret;
1537
1538	if (!pi->pcie_dpm_key_disabled) {
1539		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Enable);
1540		if (smc_result != PPSMC_Result_OK)
1541			return -EINVAL;
1542	}
1543
1544	return 0;
1545}
1546
1547static int ci_freeze_sclk_mclk_dpm(struct radeon_device *rdev)
1548{
1549	struct ci_power_info *pi = ci_get_pi(rdev);
1550	PPSMC_Result smc_result;
1551
1552	if (!pi->need_update_smu7_dpm_table)
1553		return 0;
1554
1555	if ((!pi->sclk_dpm_key_disabled) &&
1556	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
1557		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_FreezeLevel);
1558		if (smc_result != PPSMC_Result_OK)
1559			return -EINVAL;
1560	}
1561
1562	if ((!pi->mclk_dpm_key_disabled) &&
1563	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
1564		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_FreezeLevel);
1565		if (smc_result != PPSMC_Result_OK)
1566			return -EINVAL;
1567	}
1568
1569	return 0;
1570}
1571
1572static int ci_stop_dpm(struct radeon_device *rdev)
1573{
1574	struct ci_power_info *pi = ci_get_pi(rdev);
1575	PPSMC_Result smc_result;
1576	int ret;
1577	u32 tmp;
1578
1579	tmp = RREG32_SMC(GENERAL_PWRMGT);
1580	tmp &= ~GLOBAL_PWRMGT_EN;
1581	WREG32_SMC(GENERAL_PWRMGT, tmp);
1582
1583	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
1584	tmp &= ~DYNAMIC_PM_EN;
1585	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
1586
1587	if (!pi->pcie_dpm_key_disabled) {
1588		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Disable);
1589		if (smc_result != PPSMC_Result_OK)
1590			return -EINVAL;
1591	}
1592
1593	ret = ci_enable_sclk_mclk_dpm(rdev, false);
1594	if (ret)
1595		return ret;
1596
1597	smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Disable);
1598	if (smc_result != PPSMC_Result_OK)
1599		return -EINVAL;
1600
1601	return 0;
1602}
1603
1604static void ci_enable_sclk_control(struct radeon_device *rdev, bool enable)
1605{
1606	u32 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
1607
1608	if (enable)
1609		tmp &= ~SCLK_PWRMGT_OFF;
1610	else
1611		tmp |= SCLK_PWRMGT_OFF;
1612	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
1613}
1614
1615#if 0
1616static int ci_notify_hw_of_power_source(struct radeon_device *rdev,
1617					bool ac_power)
1618{
1619	struct ci_power_info *pi = ci_get_pi(rdev);
1620	struct radeon_cac_tdp_table *cac_tdp_table =
1621		rdev->pm.dpm.dyn_state.cac_tdp_table;
1622	u32 power_limit;
1623
1624	if (ac_power)
1625		power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
1626	else
1627		power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);
1628
1629	ci_set_power_limit(rdev, power_limit);
1630
1631	if (pi->caps_automatic_dc_transition) {
1632		if (ac_power)
1633			ci_send_msg_to_smc(rdev, PPSMC_MSG_RunningOnAC);
1634		else
1635			ci_send_msg_to_smc(rdev, PPSMC_MSG_Remove_DC_Clamp);
1636	}
1637
1638	return 0;
1639}
1640#endif
1641
1642static PPSMC_Result ci_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg)
1643{
1644	u32 tmp;
1645	int i;
1646
1647	if (!ci_is_smc_running(rdev))
1648		return PPSMC_Result_Failed;
1649
1650	WREG32(SMC_MESSAGE_0, msg);
1651
1652	for (i = 0; i < rdev->usec_timeout; i++) {
1653		tmp = RREG32(SMC_RESP_0);
1654		if (tmp != 0)
1655			break;
1656		udelay(1);
1657	}
1658	tmp = RREG32(SMC_RESP_0);
1659
1660	return (PPSMC_Result)tmp;
1661}
1662
1663static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
1664						      PPSMC_Msg msg, u32 parameter)
1665{
1666	WREG32(SMC_MSG_ARG_0, parameter);
1667	return ci_send_msg_to_smc(rdev, msg);
1668}
1669
1670static PPSMC_Result ci_send_msg_to_smc_return_parameter(struct radeon_device *rdev,
1671							PPSMC_Msg msg, u32 *parameter)
1672{
1673	PPSMC_Result smc_result;
1674
1675	smc_result = ci_send_msg_to_smc(rdev, msg);
1676
1677	if ((smc_result == PPSMC_Result_OK) && parameter)
1678		*parameter = RREG32(SMC_MSG_ARG_0);
1679
1680	return smc_result;
1681}
1682
1683static int ci_dpm_force_state_sclk(struct radeon_device *rdev, u32 n)
1684{
1685	struct ci_power_info *pi = ci_get_pi(rdev);
1686
1687	if (!pi->sclk_dpm_key_disabled) {
1688		PPSMC_Result smc_result =
1689			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n);
1690		if (smc_result != PPSMC_Result_OK)
1691			return -EINVAL;
1692	}
1693
1694	return 0;
1695}
1696
1697static int ci_dpm_force_state_mclk(struct radeon_device *rdev, u32 n)
1698{
1699	struct ci_power_info *pi = ci_get_pi(rdev);
1700
1701	if (!pi->mclk_dpm_key_disabled) {
1702		PPSMC_Result smc_result =
1703			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n);
1704		if (smc_result != PPSMC_Result_OK)
1705			return -EINVAL;
1706	}
1707
1708	return 0;
1709}
1710
1711static int ci_dpm_force_state_pcie(struct radeon_device *rdev, u32 n)
1712{
1713	struct ci_power_info *pi = ci_get_pi(rdev);
1714
1715	if (!pi->pcie_dpm_key_disabled) {
1716		PPSMC_Result smc_result =
1717			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
1718		if (smc_result != PPSMC_Result_OK)
1719			return -EINVAL;
1720	}
1721
1722	return 0;
1723}
1724
1725static int ci_set_power_limit(struct radeon_device *rdev, u32 n)
1726{
1727	struct ci_power_info *pi = ci_get_pi(rdev);
1728
1729	if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
1730		PPSMC_Result smc_result =
1731			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PkgPwrSetLimit, n);
1732		if (smc_result != PPSMC_Result_OK)
1733			return -EINVAL;
1734	}
1735
1736	return 0;
1737}
1738
1739static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
1740				       u32 target_tdp)
1741{
1742	PPSMC_Result smc_result =
1743		ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
1744	if (smc_result != PPSMC_Result_OK)
1745		return -EINVAL;
1746	return 0;
1747}
1748
1749#if 0
1750static int ci_set_boot_state(struct radeon_device *rdev)
1751{
1752	return ci_enable_sclk_mclk_dpm(rdev, false);
1753}
1754#endif
1755
1756static u32 ci_get_average_sclk_freq(struct radeon_device *rdev)
1757{
1758	u32 sclk_freq;
1759	PPSMC_Result smc_result =
1760		ci_send_msg_to_smc_return_parameter(rdev,
1761						    PPSMC_MSG_API_GetSclkFrequency,
1762						    &sclk_freq);
1763	if (smc_result != PPSMC_Result_OK)
1764		sclk_freq = 0;
1765
1766	return sclk_freq;
1767}
1768
1769static u32 ci_get_average_mclk_freq(struct radeon_device *rdev)
1770{
1771	u32 mclk_freq;
1772	PPSMC_Result smc_result =
1773		ci_send_msg_to_smc_return_parameter(rdev,
1774						    PPSMC_MSG_API_GetMclkFrequency,
1775						    &mclk_freq);
1776	if (smc_result != PPSMC_Result_OK)
1777		mclk_freq = 0;
1778
1779	return mclk_freq;
1780}
1781
1782static void ci_dpm_start_smc(struct radeon_device *rdev)
1783{
1784	int i;
1785
1786	ci_program_jump_on_start(rdev);
1787	ci_start_smc_clock(rdev);
1788	ci_start_smc(rdev);
1789	for (i = 0; i < rdev->usec_timeout; i++) {
1790		if (RREG32_SMC(FIRMWARE_FLAGS) & INTERRUPTS_ENABLED)
1791			break;
1792	}
1793}
1794
1795static void ci_dpm_stop_smc(struct radeon_device *rdev)
1796{
1797	ci_reset_smc(rdev);
1798	ci_stop_smc_clock(rdev);
1799}
1800
1801static int ci_process_firmware_header(struct radeon_device *rdev)
1802{
1803	struct ci_power_info *pi = ci_get_pi(rdev);
1804	u32 tmp;
1805	int ret;
1806
1807	ret = ci_read_smc_sram_dword(rdev,
1808				     SMU7_FIRMWARE_HEADER_LOCATION +
1809				     offsetof(SMU7_Firmware_Header, DpmTable),
1810				     &tmp, pi->sram_end);
1811	if (ret)
1812		return ret;
1813
1814	pi->dpm_table_start = tmp;
1815
1816	ret = ci_read_smc_sram_dword(rdev,
1817				     SMU7_FIRMWARE_HEADER_LOCATION +
1818				     offsetof(SMU7_Firmware_Header, SoftRegisters),
1819				     &tmp, pi->sram_end);
1820	if (ret)
1821		return ret;
1822
1823	pi->soft_regs_start = tmp;
1824
1825	ret = ci_read_smc_sram_dword(rdev,
1826				     SMU7_FIRMWARE_HEADER_LOCATION +
1827				     offsetof(SMU7_Firmware_Header, mcRegisterTable),
1828				     &tmp, pi->sram_end);
1829	if (ret)
1830		return ret;
1831
1832	pi->mc_reg_table_start = tmp;
1833
1834	ret = ci_read_smc_sram_dword(rdev,
1835				     SMU7_FIRMWARE_HEADER_LOCATION +
1836				     offsetof(SMU7_Firmware_Header, FanTable),
1837				     &tmp, pi->sram_end);
1838	if (ret)
1839		return ret;
1840
1841	pi->fan_table_start = tmp;
1842
1843	ret = ci_read_smc_sram_dword(rdev,
1844				     SMU7_FIRMWARE_HEADER_LOCATION +
1845				     offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
1846				     &tmp, pi->sram_end);
1847	if (ret)
1848		return ret;
1849
1850	pi->arb_table_start = tmp;
1851
1852	return 0;
1853}
1854
1855static void ci_read_clock_registers(struct radeon_device *rdev)
1856{
1857	struct ci_power_info *pi = ci_get_pi(rdev);
1858
1859	pi->clock_registers.cg_spll_func_cntl =
1860		RREG32_SMC(CG_SPLL_FUNC_CNTL);
1861	pi->clock_registers.cg_spll_func_cntl_2 =
1862		RREG32_SMC(CG_SPLL_FUNC_CNTL_2);
1863	pi->clock_registers.cg_spll_func_cntl_3 =
1864		RREG32_SMC(CG_SPLL_FUNC_CNTL_3);
1865	pi->clock_registers.cg_spll_func_cntl_4 =
1866		RREG32_SMC(CG_SPLL_FUNC_CNTL_4);
1867	pi->clock_registers.cg_spll_spread_spectrum =
1868		RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
1869	pi->clock_registers.cg_spll_spread_spectrum_2 =
1870		RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM_2);
1871	pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
1872	pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
1873	pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
1874	pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
1875	pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL);
1876	pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1);
1877	pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2);
1878	pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
1879	pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
1880}
1881
1882static void ci_init_sclk_t(struct radeon_device *rdev)
1883{
1884	struct ci_power_info *pi = ci_get_pi(rdev);
1885
1886	pi->low_sclk_interrupt_t = 0;
1887}
1888
1889static void ci_enable_thermal_protection(struct radeon_device *rdev,
1890					 bool enable)
1891{
1892	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
1893
1894	if (enable)
1895		tmp &= ~THERMAL_PROTECTION_DIS;
1896	else
1897		tmp |= THERMAL_PROTECTION_DIS;
1898	WREG32_SMC(GENERAL_PWRMGT, tmp);
1899}
1900
1901static void ci_enable_acpi_power_management(struct radeon_device *rdev)
1902{
1903	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
1904
1905	tmp |= STATIC_PM_EN;
1906
1907	WREG32_SMC(GENERAL_PWRMGT, tmp);
1908}
1909
1910#if 0
1911static int ci_enter_ulp_state(struct radeon_device *rdev)
1912{
1913
1914	WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);
1915
1916	udelay(25000);
1917
1918	return 0;
1919}
1920
1921static int ci_exit_ulp_state(struct radeon_device *rdev)
1922{
1923	int i;
1924
1925	WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);
1926
1927	udelay(7000);
1928
1929	for (i = 0; i < rdev->usec_timeout; i++) {
1930		if (RREG32(SMC_RESP_0) == 1)
1931			break;
1932		udelay(1000);
1933	}
1934
1935	return 0;
1936}
1937#endif
1938
1939static int ci_notify_smc_display_change(struct radeon_device *rdev,
1940					bool has_display)
1941{
1942	PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
1943
1944	return (ci_send_msg_to_smc(rdev, msg) == PPSMC_Result_OK) ?  0 : -EINVAL;
1945}
1946
1947static int ci_enable_ds_master_switch(struct radeon_device *rdev,
1948				      bool enable)
1949{
1950	struct ci_power_info *pi = ci_get_pi(rdev);
1951
1952	if (enable) {
1953		if (pi->caps_sclk_ds) {
1954			if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
1955				return -EINVAL;
1956		} else {
1957			if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
1958				return -EINVAL;
1959		}
1960	} else {
1961		if (pi->caps_sclk_ds) {
1962			if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
1963				return -EINVAL;
1964		}
1965	}
1966
1967	return 0;
1968}
1969
1970static void ci_program_display_gap(struct radeon_device *rdev)
1971{
1972	u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
1973	u32 pre_vbi_time_in_us;
1974	u32 frame_time_in_us;
1975	u32 ref_clock = rdev->clock.spll.reference_freq;
1976	u32 refresh_rate = r600_dpm_get_vrefresh(rdev);
1977	u32 vblank_time = r600_dpm_get_vblank_time(rdev);
1978
1979	tmp &= ~DISP_GAP_MASK;
1980	if (rdev->pm.dpm.new_active_crtc_count > 0)
1981		tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
1982	else
1983		tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE);
1984	WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);
1985
1986	if (refresh_rate == 0)
1987		refresh_rate = 60;
1988	if (vblank_time == 0xffffffff)
1989		vblank_time = 500;
1990	frame_time_in_us = 1000000 / refresh_rate;
1991	pre_vbi_time_in_us =
1992		frame_time_in_us - 200 - vblank_time;
1993	tmp = pre_vbi_time_in_us * (ref_clock / 100);
1994
1995	WREG32_SMC(CG_DISPLAY_GAP_CNTL2, tmp);
1996	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
1997	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
1998
1999
2000	ci_notify_smc_display_change(rdev, (rdev->pm.dpm.new_active_crtc_count == 1));
2001
2002}
2003
2004static void ci_enable_spread_spectrum(struct radeon_device *rdev, bool enable)
2005{
2006	struct ci_power_info *pi = ci_get_pi(rdev);
2007	u32 tmp;
2008
2009	if (enable) {
2010		if (pi->caps_sclk_ss_support) {
2011			tmp = RREG32_SMC(GENERAL_PWRMGT);
2012			tmp |= DYN_SPREAD_SPECTRUM_EN;
2013			WREG32_SMC(GENERAL_PWRMGT, tmp);
2014		}
2015	} else {
2016		tmp = RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
2017		tmp &= ~SSEN;
2018		WREG32_SMC(CG_SPLL_SPREAD_SPECTRUM, tmp);
2019
2020		tmp = RREG32_SMC(GENERAL_PWRMGT);
2021		tmp &= ~DYN_SPREAD_SPECTRUM_EN;
2022		WREG32_SMC(GENERAL_PWRMGT, tmp);
2023	}
2024}
2025
2026static void ci_program_sstp(struct radeon_device *rdev)
2027{
2028	WREG32_SMC(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
2029}
2030
2031static void ci_enable_display_gap(struct radeon_device *rdev)
2032{
2033	u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
2034
2035	tmp &= ~(DISP_GAP_MASK | DISP_GAP_MCHG_MASK);
2036	tmp |= (DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
2037		DISP_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK));
2038
2039	WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);
2040}
2041
2042static void ci_program_vc(struct radeon_device *rdev)
2043{
2044	u32 tmp;
2045
2046	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
2047	tmp &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
2048	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
2049
2050	WREG32_SMC(CG_FTV_0, CISLANDS_VRC_DFLT0);
2051	WREG32_SMC(CG_FTV_1, CISLANDS_VRC_DFLT1);
2052	WREG32_SMC(CG_FTV_2, CISLANDS_VRC_DFLT2);
2053	WREG32_SMC(CG_FTV_3, CISLANDS_VRC_DFLT3);
2054	WREG32_SMC(CG_FTV_4, CISLANDS_VRC_DFLT4);
2055	WREG32_SMC(CG_FTV_5, CISLANDS_VRC_DFLT5);
2056	WREG32_SMC(CG_FTV_6, CISLANDS_VRC_DFLT6);
2057	WREG32_SMC(CG_FTV_7, CISLANDS_VRC_DFLT7);
2058}
2059
2060static void ci_clear_vc(struct radeon_device *rdev)
2061{
2062	u32 tmp;
2063
2064	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
2065	tmp |= (RESET_SCLK_CNT | RESET_BUSY_CNT);
2066	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
2067
2068	WREG32_SMC(CG_FTV_0, 0);
2069	WREG32_SMC(CG_FTV_1, 0);
2070	WREG32_SMC(CG_FTV_2, 0);
2071	WREG32_SMC(CG_FTV_3, 0);
2072	WREG32_SMC(CG_FTV_4, 0);
2073	WREG32_SMC(CG_FTV_5, 0);
2074	WREG32_SMC(CG_FTV_6, 0);
2075	WREG32_SMC(CG_FTV_7, 0);
2076}
2077
2078static int ci_upload_firmware(struct radeon_device *rdev)
2079{
2080	struct ci_power_info *pi = ci_get_pi(rdev);
2081	int i, ret;
2082
2083	for (i = 0; i < rdev->usec_timeout; i++) {
2084		if (RREG32_SMC(RCU_UC_EVENTS) & BOOT_SEQ_DONE)
2085			break;
2086	}
2087	WREG32_SMC(SMC_SYSCON_MISC_CNTL, 1);
2088
2089	ci_stop_smc_clock(rdev);
2090	ci_reset_smc(rdev);
2091
2092	ret = ci_load_smc_ucode(rdev, pi->sram_end);
2093
2094	return ret;
2095
2096}
2097
2098static int ci_get_svi2_voltage_table(struct radeon_device *rdev,
2099				     struct radeon_clock_voltage_dependency_table *voltage_dependency_table,
2100				     struct atom_voltage_table *voltage_table)
2101{
2102	u32 i;
2103
2104	if (voltage_dependency_table == NULL)
2105		return -EINVAL;
2106
2107	voltage_table->mask_low = 0;
2108	voltage_table->phase_delay = 0;
2109
2110	voltage_table->count = voltage_dependency_table->count;
2111	for (i = 0; i < voltage_table->count; i++) {
2112		voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
2113		voltage_table->entries[i].smio_low = 0;
2114	}
2115
2116	return 0;
2117}
2118
2119static int ci_construct_voltage_tables(struct radeon_device *rdev)
2120{
2121	struct ci_power_info *pi = ci_get_pi(rdev);
2122	int ret;
2123
2124	if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2125		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC,
2126						    VOLTAGE_OBJ_GPIO_LUT,
2127						    &pi->vddc_voltage_table);
2128		if (ret)
2129			return ret;
2130	} else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2131		ret = ci_get_svi2_voltage_table(rdev,
2132						&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2133						&pi->vddc_voltage_table);
2134		if (ret)
2135			return ret;
2136	}
2137
2138	if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
2139		si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDC,
2140							 &pi->vddc_voltage_table);
2141
2142	if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2143		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI,
2144						    VOLTAGE_OBJ_GPIO_LUT,
2145						    &pi->vddci_voltage_table);
2146		if (ret)
2147			return ret;
2148	} else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2149		ret = ci_get_svi2_voltage_table(rdev,
2150						&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2151						&pi->vddci_voltage_table);
2152		if (ret)
2153			return ret;
2154	}
2155
2156	if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
2157		si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDCI,
2158							 &pi->vddci_voltage_table);
2159
2160	if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2161		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_MVDDC,
2162						    VOLTAGE_OBJ_GPIO_LUT,
2163						    &pi->mvdd_voltage_table);
2164		if (ret)
2165			return ret;
2166	} else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2167		ret = ci_get_svi2_voltage_table(rdev,
2168						&rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
2169						&pi->mvdd_voltage_table);
2170		if (ret)
2171			return ret;
2172	}
2173
2174	if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
2175		si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_MVDD,
2176							 &pi->mvdd_voltage_table);
2177
2178	return 0;
2179}
2180
2181static void ci_populate_smc_voltage_table(struct radeon_device *rdev,
2182					  struct atom_voltage_table_entry *voltage_table,
2183					  SMU7_Discrete_VoltageLevel *smc_voltage_table)
2184{
2185	int ret;
2186
2187	ret = ci_get_std_voltage_value_sidd(rdev, voltage_table,
2188					    &smc_voltage_table->StdVoltageHiSidd,
2189					    &smc_voltage_table->StdVoltageLoSidd);
2190
2191	if (ret) {
2192		smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
2193		smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
2194	}
2195
2196	smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
2197	smc_voltage_table->StdVoltageHiSidd =
2198		cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
2199	smc_voltage_table->StdVoltageLoSidd =
2200		cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
2201}
2202
2203static int ci_populate_smc_vddc_table(struct radeon_device *rdev,
2204				      SMU7_Discrete_DpmTable *table)
2205{
2206	struct ci_power_info *pi = ci_get_pi(rdev);
2207	unsigned int count;
2208
2209	table->VddcLevelCount = pi->vddc_voltage_table.count;
2210	for (count = 0; count < table->VddcLevelCount; count++) {
2211		ci_populate_smc_voltage_table(rdev,
2212					      &pi->vddc_voltage_table.entries[count],
2213					      &table->VddcLevel[count]);
2214
2215		if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2216			table->VddcLevel[count].Smio |=
2217				pi->vddc_voltage_table.entries[count].smio_low;
2218		else
2219			table->VddcLevel[count].Smio = 0;
2220	}
2221	table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);
2222
2223	return 0;
2224}
2225
2226static int ci_populate_smc_vddci_table(struct radeon_device *rdev,
2227				       SMU7_Discrete_DpmTable *table)
2228{
2229	unsigned int count;
2230	struct ci_power_info *pi = ci_get_pi(rdev);
2231
2232	table->VddciLevelCount = pi->vddci_voltage_table.count;
2233	for (count = 0; count < table->VddciLevelCount; count++) {
2234		ci_populate_smc_voltage_table(rdev,
2235					      &pi->vddci_voltage_table.entries[count],
2236					      &table->VddciLevel[count]);
2237
2238		if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2239			table->VddciLevel[count].Smio |=
2240				pi->vddci_voltage_table.entries[count].smio_low;
2241		else
2242			table->VddciLevel[count].Smio = 0;
2243	}
2244	table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);
2245
2246	return 0;
2247}
2248
2249static int ci_populate_smc_mvdd_table(struct radeon_device *rdev,
2250				      SMU7_Discrete_DpmTable *table)
2251{
2252	struct ci_power_info *pi = ci_get_pi(rdev);
2253	unsigned int count;
2254
2255	table->MvddLevelCount = pi->mvdd_voltage_table.count;
2256	for (count = 0; count < table->MvddLevelCount; count++) {
2257		ci_populate_smc_voltage_table(rdev,
2258					      &pi->mvdd_voltage_table.entries[count],
2259					      &table->MvddLevel[count]);
2260
2261		if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2262			table->MvddLevel[count].Smio |=
2263				pi->mvdd_voltage_table.entries[count].smio_low;
2264		else
2265			table->MvddLevel[count].Smio = 0;
2266	}
2267	table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);
2268
2269	return 0;
2270}
2271
2272static int ci_populate_smc_voltage_tables(struct radeon_device *rdev,
2273					  SMU7_Discrete_DpmTable *table)
2274{
2275	int ret;
2276
2277	ret = ci_populate_smc_vddc_table(rdev, table);
2278	if (ret)
2279		return ret;
2280
2281	ret = ci_populate_smc_vddci_table(rdev, table);
2282	if (ret)
2283		return ret;
2284
2285	ret = ci_populate_smc_mvdd_table(rdev, table);
2286	if (ret)
2287		return ret;
2288
2289	return 0;
2290}
2291
2292static int ci_populate_mvdd_value(struct radeon_device *rdev, u32 mclk,
2293				  SMU7_Discrete_VoltageLevel *voltage)
2294{
2295	struct ci_power_info *pi = ci_get_pi(rdev);
2296	u32 i = 0;
2297
2298	if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
2299		for (i = 0; i < rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
2300			if (mclk <= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
2301				voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
2302				break;
2303			}
2304		}
2305
2306		if (i >= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count)
2307			return -EINVAL;
2308	}
2309
2310	return -EINVAL;
2311}
2312
2313static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
2314					 struct atom_voltage_table_entry *voltage_table,
2315					 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
2316{
2317	u16 v_index, idx;
2318	bool voltage_found = false;
2319	*std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
2320	*std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;
2321
2322	if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
2323		return -EINVAL;
2324
2325	if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
2326		for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
2327			if (voltage_table->value ==
2328			    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
2329				voltage_found = true;
2330				if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
2331					idx = v_index;
2332				else
2333					idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
2334				*std_voltage_lo_sidd =
2335					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
2336				*std_voltage_hi_sidd =
2337					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
2338				break;
2339			}
2340		}
2341
2342		if (!voltage_found) {
2343			for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
2344				if (voltage_table->value <=
2345				    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
2346					voltage_found = true;
2347					if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
2348						idx = v_index;
2349					else
2350						idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
2351					*std_voltage_lo_sidd =
2352						rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
2353					*std_voltage_hi_sidd =
2354						rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
2355					break;
2356				}
2357			}
2358		}
2359	}
2360
2361	return 0;
2362}
2363
2364static void ci_populate_phase_value_based_on_sclk(struct radeon_device *rdev,
2365						  const struct radeon_phase_shedding_limits_table *limits,
2366						  u32 sclk,
2367						  u32 *phase_shedding)
2368{
2369	unsigned int i;
2370
2371	*phase_shedding = 1;
2372
2373	for (i = 0; i < limits->count; i++) {
2374		if (sclk < limits->entries[i].sclk) {
2375			*phase_shedding = i;
2376			break;
2377		}
2378	}
2379}
2380
2381static void ci_populate_phase_value_based_on_mclk(struct radeon_device *rdev,
2382						  const struct radeon_phase_shedding_limits_table *limits,
2383						  u32 mclk,
2384						  u32 *phase_shedding)
2385{
2386	unsigned int i;
2387
2388	*phase_shedding = 1;
2389
2390	for (i = 0; i < limits->count; i++) {
2391		if (mclk < limits->entries[i].mclk) {
2392			*phase_shedding = i;
2393			break;
2394		}
2395	}
2396}
2397
2398static int ci_init_arb_table_index(struct radeon_device *rdev)
2399{
2400	struct ci_power_info *pi = ci_get_pi(rdev);
2401	u32 tmp;
2402	int ret;
2403
2404	ret = ci_read_smc_sram_dword(rdev, pi->arb_table_start,
2405				     &tmp, pi->sram_end);
2406	if (ret)
2407		return ret;
2408
2409	tmp &= 0x00FFFFFF;
2410	tmp |= MC_CG_ARB_FREQ_F1 << 24;
2411
2412	return ci_write_smc_sram_dword(rdev, pi->arb_table_start,
2413				       tmp, pi->sram_end);
2414}
2415
2416static int ci_get_dependency_volt_by_clk(struct radeon_device *rdev,
2417					 struct radeon_clock_voltage_dependency_table *allowed_clock_voltage_table,
2418					 u32 clock, u32 *voltage)
2419{
2420	u32 i = 0;
2421
2422	if (allowed_clock_voltage_table->count == 0)
2423		return -EINVAL;
2424
2425	for (i = 0; i < allowed_clock_voltage_table->count; i++) {
2426		if (allowed_clock_voltage_table->entries[i].clk >= clock) {
2427			*voltage = allowed_clock_voltage_table->entries[i].v;
2428			return 0;
2429		}
2430	}
2431
2432	*voltage = allowed_clock_voltage_table->entries[i-1].v;
2433
2434	return 0;
2435}
2436
2437static u8 ci_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
2438					     u32 sclk, u32 min_sclk_in_sr)
2439{
2440	u32 i;
2441	u32 tmp;
2442	u32 min = (min_sclk_in_sr > CISLAND_MINIMUM_ENGINE_CLOCK) ?
2443		min_sclk_in_sr : CISLAND_MINIMUM_ENGINE_CLOCK;
2444
2445	if (sclk < min)
2446		return 0;
2447
2448	for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID;  ; i--) {
2449		tmp = sclk / (1 << i);
2450		if (tmp >= min || i == 0)
2451			break;
2452	}
2453
2454	return (u8)i;
2455}
2456
2457static int ci_initial_switch_from_arb_f0_to_f1(struct radeon_device *rdev)
2458{
2459	return ni_copy_and_switch_arb_sets(rdev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
2460}
2461
2462static int ci_reset_to_default(struct radeon_device *rdev)
2463{
2464	return (ci_send_msg_to_smc(rdev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
2465		0 : -EINVAL;
2466}
2467
2468static int ci_force_switch_to_arb_f0(struct radeon_device *rdev)
2469{
2470	u32 tmp;
2471
2472	tmp = (RREG32_SMC(SMC_SCRATCH9) & 0x0000ff00) >> 8;
2473
2474	if (tmp == MC_CG_ARB_FREQ_F0)
2475		return 0;
2476
2477	return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0);
2478}
2479
2480static void ci_register_patching_mc_arb(struct radeon_device *rdev,
2481					const u32 engine_clock,
2482					const u32 memory_clock,
2483					u32 *dram_timimg2)
2484{
2485	bool patch;
2486	u32 tmp, tmp2;
2487
2488	tmp = RREG32(MC_SEQ_MISC0);
2489	patch = ((tmp & 0x0000f00) == 0x300) ? true : false;
2490
2491	if (patch &&
2492	    ((rdev->pdev->device == 0x67B0) ||
2493	     (rdev->pdev->device == 0x67B1))) {
2494		if ((memory_clock > 100000) && (memory_clock <= 125000)) {
2495			tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff;
2496			*dram_timimg2 &= ~0x00ff0000;
2497			*dram_timimg2 |= tmp2 << 16;
2498		} else if ((memory_clock > 125000) && (memory_clock <= 137500)) {
2499			tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff;
2500			*dram_timimg2 &= ~0x00ff0000;
2501			*dram_timimg2 |= tmp2 << 16;
2502		}
2503	}
2504}
2505
2506
2507static int ci_populate_memory_timing_parameters(struct radeon_device *rdev,
2508						u32 sclk,
2509						u32 mclk,
2510						SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
2511{
2512	u32 dram_timing;
2513	u32 dram_timing2;
2514	u32 burst_time;
2515
2516	radeon_atom_set_engine_dram_timings(rdev, sclk, mclk);
2517
2518	dram_timing  = RREG32(MC_ARB_DRAM_TIMING);
2519	dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
2520	burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;
2521
2522	ci_register_patching_mc_arb(rdev, sclk, mclk, &dram_timing2);
2523
2524	arb_regs->McArbDramTiming  = cpu_to_be32(dram_timing);
2525	arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
2526	arb_regs->McArbBurstTime = (u8)burst_time;
2527
2528	return 0;
2529}
2530
2531static int ci_do_program_memory_timing_parameters(struct radeon_device *rdev)
2532{
2533	struct ci_power_info *pi = ci_get_pi(rdev);
2534	SMU7_Discrete_MCArbDramTimingTable arb_regs;
2535	u32 i, j;
2536	int ret =  0;
2537
2538	memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));
2539
2540	for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
2541		for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
2542			ret = ci_populate_memory_timing_parameters(rdev,
2543								   pi->dpm_table.sclk_table.dpm_levels[i].value,
2544								   pi->dpm_table.mclk_table.dpm_levels[j].value,
2545								   &arb_regs.entries[i][j]);
2546			if (ret)
2547				break;
2548		}
2549	}
2550
2551	if (ret == 0)
2552		ret = ci_copy_bytes_to_smc(rdev,
2553					   pi->arb_table_start,
2554					   (u8 *)&arb_regs,
2555					   sizeof(SMU7_Discrete_MCArbDramTimingTable),
2556					   pi->sram_end);
2557
2558	return ret;
2559}
2560
2561static int ci_program_memory_timing_parameters(struct radeon_device *rdev)
2562{
2563	struct ci_power_info *pi = ci_get_pi(rdev);
2564
2565	if (pi->need_update_smu7_dpm_table == 0)
2566		return 0;
2567
2568	return ci_do_program_memory_timing_parameters(rdev);
2569}
2570
2571static void ci_populate_smc_initial_state(struct radeon_device *rdev,
2572					  struct radeon_ps *radeon_boot_state)
2573{
2574	struct ci_ps *boot_state = ci_get_ps(radeon_boot_state);
2575	struct ci_power_info *pi = ci_get_pi(rdev);
2576	u32 level = 0;
2577
2578	for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
2579		if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
2580		    boot_state->performance_levels[0].sclk) {
2581			pi->smc_state_table.GraphicsBootLevel = level;
2582			break;
2583		}
2584	}
2585
2586	for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
2587		if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
2588		    boot_state->performance_levels[0].mclk) {
2589			pi->smc_state_table.MemoryBootLevel = level;
2590			break;
2591		}
2592	}
2593}
2594
2595static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
2596{
2597	u32 i;
2598	u32 mask_value = 0;
2599
2600	for (i = dpm_table->count; i > 0; i--) {
2601		mask_value = mask_value << 1;
2602		if (dpm_table->dpm_levels[i-1].enabled)
2603			mask_value |= 0x1;
2604		else
2605			mask_value &= 0xFFFFFFFE;
2606	}
2607
2608	return mask_value;
2609}
2610
2611static void ci_populate_smc_link_level(struct radeon_device *rdev,
2612				       SMU7_Discrete_DpmTable *table)
2613{
2614	struct ci_power_info *pi = ci_get_pi(rdev);
2615	struct ci_dpm_table *dpm_table = &pi->dpm_table;
2616	u32 i;
2617
2618	for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
2619		table->LinkLevel[i].PcieGenSpeed =
2620			(u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
2621		table->LinkLevel[i].PcieLaneCount =
2622			r600_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
2623		table->LinkLevel[i].EnabledForActivity = 1;
2624		table->LinkLevel[i].DownT = cpu_to_be32(5);
2625		table->LinkLevel[i].UpT = cpu_to_be32(30);
2626	}
2627
2628	pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
2629	pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
2630		ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
2631}
2632
2633static int ci_populate_smc_uvd_level(struct radeon_device *rdev,
2634				     SMU7_Discrete_DpmTable *table)
2635{
2636	u32 count;
2637	struct atom_clock_dividers dividers;
2638	int ret = -EINVAL;
2639
2640	table->UvdLevelCount =
2641		rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;
2642
2643	for (count = 0; count < table->UvdLevelCount; count++) {
2644		table->UvdLevel[count].VclkFrequency =
2645			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
2646		table->UvdLevel[count].DclkFrequency =
2647			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
2648		table->UvdLevel[count].MinVddc =
2649			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2650		table->UvdLevel[count].MinVddcPhases = 1;
2651
2652		ret = radeon_atom_get_clock_dividers(rdev,
2653						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2654						     table->UvdLevel[count].VclkFrequency, false, &dividers);
2655		if (ret)
2656			return ret;
2657
2658		table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;
2659
2660		ret = radeon_atom_get_clock_dividers(rdev,
2661						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2662						     table->UvdLevel[count].DclkFrequency, false, &dividers);
2663		if (ret)
2664			return ret;
2665
2666		table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;
2667
2668		table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
2669		table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
2670		table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
2671	}
2672
2673	return ret;
2674}
2675
2676static int ci_populate_smc_vce_level(struct radeon_device *rdev,
2677				     SMU7_Discrete_DpmTable *table)
2678{
2679	u32 count;
2680	struct atom_clock_dividers dividers;
2681	int ret = -EINVAL;
2682
2683	table->VceLevelCount =
2684		rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;
2685
2686	for (count = 0; count < table->VceLevelCount; count++) {
2687		table->VceLevel[count].Frequency =
2688			rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
2689		table->VceLevel[count].MinVoltage =
2690			(u16)rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2691		table->VceLevel[count].MinPhases = 1;
2692
2693		ret = radeon_atom_get_clock_dividers(rdev,
2694						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2695						     table->VceLevel[count].Frequency, false, &dividers);
2696		if (ret)
2697			return ret;
2698
2699		table->VceLevel[count].Divider = (u8)dividers.post_divider;
2700
2701		table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
2702		table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
2703	}
2704
2705	return ret;
2706
2707}
2708
2709static int ci_populate_smc_acp_level(struct radeon_device *rdev,
2710				     SMU7_Discrete_DpmTable *table)
2711{
2712	u32 count;
2713	struct atom_clock_dividers dividers;
2714	int ret = -EINVAL;
2715
2716	table->AcpLevelCount = (u8)
2717		(rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);
2718
2719	for (count = 0; count < table->AcpLevelCount; count++) {
2720		table->AcpLevel[count].Frequency =
2721			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
2722		table->AcpLevel[count].MinVoltage =
2723			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v;
2724		table->AcpLevel[count].MinPhases = 1;
2725
2726		ret = radeon_atom_get_clock_dividers(rdev,
2727						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2728						     table->AcpLevel[count].Frequency, false, &dividers);
2729		if (ret)
2730			return ret;
2731
2732		table->AcpLevel[count].Divider = (u8)dividers.post_divider;
2733
2734		table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
2735		table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
2736	}
2737
2738	return ret;
2739}
2740
2741static int ci_populate_smc_samu_level(struct radeon_device *rdev,
2742				      SMU7_Discrete_DpmTable *table)
2743{
2744	u32 count;
2745	struct atom_clock_dividers dividers;
2746	int ret = -EINVAL;
2747
2748	table->SamuLevelCount =
2749		rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;
2750
2751	for (count = 0; count < table->SamuLevelCount; count++) {
2752		table->SamuLevel[count].Frequency =
2753			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
2754		table->SamuLevel[count].MinVoltage =
2755			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2756		table->SamuLevel[count].MinPhases = 1;
2757
2758		ret = radeon_atom_get_clock_dividers(rdev,
2759						     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2760						     table->SamuLevel[count].Frequency, false, &dividers);
2761		if (ret)
2762			return ret;
2763
2764		table->SamuLevel[count].Divider = (u8)dividers.post_divider;
2765
2766		table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
2767		table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
2768	}
2769
2770	return ret;
2771}
2772
2773static int ci_calculate_mclk_params(struct radeon_device *rdev,
2774				    u32 memory_clock,
2775				    SMU7_Discrete_MemoryLevel *mclk,
2776				    bool strobe_mode,
2777				    bool dll_state_on)
2778{
2779	struct ci_power_info *pi = ci_get_pi(rdev);
2780	u32  dll_cntl = pi->clock_registers.dll_cntl;
2781	u32  mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
2782	u32  mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
2783	u32  mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
2784	u32  mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
2785	u32  mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
2786	u32  mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
2787	u32  mpll_ss1 = pi->clock_registers.mpll_ss1;
2788	u32  mpll_ss2 = pi->clock_registers.mpll_ss2;
2789	struct atom_mpll_param mpll_param;
2790	int ret;
2791
2792	ret = radeon_atom_get_memory_pll_dividers(rdev, memory_clock, strobe_mode, &mpll_param);
2793	if (ret)
2794		return ret;
2795
2796	mpll_func_cntl &= ~BWCTRL_MASK;
2797	mpll_func_cntl |= BWCTRL(mpll_param.bwcntl);
2798
2799	mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK);
2800	mpll_func_cntl_1 |= CLKF(mpll_param.clkf) |
2801		CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode);
2802
2803	mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
2804	mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);
2805
2806	if (pi->mem_gddr5) {
2807		mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
2808		mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
2809			YCLK_POST_DIV(mpll_param.post_div);
2810	}
2811
2812	if (pi->caps_mclk_ss_support) {
2813		struct radeon_atom_ss ss;
2814		u32 freq_nom;
2815		u32 tmp;
2816		u32 reference_clock = rdev->clock.mpll.reference_freq;
2817
2818		if (mpll_param.qdr == 1)
2819			freq_nom = memory_clock * 4 * (1 << mpll_param.post_div);
2820		else
2821			freq_nom = memory_clock * 2 * (1 << mpll_param.post_div);
2822
2823		tmp = (freq_nom / reference_clock);
2824		tmp = tmp * tmp;
2825		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
2826						     ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
2827			u32 clks = reference_clock * 5 / ss.rate;
2828			u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
2829
2830			mpll_ss1 &= ~CLKV_MASK;
2831			mpll_ss1 |= CLKV(clkv);
2832
2833			mpll_ss2 &= ~CLKS_MASK;
2834			mpll_ss2 |= CLKS(clks);
2835		}
2836	}
2837
2838	mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
2839	mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed);
2840
2841	if (dll_state_on)
2842		mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB;
2843	else
2844		mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
2845
2846	mclk->MclkFrequency = memory_clock;
2847	mclk->MpllFuncCntl = mpll_func_cntl;
2848	mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
2849	mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
2850	mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
2851	mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
2852	mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
2853	mclk->DllCntl = dll_cntl;
2854	mclk->MpllSs1 = mpll_ss1;
2855	mclk->MpllSs2 = mpll_ss2;
2856
2857	return 0;
2858}
2859
2860static int ci_populate_single_memory_level(struct radeon_device *rdev,
2861					   u32 memory_clock,
2862					   SMU7_Discrete_MemoryLevel *memory_level)
2863{
2864	struct ci_power_info *pi = ci_get_pi(rdev);
2865	int ret;
2866	bool dll_state_on;
2867
2868	if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
2869		ret = ci_get_dependency_volt_by_clk(rdev,
2870						    &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2871						    memory_clock, &memory_level->MinVddc);
2872		if (ret)
2873			return ret;
2874	}
2875
2876	if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
2877		ret = ci_get_dependency_volt_by_clk(rdev,
2878						    &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2879						    memory_clock, &memory_level->MinVddci);
2880		if (ret)
2881			return ret;
2882	}
2883
2884	if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
2885		ret = ci_get_dependency_volt_by_clk(rdev,
2886						    &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
2887						    memory_clock, &memory_level->MinMvdd);
2888		if (ret)
2889			return ret;
2890	}
2891
2892	memory_level->MinVddcPhases = 1;
2893
2894	if (pi->vddc_phase_shed_control)
2895		ci_populate_phase_value_based_on_mclk(rdev,
2896						      &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
2897						      memory_clock,
2898						      &memory_level->MinVddcPhases);
2899
2900	memory_level->EnabledForThrottle = 1;
2901	memory_level->UpH = 0;
2902	memory_level->DownH = 100;
2903	memory_level->VoltageDownH = 0;
2904	memory_level->ActivityLevel = (u16)pi->mclk_activity_target;
2905
2906	memory_level->StutterEnable = false;
2907	memory_level->StrobeEnable = false;
2908	memory_level->EdcReadEnable = false;
2909	memory_level->EdcWriteEnable = false;
2910	memory_level->RttEnable = false;
2911
2912	memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2913
2914	if (pi->mclk_stutter_mode_threshold &&
2915	    (memory_clock <= pi->mclk_stutter_mode_threshold) &&
2916	    (pi->uvd_enabled == false) &&
2917	    (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
2918	    (rdev->pm.dpm.new_active_crtc_count <= 2))
2919		memory_level->StutterEnable = true;
2920
2921	if (pi->mclk_strobe_mode_threshold &&
2922	    (memory_clock <= pi->mclk_strobe_mode_threshold))
2923		memory_level->StrobeEnable = 1;
2924
2925	if (pi->mem_gddr5) {
2926		memory_level->StrobeRatio =
2927			si_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
2928		if (pi->mclk_edc_enable_threshold &&
2929		    (memory_clock > pi->mclk_edc_enable_threshold))
2930			memory_level->EdcReadEnable = true;
2931
2932		if (pi->mclk_edc_wr_enable_threshold &&
2933		    (memory_clock > pi->mclk_edc_wr_enable_threshold))
2934			memory_level->EdcWriteEnable = true;
2935
2936		if (memory_level->StrobeEnable) {
2937			if (si_get_mclk_frequency_ratio(memory_clock, true) >=
2938			    ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
2939				dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
2940			else
2941				dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
2942		} else {
2943			dll_state_on = pi->dll_default_on;
2944		}
2945	} else {
2946		memory_level->StrobeRatio = si_get_ddr3_mclk_frequency_ratio(memory_clock);
2947		dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
2948	}
2949
2950	ret = ci_calculate_mclk_params(rdev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
2951	if (ret)
2952		return ret;
2953
2954	memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
2955	memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
2956	memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
2957	memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);
2958
2959	memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
2960	memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
2961	memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
2962	memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
2963	memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
2964	memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
2965	memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
2966	memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
2967	memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
2968	memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
2969	memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);
2970
2971	return 0;
2972}
2973
2974static int ci_populate_smc_acpi_level(struct radeon_device *rdev,
2975				      SMU7_Discrete_DpmTable *table)
2976{
2977	struct ci_power_info *pi = ci_get_pi(rdev);
2978	struct atom_clock_dividers dividers;
2979	SMU7_Discrete_VoltageLevel voltage_level;
2980	u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
2981	u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
2982	u32 dll_cntl = pi->clock_registers.dll_cntl;
2983	u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
2984	int ret;
2985
2986	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
2987
2988	if (pi->acpi_vddc)
2989		table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
2990	else
2991		table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);
2992
2993	table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;
2994
2995	table->ACPILevel.SclkFrequency = rdev->clock.spll.reference_freq;
2996
2997	ret = radeon_atom_get_clock_dividers(rdev,
2998					     COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
2999					     table->ACPILevel.SclkFrequency, false, &dividers);
3000	if (ret)
3001		return ret;
3002
3003	table->ACPILevel.SclkDid = (u8)dividers.post_divider;
3004	table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
3005	table->ACPILevel.DeepSleepDivId = 0;
3006
3007	spll_func_cntl &= ~SPLL_PWRON;
3008	spll_func_cntl |= SPLL_RESET;
3009
3010	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
3011	spll_func_cntl_2 |= SCLK_MUX_SEL(4);
3012
3013	table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
3014	table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
3015	table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
3016	table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
3017	table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
3018	table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
3019	table->ACPILevel.CcPwrDynRm = 0;
3020	table->ACPILevel.CcPwrDynRm1 = 0;
3021
3022	table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
3023	table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
3024	table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
3025	table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
3026	table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
3027	table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
3028	table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
3029	table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
3030	table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
3031	table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
3032	table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);
3033
3034	table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
3035	table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
3036
3037	if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
3038		if (pi->acpi_vddci)
3039			table->MemoryACPILevel.MinVddci =
3040				cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
3041		else
3042			table->MemoryACPILevel.MinVddci =
3043				cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
3044	}
3045
3046	if (ci_populate_mvdd_value(rdev, 0, &voltage_level))
3047		table->MemoryACPILevel.MinMvdd = 0;
3048	else
3049		table->MemoryACPILevel.MinMvdd =
3050			cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);
3051
3052	mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
3053	mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
3054
3055	dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);
3056
3057	table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
3058	table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
3059	table->MemoryACPILevel.MpllAdFuncCntl =
3060		cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
3061	table->MemoryACPILevel.MpllDqFuncCntl =
3062		cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
3063	table->MemoryACPILevel.MpllFuncCntl =
3064		cpu_to_be32(pi->clock_registers.mpll_func_cntl);
3065	table->MemoryACPILevel.MpllFuncCntl_1 =
3066		cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
3067	table->MemoryACPILevel.MpllFuncCntl_2 =
3068		cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
3069	table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
3070	table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);
3071
3072	table->MemoryACPILevel.EnabledForThrottle = 0;
3073	table->MemoryACPILevel.EnabledForActivity = 0;
3074	table->MemoryACPILevel.UpH = 0;
3075	table->MemoryACPILevel.DownH = 100;
3076	table->MemoryACPILevel.VoltageDownH = 0;
3077	table->MemoryACPILevel.ActivityLevel =
3078		cpu_to_be16((u16)pi->mclk_activity_target);
3079
3080	table->MemoryACPILevel.StutterEnable = false;
3081	table->MemoryACPILevel.StrobeEnable = false;
3082	table->MemoryACPILevel.EdcReadEnable = false;
3083	table->MemoryACPILevel.EdcWriteEnable = false;
3084	table->MemoryACPILevel.RttEnable = false;
3085
3086	return 0;
3087}
3088
3089
3090static int ci_enable_ulv(struct radeon_device *rdev, bool enable)
3091{
3092	struct ci_power_info *pi = ci_get_pi(rdev);
3093	struct ci_ulv_parm *ulv = &pi->ulv;
3094
3095	if (ulv->supported) {
3096		if (enable)
3097			return (ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
3098				0 : -EINVAL;
3099		else
3100			return (ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
3101				0 : -EINVAL;
3102	}
3103
3104	return 0;
3105}
3106
3107static int ci_populate_ulv_level(struct radeon_device *rdev,
3108				 SMU7_Discrete_Ulv *state)
3109{
3110	struct ci_power_info *pi = ci_get_pi(rdev);
3111	u16 ulv_voltage = rdev->pm.dpm.backbias_response_time;
3112
3113	state->CcPwrDynRm = 0;
3114	state->CcPwrDynRm1 = 0;
3115
3116	if (ulv_voltage == 0) {
3117		pi->ulv.supported = false;
3118		return 0;
3119	}
3120
3121	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
3122		if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
3123			state->VddcOffset = 0;
3124		else
3125			state->VddcOffset =
3126				rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
3127	} else {
3128		if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
3129			state->VddcOffsetVid = 0;
3130		else
3131			state->VddcOffsetVid = (u8)
3132				((rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
3133				 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
3134	}
3135	state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;
3136
3137	state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
3138	state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
3139	state->VddcOffset = cpu_to_be16(state->VddcOffset);
3140
3141	return 0;
3142}
3143
3144static int ci_calculate_sclk_params(struct radeon_device *rdev,
3145				    u32 engine_clock,
3146				    SMU7_Discrete_GraphicsLevel *sclk)
3147{
3148	struct ci_power_info *pi = ci_get_pi(rdev);
3149	struct atom_clock_dividers dividers;
3150	u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
3151	u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
3152	u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
3153	u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
3154	u32 reference_clock = rdev->clock.spll.reference_freq;
3155	u32 reference_divider;
3156	u32 fbdiv;
3157	int ret;
3158
3159	ret = radeon_atom_get_clock_dividers(rdev,
3160					     COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
3161					     engine_clock, false, &dividers);
3162	if (ret)
3163		return ret;
3164
3165	reference_divider = 1 + dividers.ref_div;
3166	fbdiv = dividers.fb_div & 0x3FFFFFF;
3167
3168	spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
3169	spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
3170	spll_func_cntl_3 |= SPLL_DITHEN;
3171
3172	if (pi->caps_sclk_ss_support) {
3173		struct radeon_atom_ss ss;
3174		u32 vco_freq = engine_clock * dividers.post_div;
3175
3176		if (radeon_atombios_get_asic_ss_info(rdev, &ss,
3177						     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
3178			u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
3179			u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
3180
3181			cg_spll_spread_spectrum &= ~CLK_S_MASK;
3182			cg_spll_spread_spectrum |= CLK_S(clk_s);
3183			cg_spll_spread_spectrum |= SSEN;
3184
3185			cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
3186			cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
3187		}
3188	}
3189
3190	sclk->SclkFrequency = engine_clock;
3191	sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
3192	sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
3193	sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
3194	sclk->SpllSpreadSpectrum2  = cg_spll_spread_spectrum_2;
3195	sclk->SclkDid = (u8)dividers.post_divider;
3196
3197	return 0;
3198}
3199
3200static int ci_populate_single_graphic_level(struct radeon_device *rdev,
3201					    u32 engine_clock,
3202					    u16 sclk_activity_level_t,
3203					    SMU7_Discrete_GraphicsLevel *graphic_level)
3204{
3205	struct ci_power_info *pi = ci_get_pi(rdev);
3206	int ret;
3207
3208	ret = ci_calculate_sclk_params(rdev, engine_clock, graphic_level);
3209	if (ret)
3210		return ret;
3211
3212	ret = ci_get_dependency_volt_by_clk(rdev,
3213					    &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
3214					    engine_clock, &graphic_level->MinVddc);
3215	if (ret)
3216		return ret;
3217
3218	graphic_level->SclkFrequency = engine_clock;
3219
3220	graphic_level->Flags =  0;
3221	graphic_level->MinVddcPhases = 1;
3222
3223	if (pi->vddc_phase_shed_control)
3224		ci_populate_phase_value_based_on_sclk(rdev,
3225						      &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
3226						      engine_clock,
3227						      &graphic_level->MinVddcPhases);
3228
3229	graphic_level->ActivityLevel = sclk_activity_level_t;
3230
3231	graphic_level->CcPwrDynRm = 0;
3232	graphic_level->CcPwrDynRm1 = 0;
3233	graphic_level->EnabledForThrottle = 1;
3234	graphic_level->UpH = 0;
3235	graphic_level->DownH = 0;
3236	graphic_level->VoltageDownH = 0;
3237	graphic_level->PowerThrottle = 0;
3238
3239	if (pi->caps_sclk_ds)
3240		graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(rdev,
3241										   engine_clock,
3242										   CISLAND_MINIMUM_ENGINE_CLOCK);
3243
3244	graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
3245
3246	graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
3247	graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
3248	graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
3249	graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
3250	graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
3251	graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
3252	graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
3253	graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
3254	graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
3255	graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
3256	graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);
3257
3258	return 0;
3259}
3260
3261static int ci_populate_all_graphic_levels(struct radeon_device *rdev)
3262{
3263	struct ci_power_info *pi = ci_get_pi(rdev);
3264	struct ci_dpm_table *dpm_table = &pi->dpm_table;
3265	u32 level_array_address = pi->dpm_table_start +
3266		offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
3267	u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
3268		SMU7_MAX_LEVELS_GRAPHICS;
3269	SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
3270	u32 i, ret;
3271
3272	memset(levels, 0, level_array_size);
3273
3274	for (i = 0; i < dpm_table->sclk_table.count; i++) {
3275		ret = ci_populate_single_graphic_level(rdev,
3276						       dpm_table->sclk_table.dpm_levels[i].value,
3277						       (u16)pi->activity_target[i],
3278						       &pi->smc_state_table.GraphicsLevel[i]);
3279		if (ret)
3280			return ret;
3281		if (i > 1)
3282			pi->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
3283		if (i == (dpm_table->sclk_table.count - 1))
3284			pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
3285				PPSMC_DISPLAY_WATERMARK_HIGH;
3286	}
3287	pi->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
3288
3289	pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
3290	pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
3291		ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
3292
3293	ret = ci_copy_bytes_to_smc(rdev, level_array_address,
3294				   (u8 *)levels, level_array_size,
3295				   pi->sram_end);
3296	if (ret)
3297		return ret;
3298
3299	return 0;
3300}
3301
3302static int ci_populate_ulv_state(struct radeon_device *rdev,
3303				 SMU7_Discrete_Ulv *ulv_level)
3304{
3305	return ci_populate_ulv_level(rdev, ulv_level);
3306}
3307
3308static int ci_populate_all_memory_levels(struct radeon_device *rdev)
3309{
3310	struct ci_power_info *pi = ci_get_pi(rdev);
3311	struct ci_dpm_table *dpm_table = &pi->dpm_table;
3312	u32 level_array_address = pi->dpm_table_start +
3313		offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
3314	u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
3315		SMU7_MAX_LEVELS_MEMORY;
3316	SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
3317	u32 i, ret;
3318
3319	memset(levels, 0, level_array_size);
3320
3321	for (i = 0; i < dpm_table->mclk_table.count; i++) {
3322		if (dpm_table->mclk_table.dpm_levels[i].value == 0)
3323			return -EINVAL;
3324		ret = ci_populate_single_memory_level(rdev,
3325						      dpm_table->mclk_table.dpm_levels[i].value,
3326						      &pi->smc_state_table.MemoryLevel[i]);
3327		if (ret)
3328			return ret;
3329	}
3330
3331	pi->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
3332
3333	if ((dpm_table->mclk_table.count >= 2) &&
3334	    ((rdev->pdev->device == 0x67B0) || (rdev->pdev->device == 0x67B1))) {
3335		pi->smc_state_table.MemoryLevel[1].MinVddc =
3336			pi->smc_state_table.MemoryLevel[0].MinVddc;
3337		pi->smc_state_table.MemoryLevel[1].MinVddcPhases =
3338			pi->smc_state_table.MemoryLevel[0].MinVddcPhases;
3339	}
3340
3341	pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
3342
3343	pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
3344	pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
3345		ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
3346
3347	pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
3348		PPSMC_DISPLAY_WATERMARK_HIGH;
3349
3350	ret = ci_copy_bytes_to_smc(rdev, level_array_address,
3351				   (u8 *)levels, level_array_size,
3352				   pi->sram_end);
3353	if (ret)
3354		return ret;
3355
3356	return 0;
3357}
3358
3359static void ci_reset_single_dpm_table(struct radeon_device *rdev,
3360				      struct ci_single_dpm_table* dpm_table,
3361				      u32 count)
3362{
3363	u32 i;
3364
3365	dpm_table->count = count;
3366	for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
3367		dpm_table->dpm_levels[i].enabled = false;
3368}
3369
3370static void ci_setup_pcie_table_entry(struct ci_single_dpm_table* dpm_table,
3371				      u32 index, u32 pcie_gen, u32 pcie_lanes)
3372{
3373	dpm_table->dpm_levels[index].value = pcie_gen;
3374	dpm_table->dpm_levels[index].param1 = pcie_lanes;
3375	dpm_table->dpm_levels[index].enabled = true;
3376}
3377
3378static int ci_setup_default_pcie_tables(struct radeon_device *rdev)
3379{
3380	struct ci_power_info *pi = ci_get_pi(rdev);
3381
3382	if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
3383		return -EINVAL;
3384
3385	if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
3386		pi->pcie_gen_powersaving = pi->pcie_gen_performance;
3387		pi->pcie_lane_powersaving = pi->pcie_lane_performance;
3388	} else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
3389		pi->pcie_gen_performance = pi->pcie_gen_powersaving;
3390		pi->pcie_lane_performance = pi->pcie_lane_powersaving;
3391	}
3392
3393	ci_reset_single_dpm_table(rdev,
3394				  &pi->dpm_table.pcie_speed_table,
3395				  SMU7_MAX_LEVELS_LINK);
3396
3397	if (rdev->family == CHIP_BONAIRE)
3398		ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
3399					  pi->pcie_gen_powersaving.min,
3400					  pi->pcie_lane_powersaving.max);
3401	else
3402		ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
3403					  pi->pcie_gen_powersaving.min,
3404					  pi->pcie_lane_powersaving.min);
3405	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
3406				  pi->pcie_gen_performance.min,
3407				  pi->pcie_lane_performance.min);
3408	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
3409				  pi->pcie_gen_powersaving.min,
3410				  pi->pcie_lane_powersaving.max);
3411	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
3412				  pi->pcie_gen_performance.min,
3413				  pi->pcie_lane_performance.max);
3414	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
3415				  pi->pcie_gen_powersaving.max,
3416				  pi->pcie_lane_powersaving.max);
3417	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
3418				  pi->pcie_gen_performance.max,
3419				  pi->pcie_lane_performance.max);
3420
3421	pi->dpm_table.pcie_speed_table.count = 6;
3422
3423	return 0;
3424}
3425
3426static int ci_setup_default_dpm_tables(struct radeon_device *rdev)
3427{
3428	struct ci_power_info *pi = ci_get_pi(rdev);
3429	struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
3430		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3431	struct radeon_clock_voltage_dependency_table *allowed_mclk_table =
3432		&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
3433	struct radeon_cac_leakage_table *std_voltage_table =
3434		&rdev->pm.dpm.dyn_state.cac_leakage_table;
3435	u32 i;
3436
3437	if (allowed_sclk_vddc_table == NULL)
3438		return -EINVAL;
3439	if (allowed_sclk_vddc_table->count < 1)
3440		return -EINVAL;
3441	if (allowed_mclk_table == NULL)
3442		return -EINVAL;
3443	if (allowed_mclk_table->count < 1)
3444		return -EINVAL;
3445
3446	memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));
3447
3448	ci_reset_single_dpm_table(rdev,
3449				  &pi->dpm_table.sclk_table,
3450				  SMU7_MAX_LEVELS_GRAPHICS);
3451	ci_reset_single_dpm_table(rdev,
3452				  &pi->dpm_table.mclk_table,
3453				  SMU7_MAX_LEVELS_MEMORY);
3454	ci_reset_single_dpm_table(rdev,
3455				  &pi->dpm_table.vddc_table,
3456				  SMU7_MAX_LEVELS_VDDC);
3457	ci_reset_single_dpm_table(rdev,
3458				  &pi->dpm_table.vddci_table,
3459				  SMU7_MAX_LEVELS_VDDCI);
3460	ci_reset_single_dpm_table(rdev,
3461				  &pi->dpm_table.mvdd_table,
3462				  SMU7_MAX_LEVELS_MVDD);
3463
3464	pi->dpm_table.sclk_table.count = 0;
3465	for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3466		if ((i == 0) ||
3467		    (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
3468		     allowed_sclk_vddc_table->entries[i].clk)) {
3469			pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
3470				allowed_sclk_vddc_table->entries[i].clk;
3471			pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled =
3472				(i == 0) ? true : false;
3473			pi->dpm_table.sclk_table.count++;
3474		}
3475	}
3476
3477	pi->dpm_table.mclk_table.count = 0;
3478	for (i = 0; i < allowed_mclk_table->count; i++) {
3479		if ((i == 0) ||
3480		    (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
3481		     allowed_mclk_table->entries[i].clk)) {
3482			pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
3483				allowed_mclk_table->entries[i].clk;
3484			pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled =
3485				(i == 0) ? true : false;
3486			pi->dpm_table.mclk_table.count++;
3487		}
3488	}
3489
3490	for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3491		pi->dpm_table.vddc_table.dpm_levels[i].value =
3492			allowed_sclk_vddc_table->entries[i].v;
3493		pi->dpm_table.vddc_table.dpm_levels[i].param1 =
3494			std_voltage_table->entries[i].leakage;
3495		pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
3496	}
3497	pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;
3498
3499	allowed_mclk_table = &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
3500	if (allowed_mclk_table) {
3501		for (i = 0; i < allowed_mclk_table->count; i++) {
3502			pi->dpm_table.vddci_table.dpm_levels[i].value =
3503				allowed_mclk_table->entries[i].v;
3504			pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
3505		}
3506		pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
3507	}
3508
3509	allowed_mclk_table = &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
3510	if (allowed_mclk_table) {
3511		for (i = 0; i < allowed_mclk_table->count; i++) {
3512			pi->dpm_table.mvdd_table.dpm_levels[i].value =
3513				allowed_mclk_table->entries[i].v;
3514			pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
3515		}
3516		pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
3517	}
3518
3519	ci_setup_default_pcie_tables(rdev);
3520
3521	return 0;
3522}
3523
3524static int ci_find_boot_level(struct ci_single_dpm_table *table,
3525			      u32 value, u32 *boot_level)
3526{
3527	u32 i;
3528	int ret = -EINVAL;
3529
3530	for(i = 0; i < table->count; i++) {
3531		if (value == table->dpm_levels[i].value) {
3532			*boot_level = i;
3533			ret = 0;
3534		}
3535	}
3536
3537	return ret;
3538}
3539
3540static int ci_init_smc_table(struct radeon_device *rdev)
3541{
3542	struct ci_power_info *pi = ci_get_pi(rdev);
3543	struct ci_ulv_parm *ulv = &pi->ulv;
3544	struct radeon_ps *radeon_boot_state = rdev->pm.dpm.boot_ps;
3545	SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
3546	int ret;
3547
3548	ret = ci_setup_default_dpm_tables(rdev);
3549	if (ret)
3550		return ret;
3551
3552	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
3553		ci_populate_smc_voltage_tables(rdev, table);
3554
3555	ci_init_fps_limits(rdev);
3556
3557	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
3558		table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
3559
3560	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
3561		table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
3562
3563	if (pi->mem_gddr5)
3564		table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
3565
3566	if (ulv->supported) {
3567		ret = ci_populate_ulv_state(rdev, &pi->smc_state_table.Ulv);
3568		if (ret)
3569			return ret;
3570		WREG32_SMC(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);
3571	}
3572
3573	ret = ci_populate_all_graphic_levels(rdev);
3574	if (ret)
3575		return ret;
3576
3577	ret = ci_populate_all_memory_levels(rdev);
3578	if (ret)
3579		return ret;
3580
3581	ci_populate_smc_link_level(rdev, table);
3582
3583	ret = ci_populate_smc_acpi_level(rdev, table);
3584	if (ret)
3585		return ret;
3586
3587	ret = ci_populate_smc_vce_level(rdev, table);
3588	if (ret)
3589		return ret;
3590
3591	ret = ci_populate_smc_acp_level(rdev, table);
3592	if (ret)
3593		return ret;
3594
3595	ret = ci_populate_smc_samu_level(rdev, table);
3596	if (ret)
3597		return ret;
3598
3599	ret = ci_do_program_memory_timing_parameters(rdev);
3600	if (ret)
3601		return ret;
3602
3603	ret = ci_populate_smc_uvd_level(rdev, table);
3604	if (ret)
3605		return ret;
3606
3607	table->UvdBootLevel  = 0;
3608	table->VceBootLevel  = 0;
3609	table->AcpBootLevel  = 0;
3610	table->SamuBootLevel  = 0;
3611	table->GraphicsBootLevel  = 0;
3612	table->MemoryBootLevel  = 0;
3613
3614	ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
3615				 pi->vbios_boot_state.sclk_bootup_value,
3616				 (u32 *)&pi->smc_state_table.GraphicsBootLevel);
3617
3618	ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
3619				 pi->vbios_boot_state.mclk_bootup_value,
3620				 (u32 *)&pi->smc_state_table.MemoryBootLevel);
3621
3622	table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
3623	table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
3624	table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;
3625
3626	ci_populate_smc_initial_state(rdev, radeon_boot_state);
3627
3628	ret = ci_populate_bapm_parameters_in_dpm_table(rdev);
3629	if (ret)
3630		return ret;
3631
3632	table->UVDInterval = 1;
3633	table->VCEInterval = 1;
3634	table->ACPInterval = 1;
3635	table->SAMUInterval = 1;
3636	table->GraphicsVoltageChangeEnable = 1;
3637	table->GraphicsThermThrottleEnable = 1;
3638	table->GraphicsInterval = 1;
3639	table->VoltageInterval = 1;
3640	table->ThermalInterval = 1;
3641	table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
3642					     CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3643	table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
3644					    CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3645	table->MemoryVoltageChangeEnable = 1;
3646	table->MemoryInterval = 1;
3647	table->VoltageResponseTime = 0;
3648	table->VddcVddciDelta = 4000;
3649	table->PhaseResponseTime = 0;
3650	table->MemoryThermThrottleEnable = 1;
3651	table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1;
3652	table->PCIeGenInterval = 1;
3653	if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
3654		table->SVI2Enable  = 1;
3655	else
3656		table->SVI2Enable  = 0;
3657
3658	table->ThermGpio = 17;
3659	table->SclkStepSize = 0x4000;
3660
3661	table->SystemFlags = cpu_to_be32(table->SystemFlags);
3662	table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
3663	table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
3664	table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
3665	table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
3666	table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
3667	table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
3668	table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
3669	table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
3670	table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
3671	table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
3672	table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
3673	table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
3674	table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);
3675
3676	ret = ci_copy_bytes_to_smc(rdev,
3677				   pi->dpm_table_start +
3678				   offsetof(SMU7_Discrete_DpmTable, SystemFlags),
3679				   (u8 *)&table->SystemFlags,
3680				   sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
3681				   pi->sram_end);
3682	if (ret)
3683		return ret;
3684
3685	return 0;
3686}
3687
3688static void ci_trim_single_dpm_states(struct radeon_device *rdev,
3689				      struct ci_single_dpm_table *dpm_table,
3690				      u32 low_limit, u32 high_limit)
3691{
3692	u32 i;
3693
3694	for (i = 0; i < dpm_table->count; i++) {
3695		if ((dpm_table->dpm_levels[i].value < low_limit) ||
3696		    (dpm_table->dpm_levels[i].value > high_limit))
3697			dpm_table->dpm_levels[i].enabled = false;
3698		else
3699			dpm_table->dpm_levels[i].enabled = true;
3700	}
3701}
3702
3703static void ci_trim_pcie_dpm_states(struct radeon_device *rdev,
3704				    u32 speed_low, u32 lanes_low,
3705				    u32 speed_high, u32 lanes_high)
3706{
3707	struct ci_power_info *pi = ci_get_pi(rdev);
3708	struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
3709	u32 i, j;
3710
3711	for (i = 0; i < pcie_table->count; i++) {
3712		if ((pcie_table->dpm_levels[i].value < speed_low) ||
3713		    (pcie_table->dpm_levels[i].param1 < lanes_low) ||
3714		    (pcie_table->dpm_levels[i].value > speed_high) ||
3715		    (pcie_table->dpm_levels[i].param1 > lanes_high))
3716			pcie_table->dpm_levels[i].enabled = false;
3717		else
3718			pcie_table->dpm_levels[i].enabled = true;
3719	}
3720
3721	for (i = 0; i < pcie_table->count; i++) {
3722		if (pcie_table->dpm_levels[i].enabled) {
3723			for (j = i + 1; j < pcie_table->count; j++) {
3724				if (pcie_table->dpm_levels[j].enabled) {
3725					if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
3726					    (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
3727						pcie_table->dpm_levels[j].enabled = false;
3728				}
3729			}
3730		}
3731	}
3732}
3733
3734static int ci_trim_dpm_states(struct radeon_device *rdev,
3735			      struct radeon_ps *radeon_state)
3736{
3737	struct ci_ps *state = ci_get_ps(radeon_state);
3738	struct ci_power_info *pi = ci_get_pi(rdev);
3739	u32 high_limit_count;
3740
3741	if (state->performance_level_count < 1)
3742		return -EINVAL;
3743
3744	if (state->performance_level_count == 1)
3745		high_limit_count = 0;
3746	else
3747		high_limit_count = 1;
3748
3749	ci_trim_single_dpm_states(rdev,
3750				  &pi->dpm_table.sclk_table,
3751				  state->performance_levels[0].sclk,
3752				  state->performance_levels[high_limit_count].sclk);
3753
3754	ci_trim_single_dpm_states(rdev,
3755				  &pi->dpm_table.mclk_table,
3756				  state->performance_levels[0].mclk,
3757				  state->performance_levels[high_limit_count].mclk);
3758
3759	ci_trim_pcie_dpm_states(rdev,
3760				state->performance_levels[0].pcie_gen,
3761				state->performance_levels[0].pcie_lane,
3762				state->performance_levels[high_limit_count].pcie_gen,
3763				state->performance_levels[high_limit_count].pcie_lane);
3764
3765	return 0;
3766}
3767
3768static int ci_apply_disp_minimum_voltage_request(struct radeon_device *rdev)
3769{
3770	struct radeon_clock_voltage_dependency_table *disp_voltage_table =
3771		&rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
3772	struct radeon_clock_voltage_dependency_table *vddc_table =
3773		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3774	u32 requested_voltage = 0;
3775	u32 i;
3776
3777	if (disp_voltage_table == NULL)
3778		return -EINVAL;
3779	if (!disp_voltage_table->count)
3780		return -EINVAL;
3781
3782	for (i = 0; i < disp_voltage_table->count; i++) {
3783		if (rdev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
3784			requested_voltage = disp_voltage_table->entries[i].v;
3785	}
3786
3787	for (i = 0; i < vddc_table->count; i++) {
3788		if (requested_voltage <= vddc_table->entries[i].v) {
3789			requested_voltage = vddc_table->entries[i].v;
3790			return (ci_send_msg_to_smc_with_parameter(rdev,
3791								  PPSMC_MSG_VddC_Request,
3792								  requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
3793				0 : -EINVAL;
3794		}
3795	}
3796
3797	return -EINVAL;
3798}
3799
3800static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev)
3801{
3802	struct ci_power_info *pi = ci_get_pi(rdev);
3803	PPSMC_Result result;
3804
3805	ci_apply_disp_minimum_voltage_request(rdev);
3806
3807	if (!pi->sclk_dpm_key_disabled) {
3808		if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3809			result = ci_send_msg_to_smc_with_parameter(rdev,
3810								   PPSMC_MSG_SCLKDPM_SetEnabledMask,
3811								   pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
3812			if (result != PPSMC_Result_OK)
3813				return -EINVAL;
3814		}
3815	}
3816
3817	if (!pi->mclk_dpm_key_disabled) {
3818		if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3819			result = ci_send_msg_to_smc_with_parameter(rdev,
3820								   PPSMC_MSG_MCLKDPM_SetEnabledMask,
3821								   pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3822			if (result != PPSMC_Result_OK)
3823				return -EINVAL;
3824		}
3825	}
3826#if 0
3827	if (!pi->pcie_dpm_key_disabled) {
3828		if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3829			result = ci_send_msg_to_smc_with_parameter(rdev,
3830								   PPSMC_MSG_PCIeDPM_SetEnabledMask,
3831								   pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
3832			if (result != PPSMC_Result_OK)
3833				return -EINVAL;
3834		}
3835	}
3836#endif
3837	return 0;
3838}
3839
3840static void ci_find_dpm_states_clocks_in_dpm_table(struct radeon_device *rdev,
3841						   struct radeon_ps *radeon_state)
3842{
3843	struct ci_power_info *pi = ci_get_pi(rdev);
3844	struct ci_ps *state = ci_get_ps(radeon_state);
3845	struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
3846	u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
3847	struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
3848	u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
3849	u32 i;
3850
3851	pi->need_update_smu7_dpm_table = 0;
3852
3853	for (i = 0; i < sclk_table->count; i++) {
3854		if (sclk == sclk_table->dpm_levels[i].value)
3855			break;
3856	}
3857
3858	if (i >= sclk_table->count) {
3859		pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
3860	} else {
3861		/* XXX The current code always reprogrammed the sclk levels,
3862		 * but we don't currently handle disp sclk requirements
3863		 * so just skip it.
3864		 */
3865		if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK)
3866			pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
3867	}
3868
3869	for (i = 0; i < mclk_table->count; i++) {
3870		if (mclk == mclk_table->dpm_levels[i].value)
3871			break;
3872	}
3873
3874	if (i >= mclk_table->count)
3875		pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
3876
3877	if (rdev->pm.dpm.current_active_crtc_count !=
3878	    rdev->pm.dpm.new_active_crtc_count)
3879		pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
3880}
3881
3882static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct radeon_device *rdev,
3883						       struct radeon_ps *radeon_state)
3884{
3885	struct ci_power_info *pi = ci_get_pi(rdev);
3886	struct ci_ps *state = ci_get_ps(radeon_state);
3887	u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
3888	u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
3889	struct ci_dpm_table *dpm_table = &pi->dpm_table;
3890	int ret;
3891
3892	if (!pi->need_update_smu7_dpm_table)
3893		return 0;
3894
3895	if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
3896		dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;
3897
3898	if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
3899		dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;
3900
3901	if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
3902		ret = ci_populate_all_graphic_levels(rdev);
3903		if (ret)
3904			return ret;
3905	}
3906
3907	if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
3908		ret = ci_populate_all_memory_levels(rdev);
3909		if (ret)
3910			return ret;
3911	}
3912
3913	return 0;
3914}
3915
3916static int ci_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
3917{
3918	struct ci_power_info *pi = ci_get_pi(rdev);
3919	const struct radeon_clock_and_voltage_limits *max_limits;
3920	int i;
3921
3922	if (rdev->pm.dpm.ac_power)
3923		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3924	else
3925		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3926
3927	if (enable) {
3928		pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
3929
3930		for (i = rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3931			if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3932				pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
3933
3934				if (!pi->caps_uvd_dpm)
3935					break;
3936			}
3937		}
3938
3939		ci_send_msg_to_smc_with_parameter(rdev,
3940						  PPSMC_MSG_UVDDPM_SetEnabledMask,
3941						  pi->dpm_level_enable_mask.uvd_dpm_enable_mask);
3942
3943		if (pi->last_mclk_dpm_enable_mask & 0x1) {
3944			pi->uvd_enabled = true;
3945			pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
3946			ci_send_msg_to_smc_with_parameter(rdev,
3947							  PPSMC_MSG_MCLKDPM_SetEnabledMask,
3948							  pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3949		}
3950	} else {
3951		if (pi->last_mclk_dpm_enable_mask & 0x1) {
3952			pi->uvd_enabled = false;
3953			pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
3954			ci_send_msg_to_smc_with_parameter(rdev,
3955							  PPSMC_MSG_MCLKDPM_SetEnabledMask,
3956							  pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3957		}
3958	}
3959
3960	return (ci_send_msg_to_smc(rdev, enable ?
3961				   PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
3962		0 : -EINVAL;
3963}
3964
3965static int ci_enable_vce_dpm(struct radeon_device *rdev, bool enable)
3966{
3967	struct ci_power_info *pi = ci_get_pi(rdev);
3968	const struct radeon_clock_and_voltage_limits *max_limits;
3969	int i;
3970
3971	if (rdev->pm.dpm.ac_power)
3972		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3973	else
3974		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3975
3976	if (enable) {
3977		pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
3978		for (i = rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3979			if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3980				pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
3981
3982				if (!pi->caps_vce_dpm)
3983					break;
3984			}
3985		}
3986
3987		ci_send_msg_to_smc_with_parameter(rdev,
3988						  PPSMC_MSG_VCEDPM_SetEnabledMask,
3989						  pi->dpm_level_enable_mask.vce_dpm_enable_mask);
3990	}
3991
3992	return (ci_send_msg_to_smc(rdev, enable ?
3993				   PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
3994		0 : -EINVAL;
3995}
3996
3997#if 0
3998static int ci_enable_samu_dpm(struct radeon_device *rdev, bool enable)
3999{
4000	struct ci_power_info *pi = ci_get_pi(rdev);
4001	const struct radeon_clock_and_voltage_limits *max_limits;
4002	int i;
4003
4004	if (rdev->pm.dpm.ac_power)
4005		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4006	else
4007		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4008
4009	if (enable) {
4010		pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
4011		for (i = rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4012			if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4013				pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;
4014
4015				if (!pi->caps_samu_dpm)
4016					break;
4017			}
4018		}
4019
4020		ci_send_msg_to_smc_with_parameter(rdev,
4021						  PPSMC_MSG_SAMUDPM_SetEnabledMask,
4022						  pi->dpm_level_enable_mask.samu_dpm_enable_mask);
4023	}
4024	return (ci_send_msg_to_smc(rdev, enable ?
4025				   PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
4026		0 : -EINVAL;
4027}
4028
4029static int ci_enable_acp_dpm(struct radeon_device *rdev, bool enable)
4030{
4031	struct ci_power_info *pi = ci_get_pi(rdev);
4032	const struct radeon_clock_and_voltage_limits *max_limits;
4033	int i;
4034
4035	if (rdev->pm.dpm.ac_power)
4036		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4037	else
4038		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4039
4040	if (enable) {
4041		pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
4042		for (i = rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4043			if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4044				pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;
4045
4046				if (!pi->caps_acp_dpm)
4047					break;
4048			}
4049		}
4050
4051		ci_send_msg_to_smc_with_parameter(rdev,
4052						  PPSMC_MSG_ACPDPM_SetEnabledMask,
4053						  pi->dpm_level_enable_mask.acp_dpm_enable_mask);
4054	}
4055
4056	return (ci_send_msg_to_smc(rdev, enable ?
4057				   PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
4058		0 : -EINVAL;
4059}
4060#endif
4061
4062static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate)
4063{
4064	struct ci_power_info *pi = ci_get_pi(rdev);
4065	u32 tmp;
4066
4067	if (!gate) {
4068		if (pi->caps_uvd_dpm ||
4069		    (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
4070			pi->smc_state_table.UvdBootLevel = 0;
4071		else
4072			pi->smc_state_table.UvdBootLevel =
4073				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;
4074
4075		tmp = RREG32_SMC(DPM_TABLE_475);
4076		tmp &= ~UvdBootLevel_MASK;
4077		tmp |= UvdBootLevel(pi->smc_state_table.UvdBootLevel);
4078		WREG32_SMC(DPM_TABLE_475, tmp);
4079	}
4080
4081	return ci_enable_uvd_dpm(rdev, !gate);
4082}
4083
4084static u8 ci_get_vce_boot_level(struct radeon_device *rdev)
4085{
4086	u8 i;
4087	u32 min_evclk = 30000; /* ??? */
4088	struct radeon_vce_clock_voltage_dependency_table *table =
4089		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
4090
4091	for (i = 0; i < table->count; i++) {
4092		if (table->entries[i].evclk >= min_evclk)
4093			return i;
4094	}
4095
4096	return table->count - 1;
4097}
4098
4099static int ci_update_vce_dpm(struct radeon_device *rdev,
4100			     struct radeon_ps *radeon_new_state,
4101			     struct radeon_ps *radeon_current_state)
4102{
4103	struct ci_power_info *pi = ci_get_pi(rdev);
4104	int ret = 0;
4105	u32 tmp;
4106
4107	if (radeon_current_state->evclk != radeon_new_state->evclk) {
4108		if (radeon_new_state->evclk) {
4109			/* turn the clocks on when encoding */
4110			cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, false);
4111
4112			pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(rdev);
4113			tmp = RREG32_SMC(DPM_TABLE_475);
4114			tmp &= ~VceBootLevel_MASK;
4115			tmp |= VceBootLevel(pi->smc_state_table.VceBootLevel);
4116			WREG32_SMC(DPM_TABLE_475, tmp);
4117
4118			ret = ci_enable_vce_dpm(rdev, true);
4119		} else {
4120			/* turn the clocks off when not encoding */
4121			cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, true);
4122
4123			ret = ci_enable_vce_dpm(rdev, false);
4124		}
4125	}
4126	return ret;
4127}
4128
4129#if 0
4130static int ci_update_samu_dpm(struct radeon_device *rdev, bool gate)
4131{
4132	return ci_enable_samu_dpm(rdev, gate);
4133}
4134
4135static int ci_update_acp_dpm(struct radeon_device *rdev, bool gate)
4136{
4137	struct ci_power_info *pi = ci_get_pi(rdev);
4138	u32 tmp;
4139
4140	if (!gate) {
4141		pi->smc_state_table.AcpBootLevel = 0;
4142
4143		tmp = RREG32_SMC(DPM_TABLE_475);
4144		tmp &= ~AcpBootLevel_MASK;
4145		tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
4146		WREG32_SMC(DPM_TABLE_475, tmp);
4147	}
4148
4149	return ci_enable_acp_dpm(rdev, !gate);
4150}
4151#endif
4152
4153static int ci_generate_dpm_level_enable_mask(struct radeon_device *rdev,
4154					     struct radeon_ps *radeon_state)
4155{
4156	struct ci_power_info *pi = ci_get_pi(rdev);
4157	int ret;
4158
4159	ret = ci_trim_dpm_states(rdev, radeon_state);
4160	if (ret)
4161		return ret;
4162
4163	pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
4164		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
4165	pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
4166		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
4167	pi->last_mclk_dpm_enable_mask =
4168		pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
4169	if (pi->uvd_enabled) {
4170		if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
4171			pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
4172	}
4173	pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
4174		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);
4175
4176	return 0;
4177}
4178
4179static u32 ci_get_lowest_enabled_level(struct radeon_device *rdev,
4180				       u32 level_mask)
4181{
4182	u32 level = 0;
4183
4184	while ((level_mask & (1 << level)) == 0)
4185		level++;
4186
4187	return level;
4188}
4189
4190
4191int ci_dpm_force_performance_level(struct radeon_device *rdev,
4192				   enum radeon_dpm_forced_level level)
4193{
4194	struct ci_power_info *pi = ci_get_pi(rdev);
4195	u32 tmp, levels, i;
4196	int ret;
4197
4198	if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
4199		if ((!pi->pcie_dpm_key_disabled) &&
4200		    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4201			levels = 0;
4202			tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
4203			while (tmp >>= 1)
4204				levels++;
4205			if (levels) {
4206				ret = ci_dpm_force_state_pcie(rdev, level);
4207				if (ret)
4208					return ret;
4209				for (i = 0; i < rdev->usec_timeout; i++) {
4210					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
4211					       CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
4212					if (tmp == levels)
4213						break;
4214					udelay(1);
4215				}
4216			}
4217		}
4218		if ((!pi->sclk_dpm_key_disabled) &&
4219		    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4220			levels = 0;
4221			tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
4222			while (tmp >>= 1)
4223				levels++;
4224			if (levels) {
4225				ret = ci_dpm_force_state_sclk(rdev, levels);
4226				if (ret)
4227					return ret;
4228				for (i = 0; i < rdev->usec_timeout; i++) {
4229					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
4230					       CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
4231					if (tmp == levels)
4232						break;
4233					udelay(1);
4234				}
4235			}
4236		}
4237		if ((!pi->mclk_dpm_key_disabled) &&
4238		    pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4239			levels = 0;
4240			tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
4241			while (tmp >>= 1)
4242				levels++;
4243			if (levels) {
4244				ret = ci_dpm_force_state_mclk(rdev, levels);
4245				if (ret)
4246					return ret;
4247				for (i = 0; i < rdev->usec_timeout; i++) {
4248					tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
4249					       CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
4250					if (tmp == levels)
4251						break;
4252					udelay(1);
4253				}
4254			}
4255		}
4256	} else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
4257		if ((!pi->sclk_dpm_key_disabled) &&
4258		    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4259			levels = ci_get_lowest_enabled_level(rdev,
4260							     pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
4261			ret = ci_dpm_force_state_sclk(rdev, levels);
4262			if (ret)
4263				return ret;
4264			for (i = 0; i < rdev->usec_timeout; i++) {
4265				tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
4266				       CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
4267				if (tmp == levels)
4268					break;
4269				udelay(1);
4270			}
4271		}
4272		if ((!pi->mclk_dpm_key_disabled) &&
4273		    pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4274			levels = ci_get_lowest_enabled_level(rdev,
4275							     pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4276			ret = ci_dpm_force_state_mclk(rdev, levels);
4277			if (ret)
4278				return ret;
4279			for (i = 0; i < rdev->usec_timeout; i++) {
4280				tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
4281				       CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
4282				if (tmp == levels)
4283					break;
4284				udelay(1);
4285			}
4286		}
4287		if ((!pi->pcie_dpm_key_disabled) &&
4288		    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4289			levels = ci_get_lowest_enabled_level(rdev,
4290							     pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
4291			ret = ci_dpm_force_state_pcie(rdev, levels);
4292			if (ret)
4293				return ret;
4294			for (i = 0; i < rdev->usec_timeout; i++) {
4295				tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
4296				       CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
4297				if (tmp == levels)
4298					break;
4299				udelay(1);
4300			}
4301		}
4302	} else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
4303		if (!pi->pcie_dpm_key_disabled) {
4304			PPSMC_Result smc_result;
4305
4306			smc_result = ci_send_msg_to_smc(rdev,
4307							PPSMC_MSG_PCIeDPM_UnForceLevel);
4308			if (smc_result != PPSMC_Result_OK)
4309				return -EINVAL;
4310		}
4311		ret = ci_upload_dpm_level_enable_mask(rdev);
4312		if (ret)
4313			return ret;
4314	}
4315
4316	rdev->pm.dpm.forced_level = level;
4317
4318	return 0;
4319}
4320
4321static int ci_set_mc_special_registers(struct radeon_device *rdev,
4322				       struct ci_mc_reg_table *table)
4323{
4324	struct ci_power_info *pi = ci_get_pi(rdev);
4325	u8 i, j, k;
4326	u32 temp_reg;
4327
4328	for (i = 0, j = table->last; i < table->last; i++) {
4329		if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4330			return -EINVAL;
4331		switch(table->mc_reg_address[i].s1 << 2) {
4332		case MC_SEQ_MISC1:
4333			temp_reg = RREG32(MC_PMG_CMD_EMRS);
4334			table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2;
4335			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
4336			for (k = 0; k < table->num_entries; k++) {
4337				table->mc_reg_table_entry[k].mc_data[j] =
4338					((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
4339			}
4340			j++;
4341			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4342				return -EINVAL;
4343
4344			temp_reg = RREG32(MC_PMG_CMD_MRS);
4345			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2;
4346			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
4347			for (k = 0; k < table->num_entries; k++) {
4348				table->mc_reg_table_entry[k].mc_data[j] =
4349					(temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
4350				if (!pi->mem_gddr5)
4351					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
4352			}
4353			j++;
4354			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4355				return -EINVAL;
4356
4357			if (!pi->mem_gddr5) {
4358				table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD >> 2;
4359				table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD >> 2;
4360				for (k = 0; k < table->num_entries; k++) {
4361					table->mc_reg_table_entry[k].mc_data[j] =
4362						(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
4363				}
4364				j++;
4365				if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4366					return -EINVAL;
4367			}
4368			break;
4369		case MC_SEQ_RESERVE_M:
4370			temp_reg = RREG32(MC_PMG_CMD_MRS1);
4371			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2;
4372			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
4373			for (k = 0; k < table->num_entries; k++) {
4374				table->mc_reg_table_entry[k].mc_data[j] =
4375					(temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
4376			}
4377			j++;
4378			if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4379				return -EINVAL;
4380			break;
4381		default:
4382			break;
4383		}
4384
4385	}
4386
4387	table->last = j;
4388
4389	return 0;
4390}
4391
4392static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
4393{
4394	bool result = true;
4395
4396	switch(in_reg) {
4397	case MC_SEQ_RAS_TIMING >> 2:
4398		*out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
4399		break;
4400	case MC_SEQ_DLL_STBY >> 2:
4401		*out_reg = MC_SEQ_DLL_STBY_LP >> 2;
4402		break;
4403	case MC_SEQ_G5PDX_CMD0 >> 2:
4404		*out_reg = MC_SEQ_G5PDX_CMD0_LP >> 2;
4405		break;
4406	case MC_SEQ_G5PDX_CMD1 >> 2:
4407		*out_reg = MC_SEQ_G5PDX_CMD1_LP >> 2;
4408		break;
4409	case MC_SEQ_G5PDX_CTRL >> 2:
4410		*out_reg = MC_SEQ_G5PDX_CTRL_LP >> 2;
4411		break;
4412	case MC_SEQ_CAS_TIMING >> 2:
4413		*out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
4414		break;
4415	case MC_SEQ_MISC_TIMING >> 2:
4416		*out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
4417		break;
4418	case MC_SEQ_MISC_TIMING2 >> 2:
4419		*out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
4420		break;
4421	case MC_SEQ_PMG_DVS_CMD >> 2:
4422		*out_reg = MC_SEQ_PMG_DVS_CMD_LP >> 2;
4423		break;
4424	case MC_SEQ_PMG_DVS_CTL >> 2:
4425		*out_reg = MC_SEQ_PMG_DVS_CTL_LP >> 2;
4426		break;
4427	case MC_SEQ_RD_CTL_D0 >> 2:
4428		*out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
4429		break;
4430	case MC_SEQ_RD_CTL_D1 >> 2:
4431		*out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
4432		break;
4433	case MC_SEQ_WR_CTL_D0 >> 2:
4434		*out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
4435		break;
4436	case MC_SEQ_WR_CTL_D1 >> 2:
4437		*out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
4438		break;
4439	case MC_PMG_CMD_EMRS >> 2:
4440		*out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
4441		break;
4442	case MC_PMG_CMD_MRS >> 2:
4443		*out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
4444		break;
4445	case MC_PMG_CMD_MRS1 >> 2:
4446		*out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
4447		break;
4448	case MC_SEQ_PMG_TIMING >> 2:
4449		*out_reg = MC_SEQ_PMG_TIMING_LP >> 2;
4450		break;
4451	case MC_PMG_CMD_MRS2 >> 2:
4452		*out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2;
4453		break;
4454	case MC_SEQ_WR_CTL_2 >> 2:
4455		*out_reg = MC_SEQ_WR_CTL_2_LP >> 2;
4456		break;
4457	default:
4458		result = false;
4459		break;
4460	}
4461
4462	return result;
4463}
4464
4465static void ci_set_valid_flag(struct ci_mc_reg_table *table)
4466{
4467	u8 i, j;
4468
4469	for (i = 0; i < table->last; i++) {
4470		for (j = 1; j < table->num_entries; j++) {
4471			if (table->mc_reg_table_entry[j-1].mc_data[i] !=
4472			    table->mc_reg_table_entry[j].mc_data[i]) {
4473				table->valid_flag |= 1 << i;
4474				break;
4475			}
4476		}
4477	}
4478}
4479
4480static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
4481{
4482	u32 i;
4483	u16 address;
4484
4485	for (i = 0; i < table->last; i++) {
4486		table->mc_reg_address[i].s0 =
4487			ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
4488			address : table->mc_reg_address[i].s1;
4489	}
4490}
4491
4492static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
4493				      struct ci_mc_reg_table *ci_table)
4494{
4495	u8 i, j;
4496
4497	if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4498		return -EINVAL;
4499	if (table->num_entries > MAX_AC_TIMING_ENTRIES)
4500		return -EINVAL;
4501
4502	for (i = 0; i < table->last; i++)
4503		ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
4504
4505	ci_table->last = table->last;
4506
4507	for (i = 0; i < table->num_entries; i++) {
4508		ci_table->mc_reg_table_entry[i].mclk_max =
4509			table->mc_reg_table_entry[i].mclk_max;
4510		for (j = 0; j < table->last; j++)
4511			ci_table->mc_reg_table_entry[i].mc_data[j] =
4512				table->mc_reg_table_entry[i].mc_data[j];
4513	}
4514	ci_table->num_entries = table->num_entries;
4515
4516	return 0;
4517}
4518
4519static int ci_register_patching_mc_seq(struct radeon_device *rdev,
4520				       struct ci_mc_reg_table *table)
4521{
4522	u8 i, k;
4523	u32 tmp;
4524	bool patch;
4525
4526	tmp = RREG32(MC_SEQ_MISC0);
4527	patch = ((tmp & 0x0000f00) == 0x300) ? true : false;
4528
4529	if (patch &&
4530	    ((rdev->pdev->device == 0x67B0) ||
4531	     (rdev->pdev->device == 0x67B1))) {
4532		for (i = 0; i < table->last; i++) {
4533			if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4534				return -EINVAL;
4535			switch(table->mc_reg_address[i].s1 >> 2) {
4536			case MC_SEQ_MISC1:
4537				for (k = 0; k < table->num_entries; k++) {
4538					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4539					    (table->mc_reg_table_entry[k].mclk_max == 137500))
4540						table->mc_reg_table_entry[k].mc_data[i] =
4541							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) |
4542							0x00000007;
4543				}
4544				break;
4545			case MC_SEQ_WR_CTL_D0:
4546				for (k = 0; k < table->num_entries; k++) {
4547					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4548					    (table->mc_reg_table_entry[k].mclk_max == 137500))
4549						table->mc_reg_table_entry[k].mc_data[i] =
4550							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
4551							0x0000D0DD;
4552				}
4553				break;
4554			case MC_SEQ_WR_CTL_D1:
4555				for (k = 0; k < table->num_entries; k++) {
4556					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4557					    (table->mc_reg_table_entry[k].mclk_max == 137500))
4558						table->mc_reg_table_entry[k].mc_data[i] =
4559							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
4560							0x0000D0DD;
4561				}
4562				break;
4563			case MC_SEQ_WR_CTL_2:
4564				for (k = 0; k < table->num_entries; k++) {
4565					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4566					    (table->mc_reg_table_entry[k].mclk_max == 137500))
4567						table->mc_reg_table_entry[k].mc_data[i] = 0;
4568				}
4569				break;
4570			case MC_SEQ_CAS_TIMING:
4571				for (k = 0; k < table->num_entries; k++) {
4572					if (table->mc_reg_table_entry[k].mclk_max == 125000)
4573						table->mc_reg_table_entry[k].mc_data[i] =
4574							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
4575							0x000C0140;
4576					else if (table->mc_reg_table_entry[k].mclk_max == 137500)
4577						table->mc_reg_table_entry[k].mc_data[i] =
4578							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
4579							0x000C0150;
4580				}
4581				break;
4582			case MC_SEQ_MISC_TIMING:
4583				for (k = 0; k < table->num_entries; k++) {
4584					if (table->mc_reg_table_entry[k].mclk_max == 125000)
4585						table->mc_reg_table_entry[k].mc_data[i] =
4586							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
4587							0x00000030;
4588					else if (table->mc_reg_table_entry[k].mclk_max == 137500)
4589						table->mc_reg_table_entry[k].mc_data[i] =
4590							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
4591							0x00000035;
4592				}
4593				break;
4594			default:
4595				break;
4596			}
4597		}
4598
4599		WREG32(MC_SEQ_IO_DEBUG_INDEX, 3);
4600		tmp = RREG32(MC_SEQ_IO_DEBUG_DATA);
4601		tmp = (tmp & 0xFFF8FFFF) | (1 << 16);
4602		WREG32(MC_SEQ_IO_DEBUG_INDEX, 3);
4603		WREG32(MC_SEQ_IO_DEBUG_DATA, tmp);
4604	}
4605
4606	return 0;
4607}
4608
4609static int ci_initialize_mc_reg_table(struct radeon_device *rdev)
4610{
4611	struct ci_power_info *pi = ci_get_pi(rdev);
4612	struct atom_mc_reg_table *table;
4613	struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
4614	u8 module_index = rv770_get_memory_module_index(rdev);
4615	int ret;
4616
4617	table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
4618	if (!table)
4619		return -ENOMEM;
4620
4621	WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
4622	WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
4623	WREG32(MC_SEQ_DLL_STBY_LP, RREG32(MC_SEQ_DLL_STBY));
4624	WREG32(MC_SEQ_G5PDX_CMD0_LP, RREG32(MC_SEQ_G5PDX_CMD0));
4625	WREG32(MC_SEQ_G5PDX_CMD1_LP, RREG32(MC_SEQ_G5PDX_CMD1));
4626	WREG32(MC_SEQ_G5PDX_CTRL_LP, RREG32(MC_SEQ_G5PDX_CTRL));
4627	WREG32(MC_SEQ_PMG_DVS_CMD_LP, RREG32(MC_SEQ_PMG_DVS_CMD));
4628	WREG32(MC_SEQ_PMG_DVS_CTL_LP, RREG32(MC_SEQ_PMG_DVS_CTL));
4629	WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
4630	WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
4631	WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
4632	WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
4633	WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
4634	WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
4635	WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
4636	WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
4637	WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
4638	WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
4639	WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
4640	WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));
4641
4642	ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
4643	if (ret)
4644		goto init_mc_done;
4645
4646	ret = ci_copy_vbios_mc_reg_table(table, ci_table);
4647	if (ret)
4648		goto init_mc_done;
4649
4650	ci_set_s0_mc_reg_index(ci_table);
4651
4652	ret = ci_register_patching_mc_seq(rdev, ci_table);
4653	if (ret)
4654		goto init_mc_done;
4655
4656	ret = ci_set_mc_special_registers(rdev, ci_table);
4657	if (ret)
4658		goto init_mc_done;
4659
4660	ci_set_valid_flag(ci_table);
4661
4662init_mc_done:
4663	kfree(table);
4664
4665	return ret;
4666}
4667
4668static int ci_populate_mc_reg_addresses(struct radeon_device *rdev,
4669					SMU7_Discrete_MCRegisters *mc_reg_table)
4670{
4671	struct ci_power_info *pi = ci_get_pi(rdev);
4672	u32 i, j;
4673
4674	for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
4675		if (pi->mc_reg_table.valid_flag & (1 << j)) {
4676			if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4677				return -EINVAL;
4678			mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
4679			mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
4680			i++;
4681		}
4682	}
4683
4684	mc_reg_table->last = (u8)i;
4685
4686	return 0;
4687}
4688
4689static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
4690				    SMU7_Discrete_MCRegisterSet *data,
4691				    u32 num_entries, u32 valid_flag)
4692{
4693	u32 i, j;
4694
4695	for (i = 0, j = 0; j < num_entries; j++) {
4696		if (valid_flag & (1 << j)) {
4697			data->value[i] = cpu_to_be32(entry->mc_data[j]);
4698			i++;
4699		}
4700	}
4701}
4702
4703static void ci_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev,
4704						 const u32 memory_clock,
4705						 SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
4706{
4707	struct ci_power_info *pi = ci_get_pi(rdev);
4708	u32 i = 0;
4709
4710	for(i = 0; i < pi->mc_reg_table.num_entries; i++) {
4711		if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
4712			break;
4713	}
4714
4715	if ((i == pi->mc_reg_table.num_entries) && (i > 0))
4716		--i;
4717
4718	ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
4719				mc_reg_table_data, pi->mc_reg_table.last,
4720				pi->mc_reg_table.valid_flag);
4721}
4722
4723static void ci_convert_mc_reg_table_to_smc(struct radeon_device *rdev,
4724					   SMU7_Discrete_MCRegisters *mc_reg_table)
4725{
4726	struct ci_power_info *pi = ci_get_pi(rdev);
4727	u32 i;
4728
4729	for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
4730		ci_convert_mc_reg_table_entry_to_smc(rdev,
4731						     pi->dpm_table.mclk_table.dpm_levels[i].value,
4732						     &mc_reg_table->data[i]);
4733}
4734
4735static int ci_populate_initial_mc_reg_table(struct radeon_device *rdev)
4736{
4737	struct ci_power_info *pi = ci_get_pi(rdev);
4738	int ret;
4739
4740	memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4741
4742	ret = ci_populate_mc_reg_addresses(rdev, &pi->smc_mc_reg_table);
4743	if (ret)
4744		return ret;
4745	ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);
4746
4747	return ci_copy_bytes_to_smc(rdev,
4748				    pi->mc_reg_table_start,
4749				    (u8 *)&pi->smc_mc_reg_table,
4750				    sizeof(SMU7_Discrete_MCRegisters),
4751				    pi->sram_end);
4752}
4753
4754static int ci_update_and_upload_mc_reg_table(struct radeon_device *rdev)
4755{
4756	struct ci_power_info *pi = ci_get_pi(rdev);
4757
4758	if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
4759		return 0;
4760
4761	memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4762
4763	ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);
4764
4765	return ci_copy_bytes_to_smc(rdev,
4766				    pi->mc_reg_table_start +
4767				    offsetof(SMU7_Discrete_MCRegisters, data[0]),
4768				    (u8 *)&pi->smc_mc_reg_table.data[0],
4769				    sizeof(SMU7_Discrete_MCRegisterSet) *
4770				    pi->dpm_table.mclk_table.count,
4771				    pi->sram_end);
4772}
4773
4774static void ci_enable_voltage_control(struct radeon_device *rdev)
4775{
4776	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
4777
4778	tmp |= VOLT_PWRMGT_EN;
4779	WREG32_SMC(GENERAL_PWRMGT, tmp);
4780}
4781
4782static enum radeon_pcie_gen ci_get_maximum_link_speed(struct radeon_device *rdev,
4783						      struct radeon_ps *radeon_state)
4784{
4785	struct ci_ps *state = ci_get_ps(radeon_state);
4786	int i;
4787	u16 pcie_speed, max_speed = 0;
4788
4789	for (i = 0; i < state->performance_level_count; i++) {
4790		pcie_speed = state->performance_levels[i].pcie_gen;
4791		if (max_speed < pcie_speed)
4792			max_speed = pcie_speed;
4793	}
4794
4795	return max_speed;
4796}
4797
4798static u16 ci_get_current_pcie_speed(struct radeon_device *rdev)
4799{
4800	u32 speed_cntl = 0;
4801
4802	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
4803	speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;
4804
4805	return (u16)speed_cntl;
4806}
4807
4808static int ci_get_current_pcie_lane_number(struct radeon_device *rdev)
4809{
4810	u32 link_width = 0;
4811
4812	link_width = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL) & LC_LINK_WIDTH_RD_MASK;
4813	link_width >>= LC_LINK_WIDTH_RD_SHIFT;
4814
4815	switch (link_width) {
4816	case RADEON_PCIE_LC_LINK_WIDTH_X1:
4817		return 1;
4818	case RADEON_PCIE_LC_LINK_WIDTH_X2:
4819		return 2;
4820	case RADEON_PCIE_LC_LINK_WIDTH_X4:
4821		return 4;
4822	case RADEON_PCIE_LC_LINK_WIDTH_X8:
4823		return 8;
4824	case RADEON_PCIE_LC_LINK_WIDTH_X12:
4825		/* not actually supported */
4826		return 12;
4827	case RADEON_PCIE_LC_LINK_WIDTH_X0:
4828	case RADEON_PCIE_LC_LINK_WIDTH_X16:
4829	default:
4830		return 16;
4831	}
4832}
4833
4834static void ci_request_link_speed_change_before_state_change(struct radeon_device *rdev,
4835							     struct radeon_ps *radeon_new_state,
4836							     struct radeon_ps *radeon_current_state)
4837{
4838	struct ci_power_info *pi = ci_get_pi(rdev);
4839	enum radeon_pcie_gen target_link_speed =
4840		ci_get_maximum_link_speed(rdev, radeon_new_state);
4841	enum radeon_pcie_gen current_link_speed;
4842
4843	if (pi->force_pcie_gen == RADEON_PCIE_GEN_INVALID)
4844		current_link_speed = ci_get_maximum_link_speed(rdev, radeon_current_state);
4845	else
4846		current_link_speed = pi->force_pcie_gen;
4847
4848	pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
4849	pi->pspp_notify_required = false;
4850	if (target_link_speed > current_link_speed) {
4851		switch (target_link_speed) {
4852#ifdef CONFIG_ACPI
4853		case RADEON_PCIE_GEN3:
4854			if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
4855				break;
4856			pi->force_pcie_gen = RADEON_PCIE_GEN2;
4857			if (current_link_speed == RADEON_PCIE_GEN2)
4858				break;
4859			fallthrough;
4860		case RADEON_PCIE_GEN2:
4861			if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
4862				break;
4863#endif
4864			/* fall through */
4865		default:
4866			pi->force_pcie_gen = ci_get_current_pcie_speed(rdev);
4867			break;
4868		}
4869	} else {
4870		if (target_link_speed < current_link_speed)
4871			pi->pspp_notify_required = true;
4872	}
4873}
4874
4875static void ci_notify_link_speed_change_after_state_change(struct radeon_device *rdev,
4876							   struct radeon_ps *radeon_new_state,
4877							   struct radeon_ps *radeon_current_state)
4878{
4879	struct ci_power_info *pi = ci_get_pi(rdev);
4880	enum radeon_pcie_gen target_link_speed =
4881		ci_get_maximum_link_speed(rdev, radeon_new_state);
4882	u8 request;
4883
4884	if (pi->pspp_notify_required) {
4885		if (target_link_speed == RADEON_PCIE_GEN3)
4886			request = PCIE_PERF_REQ_PECI_GEN3;
4887		else if (target_link_speed == RADEON_PCIE_GEN2)
4888			request = PCIE_PERF_REQ_PECI_GEN2;
4889		else
4890			request = PCIE_PERF_REQ_PECI_GEN1;
4891
4892		if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
4893		    (ci_get_current_pcie_speed(rdev) > 0))
4894			return;
4895
4896#ifdef CONFIG_ACPI
4897		radeon_acpi_pcie_performance_request(rdev, request, false);
4898#endif
4899	}
4900}
4901
4902static int ci_set_private_data_variables_based_on_pptable(struct radeon_device *rdev)
4903{
4904	struct ci_power_info *pi = ci_get_pi(rdev);
4905	struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
4906		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
4907	struct radeon_clock_voltage_dependency_table *allowed_mclk_vddc_table =
4908		&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
4909	struct radeon_clock_voltage_dependency_table *allowed_mclk_vddci_table =
4910		&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
4911
4912	if (allowed_sclk_vddc_table == NULL)
4913		return -EINVAL;
4914	if (allowed_sclk_vddc_table->count < 1)
4915		return -EINVAL;
4916	if (allowed_mclk_vddc_table == NULL)
4917		return -EINVAL;
4918	if (allowed_mclk_vddc_table->count < 1)
4919		return -EINVAL;
4920	if (allowed_mclk_vddci_table == NULL)
4921		return -EINVAL;
4922	if (allowed_mclk_vddci_table->count < 1)
4923		return -EINVAL;
4924
4925	pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
4926	pi->max_vddc_in_pp_table =
4927		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
4928
4929	pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
4930	pi->max_vddci_in_pp_table =
4931		allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
4932
4933	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
4934		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
4935	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
4936		allowed_mclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
4937	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
4938		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
4939	rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
4940		allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
4941
4942	return 0;
4943}
4944
4945static void ci_patch_with_vddc_leakage(struct radeon_device *rdev, u16 *vddc)
4946{
4947	struct ci_power_info *pi = ci_get_pi(rdev);
4948	struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
4949	u32 leakage_index;
4950
4951	for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
4952		if (leakage_table->leakage_id[leakage_index] == *vddc) {
4953			*vddc = leakage_table->actual_voltage[leakage_index];
4954			break;
4955		}
4956	}
4957}
4958
4959static void ci_patch_with_vddci_leakage(struct radeon_device *rdev, u16 *vddci)
4960{
4961	struct ci_power_info *pi = ci_get_pi(rdev);
4962	struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
4963	u32 leakage_index;
4964
4965	for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
4966		if (leakage_table->leakage_id[leakage_index] == *vddci) {
4967			*vddci = leakage_table->actual_voltage[leakage_index];
4968			break;
4969		}
4970	}
4971}
4972
4973static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4974								      struct radeon_clock_voltage_dependency_table *table)
4975{
4976	u32 i;
4977
4978	if (table) {
4979		for (i = 0; i < table->count; i++)
4980			ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4981	}
4982}
4983
4984static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct radeon_device *rdev,
4985								       struct radeon_clock_voltage_dependency_table *table)
4986{
4987	u32 i;
4988
4989	if (table) {
4990		for (i = 0; i < table->count; i++)
4991			ci_patch_with_vddci_leakage(rdev, &table->entries[i].v);
4992	}
4993}
4994
4995static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4996									  struct radeon_vce_clock_voltage_dependency_table *table)
4997{
4998	u32 i;
4999
5000	if (table) {
5001		for (i = 0; i < table->count; i++)
5002			ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
5003	}
5004}
5005
5006static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
5007									  struct radeon_uvd_clock_voltage_dependency_table *table)
5008{
5009	u32 i;
5010
5011	if (table) {
5012		for (i = 0; i < table->count; i++)
5013			ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
5014	}
5015}
5016
5017static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct radeon_device *rdev,
5018								   struct radeon_phase_shedding_limits_table *table)
5019{
5020	u32 i;
5021
5022	if (table) {
5023		for (i = 0; i < table->count; i++)
5024			ci_patch_with_vddc_leakage(rdev, &table->entries[i].voltage);
5025	}
5026}
5027
5028static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct radeon_device *rdev,
5029							    struct radeon_clock_and_voltage_limits *table)
5030{
5031	if (table) {
5032		ci_patch_with_vddc_leakage(rdev, (u16 *)&table->vddc);
5033		ci_patch_with_vddci_leakage(rdev, (u16 *)&table->vddci);
5034	}
5035}
5036
5037static void ci_patch_cac_leakage_table_with_vddc_leakage(struct radeon_device *rdev,
5038							 struct radeon_cac_leakage_table *table)
5039{
5040	u32 i;
5041
5042	if (table) {
5043		for (i = 0; i < table->count; i++)
5044			ci_patch_with_vddc_leakage(rdev, &table->entries[i].vddc);
5045	}
5046}
5047
5048static void ci_patch_dependency_tables_with_leakage(struct radeon_device *rdev)
5049{
5050
5051	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
5052								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
5053	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
5054								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
5055	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
5056								  &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
5057	ci_patch_clock_voltage_dependency_table_with_vddci_leakage(rdev,
5058								   &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
5059	ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(rdev,
5060								      &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
5061	ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(rdev,
5062								      &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
5063	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
5064								  &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
5065	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
5066								  &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
5067	ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(rdev,
5068							       &rdev->pm.dpm.dyn_state.phase_shedding_limits_table);
5069	ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
5070							&rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
5071	ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
5072							&rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
5073	ci_patch_cac_leakage_table_with_vddc_leakage(rdev,
5074						     &rdev->pm.dpm.dyn_state.cac_leakage_table);
5075
5076}
5077
5078static void ci_get_memory_type(struct radeon_device *rdev)
5079{
5080	struct ci_power_info *pi = ci_get_pi(rdev);
5081	u32 tmp;
5082
5083	tmp = RREG32(MC_SEQ_MISC0);
5084
5085	if (((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT) ==
5086	    MC_SEQ_MISC0_GDDR5_VALUE)
5087		pi->mem_gddr5 = true;
5088	else
5089		pi->mem_gddr5 = false;
5090
5091}
5092
5093static void ci_update_current_ps(struct radeon_device *rdev,
5094				 struct radeon_ps *rps)
5095{
5096	struct ci_ps *new_ps = ci_get_ps(rps);
5097	struct ci_power_info *pi = ci_get_pi(rdev);
5098
5099	pi->current_rps = *rps;
5100	pi->current_ps = *new_ps;
5101	pi->current_rps.ps_priv = &pi->current_ps;
5102}
5103
5104static void ci_update_requested_ps(struct radeon_device *rdev,
5105				   struct radeon_ps *rps)
5106{
5107	struct ci_ps *new_ps = ci_get_ps(rps);
5108	struct ci_power_info *pi = ci_get_pi(rdev);
5109
5110	pi->requested_rps = *rps;
5111	pi->requested_ps = *new_ps;
5112	pi->requested_rps.ps_priv = &pi->requested_ps;
5113}
5114
5115int ci_dpm_pre_set_power_state(struct radeon_device *rdev)
5116{
5117	struct ci_power_info *pi = ci_get_pi(rdev);
5118	struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
5119	struct radeon_ps *new_ps = &requested_ps;
5120
5121	ci_update_requested_ps(rdev, new_ps);
5122
5123	ci_apply_state_adjust_rules(rdev, &pi->requested_rps);
5124
5125	return 0;
5126}
5127
5128void ci_dpm_post_set_power_state(struct radeon_device *rdev)
5129{
5130	struct ci_power_info *pi = ci_get_pi(rdev);
5131	struct radeon_ps *new_ps = &pi->requested_rps;
5132
5133	ci_update_current_ps(rdev, new_ps);
5134}
5135
5136
5137void ci_dpm_setup_asic(struct radeon_device *rdev)
5138{
5139	int r;
5140
5141	r = ci_mc_load_microcode(rdev);
5142	if (r)
5143		DRM_ERROR("Failed to load MC firmware!\n");
5144	ci_read_clock_registers(rdev);
5145	ci_get_memory_type(rdev);
5146	ci_enable_acpi_power_management(rdev);
5147	ci_init_sclk_t(rdev);
5148}
5149
5150int ci_dpm_enable(struct radeon_device *rdev)
5151{
5152	struct ci_power_info *pi = ci_get_pi(rdev);
5153	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
5154	int ret;
5155
5156	if (ci_is_smc_running(rdev))
5157		return -EINVAL;
5158	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
5159		ci_enable_voltage_control(rdev);
5160		ret = ci_construct_voltage_tables(rdev);
5161		if (ret) {
5162			DRM_ERROR("ci_construct_voltage_tables failed\n");
5163			return ret;
5164		}
5165	}
5166	if (pi->caps_dynamic_ac_timing) {
5167		ret = ci_initialize_mc_reg_table(rdev);
5168		if (ret)
5169			pi->caps_dynamic_ac_timing = false;
5170	}
5171	if (pi->dynamic_ss)
5172		ci_enable_spread_spectrum(rdev, true);
5173	if (pi->thermal_protection)
5174		ci_enable_thermal_protection(rdev, true);
5175	ci_program_sstp(rdev);
5176	ci_enable_display_gap(rdev);
5177	ci_program_vc(rdev);
5178	ret = ci_upload_firmware(rdev);
5179	if (ret) {
5180		DRM_ERROR("ci_upload_firmware failed\n");
5181		return ret;
5182	}
5183	ret = ci_process_firmware_header(rdev);
5184	if (ret) {
5185		DRM_ERROR("ci_process_firmware_header failed\n");
5186		return ret;
5187	}
5188	ret = ci_initial_switch_from_arb_f0_to_f1(rdev);
5189	if (ret) {
5190		DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
5191		return ret;
5192	}
5193	ret = ci_init_smc_table(rdev);
5194	if (ret) {
5195		DRM_ERROR("ci_init_smc_table failed\n");
5196		return ret;
5197	}
5198	ret = ci_init_arb_table_index(rdev);
5199	if (ret) {
5200		DRM_ERROR("ci_init_arb_table_index failed\n");
5201		return ret;
5202	}
5203	if (pi->caps_dynamic_ac_timing) {
5204		ret = ci_populate_initial_mc_reg_table(rdev);
5205		if (ret) {
5206			DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
5207			return ret;
5208		}
5209	}
5210	ret = ci_populate_pm_base(rdev);
5211	if (ret) {
5212		DRM_ERROR("ci_populate_pm_base failed\n");
5213		return ret;
5214	}
5215	ci_dpm_start_smc(rdev);
5216	ci_enable_vr_hot_gpio_interrupt(rdev);
5217	ret = ci_notify_smc_display_change(rdev, false);
5218	if (ret) {
5219		DRM_ERROR("ci_notify_smc_display_change failed\n");
5220		return ret;
5221	}
5222	ci_enable_sclk_control(rdev, true);
5223	ret = ci_enable_ulv(rdev, true);
5224	if (ret) {
5225		DRM_ERROR("ci_enable_ulv failed\n");
5226		return ret;
5227	}
5228	ret = ci_enable_ds_master_switch(rdev, true);
5229	if (ret) {
5230		DRM_ERROR("ci_enable_ds_master_switch failed\n");
5231		return ret;
5232	}
5233	ret = ci_start_dpm(rdev);
5234	if (ret) {
5235		DRM_ERROR("ci_start_dpm failed\n");
5236		return ret;
5237	}
5238	ret = ci_enable_didt(rdev, true);
5239	if (ret) {
5240		DRM_ERROR("ci_enable_didt failed\n");
5241		return ret;
5242	}
5243	ret = ci_enable_smc_cac(rdev, true);
5244	if (ret) {
5245		DRM_ERROR("ci_enable_smc_cac failed\n");
5246		return ret;
5247	}
5248	ret = ci_enable_power_containment(rdev, true);
5249	if (ret) {
5250		DRM_ERROR("ci_enable_power_containment failed\n");
5251		return ret;
5252	}
5253
5254	ret = ci_power_control_set_level(rdev);
5255	if (ret) {
5256		DRM_ERROR("ci_power_control_set_level failed\n");
5257		return ret;
5258	}
5259
5260	ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
5261
5262	ret = ci_enable_thermal_based_sclk_dpm(rdev, true);
5263	if (ret) {
5264		DRM_ERROR("ci_enable_thermal_based_sclk_dpm failed\n");
5265		return ret;
5266	}
5267
5268	ci_thermal_start_thermal_controller(rdev);
5269
5270	ci_update_current_ps(rdev, boot_ps);
5271
5272	return 0;
5273}
5274
5275static int ci_set_temperature_range(struct radeon_device *rdev)
5276{
5277	int ret;
5278
5279	ret = ci_thermal_enable_alert(rdev, false);
5280	if (ret)
5281		return ret;
5282	ret = ci_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
5283	if (ret)
5284		return ret;
5285	ret = ci_thermal_enable_alert(rdev, true);
5286	if (ret)
5287		return ret;
5288
5289	return ret;
5290}
5291
5292int ci_dpm_late_enable(struct radeon_device *rdev)
5293{
5294	int ret;
5295
5296	ret = ci_set_temperature_range(rdev);
5297	if (ret)
5298		return ret;
5299
5300	ci_dpm_powergate_uvd(rdev, true);
5301
5302	return 0;
5303}
5304
5305void ci_dpm_disable(struct radeon_device *rdev)
5306{
5307	struct ci_power_info *pi = ci_get_pi(rdev);
5308	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
5309
5310	ci_dpm_powergate_uvd(rdev, false);
5311
5312	if (!ci_is_smc_running(rdev))
5313		return;
5314
5315	ci_thermal_stop_thermal_controller(rdev);
5316
5317	if (pi->thermal_protection)
5318		ci_enable_thermal_protection(rdev, false);
5319	ci_enable_power_containment(rdev, false);
5320	ci_enable_smc_cac(rdev, false);
5321	ci_enable_didt(rdev, false);
5322	ci_enable_spread_spectrum(rdev, false);
5323	ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
5324	ci_stop_dpm(rdev);
5325	ci_enable_ds_master_switch(rdev, false);
5326	ci_enable_ulv(rdev, false);
5327	ci_clear_vc(rdev);
5328	ci_reset_to_default(rdev);
5329	ci_dpm_stop_smc(rdev);
5330	ci_force_switch_to_arb_f0(rdev);
5331	ci_enable_thermal_based_sclk_dpm(rdev, false);
5332
5333	ci_update_current_ps(rdev, boot_ps);
5334}
5335
5336int ci_dpm_set_power_state(struct radeon_device *rdev)
5337{
5338	struct ci_power_info *pi = ci_get_pi(rdev);
5339	struct radeon_ps *new_ps = &pi->requested_rps;
5340	struct radeon_ps *old_ps = &pi->current_rps;
5341	int ret;
5342
5343	ci_find_dpm_states_clocks_in_dpm_table(rdev, new_ps);
5344	if (pi->pcie_performance_request)
5345		ci_request_link_speed_change_before_state_change(rdev, new_ps, old_ps);
5346	ret = ci_freeze_sclk_mclk_dpm(rdev);
5347	if (ret) {
5348		DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
5349		return ret;
5350	}
5351	ret = ci_populate_and_upload_sclk_mclk_dpm_levels(rdev, new_ps);
5352	if (ret) {
5353		DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
5354		return ret;
5355	}
5356	ret = ci_generate_dpm_level_enable_mask(rdev, new_ps);
5357	if (ret) {
5358		DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
5359		return ret;
5360	}
5361
5362	ret = ci_update_vce_dpm(rdev, new_ps, old_ps);
5363	if (ret) {
5364		DRM_ERROR("ci_update_vce_dpm failed\n");
5365		return ret;
5366	}
5367
5368	ret = ci_update_sclk_t(rdev);
5369	if (ret) {
5370		DRM_ERROR("ci_update_sclk_t failed\n");
5371		return ret;
5372	}
5373	if (pi->caps_dynamic_ac_timing) {
5374		ret = ci_update_and_upload_mc_reg_table(rdev);
5375		if (ret) {
5376			DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
5377			return ret;
5378		}
5379	}
5380	ret = ci_program_memory_timing_parameters(rdev);
5381	if (ret) {
5382		DRM_ERROR("ci_program_memory_timing_parameters failed\n");
5383		return ret;
5384	}
5385	ret = ci_unfreeze_sclk_mclk_dpm(rdev);
5386	if (ret) {
5387		DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
5388		return ret;
5389	}
5390	ret = ci_upload_dpm_level_enable_mask(rdev);
5391	if (ret) {
5392		DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
5393		return ret;
5394	}
5395	if (pi->pcie_performance_request)
5396		ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
5397
5398	return 0;
5399}
5400
5401#if 0
5402void ci_dpm_reset_asic(struct radeon_device *rdev)
5403{
5404	ci_set_boot_state(rdev);
5405}
5406#endif
5407
5408void ci_dpm_display_configuration_changed(struct radeon_device *rdev)
5409{
5410	ci_program_display_gap(rdev);
5411}
5412
5413union power_info {
5414	struct _ATOM_POWERPLAY_INFO info;
5415	struct _ATOM_POWERPLAY_INFO_V2 info_2;
5416	struct _ATOM_POWERPLAY_INFO_V3 info_3;
5417	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
5418	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
5419	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
5420};
5421
5422union pplib_clock_info {
5423	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
5424	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
5425	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
5426	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
5427	struct _ATOM_PPLIB_SI_CLOCK_INFO si;
5428	struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
5429};
5430
5431union pplib_power_state {
5432	struct _ATOM_PPLIB_STATE v1;
5433	struct _ATOM_PPLIB_STATE_V2 v2;
5434};
5435
5436static void ci_parse_pplib_non_clock_info(struct radeon_device *rdev,
5437					  struct radeon_ps *rps,
5438					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
5439					  u8 table_rev)
5440{
5441	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
5442	rps->class = le16_to_cpu(non_clock_info->usClassification);
5443	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
5444
5445	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
5446		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
5447		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
5448	} else {
5449		rps->vclk = 0;
5450		rps->dclk = 0;
5451	}
5452
5453	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
5454		rdev->pm.dpm.boot_ps = rps;
5455	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
5456		rdev->pm.dpm.uvd_ps = rps;
5457}
5458
5459static void ci_parse_pplib_clock_info(struct radeon_device *rdev,
5460				      struct radeon_ps *rps, int index,
5461				      union pplib_clock_info *clock_info)
5462{
5463	struct ci_power_info *pi = ci_get_pi(rdev);
5464	struct ci_ps *ps = ci_get_ps(rps);
5465	struct ci_pl *pl = &ps->performance_levels[index];
5466
5467	ps->performance_level_count = index + 1;
5468
5469	pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
5470	pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
5471	pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
5472	pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;
5473
5474	pl->pcie_gen = r600_get_pcie_gen_support(rdev,
5475						 pi->sys_pcie_mask,
5476						 pi->vbios_boot_state.pcie_gen_bootup_value,
5477						 clock_info->ci.ucPCIEGen);
5478	pl->pcie_lane = r600_get_pcie_lane_support(rdev,
5479						   pi->vbios_boot_state.pcie_lane_bootup_value,
5480						   le16_to_cpu(clock_info->ci.usPCIELane));
5481
5482	if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
5483		pi->acpi_pcie_gen = pl->pcie_gen;
5484	}
5485
5486	if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
5487		pi->ulv.supported = true;
5488		pi->ulv.pl = *pl;
5489		pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
5490	}
5491
5492	/* patch up boot state */
5493	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
5494		pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
5495		pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
5496		pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
5497		pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
5498	}
5499
5500	switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
5501	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
5502		pi->use_pcie_powersaving_levels = true;
5503		if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
5504			pi->pcie_gen_powersaving.max = pl->pcie_gen;
5505		if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
5506			pi->pcie_gen_powersaving.min = pl->pcie_gen;
5507		if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
5508			pi->pcie_lane_powersaving.max = pl->pcie_lane;
5509		if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
5510			pi->pcie_lane_powersaving.min = pl->pcie_lane;
5511		break;
5512	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
5513		pi->use_pcie_performance_levels = true;
5514		if (pi->pcie_gen_performance.max < pl->pcie_gen)
5515			pi->pcie_gen_performance.max = pl->pcie_gen;
5516		if (pi->pcie_gen_performance.min > pl->pcie_gen)
5517			pi->pcie_gen_performance.min = pl->pcie_gen;
5518		if (pi->pcie_lane_performance.max < pl->pcie_lane)
5519			pi->pcie_lane_performance.max = pl->pcie_lane;
5520		if (pi->pcie_lane_performance.min > pl->pcie_lane)
5521			pi->pcie_lane_performance.min = pl->pcie_lane;
5522		break;
5523	default:
5524		break;
5525	}
5526}
5527
5528static int ci_parse_power_table(struct radeon_device *rdev)
5529{
5530	struct radeon_mode_info *mode_info = &rdev->mode_info;
5531	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
5532	union pplib_power_state *power_state;
5533	int i, j, k, non_clock_array_index, clock_array_index;
5534	union pplib_clock_info *clock_info;
5535	struct _StateArray *state_array;
5536	struct _ClockInfoArray *clock_info_array;
5537	struct _NonClockInfoArray *non_clock_info_array;
5538	union power_info *power_info;
5539	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
5540	u16 data_offset;
5541	u8 frev, crev;
5542	u8 *power_state_offset;
5543	struct ci_ps *ps;
5544	int ret;
5545
5546	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
5547				   &frev, &crev, &data_offset))
5548		return -EINVAL;
5549	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
5550
5551	state_array = (struct _StateArray *)
5552		(mode_info->atom_context->bios + data_offset +
5553		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
5554	clock_info_array = (struct _ClockInfoArray *)
5555		(mode_info->atom_context->bios + data_offset +
5556		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
5557	non_clock_info_array = (struct _NonClockInfoArray *)
5558		(mode_info->atom_context->bios + data_offset +
5559		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
5560
5561	rdev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
5562				  sizeof(struct radeon_ps),
5563				  GFP_KERNEL);
5564	if (!rdev->pm.dpm.ps)
5565		return -ENOMEM;
5566	power_state_offset = (u8 *)state_array->states;
5567	rdev->pm.dpm.num_ps = 0;
5568	for (i = 0; i < state_array->ucNumEntries; i++) {
5569		u8 *idx;
5570		power_state = (union pplib_power_state *)power_state_offset;
5571		non_clock_array_index = power_state->v2.nonClockInfoIndex;
5572		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
5573			&non_clock_info_array->nonClockInfo[non_clock_array_index];
5574		if (!rdev->pm.power_state[i].clock_info) {
5575			ret = -EINVAL;
5576			goto err_free_ps;
5577		}
5578		ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
5579		if (ps == NULL) {
5580			ret = -ENOMEM;
5581			goto err_free_ps;
5582		}
5583		rdev->pm.dpm.ps[i].ps_priv = ps;
5584		ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
5585					      non_clock_info,
5586					      non_clock_info_array->ucEntrySize);
5587		k = 0;
5588		idx = (u8 *)&power_state->v2.clockInfoIndex[0];
5589		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
5590			clock_array_index = idx[j];
5591			if (clock_array_index >= clock_info_array->ucNumEntries)
5592				continue;
5593			if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
5594				break;
5595			clock_info = (union pplib_clock_info *)
5596				((u8 *)&clock_info_array->clockInfo[0] +
5597				 (clock_array_index * clock_info_array->ucEntrySize));
5598			ci_parse_pplib_clock_info(rdev,
5599						  &rdev->pm.dpm.ps[i], k,
5600						  clock_info);
5601			k++;
5602		}
5603		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
5604		rdev->pm.dpm.num_ps = i + 1;
5605	}
5606
5607	/* fill in the vce power states */
5608	for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) {
5609		u32 sclk, mclk;
5610		clock_array_index = rdev->pm.dpm.vce_states[i].clk_idx;
5611		clock_info = (union pplib_clock_info *)
5612			&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
5613		sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
5614		sclk |= clock_info->ci.ucEngineClockHigh << 16;
5615		mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
5616		mclk |= clock_info->ci.ucMemoryClockHigh << 16;
5617		rdev->pm.dpm.vce_states[i].sclk = sclk;
5618		rdev->pm.dpm.vce_states[i].mclk = mclk;
5619	}
5620
5621	return 0;
5622
5623err_free_ps:
5624	for (i = 0; i < rdev->pm.dpm.num_ps; i++)
5625		kfree(rdev->pm.dpm.ps[i].ps_priv);
5626	kfree(rdev->pm.dpm.ps);
5627	return ret;
5628}
5629
5630static int ci_get_vbios_boot_values(struct radeon_device *rdev,
5631				    struct ci_vbios_boot_state *boot_state)
5632{
5633	struct radeon_mode_info *mode_info = &rdev->mode_info;
5634	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
5635	ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
5636	u8 frev, crev;
5637	u16 data_offset;
5638
5639	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
5640				   &frev, &crev, &data_offset)) {
5641		firmware_info =
5642			(ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
5643						    data_offset);
5644		boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
5645		boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
5646		boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
5647		boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(rdev);
5648		boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(rdev);
5649		boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
5650		boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);
5651
5652		return 0;
5653	}
5654	return -EINVAL;
5655}
5656
5657void ci_dpm_fini(struct radeon_device *rdev)
5658{
5659	int i;
5660
5661	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
5662		kfree(rdev->pm.dpm.ps[i].ps_priv);
5663	}
5664	kfree(rdev->pm.dpm.ps);
5665	kfree(rdev->pm.dpm.priv);
5666	kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
5667	r600_free_extended_power_table(rdev);
5668}
5669
5670int ci_dpm_init(struct radeon_device *rdev)
5671{
5672	int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
5673	SMU7_Discrete_DpmTable  *dpm_table;
5674	struct radeon_gpio_rec gpio;
5675	u16 data_offset, size;
5676	u8 frev, crev;
5677	struct ci_power_info *pi;
5678	enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;
5679	struct pci_dev *root = rdev->pdev->bus->self;
5680	int ret;
5681
5682	pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
5683	if (pi == NULL)
5684		return -ENOMEM;
5685	rdev->pm.dpm.priv = pi;
5686
5687	if (!pci_is_root_bus(rdev->pdev->bus))
5688		speed_cap = pcie_get_speed_cap(root);
5689	if (speed_cap == PCI_SPEED_UNKNOWN) {
5690		pi->sys_pcie_mask = 0;
5691	} else {
5692		if (speed_cap == PCIE_SPEED_8_0GT)
5693			pi->sys_pcie_mask = RADEON_PCIE_SPEED_25 |
5694				RADEON_PCIE_SPEED_50 |
5695				RADEON_PCIE_SPEED_80;
5696		else if (speed_cap == PCIE_SPEED_5_0GT)
5697			pi->sys_pcie_mask = RADEON_PCIE_SPEED_25 |
5698				RADEON_PCIE_SPEED_50;
5699		else
5700			pi->sys_pcie_mask = RADEON_PCIE_SPEED_25;
5701	}
5702	pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
5703
5704	pi->pcie_gen_performance.max = RADEON_PCIE_GEN1;
5705	pi->pcie_gen_performance.min = RADEON_PCIE_GEN3;
5706	pi->pcie_gen_powersaving.max = RADEON_PCIE_GEN1;
5707	pi->pcie_gen_powersaving.min = RADEON_PCIE_GEN3;
5708
5709	pi->pcie_lane_performance.max = 0;
5710	pi->pcie_lane_performance.min = 16;
5711	pi->pcie_lane_powersaving.max = 0;
5712	pi->pcie_lane_powersaving.min = 16;
5713
5714	ret = ci_get_vbios_boot_values(rdev, &pi->vbios_boot_state);
5715	if (ret) {
5716		kfree(rdev->pm.dpm.priv);
5717		return ret;
5718	}
5719
5720	ret = r600_get_platform_caps(rdev);
5721	if (ret) {
5722		kfree(rdev->pm.dpm.priv);
5723		return ret;
5724	}
5725
5726	ret = r600_parse_extended_power_table(rdev);
5727	if (ret) {
5728		kfree(rdev->pm.dpm.priv);
5729		return ret;
5730	}
5731
5732	ret = ci_parse_power_table(rdev);
5733	if (ret) {
5734		kfree(rdev->pm.dpm.priv);
5735		r600_free_extended_power_table(rdev);
5736		return ret;
5737	}
5738
5739	pi->dll_default_on = false;
5740	pi->sram_end = SMC_RAM_END;
5741
5742	pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
5743	pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
5744	pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
5745	pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
5746	pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
5747	pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
5748	pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
5749	pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;
5750
5751	pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;
5752
5753	pi->sclk_dpm_key_disabled = 0;
5754	pi->mclk_dpm_key_disabled = 0;
5755	pi->pcie_dpm_key_disabled = 0;
5756	pi->thermal_sclk_dpm_enabled = 0;
5757
5758	/* mclk dpm is unstable on some R7 260X cards with the old mc ucode */
5759	if ((rdev->pdev->device == 0x6658) &&
5760	    (rdev->mc_fw->size == (BONAIRE_MC_UCODE_SIZE * 4))) {
5761		pi->mclk_dpm_key_disabled = 1;
5762	}
5763
5764	pi->caps_sclk_ds = true;
5765
5766	pi->mclk_strobe_mode_threshold = 40000;
5767	pi->mclk_stutter_mode_threshold = 40000;
5768	pi->mclk_edc_enable_threshold = 40000;
5769	pi->mclk_edc_wr_enable_threshold = 40000;
5770
5771	ci_initialize_powertune_defaults(rdev);
5772
5773	pi->caps_fps = false;
5774
5775	pi->caps_sclk_throttle_low_notification = false;
5776
5777	pi->caps_uvd_dpm = true;
5778	pi->caps_vce_dpm = true;
5779
5780	ci_get_leakage_voltages(rdev);
5781	ci_patch_dependency_tables_with_leakage(rdev);
5782	ci_set_private_data_variables_based_on_pptable(rdev);
5783
5784	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
5785		kcalloc(4,
5786			sizeof(struct radeon_clock_voltage_dependency_entry),
5787			GFP_KERNEL);
5788	if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
5789		ci_dpm_fini(rdev);
5790		return -ENOMEM;
5791	}
5792	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
5793	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
5794	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
5795	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
5796	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
5797	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
5798	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
5799	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
5800	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
5801
5802	rdev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
5803	rdev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
5804	rdev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
5805
5806	rdev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
5807	rdev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
5808	rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
5809	rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
5810
5811	if (rdev->family == CHIP_HAWAII) {
5812		pi->thermal_temp_setting.temperature_low = 94500;
5813		pi->thermal_temp_setting.temperature_high = 95000;
5814		pi->thermal_temp_setting.temperature_shutdown = 104000;
5815	} else {
5816		pi->thermal_temp_setting.temperature_low = 99500;
5817		pi->thermal_temp_setting.temperature_high = 100000;
5818		pi->thermal_temp_setting.temperature_shutdown = 104000;
5819	}
5820
5821	pi->uvd_enabled = false;
5822
5823	dpm_table = &pi->smc_state_table;
5824
5825	gpio = radeon_atombios_lookup_gpio(rdev, VDDC_VRHOT_GPIO_PINID);
5826	if (gpio.valid) {
5827		dpm_table->VRHotGpio = gpio.shift;
5828		rdev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
5829	} else {
5830		dpm_table->VRHotGpio = CISLANDS_UNUSED_GPIO_PIN;
5831		rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
5832	}
5833
5834	gpio = radeon_atombios_lookup_gpio(rdev, PP_AC_DC_SWITCH_GPIO_PINID);
5835	if (gpio.valid) {
5836		dpm_table->AcDcGpio = gpio.shift;
5837		rdev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_HARDWAREDC;
5838	} else {
5839		dpm_table->AcDcGpio = CISLANDS_UNUSED_GPIO_PIN;
5840		rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_HARDWAREDC;
5841	}
5842
5843	gpio = radeon_atombios_lookup_gpio(rdev, VDDC_PCC_GPIO_PINID);
5844	if (gpio.valid) {
5845		u32 tmp = RREG32_SMC(CNB_PWRMGT_CNTL);
5846
5847		switch (gpio.shift) {
5848		case 0:
5849			tmp &= ~GNB_SLOW_MODE_MASK;
5850			tmp |= GNB_SLOW_MODE(1);
5851			break;
5852		case 1:
5853			tmp &= ~GNB_SLOW_MODE_MASK;
5854			tmp |= GNB_SLOW_MODE(2);
5855			break;
5856		case 2:
5857			tmp |= GNB_SLOW;
5858			break;
5859		case 3:
5860			tmp |= FORCE_NB_PS1;
5861			break;
5862		case 4:
5863			tmp |= DPM_ENABLED;
5864			break;
5865		default:
5866			DRM_DEBUG("Invalid PCC GPIO: %u!\n", gpio.shift);
5867			break;
5868		}
5869		WREG32_SMC(CNB_PWRMGT_CNTL, tmp);
5870	}
5871
5872	pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5873	pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5874	pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5875	if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
5876		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5877	else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
5878		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5879
5880	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
5881		if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
5882			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5883		else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
5884			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5885		else
5886			rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
5887	}
5888
5889	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
5890		if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
5891			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5892		else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
5893			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5894		else
5895			rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
5896	}
5897
5898	pi->vddc_phase_shed_control = true;
5899
5900#if defined(CONFIG_ACPI)
5901	pi->pcie_performance_request =
5902		radeon_acpi_is_pcie_performance_request_supported(rdev);
5903#else
5904	pi->pcie_performance_request = false;
5905#endif
5906
5907	if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
5908				   &frev, &crev, &data_offset)) {
5909		pi->caps_sclk_ss_support = true;
5910		pi->caps_mclk_ss_support = true;
5911		pi->dynamic_ss = true;
5912	} else {
5913		pi->caps_sclk_ss_support = false;
5914		pi->caps_mclk_ss_support = false;
5915		pi->dynamic_ss = true;
5916	}
5917
5918	if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
5919		pi->thermal_protection = true;
5920	else
5921		pi->thermal_protection = false;
5922
5923	pi->caps_dynamic_ac_timing = true;
5924
5925	pi->uvd_power_gated = false;
5926
5927	/* make sure dc limits are valid */
5928	if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
5929	    (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
5930		rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
5931			rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
5932
5933	pi->fan_ctrl_is_in_default_mode = true;
5934
5935	return 0;
5936}
5937
5938void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
5939						    struct seq_file *m)
5940{
5941	struct ci_power_info *pi = ci_get_pi(rdev);
5942	struct radeon_ps *rps = &pi->current_rps;
5943	u32 sclk = ci_get_average_sclk_freq(rdev);
5944	u32 mclk = ci_get_average_mclk_freq(rdev);
5945
5946	seq_printf(m, "uvd    %sabled\n", pi->uvd_enabled ? "en" : "dis");
5947	seq_printf(m, "vce    %sabled\n", rps->vce_active ? "en" : "dis");
5948	seq_printf(m, "power level avg    sclk: %u mclk: %u\n",
5949		   sclk, mclk);
5950}
5951
5952void ci_dpm_print_power_state(struct radeon_device *rdev,
5953			      struct radeon_ps *rps)
5954{
5955	struct ci_ps *ps = ci_get_ps(rps);
5956	struct ci_pl *pl;
5957	int i;
5958
5959	r600_dpm_print_class_info(rps->class, rps->class2);
5960	r600_dpm_print_cap_info(rps->caps);
5961	printk("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
5962	for (i = 0; i < ps->performance_level_count; i++) {
5963		pl = &ps->performance_levels[i];
5964		printk("\t\tpower level %d    sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
5965		       i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
5966	}
5967	r600_dpm_print_ps_status(rdev, rps);
5968}
5969
5970u32 ci_dpm_get_current_sclk(struct radeon_device *rdev)
5971{
5972	u32 sclk = ci_get_average_sclk_freq(rdev);
5973
5974	return sclk;
5975}
5976
5977u32 ci_dpm_get_current_mclk(struct radeon_device *rdev)
5978{
5979	u32 mclk = ci_get_average_mclk_freq(rdev);
5980
5981	return mclk;
5982}
5983
5984u32 ci_dpm_get_sclk(struct radeon_device *rdev, bool low)
5985{
5986	struct ci_power_info *pi = ci_get_pi(rdev);
5987	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
5988
5989	if (low)
5990		return requested_state->performance_levels[0].sclk;
5991	else
5992		return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
5993}
5994
5995u32 ci_dpm_get_mclk(struct radeon_device *rdev, bool low)
5996{
5997	struct ci_power_info *pi = ci_get_pi(rdev);
5998	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
5999
6000	if (low)
6001		return requested_state->performance_levels[0].mclk;
6002	else
6003		return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
6004}
6005