/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define SWSMU_CODE_LAYER_L2

#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/i2c.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_atombios.h"
#include "soc15_common.h"
#include "smu_v11_0.h"
#include "smu11_driver_if_navi10.h"
#include "atom.h"
#include "navi10_ppt.h"
#include "smu_v11_0_pptable.h"
#include "smu_v11_0_ppsmc.h"
#include "nbio/nbio_2_3_offset.h"
#include "nbio/nbio_2_3_sh_mask.h"
#include "thm/thm_11_0_2_offset.h"
#include "thm/thm_11_0_2_sh_mask.h"

#include "asic_reg/mp/mp_11_0_sh_mask.h"
#include "smu_cmn.h"
#include "smu_11_0_cdr_table.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))

#define FEATURE_MASK(feature) (1ULL << feature)
#define SMC_DPM_FEATURE ( \
	FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT) | \
	FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) | \
	FEATURE_MASK(FEATURE_DPM_GFX_PACE_BIT) | \
	FEATURE_MASK(FEATURE_DPM_UCLK_BIT) | \
	FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | \
	FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT) | \
	FEATURE_MASK(FEATURE_DPM_LINK_BIT) | \
	FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT))
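
/*
 * Each entry is MSG_MAP(driver message, ASIC-specific PPSMC message,
 * valid_in_vf): the last field marks messages that may still be issued
 * when the device runs as an SR-IOV virtual function.
 */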
static struct cmn2asic_msg_mapping navi10_message_map[SMU_MSG_MAX_COUNT] = {
	MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
	MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
	MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
	MSG_MAP(SetAllowedFeaturesMaskLow, PPSMC_MSG_SetAllowedFeaturesMaskLow, 0),
	MSG_MAP(SetAllowedFeaturesMaskHigh, PPSMC_MSG_SetAllowedFeaturesMaskHigh, 0),
	MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 0),
	MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 0),
	MSG_MAP(EnableSmuFeaturesLow, PPSMC_MSG_EnableSmuFeaturesLow, 1),
	MSG_MAP(EnableSmuFeaturesHigh, PPSMC_MSG_EnableSmuFeaturesHigh, 1),
	MSG_MAP(DisableSmuFeaturesLow, PPSMC_MSG_DisableSmuFeaturesLow, 1),
	MSG_MAP(DisableSmuFeaturesHigh, PPSMC_MSG_DisableSmuFeaturesHigh, 1),
	MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetEnabledSmuFeaturesLow, 1),
	MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetEnabledSmuFeaturesHigh, 1),
	MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 1),
	MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0),
	MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 0),
	MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 0),
	MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 0),
	MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0),
	MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 0),
	MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0),
	MSG_MAP(UseDefaultPPTable, PPSMC_MSG_UseDefaultPPTable, 0),
	MSG_MAP(UseBackupPPTable, PPSMC_MSG_UseBackupPPTable, 0),
	MSG_MAP(RunBtc, PPSMC_MSG_RunBtc, 0),
	MSG_MAP(EnterBaco, PPSMC_MSG_EnterBaco, 0),
	MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 0),
	MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 0),
	MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 1),
	MSG_MAP(SetHardMaxByFreq, PPSMC_MSG_SetHardMaxByFreq, 0),
	MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1),
	MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1),
	MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1),
	MSG_MAP(SetMemoryChannelConfig, PPSMC_MSG_SetMemoryChannelConfig, 0),
	MSG_MAP(SetGeminiMode, PPSMC_MSG_SetGeminiMode, 0),
	MSG_MAP(SetGeminiApertureHigh, PPSMC_MSG_SetGeminiApertureHigh, 0),
	MSG_MAP(SetGeminiApertureLow, PPSMC_MSG_SetGeminiApertureLow, 0),
	MSG_MAP(OverridePcieParameters, PPSMC_MSG_OverridePcieParameters, 0),
	MSG_MAP(SetMinDeepSleepDcefclk, PPSMC_MSG_SetMinDeepSleepDcefclk, 0),
	MSG_MAP(ReenableAcDcInterrupt, PPSMC_MSG_ReenableAcDcInterrupt, 0),
	MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource, 0),
	MSG_MAP(SetUclkFastSwitch, PPSMC_MSG_SetUclkFastSwitch, 0),
	MSG_MAP(SetVideoFps, PPSMC_MSG_SetVideoFps, 0),
	MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 1),
	MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0),
	MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0),
	MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0),
	MSG_MAP(ConfigureGfxDidt, PPSMC_MSG_ConfigureGfxDidt, 0),
	MSG_MAP(NumOfDisplays, PPSMC_MSG_NumOfDisplays, 0),
	MSG_MAP(SetSystemVirtualDramAddrHigh, PPSMC_MSG_SetSystemVirtualDramAddrHigh, 0),
	MSG_MAP(SetSystemVirtualDramAddrLow, PPSMC_MSG_SetSystemVirtualDramAddrLow, 0),
	MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 0),
	MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 0),
	MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0),
	MSG_MAP(GetDcModeMaxDpmFreq, PPSMC_MSG_GetDcModeMaxDpmFreq, 1),
	MSG_MAP(GetDebugData, PPSMC_MSG_GetDebugData, 0),
	MSG_MAP(ExitBaco, PPSMC_MSG_ExitBaco, 0),
	MSG_MAP(PrepareMp1ForReset, PPSMC_MSG_PrepareMp1ForReset, 0),
	MSG_MAP(PrepareMp1ForShutdown, PPSMC_MSG_PrepareMp1ForShutdown, 0),
	MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 0),
	MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 0),
	MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 0),
	MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 0),
	MSG_MAP(BacoAudioD3PME, PPSMC_MSG_BacoAudioD3PME, 0),
	MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0),
	MSG_MAP(DAL_DISABLE_DUMMY_PSTATE_CHANGE, PPSMC_MSG_DALDisableDummyPstateChange, 0),
	MSG_MAP(DAL_ENABLE_DUMMY_PSTATE_CHANGE, PPSMC_MSG_DALEnableDummyPstateChange, 0),
	MSG_MAP(GetVoltageByDpm, PPSMC_MSG_GetVoltageByDpm, 0),
	MSG_MAP(GetVoltageByDpmOverdrive, PPSMC_MSG_GetVoltageByDpmOverdrive, 0),
	MSG_MAP(SetMGpuFanBoostLimitRpm, PPSMC_MSG_SetMGpuFanBoostLimitRpm, 0),
	MSG_MAP(SET_DRIVER_DUMMY_TABLE_DRAM_ADDR_HIGH, PPSMC_MSG_SetDriverDummyTableDramAddrHigh, 0),
	MSG_MAP(SET_DRIVER_DUMMY_TABLE_DRAM_ADDR_LOW, PPSMC_MSG_SetDriverDummyTableDramAddrLow, 0),
	MSG_MAP(GET_UMC_FW_WA, PPSMC_MSG_GetUMCFWWA, 0),
};

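/*
 * Several generic clock IDs alias the same ASIC clock domain: SCLK shares
 * GFXCLK and MCLK shares UCLK, while FCLK requests are serviced through
 * SOCCLK (navi1x exposes no separate PPCLK_FCLK domain).
 */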
static struct cmn2asic_mapping navi10_clk_map[SMU_CLK_COUNT] = {
	CLK_MAP(GFXCLK, PPCLK_GFXCLK),
	CLK_MAP(SCLK, PPCLK_GFXCLK),
	CLK_MAP(SOCCLK, PPCLK_SOCCLK),
	CLK_MAP(FCLK, PPCLK_SOCCLK),
	CLK_MAP(UCLK, PPCLK_UCLK),
	CLK_MAP(MCLK, PPCLK_UCLK),
	CLK_MAP(DCLK, PPCLK_DCLK),
	CLK_MAP(VCLK, PPCLK_VCLK),
	CLK_MAP(DCEFCLK, PPCLK_DCEFCLK),
	CLK_MAP(DISPCLK, PPCLK_DISPCLK),
	CLK_MAP(PIXCLK, PPCLK_PIXCLK),
	CLK_MAP(PHYCLK, PPCLK_PHYCLK),
};

static struct cmn2asic_mapping navi10_feature_mask_map[SMU_FEATURE_COUNT] = {
	FEA_MAP(DPM_PREFETCHER),
	FEA_MAP(DPM_GFXCLK),
	FEA_MAP(DPM_GFX_PACE),
	FEA_MAP(DPM_UCLK),
	FEA_MAP(DPM_SOCCLK),
	FEA_MAP(DPM_MP0CLK),
	FEA_MAP(DPM_LINK),
	FEA_MAP(DPM_DCEFCLK),
	FEA_MAP(MEM_VDDCI_SCALING),
	FEA_MAP(MEM_MVDD_SCALING),
	FEA_MAP(DS_GFXCLK),
	FEA_MAP(DS_SOCCLK),
	FEA_MAP(DS_LCLK),
	FEA_MAP(DS_DCEFCLK),
	FEA_MAP(DS_UCLK),
	FEA_MAP(GFX_ULV),
	FEA_MAP(FW_DSTATE),
	FEA_MAP(GFXOFF),
	FEA_MAP(BACO),
	FEA_MAP(VCN_PG),
	FEA_MAP(JPEG_PG),
	FEA_MAP(USB_PG),
	FEA_MAP(RSMU_SMN_CG),
	FEA_MAP(PPT),
	FEA_MAP(TDC),
	FEA_MAP(GFX_EDC),
	FEA_MAP(APCC_PLUS),
	FEA_MAP(GTHR),
	FEA_MAP(ACDC),
	FEA_MAP(VR0HOT),
	FEA_MAP(VR1HOT),
	FEA_MAP(FW_CTF),
	FEA_MAP(FAN_CONTROL),
	FEA_MAP(THERMAL),
	FEA_MAP(GFX_DCS),
	FEA_MAP(RM),
	FEA_MAP(LED_DISPLAY),
	FEA_MAP(GFX_SS),
	FEA_MAP(OUT_OF_BAND_MONITOR),
	FEA_MAP(TEMP_DEPENDENT_VMIN),
	FEA_MAP(MMHUB_PG),
	FEA_MAP(ATHUB_PG),
	FEA_MAP(APCC_DFLL),
};

static struct cmn2asic_mapping navi10_table_map[SMU_TABLE_COUNT] = {
	TAB_MAP(PPTABLE),
	TAB_MAP(WATERMARKS),
	TAB_MAP(AVFS),
	TAB_MAP(AVFS_PSM_DEBUG),
	TAB_MAP(AVFS_FUSE_OVERRIDE),
	TAB_MAP(PMSTATUSLOG),
	TAB_MAP(SMU_METRICS),
	TAB_MAP(DRIVER_SMU_CONFIG),
	TAB_MAP(ACTIVITY_MONITOR_COEFF),
	TAB_MAP(OVERDRIVE),
	TAB_MAP(I2C_COMMANDS),
	TAB_MAP(PACE),
};

static struct cmn2asic_mapping navi10_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
	PWR_MAP(AC),
	PWR_MAP(DC),
};

static struct cmn2asic_mapping navi10_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT, WORKLOAD_PPLIB_DEFAULT_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
};

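/*
 * Treat the ASIC as secure unless bit 19 of the MP0 firmware interface
 * register is clear; that bit reports whether the part is running with
 * secure (production) firmware.
 */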
static bool is_asic_secure(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	bool is_secure = true;
	uint32_t mp0_fw_intf;

	mp0_fw_intf = RREG32_PCIE(MP0_Public |
				  (smnMP0_FW_INTF & 0xffffffff));

	if (!(mp0_fw_intf & (1 << 19)))
		is_secure = false;

	return is_secure;
}

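/*
 * Build the driver-allowed feature mask. The SMU consumes the mask as two
 * 32-bit words (num == 2); the code fills both at once by treating the
 * array as a single uint64_t and OR-ing FEATURE_MASK() bits into it.
 */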
static int
navi10_get_allowed_feature_mask(struct smu_context *smu,
				uint32_t *feature_mask, uint32_t num)
{
	struct amdgpu_device *adev = smu->adev;

	if (num > 2)
		return -EINVAL;

	memset(feature_mask, 0, sizeof(uint32_t) * num);

	*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT)
				| FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT)
				| FEATURE_MASK(FEATURE_RSMU_SMN_CG_BIT)
				| FEATURE_MASK(FEATURE_DS_SOCCLK_BIT)
				| FEATURE_MASK(FEATURE_PPT_BIT)
				| FEATURE_MASK(FEATURE_TDC_BIT)
				| FEATURE_MASK(FEATURE_GFX_EDC_BIT)
				| FEATURE_MASK(FEATURE_APCC_PLUS_BIT)
				| FEATURE_MASK(FEATURE_VR0HOT_BIT)
				| FEATURE_MASK(FEATURE_FAN_CONTROL_BIT)
				| FEATURE_MASK(FEATURE_THERMAL_BIT)
				| FEATURE_MASK(FEATURE_LED_DISPLAY_BIT)
				| FEATURE_MASK(FEATURE_DS_LCLK_BIT)
				| FEATURE_MASK(FEATURE_DS_DCEFCLK_BIT)
				| FEATURE_MASK(FEATURE_FW_DSTATE_BIT)
				| FEATURE_MASK(FEATURE_BACO_BIT)
				| FEATURE_MASK(FEATURE_GFX_SS_BIT)
				| FEATURE_MASK(FEATURE_APCC_DFLL_BIT)
				| FEATURE_MASK(FEATURE_FW_CTF_BIT)
				| FEATURE_MASK(FEATURE_OUT_OF_BAND_MONITOR_BIT);

	if (adev->pm.pp_feature & PP_SCLK_DPM_MASK)
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);

	if (adev->pm.pp_feature & PP_PCIE_DPM_MASK)
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_LINK_BIT);

	if (adev->pm.pp_feature & PP_DCEFCLK_DPM_MASK)
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT);

	if (adev->pm.pp_feature & PP_ULV_MASK)
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_ULV_BIT);

	if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_GFXCLK_BIT);

	if (adev->pm.pp_feature & PP_GFXOFF_MASK)
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFXOFF_BIT);

	if (smu->adev->pg_flags & AMD_PG_SUPPORT_MMHUB)
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MMHUB_PG_BIT);

	if (smu->adev->pg_flags & AMD_PG_SUPPORT_ATHUB)
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ATHUB_PG_BIT);

	if (smu->adev->pg_flags & AMD_PG_SUPPORT_VCN)
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VCN_PG_BIT);

	if (smu->adev->pg_flags & AMD_PG_SUPPORT_JPEG)
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_JPEG_PG_BIT);

	if (smu->dc_controlled_by_gpio)
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ACDC_BIT);

	if (adev->pm.pp_feature & PP_SOCCLK_DPM_MASK)
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);

	/* DPM UCLK enablement should be skipped for navi10 A0 secure board */
	if (!(is_asic_secure(smu) &&
	      (adev->asic_type == CHIP_NAVI10) &&
	      (adev->rev_id == 0)) &&
	    (adev->pm.pp_feature & PP_MCLK_DPM_MASK))
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT)
				| FEATURE_MASK(FEATURE_MEM_VDDCI_SCALING_BIT)
				| FEATURE_MASK(FEATURE_MEM_MVDD_SCALING_BIT);

	/* DS SOCCLK enablement should be skipped for navi10 A0 secure board */
	if (is_asic_secure(smu) &&
	    (adev->asic_type == CHIP_NAVI10) &&
	    (adev->rev_id == 0))
		*(uint64_t *)feature_mask &=
			~FEATURE_MASK(FEATURE_DS_SOCCLK_BIT);

	return 0;
}

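/*
 * BACO (bus active, chip off) support is advertised only when the
 * powerplay table sets the BACO/MACO platform cap and the PX-capable
 * strap is set in RCC_BIF_STRAP0.
 */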
static void navi10_check_bxco_support(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_11_0_powerplay_table *powerplay_table =
		table_context->power_play_table;
	struct smu_baco_context *smu_baco = &smu->smu_baco;
	struct amdgpu_device *adev = smu->adev;
	uint32_t val;

	if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_BACO ||
	    powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_MACO) {
		val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
		smu_baco->platform_support =
			(val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true :
									false;
	}
}

static int navi10_check_powerplay_table(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_11_0_powerplay_table *powerplay_table =
		table_context->power_play_table;

	if (powerplay_table->platform_caps & SMU_11_0_PP_PLATFORM_CAP_HARDWAREDC)
		smu->dc_controlled_by_gpio = true;

	navi10_check_bxco_support(smu);

	table_context->thermal_controller_type =
		powerplay_table->thermal_controller_type;

	/*
	 * Instead of having its own buffer space and get overdrive_table copied,
	 * smu->od_settings just points to the actual overdrive_table
	 */
	smu->od_settings = &powerplay_table->overdrive_table;

	return 0;
}

static int navi10_append_powerplay_table(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *smc_pptable = table_context->driver_pptable;
	struct atom_smc_dpm_info_v4_5 *smc_dpm_table;
	struct atom_smc_dpm_info_v4_7 *smc_dpm_table_v4_7;
	int index, ret;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    smc_dpm_info);

	ret = amdgpu_atombios_get_data_table(adev, index, NULL, NULL, NULL,
					     (uint8_t **)&smc_dpm_table);
	if (ret)
		return ret;

	dev_info(adev->dev, "smc_dpm_info table revision(format.content): %d.%d\n",
		 smc_dpm_table->table_header.format_revision,
		 smc_dpm_table->table_header.content_revision);

	if (smc_dpm_table->table_header.format_revision != 4) {
		dev_err(adev->dev, "smc_dpm_info table format revision is not 4!\n");
		return -EINVAL;
	}

	switch (smc_dpm_table->table_header.content_revision) {
	case 5: /* nv10 and nv14 */
		memcpy(smc_pptable->I2cControllers, smc_dpm_table->I2cControllers,
		       sizeof(*smc_dpm_table) - sizeof(smc_dpm_table->table_header));
		break;
	case 7: /* nv12 */
		ret = amdgpu_atombios_get_data_table(adev, index, NULL, NULL, NULL,
						     (uint8_t **)&smc_dpm_table_v4_7);
		if (ret)
			return ret;
		memcpy(smc_pptable->I2cControllers, smc_dpm_table_v4_7->I2cControllers,
		       sizeof(*smc_dpm_table_v4_7) - sizeof(smc_dpm_table_v4_7->table_header));
		break;
	default:
		dev_err(smu->adev->dev, "smc_dpm_info with unsupported content revision %d!\n",
			smc_dpm_table->table_header.content_revision);
		return -EINVAL;
	}

	if (adev->pm.pp_feature & PP_GFXOFF_MASK) {
		/* TODO: remove it once SMU fw fix it */
		smc_pptable->DebugOverrides |= DPM_OVERRIDE_DISABLE_DFLL_PLL_SHUTDOWN;
	}

	return 0;
}

static int navi10_store_powerplay_table(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_11_0_powerplay_table *powerplay_table =
		table_context->power_play_table;

	memcpy(table_context->driver_pptable, &powerplay_table->smc_pptable,
	       sizeof(PPTable_t));

	return 0;
}

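/*
 * Pptable setup order matters: fetch the powerplay table first, copy out
 * its embedded smc_pptable, append the board-specific smc_dpm_info from
 * ATOM, then sanity-check the combined result.
 */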
static int navi10_setup_pptable(struct smu_context *smu)
{
	int ret = 0;

	ret = smu_v11_0_setup_pptable(smu);
	if (ret)
		return ret;

	ret = navi10_store_powerplay_table(smu);
	if (ret)
		return ret;

	ret = navi10_append_powerplay_table(smu);
	if (ret)
		return ret;

	ret = navi10_check_powerplay_table(smu);
	if (ret)
		return ret;

	return ret;
}

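/*
 * Register the sizes/domains of the driver<->SMU shared tables and
 * allocate the CPU-side caches for metrics, gpu_metrics and watermarks.
 */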
static int navi10_tables_init(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct amdgpu_device *adev = smu->adev;

	SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	if (adev->asic_type == CHIP_NAVI12)
		SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_NV12_t),
			       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	else
		SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
			       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTable_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE,
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF,
		       sizeof(DpmActivityMonitorCoeffInt_t), PAGE_SIZE,
		       AMDGPU_GEM_DOMAIN_VRAM);

	smu_table->metrics_table = kzalloc(adev->asic_type == CHIP_NAVI12 ?
					   sizeof(SmuMetrics_NV12_t) :
					   sizeof(SmuMetrics_t), GFP_KERNEL);
	if (!smu_table->metrics_table)
		goto err0_out;
	smu_table->metrics_time = 0;

	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_0);
	smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
	if (!smu_table->gpu_metrics_table)
		goto err1_out;

	smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
	if (!smu_table->watermarks_table)
		goto err2_out;

	return 0;

err2_out:
	kfree(smu_table->gpu_metrics_table);
err1_out:
	kfree(smu_table->metrics_table);
err0_out:
	return -ENOMEM;
}

static int navi10_get_smu_metrics_data(struct smu_context *smu,
				       MetricsMember_t member,
				       uint32_t *value)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	/*
	 * This works for NV12 also. As although NV12 uses a different
	 * SmuMetrics structure from other NV1X ASICs, they share the
	 * same offsets for the heading parts(those members used here).
	 */
	SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
	int ret = 0;

	mutex_lock(&smu->metrics_lock);

	ret = smu_cmn_get_metrics_table_locked(smu,
					       NULL,
					       false);
	if (ret) {
		mutex_unlock(&smu->metrics_lock);
		return ret;
	}

	switch (member) {
	case METRICS_CURR_GFXCLK:
		*value = metrics->CurrClock[PPCLK_GFXCLK];
		break;
	case METRICS_CURR_SOCCLK:
		*value = metrics->CurrClock[PPCLK_SOCCLK];
		break;
	case METRICS_CURR_UCLK:
		*value = metrics->CurrClock[PPCLK_UCLK];
		break;
	case METRICS_CURR_VCLK:
		*value = metrics->CurrClock[PPCLK_VCLK];
		break;
	case METRICS_CURR_DCLK:
		*value = metrics->CurrClock[PPCLK_DCLK];
		break;
	case METRICS_CURR_DCEFCLK:
		*value = metrics->CurrClock[PPCLK_DCEFCLK];
		break;
	case METRICS_AVERAGE_GFXCLK:
		*value = metrics->AverageGfxclkFrequency;
		break;
	case METRICS_AVERAGE_SOCCLK:
		*value = metrics->AverageSocclkFrequency;
		break;
	case METRICS_AVERAGE_UCLK:
		*value = metrics->AverageUclkFrequency;
		break;
	case METRICS_AVERAGE_GFXACTIVITY:
		*value = metrics->AverageGfxActivity;
		break;
	case METRICS_AVERAGE_MEMACTIVITY:
		*value = metrics->AverageUclkActivity;
		break;
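	/*
	 * SMU reports socket power in watts; shift left by 8 to produce
	 * the 24.8 fixed-point value the amdgpu sensor interface expects.
	 */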
	case METRICS_AVERAGE_SOCKETPOWER:
		*value = metrics->AverageSocketPower << 8;
		break;
	case METRICS_TEMPERATURE_EDGE:
		*value = metrics->TemperatureEdge *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_HOTSPOT:
		*value = metrics->TemperatureHotspot *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_MEM:
		*value = metrics->TemperatureMem *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_VRGFX:
		*value = metrics->TemperatureVrGfx *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_VRSOC:
		*value = metrics->TemperatureVrSoc *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_THROTTLER_STATUS:
		*value = metrics->ThrottlerStatus;
		break;
	case METRICS_CURR_FANSPEED:
		*value = metrics->CurrFanSpeed;
		break;
	default:
		*value = UINT_MAX;
		break;
	}

	mutex_unlock(&smu->metrics_lock);

	return ret;
}

static int navi10_allocate_dpm_context(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	smu_dpm->dpm_context = kzalloc(sizeof(struct smu_11_0_dpm_context),
				       GFP_KERNEL);
	if (!smu_dpm->dpm_context)
		return -ENOMEM;

	smu_dpm->dpm_context_size = sizeof(struct smu_11_0_dpm_context);

	return 0;
}

static int navi10_init_smc_tables(struct smu_context *smu)
{
	int ret = 0;

	ret = navi10_tables_init(smu);
	if (ret)
		return ret;

	ret = navi10_allocate_dpm_context(smu);
	if (ret)
		return ret;

	return smu_v11_0_init_smc_tables(smu);
}

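/*
 * Populate the per-clock DPM tables. When a clock's DPM feature is
 * disabled, fall back to a single level pinned at the VBIOS boot value
 * (boot clocks are in 10 kHz units, hence the divide by 100 to MHz).
 */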
static int navi10_set_default_dpm_table(struct smu_context *smu)
{
	struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
	PPTable_t *driver_ppt = smu->smu_table.driver_pptable;
	struct smu_11_0_dpm_table *dpm_table;
	int ret = 0;

	/* socclk dpm table setup */
	dpm_table = &dpm_context->dpm_tables.soc_table;
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
		ret = smu_v11_0_set_single_dpm_table(smu,
						     SMU_SOCCLK,
						     dpm_table);
		if (ret)
			return ret;
		dpm_table->is_fine_grained =
			!driver_ppt->DpmDescriptor[PPCLK_SOCCLK].SnapToDiscrete;
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.socclk / 100;
		dpm_table->dpm_levels[0].enabled = true;
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[0].value;
	}

	/* gfxclk dpm table setup */
	dpm_table = &dpm_context->dpm_tables.gfx_table;
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
		ret = smu_v11_0_set_single_dpm_table(smu,
						     SMU_GFXCLK,
						     dpm_table);
		if (ret)
			return ret;
		dpm_table->is_fine_grained =
			!driver_ppt->DpmDescriptor[PPCLK_GFXCLK].SnapToDiscrete;
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
		dpm_table->dpm_levels[0].enabled = true;
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[0].value;
	}

	/* uclk dpm table setup */
	dpm_table = &dpm_context->dpm_tables.uclk_table;
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		ret = smu_v11_0_set_single_dpm_table(smu,
						     SMU_UCLK,
						     dpm_table);
		if (ret)
			return ret;
		dpm_table->is_fine_grained =
			!driver_ppt->DpmDescriptor[PPCLK_UCLK].SnapToDiscrete;
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.uclk / 100;
		dpm_table->dpm_levels[0].enabled = true;
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[0].value;
	}

	/* vclk dpm table setup */
	dpm_table = &dpm_context->dpm_tables.vclk_table;
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
		ret = smu_v11_0_set_single_dpm_table(smu,
						     SMU_VCLK,
						     dpm_table);
		if (ret)
			return ret;
		dpm_table->is_fine_grained =
			!driver_ppt->DpmDescriptor[PPCLK_VCLK].SnapToDiscrete;
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.vclk / 100;
		dpm_table->dpm_levels[0].enabled = true;
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[0].value;
	}

	/* dclk dpm table setup */
	dpm_table = &dpm_context->dpm_tables.dclk_table;
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
		ret = smu_v11_0_set_single_dpm_table(smu,
						     SMU_DCLK,
						     dpm_table);
		if (ret)
			return ret;
		dpm_table->is_fine_grained =
			!driver_ppt->DpmDescriptor[PPCLK_DCLK].SnapToDiscrete;
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dclk / 100;
		dpm_table->dpm_levels[0].enabled = true;
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[0].value;
	}

	/* dcefclk dpm table setup */
	dpm_table = &dpm_context->dpm_tables.dcef_table;
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
		ret = smu_v11_0_set_single_dpm_table(smu,
						     SMU_DCEFCLK,
						     dpm_table);
		if (ret)
			return ret;
		dpm_table->is_fine_grained =
			!driver_ppt->DpmDescriptor[PPCLK_DCEFCLK].SnapToDiscrete;
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dcefclk / 100;
		dpm_table->dpm_levels[0].enabled = true;
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[0].value;
	}

	/* pixelclk dpm table setup */
	dpm_table = &dpm_context->dpm_tables.pixel_table;
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
		ret = smu_v11_0_set_single_dpm_table(smu,
						     SMU_PIXCLK,
						     dpm_table);
		if (ret)
			return ret;
		dpm_table->is_fine_grained =
			!driver_ppt->DpmDescriptor[PPCLK_PIXCLK].SnapToDiscrete;
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dcefclk / 100;
		dpm_table->dpm_levels[0].enabled = true;
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[0].value;
	}

	/* displayclk dpm table setup */
	dpm_table = &dpm_context->dpm_tables.display_table;
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
		ret = smu_v11_0_set_single_dpm_table(smu,
						     SMU_DISPCLK,
						     dpm_table);
		if (ret)
			return ret;
		dpm_table->is_fine_grained =
			!driver_ppt->DpmDescriptor[PPCLK_DISPCLK].SnapToDiscrete;
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dcefclk / 100;
		dpm_table->dpm_levels[0].enabled = true;
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[0].value;
	}

	/* phyclk dpm table setup */
	dpm_table = &dpm_context->dpm_tables.phy_table;
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
		ret = smu_v11_0_set_single_dpm_table(smu,
						     SMU_PHYCLK,
						     dpm_table);
		if (ret)
			return ret;
		dpm_table->is_fine_grained =
			!driver_ppt->DpmDescriptor[PPCLK_PHYCLK].SnapToDiscrete;
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dcefclk / 100;
		dpm_table->dpm_levels[0].enabled = true;
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[0].value;
	}

	return 0;
}

static int navi10_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
{
	int ret = 0;

	if (enable) {
		/* vcn dpm on is a prerequisite for vcn power gate messages */
		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1, NULL);
			if (ret)
				return ret;
		}
	} else {
		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
			ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
			if (ret)
				return ret;
		}
	}

	return ret;
}

static int navi10_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
{
	int ret = 0;

	if (enable) {
		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
			ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerUpJpeg, NULL);
			if (ret)
				return ret;
		}
	} else {
		if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
			ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownJpeg, NULL);
			if (ret)
				return ret;
		}
	}

	return ret;
}

static int navi10_get_current_clk_freq_by_table(struct smu_context *smu,
						enum smu_clk_type clk_type,
						uint32_t *value)
{
	MetricsMember_t member_type;
	int clk_id = 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return clk_id;

	switch (clk_id) {
	case PPCLK_GFXCLK:
		member_type = METRICS_CURR_GFXCLK;
		break;
	case PPCLK_UCLK:
		member_type = METRICS_CURR_UCLK;
		break;
	case PPCLK_SOCCLK:
		member_type = METRICS_CURR_SOCCLK;
		break;
	case PPCLK_VCLK:
		member_type = METRICS_CURR_VCLK;
		break;
	case PPCLK_DCLK:
		member_type = METRICS_CURR_DCLK;
		break;
	case PPCLK_DCEFCLK:
		member_type = METRICS_CURR_DCEFCLK;
		break;
	default:
		return -EINVAL;
	}

	return navi10_get_smu_metrics_data(smu,
					   member_type,
					   value);
}

static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu_clk_type clk_type)
{
	PPTable_t *pptable = smu->smu_table.driver_pptable;
	DpmDescriptor_t *dpm_desc = NULL;
	uint32_t clk_index = 0;

	clk_index = smu_cmn_to_asic_specific_index(smu,
						   CMN2ASIC_MAPPING_CLK,
						   clk_type);
	dpm_desc = &pptable->DpmDescriptor[clk_index];

	/* 0 - Fine grained DPM, 1 - Discrete DPM */
	return dpm_desc->SnapToDiscrete == 0;
}

static inline bool navi10_od_feature_is_supported(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODFEATURE_CAP cap)
{
	return od_table->cap[cap];
}

static void navi10_od_setting_get_range(struct smu_11_0_overdrive_table *od_table,
					enum SMU_11_0_ODSETTING_ID setting,
					uint32_t *min, uint32_t *max)
{
	if (min)
		*min = od_table->min[setting];
	if (max)
		*max = od_table->max[setting];
}

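/*
 * Backs the pp_dpm_* and pp_od_clk_voltage sysfs reads. For fine-grained
 * DPM only min/current/max are meaningful, so a synthetic three-entry
 * list is printed with the current level marked by '*'.
 */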
static int navi10_print_clk_levels(struct smu_context *smu,
				   enum smu_clk_type clk_type, char *buf)
{
	uint16_t *curve_settings;
	int i, size = 0, ret = 0;
	uint32_t cur_value = 0, value = 0, count = 0;
	uint32_t freq_values[3] = {0};
	uint32_t mark_index = 0;
	struct smu_table_context *table_context = &smu->smu_table;
	uint32_t gen_speed, lane_width;
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
	struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;
	PPTable_t *pptable = (PPTable_t *)table_context->driver_pptable;
	OverDriveTable_t *od_table =
		(OverDriveTable_t *)table_context->overdrive_table;
	struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
	uint32_t min_value, max_value;

	switch (clk_type) {
	case SMU_GFXCLK:
	case SMU_SCLK:
	case SMU_SOCCLK:
	case SMU_MCLK:
	case SMU_UCLK:
	case SMU_FCLK:
	case SMU_DCEFCLK:
		ret = navi10_get_current_clk_freq_by_table(smu, clk_type, &cur_value);
		if (ret)
			return size;

		ret = smu_v11_0_get_dpm_level_count(smu, clk_type, &count);
		if (ret)
			return size;

		if (!navi10_is_support_fine_grained_dpm(smu, clk_type)) {
			for (i = 0; i < count; i++) {
				ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, i, &value);
				if (ret)
					return size;

				size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
						cur_value == value ? "*" : "");
			}
		} else {
			ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, 0, &freq_values[0]);
			if (ret)
				return size;
			ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, count - 1, &freq_values[2]);
			if (ret)
				return size;

			freq_values[1] = cur_value;
			mark_index = cur_value == freq_values[0] ? 0 :
				     cur_value == freq_values[2] ? 2 : 1;
			if (mark_index != 1)
				freq_values[1] = (freq_values[0] + freq_values[2]) / 2;

			for (i = 0; i < 3; i++) {
				size += sprintf(buf + size, "%d: %uMhz %s\n", i, freq_values[i],
						i == mark_index ? "*" : "");
			}
		}
		break;
	case SMU_PCIE:
		gen_speed = smu_v11_0_get_current_pcie_link_speed_level(smu);
		lane_width = smu_v11_0_get_current_pcie_link_width_level(smu);
		for (i = 0; i < NUM_LINK_LEVELS; i++)
			size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
					(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 0) ? "2.5GT/s," :
					(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 1) ? "5.0GT/s," :
					(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 2) ? "8.0GT/s," :
					(dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 3) ? "16.0GT/s," : "",
					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 1) ? "x1" :
					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 2) ? "x2" :
					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 3) ? "x4" :
					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 4) ? "x8" :
					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 5) ? "x12" :
					(dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 6) ? "x16" : "",
					pptable->LclkFreq[i],
					(gen_speed == dpm_context->dpm_tables.pcie_table.pcie_gen[i]) &&
					(lane_width == dpm_context->dpm_tables.pcie_table.pcie_lane[i]) ?
					"*" : "");
		break;
	case SMU_OD_SCLK:
		if (!smu->od_enabled || !od_table || !od_settings)
			break;
		if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS))
			break;
		size += sprintf(buf + size, "OD_SCLK:\n");
		size += sprintf(buf + size, "0: %uMhz\n1: %uMhz\n", od_table->GfxclkFmin, od_table->GfxclkFmax);
		break;
	case SMU_OD_MCLK:
		if (!smu->od_enabled || !od_table || !od_settings)
			break;
		if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX))
			break;
		size += sprintf(buf + size, "OD_MCLK:\n");
		size += sprintf(buf + size, "1: %uMHz\n", od_table->UclkFmax);
		break;
	case SMU_OD_VDDC_CURVE:
		if (!smu->od_enabled || !od_table || !od_settings)
			break;
		if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE))
			break;
		size += sprintf(buf + size, "OD_VDDC_CURVE:\n");
		for (i = 0; i < 3; i++) {
			switch (i) {
			case 0:
				curve_settings = &od_table->GfxclkFreq1;
				break;
			case 1:
				curve_settings = &od_table->GfxclkFreq2;
				break;
			case 2:
				curve_settings = &od_table->GfxclkFreq3;
				break;
			default:
				break;
			}
			size += sprintf(buf + size, "%d: %uMHz %umV\n", i, curve_settings[0], curve_settings[1] / NAVI10_VOLTAGE_SCALE);
		}
		break;
	case SMU_OD_RANGE:
		if (!smu->od_enabled || !od_table || !od_settings)
			break;
		size = sprintf(buf, "%s:\n", "OD_RANGE");

		if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) {
			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMIN,
						    &min_value, NULL);
			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMAX,
						    NULL, &max_value);
			size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
					min_value, max_value);
		}

		if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX)) {
			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_UCLKFMAX,
						    &min_value, &max_value);
			size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
					min_value, max_value);
		}

		if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE)) {
			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1,
						    &min_value, &max_value);
			size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
					min_value, max_value);
			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P1,
						    &min_value, &max_value);
			size += sprintf(buf + size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
					min_value, max_value);
			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P2,
						    &min_value, &max_value);
			size += sprintf(buf + size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
					min_value, max_value);
			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P2,
						    &min_value, &max_value);
			size += sprintf(buf + size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
					min_value, max_value);
			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P3,
						    &min_value, &max_value);
			size += sprintf(buf + size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
					min_value, max_value);
			navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P3,
						    &min_value, &max_value);
			size += sprintf(buf + size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
					min_value, max_value);
		}

		break;
	default:
		break;
	}

	return size;
}

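/*
 * 'mask' is a bitmask of allowed DPM levels from sysfs; the lowest and
 * highest set bits become the soft min/max frequency limits.
 */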
static int navi10_force_clk_levels(struct smu_context *smu,
				   enum smu_clk_type clk_type, uint32_t mask)
{
	int ret = 0, size = 0;
	uint32_t soft_min_level = 0, soft_max_level = 0, min_freq = 0, max_freq = 0;

	soft_min_level = mask ? (ffs(mask) - 1) : 0;
	soft_max_level = mask ? (fls(mask) - 1) : 0;

	switch (clk_type) {
	case SMU_GFXCLK:
	case SMU_SCLK:
	case SMU_SOCCLK:
	case SMU_MCLK:
	case SMU_UCLK:
	case SMU_FCLK:
		/* There is only 2 levels for fine grained DPM */
		if (navi10_is_support_fine_grained_dpm(smu, clk_type)) {
			soft_max_level = (soft_max_level >= 1 ? 1 : 0);
			soft_min_level = (soft_min_level >= 1 ? 1 : 0);
		}

		ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
		if (ret)
			return size;

		ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq);
		if (ret)
			return size;

		ret = smu_v11_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
		if (ret)
			return size;
		break;
	case SMU_DCEFCLK:
		dev_info(smu->adev->dev, "Setting DCEFCLK min/max dpm level is not supported!\n");
		break;

	default:
		break;
	}

	return size;
}

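/*
 * Derive the UMD pstate (profiling) clocks. Peak gfxclk is looked up per
 * SKU via the PCI revision ID; the standard levels fall back to the
 * minimum levels when the profiling clocks exceed what the board offers.
 */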
static int navi10_populate_umd_state_clk(struct smu_context *smu)
{
	struct smu_11_0_dpm_context *dpm_context =
		smu->smu_dpm.dpm_context;
	struct smu_11_0_dpm_table *gfx_table =
		&dpm_context->dpm_tables.gfx_table;
	struct smu_11_0_dpm_table *mem_table =
		&dpm_context->dpm_tables.uclk_table;
	struct smu_11_0_dpm_table *soc_table =
		&dpm_context->dpm_tables.soc_table;
	struct smu_umd_pstate_table *pstate_table =
		&smu->pstate_table;
	struct amdgpu_device *adev = smu->adev;
	uint32_t sclk_freq;

	pstate_table->gfxclk_pstate.min = gfx_table->min;
	switch (adev->asic_type) {
	case CHIP_NAVI10:
		switch (adev->pdev->revision) {
		case 0xf0: /* XTX */
		case 0xc0:
			sclk_freq = NAVI10_PEAK_SCLK_XTX;
			break;
		case 0xf1: /* XT */
		case 0xc1:
			sclk_freq = NAVI10_PEAK_SCLK_XT;
			break;
		default: /* XL */
			sclk_freq = NAVI10_PEAK_SCLK_XL;
			break;
		}
		break;
	case CHIP_NAVI14:
		switch (adev->pdev->revision) {
		case 0xc7: /* XT */
		case 0xf4:
			sclk_freq = NAVI14_UMD_PSTATE_PEAK_XT_GFXCLK;
			break;
		case 0xc1: /* XTM */
		case 0xf2:
			sclk_freq = NAVI14_UMD_PSTATE_PEAK_XTM_GFXCLK;
			break;
		case 0xc3: /* XLM */
		case 0xf3:
			sclk_freq = NAVI14_UMD_PSTATE_PEAK_XLM_GFXCLK;
			break;
		case 0xc5: /* XTX */
		case 0xf6:
			sclk_freq = NAVI14_UMD_PSTATE_PEAK_XTX_GFXCLK;
			break;
		default: /* XL */
			sclk_freq = NAVI14_UMD_PSTATE_PEAK_XL_GFXCLK;
			break;
		}
		break;
	case CHIP_NAVI12:
		sclk_freq = NAVI12_UMD_PSTATE_PEAK_GFXCLK;
		break;
	default:
		sclk_freq = gfx_table->dpm_levels[gfx_table->count - 1].value;
		break;
	}
	pstate_table->gfxclk_pstate.peak = sclk_freq;

	pstate_table->uclk_pstate.min = mem_table->min;
	pstate_table->uclk_pstate.peak = mem_table->max;

	pstate_table->socclk_pstate.min = soc_table->min;
	pstate_table->socclk_pstate.peak = soc_table->max;

	if (gfx_table->max > NAVI10_UMD_PSTATE_PROFILING_GFXCLK &&
	    mem_table->max > NAVI10_UMD_PSTATE_PROFILING_MEMCLK &&
	    soc_table->max > NAVI10_UMD_PSTATE_PROFILING_SOCCLK) {
		pstate_table->gfxclk_pstate.standard =
			NAVI10_UMD_PSTATE_PROFILING_GFXCLK;
		pstate_table->uclk_pstate.standard =
			NAVI10_UMD_PSTATE_PROFILING_MEMCLK;
		pstate_table->socclk_pstate.standard =
			NAVI10_UMD_PSTATE_PROFILING_SOCCLK;
	} else {
		pstate_table->gfxclk_pstate.standard =
			pstate_table->gfxclk_pstate.min;
		pstate_table->uclk_pstate.standard =
			pstate_table->uclk_pstate.min;
		pstate_table->socclk_pstate.standard =
			pstate_table->socclk_pstate.min;
	}

	return 0;
}

static int navi10_get_clock_by_type_with_latency(struct smu_context *smu,
						 enum smu_clk_type clk_type,
						 struct pp_clock_levels_with_latency *clocks)
{
	int ret = 0, i = 0;
	uint32_t level_count = 0, freq = 0;

	switch (clk_type) {
	case SMU_GFXCLK:
	case SMU_DCEFCLK:
	case SMU_SOCCLK:
	case SMU_MCLK:
	case SMU_UCLK:
		ret = smu_v11_0_get_dpm_level_count(smu, clk_type, &level_count);
		if (ret)
			return ret;

		level_count = min(level_count, (uint32_t)MAX_NUM_CLOCKS);
		clocks->num_levels = level_count;

		for (i = 0; i < level_count; i++) {
			ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, i, &freq);
			if (ret)
				return ret;

			clocks->data[i].clocks_in_khz = freq * 1000;
			clocks->data[i].latency_in_us = 0;
		}
		break;
	default:
		break;
	}

	return ret;
}

static int navi10_pre_display_config_changed(struct smu_context *smu)
{
	int ret = 0;
	uint32_t max_freq = 0;

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0, NULL);
	if (ret)
		return ret;

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		ret = smu_v11_0_get_dpm_ultimate_freq(smu, SMU_UCLK, NULL, &max_freq);
		if (ret)
			return ret;
		ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, 0, max_freq);
		if (ret)
			return ret;
	}

	return ret;
}

static int navi10_display_config_changed(struct smu_context *smu)
{
	int ret = 0;

	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
	    smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
	    smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays,
						      smu->display_config->num_display,
						      NULL);
		if (ret)
			return ret;
	}

	return ret;
}

static int navi10_get_gpu_power(struct smu_context *smu, uint32_t *value)
{
	if (!value)
		return -EINVAL;

	return navi10_get_smu_metrics_data(smu,
					   METRICS_AVERAGE_SOCKETPOWER,
					   value);
}

static int navi10_get_current_activity_percent(struct smu_context *smu,
					       enum amd_pp_sensors sensor,
					       uint32_t *value)
{
	int ret = 0;

	if (!value)
		return -EINVAL;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		ret = navi10_get_smu_metrics_data(smu,
						  METRICS_AVERAGE_GFXACTIVITY,
						  value);
		break;
	case AMDGPU_PP_SENSOR_MEM_LOAD:
		ret = navi10_get_smu_metrics_data(smu,
						  METRICS_AVERAGE_MEMACTIVITY,
						  value);
		break;
	default:
		dev_err(smu->adev->dev, "Invalid sensor for retrieving clock activity\n");
		return -EINVAL;
	}

	return ret;
}

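/* DPM is considered running once any feature in SMC_DPM_FEATURE is enabled. */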
static bool navi10_is_dpm_running(struct smu_context *smu)
{
	int ret = 0;
	uint32_t feature_mask[2];
	uint64_t feature_enabled;

	ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
	if (ret)
		return false;

	feature_enabled = (uint64_t)feature_mask[1] << 32 | feature_mask[0];

	return !!(feature_enabled & SMC_DPM_FEATURE);
}

static int navi10_get_fan_speed_rpm(struct smu_context *smu,
				    uint32_t *speed)
{
	if (!speed)
		return -EINVAL;

	return navi10_get_smu_metrics_data(smu,
					   METRICS_CURR_FANSPEED,
					   speed);
}

static int navi10_get_fan_parameters(struct smu_context *smu)
{
	PPTable_t *pptable = smu->smu_table.driver_pptable;

	smu->fan_max_rpm = pptable->FanMaximumRpm;

	return 0;
}

static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf)
{
	DpmActivityMonitorCoeffInt_t activity_monitor;
	uint32_t i, size = 0;
	int16_t workload_type = 0;
	static const char *profile_name[] = {
					"BOOTUP_DEFAULT",
					"3D_FULL_SCREEN",
					"POWER_SAVING",
					"VIDEO",
					"VR",
					"COMPUTE",
					"CUSTOM"};
	static const char *title[] = {
			"PROFILE_INDEX(NAME)",
			"CLOCK_TYPE(NAME)",
			"FPS",
			"MinFreqType",
			"MinActiveFreqType",
			"MinActiveFreq",
			"BoosterFreqType",
			"BoosterFreq",
			"PD_Data_limit_c",
			"PD_Data_error_coeff",
			"PD_Data_error_rate_coeff"};
	int result = 0;

	if (!buf)
		return -EINVAL;

	size += sprintf(buf + size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
			title[0], title[1], title[2], title[3], title[4], title[5],
			title[6], title[7], title[8], title[9], title[10]);

	for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
		/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
		workload_type = smu_cmn_to_asic_specific_index(smu,
							       CMN2ASIC_MAPPING_WORKLOAD,
							       i);
		if (workload_type < 0)
			return -EINVAL;

		result = smu_cmn_update_table(smu,
					      SMU_TABLE_ACTIVITY_MONITOR_COEFF, workload_type,
					      (void *)(&activity_monitor), false);
		if (result) {
			dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
			return result;
		}

		size += sprintf(buf + size, "%2d %14s%s:\n",
				i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");

		size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
				" ",
				0,
				"GFXCLK",
				activity_monitor.Gfx_FPS,
				activity_monitor.Gfx_MinFreqStep,
				activity_monitor.Gfx_MinActiveFreqType,
				activity_monitor.Gfx_MinActiveFreq,
				activity_monitor.Gfx_BoosterFreqType,
				activity_monitor.Gfx_BoosterFreq,
				activity_monitor.Gfx_PD_Data_limit_c,
				activity_monitor.Gfx_PD_Data_error_coeff,
				activity_monitor.Gfx_PD_Data_error_rate_coeff);

		size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
				" ",
				1,
				"SOCCLK",
				activity_monitor.Soc_FPS,
				activity_monitor.Soc_MinFreqStep,
				activity_monitor.Soc_MinActiveFreqType,
				activity_monitor.Soc_MinActiveFreq,
				activity_monitor.Soc_BoosterFreqType,
				activity_monitor.Soc_BoosterFreq,
				activity_monitor.Soc_PD_Data_limit_c,
				activity_monitor.Soc_PD_Data_error_coeff,
				activity_monitor.Soc_PD_Data_error_rate_coeff);

		size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
				" ",
				2,
				"MEMLK",
				activity_monitor.Mem_FPS,
				activity_monitor.Mem_MinFreqStep,
				activity_monitor.Mem_MinActiveFreqType,
				activity_monitor.Mem_MinActiveFreq,
				activity_monitor.Mem_BoosterFreqType,
				activity_monitor.Mem_BoosterFreq,
				activity_monitor.Mem_PD_Data_limit_c,
				activity_monitor.Mem_PD_Data_error_coeff,
				activity_monitor.Mem_PD_Data_error_rate_coeff);
	}

	return size;
}

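/*
 * input[] carries the payload from sysfs: input[size] is the profile mode
 * being applied; for the CUSTOM profile, input[0] selects the clock domain
 * (0 gfx, 1 soc, 2 mem) and input[1..9] the activity-monitor coefficients.
 */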
static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
{
	DpmActivityMonitorCoeffInt_t activity_monitor;
	int workload_type, ret = 0;

	smu->power_profile_mode = input[size];

	if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
		dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode);
		return -EINVAL;
	}

	if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {

		ret = smu_cmn_update_table(smu,
					   SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
					   (void *)(&activity_monitor), false);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
			return ret;
		}

		switch (input[0]) {
		case 0: /* Gfxclk */
			activity_monitor.Gfx_FPS = input[1];
			activity_monitor.Gfx_MinFreqStep = input[2];
			activity_monitor.Gfx_MinActiveFreqType = input[3];
			activity_monitor.Gfx_MinActiveFreq = input[4];
			activity_monitor.Gfx_BoosterFreqType = input[5];
			activity_monitor.Gfx_BoosterFreq = input[6];
			activity_monitor.Gfx_PD_Data_limit_c = input[7];
			activity_monitor.Gfx_PD_Data_error_coeff = input[8];
			activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
			break;
		case 1: /* Socclk */
			activity_monitor.Soc_FPS = input[1];
			activity_monitor.Soc_MinFreqStep = input[2];
			activity_monitor.Soc_MinActiveFreqType = input[3];
			activity_monitor.Soc_MinActiveFreq = input[4];
			activity_monitor.Soc_BoosterFreqType = input[5];
			activity_monitor.Soc_BoosterFreq = input[6];
			activity_monitor.Soc_PD_Data_limit_c = input[7];
			activity_monitor.Soc_PD_Data_error_coeff = input[8];
			activity_monitor.Soc_PD_Data_error_rate_coeff = input[9];
			break;
		case 2: /* Memlk */
			activity_monitor.Mem_FPS = input[1];
			activity_monitor.Mem_MinFreqStep = input[2];
			activity_monitor.Mem_MinActiveFreqType = input[3];
			activity_monitor.Mem_MinActiveFreq = input[4];
			activity_monitor.Mem_BoosterFreqType = input[5];
			activity_monitor.Mem_BoosterFreq = input[6];
			activity_monitor.Mem_PD_Data_limit_c = input[7];
			activity_monitor.Mem_PD_Data_error_coeff = input[8];
			activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
			break;
		}

		ret = smu_cmn_update_table(smu,
					   SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
					   (void *)(&activity_monitor), true);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
			return ret;
		}
	}

	/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
	workload_type = smu_cmn_to_asic_specific_index(smu,
						       CMN2ASIC_MAPPING_WORKLOAD,
						       smu->power_profile_mode);
	if (workload_type < 0)
		return -EINVAL;
	smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
					1 << workload_type, NULL);

	return ret;
}

static int navi10_notify_smc_display_config(struct smu_context *smu)
{
	struct smu_clocks min_clocks = {0};
	struct pp_display_clock_request clock_req;
	int ret = 0;

	min_clocks.dcef_clock = smu->display_config->min_dcef_set_clk;
	min_clocks.dcef_clock_in_sr = smu->display_config->min_dcef_deep_sleep_set_clk;
	min_clocks.memory_clock = smu->display_config->min_mem_set_clock;

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
		clock_req.clock_type = amd_pp_dcef_clock;
		clock_req.clock_freq_in_khz = min_clocks.dcef_clock * 10;

		ret = smu_v11_0_display_clock_voltage_request(smu, &clock_req);
		if (!ret) {
			if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
				ret = smu_cmn_send_smc_msg_with_param(smu,
								      SMU_MSG_SetMinDeepSleepDcefclk,
								      min_clocks.dcef_clock_in_sr / 100,
								      NULL);
				if (ret) {
					dev_err(smu->adev->dev, "Attempt to set divider for DCEFCLK Failed!");
					return ret;
				}
			}
		} else {
			dev_info(smu->adev->dev, "Attempt to set Hard Min for DCEFCLK Failed!");
		}
	}

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, min_clocks.memory_clock / 100, 0);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] Set hard min uclk failed!", __func__);
			return ret;
		}
	}

	return 0;
}

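/*
 * DC hands over reader (DCEFCLK) and writer (SOCCLK) watermark ranges;
 * they are cached in the local Watermarks_t and flushed to the SMU once,
 * tracked via the WATERMARKS_EXIST/WATERMARKS_LOADED bits.
 */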
navi10_set_watermarks_table(struct smu_context *smu, struct pp_smu_wm_range_sets *clock_ranges)1616 static int navi10_set_watermarks_table(struct smu_context *smu,
1617 struct pp_smu_wm_range_sets *clock_ranges)
1618 {
1619 Watermarks_t *table = smu->smu_table.watermarks_table;
1620 int ret = 0;
1621 int i;
1622
1623 if (clock_ranges) {
1624 if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES ||
1625 clock_ranges->num_writer_wm_sets > NUM_WM_RANGES)
1626 return -EINVAL;
1627
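		/*
		 * Reader (display) watermark rows are keyed by DCEFCLK: the
		 * drain clock is the display clock and the fill clock is
		 * UCLK. Writer rows are keyed by SOCCLK with the roles of
		 * fill and drain reversed, as the assignments below show.
		 */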
		for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) {
			table->WatermarkRow[WM_DCEFCLK][i].MinClock =
				clock_ranges->reader_wm_sets[i].min_drain_clk_mhz;
			table->WatermarkRow[WM_DCEFCLK][i].MaxClock =
				clock_ranges->reader_wm_sets[i].max_drain_clk_mhz;
			table->WatermarkRow[WM_DCEFCLK][i].MinUclk =
				clock_ranges->reader_wm_sets[i].min_fill_clk_mhz;
			table->WatermarkRow[WM_DCEFCLK][i].MaxUclk =
				clock_ranges->reader_wm_sets[i].max_fill_clk_mhz;

			table->WatermarkRow[WM_DCEFCLK][i].WmSetting =
				clock_ranges->reader_wm_sets[i].wm_inst;
		}

		for (i = 0; i < clock_ranges->num_writer_wm_sets; i++) {
			table->WatermarkRow[WM_SOCCLK][i].MinClock =
				clock_ranges->writer_wm_sets[i].min_fill_clk_mhz;
			table->WatermarkRow[WM_SOCCLK][i].MaxClock =
				clock_ranges->writer_wm_sets[i].max_fill_clk_mhz;
			table->WatermarkRow[WM_SOCCLK][i].MinUclk =
				clock_ranges->writer_wm_sets[i].min_drain_clk_mhz;
			table->WatermarkRow[WM_SOCCLK][i].MaxUclk =
				clock_ranges->writer_wm_sets[i].max_drain_clk_mhz;

			table->WatermarkRow[WM_SOCCLK][i].WmSetting =
				clock_ranges->writer_wm_sets[i].wm_inst;
		}

		smu->watermarks_bitmap |= WATERMARKS_EXIST;
	}

	/* pass data to smu controller */
	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
	    !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
		ret = smu_cmn_write_watermarks_table(smu);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to update WMTABLE!");
			return ret;
		}
		smu->watermarks_bitmap |= WATERMARKS_LOADED;
	}

	return 0;
}

static int navi10_thermal_get_temperature(struct smu_context *smu,
					  enum amd_pp_sensors sensor,
					  uint32_t *value)
{
	int ret = 0;

	if (!value)
		return -EINVAL;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
		ret = navi10_get_smu_metrics_data(smu,
						  METRICS_TEMPERATURE_HOTSPOT,
						  value);
		break;
	case AMDGPU_PP_SENSOR_EDGE_TEMP:
		ret = navi10_get_smu_metrics_data(smu,
						  METRICS_TEMPERATURE_EDGE,
						  value);
		break;
	case AMDGPU_PP_SENSOR_MEM_TEMP:
		ret = navi10_get_smu_metrics_data(smu,
						  METRICS_TEMPERATURE_MEM,
						  value);
		break;
	default:
		dev_err(smu->adev->dev, "Invalid sensor for retrieving temp\n");
		return -EINVAL;
	}

	return ret;
}

static int navi10_read_sensor(struct smu_context *smu,
			      enum amd_pp_sensors sensor,
			      void *data, uint32_t *size)
{
	int ret = 0;
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *pptable = table_context->driver_pptable;

	if (!data || !size)
		return -EINVAL;

	mutex_lock(&smu->sensor_lock);
	switch (sensor) {
	case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
		*(uint32_t *)data = pptable->FanMaximumRpm;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_MEM_LOAD:
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		ret = navi10_get_current_activity_percent(smu, sensor, (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_POWER:
		ret = navi10_get_gpu_power(smu, (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
	case AMDGPU_PP_SENSOR_EDGE_TEMP:
	case AMDGPU_PP_SENSOR_MEM_TEMP:
		ret = navi10_thermal_get_temperature(smu, sensor, (uint32_t *)data);
		*size = 4;
		break;
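	/*
	 * The clock sensors are reported in 10 kHz units while the DPM
	 * tables store MHz, hence the "* 100" below.
	 */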
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		ret = navi10_get_current_clk_freq_by_table(smu, SMU_UCLK, (uint32_t *)data);
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		ret = navi10_get_current_clk_freq_by_table(smu, SMU_GFXCLK, (uint32_t *)data);
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VDDGFX:
		ret = smu_v11_0_get_gfx_vdd(smu, (uint32_t *)data);
		*size = 4;
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}
	mutex_unlock(&smu->sensor_lock);

	return ret;
}

static int navi10_get_uclk_dpm_states(struct smu_context *smu, uint32_t *clocks_in_khz, uint32_t *num_states)
{
	uint32_t num_discrete_levels = 0;
	uint16_t *dpm_levels = NULL;
	uint16_t i = 0;
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *driver_ppt = NULL;

	if (!clocks_in_khz || !num_states || !table_context->driver_pptable)
		return -EINVAL;

	driver_ppt = table_context->driver_pptable;
	num_discrete_levels = driver_ppt->DpmDescriptor[PPCLK_UCLK].NumDiscreteLevels;
	dpm_levels = driver_ppt->FreqTableUclk;

	if (num_discrete_levels == 0 || dpm_levels == NULL)
		return -EINVAL;

	*num_states = num_discrete_levels;
	for (i = 0; i < num_discrete_levels; i++) {
		/* convert to khz */
		*clocks_in_khz = (*dpm_levels) * 1000;
		clocks_in_khz++;
		dpm_levels++;
	}

	return 0;
}

static int navi10_get_thermal_temperature_range(struct smu_context *smu,
						struct smu_temperature_range *range)
{
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_11_0_powerplay_table *powerplay_table =
				table_context->power_play_table;
	PPTable_t *pptable = smu->smu_table.driver_pptable;

	if (!range)
		return -EINVAL;

	memcpy(range, &smu11_thermal_policy[0], sizeof(struct smu_temperature_range));

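	/*
	 * The pptable limits are in degrees Celsius; the range fields below
	 * are scaled by SMU_TEMPERATURE_UNITS_PER_CENTIGRADES (millidegrees,
	 * assuming the usual value of 1000).
	 */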
	range->max = pptable->TedgeLimit *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
	range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE) *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
	range->hotspot_crit_max = pptable->ThotspotLimit *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
	range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
	range->mem_crit_max = pptable->TmemLimit *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
	range->mem_emergency_max = (pptable->TmemLimit + CTF_OFFSET_MEM) *
		SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
	range->software_shutdown_temp = powerplay_table->software_shutdown_temp;

	return 0;
}

static int navi10_display_disable_memory_clock_switch(struct smu_context *smu,
						      bool disable_memory_clock_switch)
{
	int ret = 0;
	struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks =
		(struct smu_11_0_max_sustainable_clocks *)
			smu->smu_table.max_sustainable_clocks;
	uint32_t min_memory_clock = smu->hard_min_uclk_req_from_dal;
	uint32_t max_memory_clock = max_sustainable_clocks->uclock;

	if (smu->disable_uclk_switch == disable_memory_clock_switch)
		return 0;

	if (disable_memory_clock_switch)
		ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, max_memory_clock, 0);
	else
		ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, min_memory_clock, 0);

	if (!ret)
		smu->disable_uclk_switch = disable_memory_clock_switch;

	return ret;
}

static int navi10_get_power_limit(struct smu_context *smu)
{
	struct smu_11_0_powerplay_table *powerplay_table =
		(struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table;
	struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
	PPTable_t *pptable = smu->smu_table.driver_pptable;
	uint32_t power_limit, od_percent;

	if (smu_v11_0_get_current_power_limit(smu, &power_limit)) {
		/* the last hope to figure out the ppt limit */
		if (!pptable) {
			dev_err(smu->adev->dev, "Cannot get PPT limit due to pptable missing!");
			return -EINVAL;
		}
		power_limit =
			pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
	}
	smu->current_power_limit = power_limit;

	if (smu->od_enabled &&
	    navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) {
		od_percent = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);

		dev_dbg(smu->adev->dev, "ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_percent, power_limit);

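		/*
		 * Scale the default limit by the OD headroom, e.g. an
		 * od_percent of 20 yields power_limit * 120 / 100.
		 */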
		power_limit *= (100 + od_percent);
		power_limit /= 100;
	}
	smu->max_power_limit = power_limit;

	return 0;
}

static int navi10_update_pcie_parameters(struct smu_context *smu,
					 uint32_t pcie_gen_cap,
					 uint32_t pcie_width_cap)
{
	struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
	PPTable_t *pptable = smu->smu_table.driver_pptable;
	uint32_t smu_pcie_arg;
	int ret, i;

	/* lclk dpm table setup */
	for (i = 0; i < MAX_PCIE_CONF; i++) {
		dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pptable->PcieGenSpeed[i];
		dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pptable->PcieLaneCount[i];
	}

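	/*
	 * The OverridePcieParameters argument packs, per link level: the
	 * level index in bits [31:16], the (capped) PCIe gen code in bits
	 * [15:8] and the (capped) lane-count code in bits [7:0]. For
	 * example, level 1 with gen code 3 and lane code 6 is
	 * (1 << 16) | (3 << 8) | 6.
	 */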
	for (i = 0; i < NUM_LINK_LEVELS; i++) {
		smu_pcie_arg = (i << 16) |
			((pptable->PcieGenSpeed[i] <= pcie_gen_cap) ? (pptable->PcieGenSpeed[i] << 8) :
				(pcie_gen_cap << 8)) | ((pptable->PcieLaneCount[i] <= pcie_width_cap) ?
					pptable->PcieLaneCount[i] : pcie_width_cap);
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_OverridePcieParameters,
						      smu_pcie_arg,
						      NULL);

		if (ret)
			return ret;

		if (pptable->PcieGenSpeed[i] > pcie_gen_cap)
			dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pcie_gen_cap;
		if (pptable->PcieLaneCount[i] > pcie_width_cap)
			dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pcie_width_cap;
	}

	return 0;
}

static inline void navi10_dump_od_table(struct smu_context *smu,
					OverDriveTable_t *od_table)
{
	dev_dbg(smu->adev->dev, "OD: Gfxclk: (%d, %d)\n", od_table->GfxclkFmin, od_table->GfxclkFmax);
	dev_dbg(smu->adev->dev, "OD: Gfx1: (%d, %d)\n", od_table->GfxclkFreq1, od_table->GfxclkVolt1);
	dev_dbg(smu->adev->dev, "OD: Gfx2: (%d, %d)\n", od_table->GfxclkFreq2, od_table->GfxclkVolt2);
	dev_dbg(smu->adev->dev, "OD: Gfx3: (%d, %d)\n", od_table->GfxclkFreq3, od_table->GfxclkVolt3);
	dev_dbg(smu->adev->dev, "OD: UclkFmax: %d\n", od_table->UclkFmax);
	dev_dbg(smu->adev->dev, "OD: OverDrivePct: %d\n", od_table->OverDrivePct);
}

static int navi10_od_setting_check_range(struct smu_context *smu,
					 struct smu_11_0_overdrive_table *od_table,
					 enum SMU_11_0_ODSETTING_ID setting,
					 uint32_t value)
{
	if (value < od_table->min[setting]) {
		dev_warn(smu->adev->dev, "OD setting (%d, %d) is less than the minimum allowed (%d)\n", setting, value, od_table->min[setting]);
		return -EINVAL;
	}
	if (value > od_table->max[setting]) {
		dev_warn(smu->adev->dev, "OD setting (%d, %d) is greater than the maximum allowed (%d)\n", setting, value, od_table->max[setting]);
		return -EINVAL;
	}
	return 0;
}

static int navi10_overdrive_get_gfx_clk_base_voltage(struct smu_context *smu,
						     uint16_t *voltage,
						     uint32_t freq)
{
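	/*
	 * GetVoltageByDpm argument: PPCLK id in bits [31:16], target
	 * frequency (MHz, assumed) in bits [15:0].
	 */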
	uint32_t param = (freq & 0xFFFF) | (PPCLK_GFXCLK << 16);
	uint32_t value = 0;
	int ret;

	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_GetVoltageByDpm,
					      param,
					      &value);
	if (ret) {
		dev_err(smu->adev->dev, "[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!");
		return ret;
	}

	*voltage = (uint16_t)value;

	return 0;
}

static bool navi10_is_baco_supported(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	if (amdgpu_sriov_vf(adev) || !smu_v11_0_baco_is_support(smu))
		return false;

	return true;
}

static int navi10_set_default_od_settings(struct smu_context *smu)
{
	OverDriveTable_t *od_table =
		(OverDriveTable_t *)smu->smu_table.overdrive_table;
	OverDriveTable_t *boot_od_table =
		(OverDriveTable_t *)smu->smu_table.boot_overdrive_table;
	int ret = 0;

	ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)od_table, false);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to get overdrive table!\n");
		return ret;
	}

	if (!od_table->GfxclkVolt1) {
		ret = navi10_overdrive_get_gfx_clk_base_voltage(smu,
								&od_table->GfxclkVolt1,
								od_table->GfxclkFreq1);
		if (ret)
			return ret;
	}

	if (!od_table->GfxclkVolt2) {
		ret = navi10_overdrive_get_gfx_clk_base_voltage(smu,
								&od_table->GfxclkVolt2,
								od_table->GfxclkFreq2);
		if (ret)
			return ret;
	}

	if (!od_table->GfxclkVolt3) {
		ret = navi10_overdrive_get_gfx_clk_base_voltage(smu,
								&od_table->GfxclkVolt3,
								od_table->GfxclkFreq3);
		if (ret)
			return ret;
	}

	memcpy(boot_od_table, od_table, sizeof(OverDriveTable_t));

	navi10_dump_od_table(smu, od_table);

	return 0;
}

static int navi10_od_edit_dpm_table(struct smu_context *smu,
				    enum PP_OD_DPM_TABLE_COMMAND type,
				    long input[], uint32_t size)
{
	int i;
	int ret = 0;
	struct smu_table_context *table_context = &smu->smu_table;
	OverDriveTable_t *od_table;
	struct smu_11_0_overdrive_table *od_settings;
	enum SMU_11_0_ODSETTING_ID freq_setting, voltage_setting;
	uint16_t *freq_ptr, *voltage_ptr;

	od_table = (OverDriveTable_t *)table_context->overdrive_table;

	if (!smu->od_enabled) {
		dev_warn(smu->adev->dev, "OverDrive is not enabled!\n");
		return -EINVAL;
	}

	if (!smu->od_settings) {
		dev_err(smu->adev->dev, "OD board limits are not set!\n");
		return -ENOENT;
	}

	od_settings = smu->od_settings;

	switch (type) {
	case PP_OD_EDIT_SCLK_VDDC_TABLE:
		if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) {
			dev_warn(smu->adev->dev, "GFXCLK_LIMITS not supported!\n");
			return -ENOTSUPP;
		}
		if (!table_context->overdrive_table) {
			dev_err(smu->adev->dev, "Overdrive is not initialized\n");
			return -EINVAL;
		}
		for (i = 0; i < size; i += 2) {
			if (i + 2 > size) {
				dev_info(smu->adev->dev, "invalid number of input parameters %d\n", size);
				return -EINVAL;
			}
			switch (input[i]) {
			case 0:
				freq_setting = SMU_11_0_ODSETTING_GFXCLKFMIN;
				freq_ptr = &od_table->GfxclkFmin;
				if (input[i + 1] > od_table->GfxclkFmax) {
					dev_info(smu->adev->dev, "GfxclkFmin (%ld) must be <= GfxclkFmax (%u)!\n",
						 input[i + 1],
						 od_table->GfxclkFmax);
					return -EINVAL;
				}
				break;
			case 1:
				freq_setting = SMU_11_0_ODSETTING_GFXCLKFMAX;
				freq_ptr = &od_table->GfxclkFmax;
				if (input[i + 1] < od_table->GfxclkFmin) {
					dev_info(smu->adev->dev, "GfxclkFmax (%ld) must be >= GfxclkFmin (%u)!\n",
						 input[i + 1],
						 od_table->GfxclkFmin);
					return -EINVAL;
				}
				break;
			default:
				dev_info(smu->adev->dev, "Invalid SCLK_VDDC_TABLE index: %ld\n", input[i]);
				dev_info(smu->adev->dev, "Supported indices: [0:min,1:max]\n");
				return -EINVAL;
			}
			ret = navi10_od_setting_check_range(smu, od_settings, freq_setting, input[i + 1]);
			if (ret)
				return ret;
			*freq_ptr = input[i + 1];
		}
		break;
	case PP_OD_EDIT_MCLK_VDDC_TABLE:
		if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX)) {
			dev_warn(smu->adev->dev, "UCLK_MAX not supported!\n");
			return -ENOTSUPP;
		}
		if (size < 2) {
			dev_info(smu->adev->dev, "invalid number of parameters: %d\n", size);
			return -EINVAL;
		}
		if (input[0] != 1) {
			dev_info(smu->adev->dev, "Invalid MCLK_VDDC_TABLE index: %ld\n", input[0]);
			dev_info(smu->adev->dev, "Supported indices: [1:max]\n");
			return -EINVAL;
		}
		ret = navi10_od_setting_check_range(smu, od_settings, SMU_11_0_ODSETTING_UCLKFMAX, input[1]);
		if (ret)
			return ret;
		od_table->UclkFmax = input[1];
		break;
	case PP_OD_RESTORE_DEFAULT_TABLE:
		if (!(table_context->overdrive_table && table_context->boot_overdrive_table)) {
			dev_err(smu->adev->dev, "Overdrive table was not initialized!\n");
			return -EINVAL;
		}
		memcpy(table_context->overdrive_table, table_context->boot_overdrive_table, sizeof(OverDriveTable_t));
		break;
	case PP_OD_COMMIT_DPM_TABLE:
		navi10_dump_od_table(smu, od_table);
		ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)od_table, true);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to import overdrive table!\n");
			return ret;
		}
		break;
	case PP_OD_EDIT_VDDC_CURVE:
		if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE)) {
			dev_warn(smu->adev->dev, "GFXCLK_CURVE not supported!\n");
			return -ENOTSUPP;
		}
		if (size < 3) {
			dev_info(smu->adev->dev, "invalid number of parameters: %d\n", size);
			return -EINVAL;
		}
		if (!od_table) {
			dev_info(smu->adev->dev, "Overdrive is not initialized\n");
			return -EINVAL;
		}

		switch (input[0]) {
		case 0:
			freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1;
			voltage_setting = SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P1;
			freq_ptr = &od_table->GfxclkFreq1;
			voltage_ptr = &od_table->GfxclkVolt1;
			break;
		case 1:
			freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P2;
			voltage_setting = SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P2;
			freq_ptr = &od_table->GfxclkFreq2;
			voltage_ptr = &od_table->GfxclkVolt2;
			break;
		case 2:
			freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P3;
			voltage_setting = SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P3;
			freq_ptr = &od_table->GfxclkFreq3;
			voltage_ptr = &od_table->GfxclkVolt3;
			break;
		default:
			dev_info(smu->adev->dev, "Invalid VDDC_CURVE index: %ld\n", input[0]);
			dev_info(smu->adev->dev, "Supported indices: [0, 1, 2]\n");
			return -EINVAL;
		}
		ret = navi10_od_setting_check_range(smu, od_settings, freq_setting, input[1]);
		if (ret)
			return ret;
		/* Allow setting zero to disable the OverDrive VDDC curve */
		if (input[2] != 0) {
			ret = navi10_od_setting_check_range(smu, od_settings, voltage_setting, input[2]);
			if (ret)
				return ret;
			*freq_ptr = input[1];
			*voltage_ptr = ((uint16_t)input[2]) * NAVI10_VOLTAGE_SCALE;
			dev_dbg(smu->adev->dev, "OD: set curve %ld: (%d, %d)\n", input[0], *freq_ptr, *voltage_ptr);
		} else {
			/* If setting 0, disable all voltage curve settings */
			od_table->GfxclkVolt1 = 0;
			od_table->GfxclkVolt2 = 0;
			od_table->GfxclkVolt3 = 0;
		}
		navi10_dump_od_table(smu, od_table);
		break;
	default:
		return -ENOSYS;
	}
	return ret;
}
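
/*
 * A sketch of how these commands typically arrive from user space via the
 * pp_od_clk_voltage sysfs file (assuming the usual amdgpu plumbing; the
 * exact strings are illustrative):
 *
 *   echo "s 1 2000"       -> PP_OD_EDIT_SCLK_VDDC_TABLE, input = {1, 2000}
 *   echo "m 1 875"        -> PP_OD_EDIT_MCLK_VDDC_TABLE, input = {1, 875}
 *   echo "vc 2 1800 1000" -> PP_OD_EDIT_VDDC_CURVE, input = {2, 1800, 1000}
 *   echo "r"              -> PP_OD_RESTORE_DEFAULT_TABLE
 *   echo "c"              -> PP_OD_COMMIT_DPM_TABLE
 */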

static int navi10_run_btc(struct smu_context *smu)
{
	int ret = 0;

	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RunBtc, NULL);
	if (ret)
		dev_err(smu->adev->dev, "RunBtc failed!\n");

	return ret;
}

static bool navi10_need_umc_cdr_workaround(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT))
		return false;

	if (adev->asic_type == CHIP_NAVI10 ||
	    adev->asic_type == CHIP_NAVI14)
		return true;

	return false;
}

static int navi10_umc_hybrid_cdr_workaround(struct smu_context *smu)
{
	uint32_t uclk_count, uclk_min, uclk_max;
	int ret = 0;

	/* This workaround can be applied only with uclk dpm enabled */
	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT))
		return 0;

	ret = smu_v11_0_get_dpm_level_count(smu, SMU_UCLK, &uclk_count);
	if (ret)
		return ret;

	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_UCLK, (uint16_t)(uclk_count - 1), &uclk_max);
	if (ret)
		return ret;

	/*
	 * NAVI10_UMC_HYBRID_CDR_WORKAROUND_UCLK_THRESHOLD is 750 MHz
	 * (0x2EE). This workaround is needed only when the max uclk
	 * frequency is not greater than that.
	 */
	if (uclk_max > 0x2EE)
		return 0;

	ret = smu_v11_0_get_dpm_freq_by_index(smu, SMU_UCLK, (uint16_t)0, &uclk_min);
	if (ret)
		return ret;

	/* Force UCLK out of the highest DPM */
	ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, 0, uclk_min);
	if (ret)
		return ret;

	/* Revert the UCLK Hardmax */
	ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, 0, uclk_max);
	if (ret)
		return ret;

	/*
	 * In this case, the SMU already disabled the dummy pstate during
	 * the enablement of UCLK DPM, so we have to re-enable it.
	 */
	return smu_cmn_send_smc_msg(smu, SMU_MSG_DAL_ENABLE_DUMMY_PSTATE_CHANGE, NULL);
}

static int navi10_set_dummy_pstates_table_location(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *dummy_read_table =
				&smu_table->dummy_read_1_table;
	char *dummy_table = dummy_read_table->cpu_addr;
	int ret = 0;
	uint32_t i;

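	/*
	 * Fill the 0x40000-byte (256 KiB) dummy-read buffer with alternating
	 * 4 KiB PRBS7 patterns: a page without DBI followed by a page with
	 * DBI, 32 pairs in total.
	 */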
	for (i = 0; i < 0x40000; i += 0x1000 * 2) {
		memcpy(dummy_table, &NoDbiPrbs7[0], 0x1000);
		dummy_table += 0x1000;
		memcpy(dummy_table, &DbiPrbs7[0], 0x1000);
		dummy_table += 0x1000;
	}

	amdgpu_asic_flush_hdp(smu->adev, NULL);

	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_SET_DRIVER_DUMMY_TABLE_DRAM_ADDR_HIGH,
					      upper_32_bits(dummy_read_table->mc_address),
					      NULL);
	if (ret)
		return ret;

	return smu_cmn_send_smc_msg_with_param(smu,
					       SMU_MSG_SET_DRIVER_DUMMY_TABLE_DRAM_ADDR_LOW,
					       lower_32_bits(dummy_read_table->mc_address),
					       NULL);
}

static int navi10_run_umc_cdr_workaround(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint8_t umc_fw_greater_than_v136 = false;
	uint8_t umc_fw_disable_cdr = false;
	uint32_t pmfw_version;
	uint32_t param;
	int ret = 0;

	if (!navi10_need_umc_cdr_workaround(smu))
		return 0;

	ret = smu_cmn_get_smc_version(smu, NULL, &pmfw_version);
	if (ret) {
		dev_err(adev->dev, "Failed to get smu version!\n");
		return ret;
	}

	/*
	 * The messages below are only supported by Navi10 42.53.0 and later
	 * PMFWs and Navi14 53.29.0 and later PMFWs.
	 * - PPSMC_MSG_SetDriverDummyTableDramAddrHigh
	 * - PPSMC_MSG_SetDriverDummyTableDramAddrLow
	 * - PPSMC_MSG_GetUMCFWWA
	 */
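	/*
	 * As the constants below show, the PMFW version packs
	 * major.minor.patch into the upper three bytes:
	 * 42.53.0 -> (42 << 16) | (53 << 8) | 0 = 0x2a3500 and
	 * 53.29.0 -> (53 << 16) | (29 << 8) | 0 = 0x351d00.
	 */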
	if (((adev->asic_type == CHIP_NAVI10) && (pmfw_version >= 0x2a3500)) ||
	    ((adev->asic_type == CHIP_NAVI14) && (pmfw_version >= 0x351D00))) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_GET_UMC_FW_WA,
						      0,
						      &param);
		if (ret)
			return ret;

		/* First bit indicates whether the UMC f/w version is greater than v136 */
		umc_fw_greater_than_v136 = param & 0x1;

		/* Second bit indicates if hybrid-cdr is disabled */
		umc_fw_disable_cdr = param & 0x2;

		/* w/a only allowed if UMC f/w is <= 136 */
		if (umc_fw_greater_than_v136)
			return 0;

		if (umc_fw_disable_cdr) {
			if (adev->asic_type == CHIP_NAVI10)
				return navi10_umc_hybrid_cdr_workaround(smu);
		} else {
			return navi10_set_dummy_pstates_table_location(smu);
		}
	} else {
		if (adev->asic_type == CHIP_NAVI10)
			return navi10_umc_hybrid_cdr_workaround(smu);
	}

	return 0;
}

static void navi10_fill_i2c_req(SwI2cRequest_t *req, bool write,
				uint8_t address, uint32_t numbytes,
				uint8_t *data)
{
	int i;

	req->I2CcontrollerPort = 0;
	req->I2CSpeed = 2;
	req->SlaveAddress = address;
	req->NumCmds = numbytes;

	for (i = 0; i < numbytes; i++) {
		SwI2cCmd_t *cmd = &req->SwI2cCmds[i];

		/* The first two bytes are always a write carrying the 2-byte EEPROM address */
		if (i < 2)
			cmd->Cmd = 1;
		else
			cmd->Cmd = write;

		/* Add RESTART for read after address filled */
		cmd->CmdConfig |= (i == 2 && !write) ? CMDCONFIG_RESTART_MASK : 0;

		/* Add STOP in the end */
		cmd->CmdConfig |= (i == (numbytes - 1)) ? CMDCONFIG_STOP_MASK : 0;

		/* Fill with data regardless if read or write to simplify code */
		cmd->RegisterAddr = data[i];
	}
}
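
/*
 * Request layout example (a sketch): reading two data bytes at EEPROM
 * offset 0x0010 builds numbytes = 4 commands: cmds[0..1] write the address
 * bytes 0x00 and 0x10 (Cmd = 1), cmd[2] is a read with CMDCONFIG_RESTART
 * set, and cmd[3] is a read with CMDCONFIG_STOP set.
 */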

static int navi10_i2c_read_data(struct i2c_adapter *control,
				uint8_t address,
				uint8_t *data,
				uint32_t numbytes)
{
	uint32_t i, ret = 0;
	SwI2cRequest_t req;
	struct amdgpu_device *adev = to_amdgpu_device(control);
	struct smu_table_context *smu_table = &adev->smu.smu_table;
	struct smu_table *table = &smu_table->driver_table;

	if (numbytes > MAX_SW_I2C_COMMANDS) {
		dev_err(adev->dev, "numbytes requested %d is over max allowed %d\n",
			numbytes, MAX_SW_I2C_COMMANDS);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	navi10_fill_i2c_req(&req, false, address, numbytes, data);

	mutex_lock(&adev->smu.mutex);
	/* Now read data starting with that address */
	ret = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req,
				   true);
	mutex_unlock(&adev->smu.mutex);

	if (!ret) {
		SwI2cRequest_t *res = (SwI2cRequest_t *)table->cpu_addr;

		/* Assume SMU fills res.SwI2cCmds[i].Data with read bytes */
		for (i = 0; i < numbytes; i++)
			data[i] = res->SwI2cCmds[i].Data;

		dev_dbg(adev->dev, "navi10_i2c_read_data, address = %x, bytes = %d, data:",
			(uint16_t)address, numbytes);

		print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE,
			       8, 1, data, numbytes, false);
	} else {
		dev_err(adev->dev, "navi10_i2c_read_data - error occurred: %x", ret);
	}

	return ret;
}

static int navi10_i2c_write_data(struct i2c_adapter *control,
				 uint8_t address,
				 uint8_t *data,
				 uint32_t numbytes)
{
	uint32_t ret;
	SwI2cRequest_t req;
	struct amdgpu_device *adev = to_amdgpu_device(control);

	if (numbytes > MAX_SW_I2C_COMMANDS) {
		dev_err(adev->dev, "numbytes requested %d is over max allowed %d\n",
			numbytes, MAX_SW_I2C_COMMANDS);
		return -EINVAL;
	}

	memset(&req, 0, sizeof(req));
	navi10_fill_i2c_req(&req, true, address, numbytes, data);

	mutex_lock(&adev->smu.mutex);
	ret = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req, true);
	mutex_unlock(&adev->smu.mutex);

	if (!ret) {
		dev_dbg(adev->dev, "navi10_i2c_write(), address = %x, bytes = %d, data:",
			(uint16_t)address, numbytes);

		print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE,
			       8, 1, data, numbytes, false);
		/*
		 * According to the EEPROM spec, a maximum of 10 ms is required
		 * for the EEPROM to flush its internal RX buffer after a STOP
		 * is issued at the end of a write transaction. During this time
		 * the EEPROM will not respond to any more commands - so wait a
		 * bit more.
		 */
		msleep(10);

	} else {
		dev_err(adev->dev, "navi10_i2c_write - error occurred: %x", ret);
	}

	return ret;
}

static int navi10_i2c_xfer(struct i2c_adapter *i2c_adap,
			   struct i2c_msg *msgs, int num)
{
	uint32_t i, j, ret, data_size, data_chunk_size, next_eeprom_addr = 0;
	uint8_t *data_ptr, data_chunk[MAX_SW_I2C_COMMANDS] = { 0 };

	for (i = 0; i < num; i++) {
		/*
		 * SMU interface allows at most MAX_SW_I2C_COMMANDS bytes of data at
		 * once and hence the data needs to be spliced into chunks and sent each
		 * chunk separately
		 */
		data_size = msgs[i].len - 2;
		data_chunk_size = MAX_SW_I2C_COMMANDS - 2;
		next_eeprom_addr = (msgs[i].buf[0] << 8 & 0xff00) | (msgs[i].buf[1] & 0xff);
		data_ptr = msgs[i].buf + 2;

		for (j = 0; j < data_size / data_chunk_size; j++) {
			/* Insert the EEPROM dest address, bits 0-15 */
			data_chunk[0] = ((next_eeprom_addr >> 8) & 0xff);
			data_chunk[1] = (next_eeprom_addr & 0xff);

			if (msgs[i].flags & I2C_M_RD) {
				ret = navi10_i2c_read_data(i2c_adap,
							   (uint8_t)msgs[i].addr,
							   data_chunk, MAX_SW_I2C_COMMANDS);

				memcpy(data_ptr, data_chunk + 2, data_chunk_size);
			} else {

				memcpy(data_chunk + 2, data_ptr, data_chunk_size);

				ret = navi10_i2c_write_data(i2c_adap,
							    (uint8_t)msgs[i].addr,
							    data_chunk, MAX_SW_I2C_COMMANDS);
			}

			if (ret) {
				num = -EIO;
				goto fail;
			}

			next_eeprom_addr += data_chunk_size;
			data_ptr += data_chunk_size;
		}

		if (data_size % data_chunk_size) {
			data_chunk[0] = ((next_eeprom_addr >> 8) & 0xff);
			data_chunk[1] = (next_eeprom_addr & 0xff);

			if (msgs[i].flags & I2C_M_RD) {
				ret = navi10_i2c_read_data(i2c_adap,
							   (uint8_t)msgs[i].addr,
							   data_chunk, (data_size % data_chunk_size) + 2);

				memcpy(data_ptr, data_chunk + 2, data_size % data_chunk_size);
			} else {
				memcpy(data_chunk + 2, data_ptr, data_size % data_chunk_size);

				ret = navi10_i2c_write_data(i2c_adap,
							    (uint8_t)msgs[i].addr,
							    data_chunk, (data_size % data_chunk_size) + 2);
			}

			if (ret) {
				num = -EIO;
				goto fail;
			}
		}
	}

fail:
	return num;
}
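
/*
 * Chunking example (a sketch): each SMU request carries the 2-byte EEPROM
 * offset plus up to data_chunk_size = MAX_SW_I2C_COMMANDS - 2 payload
 * bytes, so a message body of N bytes goes out as N / data_chunk_size full
 * chunks followed by one short chunk of N % data_chunk_size bytes, with
 * next_eeprom_addr advanced after each chunk.
 */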

static u32 navi10_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm navi10_i2c_algo = {
	.master_xfer = navi10_i2c_xfer,
	.functionality = navi10_i2c_func,
};

static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
				      void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v1_0 *gpu_metrics =
		(struct gpu_metrics_v1_0 *)smu_table->gpu_metrics_table;
	struct amdgpu_device *adev = smu->adev;
	SmuMetrics_NV12_t nv12_metrics = { 0 };
	SmuMetrics_t metrics;
	int ret = 0;

	mutex_lock(&smu->metrics_lock);

	ret = smu_cmn_get_metrics_table_locked(smu,
					       NULL,
					       true);
	if (ret) {
		mutex_unlock(&smu->metrics_lock);
		return ret;
	}

	memcpy(&metrics, smu_table->metrics_table, sizeof(SmuMetrics_t));
	if (adev->asic_type == CHIP_NAVI12)
		memcpy(&nv12_metrics, smu_table->metrics_table, sizeof(SmuMetrics_NV12_t));

	mutex_unlock(&smu->metrics_lock);

	smu_v11_0_init_gpu_metrics_v1_0(gpu_metrics);

	gpu_metrics->temperature_edge = metrics.TemperatureEdge;
	gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
	gpu_metrics->temperature_mem = metrics.TemperatureMem;
	gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx;
	gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc;
	gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem0;

	gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
	gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;

	gpu_metrics->average_socket_power = metrics.AverageSocketPower;

	gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency;
	gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency;
	gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequency;

	if (adev->asic_type == CHIP_NAVI12) {
		gpu_metrics->energy_accumulator = nv12_metrics.EnergyAccumulator;
		gpu_metrics->average_vclk0_frequency = nv12_metrics.AverageVclkFrequency;
		gpu_metrics->average_dclk0_frequency = nv12_metrics.AverageDclkFrequency;
		gpu_metrics->average_mm_activity = nv12_metrics.VcnActivityPercentage;
	}

	gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK];
	gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK];
	gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK];
	gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK];
	gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];

	gpu_metrics->throttle_status = metrics.ThrottlerStatus;

	gpu_metrics->current_fan_speed = metrics.CurrFanSpeed;

	gpu_metrics->pcie_link_width =
		smu_v11_0_get_current_pcie_link_width(smu);
	gpu_metrics->pcie_link_speed =
		smu_v11_0_get_current_pcie_link_speed(smu);

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v1_0);
}

static int navi10_enable_mgpu_fan_boost(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *smc_pptable = table_context->driver_pptable;
	struct amdgpu_device *adev = smu->adev;
	uint32_t param = 0;

	/* Navi12 does not support this */
	if (adev->asic_type == CHIP_NAVI12)
		return 0;

	/*
	 * Skip the MGpuFanBoost setting for those ASICs
	 * which do not support it
	 */
	if (!smc_pptable->MGpuFanBoostLimitRpm)
		return 0;

	/* Workaround for WS SKU */
	if (adev->pdev->device == 0x7312 &&
	    adev->pdev->revision == 0)
		param = 0xD188;

	return smu_cmn_send_smc_msg_with_param(smu,
					       SMU_MSG_SetMGpuFanBoostLimitRpm,
					       param,
					       NULL);
}

static int navi10_post_smu_init(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (amdgpu_sriov_vf(adev))
		return 0;

	ret = navi10_run_umc_cdr_workaround(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to apply umc cdr workaround!\n");
		return ret;
	}

	if (!smu->dc_controlled_by_gpio) {
		/*
		 * For Navi1X, manually switch it to AC mode as PMFW
		 * may boot it with DC mode.
		 */
		ret = smu_v11_0_set_power_source(smu,
						 adev->pm.ac_power ?
						 SMU_POWER_SOURCE_AC :
						 SMU_POWER_SOURCE_DC);
		if (ret) {
			dev_err(adev->dev, "Failed to switch to %s mode!\n",
				adev->pm.ac_power ? "AC" : "DC");
			return ret;
		}
	}

	return ret;
}

static const struct pptable_funcs navi10_ppt_funcs = {
	.get_allowed_feature_mask = navi10_get_allowed_feature_mask,
	.set_default_dpm_table = navi10_set_default_dpm_table,
	.dpm_set_vcn_enable = navi10_dpm_set_vcn_enable,
	.dpm_set_jpeg_enable = navi10_dpm_set_jpeg_enable,
	.print_clk_levels = navi10_print_clk_levels,
	.force_clk_levels = navi10_force_clk_levels,
	.populate_umd_state_clk = navi10_populate_umd_state_clk,
	.get_clock_by_type_with_latency = navi10_get_clock_by_type_with_latency,
	.pre_display_config_changed = navi10_pre_display_config_changed,
	.display_config_changed = navi10_display_config_changed,
	.notify_smc_display_config = navi10_notify_smc_display_config,
	.is_dpm_running = navi10_is_dpm_running,
	.get_fan_speed_rpm = navi10_get_fan_speed_rpm,
	.get_power_profile_mode = navi10_get_power_profile_mode,
	.set_power_profile_mode = navi10_set_power_profile_mode,
	.set_watermarks_table = navi10_set_watermarks_table,
	.read_sensor = navi10_read_sensor,
	.get_uclk_dpm_states = navi10_get_uclk_dpm_states,
	.set_performance_level = smu_v11_0_set_performance_level,
	.get_thermal_temperature_range = navi10_get_thermal_temperature_range,
	.display_disable_memory_clock_switch = navi10_display_disable_memory_clock_switch,
	.get_power_limit = navi10_get_power_limit,
	.update_pcie_parameters = navi10_update_pcie_parameters,
	.init_microcode = smu_v11_0_init_microcode,
	.load_microcode = smu_v11_0_load_microcode,
	.fini_microcode = smu_v11_0_fini_microcode,
	.init_smc_tables = navi10_init_smc_tables,
	.fini_smc_tables = smu_v11_0_fini_smc_tables,
	.init_power = smu_v11_0_init_power,
	.fini_power = smu_v11_0_fini_power,
	.check_fw_status = smu_v11_0_check_fw_status,
	.setup_pptable = navi10_setup_pptable,
	.get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
	.check_fw_version = smu_v11_0_check_fw_version,
	.write_pptable = smu_cmn_write_pptable,
	.set_driver_table_location = smu_v11_0_set_driver_table_location,
	.set_tool_table_location = smu_v11_0_set_tool_table_location,
	.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
	.system_features_control = smu_v11_0_system_features_control,
	.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
	.send_smc_msg = smu_cmn_send_smc_msg,
	.init_display_count = smu_v11_0_init_display_count,
	.set_allowed_mask = smu_v11_0_set_allowed_mask,
	.get_enabled_mask = smu_cmn_get_enabled_mask,
	.feature_is_enabled = smu_cmn_feature_is_enabled,
	.disable_all_features_with_exception = smu_cmn_disable_all_features_with_exception,
	.notify_display_change = smu_v11_0_notify_display_change,
	.set_power_limit = smu_v11_0_set_power_limit,
	.init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
	.enable_thermal_alert = smu_v11_0_enable_thermal_alert,
	.disable_thermal_alert = smu_v11_0_disable_thermal_alert,
	.set_min_dcef_deep_sleep = smu_v11_0_set_min_deep_sleep_dcefclk,
	.display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
	.get_fan_control_mode = smu_v11_0_get_fan_control_mode,
	.set_fan_control_mode = smu_v11_0_set_fan_control_mode,
	.set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
	.set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
	.set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
	.gfx_off_control = smu_v11_0_gfx_off_control,
	.register_irq_handler = smu_v11_0_register_irq_handler,
	.set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
	.get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
	.baco_is_support = navi10_is_baco_supported,
	.baco_get_state = smu_v11_0_baco_get_state,
	.baco_set_state = smu_v11_0_baco_set_state,
	.baco_enter = smu_v11_0_baco_enter,
	.baco_exit = smu_v11_0_baco_exit,
	.get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
	.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
	.set_default_od_settings = navi10_set_default_od_settings,
	.od_edit_dpm_table = navi10_od_edit_dpm_table,
	.run_btc = navi10_run_btc,
	.set_power_source = smu_v11_0_set_power_source,
	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
	.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
	.get_gpu_metrics = navi10_get_gpu_metrics,
	.enable_mgpu_fan_boost = navi10_enable_mgpu_fan_boost,
	.gfx_ulv_control = smu_v11_0_gfx_ulv_control,
	.deep_sleep_control = smu_v11_0_deep_sleep_control,
	.get_fan_parameters = navi10_get_fan_parameters,
	.post_init = navi10_post_smu_init,
	.interrupt_work = smu_v11_0_interrupt_work,
};

void navi10_set_ppt_funcs(struct smu_context *smu)
{
	smu->ppt_funcs = &navi10_ppt_funcs;
	smu->message_map = navi10_message_map;
	smu->clock_map = navi10_clk_map;
	smu->feature_map = navi10_feature_mask_map;
	smu->table_map = navi10_table_map;
	smu->pwr_src_map = navi10_pwr_src_map;
	smu->workload_map = navi10_workload_map;
}