/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 */

#include <drm/drm_debugfs.h>

#include "amdgpu.h"
#include "amdgpu_drv.h"
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "amdgpu_smu.h"
#include "atom.h"
#include <linux/pci.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/nospec.h>
#include <linux/pm_runtime.h>
#include "hwmgr.h"

static const struct cg_flag_name clocks[] = {
	{AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
	{AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
	{AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
	{AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
	{AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
	{AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
	{AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_MGCG, "Digital Rights Management Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_LS, "Digital Rights Management Light Sleep"},
	{AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},

	{AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"},
	{0, NULL},
};

static const struct hwmon_temp_label {
	enum PP_HWMON_TEMP channel;
	const char *label;
} temp_label[] = {
	{PP_TEMP_EDGE, "edge"},
	{PP_TEMP_JUNCTION, "junction"},
	{PP_TEMP_MEM, "mem"},
};

/**
 * DOC: power_dpm_state
 *
 * The power_dpm_state file is a legacy interface and is only provided for
 * backwards compatibility. The amdgpu driver provides a sysfs API for
 * adjusting certain power-related parameters, and the file power_dpm_state
 * is used for this.  It accepts the following arguments:
 *
 * - battery
 *
 * - balanced
 *
 * - performance
 *
 * battery
 *
 * On older GPUs, the vbios provided a special power state for battery
 * operation.  Selecting battery switched to this state.  This is no
 * longer provided on newer GPUs so the option does nothing in that case.
 *
 * balanced
 *
 * On older GPUs, the vbios provided a special power state for balanced
 * operation.  Selecting balanced switched to this state.  This is no
 * longer provided on newer GPUs so the option does nothing in that case.
 *
 * performance
 *
 * On older GPUs, the vbios provided a special power state for performance
 * operation.  Selecting performance switched to this state.  This is no
 * longer provided on newer GPUs so the option does nothing in that case.
 *
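 * As an illustration (assuming the card's sysfs directory is the usual
 * /sys/class/drm/card0/device; the card index may differ on your system):
 *
 * .. code-block:: bash
 *
 *	# query the current state
 *	cat /sys/class/drm/card0/device/power_dpm_state
 *	# request the battery state
 *	echo battery > /sys/class/drm/card0/device/power_dpm_state
 *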
 */

static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_pm_state_type pm;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		if (adev->smu.ppt_funcs->get_current_power_state)
			pm = smu_get_current_power_state(&adev->smu);
		else
			pm = adev->pm.dpm.user_state;
	} else if (adev->powerplay.pp_funcs->get_current_power_state) {
		pm = amdgpu_dpm_get_current_power_state(adev);
	} else {
		pm = adev->pm.dpm.user_state;
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
			(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
}

static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_pm_state_type state;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	if (strncmp("battery", buf, strlen("battery")) == 0)
		state = POWER_STATE_TYPE_BATTERY;
	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
		state = POWER_STATE_TYPE_BALANCED;
	else if (strncmp("performance", buf, strlen("performance")) == 0)
		state = POWER_STATE_TYPE_PERFORMANCE;
	else
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.user_state = state;
		mutex_unlock(&adev->pm.mutex);
	} else if (adev->powerplay.pp_funcs->dispatch_tasks) {
		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state);
	} else {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.user_state = state;
		mutex_unlock(&adev->pm.mutex);

		amdgpu_pm_compute_clocks(adev);
	}
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}

/**
 * DOC: power_dpm_force_performance_level
 *
 * The amdgpu driver provides a sysfs API for adjusting certain power-related
 * parameters.  The file power_dpm_force_performance_level is used for this.
 * It accepts the following arguments:
 *
 * - auto
 *
 * - low
 *
 * - high
 *
 * - manual
 *
 * - profile_standard
 *
 * - profile_min_sclk
 *
 * - profile_min_mclk
 *
 * - profile_peak
 *
 * auto
 *
 * When auto is selected, the driver will attempt to dynamically select
 * the optimal power profile for current conditions in the driver.
 *
 * low
 *
 * When low is selected, the clocks are forced to the lowest power state.
 *
 * high
 *
 * When high is selected, the clocks are forced to the highest power state.
 *
 * manual
 *
 * When manual is selected, the user can manually adjust which power states
 * are enabled for each clock domain via the sysfs pp_dpm_mclk, pp_dpm_sclk,
 * and pp_dpm_pcie files and adjust the power state transition heuristics
 * via the pp_power_profile_mode sysfs file.
 *
 * profile_standard
 * profile_min_sclk
 * profile_min_mclk
 * profile_peak
 *
 * When the profiling modes are selected, clock and power gating are
 * disabled and the clocks are set for different profiling cases. This
 * mode is recommended for profiling specific workloads where you do not
 * want clock or power gating, or the clock fluctuation they cause, to
 * interfere with your results.  profile_standard sets the clocks to a fixed
 * clock level which varies from asic to asic.  profile_min_sclk forces the
 * sclk to the lowest level.  profile_min_mclk forces the mclk to the lowest
 * level.  profile_peak sets all clocks (mclk, sclk, pcie) to the highest levels.
 *
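 * For example, a minimal manual-mode session might look like this (card0
 * is a typical sysfs path; the card index may differ):
 *
 * .. code-block:: bash
 *
 *	# make the pp_dpm_* and pp_power_profile_mode files writable
 *	echo manual > /sys/class/drm/card0/device/power_dpm_force_performance_level
 *	# confirm the new level
 *	cat /sys/class/drm/card0/device/power_dpm_force_performance_level
 *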
 */

static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
							    struct device_attribute *attr,
							    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_dpm_forced_level level = 0xff;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		level = smu_get_performance_level(&adev->smu);
	else if (adev->powerplay.pp_funcs->get_performance_level)
		level = amdgpu_dpm_get_performance_level(adev);
	else
		level = adev->pm.dpm.forced_level;

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
			(level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
			(level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
			(level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
			(level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
			(level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
			(level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
			(level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
			"unknown");
}

static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
							    struct device_attribute *attr,
							    const char *buf,
							    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_dpm_forced_level level;
	enum amd_dpm_forced_level current_level = 0xff;
	int ret = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	if (strncmp("low", buf, strlen("low")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_LOW;
	} else if (strncmp("high", buf, strlen("high")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_HIGH;
	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_AUTO;
	} else if (strncmp("manual", buf, strlen("manual")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_MANUAL;
	} else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
	} else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
	} else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
	} else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
	} else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
	} else {
		return -EINVAL;
	}

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		current_level = smu_get_performance_level(&adev->smu);
	else if (adev->powerplay.pp_funcs->get_performance_level)
		current_level = amdgpu_dpm_get_performance_level(adev);

	if (current_level == level) {
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		return count;
	}

	if (adev->asic_type == CHIP_RAVEN) {
		if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
			if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL && level == AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, false);
			else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL && level != AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, true);
		}
	}

	/* profile_exit setting is valid only when current mode is in profile mode */
	if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
	    AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
	    AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
	    AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) &&
	    (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) {
		pr_err("Currently not in any profile mode!\n");
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		return -EINVAL;
	}

	if (is_support_sw_smu(adev)) {
		ret = smu_force_performance_level(&adev->smu, level);
		if (ret) {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
	} else if (adev->powerplay.pp_funcs->force_performance_level) {
		mutex_lock(&adev->pm.mutex);
		if (adev->pm.dpm.thermal_active) {
			mutex_unlock(&adev->pm.mutex);
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
		ret = amdgpu_dpm_force_performance_level(adev, level);
		if (ret) {
			mutex_unlock(&adev->pm.mutex);
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		} else {
			adev->pm.dpm.forced_level = level;
		}
		mutex_unlock(&adev->pm.mutex);
	}
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}

static ssize_t amdgpu_get_pp_num_states(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct pp_states_info data;
	int i, buf_len, ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		ret = smu_get_power_num_states(&adev->smu, &data);
		if (ret) {
			/* don't leak the runtime PM reference on error */
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return ret;
		}
	} else if (adev->powerplay.pp_funcs->get_pp_num_states) {
		amdgpu_dpm_get_pp_num_states(adev, &data);
	} else {
		memset(&data, 0, sizeof(data));
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
	for (i = 0; i < data.nums; i++)
		buf_len += snprintf(buf + buf_len, PAGE_SIZE - buf_len, "%d %s\n", i,
				(data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
				(data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
				(data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
				(data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");

	return buf_len;
}

static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct pp_states_info data;
	struct smu_context *smu = &adev->smu;
	enum amd_pm_state_type pm = 0;
	int i = 0, ret = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		pm = smu_get_current_power_state(smu);
		ret = smu_get_power_num_states(smu, &data);
		if (ret) {
			/* don't leak the runtime PM reference on error */
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return ret;
		}
	} else if (adev->powerplay.pp_funcs->get_current_power_state &&
		   adev->powerplay.pp_funcs->get_pp_num_states) {
		pm = amdgpu_dpm_get_current_power_state(adev);
		amdgpu_dpm_get_pp_num_states(adev, &data);
	} else {
		/* make sure the loop below doesn't read uninitialized data */
		memset(&data, 0, sizeof(data));
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	for (i = 0; i < data.nums; i++) {
		if (pm == data.states[i])
			break;
	}

	if (i == data.nums)
		i = -EINVAL;

	return snprintf(buf, PAGE_SIZE, "%d\n", i);
}

static ssize_t amdgpu_get_pp_force_state(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	if (amdgpu_in_reset(adev))
		return -EPERM;

	if (adev->pp_force_state_enabled)
		return amdgpu_get_pp_cur_state(dev, attr, buf);
	else
		return snprintf(buf, PAGE_SIZE, "\n");
}

static ssize_t amdgpu_set_pp_force_state(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_pm_state_type state = 0;
	unsigned long idx;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	if (strlen(buf) == 1)
		adev->pp_force_state_enabled = false;
	else if (is_support_sw_smu(adev))
		adev->pp_force_state_enabled = false;
	else if (adev->powerplay.pp_funcs->dispatch_tasks &&
			adev->powerplay.pp_funcs->get_pp_num_states) {
		struct pp_states_info data;

		ret = kstrtoul(buf, 0, &idx);
		if (ret || idx >= ARRAY_SIZE(data.states))
			return -EINVAL;

		idx = array_index_nospec(idx, ARRAY_SIZE(data.states));

		amdgpu_dpm_get_pp_num_states(adev, &data);
		state = data.states[idx];

		ret = pm_runtime_get_sync(ddev->dev);
		if (ret < 0) {
			pm_runtime_put_autosuspend(ddev->dev);
			return ret;
		}

		/* only set user selected power states */
		if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
		    state != POWER_STATE_TYPE_DEFAULT) {
			amdgpu_dpm_dispatch_task(adev,
					AMD_PP_TASK_ENABLE_USER_STATE, &state);
			adev->pp_force_state_enabled = true;
		}
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
	}

	return count;
}

/**
 * DOC: pp_table
 *
 * The amdgpu driver provides a sysfs API for uploading new powerplay
 * tables.  The file pp_table is used for this.  Reading the file
 * will dump the current power play table.  Writing to the file
 * will attempt to upload a new powerplay table and re-initialize
 * powerplay using that new table.
 *
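 * For example, one way to snapshot the active table and upload it again
 * later (the card0 path is illustrative):
 *
 * .. code-block:: bash
 *
 *	# dump the current powerplay table
 *	cat /sys/class/drm/card0/device/pp_table > /tmp/pp_table.bin
 *	# upload a table and re-initialize powerplay with it
 *	cat /tmp/pp_table.bin > /sys/class/drm/card0/device/pp_table
 *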
 */

static ssize_t amdgpu_get_pp_table(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	char *table = NULL;
	int size, ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		size = smu_sys_get_pp_table(&adev->smu, (void **)&table);
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		if (size < 0)
			return size;
	} else if (adev->powerplay.pp_funcs->get_pp_table) {
		size = amdgpu_dpm_get_pp_table(adev, &table);
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		if (size < 0)
			return size;
	} else {
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		return 0;
	}

	if (size >= PAGE_SIZE)
		size = PAGE_SIZE - 1;

	memcpy(buf, table, size);

	return size;
}

static ssize_t amdgpu_set_pp_table(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		ret = smu_sys_set_pp_table(&adev->smu, (void *)buf, count);
		if (ret) {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return ret;
		}
	} else if (adev->powerplay.pp_funcs->set_pp_table)
		amdgpu_dpm_set_pp_table(adev, buf, count);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}

/**
 * DOC: pp_od_clk_voltage
 *
 * The amdgpu driver provides a sysfs API for adjusting the clocks and voltages
 * in each power level within a power state.  The pp_od_clk_voltage is used for
 * this.
 *
 * Note that the actual memory controller clock rate is exposed, not
 * the effective memory clock of the DRAMs. To translate it, use the
 * following formula:
 *
 * Clock conversion (MHz):
 *
 * HBM: effective_memory_clock = memory_controller_clock * 1
 *
 * G5: effective_memory_clock = memory_controller_clock * 1
 *
 * G6: effective_memory_clock = memory_controller_clock * 2
 *
 * DRAM data rate (MT/s):
 *
 * HBM: effective_memory_clock * 2 = data_rate
 *
 * G5: effective_memory_clock * 4 = data_rate
 *
 * G6: effective_memory_clock * 8 = data_rate
 *
 * Bandwidth (MB/s):
 *
 * data_rate * vram_bit_width / 8 = memory_bandwidth
 *
 * Some examples:
 *
 * G5 on RX460:
 *
 * memory_controller_clock = 1750 MHz
 *
 * effective_memory_clock = 1750 MHz * 1 = 1750 MHz
 *
 * data rate = 1750 * 4 = 7000 MT/s
 *
 * memory_bandwidth = 7000 * 128 bits / 8 = 112000 MB/s
 *
 * G6 on RX5700:
 *
 * memory_controller_clock = 875 MHz
 *
 * effective_memory_clock = 875 MHz * 2 = 1750 MHz
 *
 * data rate = 1750 * 8 = 14000 MT/s
 *
 * memory_bandwidth = 14000 * 256 bits / 8 = 448000 MB/s
 *
 * < For Vega10 and previous ASICs >
 *
 * Reading the file will display:
 *
 * - a list of engine clock levels and voltages labeled OD_SCLK
 *
 * - a list of memory clock levels and voltages labeled OD_MCLK
 *
 * - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE
 *
 * To manually adjust these settings, first select manual using
 * power_dpm_force_performance_level. Enter a new value for each
 * level by writing a string that contains "s/m level clock voltage" to
 * the file.  E.g., "s 1 500 820" will update sclk level 1 to be 500 MHz
 * at 820 mV; "m 0 350 810" will update mclk level 0 to be 350 MHz at
 * 810 mV.  When you have edited all of the states as needed, write
 * "c" (commit) to the file to commit your changes.  If you want to reset to the
 * default power levels, write "r" (reset) to the file to reset them.
 *
 *
 * < For Vega20 and newer ASICs >
 *
 * Reading the file will display:
 *
 * - minimum and maximum engine clock labeled OD_SCLK
 *
 * - maximum memory clock labeled OD_MCLK
 *
 * - three <frequency, voltage> points labeled OD_VDDC_CURVE.
 *   They can be used to calibrate the sclk voltage curve.
 *
 * - a list of valid ranges for sclk, mclk, and voltage curve points
 *   labeled OD_RANGE
 *
 * To manually adjust these settings:
 *
 * - First select manual using power_dpm_force_performance_level
 *
 * - For clock frequency setting, enter a new value by writing a
 *   string that contains "s/m index clock" to the file. The index
 *   should be 0 to set the minimum clock and 1 to set the maximum
 *   clock. E.g., "s 0 500" will update the minimum sclk to be 500 MHz.
 *   "m 1 800" will update the maximum mclk to be 800 MHz.
 *
 *   For the sclk voltage curve, enter the new values by writing a
 *   string that contains "vc point clock voltage" to the file. The
 *   points are indexed by 0, 1 and 2. E.g., "vc 0 300 600" will
 *   update point 0 with the clock set to 300 MHz and the voltage to
 *   600 mV. "vc 2 1000 1000" will update point 2 with the clock set
 *   to 1000 MHz and the voltage to 1000 mV.
 *
 * - When you have edited all of the states as needed, write "c" (commit)
 *   to the file to commit your changes
 *
 * - If you want to reset to the default power levels, write "r" (reset)
 *   to the file to reset them
 *
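 * As a worked example for a pre-Vega20 ASIC (the clock and voltage values
 * are purely illustrative and must fall within the limits shown in
 * OD_RANGE on your board):
 *
 * .. code-block:: bash
 *
 *	echo manual > /sys/class/drm/card0/device/power_dpm_force_performance_level
 *	# set sclk level 1 to 500 MHz at 820 mV, then commit
 *	echo "s 1 500 820" > /sys/class/drm/card0/device/pp_od_clk_voltage
 *	echo "c" > /sys/class/drm/card0/device/pp_od_clk_voltage
 *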
 */

static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret;
	uint32_t parameter_size = 0;
	long parameter[64];
	char buf_cpy[128];
	char *tmp_str;
	char *sub_str;
	const char delimiter[3] = {' ', '\n', '\0'};
	uint32_t type;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	if (count > 127 || count == 0)
		return -EINVAL;

	if (*buf == 's')
		type = PP_OD_EDIT_SCLK_VDDC_TABLE;
	else if (*buf == 'm')
		type = PP_OD_EDIT_MCLK_VDDC_TABLE;
	else if (*buf == 'r')
		type = PP_OD_RESTORE_DEFAULT_TABLE;
	else if (*buf == 'c')
		type = PP_OD_COMMIT_DPM_TABLE;
	else if (!strncmp(buf, "vc", 2))
		type = PP_OD_EDIT_VDDC_CURVE;
	else
		return -EINVAL;

	memcpy(buf_cpy, buf, count);
	buf_cpy[count] = 0;

	tmp_str = buf_cpy;

	if (type == PP_OD_EDIT_VDDC_CURVE)
		tmp_str++;
	/* skip the command character and any whitespace that follows it */
	while (isspace(*++tmp_str));

	while (tmp_str[0]) {
		sub_str = strsep(&tmp_str, delimiter);
		ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
		if (ret)
			return -EINVAL;
		parameter_size++;

		if (!tmp_str)
			break;

		while (isspace(*tmp_str))
			tmp_str++;
	}

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		ret = smu_od_edit_dpm_table(&adev->smu, type,
					    parameter, parameter_size);

		if (ret) {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
	} else {
		if (adev->powerplay.pp_funcs->set_fine_grain_clk_vol) {
			ret = amdgpu_dpm_set_fine_grain_clk_vol(adev, type,
								parameter,
								parameter_size);
			if (ret) {
				pm_runtime_mark_last_busy(ddev->dev);
				pm_runtime_put_autosuspend(ddev->dev);
				return -EINVAL;
			}
		}

		if (adev->powerplay.pp_funcs->odn_edit_dpm_table) {
			ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
						parameter, parameter_size);
			if (ret) {
				pm_runtime_mark_last_busy(ddev->dev);
				pm_runtime_put_autosuspend(ddev->dev);
				return -EINVAL;
			}
		}

		if (type == PP_OD_COMMIT_DPM_TABLE) {
			if (adev->powerplay.pp_funcs->dispatch_tasks) {
				amdgpu_dpm_dispatch_task(adev,
						AMD_PP_TASK_READJUST_POWER_STATE,
						NULL);
				pm_runtime_mark_last_busy(ddev->dev);
				pm_runtime_put_autosuspend(ddev->dev);
				return count;
			} else {
				pm_runtime_mark_last_busy(ddev->dev);
				pm_runtime_put_autosuspend(ddev->dev);
				return -EINVAL;
			}
		}
	}
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}

static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t size;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		size = smu_print_clk_levels(&adev->smu, SMU_OD_SCLK, buf);
		size += smu_print_clk_levels(&adev->smu, SMU_OD_MCLK, buf+size);
		size += smu_print_clk_levels(&adev->smu, SMU_OD_VDDC_CURVE, buf+size);
		size += smu_print_clk_levels(&adev->smu, SMU_OD_RANGE, buf+size);
	} else if (adev->powerplay.pp_funcs->print_clock_levels) {
		size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
		size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
		size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size);
		size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size);
	} else {
		size = snprintf(buf, PAGE_SIZE, "\n");
	}
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

/**
 * DOC: pp_features
 *
 * The amdgpu driver provides a sysfs API for adjusting which powerplay
 * features are enabled. The file pp_features is used for this, and it is
 * only available for Vega10 and later dGPUs.
 *
 * Reading back the file will show you the following:
 * - the current ppfeature mask
 * - a list of all the supported powerplay features with their names,
 *   bitmasks and enablement status ('Y'/'N' means "enabled"/"disabled").
 *
 * To manually enable or disable a specific feature, set or clear
 * the corresponding bit in the original ppfeature mask and write the
 * new mask back to the file.
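 *
 * For example (the mask below is purely illustrative; always derive the
 * new value from the mask read back on your own ASIC):
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/pp_features
 *	echo 0x3fff > /sys/class/drm/card0/device/pp_features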
 */
static ssize_t amdgpu_set_pp_features(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint64_t featuremask;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = kstrtou64(buf, 0, &featuremask);
	if (ret)
		return -EINVAL;

	pr_debug("featuremask = 0x%llx\n", featuremask);

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		ret = smu_sys_set_pp_feature_mask(&adev->smu, featuremask);
		if (ret) {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
	} else if (adev->powerplay.pp_funcs->set_ppfeature_status) {
		ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
		if (ret) {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
	}
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}

static ssize_t amdgpu_get_pp_features(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t size;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		size = smu_sys_get_pp_feature_mask(&adev->smu, buf);
	else if (adev->powerplay.pp_funcs->get_ppfeature_status)
		size = amdgpu_dpm_get_ppfeature_status(adev, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

/**
 * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie
 *
 * The amdgpu driver provides a sysfs API for adjusting what power levels
 * are enabled for a given power state.  The files pp_dpm_sclk, pp_dpm_mclk,
 * pp_dpm_socclk, pp_dpm_fclk, pp_dpm_dcefclk and pp_dpm_pcie are used for
 * this.
 *
 * pp_dpm_socclk and pp_dpm_dcefclk interfaces are only available for
 * Vega10 and later ASICs.
 * pp_dpm_fclk interface is only available for Vega20 and later ASICs.
 *
 * Reading back the files will show you the available power levels within
 * the power state and the clock information for those levels.
 *
 * To manually adjust these states, first select manual using
 * power_dpm_force_performance_level.
 * Secondly, enable the levels you want by writing a string containing the
 * space-separated indices of those levels to the corresponding
 * pp_dpm_sclk/mclk/pcie file.
 * E.g.,
 *
 * .. code-block:: bash
 *
 *	echo "4 5 6" > pp_dpm_sclk
 *
 * will enable sclk levels 4, 5, and 6.
 *
 * NOTE: changing the dcefclk max dpm level is not currently supported
 */

static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t size;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		size = smu_print_clk_levels(&adev->smu, SMU_SCLK, buf);
	else if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

/*
 * Worst case: 32 bits individually specified, in octal at 12 characters
 * per line (+1 for \n).
 */
#define AMDGPU_MASK_BUF_MAX	(32 * 13)

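/*
 * Parse a list of space/newline-separated level indices (e.g. "4 5 6")
 * into a bitmask with the corresponding bits set.  Indices above 31 and
 * unparsable tokens are rejected with -EINVAL.
 */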
static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
{
	int ret;
	unsigned long level;
	char *sub_str = NULL;
	char *tmp;
	char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
	const char delimiter[3] = {' ', '\n', '\0'};
	size_t bytes;

	*mask = 0;

	bytes = min(count, sizeof(buf_cpy) - 1);
	memcpy(buf_cpy, buf, bytes);
	buf_cpy[bytes] = '\0';
	tmp = buf_cpy;
	while (tmp[0]) {
		sub_str = strsep(&tmp, delimiter);
		if (strlen(sub_str)) {
			ret = kstrtoul(sub_str, 0, &level);
			if (ret || level > 31)
				return -EINVAL;
			*mask |= 1 << level;
		} else
			break;
	}

	return 0;
}

static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret;
	uint32_t mask = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask);
	else if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
	else
		ret = 0;

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}

static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t size;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		size = smu_print_clk_levels(&adev->smu, SMU_MCLK, buf);
	else if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t mask = 0;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask);
	else if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
	else
		ret = 0;

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}

static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t size;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		size = smu_print_clk_levels(&adev->smu, SMU_SOCCLK, buf);
	else if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret;
	uint32_t mask = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask);
	else if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask);
	else
		ret = 0;

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}

static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t size;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		size = smu_print_clk_levels(&adev->smu, SMU_FCLK, buf);
	else if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret;
	uint32_t mask = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask);
	else if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask);
	else
		ret = 0;

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}

static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t size;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		size = smu_print_clk_levels(&adev->smu, SMU_DCEFCLK, buf);
	else if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret;
	uint32_t mask = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask);
	else if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask);
	else
		ret = 0;

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}

static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t size;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		size = smu_print_clk_levels(&adev->smu, SMU_PCIE, buf);
	else if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret;
	uint32_t mask = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask);
	else if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
	else
		ret = 0;

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}

static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t value = 0;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		value = smu_get_od_percentage(&(adev->smu), SMU_OD_SCLK);
	else if (adev->powerplay.pp_funcs->get_sclk_od)
		value = amdgpu_dpm_get_sclk_od(adev);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", value);
}

static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret;
	long int value;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = kstrtol(buf, 0, &value);

	if (ret)
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		value = smu_set_od_percentage(&(adev->smu), SMU_OD_SCLK, (uint32_t)value);
	} else {
		if (adev->powerplay.pp_funcs->set_sclk_od)
			amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);

		if (adev->powerplay.pp_funcs->dispatch_tasks) {
			amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
		} else {
			adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
			amdgpu_pm_compute_clocks(adev);
		}
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}

static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t value = 0;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		value = smu_get_od_percentage(&(adev->smu), SMU_OD_MCLK);
	else if (adev->powerplay.pp_funcs->get_mclk_od)
		value = amdgpu_dpm_get_mclk_od(adev);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", value);
}

static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret;
	long int value;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = kstrtol(buf, 0, &value);

	if (ret)
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		value = smu_set_od_percentage(&(adev->smu), SMU_OD_MCLK, (uint32_t)value);
	} else {
		if (adev->powerplay.pp_funcs->set_mclk_od)
			amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);

		if (adev->powerplay.pp_funcs->dispatch_tasks) {
			amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
		} else {
			adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
			amdgpu_pm_compute_clocks(adev);
		}
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}

/**
 * DOC: pp_power_profile_mode
 *
 * The amdgpu driver provides a sysfs API for adjusting the heuristics
 * related to switching between power levels in a power state.  The file
 * pp_power_profile_mode is used for this.
 *
 * Reading this file outputs a list of all of the predefined power profiles
 * and the relevant heuristics settings for that profile.
 *
 * To select a profile or create a custom profile, first select manual using
 * power_dpm_force_performance_level.  Writing the number of a predefined
 * profile to pp_power_profile_mode will enable those heuristics.  To
 * create a custom set of heuristics, write a string of numbers to the file
 * starting with the number of the custom profile along with a setting
 * for each heuristic parameter.  Due to differences across asic families
 * the heuristic parameters vary from family to family.
 *
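 * For example, to select predefined profile number 2 (profile numbering
 * and availability vary by ASIC; read the file first to see the list):
 *
 * .. code-block:: bash
 *
 *	echo manual > /sys/class/drm/card0/device/power_dpm_force_performance_level
 *	echo 2 > /sys/class/drm/card0/device/pp_power_profile_mode
 *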
 */

static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t size;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		size = smu_get_power_profile_mode(&adev->smu, buf);
	else if (adev->powerplay.pp_funcs->get_power_profile_mode)
		size = amdgpu_dpm_get_power_profile_mode(adev, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	int ret;
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t parameter_size = 0;
	long parameter[64];
	char *sub_str, buf_cpy[128];
	char *tmp_str;
	uint32_t i = 0;
	char tmp[2];
	long int profile_mode = 0;
	const char delimiter[3] = {' ', '\n', '\0'};

	if (amdgpu_in_reset(adev))
		return -EPERM;

	tmp[0] = *(buf);
	tmp[1] = '\0';
	ret = kstrtol(tmp, 0, &profile_mode);
	if (ret)
		return -EINVAL;

	if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
		if (count < 2 || count > 127)
			return -EINVAL;
		while (isspace(*++buf))
			i++;
		memcpy(buf_cpy, buf, count-i);
		buf_cpy[count-i] = '\0';
		tmp_str = buf_cpy;
		while (tmp_str[0]) {
			sub_str = strsep(&tmp_str, delimiter);
			ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
			if (ret)
				return -EINVAL;
			parameter_size++;

			/* strsep() sets tmp_str to NULL after the last token */
			if (!tmp_str)
				break;

			while (isspace(*tmp_str))
				tmp_str++;
		}
	}
	parameter[parameter_size] = profile_mode;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size, true);
	else if (adev->powerplay.pp_funcs->set_power_profile_mode)
		ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (!ret)
		return count;

	return -EINVAL;
}

/**
 * DOC: gpu_busy_percent
 *
 * The amdgpu driver provides a sysfs API for reading how busy the GPU
 * is as a percentage.  The file gpu_busy_percent is used for this.
 * The SMU firmware computes a percentage of load based on the
 * aggregate activity level in the IP cores.
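 *
 * For example, to sample the load once per second (card0 is a typical
 * sysfs path):
 *
 * .. code-block:: bash
 *
 *	watch -n 1 cat /sys/class/drm/card0/device/gpu_busy_percent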
 */
static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int r, value, size = sizeof(value);

	if (amdgpu_in_reset(adev))
		return -EPERM;

	r = pm_runtime_get_sync(ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return r;
	}

	/* read the IP busy sensor */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
				   (void *)&value, &size);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", value);
}

/**
 * DOC: mem_busy_percent
 *
 * The amdgpu driver provides a sysfs API for reading how busy the VRAM
 * is as a percentage.  The file mem_busy_percent is used for this.
 * The SMU firmware computes a percentage of load based on the
 * aggregate activity level in the IP cores.
 */
static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int r, value, size = sizeof(value);

	if (amdgpu_in_reset(adev))
		return -EPERM;

	r = pm_runtime_get_sync(ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return r;
	}

	/* read the memory load sensor */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD,
				   (void *)&value, &size);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", value);
}

/**
 * DOC: pcie_bw
 *
 * The amdgpu driver provides a sysfs API for estimating how much data
 * has been received and sent by the GPU in the last second through PCIe.
 * The file pcie_bw is used for this.
 * The perf counters count the number of received and sent messages and return
 * those values, as well as the maximum payload size of a PCIe packet (mps).
 * Note that it is not possible to easily and quickly obtain the size of each
 * packet transmitted, so we output the max payload size (mps) to allow for
 * quick estimation of the PCIe bandwidth usage.
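 *
 * Given the three reported values, a rough upper bound on the bandwidth
 * used in the last second is (received + sent) * mps, e.g.:
 *
 * .. code-block:: bash
 *
 *	# pcie_bw prints: <received count> <sent count> <mps in bytes>
 *	read rx tx mps < /sys/class/drm/card0/device/pcie_bw
 *	echo "up to $(( (rx + tx) * mps )) bytes in the last second"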
 */
static ssize_t amdgpu_get_pcie_bw(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint64_t count0 = 0, count1 = 0;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;

	if (adev->flags & AMD_IS_APU)
		return -ENODATA;

	if (!adev->asic_funcs->get_pcie_usage)
		return -ENODATA;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	amdgpu_asic_get_pcie_usage(adev, &count0, &count1);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return snprintf(buf, PAGE_SIZE, "%llu %llu %i\n",
			count0, count1, pcie_get_mps(adev->pdev));
}

/**
 * DOC: unique_id
 *
1876 * The amdgpu driver provides a sysfs API for reading a unique ID for the GPU.
1877 * The file unique_id is used for this.
1878 * The ID persists with the GPU from machine to machine.
1879 *
1880 * NOTE: This will only work for GFX9 and newer. This file will be absent
1881 * on unsupported ASICs (GFX8 and older).
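 *
 * For example (hypothetical ID and card index):
 *
 *   $ cat /sys/class/drm/card0/device/unique_id
 *   0123456789abcdef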
1882 */
1883static ssize_t amdgpu_get_unique_id(struct device *dev,
1884		struct device_attribute *attr,
1885		char *buf)
1886{
1887	struct drm_device *ddev = dev_get_drvdata(dev);
1888	struct amdgpu_device *adev = drm_to_adev(ddev);
1889
1890	if (amdgpu_in_reset(adev))
1891		return -EPERM;
1892
1893	if (adev->unique_id)
1894		return snprintf(buf, PAGE_SIZE, "%016llx\n", adev->unique_id);
1895
1896	return 0;
1897}
1898
1899/**
1900 * DOC: thermal_throttling_logging
1901 *
1902 * Thermal throttling pulls down the clock frequency and thus the performance.
1903 * It's a useful mechanism to protect the chip from overheating. Since it
1904 * impacts performance, the user controls whether throttling events are
1905 * logged and, if so, how often.
1906 *
1907 * Reading back the file shows you the status (enabled or disabled) and
1908 * the interval (in seconds) between each thermal logging.
1909 *
1910 * Writing an integer to the file sets a new logging interval, in seconds.
1911 * The value should be between 1 and 3600. If the value is less than 1,
1912 * thermal logging is disabled. Values greater than 3600 are rejected.
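 *
 * For example (hypothetical card index):
 *
 *   $ echo 60 > /sys/class/drm/card0/device/thermal_throttling_logging
 *   $ echo 0  > /sys/class/drm/card0/device/thermal_throttling_logging
 *
 * The first write logs throttling events at most once per minute; the
 * second disables the logging altogether.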
1913 */
1914static ssize_t amdgpu_get_thermal_throttling_logging(struct device *dev,
1915						     struct device_attribute *attr,
1916						     char *buf)
1917{
1918	struct drm_device *ddev = dev_get_drvdata(dev);
1919	struct amdgpu_device *adev = drm_to_adev(ddev);
1920
1921	return snprintf(buf, PAGE_SIZE, "%s: thermal throttling logging %s, with interval %d seconds\n",
1922			adev_to_drm(adev)->unique,
1923			atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled",
1924			adev->throttling_logging_rs.interval / HZ + 1);
1925}
1926
1927static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
1928						     struct device_attribute *attr,
1929						     const char *buf,
1930						     size_t count)
1931{
1932	struct drm_device *ddev = dev_get_drvdata(dev);
1933	struct amdgpu_device *adev = drm_to_adev(ddev);
1934	long throttling_logging_interval;
1935	unsigned long flags;
1936	int ret = 0;
1937
1938	ret = kstrtol(buf, 0, &throttling_logging_interval);
1939	if (ret)
1940		return ret;
1941
1942	if (throttling_logging_interval > 3600)
1943		return -EINVAL;
1944
1945	if (throttling_logging_interval > 0) {
1946		raw_spin_lock_irqsave(&adev->throttling_logging_rs.lock, flags);
1947		/*
1948		 * Reset the ratelimit timer internals.
1949		 * This can effectively restart the timer.
1950		 */
1951		adev->throttling_logging_rs.interval =
1952			(throttling_logging_interval - 1) * HZ;
1953		adev->throttling_logging_rs.begin = 0;
1954		adev->throttling_logging_rs.printed = 0;
1955		adev->throttling_logging_rs.missed = 0;
1956		raw_spin_unlock_irqrestore(&adev->throttling_logging_rs.lock, flags);
1957
1958		atomic_set(&adev->throttling_logging_enabled, 1);
1959	} else {
1960		atomic_set(&adev->throttling_logging_enabled, 0);
1961	}
1962
1963	return count;
1964}
1965
1966/**
1967 * DOC: gpu_metrics
1968 *
1969 * The amdgpu driver provides a sysfs API for retrieving current gpu
1970 * metrics data. The file gpu_metrics is used for this. Reading the
1971 * file will dump all the current gpu metrics data.
1972 *
1973 * The data includes temperature, frequency, engine utilization, power
1974 * consumption, throttler status, fan speed and CPU core statistics
1975 * (available for APUs only). That is, it gives a snapshot of all sensors
1976 * at the same time.
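 *
 * The file holds a binary metrics table, so it is typically captured
 * for offline decoding; for example (hypothetical card index):
 *
 *   $ cat /sys/class/drm/card0/device/gpu_metrics > gpu_metrics.bin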
1977 */
1978static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
1979				      struct device_attribute *attr,
1980				      char *buf)
1981{
1982	struct drm_device *ddev = dev_get_drvdata(dev);
1983	struct amdgpu_device *adev = drm_to_adev(ddev);
1984	void *gpu_metrics;
1985	ssize_t size = 0;
1986	int ret;
1987
1988	if (amdgpu_in_reset(adev))
1989		return -EPERM;
1990
1991	ret = pm_runtime_get_sync(ddev->dev);
1992	if (ret < 0) {
1993		pm_runtime_put_autosuspend(ddev->dev);
1994		return ret;
1995	}
1996
1997	if (is_support_sw_smu(adev))
1998		size = smu_sys_get_gpu_metrics(&adev->smu, &gpu_metrics);
1999	else if (adev->powerplay.pp_funcs->get_gpu_metrics)
2000		size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);
2001
2002	if (size <= 0)
2003		goto out;
2004
2005	if (size >= PAGE_SIZE)
2006		size = PAGE_SIZE - 1;
2007
2008	memcpy(buf, gpu_metrics, size);
2009
2010out:
2011	pm_runtime_mark_last_busy(ddev->dev);
2012	pm_runtime_put_autosuspend(ddev->dev);
2013
2014	return size;
2015}
2016
2017static struct amdgpu_device_attr amdgpu_device_attrs[] = {
2018	AMDGPU_DEVICE_ATTR_RW(power_dpm_state,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2019	AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level,	ATTR_FLAG_BASIC),
2020	AMDGPU_DEVICE_ATTR_RO(pp_num_states,				ATTR_FLAG_BASIC),
2021	AMDGPU_DEVICE_ATTR_RO(pp_cur_state,				ATTR_FLAG_BASIC),
2022	AMDGPU_DEVICE_ATTR_RW(pp_force_state,				ATTR_FLAG_BASIC),
2023	AMDGPU_DEVICE_ATTR_RW(pp_table,					ATTR_FLAG_BASIC),
2024	AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2025	AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2026	AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2027	AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2028	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk,				ATTR_FLAG_BASIC),
2029	AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie,				ATTR_FLAG_BASIC),
2030	AMDGPU_DEVICE_ATTR_RW(pp_sclk_od,				ATTR_FLAG_BASIC),
2031	AMDGPU_DEVICE_ATTR_RW(pp_mclk_od,				ATTR_FLAG_BASIC),
2032	AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode,			ATTR_FLAG_BASIC),
2033	AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage,			ATTR_FLAG_BASIC),
2034	AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent,				ATTR_FLAG_BASIC),
2035	AMDGPU_DEVICE_ATTR_RO(mem_busy_percent,				ATTR_FLAG_BASIC),
2036	AMDGPU_DEVICE_ATTR_RO(pcie_bw,					ATTR_FLAG_BASIC),
2037	AMDGPU_DEVICE_ATTR_RW(pp_features,				ATTR_FLAG_BASIC),
2038	AMDGPU_DEVICE_ATTR_RO(unique_id,				ATTR_FLAG_BASIC),
2039	AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging,		ATTR_FLAG_BASIC),
2040	AMDGPU_DEVICE_ATTR_RO(gpu_metrics,				ATTR_FLAG_BASIC),
2041};
2042
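/*
 * Default policy for exposing device attributes: mark an attribute
 * unsupported when the ASIC lacks the underlying feature, and strip
 * write permission where setting is not allowed (standalone
 * mclk/socclk/fclk levels on Arcturus, or a VF outside one-VF mode).
 */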
2043static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
2044			       uint32_t mask, enum amdgpu_device_attr_states *states)
2045{
2046	struct device_attribute *dev_attr = &attr->dev_attr;
2047	const char *attr_name = dev_attr->attr.name;
2048	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
2049	enum amd_asic_type asic_type = adev->asic_type;
2050
2051	if (!(attr->flags & mask)) {
2052		*states = ATTR_STATE_UNSUPPORTED;
2053		return 0;
2054	}
2055
2056#define DEVICE_ATTR_IS(_name)	(!strcmp(attr_name, #_name))
2057
2058	if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
2059		if (asic_type < CHIP_VEGA10)
2060			*states = ATTR_STATE_UNSUPPORTED;
2061	} else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
2062		if (asic_type < CHIP_VEGA10 || asic_type == CHIP_ARCTURUS)
2063			*states = ATTR_STATE_UNSUPPORTED;
2064	} else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
2065		if (asic_type < CHIP_VEGA20)
2066			*states = ATTR_STATE_UNSUPPORTED;
2067	} else if (DEVICE_ATTR_IS(pp_dpm_pcie)) {
2068		if (asic_type == CHIP_ARCTURUS)
2069			*states = ATTR_STATE_UNSUPPORTED;
2070	} else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) {
2071		*states = ATTR_STATE_UNSUPPORTED;
2072		if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
2073		    (!is_support_sw_smu(adev) && hwmgr->od_enabled))
2074			*states = ATTR_STATE_SUPPORTED;
2075	} else if (DEVICE_ATTR_IS(mem_busy_percent)) {
2076		if (adev->flags & AMD_IS_APU || asic_type == CHIP_VEGA10)
2077			*states = ATTR_STATE_UNSUPPORTED;
2078	} else if (DEVICE_ATTR_IS(pcie_bw)) {
2079		/* PCIe Perf counters won't work on APU nodes */
2080		if (adev->flags & AMD_IS_APU)
2081			*states = ATTR_STATE_UNSUPPORTED;
2082	} else if (DEVICE_ATTR_IS(unique_id)) {
2083		if (asic_type != CHIP_VEGA10 &&
2084		    asic_type != CHIP_VEGA20 &&
2085		    asic_type != CHIP_ARCTURUS)
2086			*states = ATTR_STATE_UNSUPPORTED;
2087	} else if (DEVICE_ATTR_IS(pp_features)) {
2088		if (adev->flags & AMD_IS_APU || asic_type < CHIP_VEGA10)
2089			*states = ATTR_STATE_UNSUPPORTED;
2090	} else if (DEVICE_ATTR_IS(gpu_metrics)) {
2091		if (asic_type < CHIP_VEGA12)
2092			*states = ATTR_STATE_UNSUPPORTED;
2093	}
2094
2095	if (asic_type == CHIP_ARCTURUS) {
2096		/* Arcturus does not support standalone mclk/socclk/fclk level setting */
2097		if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
2098		    DEVICE_ATTR_IS(pp_dpm_socclk) ||
2099		    DEVICE_ATTR_IS(pp_dpm_fclk)) {
2100			dev_attr->attr.mode &= ~S_IWUGO;
2101			dev_attr->store = NULL;
2102		}
2103	}
2104
2105	/* setting should not be allowed from VF if not in one VF mode */
2106	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
2107		dev_attr->attr.mode &= ~S_IWUGO;
2108		dev_attr->store = NULL;
2109	}
2110
2111#undef DEVICE_ATTR_IS
2112
2113	return 0;
2114}
2115
2117static int amdgpu_device_attr_create(struct amdgpu_device *adev,
2118				     struct amdgpu_device_attr *attr,
2119				     uint32_t mask, struct list_head *attr_list)
2120{
2121	int ret = 0;
2122	enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED;
2123	struct amdgpu_device_attr_entry *attr_entry;
2124	struct device_attribute *dev_attr;
2125	const char *name;
2126
2127	int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
2128			   uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update;
2129
2130	if (!attr)
2131		return -EINVAL;
2132
2133	dev_attr = &attr->dev_attr;
2134	name = dev_attr->attr.name;
2135
2136	attr_update = attr->attr_update ? attr->attr_update : default_attr_update;
2137
2138	ret = attr_update(adev, attr, mask, &attr_states);
2139	if (ret) {
2140		dev_err(adev->dev, "failed to update device file %s, ret = %d\n",
2141			name, ret);
2142		return ret;
2143	}
2144
2145	if (attr_states == ATTR_STATE_UNSUPPORTED)
2146		return 0;
2147
2148	ret = device_create_file(adev->dev, dev_attr);
2149	if (ret) {
2150		dev_err(adev->dev, "failed to create device file %s, ret = %d\n",
2151			name, ret);
		return ret;
2152	}
2153
2154	attr_entry = kmalloc(sizeof(*attr_entry), GFP_KERNEL);
2155	if (!attr_entry)
2156		return -ENOMEM;
2157
2158	attr_entry->attr = attr;
2159	INIT_LIST_HEAD(&attr_entry->entry);
2160
2161	list_add_tail(&attr_entry->entry, attr_list);
2162
2163	return ret;
2164}
2165
2166static void amdgpu_device_attr_remove(struct amdgpu_device *adev, struct amdgpu_device_attr *attr)
2167{
2168	struct device_attribute *dev_attr = &attr->dev_attr;
2169
2170	device_remove_file(adev->dev, dev_attr);
2171}
2172
2173static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
2174					     struct list_head *attr_list);
2175
2176static int amdgpu_device_attr_create_groups(struct amdgpu_device *adev,
2177					    struct amdgpu_device_attr *attrs,
2178					    uint32_t counts,
2179					    uint32_t mask,
2180					    struct list_head *attr_list)
2181{
2182	int ret = 0;
2183	uint32_t i = 0;
2184
2185	for (i = 0; i < counts; i++) {
2186		ret = amdgpu_device_attr_create(adev, &attrs[i], mask, attr_list);
2187		if (ret)
2188			goto failed;
2189	}
2190
2191	return 0;
2192
2193failed:
2194	amdgpu_device_attr_remove_groups(adev, attr_list);
2195
2196	return ret;
2197}
2198
2199static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
2200					     struct list_head *attr_list)
2201{
2202	struct amdgpu_device_attr_entry *entry, *entry_tmp;
2203
2204	if (list_empty(attr_list))
2205		return;
2206
2207	list_for_each_entry_safe(entry, entry_tmp, attr_list, entry) {
2208		amdgpu_device_attr_remove(adev, entry->attr);
2209		list_del(&entry->entry);
2210		kfree(entry);
2211	}
2212}
2213
2214static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
2215				      struct device_attribute *attr,
2216				      char *buf)
2217{
2218	struct amdgpu_device *adev = dev_get_drvdata(dev);
2219	int channel = to_sensor_dev_attr(attr)->index;
2220	int r, temp = 0, size = sizeof(temp);
2221
2222	if (amdgpu_in_reset(adev))
2223		return -EPERM;
2224
2225	if (channel >= PP_TEMP_MAX)
2226		return -EINVAL;
2227
2228	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2229	if (r < 0) {
2230		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2231		return r;
2232	}
2233
2234	switch (channel) {
2235	case PP_TEMP_JUNCTION:
2236		/* get current junction temperature */
2237		r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
2238					   (void *)&temp, &size);
2239		break;
2240	case PP_TEMP_EDGE:
2241		/* get current edge temperature */
2242		r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
2243					   (void *)&temp, &size);
2244		break;
2245	case PP_TEMP_MEM:
2246		/* get current memory temperature */
2247		r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
2248					   (void *)&temp, &size);
2249		break;
2250	default:
2251		r = -EINVAL;
2252		break;
2253	}
2254
2255	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2256	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2257
2258	if (r)
2259		return r;
2260
2261	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
2262}
2263
2264static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
2265					     struct device_attribute *attr,
2266					     char *buf)
2267{
2268	struct amdgpu_device *adev = dev_get_drvdata(dev);
2269	int hyst = to_sensor_dev_attr(attr)->index;
2270	int temp;
2271
2272	if (hyst)
2273		temp = adev->pm.dpm.thermal.min_temp;
2274	else
2275		temp = adev->pm.dpm.thermal.max_temp;
2276
2277	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
2278}
2279
2280static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
2281					     struct device_attribute *attr,
2282					     char *buf)
2283{
2284	struct amdgpu_device *adev = dev_get_drvdata(dev);
2285	int hyst = to_sensor_dev_attr(attr)->index;
2286	int temp;
2287
2288	if (hyst)
2289		temp = adev->pm.dpm.thermal.min_hotspot_temp;
2290	else
2291		temp = adev->pm.dpm.thermal.max_hotspot_crit_temp;
2292
2293	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
2294}
2295
2296static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
2297					     struct device_attribute *attr,
2298					     char *buf)
2299{
2300	struct amdgpu_device *adev = dev_get_drvdata(dev);
2301	int hyst = to_sensor_dev_attr(attr)->index;
2302	int temp;
2303
2304	if (hyst)
2305		temp = adev->pm.dpm.thermal.min_mem_temp;
2306	else
2307		temp = adev->pm.dpm.thermal.max_mem_crit_temp;
2308
2309	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
2310}
2311
2312static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
2313					     struct device_attribute *attr,
2314					     char *buf)
2315{
2316	int channel = to_sensor_dev_attr(attr)->index;
2317
2318	if (channel >= PP_TEMP_MAX)
2319		return -EINVAL;
2320
2321	return snprintf(buf, PAGE_SIZE, "%s\n", temp_label[channel].label);
2322}
2323
2324static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
2325					     struct device_attribute *attr,
2326					     char *buf)
2327{
2328	struct amdgpu_device *adev = dev_get_drvdata(dev);
2329	int channel = to_sensor_dev_attr(attr)->index;
2330	int temp = 0;
2331
2332	if (channel >= PP_TEMP_MAX)
2333		return -EINVAL;
2334
2335	switch (channel) {
2336	case PP_TEMP_JUNCTION:
2337		temp = adev->pm.dpm.thermal.max_hotspot_emergency_temp;
2338		break;
2339	case PP_TEMP_EDGE:
2340		temp = adev->pm.dpm.thermal.max_edge_emergency_temp;
2341		break;
2342	case PP_TEMP_MEM:
2343		temp = adev->pm.dpm.thermal.max_mem_emergency_temp;
2344		break;
2345	}
2346
2347	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
2348}
2349
2350static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
2351					    struct device_attribute *attr,
2352					    char *buf)
2353{
2354	struct amdgpu_device *adev = dev_get_drvdata(dev);
2355	u32 pwm_mode = 0;
2356	int ret;
2357
2358	if (amdgpu_in_reset(adev))
2359		return -EPERM;
2360
2361	ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2362	if (ret < 0) {
2363		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2364		return ret;
2365	}
2366
2367	if (is_support_sw_smu(adev)) {
2368		pwm_mode = smu_get_fan_control_mode(&adev->smu);
2369	} else {
2370		if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
2371			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2372			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2373			return -EINVAL;
2374		}
2375
2376		pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
2377	}
2378
2379	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2380	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2381
2382	return sprintf(buf, "%i\n", pwm_mode);
2383}
2384
2385static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
2386					    struct device_attribute *attr,
2387					    const char *buf,
2388					    size_t count)
2389{
2390	struct amdgpu_device *adev = dev_get_drvdata(dev);
2391	int err, ret;
2392	int value;
2393
2394	if (amdgpu_in_reset(adev))
2395		return -EPERM;
2396
2397	err = kstrtoint(buf, 10, &value);
2398	if (err)
2399		return err;
2400
2401	ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2402	if (ret < 0) {
2403		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2404		return ret;
2405	}
2406
2407	if (is_support_sw_smu(adev)) {
2408		smu_set_fan_control_mode(&adev->smu, value);
2409	} else {
2410		if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
2411			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2412			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2413			return -EINVAL;
2414		}
2415
2416		amdgpu_dpm_set_fan_control_mode(adev, value);
2417	}
2418
2419	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2420	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2421
2422	return count;
2423}
2424
2425static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
2426					 struct device_attribute *attr,
2427					 char *buf)
2428{
2429	return sprintf(buf, "%i\n", 0);
2430}
2431
2432static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
2433					 struct device_attribute *attr,
2434					 char *buf)
2435{
2436	return sprintf(buf, "%i\n", 255);
2437}
2438
2439static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
2440				     struct device_attribute *attr,
2441				     const char *buf, size_t count)
2442{
2443	struct amdgpu_device *adev = dev_get_drvdata(dev);
2444	int err;
2445	u32 value;
2446	u32 pwm_mode;
2447
2448	if (amdgpu_in_reset(adev))
2449		return -EPERM;
2450
2451	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2452	if (err < 0) {
2453		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2454		return err;
2455	}
2456
2457	if (is_support_sw_smu(adev))
2458		pwm_mode = smu_get_fan_control_mode(&adev->smu);
2459	else
2460		pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
2461
2462	if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
2463		pr_info("manual fan speed control should be enabled first\n");
2464		pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2465		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2466		return -EINVAL;
2467	}
2468
2469	err = kstrtou32(buf, 10, &value);
2470	if (err) {
2471		pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2472		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2473		return err;
2474	}
2475
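	/*
	 * hwmon expresses pwm1 as 0-255 while the DPM interfaces take a
	 * percentage; e.g. a write of 128 requests roughly 50% fan speed.
	 */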
2476	value = (value * 100) / 255;
2477
2478	if (is_support_sw_smu(adev))
2479		err = smu_set_fan_speed_percent(&adev->smu, value);
2480	else if (adev->powerplay.pp_funcs->set_fan_speed_percent)
2481		err = amdgpu_dpm_set_fan_speed_percent(adev, value);
2482	else
2483		err = -EINVAL;
2484
2485	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2486	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2487
2488	if (err)
2489		return err;
2490
2491	return count;
2492}
2493
2494static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
2495				     struct device_attribute *attr,
2496				     char *buf)
2497{
2498	struct amdgpu_device *adev = dev_get_drvdata(dev);
2499	int err;
2500	u32 speed = 0;
2501
2502	if (amdgpu_in_reset(adev))
2503		return -EPERM;
2504
2505	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2506	if (err < 0) {
2507		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2508		return err;
2509	}
2510
2511	if (is_support_sw_smu(adev))
2512		err = smu_get_fan_speed_percent(&adev->smu, &speed);
2513	else if (adev->powerplay.pp_funcs->get_fan_speed_percent)
2514		err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
2515	else
2516		err = -EINVAL;
2517
2518	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2519	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2520
2521	if (err)
2522		return err;
2523
2524	speed = (speed * 255) / 100;
2525
2526	return sprintf(buf, "%i\n", speed);
2527}
2528
2529static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
2530					   struct device_attribute *attr,
2531					   char *buf)
2532{
2533	struct amdgpu_device *adev = dev_get_drvdata(dev);
2534	int err;
2535	u32 speed = 0;
2536
2537	if (amdgpu_in_reset(adev))
2538		return -EPERM;
2539
2540	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2541	if (err < 0) {
2542		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2543		return err;
2544	}
2545
2546	if (is_support_sw_smu(adev))
2547		err = smu_get_fan_speed_rpm(&adev->smu, &speed);
2548	else if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
2549		err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
2550	else
2551		err = -EINVAL;
2552
2553	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2554	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2555
2556	if (err)
2557		return err;
2558
2559	return sprintf(buf, "%i\n", speed);
2560}
2561
2562static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
2563					 struct device_attribute *attr,
2564					 char *buf)
2565{
2566	struct amdgpu_device *adev = dev_get_drvdata(dev);
2567	u32 min_rpm = 0;
2568	u32 size = sizeof(min_rpm);
2569	int r;
2570
2571	if (amdgpu_in_reset(adev))
2572		return -EPERM;
2573
2574	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2575	if (r < 0) {
2576		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2577		return r;
2578	}
2579
2580	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
2581				   (void *)&min_rpm, &size);
2582
2583	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2584	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2585
2586	if (r)
2587		return r;
2588
2589	return snprintf(buf, PAGE_SIZE, "%d\n", min_rpm);
2590}
2591
2592static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
2593					 struct device_attribute *attr,
2594					 char *buf)
2595{
2596	struct amdgpu_device *adev = dev_get_drvdata(dev);
2597	u32 max_rpm = 0;
2598	u32 size = sizeof(max_rpm);
2599	int r;
2600
2601	if (amdgpu_in_reset(adev))
2602		return -EPERM;
2603
2604	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2605	if (r < 0) {
2606		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2607		return r;
2608	}
2609
2610	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
2611				   (void *)&max_rpm, &size);
2612
2613	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2614	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2615
2616	if (r)
2617		return r;
2618
2619	return snprintf(buf, PAGE_SIZE, "%d\n", max_rpm);
2620}
2621
2622static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
2623					   struct device_attribute *attr,
2624					   char *buf)
2625{
2626	struct amdgpu_device *adev = dev_get_drvdata(dev);
2627	int err;
2628	u32 rpm = 0;
2629
2630	if (amdgpu_in_reset(adev))
2631		return -EPERM;
2632
2633	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2634	if (err < 0) {
2635		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2636		return err;
2637	}
2638
2639	if (is_support_sw_smu(adev))
2640		err = smu_get_fan_speed_rpm(&adev->smu, &rpm);
2641	else if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
2642		err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
2643	else
2644		err = -EINVAL;
2645
2646	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2647	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2648
2649	if (err)
2650		return err;
2651
2652	return sprintf(buf, "%i\n", rpm);
2653}
2654
2655static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
2656				     struct device_attribute *attr,
2657				     const char *buf, size_t count)
2658{
2659	struct amdgpu_device *adev = dev_get_drvdata(dev);
2660	int err;
2661	u32 value;
2662	u32 pwm_mode;
2663
2664	if (amdgpu_in_reset(adev))
2665		return -EPERM;
2666
2667	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2668	if (err < 0) {
2669		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2670		return err;
2671	}
2672
2673	if (is_support_sw_smu(adev))
2674		pwm_mode = smu_get_fan_control_mode(&adev->smu);
2675	else
2676		pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
2677
2678	if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
2679		pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2680		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2681		return -ENODATA;
2682	}
2683
2684	err = kstrtou32(buf, 10, &value);
2685	if (err) {
2686		pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2687		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2688		return err;
2689	}
2690
2691	if (is_support_sw_smu(adev))
2692		err = smu_set_fan_speed_rpm(&adev->smu, value);
2693	else if (adev->powerplay.pp_funcs->set_fan_speed_rpm)
2694		err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
2695	else
2696		err = -EINVAL;
2697
2698	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2699	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2700
2701	if (err)
2702		return err;
2703
2704	return count;
2705}
2706
2707static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
2708					    struct device_attribute *attr,
2709					    char *buf)
2710{
2711	struct amdgpu_device *adev = dev_get_drvdata(dev);
2712	u32 pwm_mode = 0;
2713	int ret;
2714
2715	if (amdgpu_in_reset(adev))
2716		return -EPERM;
2717
2718	ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2719	if (ret < 0) {
2720		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2721		return ret;
2722	}
2723
2724	if (is_support_sw_smu(adev)) {
2725		pwm_mode = smu_get_fan_control_mode(&adev->smu);
2726	} else {
2727		if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
2728			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2729			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2730			return -EINVAL;
2731		}
2732
2733		pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
2734	}
2735
2736	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2737	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2738
2739	return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
2740}
2741
2742static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
2743					    struct device_attribute *attr,
2744					    const char *buf,
2745					    size_t count)
2746{
2747	struct amdgpu_device *adev = dev_get_drvdata(dev);
2748	int err;
2749	int value;
2750	u32 pwm_mode;
2751
2752	if (amdgpu_in_reset(adev))
2753		return -EPERM;
2754
2755	err = kstrtoint(buf, 10, &value);
2756	if (err)
2757		return err;
2758
2759	if (value == 0)
2760		pwm_mode = AMD_FAN_CTRL_AUTO;
2761	else if (value == 1)
2762		pwm_mode = AMD_FAN_CTRL_MANUAL;
2763	else
2764		return -EINVAL;
2765
2766	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2767	if (err < 0) {
2768		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2769		return err;
2770	}
2771
2772	if (is_support_sw_smu(adev)) {
2773		smu_set_fan_control_mode(&adev->smu, pwm_mode);
2774	} else {
2775		if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
2776			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2777			pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2778			return -EINVAL;
2779		}
2780		amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
2781	}
2782
2783	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2784	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2785
2786	return count;
2787}
2788
2789static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
2790					struct device_attribute *attr,
2791					char *buf)
2792{
2793	struct amdgpu_device *adev = dev_get_drvdata(dev);
2794	u32 vddgfx;
2795	int r, size = sizeof(vddgfx);
2796
2797	if (amdgpu_in_reset(adev))
2798		return -EPERM;
2799
2800	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2801	if (r < 0) {
2802		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2803		return r;
2804	}
2805
2806	/* get the voltage */
2807	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX,
2808				   (void *)&vddgfx, &size);
2809
2810	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2811	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2812
2813	if (r)
2814		return r;
2815
2816	return snprintf(buf, PAGE_SIZE, "%d\n", vddgfx);
2817}
2818
2819static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
2820					      struct device_attribute *attr,
2821					      char *buf)
2822{
2823	return snprintf(buf, PAGE_SIZE, "vddgfx\n");
2824}
2825
2826static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
2827				       struct device_attribute *attr,
2828				       char *buf)
2829{
2830	struct amdgpu_device *adev = dev_get_drvdata(dev);
2831	u32 vddnb;
2832	int r, size = sizeof(vddnb);
2833
2834	if (amdgpu_in_reset(adev))
2835		return -EPERM;
2836
2837	/* only APUs have vddnb */
2838	if  (!(adev->flags & AMD_IS_APU))
2839		return -EINVAL;
2840
2841	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2842	if (r < 0) {
2843		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2844		return r;
2845	}
2846
2847	/* get the voltage */
2848	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
2849				   (void *)&vddnb, &size);
2850
2851	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2852	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2853
2854	if (r)
2855		return r;
2856
2857	return snprintf(buf, PAGE_SIZE, "%d\n", vddnb);
2858}
2859
2860static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
2861					      struct device_attribute *attr,
2862					      char *buf)
2863{
2864	return snprintf(buf, PAGE_SIZE, "vddnb\n");
2865}
2866
2867static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
2868					   struct device_attribute *attr,
2869					   char *buf)
2870{
2871	struct amdgpu_device *adev = dev_get_drvdata(dev);
2872	u32 query = 0;
2873	int r, size = sizeof(u32);
2874	unsigned int uw;
2875
2876	if (amdgpu_in_reset(adev))
2877		return -EPERM;
2878
2879	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2880	if (r < 0) {
2881		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2882		return r;
2883	}
2884
2885	/* get the average power */
2886	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
2887				   (void *)&query, &size);
2888
2889	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2890	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2891
2892	if (r)
2893		return r;
2894
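	/*
	 * The sensor packs watts into a fixed-point value: integer watts in
	 * the upper bits and a fractional field in the low byte. As a
	 * hypothetical example, query = (42 << 8) | 250 decodes below to
	 * 42 * 1000000 + 250 * 1000 = 42250000 microwatts.
	 */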
2895	/* convert to microwatts */
2896	uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;
2897
2898	return snprintf(buf, PAGE_SIZE, "%u\n", uw);
2899}
2900
2901static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
2902					 struct device_attribute *attr,
2903					 char *buf)
2904{
2905	return sprintf(buf, "%i\n", 0);
2906}
2907
2908static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
2909					 struct device_attribute *attr,
2910					 char *buf)
2911{
2912	struct amdgpu_device *adev = dev_get_drvdata(dev);
2913	uint32_t limit = 0;
2914	ssize_t size;
2915	int r;
2916
2917	if (amdgpu_in_reset(adev))
2918		return -EPERM;
2919
2920	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2921	if (r < 0) {
2922		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2923		return r;
2924	}
2925
2926	if (is_support_sw_smu(adev)) {
2927		smu_get_power_limit(&adev->smu, &limit, true);
2928		size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
2929	} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
2930		adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true);
2931		size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
2932	} else {
2933		size = snprintf(buf, PAGE_SIZE, "\n");
2934	}
2935
2936	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2937	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2938
2939	return size;
2940}
2941
2942static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
2943					 struct device_attribute *attr,
2944					 char *buf)
2945{
2946	struct amdgpu_device *adev = dev_get_drvdata(dev);
2947	uint32_t limit = 0;
2948	ssize_t size;
2949	int r;
2950
2951	if (amdgpu_in_reset(adev))
2952		return -EPERM;
2953
2954	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2955	if (r < 0) {
2956		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2957		return r;
2958	}
2959
2960	if (is_support_sw_smu(adev)) {
2961		smu_get_power_limit(&adev->smu, &limit, false);
2962		size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
2963	} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
2964		adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false);
2965		size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
2966	} else {
2967		size = snprintf(buf, PAGE_SIZE, "\n");
2968	}
2969
2970	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2971	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2972
2973	return size;
2974}
2975
2977static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
2978		struct device_attribute *attr,
2979		const char *buf,
2980		size_t count)
2981{
2982	struct amdgpu_device *adev = dev_get_drvdata(dev);
2983	int err;
2984	u32 value;
2985
2986	if (amdgpu_in_reset(adev))
2987		return -EPERM;
2988
2989	if (amdgpu_sriov_vf(adev))
2990		return -EINVAL;
2991
2992	err = kstrtou32(buf, 10, &value);
2993	if (err)
2994		return err;
2995
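	/*
	 * hwmon power caps are written in microwatts; e.g. a (hypothetical)
	 * write of 220000000 requests a 220 W cap.
	 */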
2996	value = value / 1000000; /* convert to Watt */
2997
2999	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
3000	if (err < 0) {
3001		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3002		return err;
3003	}
3004
3005	if (is_support_sw_smu(adev))
3006		err = smu_set_power_limit(&adev->smu, value);
3007	else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit)
3008		err = adev->powerplay.pp_funcs->set_power_limit(adev->powerplay.pp_handle, value);
3009	else
3010		err = -EINVAL;
3011
3012	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
3013	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3014
3015	if (err)
3016		return err;
3017
3018	return count;
3019}
3020
3021static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
3022				      struct device_attribute *attr,
3023				      char *buf)
3024{
3025	struct amdgpu_device *adev = dev_get_drvdata(dev);
3026	uint32_t sclk;
3027	int r, size = sizeof(sclk);
3028
3029	if (amdgpu_in_reset(adev))
3030		return -EPERM;
3031
3032	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
3033	if (r < 0) {
3034		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3035		return r;
3036	}
3037
3038	/* get the sclk */
3039	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
3040				   (void *)&sclk, &size);
3041
3042	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
3043	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3044
3045	if (r)
3046		return r;
3047
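	/* the sensor reports the clock in 10 kHz units; hwmon wants Hz */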
3048	return snprintf(buf, PAGE_SIZE, "%u\n", sclk * 10 * 1000);
3049}
3050
3051static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
3052					    struct device_attribute *attr,
3053					    char *buf)
3054{
3055	return snprintf(buf, PAGE_SIZE, "sclk\n");
3056}
3057
3058static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
3059				      struct device_attribute *attr,
3060				      char *buf)
3061{
3062	struct amdgpu_device *adev = dev_get_drvdata(dev);
3063	uint32_t mclk;
3064	int r, size = sizeof(mclk);
3065
3066	if (amdgpu_in_reset(adev))
3067		return -EPERM;
3068
3069	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
3070	if (r < 0) {
3071		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3072		return r;
3073	}
3074
3075	/* get the mclk */
3076	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
3077				   (void *)&mclk, &size);
3078
3079	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
3080	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3081
3082	if (r)
3083		return r;
3084
3085	return snprintf(buf, PAGE_SIZE, "%u\n", mclk * 10 * 1000);
3086}
3087
3088static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
3089					    struct device_attribute *attr,
3090					    char *buf)
3091{
3092	return snprintf(buf, PAGE_SIZE, "mclk\n");
3093}
3094
3095/**
3096 * DOC: hwmon
3097 *
3098 * The amdgpu driver exposes the following sensor interfaces:
3099 *
3100 * - GPU temperature (via the on-die sensor)
3101 *
3102 * - GPU voltage
3103 *
3104 * - Northbridge voltage (APUs only)
3105 *
3106 * - GPU power
3107 *
3108 * - GPU fan
3109 *
3110 * - GPU gfx/compute engine clock
3111 *
3112 * - GPU memory clock (dGPU only)
3113 *
3114 * hwmon interfaces for GPU temperature:
3115 *
3116 * - temp[1-3]_input: the on-die GPU temperature in millidegrees Celsius
3117 *   - temp2_input and temp3_input are supported on SOC15 dGPUs only
3118 *
3119 * - temp[1-3]_label: temperature channel label
3120 *   - temp2_label and temp3_label are supported on SOC15 dGPUs only
3121 *
3122 * - temp[1-3]_crit: temperature critical max value in millidegrees Celsius
3123 *   - temp2_crit and temp3_crit are supported on SOC15 dGPUs only
3124 *
3125 * - temp[1-3]_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius
3126 *   - temp2_crit_hyst and temp3_crit_hyst are supported on SOC15 dGPUs only
3127 *
3128 * - temp[1-3]_emergency: temperature emergency max value (ASIC shutdown) in millidegrees Celsius
3129 *   - these are supported on SOC15 dGPUs only
3130 *
3131 * hwmon interfaces for GPU voltage:
3132 *
3133 * - in0_input: the voltage on the GPU in millivolts
3134 *
3135 * - in1_input: the voltage on the Northbridge in millivolts
3136 *
3137 * hwmon interfaces for GPU power:
3138 *
3139 * - power1_average: average power used by the GPU in microWatts
3140 *
3141 * - power1_cap_min: minimum cap supported in microWatts
3142 *
3143 * - power1_cap_max: maximum cap supported in microWatts
3144 *
3145 * - power1_cap: selected power cap in microWatts
3146 *
3147 * hwmon interfaces for GPU fan:
3148 *
3149 * - pwm1: pulse width modulation fan level (0-255)
3150 *
3151 * - pwm1_enable: pulse width modulation fan control method (0: no fan speed control, 1: manual fan speed control using pwm interface, 2: automatic fan speed control)
3152 *
3153 * - pwm1_min: pulse width modulation fan control minimum level (0)
3154 *
3155 * - pwm1_max: pulse width modulation fan control maximum level (255)
3156 *
3157 * - fan1_min: a minimum fan speed value, in revolutions/min (RPM)
3158 *
3159 * - fan1_max: a maximum fan speed value, in revolutions/min (RPM)
3160 *
3161 * - fan1_input: fan speed in RPM
3162 *
3163 * - fan[1-\*]_target: desired fan speed, in revolutions/min (RPM)
3164 *
3165 * - fan[1-\*]_enable: enable or disable the sensors (1: enable, 0: disable)
3166 *
3167 * hwmon interfaces for GPU clocks:
3168 *
3169 * - freq1_input: the gfx/compute clock in hertz
3170 *
3171 * - freq2_input: the memory clock in hertz
3172 *
3173 * You can use hwmon tools like sensors to view this information on your system.
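 *
 * For example, reading the edge temperature and current fan speed via
 * sysfs (the hwmon index is hypothetical and varies by system):
 *
 *   $ cat /sys/class/drm/card0/device/hwmon/hwmon0/temp1_input
 *   45000
 *   $ cat /sys/class/drm/card0/device/hwmon/hwmon0/fan1_input
 *   1432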
3174 *
3175 */
3176
3177static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_EDGE);
3178static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
3179static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
3180static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_EDGE);
3181static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_JUNCTION);
3182static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 0);
3183static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 1);
3184static SENSOR_DEVICE_ATTR(temp2_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_JUNCTION);
3185static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_MEM);
3186static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 0);
3187static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 1);
3188static SENSOR_DEVICE_ATTR(temp3_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_MEM);
3189static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_EDGE);
3190static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_JUNCTION);
3191static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_MEM);
3192static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
3193static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
3194static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
3195static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
3196static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
3197static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0);
3198static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0);
3199static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0);
3200static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0);
3201static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
3202static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
3203static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
3204static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
3205static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
3206static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
3207static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
3208static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
3209static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);
3210static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0);
3211static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0);
3212static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0);
3213
3214static struct attribute *hwmon_attributes[] = {
3215	&sensor_dev_attr_temp1_input.dev_attr.attr,
3216	&sensor_dev_attr_temp1_crit.dev_attr.attr,
3217	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
3218	&sensor_dev_attr_temp2_input.dev_attr.attr,
3219	&sensor_dev_attr_temp2_crit.dev_attr.attr,
3220	&sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
3221	&sensor_dev_attr_temp3_input.dev_attr.attr,
3222	&sensor_dev_attr_temp3_crit.dev_attr.attr,
3223	&sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
3224	&sensor_dev_attr_temp1_emergency.dev_attr.attr,
3225	&sensor_dev_attr_temp2_emergency.dev_attr.attr,
3226	&sensor_dev_attr_temp3_emergency.dev_attr.attr,
3227	&sensor_dev_attr_temp1_label.dev_attr.attr,
3228	&sensor_dev_attr_temp2_label.dev_attr.attr,
3229	&sensor_dev_attr_temp3_label.dev_attr.attr,
3230	&sensor_dev_attr_pwm1.dev_attr.attr,
3231	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
3232	&sensor_dev_attr_pwm1_min.dev_attr.attr,
3233	&sensor_dev_attr_pwm1_max.dev_attr.attr,
3234	&sensor_dev_attr_fan1_input.dev_attr.attr,
3235	&sensor_dev_attr_fan1_min.dev_attr.attr,
3236	&sensor_dev_attr_fan1_max.dev_attr.attr,
3237	&sensor_dev_attr_fan1_target.dev_attr.attr,
3238	&sensor_dev_attr_fan1_enable.dev_attr.attr,
3239	&sensor_dev_attr_in0_input.dev_attr.attr,
3240	&sensor_dev_attr_in0_label.dev_attr.attr,
3241	&sensor_dev_attr_in1_input.dev_attr.attr,
3242	&sensor_dev_attr_in1_label.dev_attr.attr,
3243	&sensor_dev_attr_power1_average.dev_attr.attr,
3244	&sensor_dev_attr_power1_cap_max.dev_attr.attr,
3245	&sensor_dev_attr_power1_cap_min.dev_attr.attr,
3246	&sensor_dev_attr_power1_cap.dev_attr.attr,
3247	&sensor_dev_attr_freq1_input.dev_attr.attr,
3248	&sensor_dev_attr_freq1_label.dev_attr.attr,
3249	&sensor_dev_attr_freq2_input.dev_attr.attr,
3250	&sensor_dev_attr_freq2_label.dev_attr.attr,
3251	NULL
3252};
3253
3254static umode_t hwmon_attributes_visible(struct kobject *kobj,
3255					struct attribute *attr, int index)
3256{
3257	struct device *dev = kobj_to_dev(kobj);
3258	struct amdgpu_device *adev = dev_get_drvdata(dev);
3259	umode_t effective_mode = attr->mode;
3260
3261	/* under multi-vf mode, the hwmon attributes are all not supported */
3262	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
3263		return 0;
3264
3265	/* there is no fan under pp one vf mode */
3266	if (amdgpu_sriov_is_pp_one_vf(adev) &&
3267	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3268	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3269	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3270	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3271	     attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3272	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3273	     attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3274	     attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3275	     attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3276		return 0;
3277
3278	/* Skip fan attributes if fan is not present */
3279	if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3280	    attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3281	    attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3282	    attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3283	    attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3284	    attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3285	    attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3286	    attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3287	    attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3288		return 0;
3289
3290	/* Skip fan attributes on APU */
3291	if ((adev->flags & AMD_IS_APU) &&
3292	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3293	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3294	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3295	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3296	     attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3297	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3298	     attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3299	     attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3300	     attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3301		return 0;
3302
3303	/* Skip crit temp on APU */
3304	if ((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ) &&
3305	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
3306	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
3307		return 0;
3308
3309	/* Skip limit attributes if DPM is not enabled */
3310	if (!adev->pm.dpm_enabled &&
3311	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
3312	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
3313	     attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3314	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3315	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3316	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3317	     attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3318	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3319	     attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3320	     attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3321	     attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3322		return 0;
3323
3324	if (!is_support_sw_smu(adev)) {
3325		/* mask fan attributes if we have no bindings for this asic to expose */
3326		if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
3327		     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
3328		    (!adev->powerplay.pp_funcs->get_fan_control_mode &&
3329		     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
3330			effective_mode &= ~S_IRUGO;
3331
3332		if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
3333		     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
3334		    (!adev->powerplay.pp_funcs->set_fan_control_mode &&
3335		     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
3336			effective_mode &= ~S_IWUSR;
3337	}
3338
3339	if (((adev->flags & AMD_IS_APU) ||
3340	     adev->family == AMDGPU_FAMILY_SI) &&	/* not implemented yet */
3341	    (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
3342	     attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr||
3343	     attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
3344		return 0;
3345
3346	if (((adev->family == AMDGPU_FAMILY_SI) ||
3347	     ((adev->flags & AMD_IS_APU) &&
3348	      (adev->asic_type < CHIP_RENOIR))) &&	/* not implemented yet */
3349	    (attr == &sensor_dev_attr_power1_average.dev_attr.attr))
3350		return 0;
3351
3352	if (!is_support_sw_smu(adev)) {
3353		/* hide max/min values if we can't both query and manage the fan */
3354		if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
3355		     !adev->powerplay.pp_funcs->get_fan_speed_percent) &&
3356		     (!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
3357		     !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
3358		    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3359		     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
3360			return 0;
3361
3362		if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
3363		     !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
3364		    (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3365		     attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
3366			return 0;
3367	}
3368
3369	if ((adev->family == AMDGPU_FAMILY_SI ||	/* not implemented yet */
3370	     adev->family == AMDGPU_FAMILY_KV) &&	/* not implemented yet */
3371	    (attr == &sensor_dev_attr_in0_input.dev_attr.attr ||
3372	     attr == &sensor_dev_attr_in0_label.dev_attr.attr))
3373		return 0;
3374
3375	/* only APUs have vddnb */
3376	if (!(adev->flags & AMD_IS_APU) &&
3377	    (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
3378	     attr == &sensor_dev_attr_in1_label.dev_attr.attr))
3379		return 0;
3380
3381	/* no mclk on APUs */
3382	if ((adev->flags & AMD_IS_APU) &&
3383	    (attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
3384	     attr == &sensor_dev_attr_freq2_label.dev_attr.attr))
3385		return 0;
3386
3387	/* only SOC15 dGPUs support hotspot and mem temperatures */
3388	if (((adev->flags & AMD_IS_APU) ||
3389	     adev->asic_type < CHIP_VEGA10) &&
3390	    (attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
3391	     attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
3392	     attr == &sensor_dev_attr_temp3_crit.dev_attr.attr ||
3393	     attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr ||
3394	     attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
3395	     attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
3396	     attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr ||
3397	     attr == &sensor_dev_attr_temp2_input.dev_attr.attr ||
3398	     attr == &sensor_dev_attr_temp3_input.dev_attr.attr ||
3399	     attr == &sensor_dev_attr_temp2_label.dev_attr.attr ||
3400	     attr == &sensor_dev_attr_temp3_label.dev_attr.attr))
3401		return 0;
3402
3403	return effective_mode;
3404}
3405
3406static const struct attribute_group hwmon_attrgroup = {
3407	.attrs = hwmon_attributes,
3408	.is_visible = hwmon_attributes_visible,
3409};
3410
3411static const struct attribute_group *hwmon_groups[] = {
3412	&hwmon_attrgroup,
3413	NULL
3414};
3415
3416int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
3417{
3418	int ret;
3419	uint32_t mask = 0;
3420
3421	if (adev->pm.sysfs_initialized)
3422		return 0;
3423
3424	if (adev->pm.dpm_enabled == 0)
3425		return 0;
3426
3427	INIT_LIST_HEAD(&adev->pm.pm_attr_list);
3428
3429	adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
3430								   DRIVER_NAME, adev,
3431								   hwmon_groups);
3432	if (IS_ERR(adev->pm.int_hwmon_dev)) {
3433		ret = PTR_ERR(adev->pm.int_hwmon_dev);
3434		dev_err(adev->dev,
3435			"Unable to register hwmon device: %d\n", ret);
3436		return ret;
3437	}
3438
3439	switch (amdgpu_virt_get_sriov_vf_mode(adev)) {
3440	case SRIOV_VF_MODE_ONE_VF:
3441		mask = ATTR_FLAG_ONEVF;
3442		break;
3443	case SRIOV_VF_MODE_MULTI_VF:
3444		mask = 0;
3445		break;
3446	case SRIOV_VF_MODE_BARE_METAL:
3447	default:
3448		mask = ATTR_FLAG_MASK_ALL;
3449		break;
3450	}
3451
3452	ret = amdgpu_device_attr_create_groups(adev,
3453					       amdgpu_device_attrs,
3454					       ARRAY_SIZE(amdgpu_device_attrs),
3455					       mask,
3456					       &adev->pm.pm_attr_list);
3457	if (ret)
3458		return ret;
3459
3460	adev->pm.sysfs_initialized = true;
3461
3462	return 0;
3463}
3464
3465void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
3466{
3467	if (adev->pm.dpm_enabled == 0)
3468		return;
3469
3470	if (adev->pm.int_hwmon_dev)
3471		hwmon_device_unregister(adev->pm.int_hwmon_dev);
3472
3473	amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
3474}
3475
3476/*
3477 * Debugfs info
3478 */
3479#if defined(CONFIG_DEBUG_FS)
3480
3481static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
3482{
3483	uint32_t value;
3484	uint64_t value64;
3485	uint32_t query = 0;
3486	int size;
3487
3488	/* GPU Clocks */
3489	size = sizeof(value);
3490	seq_printf(m, "GFX Clocks and Power:\n");
3491	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
3492		seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
3493	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
3494		seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
3495	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size))
3496		seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100);
3497	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size))
3498		seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100);
3499	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
3500		seq_printf(m, "\t%u mV (VDDGFX)\n", value);
3501	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
3502		seq_printf(m, "\t%u mV (VDDNB)\n", value);
3503	size = sizeof(uint32_t);
3504	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size))
3505		seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff);
3506	size = sizeof(value);
3507	seq_printf(m, "\n");
3508
3509	/* GPU Temp */
3510	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size))
3511		seq_printf(m, "GPU Temperature: %u C\n", value/1000);
3512
3513	/* GPU Load */
3514	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
3515		seq_printf(m, "GPU Load: %u %%\n", value);
3516	/* MEM Load */
3517	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size))
3518		seq_printf(m, "MEM Load: %u %%\n", value);
3519
3520	seq_printf(m, "\n");
3521
3522	/* SMC feature mask */
3523	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
3524		seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);
3525
3526	if (adev->asic_type > CHIP_VEGA20) {
3527		/* VCN clocks */
3528		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
3529			if (!value) {
3530				seq_printf(m, "VCN: Disabled\n");
3531			} else {
3532				seq_printf(m, "VCN: Enabled\n");
3533				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
3534					seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
3535				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
3536					seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
3537			}
3538		}
3539		seq_printf(m, "\n");
3540	} else {
3541		/* UVD clocks */
3542		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
3543			if (!value) {
3544				seq_printf(m, "UVD: Disabled\n");
3545			} else {
3546				seq_printf(m, "UVD: Enabled\n");
3547				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
3548					seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
3549				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
3550					seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
3551			}
3552		}
3553		seq_printf(m, "\n");
3554
3555		/* VCE clocks */
3556		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
3557			if (!value) {
3558				seq_printf(m, "VCE: Disabled\n");
3559			} else {
3560				seq_printf(m, "VCE: Enabled\n");
3561				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
3562					seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
3563			}
3564		}
3565	}
3566
3567	return 0;
3568}
3569
3570static void amdgpu_parse_cg_state(struct seq_file *m, u32 flags)
3571{
3572	int i;
3573
3574	for (i = 0; clocks[i].flag; i++)
3575		seq_printf(m, "\t%s: %s\n", clocks[i].name,
3576			   (flags & clocks[i].flag) ? "On" : "Off");
3577}
3578
3579static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
3580{
3581	struct drm_info_node *node = (struct drm_info_node *) m->private;
3582	struct drm_device *dev = node->minor->dev;
3583	struct amdgpu_device *adev = drm_to_adev(dev);
3584	u32 flags = 0;
3585	int r;
3586
3587	if (amdgpu_in_reset(adev))
3588		return -EPERM;
3589
3590	r = pm_runtime_get_sync(dev->dev);
3591	if (r < 0) {
3592		pm_runtime_put_autosuspend(dev->dev);
3593		return r;
3594	}
3595
3596	if (!adev->pm.dpm_enabled) {
3597		seq_printf(m, "dpm not enabled\n");
3598		pm_runtime_mark_last_busy(dev->dev);
3599		pm_runtime_put_autosuspend(dev->dev);
3600		return 0;
3601	}
3602
3603	if (!is_support_sw_smu(adev) &&
3604	    adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
3605		mutex_lock(&adev->pm.mutex);
3607		adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m);
3610		mutex_unlock(&adev->pm.mutex);
3611		r = 0;
3612	} else {
3613		r = amdgpu_debugfs_pm_info_pp(m, adev);
3614	}
3615	if (r)
3616		goto out;
3617
3618	amdgpu_device_ip_get_clockgating_state(adev, &flags);
3619
3620	seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags);
3621	amdgpu_parse_cg_state(m, flags);
3622	seq_printf(m, "\n");
3623
3624out:
3625	pm_runtime_mark_last_busy(dev->dev);
3626	pm_runtime_put_autosuspend(dev->dev);
3627
3628	return r;
3629}
3630
3631static const struct drm_info_list amdgpu_pm_info_list[] = {
3632	{"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL},
3633};
3634#endif
3635
3636int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
3637{
3638#if defined(CONFIG_DEBUG_FS)
3639	return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list));
3640#else
3641	return 0;
3642#endif
3643}
3644