1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Rafał Miłecki <zajec5@gmail.com>
23 *          Alex Deucher <alexdeucher@gmail.com>
24 */
25
26#include "amdgpu.h"
27#include "amdgpu_drv.h"
28#include "amdgpu_pm.h"
29#include "amdgpu_dpm.h"
30#include "atom.h"
31#include <linux/pci.h>
32#include <linux/hwmon.h>
33#include <linux/hwmon-sysfs.h>
34#include <linux/nospec.h>
35#include <linux/pm_runtime.h>
36#include <asm/processor.h>
37
38static const struct hwmon_temp_label {
39	enum PP_HWMON_TEMP channel;
40	const char *label;
41} temp_label[] = {
42	{PP_TEMP_EDGE, "edge"},
43	{PP_TEMP_JUNCTION, "junction"},
44	{PP_TEMP_MEM, "mem"},
45};
46
47const char * const amdgpu_pp_profile_name[] = {
48	"BOOTUP_DEFAULT",
49	"3D_FULL_SCREEN",
50	"POWER_SAVING",
51	"VIDEO",
52	"VR",
53	"COMPUTE",
54	"CUSTOM",
55	"WINDOW_3D",
56	"CAPPED",
57	"UNCAPPED",
58};
59
60/**
61 * DOC: power_dpm_state
62 *
63 * The power_dpm_state file is a legacy interface and is only provided for
64 * backwards compatibility. The amdgpu driver provides a sysfs API for adjusting
65 * certain power related parameters.  The file power_dpm_state is used for this.
66 * It accepts the following arguments:
67 *
68 * - battery
69 *
70 * - balanced
71 *
72 * - performance
73 *
74 * battery
75 *
76 * On older GPUs, the vbios provided a special power state for battery
77 * operation.  Selecting battery switched to this state.  This is no
78 * longer provided on newer GPUs so the option does nothing in that case.
79 *
80 * balanced
81 *
82 * On older GPUs, the vbios provided a special power state for balanced
83 * operation.  Selecting balanced switched to this state.  This is no
84 * longer provided on newer GPUs so the option does nothing in that case.
85 *
86 * performance
87 *
88 * On older GPUs, the vbios provided a special power state for performance
89 * operation.  Selecting performance switched to this state.  This is no
90 * longer provided on newer GPUs so the option does nothing in that case.
91 *
92 */
93
94static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
95					  struct device_attribute *attr,
96					  char *buf)
97{
98	struct drm_device *ddev = dev_get_drvdata(dev);
99	struct amdgpu_device *adev = drm_to_adev(ddev);
100	enum amd_pm_state_type pm;
101	int ret;
102
103	if (amdgpu_in_reset(adev))
104		return -EPERM;
105	if (adev->in_suspend && !adev->in_runpm)
106		return -EPERM;
107
108	ret = pm_runtime_get_sync(ddev->dev);
109	if (ret < 0) {
110		pm_runtime_put_autosuspend(ddev->dev);
111		return ret;
112	}
113
114	amdgpu_dpm_get_current_power_state(adev, &pm);
115
116	pm_runtime_mark_last_busy(ddev->dev);
117	pm_runtime_put_autosuspend(ddev->dev);
118
119	return sysfs_emit(buf, "%s\n",
120			  (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
121			  (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
122}
123
124static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
125					  struct device_attribute *attr,
126					  const char *buf,
127					  size_t count)
128{
129	struct drm_device *ddev = dev_get_drvdata(dev);
130	struct amdgpu_device *adev = drm_to_adev(ddev);
131	enum amd_pm_state_type  state;
132	int ret;
133
134	if (amdgpu_in_reset(adev))
135		return -EPERM;
136	if (adev->in_suspend && !adev->in_runpm)
137		return -EPERM;
138
139	if (strncmp("battery", buf, strlen("battery")) == 0)
140		state = POWER_STATE_TYPE_BATTERY;
141	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
142		state = POWER_STATE_TYPE_BALANCED;
143	else if (strncmp("performance", buf, strlen("performance")) == 0)
144		state = POWER_STATE_TYPE_PERFORMANCE;
145	else
146		return -EINVAL;
147
148	ret = pm_runtime_get_sync(ddev->dev);
149	if (ret < 0) {
150		pm_runtime_put_autosuspend(ddev->dev);
151		return ret;
152	}
153
154	amdgpu_dpm_set_power_state(adev, state);
155
156	pm_runtime_mark_last_busy(ddev->dev);
157	pm_runtime_put_autosuspend(ddev->dev);
158
159	return count;
160}
161
162
163/**
164 * DOC: power_dpm_force_performance_level
165 *
166 * The amdgpu driver provides a sysfs API for adjusting certain power
167 * related parameters.  The file power_dpm_force_performance_level is
168 * used for this.  It accepts the following arguments:
169 *
170 * - auto
171 *
172 * - low
173 *
174 * - high
175 *
176 * - manual
177 *
178 * - profile_standard
179 *
180 * - profile_min_sclk
181 *
182 * - profile_min_mclk
183 *
184 * - profile_peak
185 *
186 * auto
187 *
188 * When auto is selected, the driver will attempt to dynamically select
189 * the optimal power profile for current conditions in the driver.
190 *
191 * low
192 *
193 * When low is selected, the clocks are forced to the lowest power state.
194 *
195 * high
196 *
197 * When high is selected, the clocks are forced to the highest power state.
198 *
199 * manual
200 *
201 * When manual is selected, the user can manually adjust which power states
202 * are enabled for each clock domain via the sysfs pp_dpm_mclk, pp_dpm_sclk,
203 * and pp_dpm_pcie files and adjust the power state transition heuristics
204 * via the pp_power_profile_mode sysfs file.
205 *
206 * profile_standard
207 * profile_min_sclk
208 * profile_min_mclk
209 * profile_peak
210 *
211 * When the profiling modes are selected, clock and power gating are
212 * disabled and the clocks are set for different profiling cases. This
213 * mode is recommended for profiling specific work loads where you do
214 * not want clock or power gating for clock fluctuation to interfere
215 * with your results. profile_standard sets the clocks to a fixed clock
216 * level which varies from asic to asic.  profile_min_sclk forces the sclk
217 * to the lowest level.  profile_min_mclk forces the mclk to the lowest level.
218 * profile_peak sets all clocks (mclk, sclk, pcie) to the highest levels.
219 *
220 */
221
222static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
223							    struct device_attribute *attr,
224							    char *buf)
225{
226	struct drm_device *ddev = dev_get_drvdata(dev);
227	struct amdgpu_device *adev = drm_to_adev(ddev);
228	enum amd_dpm_forced_level level = 0xff;
229	int ret;
230
231	if (amdgpu_in_reset(adev))
232		return -EPERM;
233	if (adev->in_suspend && !adev->in_runpm)
234		return -EPERM;
235
236	ret = pm_runtime_get_sync(ddev->dev);
237	if (ret < 0) {
238		pm_runtime_put_autosuspend(ddev->dev);
239		return ret;
240	}
241
242	level = amdgpu_dpm_get_performance_level(adev);
243
244	pm_runtime_mark_last_busy(ddev->dev);
245	pm_runtime_put_autosuspend(ddev->dev);
246
247	return sysfs_emit(buf, "%s\n",
248			  (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
249			  (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
250			  (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
251			  (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
252			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
253			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
254			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
255			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
256			  (level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) ? "perf_determinism" :
257			  "unknown");
258}
259
260static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
261							    struct device_attribute *attr,
262							    const char *buf,
263							    size_t count)
264{
265	struct drm_device *ddev = dev_get_drvdata(dev);
266	struct amdgpu_device *adev = drm_to_adev(ddev);
267	enum amd_dpm_forced_level level;
268	int ret = 0;
269
270	if (amdgpu_in_reset(adev))
271		return -EPERM;
272	if (adev->in_suspend && !adev->in_runpm)
273		return -EPERM;
274
275	if (strncmp("low", buf, strlen("low")) == 0) {
276		level = AMD_DPM_FORCED_LEVEL_LOW;
277	} else if (strncmp("high", buf, strlen("high")) == 0) {
278		level = AMD_DPM_FORCED_LEVEL_HIGH;
279	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
280		level = AMD_DPM_FORCED_LEVEL_AUTO;
281	} else if (strncmp("manual", buf, strlen("manual")) == 0) {
282		level = AMD_DPM_FORCED_LEVEL_MANUAL;
283	} else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) {
284		level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
285	} else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) {
286		level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
287	} else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) {
288		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
289	} else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) {
290		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
291	} else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
292		level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
293	} else if (strncmp("perf_determinism", buf, strlen("perf_determinism")) == 0) {
294		level = AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM;
295	}  else {
296		return -EINVAL;
297	}
298
299	ret = pm_runtime_get_sync(ddev->dev);
300	if (ret < 0) {
301		pm_runtime_put_autosuspend(ddev->dev);
302		return ret;
303	}
304
305	mutex_lock(&adev->pm.stable_pstate_ctx_lock);
306	if (amdgpu_dpm_force_performance_level(adev, level)) {
307		pm_runtime_mark_last_busy(ddev->dev);
308		pm_runtime_put_autosuspend(ddev->dev);
309		mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
310		return -EINVAL;
311	}
312	/* override whatever a user ctx may have set */
313	adev->pm.stable_pstate_ctx = NULL;
314	mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
315
316	pm_runtime_mark_last_busy(ddev->dev);
317	pm_runtime_put_autosuspend(ddev->dev);
318
319	return count;
320}
321
322static ssize_t amdgpu_get_pp_num_states(struct device *dev,
323		struct device_attribute *attr,
324		char *buf)
325{
326	struct drm_device *ddev = dev_get_drvdata(dev);
327	struct amdgpu_device *adev = drm_to_adev(ddev);
328	struct pp_states_info data;
329	uint32_t i;
330	int buf_len, ret;
331
332	if (amdgpu_in_reset(adev))
333		return -EPERM;
334	if (adev->in_suspend && !adev->in_runpm)
335		return -EPERM;
336
337	ret = pm_runtime_get_sync(ddev->dev);
338	if (ret < 0) {
339		pm_runtime_put_autosuspend(ddev->dev);
340		return ret;
341	}
342
343	if (amdgpu_dpm_get_pp_num_states(adev, &data))
344		memset(&data, 0, sizeof(data));
345
346	pm_runtime_mark_last_busy(ddev->dev);
347	pm_runtime_put_autosuspend(ddev->dev);
348
349	buf_len = sysfs_emit(buf, "states: %d\n", data.nums);
350	for (i = 0; i < data.nums; i++)
351		buf_len += sysfs_emit_at(buf, buf_len, "%d %s\n", i,
352				(data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
353				(data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
354				(data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
355				(data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");
356
357	return buf_len;
358}
359
360static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
361		struct device_attribute *attr,
362		char *buf)
363{
364	struct drm_device *ddev = dev_get_drvdata(dev);
365	struct amdgpu_device *adev = drm_to_adev(ddev);
366	struct pp_states_info data = {0};
367	enum amd_pm_state_type pm = 0;
368	int i = 0, ret = 0;
369
370	if (amdgpu_in_reset(adev))
371		return -EPERM;
372	if (adev->in_suspend && !adev->in_runpm)
373		return -EPERM;
374
375	ret = pm_runtime_get_sync(ddev->dev);
376	if (ret < 0) {
377		pm_runtime_put_autosuspend(ddev->dev);
378		return ret;
379	}
380
381	amdgpu_dpm_get_current_power_state(adev, &pm);
382
383	ret = amdgpu_dpm_get_pp_num_states(adev, &data);
384
385	pm_runtime_mark_last_busy(ddev->dev);
386	pm_runtime_put_autosuspend(ddev->dev);
387
388	if (ret)
389		return ret;
390
391	for (i = 0; i < data.nums; i++) {
392		if (pm == data.states[i])
393			break;
394	}
395
396	if (i == data.nums)
397		i = -EINVAL;
398
399	return sysfs_emit(buf, "%d\n", i);
400}
401
402static ssize_t amdgpu_get_pp_force_state(struct device *dev,
403		struct device_attribute *attr,
404		char *buf)
405{
406	struct drm_device *ddev = dev_get_drvdata(dev);
407	struct amdgpu_device *adev = drm_to_adev(ddev);
408
409	if (amdgpu_in_reset(adev))
410		return -EPERM;
411	if (adev->in_suspend && !adev->in_runpm)
412		return -EPERM;
413
414	if (adev->pm.pp_force_state_enabled)
415		return amdgpu_get_pp_cur_state(dev, attr, buf);
416	else
417		return sysfs_emit(buf, "\n");
418}
419
420static ssize_t amdgpu_set_pp_force_state(struct device *dev,
421		struct device_attribute *attr,
422		const char *buf,
423		size_t count)
424{
425	struct drm_device *ddev = dev_get_drvdata(dev);
426	struct amdgpu_device *adev = drm_to_adev(ddev);
427	enum amd_pm_state_type state = 0;
428	struct pp_states_info data;
429	unsigned long idx;
430	int ret;
431
432	if (amdgpu_in_reset(adev))
433		return -EPERM;
434	if (adev->in_suspend && !adev->in_runpm)
435		return -EPERM;
436
437	adev->pm.pp_force_state_enabled = false;
438
439	if (strlen(buf) == 1)
440		return count;
441
442	ret = kstrtoul(buf, 0, &idx);
443	if (ret || idx >= ARRAY_SIZE(data.states))
444		return -EINVAL;
445
446	idx = array_index_nospec(idx, ARRAY_SIZE(data.states));
447
448	ret = pm_runtime_get_sync(ddev->dev);
449	if (ret < 0) {
450		pm_runtime_put_autosuspend(ddev->dev);
451		return ret;
452	}
453
454	ret = amdgpu_dpm_get_pp_num_states(adev, &data);
455	if (ret)
456		goto err_out;
457
458	state = data.states[idx];
459
460	/* only set user selected power states */
461	if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
462	    state != POWER_STATE_TYPE_DEFAULT) {
463		ret = amdgpu_dpm_dispatch_task(adev,
464				AMD_PP_TASK_ENABLE_USER_STATE, &state);
465		if (ret)
466			goto err_out;
467
468		adev->pm.pp_force_state_enabled = true;
469	}
470
471	pm_runtime_mark_last_busy(ddev->dev);
472	pm_runtime_put_autosuspend(ddev->dev);
473
474	return count;
475
476err_out:
477	pm_runtime_mark_last_busy(ddev->dev);
478	pm_runtime_put_autosuspend(ddev->dev);
479	return ret;
480}
481
482/**
483 * DOC: pp_table
484 *
485 * The amdgpu driver provides a sysfs API for uploading new powerplay
486 * tables.  The file pp_table is used for this.  Reading the file
487 * will dump the current power play table.  Writing to the file
488 * will attempt to upload a new powerplay table and re-initialize
489 * powerplay using that new table.
490 *
491 */
492
493static ssize_t amdgpu_get_pp_table(struct device *dev,
494		struct device_attribute *attr,
495		char *buf)
496{
497	struct drm_device *ddev = dev_get_drvdata(dev);
498	struct amdgpu_device *adev = drm_to_adev(ddev);
499	char *table = NULL;
500	int size, ret;
501
502	if (amdgpu_in_reset(adev))
503		return -EPERM;
504	if (adev->in_suspend && !adev->in_runpm)
505		return -EPERM;
506
507	ret = pm_runtime_get_sync(ddev->dev);
508	if (ret < 0) {
509		pm_runtime_put_autosuspend(ddev->dev);
510		return ret;
511	}
512
513	size = amdgpu_dpm_get_pp_table(adev, &table);
514
515	pm_runtime_mark_last_busy(ddev->dev);
516	pm_runtime_put_autosuspend(ddev->dev);
517
518	if (size <= 0)
519		return size;
520
521	if (size >= PAGE_SIZE)
522		size = PAGE_SIZE - 1;
523
524	memcpy(buf, table, size);
525
526	return size;
527}
528
529static ssize_t amdgpu_set_pp_table(struct device *dev,
530		struct device_attribute *attr,
531		const char *buf,
532		size_t count)
533{
534	struct drm_device *ddev = dev_get_drvdata(dev);
535	struct amdgpu_device *adev = drm_to_adev(ddev);
536	int ret = 0;
537
538	if (amdgpu_in_reset(adev))
539		return -EPERM;
540	if (adev->in_suspend && !adev->in_runpm)
541		return -EPERM;
542
543	ret = pm_runtime_get_sync(ddev->dev);
544	if (ret < 0) {
545		pm_runtime_put_autosuspend(ddev->dev);
546		return ret;
547	}
548
549	ret = amdgpu_dpm_set_pp_table(adev, buf, count);
550
551	pm_runtime_mark_last_busy(ddev->dev);
552	pm_runtime_put_autosuspend(ddev->dev);
553
554	if (ret)
555		return ret;
556
557	return count;
558}
559
560/**
561 * DOC: pp_od_clk_voltage
562 *
563 * The amdgpu driver provides a sysfs API for adjusting the clocks and voltages
564 * in each power level within a power state.  The pp_od_clk_voltage is used for
565 * this.
566 *
567 * Note that the actual memory controller clock rate are exposed, not
568 * the effective memory clock of the DRAMs. To translate it, use the
569 * following formula:
570 *
571 * Clock conversion (Mhz):
572 *
573 * HBM: effective_memory_clock = memory_controller_clock * 1
574 *
575 * G5: effective_memory_clock = memory_controller_clock * 1
576 *
577 * G6: effective_memory_clock = memory_controller_clock * 2
578 *
579 * DRAM data rate (MT/s):
580 *
581 * HBM: effective_memory_clock * 2 = data_rate
582 *
583 * G5: effective_memory_clock * 4 = data_rate
584 *
585 * G6: effective_memory_clock * 8 = data_rate
586 *
587 * Bandwidth (MB/s):
588 *
589 * data_rate * vram_bit_width / 8 = memory_bandwidth
590 *
591 * Some examples:
592 *
593 * G5 on RX460:
594 *
595 * memory_controller_clock = 1750 Mhz
596 *
597 * effective_memory_clock = 1750 Mhz * 1 = 1750 Mhz
598 *
599 * data rate = 1750 * 4 = 7000 MT/s
600 *
601 * memory_bandwidth = 7000 * 128 bits / 8 = 112000 MB/s
602 *
603 * G6 on RX5700:
604 *
605 * memory_controller_clock = 875 Mhz
606 *
607 * effective_memory_clock = 875 Mhz * 2 = 1750 Mhz
608 *
609 * data rate = 1750 * 8 = 14000 MT/s
610 *
611 * memory_bandwidth = 14000 * 256 bits / 8 = 448000 MB/s
612 *
613 * < For Vega10 and previous ASICs >
614 *
615 * Reading the file will display:
616 *
617 * - a list of engine clock levels and voltages labeled OD_SCLK
618 *
619 * - a list of memory clock levels and voltages labeled OD_MCLK
620 *
621 * - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE
622 *
623 * To manually adjust these settings, first select manual using
624 * power_dpm_force_performance_level. Enter a new value for each
625 * level by writing a string that contains "s/m level clock voltage" to
626 * the file.  E.g., "s 1 500 820" will update sclk level 1 to be 500 MHz
627 * at 820 mV; "m 0 350 810" will update mclk level 0 to be 350 MHz at
628 * 810 mV.  When you have edited all of the states as needed, write
629 * "c" (commit) to the file to commit your changes.  If you want to reset to the
630 * default power levels, write "r" (reset) to the file to reset them.
631 *
632 *
633 * < For Vega20 and newer ASICs >
634 *
635 * Reading the file will display:
636 *
637 * - minimum and maximum engine clock labeled OD_SCLK
638 *
639 * - minimum(not available for Vega20 and Navi1x) and maximum memory
640 *   clock labeled OD_MCLK
641 *
642 * - three <frequency, voltage> points labeled OD_VDDC_CURVE.
643 *   They can be used to calibrate the sclk voltage curve. This is
644 *   available for Vega20 and NV1X.
645 *
646 * - voltage offset for the six anchor points of the v/f curve labeled
647 *   OD_VDDC_CURVE. They can be used to calibrate the v/f curve. This
648 *   is only availabe for some SMU13 ASICs.
649 *
650 * - voltage offset(in mV) applied on target voltage calculation.
651 *   This is available for Sienna Cichlid, Navy Flounder and Dimgrey
652 *   Cavefish. For these ASICs, the target voltage calculation can be
653 *   illustrated by "voltage = voltage calculated from v/f curve +
654 *   overdrive vddgfx offset"
655 *
656 * - a list of valid ranges for sclk, mclk, and voltage curve points
657 *   labeled OD_RANGE
658 *
659 * < For APUs >
660 *
661 * Reading the file will display:
662 *
663 * - minimum and maximum engine clock labeled OD_SCLK
664 *
665 * - a list of valid ranges for sclk labeled OD_RANGE
666 *
667 * < For VanGogh >
668 *
669 * Reading the file will display:
670 *
671 * - minimum and maximum engine clock labeled OD_SCLK
672 * - minimum and maximum core clocks labeled OD_CCLK
673 *
674 * - a list of valid ranges for sclk and cclk labeled OD_RANGE
675 *
676 * To manually adjust these settings:
677 *
678 * - First select manual using power_dpm_force_performance_level
679 *
680 * - For clock frequency setting, enter a new value by writing a
681 *   string that contains "s/m index clock" to the file. The index
682 *   should be 0 if to set minimum clock. And 1 if to set maximum
683 *   clock. E.g., "s 0 500" will update minimum sclk to be 500 MHz.
684 *   "m 1 800" will update maximum mclk to be 800Mhz. For core
685 *   clocks on VanGogh, the string contains "p core index clock".
686 *   E.g., "p 2 0 800" would set the minimum core clock on core
687 *   2 to 800Mhz.
688 *
689 *   For sclk voltage curve,
690 *     - For NV1X, enter the new values by writing a string that
691 *       contains "vc point clock voltage" to the file. The points
692 *       are indexed by 0, 1 and 2. E.g., "vc 0 300 600" will update
693 *       point1 with clock set as 300Mhz and voltage as 600mV. "vc 2
694 *       1000 1000" will update point3 with clock set as 1000Mhz and
695 *       voltage 1000mV.
696 *     - For SMU13 ASICs, enter the new values by writing a string that
697 *       contains "vc anchor_point_index voltage_offset" to the file.
698 *       There are total six anchor points defined on the v/f curve with
699 *       index as 0 - 5.
700 *       - "vc 0 10" will update the voltage offset for point1 as 10mv.
701 *       - "vc 5 -10" will update the voltage offset for point6 as -10mv.
702 *
703 *   To update the voltage offset applied for gfxclk/voltage calculation,
704 *   enter the new value by writing a string that contains "vo offset".
705 *   This is supported by Sienna Cichlid, Navy Flounder and Dimgrey Cavefish.
706 *   And the offset can be a positive or negative value.
707 *
708 * - When you have edited all of the states as needed, write "c" (commit)
709 *   to the file to commit your changes
710 *
711 * - If you want to reset to the default power levels, write "r" (reset)
712 *   to the file to reset them
713 *
714 */
715
716static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
717		struct device_attribute *attr,
718		const char *buf,
719		size_t count)
720{
721	struct drm_device *ddev = dev_get_drvdata(dev);
722	struct amdgpu_device *adev = drm_to_adev(ddev);
723	int ret;
724	uint32_t parameter_size = 0;
725	long parameter[64];
726	char buf_cpy[128];
727	char *tmp_str;
728	char *sub_str;
729	const char delimiter[3] = {' ', '\n', '\0'};
730	uint32_t type;
731
732	if (amdgpu_in_reset(adev))
733		return -EPERM;
734	if (adev->in_suspend && !adev->in_runpm)
735		return -EPERM;
736
737	if (count > 127 || count == 0)
738		return -EINVAL;
739
740	if (*buf == 's')
741		type = PP_OD_EDIT_SCLK_VDDC_TABLE;
742	else if (*buf == 'p')
743		type = PP_OD_EDIT_CCLK_VDDC_TABLE;
744	else if (*buf == 'm')
745		type = PP_OD_EDIT_MCLK_VDDC_TABLE;
746	else if (*buf == 'r')
747		type = PP_OD_RESTORE_DEFAULT_TABLE;
748	else if (*buf == 'c')
749		type = PP_OD_COMMIT_DPM_TABLE;
750	else if (!strncmp(buf, "vc", 2))
751		type = PP_OD_EDIT_VDDC_CURVE;
752	else if (!strncmp(buf, "vo", 2))
753		type = PP_OD_EDIT_VDDGFX_OFFSET;
754	else
755		return -EINVAL;
756
757	memcpy(buf_cpy, buf, count);
758	buf_cpy[count] = 0;
759
760	tmp_str = buf_cpy;
761
762	if ((type == PP_OD_EDIT_VDDC_CURVE) ||
763	     (type == PP_OD_EDIT_VDDGFX_OFFSET))
764		tmp_str++;
765	while (isspace(*++tmp_str));
766
767	while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
768		if (strlen(sub_str) == 0)
769			continue;
770		ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
771		if (ret)
772			return -EINVAL;
773		parameter_size++;
774
775		if (!tmp_str)
776			break;
777
778		while (isspace(*tmp_str))
779			tmp_str++;
780	}
781
782	ret = pm_runtime_get_sync(ddev->dev);
783	if (ret < 0) {
784		pm_runtime_put_autosuspend(ddev->dev);
785		return ret;
786	}
787
788	if (amdgpu_dpm_set_fine_grain_clk_vol(adev,
789					      type,
790					      parameter,
791					      parameter_size))
792		goto err_out;
793
794	if (amdgpu_dpm_odn_edit_dpm_table(adev, type,
795					  parameter, parameter_size))
796		goto err_out;
797
798	if (type == PP_OD_COMMIT_DPM_TABLE) {
799		if (amdgpu_dpm_dispatch_task(adev,
800					     AMD_PP_TASK_READJUST_POWER_STATE,
801					     NULL))
802			goto err_out;
803	}
804
805	pm_runtime_mark_last_busy(ddev->dev);
806	pm_runtime_put_autosuspend(ddev->dev);
807
808	return count;
809
810err_out:
811	pm_runtime_mark_last_busy(ddev->dev);
812	pm_runtime_put_autosuspend(ddev->dev);
813	return -EINVAL;
814}
815
816static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
817		struct device_attribute *attr,
818		char *buf)
819{
820	struct drm_device *ddev = dev_get_drvdata(dev);
821	struct amdgpu_device *adev = drm_to_adev(ddev);
822	int size = 0;
823	int ret;
824	enum pp_clock_type od_clocks[6] = {
825		OD_SCLK,
826		OD_MCLK,
827		OD_VDDC_CURVE,
828		OD_RANGE,
829		OD_VDDGFX_OFFSET,
830		OD_CCLK,
831	};
832	uint clk_index;
833
834	if (amdgpu_in_reset(adev))
835		return -EPERM;
836	if (adev->in_suspend && !adev->in_runpm)
837		return -EPERM;
838
839	ret = pm_runtime_get_sync(ddev->dev);
840	if (ret < 0) {
841		pm_runtime_put_autosuspend(ddev->dev);
842		return ret;
843	}
844
845	for (clk_index = 0 ; clk_index < 6 ; clk_index++) {
846		ret = amdgpu_dpm_emit_clock_levels(adev, od_clocks[clk_index], buf, &size);
847		if (ret)
848			break;
849	}
850	if (ret == -ENOENT) {
851		size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
852		size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf + size);
853		size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf + size);
854		size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf + size);
855		size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf + size);
856		size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf + size);
857	}
858
859	if (size == 0)
860		size = sysfs_emit(buf, "\n");
861
862	pm_runtime_mark_last_busy(ddev->dev);
863	pm_runtime_put_autosuspend(ddev->dev);
864
865	return size;
866}
867
868/**
869 * DOC: pp_features
870 *
871 * The amdgpu driver provides a sysfs API for adjusting what powerplay
872 * features to be enabled. The file pp_features is used for this. And
873 * this is only available for Vega10 and later dGPUs.
874 *
875 * Reading back the file will show you the followings:
876 * - Current ppfeature masks
877 * - List of the all supported powerplay features with their naming,
878 *   bitmasks and enablement status('Y'/'N' means "enabled"/"disabled").
879 *
880 * To manually enable or disable a specific feature, just set or clear
881 * the corresponding bit from original ppfeature masks and input the
882 * new ppfeature masks.
883 */
884static ssize_t amdgpu_set_pp_features(struct device *dev,
885				      struct device_attribute *attr,
886				      const char *buf,
887				      size_t count)
888{
889	struct drm_device *ddev = dev_get_drvdata(dev);
890	struct amdgpu_device *adev = drm_to_adev(ddev);
891	uint64_t featuremask;
892	int ret;
893
894	if (amdgpu_in_reset(adev))
895		return -EPERM;
896	if (adev->in_suspend && !adev->in_runpm)
897		return -EPERM;
898
899	ret = kstrtou64(buf, 0, &featuremask);
900	if (ret)
901		return -EINVAL;
902
903	ret = pm_runtime_get_sync(ddev->dev);
904	if (ret < 0) {
905		pm_runtime_put_autosuspend(ddev->dev);
906		return ret;
907	}
908
909	ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
910
911	pm_runtime_mark_last_busy(ddev->dev);
912	pm_runtime_put_autosuspend(ddev->dev);
913
914	if (ret)
915		return -EINVAL;
916
917	return count;
918}
919
920static ssize_t amdgpu_get_pp_features(struct device *dev,
921				      struct device_attribute *attr,
922				      char *buf)
923{
924	struct drm_device *ddev = dev_get_drvdata(dev);
925	struct amdgpu_device *adev = drm_to_adev(ddev);
926	ssize_t size;
927	int ret;
928
929	if (amdgpu_in_reset(adev))
930		return -EPERM;
931	if (adev->in_suspend && !adev->in_runpm)
932		return -EPERM;
933
934	ret = pm_runtime_get_sync(ddev->dev);
935	if (ret < 0) {
936		pm_runtime_put_autosuspend(ddev->dev);
937		return ret;
938	}
939
940	size = amdgpu_dpm_get_ppfeature_status(adev, buf);
941	if (size <= 0)
942		size = sysfs_emit(buf, "\n");
943
944	pm_runtime_mark_last_busy(ddev->dev);
945	pm_runtime_put_autosuspend(ddev->dev);
946
947	return size;
948}
949
950/**
951 * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie
952 *
953 * The amdgpu driver provides a sysfs API for adjusting what power levels
954 * are enabled for a given power state.  The files pp_dpm_sclk, pp_dpm_mclk,
955 * pp_dpm_socclk, pp_dpm_fclk, pp_dpm_dcefclk and pp_dpm_pcie are used for
956 * this.
957 *
958 * pp_dpm_socclk and pp_dpm_dcefclk interfaces are only available for
959 * Vega10 and later ASICs.
960 * pp_dpm_fclk interface is only available for Vega20 and later ASICs.
961 *
962 * Reading back the files will show you the available power levels within
963 * the power state and the clock information for those levels.
964 *
965 * To manually adjust these states, first select manual using
966 * power_dpm_force_performance_level.
967 * Secondly, enter a new value for each level by inputing a string that
968 * contains " echo xx xx xx > pp_dpm_sclk/mclk/pcie"
969 * E.g.,
970 *
971 * .. code-block:: bash
972 *
973 *	echo "4 5 6" > pp_dpm_sclk
974 *
975 * will enable sclk levels 4, 5, and 6.
976 *
977 * NOTE: change to the dcefclk max dpm level is not supported now
978 */
979
980static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
981		enum pp_clock_type type,
982		char *buf)
983{
984	struct drm_device *ddev = dev_get_drvdata(dev);
985	struct amdgpu_device *adev = drm_to_adev(ddev);
986	int size = 0;
987	int ret = 0;
988
989	if (amdgpu_in_reset(adev))
990		return -EPERM;
991	if (adev->in_suspend && !adev->in_runpm)
992		return -EPERM;
993
994	ret = pm_runtime_get_sync(ddev->dev);
995	if (ret < 0) {
996		pm_runtime_put_autosuspend(ddev->dev);
997		return ret;
998	}
999
1000	ret = amdgpu_dpm_emit_clock_levels(adev, type, buf, &size);
1001	if (ret == -ENOENT)
1002		size = amdgpu_dpm_print_clock_levels(adev, type, buf);
1003
1004	if (size == 0)
1005		size = sysfs_emit(buf, "\n");
1006
1007	pm_runtime_mark_last_busy(ddev->dev);
1008	pm_runtime_put_autosuspend(ddev->dev);
1009
1010	return size;
1011}
1012
1013/*
1014 * Worst case: 32 bits individually specified, in octal at 12 characters
1015 * per line (+1 for \n).
1016 */
1017#define AMDGPU_MASK_BUF_MAX	(32 * 13)
1018
1019static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
1020{
1021	int ret;
1022	unsigned long level;
1023	char *sub_str = NULL;
1024	char *tmp;
1025	char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
1026	const char delimiter[3] = {' ', '\n', '\0'};
1027	size_t bytes;
1028
1029	*mask = 0;
1030
1031	bytes = min(count, sizeof(buf_cpy) - 1);
1032	memcpy(buf_cpy, buf, bytes);
1033	buf_cpy[bytes] = '\0';
1034	tmp = buf_cpy;
1035	while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
1036		if (strlen(sub_str)) {
1037			ret = kstrtoul(sub_str, 0, &level);
1038			if (ret || level > 31)
1039				return -EINVAL;
1040			*mask |= 1 << level;
1041		} else
1042			break;
1043	}
1044
1045	return 0;
1046}
1047
1048static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev,
1049		enum pp_clock_type type,
1050		const char *buf,
1051		size_t count)
1052{
1053	struct drm_device *ddev = dev_get_drvdata(dev);
1054	struct amdgpu_device *adev = drm_to_adev(ddev);
1055	int ret;
1056	uint32_t mask = 0;
1057
1058	if (amdgpu_in_reset(adev))
1059		return -EPERM;
1060	if (adev->in_suspend && !adev->in_runpm)
1061		return -EPERM;
1062
1063	ret = amdgpu_read_mask(buf, count, &mask);
1064	if (ret)
1065		return ret;
1066
1067	ret = pm_runtime_get_sync(ddev->dev);
1068	if (ret < 0) {
1069		pm_runtime_put_autosuspend(ddev->dev);
1070		return ret;
1071	}
1072
1073	ret = amdgpu_dpm_force_clock_level(adev, type, mask);
1074
1075	pm_runtime_mark_last_busy(ddev->dev);
1076	pm_runtime_put_autosuspend(ddev->dev);
1077
1078	if (ret)
1079		return -EINVAL;
1080
1081	return count;
1082}
1083
1084static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
1085		struct device_attribute *attr,
1086		char *buf)
1087{
1088	return amdgpu_get_pp_dpm_clock(dev, PP_SCLK, buf);
1089}
1090
1091static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
1092		struct device_attribute *attr,
1093		const char *buf,
1094		size_t count)
1095{
1096	return amdgpu_set_pp_dpm_clock(dev, PP_SCLK, buf, count);
1097}
1098
1099static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
1100		struct device_attribute *attr,
1101		char *buf)
1102{
1103	return amdgpu_get_pp_dpm_clock(dev, PP_MCLK, buf);
1104}
1105
1106static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
1107		struct device_attribute *attr,
1108		const char *buf,
1109		size_t count)
1110{
1111	return amdgpu_set_pp_dpm_clock(dev, PP_MCLK, buf, count);
1112}
1113
1114static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
1115		struct device_attribute *attr,
1116		char *buf)
1117{
1118	return amdgpu_get_pp_dpm_clock(dev, PP_SOCCLK, buf);
1119}
1120
1121static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
1122		struct device_attribute *attr,
1123		const char *buf,
1124		size_t count)
1125{
1126	return amdgpu_set_pp_dpm_clock(dev, PP_SOCCLK, buf, count);
1127}
1128
1129static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
1130		struct device_attribute *attr,
1131		char *buf)
1132{
1133	return amdgpu_get_pp_dpm_clock(dev, PP_FCLK, buf);
1134}
1135
1136static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
1137		struct device_attribute *attr,
1138		const char *buf,
1139		size_t count)
1140{
1141	return amdgpu_set_pp_dpm_clock(dev, PP_FCLK, buf, count);
1142}
1143
1144static ssize_t amdgpu_get_pp_dpm_vclk(struct device *dev,
1145		struct device_attribute *attr,
1146		char *buf)
1147{
1148	return amdgpu_get_pp_dpm_clock(dev, PP_VCLK, buf);
1149}
1150
1151static ssize_t amdgpu_set_pp_dpm_vclk(struct device *dev,
1152		struct device_attribute *attr,
1153		const char *buf,
1154		size_t count)
1155{
1156	return amdgpu_set_pp_dpm_clock(dev, PP_VCLK, buf, count);
1157}
1158
1159static ssize_t amdgpu_get_pp_dpm_vclk1(struct device *dev,
1160		struct device_attribute *attr,
1161		char *buf)
1162{
1163	return amdgpu_get_pp_dpm_clock(dev, PP_VCLK1, buf);
1164}
1165
1166static ssize_t amdgpu_set_pp_dpm_vclk1(struct device *dev,
1167		struct device_attribute *attr,
1168		const char *buf,
1169		size_t count)
1170{
1171	return amdgpu_set_pp_dpm_clock(dev, PP_VCLK1, buf, count);
1172}
1173
1174static ssize_t amdgpu_get_pp_dpm_dclk(struct device *dev,
1175		struct device_attribute *attr,
1176		char *buf)
1177{
1178	return amdgpu_get_pp_dpm_clock(dev, PP_DCLK, buf);
1179}
1180
1181static ssize_t amdgpu_set_pp_dpm_dclk(struct device *dev,
1182		struct device_attribute *attr,
1183		const char *buf,
1184		size_t count)
1185{
1186	return amdgpu_set_pp_dpm_clock(dev, PP_DCLK, buf, count);
1187}
1188
1189static ssize_t amdgpu_get_pp_dpm_dclk1(struct device *dev,
1190		struct device_attribute *attr,
1191		char *buf)
1192{
1193	return amdgpu_get_pp_dpm_clock(dev, PP_DCLK1, buf);
1194}
1195
1196static ssize_t amdgpu_set_pp_dpm_dclk1(struct device *dev,
1197		struct device_attribute *attr,
1198		const char *buf,
1199		size_t count)
1200{
1201	return amdgpu_set_pp_dpm_clock(dev, PP_DCLK1, buf, count);
1202}
1203
1204static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
1205		struct device_attribute *attr,
1206		char *buf)
1207{
1208	return amdgpu_get_pp_dpm_clock(dev, PP_DCEFCLK, buf);
1209}
1210
1211static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
1212		struct device_attribute *attr,
1213		const char *buf,
1214		size_t count)
1215{
1216	return amdgpu_set_pp_dpm_clock(dev, PP_DCEFCLK, buf, count);
1217}
1218
1219static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
1220		struct device_attribute *attr,
1221		char *buf)
1222{
1223	return amdgpu_get_pp_dpm_clock(dev, PP_PCIE, buf);
1224}
1225
1226static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
1227		struct device_attribute *attr,
1228		const char *buf,
1229		size_t count)
1230{
1231	return amdgpu_set_pp_dpm_clock(dev, PP_PCIE, buf, count);
1232}
1233
1234static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
1235		struct device_attribute *attr,
1236		char *buf)
1237{
1238	struct drm_device *ddev = dev_get_drvdata(dev);
1239	struct amdgpu_device *adev = drm_to_adev(ddev);
1240	uint32_t value = 0;
1241	int ret;
1242
1243	if (amdgpu_in_reset(adev))
1244		return -EPERM;
1245	if (adev->in_suspend && !adev->in_runpm)
1246		return -EPERM;
1247
1248	ret = pm_runtime_get_sync(ddev->dev);
1249	if (ret < 0) {
1250		pm_runtime_put_autosuspend(ddev->dev);
1251		return ret;
1252	}
1253
1254	value = amdgpu_dpm_get_sclk_od(adev);
1255
1256	pm_runtime_mark_last_busy(ddev->dev);
1257	pm_runtime_put_autosuspend(ddev->dev);
1258
1259	return sysfs_emit(buf, "%d\n", value);
1260}
1261
1262static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
1263		struct device_attribute *attr,
1264		const char *buf,
1265		size_t count)
1266{
1267	struct drm_device *ddev = dev_get_drvdata(dev);
1268	struct amdgpu_device *adev = drm_to_adev(ddev);
1269	int ret;
1270	long int value;
1271
1272	if (amdgpu_in_reset(adev))
1273		return -EPERM;
1274	if (adev->in_suspend && !adev->in_runpm)
1275		return -EPERM;
1276
1277	ret = kstrtol(buf, 0, &value);
1278
1279	if (ret)
1280		return -EINVAL;
1281
1282	ret = pm_runtime_get_sync(ddev->dev);
1283	if (ret < 0) {
1284		pm_runtime_put_autosuspend(ddev->dev);
1285		return ret;
1286	}
1287
1288	amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
1289
1290	pm_runtime_mark_last_busy(ddev->dev);
1291	pm_runtime_put_autosuspend(ddev->dev);
1292
1293	return count;
1294}
1295
1296static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
1297		struct device_attribute *attr,
1298		char *buf)
1299{
1300	struct drm_device *ddev = dev_get_drvdata(dev);
1301	struct amdgpu_device *adev = drm_to_adev(ddev);
1302	uint32_t value = 0;
1303	int ret;
1304
1305	if (amdgpu_in_reset(adev))
1306		return -EPERM;
1307	if (adev->in_suspend && !adev->in_runpm)
1308		return -EPERM;
1309
1310	ret = pm_runtime_get_sync(ddev->dev);
1311	if (ret < 0) {
1312		pm_runtime_put_autosuspend(ddev->dev);
1313		return ret;
1314	}
1315
1316	value = amdgpu_dpm_get_mclk_od(adev);
1317
1318	pm_runtime_mark_last_busy(ddev->dev);
1319	pm_runtime_put_autosuspend(ddev->dev);
1320
1321	return sysfs_emit(buf, "%d\n", value);
1322}
1323
1324static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
1325		struct device_attribute *attr,
1326		const char *buf,
1327		size_t count)
1328{
1329	struct drm_device *ddev = dev_get_drvdata(dev);
1330	struct amdgpu_device *adev = drm_to_adev(ddev);
1331	int ret;
1332	long int value;
1333
1334	if (amdgpu_in_reset(adev))
1335		return -EPERM;
1336	if (adev->in_suspend && !adev->in_runpm)
1337		return -EPERM;
1338
1339	ret = kstrtol(buf, 0, &value);
1340
1341	if (ret)
1342		return -EINVAL;
1343
1344	ret = pm_runtime_get_sync(ddev->dev);
1345	if (ret < 0) {
1346		pm_runtime_put_autosuspend(ddev->dev);
1347		return ret;
1348	}
1349
1350	amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
1351
1352	pm_runtime_mark_last_busy(ddev->dev);
1353	pm_runtime_put_autosuspend(ddev->dev);
1354
1355	return count;
1356}
1357
1358/**
1359 * DOC: pp_power_profile_mode
1360 *
1361 * The amdgpu driver provides a sysfs API for adjusting the heuristics
1362 * related to switching between power levels in a power state.  The file
1363 * pp_power_profile_mode is used for this.
1364 *
1365 * Reading this file outputs a list of all of the predefined power profiles
1366 * and the relevant heuristics settings for that profile.
1367 *
1368 * To select a profile or create a custom profile, first select manual using
1369 * power_dpm_force_performance_level.  Writing the number of a predefined
1370 * profile to pp_power_profile_mode will enable those heuristics.  To
1371 * create a custom set of heuristics, write a string of numbers to the file
1372 * starting with the number of the custom profile along with a setting
1373 * for each heuristic parameter.  Due to differences across asic families
1374 * the heuristic parameters vary from family to family.
1375 *
1376 */
1377
1378static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
1379		struct device_attribute *attr,
1380		char *buf)
1381{
1382	struct drm_device *ddev = dev_get_drvdata(dev);
1383	struct amdgpu_device *adev = drm_to_adev(ddev);
1384	ssize_t size;
1385	int ret;
1386
1387	if (amdgpu_in_reset(adev))
1388		return -EPERM;
1389	if (adev->in_suspend && !adev->in_runpm)
1390		return -EPERM;
1391
1392	ret = pm_runtime_get_sync(ddev->dev);
1393	if (ret < 0) {
1394		pm_runtime_put_autosuspend(ddev->dev);
1395		return ret;
1396	}
1397
1398	size = amdgpu_dpm_get_power_profile_mode(adev, buf);
1399	if (size <= 0)
1400		size = sysfs_emit(buf, "\n");
1401
1402	pm_runtime_mark_last_busy(ddev->dev);
1403	pm_runtime_put_autosuspend(ddev->dev);
1404
1405	return size;
1406}
1407
1408
1409static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
1410		struct device_attribute *attr,
1411		const char *buf,
1412		size_t count)
1413{
1414	int ret;
1415	struct drm_device *ddev = dev_get_drvdata(dev);
1416	struct amdgpu_device *adev = drm_to_adev(ddev);
1417	uint32_t parameter_size = 0;
1418	long parameter[64];
1419	char *sub_str, buf_cpy[128];
1420	char *tmp_str;
1421	uint32_t i = 0;
1422	char tmp[2];
1423	long int profile_mode = 0;
1424	const char delimiter[3] = {' ', '\n', '\0'};
1425
1426	if (amdgpu_in_reset(adev))
1427		return -EPERM;
1428	if (adev->in_suspend && !adev->in_runpm)
1429		return -EPERM;
1430
1431	tmp[0] = *(buf);
1432	tmp[1] = '\0';
1433	ret = kstrtol(tmp, 0, &profile_mode);
1434	if (ret)
1435		return -EINVAL;
1436
1437	if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
1438		if (count < 2 || count > 127)
1439			return -EINVAL;
1440		while (isspace(*++buf))
1441			i++;
1442		memcpy(buf_cpy, buf, count-i);
1443		tmp_str = buf_cpy;
1444		while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
1445			if (strlen(sub_str) == 0)
1446				continue;
1447			ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
1448			if (ret)
1449				return -EINVAL;
1450			parameter_size++;
1451			while (isspace(*tmp_str))
1452				tmp_str++;
1453		}
1454	}
1455	parameter[parameter_size] = profile_mode;
1456
1457	ret = pm_runtime_get_sync(ddev->dev);
1458	if (ret < 0) {
1459		pm_runtime_put_autosuspend(ddev->dev);
1460		return ret;
1461	}
1462
1463	ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
1464
1465	pm_runtime_mark_last_busy(ddev->dev);
1466	pm_runtime_put_autosuspend(ddev->dev);
1467
1468	if (!ret)
1469		return count;
1470
1471	return -EINVAL;
1472}
1473
1474static unsigned int amdgpu_hwmon_get_sensor_generic(struct amdgpu_device *adev,
1475						    enum amd_pp_sensors sensor,
1476						    void *query)
1477{
1478	int r, size = sizeof(uint32_t);
1479
1480	if (amdgpu_in_reset(adev))
1481		return -EPERM;
1482	if (adev->in_suspend && !adev->in_runpm)
1483		return -EPERM;
1484
1485	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
1486	if (r < 0) {
1487		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1488		return r;
1489	}
1490
1491	/* get the sensor value */
1492	r = amdgpu_dpm_read_sensor(adev, sensor, query, &size);
1493
1494	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
1495	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1496
1497	return r;
1498}
1499
1500/**
1501 * DOC: gpu_busy_percent
1502 *
1503 * The amdgpu driver provides a sysfs API for reading how busy the GPU
1504 * is as a percentage.  The file gpu_busy_percent is used for this.
1505 * The SMU firmware computes a percentage of load based on the
1506 * aggregate activity level in the IP cores.
1507 */
1508static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
1509					   struct device_attribute *attr,
1510					   char *buf)
1511{
1512	struct drm_device *ddev = dev_get_drvdata(dev);
1513	struct amdgpu_device *adev = drm_to_adev(ddev);
1514	unsigned int value;
1515	int r;
1516
1517	r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_LOAD, &value);
1518	if (r)
1519		return r;
1520
1521	return sysfs_emit(buf, "%d\n", value);
1522}
1523
1524/**
1525 * DOC: mem_busy_percent
1526 *
1527 * The amdgpu driver provides a sysfs API for reading how busy the VRAM
1528 * is as a percentage.  The file mem_busy_percent is used for this.
1529 * The SMU firmware computes a percentage of load based on the
1530 * aggregate activity level in the IP cores.
1531 */
1532static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
1533					   struct device_attribute *attr,
1534					   char *buf)
1535{
1536	struct drm_device *ddev = dev_get_drvdata(dev);
1537	struct amdgpu_device *adev = drm_to_adev(ddev);
1538	unsigned int value;
1539	int r;
1540
1541	r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_LOAD, &value);
1542	if (r)
1543		return r;
1544
1545	return sysfs_emit(buf, "%d\n", value);
1546}
1547
1548/**
1549 * DOC: pcie_bw
1550 *
1551 * The amdgpu driver provides a sysfs API for estimating how much data
1552 * has been received and sent by the GPU in the last second through PCIe.
1553 * The file pcie_bw is used for this.
1554 * The Perf counters count the number of received and sent messages and return
1555 * those values, as well as the maximum payload size of a PCIe packet (mps).
1556 * Note that it is not possible to easily and quickly obtain the size of each
1557 * packet transmitted, so we output the max payload size (mps) to allow for
1558 * quick estimation of the PCIe bandwidth usage
1559 */
1560static ssize_t amdgpu_get_pcie_bw(struct device *dev,
1561		struct device_attribute *attr,
1562		char *buf)
1563{
1564	struct drm_device *ddev = dev_get_drvdata(dev);
1565	struct amdgpu_device *adev = drm_to_adev(ddev);
1566	uint64_t count0 = 0, count1 = 0;
1567	int ret;
1568
1569	if (amdgpu_in_reset(adev))
1570		return -EPERM;
1571	if (adev->in_suspend && !adev->in_runpm)
1572		return -EPERM;
1573
1574	if (adev->flags & AMD_IS_APU)
1575		return -ENODATA;
1576
1577	if (!adev->asic_funcs->get_pcie_usage)
1578		return -ENODATA;
1579
1580	ret = pm_runtime_get_sync(ddev->dev);
1581	if (ret < 0) {
1582		pm_runtime_put_autosuspend(ddev->dev);
1583		return ret;
1584	}
1585
1586	amdgpu_asic_get_pcie_usage(adev, &count0, &count1);
1587
1588	pm_runtime_mark_last_busy(ddev->dev);
1589	pm_runtime_put_autosuspend(ddev->dev);
1590
1591	return sysfs_emit(buf, "%llu %llu %i\n",
1592			  count0, count1, pcie_get_mps(adev->pdev));
1593}
1594
1595/**
1596 * DOC: unique_id
1597 *
1598 * The amdgpu driver provides a sysfs API for providing a unique ID for the GPU
1599 * The file unique_id is used for this.
1600 * This will provide a Unique ID that will persist from machine to machine
1601 *
1602 * NOTE: This will only work for GFX9 and newer. This file will be absent
1603 * on unsupported ASICs (GFX8 and older)
1604 */
1605static ssize_t amdgpu_get_unique_id(struct device *dev,
1606		struct device_attribute *attr,
1607		char *buf)
1608{
1609	struct drm_device *ddev = dev_get_drvdata(dev);
1610	struct amdgpu_device *adev = drm_to_adev(ddev);
1611
1612	if (amdgpu_in_reset(adev))
1613		return -EPERM;
1614	if (adev->in_suspend && !adev->in_runpm)
1615		return -EPERM;
1616
1617	if (adev->unique_id)
1618		return sysfs_emit(buf, "%016llx\n", adev->unique_id);
1619
1620	return 0;
1621}
1622
1623/**
1624 * DOC: thermal_throttling_logging
1625 *
1626 * Thermal throttling pulls down the clock frequency and thus the performance.
1627 * It's an useful mechanism to protect the chip from overheating. Since it
1628 * impacts performance, the user controls whether it is enabled and if so,
1629 * the log frequency.
1630 *
1631 * Reading back the file shows you the status(enabled or disabled) and
1632 * the interval(in seconds) between each thermal logging.
1633 *
1634 * Writing an integer to the file, sets a new logging interval, in seconds.
1635 * The value should be between 1 and 3600. If the value is less than 1,
1636 * thermal logging is disabled. Values greater than 3600 are ignored.
1637 */
1638static ssize_t amdgpu_get_thermal_throttling_logging(struct device *dev,
1639						     struct device_attribute *attr,
1640						     char *buf)
1641{
1642	struct drm_device *ddev = dev_get_drvdata(dev);
1643	struct amdgpu_device *adev = drm_to_adev(ddev);
1644
1645	return sysfs_emit(buf, "%s: thermal throttling logging %s, with interval %d seconds\n",
1646			  adev_to_drm(adev)->unique,
1647			  atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled",
1648			  adev->throttling_logging_rs.interval / HZ + 1);
1649}
1650
1651static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
1652						     struct device_attribute *attr,
1653						     const char *buf,
1654						     size_t count)
1655{
1656	struct drm_device *ddev = dev_get_drvdata(dev);
1657	struct amdgpu_device *adev = drm_to_adev(ddev);
1658	long throttling_logging_interval;
1659	unsigned long flags;
1660	int ret = 0;
1661
1662	ret = kstrtol(buf, 0, &throttling_logging_interval);
1663	if (ret)
1664		return ret;
1665
1666	if (throttling_logging_interval > 3600)
1667		return -EINVAL;
1668
1669	if (throttling_logging_interval > 0) {
1670		raw_spin_lock_irqsave(&adev->throttling_logging_rs.lock, flags);
1671		/*
1672		 * Reset the ratelimit timer internals.
1673		 * This can effectively restart the timer.
1674		 */
1675		adev->throttling_logging_rs.interval =
1676			(throttling_logging_interval - 1) * HZ;
1677		adev->throttling_logging_rs.begin = 0;
1678		adev->throttling_logging_rs.printed = 0;
1679		adev->throttling_logging_rs.missed = 0;
1680		raw_spin_unlock_irqrestore(&adev->throttling_logging_rs.lock, flags);
1681
1682		atomic_set(&adev->throttling_logging_enabled, 1);
1683	} else {
1684		atomic_set(&adev->throttling_logging_enabled, 0);
1685	}
1686
1687	return count;
1688}
1689
1690/**
1691 * DOC: apu_thermal_cap
1692 *
1693 * The amdgpu driver provides a sysfs API for retrieving/updating thermal
1694 * limit temperature in millidegrees Celsius
1695 *
1696 * Reading back the file shows you core limit value
1697 *
1698 * Writing an integer to the file, sets a new thermal limit. The value
1699 * should be between 0 and 100. If the value is less than 0 or greater
1700 * than 100, then the write request will be ignored.
1701 */
1702static ssize_t amdgpu_get_apu_thermal_cap(struct device *dev,
1703					 struct device_attribute *attr,
1704					 char *buf)
1705{
1706	int ret, size;
1707	u32 limit;
1708	struct drm_device *ddev = dev_get_drvdata(dev);
1709	struct amdgpu_device *adev = drm_to_adev(ddev);
1710
1711	ret = pm_runtime_get_sync(ddev->dev);
1712	if (ret < 0) {
1713		pm_runtime_put_autosuspend(ddev->dev);
1714		return ret;
1715	}
1716
1717	ret = amdgpu_dpm_get_apu_thermal_limit(adev, &limit);
1718	if (!ret)
1719		size = sysfs_emit(buf, "%u\n", limit);
1720	else
1721		size = sysfs_emit(buf, "failed to get thermal limit\n");
1722
1723	pm_runtime_mark_last_busy(ddev->dev);
1724	pm_runtime_put_autosuspend(ddev->dev);
1725
1726	return size;
1727}
1728
1729static ssize_t amdgpu_set_apu_thermal_cap(struct device *dev,
1730					 struct device_attribute *attr,
1731					 const char *buf,
1732					 size_t count)
1733{
1734	int ret;
1735	u32 value;
1736	struct drm_device *ddev = dev_get_drvdata(dev);
1737	struct amdgpu_device *adev = drm_to_adev(ddev);
1738
1739	ret = kstrtou32(buf, 10, &value);
1740	if (ret)
1741		return ret;
1742
1743	if (value > 100) {
1744		dev_err(dev, "Invalid argument !\n");
1745		return -EINVAL;
1746	}
1747
1748	ret = pm_runtime_get_sync(ddev->dev);
1749	if (ret < 0) {
1750		pm_runtime_put_autosuspend(ddev->dev);
1751		return ret;
1752	}
1753
1754	ret = amdgpu_dpm_set_apu_thermal_limit(adev, value);
1755	if (ret) {
1756		dev_err(dev, "failed to update thermal limit\n");
1757		return ret;
1758	}
1759
1760	pm_runtime_mark_last_busy(ddev->dev);
1761	pm_runtime_put_autosuspend(ddev->dev);
1762
1763	return count;
1764}
1765
1766/**
1767 * DOC: gpu_metrics
1768 *
1769 * The amdgpu driver provides a sysfs API for retrieving current gpu
1770 * metrics data. The file gpu_metrics is used for this. Reading the
1771 * file will dump all the current gpu metrics data.
1772 *
1773 * These data include temperature, frequency, engines utilization,
1774 * power consume, throttler status, fan speed and cpu core statistics(
1775 * available for APU only). That's it will give a snapshot of all sensors
1776 * at the same time.
1777 */
1778static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
1779				      struct device_attribute *attr,
1780				      char *buf)
1781{
1782	struct drm_device *ddev = dev_get_drvdata(dev);
1783	struct amdgpu_device *adev = drm_to_adev(ddev);
1784	void *gpu_metrics;
1785	ssize_t size = 0;
1786	int ret;
1787
1788	if (amdgpu_in_reset(adev))
1789		return -EPERM;
1790	if (adev->in_suspend && !adev->in_runpm)
1791		return -EPERM;
1792
1793	ret = pm_runtime_get_sync(ddev->dev);
1794	if (ret < 0) {
1795		pm_runtime_put_autosuspend(ddev->dev);
1796		return ret;
1797	}
1798
1799	size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);
1800	if (size <= 0)
1801		goto out;
1802
1803	if (size >= PAGE_SIZE)
1804		size = PAGE_SIZE - 1;
1805
1806	memcpy(buf, gpu_metrics, size);
1807
1808out:
1809	pm_runtime_mark_last_busy(ddev->dev);
1810	pm_runtime_put_autosuspend(ddev->dev);
1811
1812	return size;
1813}
1814
1815static int amdgpu_show_powershift_percent(struct device *dev,
1816					char *buf, enum amd_pp_sensors sensor)
1817{
1818	struct drm_device *ddev = dev_get_drvdata(dev);
1819	struct amdgpu_device *adev = drm_to_adev(ddev);
1820	uint32_t ss_power;
1821	int r = 0, i;
1822
1823	r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&ss_power);
1824	if (r == -EOPNOTSUPP) {
1825		/* sensor not available on dGPU, try to read from APU */
1826		adev = NULL;
1827		mutex_lock(&mgpu_info.mutex);
1828		for (i = 0; i < mgpu_info.num_gpu; i++) {
1829			if (mgpu_info.gpu_ins[i].adev->flags & AMD_IS_APU) {
1830				adev = mgpu_info.gpu_ins[i].adev;
1831				break;
1832			}
1833		}
1834		mutex_unlock(&mgpu_info.mutex);
1835		if (adev)
1836			r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&ss_power);
1837	}
1838
1839	if (r)
1840		return r;
1841
1842	return sysfs_emit(buf, "%u%%\n", ss_power);
1843}
1844
1845/**
1846 * DOC: smartshift_apu_power
1847 *
1848 * The amdgpu driver provides a sysfs API for reporting APU power
1849 * shift in percentage if platform supports smartshift. Value 0 means that
1850 * there is no powershift and values between [1-100] means that the power
1851 * is shifted to APU, the percentage of boost is with respect to APU power
1852 * limit on the platform.
1853 */
1854
1855static ssize_t amdgpu_get_smartshift_apu_power(struct device *dev, struct device_attribute *attr,
1856					       char *buf)
1857{
1858	return amdgpu_show_powershift_percent(dev, buf, AMDGPU_PP_SENSOR_SS_APU_SHARE);
1859}
1860
1861/**
1862 * DOC: smartshift_dgpu_power
1863 *
1864 * The amdgpu driver provides a sysfs API for reporting dGPU power
1865 * shift in percentage if platform supports smartshift. Value 0 means that
1866 * there is no powershift and values between [1-100] means that the power is
1867 * shifted to dGPU, the percentage of boost is with respect to dGPU power
1868 * limit on the platform.
1869 */
1870
1871static ssize_t amdgpu_get_smartshift_dgpu_power(struct device *dev, struct device_attribute *attr,
1872						char *buf)
1873{
1874	return amdgpu_show_powershift_percent(dev, buf, AMDGPU_PP_SENSOR_SS_DGPU_SHARE);
1875}
1876
1877/**
1878 * DOC: smartshift_bias
1879 *
1880 * The amdgpu driver provides a sysfs API for reporting the
1881 * smartshift(SS2.0) bias level. The value ranges from -100 to 100
1882 * and the default is 0. -100 sets maximum preference to APU
1883 * and 100 sets max perference to dGPU.
1884 */
1885
1886static ssize_t amdgpu_get_smartshift_bias(struct device *dev,
1887					  struct device_attribute *attr,
1888					  char *buf)
1889{
1890	int r = 0;
1891
1892	r = sysfs_emit(buf, "%d\n", amdgpu_smartshift_bias);
1893
1894	return r;
1895}
1896
1897static ssize_t amdgpu_set_smartshift_bias(struct device *dev,
1898					  struct device_attribute *attr,
1899					  const char *buf, size_t count)
1900{
1901	struct drm_device *ddev = dev_get_drvdata(dev);
1902	struct amdgpu_device *adev = drm_to_adev(ddev);
1903	int r = 0;
1904	int bias = 0;
1905
1906	if (amdgpu_in_reset(adev))
1907		return -EPERM;
1908	if (adev->in_suspend && !adev->in_runpm)
1909		return -EPERM;
1910
1911	r = pm_runtime_get_sync(ddev->dev);
1912	if (r < 0) {
1913		pm_runtime_put_autosuspend(ddev->dev);
1914		return r;
1915	}
1916
1917	r = kstrtoint(buf, 10, &bias);
1918	if (r)
1919		goto out;
1920
1921	if (bias > AMDGPU_SMARTSHIFT_MAX_BIAS)
1922		bias = AMDGPU_SMARTSHIFT_MAX_BIAS;
1923	else if (bias < AMDGPU_SMARTSHIFT_MIN_BIAS)
1924		bias = AMDGPU_SMARTSHIFT_MIN_BIAS;
1925
1926	amdgpu_smartshift_bias = bias;
1927	r = count;
1928
1929	/* TODO: update bias level with SMU message */
1930
1931out:
1932	pm_runtime_mark_last_busy(ddev->dev);
1933	pm_runtime_put_autosuspend(ddev->dev);
1934	return r;
1935}
1936
1937static int ss_power_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
1938				uint32_t mask, enum amdgpu_device_attr_states *states)
1939{
1940	if (!amdgpu_device_supports_smart_shift(adev_to_drm(adev)))
1941		*states = ATTR_STATE_UNSUPPORTED;
1942
1943	return 0;
1944}
1945
1946static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
1947			       uint32_t mask, enum amdgpu_device_attr_states *states)
1948{
1949	uint32_t ss_power;
1950
1951	if (!amdgpu_device_supports_smart_shift(adev_to_drm(adev)))
1952		*states = ATTR_STATE_UNSUPPORTED;
1953	else if (amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
1954		 (void *)&ss_power))
1955		*states = ATTR_STATE_UNSUPPORTED;
1956	else if (amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE,
1957		 (void *)&ss_power))
1958		*states = ATTR_STATE_UNSUPPORTED;
1959
1960	return 0;
1961}
1962
1963static struct amdgpu_device_attr amdgpu_device_attrs[] = {
1964	AMDGPU_DEVICE_ATTR_RW(power_dpm_state,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1965	AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level,	ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1966	AMDGPU_DEVICE_ATTR_RO(pp_num_states,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1967	AMDGPU_DEVICE_ATTR_RO(pp_cur_state,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1968	AMDGPU_DEVICE_ATTR_RW(pp_force_state,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1969	AMDGPU_DEVICE_ATTR_RW(pp_table,					ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1970	AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1971	AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1972	AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1973	AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1974	AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1975	AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk1,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1976	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1977	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk1,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1978	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1979	AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1980	AMDGPU_DEVICE_ATTR_RW(pp_sclk_od,				ATTR_FLAG_BASIC),
1981	AMDGPU_DEVICE_ATTR_RW(pp_mclk_od,				ATTR_FLAG_BASIC),
1982	AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode,			ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1983	AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage,			ATTR_FLAG_BASIC),
1984	AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1985	AMDGPU_DEVICE_ATTR_RO(mem_busy_percent,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1986	AMDGPU_DEVICE_ATTR_RO(pcie_bw,					ATTR_FLAG_BASIC),
1987	AMDGPU_DEVICE_ATTR_RW(pp_features,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1988	AMDGPU_DEVICE_ATTR_RO(unique_id,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1989	AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging,		ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1990	AMDGPU_DEVICE_ATTR_RW(apu_thermal_cap,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1991	AMDGPU_DEVICE_ATTR_RO(gpu_metrics,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
1992	AMDGPU_DEVICE_ATTR_RO(smartshift_apu_power,			ATTR_FLAG_BASIC,
1993			      .attr_update = ss_power_attr_update),
1994	AMDGPU_DEVICE_ATTR_RO(smartshift_dgpu_power,			ATTR_FLAG_BASIC,
1995			      .attr_update = ss_power_attr_update),
1996	AMDGPU_DEVICE_ATTR_RW(smartshift_bias,				ATTR_FLAG_BASIC,
1997			      .attr_update = ss_bias_attr_update),
1998};
1999
2000static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
2001			       uint32_t mask, enum amdgpu_device_attr_states *states)
2002{
2003	struct device_attribute *dev_attr = &attr->dev_attr;
2004	uint32_t mp1_ver = adev->ip_versions[MP1_HWIP][0];
2005	uint32_t gc_ver = adev->ip_versions[GC_HWIP][0];
2006	const char *attr_name = dev_attr->attr.name;
2007
2008	if (!(attr->flags & mask)) {
2009		*states = ATTR_STATE_UNSUPPORTED;
2010		return 0;
2011	}
2012
2013#define DEVICE_ATTR_IS(_name)	(!strcmp(attr_name, #_name))
2014
2015	if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
2016		if (gc_ver < IP_VERSION(9, 0, 0))
2017			*states = ATTR_STATE_UNSUPPORTED;
2018	} else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
2019		if (gc_ver < IP_VERSION(9, 0, 0) ||
2020		    !amdgpu_device_has_display_hardware(adev))
2021			*states = ATTR_STATE_UNSUPPORTED;
2022	} else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
2023		if (mp1_ver < IP_VERSION(10, 0, 0))
2024			*states = ATTR_STATE_UNSUPPORTED;
2025	} else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) {
2026		*states = ATTR_STATE_UNSUPPORTED;
2027		if (amdgpu_dpm_is_overdrive_supported(adev))
2028			*states = ATTR_STATE_SUPPORTED;
2029	} else if (DEVICE_ATTR_IS(mem_busy_percent)) {
2030		if (adev->flags & AMD_IS_APU || gc_ver == IP_VERSION(9, 0, 1))
2031			*states = ATTR_STATE_UNSUPPORTED;
2032	} else if (DEVICE_ATTR_IS(pcie_bw)) {
2033		/* PCIe Perf counters won't work on APU nodes */
2034		if (adev->flags & AMD_IS_APU)
2035			*states = ATTR_STATE_UNSUPPORTED;
2036	} else if (DEVICE_ATTR_IS(unique_id)) {
2037		switch (gc_ver) {
2038		case IP_VERSION(9, 0, 1):
2039		case IP_VERSION(9, 4, 0):
2040		case IP_VERSION(9, 4, 1):
2041		case IP_VERSION(9, 4, 2):
2042		case IP_VERSION(9, 4, 3):
2043		case IP_VERSION(10, 3, 0):
2044		case IP_VERSION(11, 0, 0):
2045		case IP_VERSION(11, 0, 1):
2046		case IP_VERSION(11, 0, 2):
2047		case IP_VERSION(11, 0, 3):
2048			*states = ATTR_STATE_SUPPORTED;
2049			break;
2050		default:
2051			*states = ATTR_STATE_UNSUPPORTED;
2052		}
2053	} else if (DEVICE_ATTR_IS(pp_features)) {
2054		if ((adev->flags & AMD_IS_APU &&
2055		     gc_ver != IP_VERSION(9, 4, 3)) ||
2056		    gc_ver < IP_VERSION(9, 0, 0))
2057			*states = ATTR_STATE_UNSUPPORTED;
2058	} else if (DEVICE_ATTR_IS(gpu_metrics)) {
2059		if (gc_ver < IP_VERSION(9, 1, 0))
2060			*states = ATTR_STATE_UNSUPPORTED;
2061	} else if (DEVICE_ATTR_IS(pp_dpm_vclk)) {
2062		if (!(gc_ver == IP_VERSION(10, 3, 1) ||
2063		      gc_ver == IP_VERSION(10, 3, 0) ||
2064		      gc_ver == IP_VERSION(10, 1, 2) ||
2065		      gc_ver == IP_VERSION(11, 0, 0) ||
2066		      gc_ver == IP_VERSION(11, 0, 2) ||
2067		      gc_ver == IP_VERSION(11, 0, 3) ||
2068		      gc_ver == IP_VERSION(9, 4, 3)))
2069			*states = ATTR_STATE_UNSUPPORTED;
2070	} else if (DEVICE_ATTR_IS(pp_dpm_vclk1)) {
2071		if (!((gc_ver == IP_VERSION(10, 3, 1) ||
2072			   gc_ver == IP_VERSION(10, 3, 0) ||
2073			   gc_ver == IP_VERSION(11, 0, 2) ||
2074			   gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
2075			*states = ATTR_STATE_UNSUPPORTED;
2076	} else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
2077		if (!(gc_ver == IP_VERSION(10, 3, 1) ||
2078		      gc_ver == IP_VERSION(10, 3, 0) ||
2079		      gc_ver == IP_VERSION(10, 1, 2) ||
2080		      gc_ver == IP_VERSION(11, 0, 0) ||
2081		      gc_ver == IP_VERSION(11, 0, 2) ||
2082		      gc_ver == IP_VERSION(11, 0, 3) ||
2083		      gc_ver == IP_VERSION(9, 4, 3)))
2084			*states = ATTR_STATE_UNSUPPORTED;
2085	} else if (DEVICE_ATTR_IS(pp_dpm_dclk1)) {
2086		if (!((gc_ver == IP_VERSION(10, 3, 1) ||
2087			   gc_ver == IP_VERSION(10, 3, 0) ||
2088			   gc_ver == IP_VERSION(11, 0, 2) ||
2089			   gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
2090			*states = ATTR_STATE_UNSUPPORTED;
2091	} else if (DEVICE_ATTR_IS(pp_power_profile_mode)) {
2092		if (amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP)
2093			*states = ATTR_STATE_UNSUPPORTED;
2094		else if (gc_ver == IP_VERSION(10, 3, 0) && amdgpu_sriov_vf(adev))
2095			*states = ATTR_STATE_UNSUPPORTED;
2096	}
2097
2098	switch (gc_ver) {
2099	case IP_VERSION(9, 4, 1):
2100	case IP_VERSION(9, 4, 2):
2101		/* the Mi series card does not support standalone mclk/socclk/fclk level setting */
2102		if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
2103		    DEVICE_ATTR_IS(pp_dpm_socclk) ||
2104		    DEVICE_ATTR_IS(pp_dpm_fclk)) {
2105			dev_attr->attr.mode &= ~S_IWUGO;
2106			dev_attr->store = NULL;
2107		}
2108		break;
2109	case IP_VERSION(10, 3, 0):
2110		if (DEVICE_ATTR_IS(power_dpm_force_performance_level) &&
2111		    amdgpu_sriov_vf(adev)) {
2112			dev_attr->attr.mode &= ~0222;
2113			dev_attr->store = NULL;
2114		}
2115		break;
2116	default:
2117		break;
2118	}
2119
2120	if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
2121		/* SMU MP1 does not support dcefclk level setting */
2122		if (gc_ver >= IP_VERSION(10, 0, 0)) {
2123			dev_attr->attr.mode &= ~S_IWUGO;
2124			dev_attr->store = NULL;
2125		}
2126	}
2127
2128	/* setting should not be allowed from VF if not in one VF mode */
2129	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
2130		dev_attr->attr.mode &= ~S_IWUGO;
2131		dev_attr->store = NULL;
2132	}
2133
2134#undef DEVICE_ATTR_IS
2135
2136	return 0;
2137}
2138
2139
2140static int amdgpu_device_attr_create(struct amdgpu_device *adev,
2141				     struct amdgpu_device_attr *attr,
2142				     uint32_t mask, struct list_head *attr_list)
2143{
2144	int ret = 0;
2145	enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED;
2146	struct amdgpu_device_attr_entry *attr_entry;
2147	struct device_attribute *dev_attr;
2148	const char *name;
2149
2150	int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
2151			   uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update;
2152
2153	if (!attr)
2154		return -EINVAL;
2155
2156	dev_attr = &attr->dev_attr;
2157	name = dev_attr->attr.name;
2158
2159	attr_update = attr->attr_update ? attr->attr_update : default_attr_update;
2160
2161	ret = attr_update(adev, attr, mask, &attr_states);
2162	if (ret) {
2163		dev_err(adev->dev, "failed to update device file %s, ret = %d\n",
2164			name, ret);
2165		return ret;
2166	}
2167
2168	if (attr_states == ATTR_STATE_UNSUPPORTED)
2169		return 0;
2170
2171	ret = device_create_file(adev->dev, dev_attr);
2172	if (ret) {
2173		dev_err(adev->dev, "failed to create device file %s, ret = %d\n",
2174			name, ret);
2175	}
2176
2177	attr_entry = kmalloc(sizeof(*attr_entry), GFP_KERNEL);
2178	if (!attr_entry)
2179		return -ENOMEM;
2180
2181	attr_entry->attr = attr;
2182	INIT_LIST_HEAD(&attr_entry->entry);
2183
2184	list_add_tail(&attr_entry->entry, attr_list);
2185
2186	return ret;
2187}
2188
2189static void amdgpu_device_attr_remove(struct amdgpu_device *adev, struct amdgpu_device_attr *attr)
2190{
2191	struct device_attribute *dev_attr = &attr->dev_attr;
2192
2193	device_remove_file(adev->dev, dev_attr);
2194}
2195
2196static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
2197					     struct list_head *attr_list);
2198
2199static int amdgpu_device_attr_create_groups(struct amdgpu_device *adev,
2200					    struct amdgpu_device_attr *attrs,
2201					    uint32_t counts,
2202					    uint32_t mask,
2203					    struct list_head *attr_list)
2204{
2205	int ret = 0;
2206	uint32_t i = 0;
2207
2208	for (i = 0; i < counts; i++) {
2209		ret = amdgpu_device_attr_create(adev, &attrs[i], mask, attr_list);
2210		if (ret)
2211			goto failed;
2212	}
2213
2214	return 0;
2215
2216failed:
2217	amdgpu_device_attr_remove_groups(adev, attr_list);
2218
2219	return ret;
2220}
2221
2222static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
2223					     struct list_head *attr_list)
2224{
2225	struct amdgpu_device_attr_entry *entry, *entry_tmp;
2226
2227	if (list_empty(attr_list))
2228		return ;
2229
2230	list_for_each_entry_safe(entry, entry_tmp, attr_list, entry) {
2231		amdgpu_device_attr_remove(adev, entry->attr);
2232		list_del(&entry->entry);
2233		kfree(entry);
2234	}
2235}
2236
2237static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
2238				      struct device_attribute *attr,
2239				      char *buf)
2240{
2241	struct amdgpu_device *adev = dev_get_drvdata(dev);
2242	int channel = to_sensor_dev_attr(attr)->index;
2243	int r, temp = 0;
2244
2245	if (channel >= PP_TEMP_MAX)
2246		return -EINVAL;
2247
2248	switch (channel) {
2249	case PP_TEMP_JUNCTION:
2250		/* get current junction temperature */
2251		r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
2252					   (void *)&temp);
2253		break;
2254	case PP_TEMP_EDGE:
2255		/* get current edge temperature */
2256		r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
2257					   (void *)&temp);
2258		break;
2259	case PP_TEMP_MEM:
2260		/* get current memory temperature */
2261		r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
2262					   (void *)&temp);
2263		break;
2264	default:
2265		r = -EINVAL;
2266		break;
2267	}
2268
2269	if (r)
2270		return r;
2271
2272	return sysfs_emit(buf, "%d\n", temp);
2273}
2274
2275static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
2276					     struct device_attribute *attr,
2277					     char *buf)
2278{
2279	struct amdgpu_device *adev = dev_get_drvdata(dev);
2280	int hyst = to_sensor_dev_attr(attr)->index;
2281	int temp;
2282
2283	if (hyst)
2284		temp = adev->pm.dpm.thermal.min_temp;
2285	else
2286		temp = adev->pm.dpm.thermal.max_temp;
2287
2288	return sysfs_emit(buf, "%d\n", temp);
2289}
2290
2291static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
2292					     struct device_attribute *attr,
2293					     char *buf)
2294{
2295	struct amdgpu_device *adev = dev_get_drvdata(dev);
2296	int hyst = to_sensor_dev_attr(attr)->index;
2297	int temp;
2298
2299	if (hyst)
2300		temp = adev->pm.dpm.thermal.min_hotspot_temp;
2301	else
2302		temp = adev->pm.dpm.thermal.max_hotspot_crit_temp;
2303
2304	return sysfs_emit(buf, "%d\n", temp);
2305}
2306
2307static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
2308					     struct device_attribute *attr,
2309					     char *buf)
2310{
2311	struct amdgpu_device *adev = dev_get_drvdata(dev);
2312	int hyst = to_sensor_dev_attr(attr)->index;
2313	int temp;
2314
2315	if (hyst)
2316		temp = adev->pm.dpm.thermal.min_mem_temp;
2317	else
2318		temp = adev->pm.dpm.thermal.max_mem_crit_temp;
2319
2320	return sysfs_emit(buf, "%d\n", temp);
2321}
2322
2323static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
2324					     struct device_attribute *attr,
2325					     char *buf)
2326{
2327	int channel = to_sensor_dev_attr(attr)->index;
2328
2329	if (channel >= PP_TEMP_MAX)
2330		return -EINVAL;
2331
2332	return sysfs_emit(buf, "%s\n", temp_label[channel].label);
2333}
2334
2335static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
2336					     struct device_attribute *attr,
2337					     char *buf)
2338{
2339	struct amdgpu_device *adev = dev_get_drvdata(dev);
2340	int channel = to_sensor_dev_attr(attr)->index;
2341	int temp = 0;
2342
2343	if (channel >= PP_TEMP_MAX)
2344		return -EINVAL;
2345
2346	switch (channel) {
2347	case PP_TEMP_JUNCTION:
2348		temp = adev->pm.dpm.thermal.max_hotspot_emergency_temp;
2349		break;
2350	case PP_TEMP_EDGE:
2351		temp = adev->pm.dpm.thermal.max_edge_emergency_temp;
2352		break;
2353	case PP_TEMP_MEM:
2354		temp = adev->pm.dpm.thermal.max_mem_emergency_temp;
2355		break;
2356	}
2357
2358	return sysfs_emit(buf, "%d\n", temp);
2359}
2360
2361static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
2362					    struct device_attribute *attr,
2363					    char *buf)
2364{
2365	struct amdgpu_device *adev = dev_get_drvdata(dev);
2366	u32 pwm_mode = 0;
2367	int ret;
2368
2369	if (amdgpu_in_reset(adev))
2370		return -EPERM;
2371	if (adev->in_suspend && !adev->in_runpm)
2372		return -EPERM;
2373
2374	ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2375	if (ret < 0) {
2376		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2377		return ret;
2378	}
2379
2380	ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
2381
2382	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2383	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2384
2385	if (ret)
2386		return -EINVAL;
2387
2388	return sysfs_emit(buf, "%u\n", pwm_mode);
2389}
2390
2391static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
2392					    struct device_attribute *attr,
2393					    const char *buf,
2394					    size_t count)
2395{
2396	struct amdgpu_device *adev = dev_get_drvdata(dev);
2397	int err, ret;
2398	int value;
2399
2400	if (amdgpu_in_reset(adev))
2401		return -EPERM;
2402	if (adev->in_suspend && !adev->in_runpm)
2403		return -EPERM;
2404
2405	err = kstrtoint(buf, 10, &value);
2406	if (err)
2407		return err;
2408
2409	ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2410	if (ret < 0) {
2411		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2412		return ret;
2413	}
2414
2415	ret = amdgpu_dpm_set_fan_control_mode(adev, value);
2416
2417	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2418	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2419
2420	if (ret)
2421		return -EINVAL;
2422
2423	return count;
2424}
2425
2426static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
2427					 struct device_attribute *attr,
2428					 char *buf)
2429{
2430	return sysfs_emit(buf, "%i\n", 0);
2431}
2432
2433static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
2434					 struct device_attribute *attr,
2435					 char *buf)
2436{
2437	return sysfs_emit(buf, "%i\n", 255);
2438}
2439
2440static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
2441				     struct device_attribute *attr,
2442				     const char *buf, size_t count)
2443{
2444	struct amdgpu_device *adev = dev_get_drvdata(dev);
2445	int err;
2446	u32 value;
2447	u32 pwm_mode;
2448
2449	if (amdgpu_in_reset(adev))
2450		return -EPERM;
2451	if (adev->in_suspend && !adev->in_runpm)
2452		return -EPERM;
2453
2454	err = kstrtou32(buf, 10, &value);
2455	if (err)
2456		return err;
2457
2458	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2459	if (err < 0) {
2460		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2461		return err;
2462	}
2463
2464	err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
2465	if (err)
2466		goto out;
2467
2468	if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
2469		pr_info("manual fan speed control should be enabled first\n");
2470		err = -EINVAL;
2471		goto out;
2472	}
2473
2474	err = amdgpu_dpm_set_fan_speed_pwm(adev, value);
2475
2476out:
2477	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2478	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2479
2480	if (err)
2481		return err;
2482
2483	return count;
2484}
2485
2486static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
2487				     struct device_attribute *attr,
2488				     char *buf)
2489{
2490	struct amdgpu_device *adev = dev_get_drvdata(dev);
2491	int err;
2492	u32 speed = 0;
2493
2494	if (amdgpu_in_reset(adev))
2495		return -EPERM;
2496	if (adev->in_suspend && !adev->in_runpm)
2497		return -EPERM;
2498
2499	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2500	if (err < 0) {
2501		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2502		return err;
2503	}
2504
2505	err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed);
2506
2507	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2508	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2509
2510	if (err)
2511		return err;
2512
2513	return sysfs_emit(buf, "%i\n", speed);
2514}
2515
2516static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
2517					   struct device_attribute *attr,
2518					   char *buf)
2519{
2520	struct amdgpu_device *adev = dev_get_drvdata(dev);
2521	int err;
2522	u32 speed = 0;
2523
2524	if (amdgpu_in_reset(adev))
2525		return -EPERM;
2526	if (adev->in_suspend && !adev->in_runpm)
2527		return -EPERM;
2528
2529	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2530	if (err < 0) {
2531		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2532		return err;
2533	}
2534
2535	err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
2536
2537	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2538	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2539
2540	if (err)
2541		return err;
2542
2543	return sysfs_emit(buf, "%i\n", speed);
2544}
2545
2546static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
2547					 struct device_attribute *attr,
2548					 char *buf)
2549{
2550	struct amdgpu_device *adev = dev_get_drvdata(dev);
2551	u32 min_rpm = 0;
2552	int r;
2553
2554	r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
2555				   (void *)&min_rpm);
2556
2557	if (r)
2558		return r;
2559
2560	return sysfs_emit(buf, "%d\n", min_rpm);
2561}
2562
2563static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
2564					 struct device_attribute *attr,
2565					 char *buf)
2566{
2567	struct amdgpu_device *adev = dev_get_drvdata(dev);
2568	u32 max_rpm = 0;
2569	int r;
2570
2571	r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
2572				   (void *)&max_rpm);
2573
2574	if (r)
2575		return r;
2576
2577	return sysfs_emit(buf, "%d\n", max_rpm);
2578}
2579
2580static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
2581					   struct device_attribute *attr,
2582					   char *buf)
2583{
2584	struct amdgpu_device *adev = dev_get_drvdata(dev);
2585	int err;
2586	u32 rpm = 0;
2587
2588	if (amdgpu_in_reset(adev))
2589		return -EPERM;
2590	if (adev->in_suspend && !adev->in_runpm)
2591		return -EPERM;
2592
2593	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2594	if (err < 0) {
2595		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2596		return err;
2597	}
2598
2599	err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
2600
2601	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2602	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2603
2604	if (err)
2605		return err;
2606
2607	return sysfs_emit(buf, "%i\n", rpm);
2608}
2609
2610static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
2611				     struct device_attribute *attr,
2612				     const char *buf, size_t count)
2613{
2614	struct amdgpu_device *adev = dev_get_drvdata(dev);
2615	int err;
2616	u32 value;
2617	u32 pwm_mode;
2618
2619	if (amdgpu_in_reset(adev))
2620		return -EPERM;
2621	if (adev->in_suspend && !adev->in_runpm)
2622		return -EPERM;
2623
2624	err = kstrtou32(buf, 10, &value);
2625	if (err)
2626		return err;
2627
2628	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2629	if (err < 0) {
2630		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2631		return err;
2632	}
2633
2634	err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
2635	if (err)
2636		goto out;
2637
2638	if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
2639		err = -ENODATA;
2640		goto out;
2641	}
2642
2643	err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
2644
2645out:
2646	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2647	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2648
2649	if (err)
2650		return err;
2651
2652	return count;
2653}
2654
2655static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
2656					    struct device_attribute *attr,
2657					    char *buf)
2658{
2659	struct amdgpu_device *adev = dev_get_drvdata(dev);
2660	u32 pwm_mode = 0;
2661	int ret;
2662
2663	if (amdgpu_in_reset(adev))
2664		return -EPERM;
2665	if (adev->in_suspend && !adev->in_runpm)
2666		return -EPERM;
2667
2668	ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2669	if (ret < 0) {
2670		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2671		return ret;
2672	}
2673
2674	ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
2675
2676	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2677	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2678
2679	if (ret)
2680		return -EINVAL;
2681
2682	return sysfs_emit(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
2683}
2684
2685static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
2686					    struct device_attribute *attr,
2687					    const char *buf,
2688					    size_t count)
2689{
2690	struct amdgpu_device *adev = dev_get_drvdata(dev);
2691	int err;
2692	int value;
2693	u32 pwm_mode;
2694
2695	if (amdgpu_in_reset(adev))
2696		return -EPERM;
2697	if (adev->in_suspend && !adev->in_runpm)
2698		return -EPERM;
2699
2700	err = kstrtoint(buf, 10, &value);
2701	if (err)
2702		return err;
2703
2704	if (value == 0)
2705		pwm_mode = AMD_FAN_CTRL_AUTO;
2706	else if (value == 1)
2707		pwm_mode = AMD_FAN_CTRL_MANUAL;
2708	else
2709		return -EINVAL;
2710
2711	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2712	if (err < 0) {
2713		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2714		return err;
2715	}
2716
2717	err = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
2718
2719	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2720	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2721
2722	if (err)
2723		return -EINVAL;
2724
2725	return count;
2726}
2727
2728static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
2729					struct device_attribute *attr,
2730					char *buf)
2731{
2732	struct amdgpu_device *adev = dev_get_drvdata(dev);
2733	u32 vddgfx;
2734	int r;
2735
2736	/* get the voltage */
2737	r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDGFX,
2738				   (void *)&vddgfx);
2739	if (r)
2740		return r;
2741
2742	return sysfs_emit(buf, "%d\n", vddgfx);
2743}
2744
2745static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
2746					      struct device_attribute *attr,
2747					      char *buf)
2748{
2749	return sysfs_emit(buf, "vddgfx\n");
2750}
2751
2752static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
2753				       struct device_attribute *attr,
2754				       char *buf)
2755{
2756	struct amdgpu_device *adev = dev_get_drvdata(dev);
2757	u32 vddnb;
2758	int r;
2759
2760	/* only APUs have vddnb */
2761	if  (!(adev->flags & AMD_IS_APU))
2762		return -EINVAL;
2763
2764	/* get the voltage */
2765	r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDNB,
2766				   (void *)&vddnb);
2767	if (r)
2768		return r;
2769
2770	return sysfs_emit(buf, "%d\n", vddnb);
2771}
2772
2773static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
2774					      struct device_attribute *attr,
2775					      char *buf)
2776{
2777	return sysfs_emit(buf, "vddnb\n");
2778}
2779
2780static unsigned int amdgpu_hwmon_get_power(struct device *dev,
2781					   enum amd_pp_sensors sensor)
2782{
2783	struct amdgpu_device *adev = dev_get_drvdata(dev);
2784	unsigned int uw;
2785	u32 query = 0;
2786	int r;
2787
2788	r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&query);
2789	if (r)
2790		return r;
2791
2792	/* convert to microwatts */
2793	uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;
2794
2795	return uw;
2796}
2797
2798static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
2799					   struct device_attribute *attr,
2800					   char *buf)
2801{
2802	unsigned int val;
2803
2804	val = amdgpu_hwmon_get_power(dev, AMDGPU_PP_SENSOR_GPU_AVG_POWER);
2805	if (val < 0)
2806		return val;
2807
2808	return sysfs_emit(buf, "%u\n", val);
2809}
2810
2811static ssize_t amdgpu_hwmon_show_power_input(struct device *dev,
2812					     struct device_attribute *attr,
2813					     char *buf)
2814{
2815	unsigned int val;
2816
2817	val = amdgpu_hwmon_get_power(dev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER);
2818	if (val < 0)
2819		return val;
2820
2821	return sysfs_emit(buf, "%u\n", val);
2822}
2823
2824static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
2825					 struct device_attribute *attr,
2826					 char *buf)
2827{
2828	return sysfs_emit(buf, "%i\n", 0);
2829}
2830
2831
2832static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
2833					struct device_attribute *attr,
2834					char *buf,
2835					enum pp_power_limit_level pp_limit_level)
2836{
2837	struct amdgpu_device *adev = dev_get_drvdata(dev);
2838	enum pp_power_type power_type = to_sensor_dev_attr(attr)->index;
2839	uint32_t limit;
2840	ssize_t size;
2841	int r;
2842
2843	if (amdgpu_in_reset(adev))
2844		return -EPERM;
2845	if (adev->in_suspend && !adev->in_runpm)
2846		return -EPERM;
2847
2848	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2849	if (r < 0) {
2850		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2851		return r;
2852	}
2853
2854	r = amdgpu_dpm_get_power_limit(adev, &limit,
2855				      pp_limit_level, power_type);
2856
2857	if (!r)
2858		size = sysfs_emit(buf, "%u\n", limit * 1000000);
2859	else
2860		size = sysfs_emit(buf, "\n");
2861
2862	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2863	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2864
2865	return size;
2866}
2867
2868
2869static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
2870					 struct device_attribute *attr,
2871					 char *buf)
2872{
2873	return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_MAX);
2874
2875}
2876
2877static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
2878					 struct device_attribute *attr,
2879					 char *buf)
2880{
2881	return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_CURRENT);
2882
2883}
2884
2885static ssize_t amdgpu_hwmon_show_power_cap_default(struct device *dev,
2886					 struct device_attribute *attr,
2887					 char *buf)
2888{
2889	return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_DEFAULT);
2890
2891}
2892
2893static ssize_t amdgpu_hwmon_show_power_label(struct device *dev,
2894					 struct device_attribute *attr,
2895					 char *buf)
2896{
2897	struct amdgpu_device *adev = dev_get_drvdata(dev);
2898	uint32_t gc_ver = adev->ip_versions[GC_HWIP][0];
2899
2900	if (gc_ver == IP_VERSION(10, 3, 1))
2901		return sysfs_emit(buf, "%s\n",
2902				  to_sensor_dev_attr(attr)->index == PP_PWR_TYPE_FAST ?
2903				  "fastPPT" : "slowPPT");
2904	else
2905		return sysfs_emit(buf, "PPT\n");
2906}
2907
2908static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
2909		struct device_attribute *attr,
2910		const char *buf,
2911		size_t count)
2912{
2913	struct amdgpu_device *adev = dev_get_drvdata(dev);
2914	int limit_type = to_sensor_dev_attr(attr)->index;
2915	int err;
2916	u32 value;
2917
2918	if (amdgpu_in_reset(adev))
2919		return -EPERM;
2920	if (adev->in_suspend && !adev->in_runpm)
2921		return -EPERM;
2922
2923	if (amdgpu_sriov_vf(adev))
2924		return -EINVAL;
2925
2926	err = kstrtou32(buf, 10, &value);
2927	if (err)
2928		return err;
2929
2930	value = value / 1000000; /* convert to Watt */
2931	value |= limit_type << 24;
2932
2933	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2934	if (err < 0) {
2935		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2936		return err;
2937	}
2938
2939	err = amdgpu_dpm_set_power_limit(adev, value);
2940
2941	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2942	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2943
2944	if (err)
2945		return err;
2946
2947	return count;
2948}
2949
2950static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
2951				      struct device_attribute *attr,
2952				      char *buf)
2953{
2954	struct amdgpu_device *adev = dev_get_drvdata(dev);
2955	uint32_t sclk;
2956	int r;
2957
2958	/* get the sclk */
2959	r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
2960				   (void *)&sclk);
2961	if (r)
2962		return r;
2963
2964	return sysfs_emit(buf, "%u\n", sclk * 10 * 1000);
2965}
2966
2967static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
2968					    struct device_attribute *attr,
2969					    char *buf)
2970{
2971	return sysfs_emit(buf, "sclk\n");
2972}
2973
2974static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
2975				      struct device_attribute *attr,
2976				      char *buf)
2977{
2978	struct amdgpu_device *adev = dev_get_drvdata(dev);
2979	uint32_t mclk;
2980	int r;
2981
2982	/* get the sclk */
2983	r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
2984				   (void *)&mclk);
2985	if (r)
2986		return r;
2987
2988	return sysfs_emit(buf, "%u\n", mclk * 10 * 1000);
2989}
2990
2991static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
2992					    struct device_attribute *attr,
2993					    char *buf)
2994{
2995	return sysfs_emit(buf, "mclk\n");
2996}
2997
2998/**
2999 * DOC: hwmon
3000 *
3001 * The amdgpu driver exposes the following sensor interfaces:
3002 *
3003 * - GPU temperature (via the on-die sensor)
3004 *
3005 * - GPU voltage
3006 *
3007 * - Northbridge voltage (APUs only)
3008 *
3009 * - GPU power
3010 *
3011 * - GPU fan
3012 *
3013 * - GPU gfx/compute engine clock
3014 *
3015 * - GPU memory clock (dGPU only)
3016 *
3017 * hwmon interfaces for GPU temperature:
3018 *
3019 * - temp[1-3]_input: the on die GPU temperature in millidegrees Celsius
3020 *   - temp2_input and temp3_input are supported on SOC15 dGPUs only
3021 *
3022 * - temp[1-3]_label: temperature channel label
3023 *   - temp2_label and temp3_label are supported on SOC15 dGPUs only
3024 *
3025 * - temp[1-3]_crit: temperature critical max value in millidegrees Celsius
3026 *   - temp2_crit and temp3_crit are supported on SOC15 dGPUs only
3027 *
3028 * - temp[1-3]_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius
3029 *   - temp2_crit_hyst and temp3_crit_hyst are supported on SOC15 dGPUs only
3030 *
3031 * - temp[1-3]_emergency: temperature emergency max value(asic shutdown) in millidegrees Celsius
3032 *   - these are supported on SOC15 dGPUs only
3033 *
3034 * hwmon interfaces for GPU voltage:
3035 *
3036 * - in0_input: the voltage on the GPU in millivolts
3037 *
3038 * - in1_input: the voltage on the Northbridge in millivolts
3039 *
3040 * hwmon interfaces for GPU power:
3041 *
3042 * - power1_average: average power used by the SoC in microWatts.  On APUs this includes the CPU.
3043 *
3044 * - power1_input: instantaneous power used by the SoC in microWatts.  On APUs this includes the CPU.
3045 *
3046 * - power1_cap_min: minimum cap supported in microWatts
3047 *
3048 * - power1_cap_max: maximum cap supported in microWatts
3049 *
3050 * - power1_cap: selected power cap in microWatts
3051 *
3052 * hwmon interfaces for GPU fan:
3053 *
3054 * - pwm1: pulse width modulation fan level (0-255)
3055 *
3056 * - pwm1_enable: pulse width modulation fan control method (0: no fan speed control, 1: manual fan speed control using pwm interface, 2: automatic fan speed control)
3057 *
3058 * - pwm1_min: pulse width modulation fan control minimum level (0)
3059 *
3060 * - pwm1_max: pulse width modulation fan control maximum level (255)
3061 *
3062 * - fan1_min: a minimum value Unit: revolution/min (RPM)
3063 *
3064 * - fan1_max: a maximum value Unit: revolution/max (RPM)
3065 *
3066 * - fan1_input: fan speed in RPM
3067 *
3068 * - fan[1-\*]_target: Desired fan speed Unit: revolution/min (RPM)
3069 *
3070 * - fan[1-\*]_enable: Enable or disable the sensors.1: Enable 0: Disable
3071 *
3072 * NOTE: DO NOT set the fan speed via "pwm1" and "fan[1-\*]_target" interfaces at the same time.
3073 *       That will get the former one overridden.
3074 *
3075 * hwmon interfaces for GPU clocks:
3076 *
3077 * - freq1_input: the gfx/compute clock in hertz
3078 *
3079 * - freq2_input: the memory clock in hertz
3080 *
3081 * You can use hwmon tools like sensors to view this information on your system.
3082 *
3083 */
3084
3085static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_EDGE);
3086static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
3087static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
3088static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_EDGE);
3089static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_JUNCTION);
3090static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 0);
3091static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 1);
3092static SENSOR_DEVICE_ATTR(temp2_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_JUNCTION);
3093static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_MEM);
3094static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 0);
3095static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 1);
3096static SENSOR_DEVICE_ATTR(temp3_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_MEM);
3097static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_EDGE);
3098static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_JUNCTION);
3099static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_MEM);
3100static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
3101static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
3102static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
3103static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
3104static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
3105static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0);
3106static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0);
3107static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0);
3108static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0);
3109static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
3110static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
3111static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
3112static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
3113static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
3114static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, amdgpu_hwmon_show_power_input, NULL, 0);
3115static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
3116static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
3117static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
3118static SENSOR_DEVICE_ATTR(power1_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 0);
3119static SENSOR_DEVICE_ATTR(power1_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 0);
3120static SENSOR_DEVICE_ATTR(power2_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 1);
3121static SENSOR_DEVICE_ATTR(power2_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 1);
3122static SENSOR_DEVICE_ATTR(power2_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 1);
3123static SENSOR_DEVICE_ATTR(power2_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 1);
3124static SENSOR_DEVICE_ATTR(power2_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 1);
3125static SENSOR_DEVICE_ATTR(power2_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 1);
3126static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);
3127static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0);
3128static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0);
3129static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0);
3130
3131static struct attribute *hwmon_attributes[] = {
3132	&sensor_dev_attr_temp1_input.dev_attr.attr,
3133	&sensor_dev_attr_temp1_crit.dev_attr.attr,
3134	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
3135	&sensor_dev_attr_temp2_input.dev_attr.attr,
3136	&sensor_dev_attr_temp2_crit.dev_attr.attr,
3137	&sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
3138	&sensor_dev_attr_temp3_input.dev_attr.attr,
3139	&sensor_dev_attr_temp3_crit.dev_attr.attr,
3140	&sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
3141	&sensor_dev_attr_temp1_emergency.dev_attr.attr,
3142	&sensor_dev_attr_temp2_emergency.dev_attr.attr,
3143	&sensor_dev_attr_temp3_emergency.dev_attr.attr,
3144	&sensor_dev_attr_temp1_label.dev_attr.attr,
3145	&sensor_dev_attr_temp2_label.dev_attr.attr,
3146	&sensor_dev_attr_temp3_label.dev_attr.attr,
3147	&sensor_dev_attr_pwm1.dev_attr.attr,
3148	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
3149	&sensor_dev_attr_pwm1_min.dev_attr.attr,
3150	&sensor_dev_attr_pwm1_max.dev_attr.attr,
3151	&sensor_dev_attr_fan1_input.dev_attr.attr,
3152	&sensor_dev_attr_fan1_min.dev_attr.attr,
3153	&sensor_dev_attr_fan1_max.dev_attr.attr,
3154	&sensor_dev_attr_fan1_target.dev_attr.attr,
3155	&sensor_dev_attr_fan1_enable.dev_attr.attr,
3156	&sensor_dev_attr_in0_input.dev_attr.attr,
3157	&sensor_dev_attr_in0_label.dev_attr.attr,
3158	&sensor_dev_attr_in1_input.dev_attr.attr,
3159	&sensor_dev_attr_in1_label.dev_attr.attr,
3160	&sensor_dev_attr_power1_average.dev_attr.attr,
3161	&sensor_dev_attr_power1_input.dev_attr.attr,
3162	&sensor_dev_attr_power1_cap_max.dev_attr.attr,
3163	&sensor_dev_attr_power1_cap_min.dev_attr.attr,
3164	&sensor_dev_attr_power1_cap.dev_attr.attr,
3165	&sensor_dev_attr_power1_cap_default.dev_attr.attr,
3166	&sensor_dev_attr_power1_label.dev_attr.attr,
3167	&sensor_dev_attr_power2_average.dev_attr.attr,
3168	&sensor_dev_attr_power2_cap_max.dev_attr.attr,
3169	&sensor_dev_attr_power2_cap_min.dev_attr.attr,
3170	&sensor_dev_attr_power2_cap.dev_attr.attr,
3171	&sensor_dev_attr_power2_cap_default.dev_attr.attr,
3172	&sensor_dev_attr_power2_label.dev_attr.attr,
3173	&sensor_dev_attr_freq1_input.dev_attr.attr,
3174	&sensor_dev_attr_freq1_label.dev_attr.attr,
3175	&sensor_dev_attr_freq2_input.dev_attr.attr,
3176	&sensor_dev_attr_freq2_label.dev_attr.attr,
3177	NULL
3178};
3179
3180static umode_t hwmon_attributes_visible(struct kobject *kobj,
3181					struct attribute *attr, int index)
3182{
3183	struct device *dev = kobj_to_dev(kobj);
3184	struct amdgpu_device *adev = dev_get_drvdata(dev);
3185	umode_t effective_mode = attr->mode;
3186	uint32_t gc_ver = adev->ip_versions[GC_HWIP][0];
3187	uint32_t tmp;
3188
3189	/* under multi-vf mode, the hwmon attributes are all not supported */
3190	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
3191		return 0;
3192
3193	/* under pp one vf mode manage of hwmon attributes is not supported */
3194	if (amdgpu_sriov_is_pp_one_vf(adev))
3195		effective_mode &= ~S_IWUSR;
3196
3197	/* Skip fan attributes if fan is not present */
3198	if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3199	    attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3200	    attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3201	    attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3202	    attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3203	    attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3204	    attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3205	    attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3206	    attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3207		return 0;
3208
3209	/* Skip fan attributes on APU */
3210	if ((adev->flags & AMD_IS_APU) &&
3211	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3212	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3213	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3214	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3215	     attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3216	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3217	     attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3218	     attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3219	     attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3220		return 0;
3221
3222	/* Skip crit temp on APU */
3223	if ((((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ)) ||
3224	    (gc_ver == IP_VERSION(9, 4, 3))) &&
3225	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
3226	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
3227		return 0;
3228
3229	/* Skip limit attributes if DPM is not enabled */
3230	if (!adev->pm.dpm_enabled &&
3231	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
3232	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
3233	     attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3234	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3235	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3236	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3237	     attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3238	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3239	     attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3240	     attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3241	     attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3242		return 0;
3243
3244	/* mask fan attributes if we have no bindings for this asic to expose */
3245	if (((amdgpu_dpm_get_fan_speed_pwm(adev, NULL) == -EOPNOTSUPP) &&
3246	      attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
3247	    ((amdgpu_dpm_get_fan_control_mode(adev, NULL) == -EOPNOTSUPP) &&
3248	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
3249		effective_mode &= ~S_IRUGO;
3250
3251	if (((amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) == -EOPNOTSUPP) &&
3252	      attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
3253	      ((amdgpu_dpm_set_fan_control_mode(adev, U32_MAX) == -EOPNOTSUPP) &&
3254	      attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
3255		effective_mode &= ~S_IWUSR;
3256
3257	/* not implemented yet for APUs other than GC 10.3.1 (vangogh) and 9.4.3 */
3258	if (((adev->family == AMDGPU_FAMILY_SI) ||
3259	     ((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(10, 3, 1)) &&
3260	      (gc_ver != IP_VERSION(9, 4, 3)))) &&
3261	    (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
3262	     attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
3263	     attr == &sensor_dev_attr_power1_cap.dev_attr.attr ||
3264	     attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr))
3265		return 0;
3266
3267	/* not implemented yet for APUs having < GC 9.3.0 (Renoir) */
3268	if (((adev->family == AMDGPU_FAMILY_SI) ||
3269	     ((adev->flags & AMD_IS_APU) && (gc_ver < IP_VERSION(9, 3, 0)))) &&
3270	    (attr == &sensor_dev_attr_power1_average.dev_attr.attr))
3271		return 0;
3272
3273	/* not all products support both average and instantaneous */
3274	if (attr == &sensor_dev_attr_power1_average.dev_attr.attr &&
3275	    amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&tmp) == -EOPNOTSUPP)
3276		return 0;
3277	if (attr == &sensor_dev_attr_power1_input.dev_attr.attr &&
3278	    amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&tmp) == -EOPNOTSUPP)
3279		return 0;
3280
3281	/* hide max/min values if we can't both query and manage the fan */
3282	if (((amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) == -EOPNOTSUPP) &&
3283	      (amdgpu_dpm_get_fan_speed_pwm(adev, NULL) == -EOPNOTSUPP) &&
3284	      (amdgpu_dpm_set_fan_speed_rpm(adev, U32_MAX) == -EOPNOTSUPP) &&
3285	      (amdgpu_dpm_get_fan_speed_rpm(adev, NULL) == -EOPNOTSUPP)) &&
3286	    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3287	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
3288		return 0;
3289
3290	if ((amdgpu_dpm_set_fan_speed_rpm(adev, U32_MAX) == -EOPNOTSUPP) &&
3291	     (amdgpu_dpm_get_fan_speed_rpm(adev, NULL) == -EOPNOTSUPP) &&
3292	     (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3293	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
3294		return 0;
3295
3296	if ((adev->family == AMDGPU_FAMILY_SI ||	/* not implemented yet */
3297	     adev->family == AMDGPU_FAMILY_KV ||	/* not implemented yet */
3298	     (gc_ver == IP_VERSION(9, 4, 3))) &&
3299	    (attr == &sensor_dev_attr_in0_input.dev_attr.attr ||
3300	     attr == &sensor_dev_attr_in0_label.dev_attr.attr))
3301		return 0;
3302
3303	/* only APUs other than gc 9,4,3 have vddnb */
3304	if ((!(adev->flags & AMD_IS_APU) || (gc_ver == IP_VERSION(9, 4, 3))) &&
3305	    (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
3306	     attr == &sensor_dev_attr_in1_label.dev_attr.attr))
3307		return 0;
3308
3309	/* no mclk on APUs other than gc 9,4,3*/
3310	if (((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(9, 4, 3))) &&
3311	    (attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
3312	     attr == &sensor_dev_attr_freq2_label.dev_attr.attr))
3313		return 0;
3314
3315	if (((adev->flags & AMD_IS_APU) || gc_ver < IP_VERSION(9, 0, 0)) &&
3316	    (gc_ver != IP_VERSION(9, 4, 3)) &&
3317	    (attr == &sensor_dev_attr_temp2_input.dev_attr.attr ||
3318	     attr == &sensor_dev_attr_temp2_label.dev_attr.attr ||
3319	     attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
3320	     attr == &sensor_dev_attr_temp3_input.dev_attr.attr ||
3321	     attr == &sensor_dev_attr_temp3_label.dev_attr.attr ||
3322	     attr == &sensor_dev_attr_temp3_crit.dev_attr.attr))
3323		return 0;
3324
3325	/* hotspot temperature for gc 9,4,3*/
3326	if ((gc_ver == IP_VERSION(9, 4, 3)) &&
3327	    (attr == &sensor_dev_attr_temp1_input.dev_attr.attr ||
3328	     attr == &sensor_dev_attr_temp1_label.dev_attr.attr))
3329		return 0;
3330
3331	/* only SOC15 dGPUs support hotspot and mem temperatures */
3332	if (((adev->flags & AMD_IS_APU) || gc_ver < IP_VERSION(9, 0, 0) ||
3333	    (gc_ver == IP_VERSION(9, 4, 3))) &&
3334	     (attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
3335	     attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr ||
3336	     attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
3337	     attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
3338	     attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr))
3339		return 0;
3340
3341	/* only Vangogh has fast PPT limit and power labels */
3342	if (!(gc_ver == IP_VERSION(10, 3, 1)) &&
3343	    (attr == &sensor_dev_attr_power2_average.dev_attr.attr ||
3344	     attr == &sensor_dev_attr_power2_cap_max.dev_attr.attr ||
3345	     attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr ||
3346	     attr == &sensor_dev_attr_power2_cap.dev_attr.attr ||
3347	     attr == &sensor_dev_attr_power2_cap_default.dev_attr.attr ||
3348	     attr == &sensor_dev_attr_power2_label.dev_attr.attr))
3349		return 0;
3350
3351	return effective_mode;
3352}
3353
3354static const struct attribute_group hwmon_attrgroup = {
3355	.attrs = hwmon_attributes,
3356	.is_visible = hwmon_attributes_visible,
3357};
3358
3359static const struct attribute_group *hwmon_groups[] = {
3360	&hwmon_attrgroup,
3361	NULL
3362};
3363
3364int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
3365{
3366	int ret;
3367	uint32_t mask = 0;
3368
3369	if (adev->pm.sysfs_initialized)
3370		return 0;
3371
3372	INIT_LIST_HEAD(&adev->pm.pm_attr_list);
3373
3374	if (adev->pm.dpm_enabled == 0)
3375		return 0;
3376
3377	adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
3378								   DRIVER_NAME, adev,
3379								   hwmon_groups);
3380	if (IS_ERR(adev->pm.int_hwmon_dev)) {
3381		ret = PTR_ERR(adev->pm.int_hwmon_dev);
3382		dev_err(adev->dev,
3383			"Unable to register hwmon device: %d\n", ret);
3384		return ret;
3385	}
3386
3387	switch (amdgpu_virt_get_sriov_vf_mode(adev)) {
3388	case SRIOV_VF_MODE_ONE_VF:
3389		mask = ATTR_FLAG_ONEVF;
3390		break;
3391	case SRIOV_VF_MODE_MULTI_VF:
3392		mask = 0;
3393		break;
3394	case SRIOV_VF_MODE_BARE_METAL:
3395	default:
3396		mask = ATTR_FLAG_MASK_ALL;
3397		break;
3398	}
3399
3400	ret = amdgpu_device_attr_create_groups(adev,
3401					       amdgpu_device_attrs,
3402					       ARRAY_SIZE(amdgpu_device_attrs),
3403					       mask,
3404					       &adev->pm.pm_attr_list);
3405	if (ret)
3406		return ret;
3407
3408	adev->pm.sysfs_initialized = true;
3409
3410	return 0;
3411}
3412
3413void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
3414{
3415	if (adev->pm.int_hwmon_dev)
3416		hwmon_device_unregister(adev->pm.int_hwmon_dev);
3417
3418	amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
3419}
3420
3421/*
3422 * Debugfs info
3423 */
3424#if defined(CONFIG_DEBUG_FS)
3425
3426static void amdgpu_debugfs_prints_cpu_info(struct seq_file *m,
3427					   struct amdgpu_device *adev)
3428{
3429	uint16_t *p_val;
3430	uint32_t size;
3431	int i;
3432	uint32_t num_cpu_cores = amdgpu_dpm_get_num_cpu_cores(adev);
3433
3434	if (amdgpu_dpm_is_cclk_dpm_supported(adev)) {
3435		p_val = kcalloc(num_cpu_cores, sizeof(uint16_t),
3436				GFP_KERNEL);
3437
3438		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_CPU_CLK,
3439					    (void *)p_val, &size)) {
3440			for (i = 0; i < num_cpu_cores; i++)
3441				seq_printf(m, "\t%u MHz (CPU%d)\n",
3442					   *(p_val + i), i);
3443		}
3444
3445		kfree(p_val);
3446	}
3447}
3448
3449static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
3450{
3451	uint32_t mp1_ver = adev->ip_versions[MP1_HWIP][0];
3452	uint32_t gc_ver = adev->ip_versions[GC_HWIP][0];
3453	uint32_t value;
3454	uint64_t value64 = 0;
3455	uint32_t query = 0;
3456	int size;
3457
3458	/* GPU Clocks */
3459	size = sizeof(value);
3460	seq_printf(m, "GFX Clocks and Power:\n");
3461
3462	amdgpu_debugfs_prints_cpu_info(m, adev);
3463
3464	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
3465		seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
3466	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
3467		seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
3468	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size))
3469		seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100);
3470	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size))
3471		seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100);
3472	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
3473		seq_printf(m, "\t%u mV (VDDGFX)\n", value);
3474	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
3475		seq_printf(m, "\t%u mV (VDDNB)\n", value);
3476	size = sizeof(uint32_t);
3477	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&query, &size))
3478		seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff);
3479	size = sizeof(uint32_t);
3480	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&query, &size))
3481		seq_printf(m, "\t%u.%u W (current GPU)\n", query >> 8, query & 0xff);
3482	size = sizeof(value);
3483	seq_printf(m, "\n");
3484
3485	/* GPU Temp */
3486	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size))
3487		seq_printf(m, "GPU Temperature: %u C\n", value/1000);
3488
3489	/* GPU Load */
3490	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
3491		seq_printf(m, "GPU Load: %u %%\n", value);
3492	/* MEM Load */
3493	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size))
3494		seq_printf(m, "MEM Load: %u %%\n", value);
3495
3496	seq_printf(m, "\n");
3497
3498	/* SMC feature mask */
3499	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
3500		seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);
3501
3502	/* ASICs greater than CHIP_VEGA20 supports these sensors */
3503	if (gc_ver != IP_VERSION(9, 4, 0) && mp1_ver > IP_VERSION(9, 0, 0)) {
3504		/* VCN clocks */
3505		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
3506			if (!value) {
3507				seq_printf(m, "VCN: Disabled\n");
3508			} else {
3509				seq_printf(m, "VCN: Enabled\n");
3510				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
3511					seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
3512				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
3513					seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
3514			}
3515		}
3516		seq_printf(m, "\n");
3517	} else {
3518		/* UVD clocks */
3519		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
3520			if (!value) {
3521				seq_printf(m, "UVD: Disabled\n");
3522			} else {
3523				seq_printf(m, "UVD: Enabled\n");
3524				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
3525					seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
3526				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
3527					seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
3528			}
3529		}
3530		seq_printf(m, "\n");
3531
3532		/* VCE clocks */
3533		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
3534			if (!value) {
3535				seq_printf(m, "VCE: Disabled\n");
3536			} else {
3537				seq_printf(m, "VCE: Enabled\n");
3538				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
3539					seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
3540			}
3541		}
3542	}
3543
3544	return 0;
3545}
3546
3547static const struct cg_flag_name clocks[] = {
3548	{AMD_CG_SUPPORT_GFX_FGCG, "Graphics Fine Grain Clock Gating"},
3549	{AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
3550	{AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
3551	{AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
3552	{AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
3553	{AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
3554	{AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
3555	{AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
3556	{AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
3557	{AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
3558	{AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
3559	{AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
3560	{AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
3561	{AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
3562	{AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
3563	{AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
3564	{AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
3565	{AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
3566	{AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
3567	{AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
3568	{AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
3569	{AMD_CG_SUPPORT_DRM_MGCG, "Digital Right Management Medium Grain Clock Gating"},
3570	{AMD_CG_SUPPORT_DRM_LS, "Digital Right Management Light Sleep"},
3571	{AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"},
3572	{AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},
3573	{AMD_CG_SUPPORT_VCN_MGCG, "VCN Medium Grain Clock Gating"},
3574	{AMD_CG_SUPPORT_HDP_DS, "Host Data Path Deep Sleep"},
3575	{AMD_CG_SUPPORT_HDP_SD, "Host Data Path Shutdown"},
3576	{AMD_CG_SUPPORT_IH_CG, "Interrupt Handler Clock Gating"},
3577	{AMD_CG_SUPPORT_JPEG_MGCG, "JPEG Medium Grain Clock Gating"},
3578	{AMD_CG_SUPPORT_REPEATER_FGCG, "Repeater Fine Grain Clock Gating"},
3579	{AMD_CG_SUPPORT_GFX_PERF_CLK, "Perfmon Clock Gating"},
3580	{AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"},
3581	{AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"},
3582	{0, NULL},
3583};
3584
3585static void amdgpu_parse_cg_state(struct seq_file *m, u64 flags)
3586{
3587	int i;
3588
3589	for (i = 0; clocks[i].flag; i++)
3590		seq_printf(m, "\t%s: %s\n", clocks[i].name,
3591			   (flags & clocks[i].flag) ? "On" : "Off");
3592}
3593
3594static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
3595{
3596	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
3597	struct drm_device *dev = adev_to_drm(adev);
3598	u64 flags = 0;
3599	int r;
3600
3601	if (amdgpu_in_reset(adev))
3602		return -EPERM;
3603	if (adev->in_suspend && !adev->in_runpm)
3604		return -EPERM;
3605
3606	r = pm_runtime_get_sync(dev->dev);
3607	if (r < 0) {
3608		pm_runtime_put_autosuspend(dev->dev);
3609		return r;
3610	}
3611
3612	if (amdgpu_dpm_debugfs_print_current_performance_level(adev, m)) {
3613		r = amdgpu_debugfs_pm_info_pp(m, adev);
3614		if (r)
3615			goto out;
3616	}
3617
3618	amdgpu_device_ip_get_clockgating_state(adev, &flags);
3619
3620	seq_printf(m, "Clock Gating Flags Mask: 0x%llx\n", flags);
3621	amdgpu_parse_cg_state(m, flags);
3622	seq_printf(m, "\n");
3623
3624out:
3625	pm_runtime_mark_last_busy(dev->dev);
3626	pm_runtime_put_autosuspend(dev->dev);
3627
3628	return r;
3629}
3630
3631DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_pm_info);
3632
3633/*
3634 * amdgpu_pm_priv_buffer_read - Read memory region allocated to FW
3635 *
3636 * Reads debug memory region allocated to PMFW
3637 */
3638static ssize_t amdgpu_pm_prv_buffer_read(struct file *f, char __user *buf,
3639					 size_t size, loff_t *pos)
3640{
3641	struct amdgpu_device *adev = file_inode(f)->i_private;
3642	size_t smu_prv_buf_size;
3643	void *smu_prv_buf;
3644	int ret = 0;
3645
3646	if (amdgpu_in_reset(adev))
3647		return -EPERM;
3648	if (adev->in_suspend && !adev->in_runpm)
3649		return -EPERM;
3650
3651	ret = amdgpu_dpm_get_smu_prv_buf_details(adev, &smu_prv_buf, &smu_prv_buf_size);
3652	if (ret)
3653		return ret;
3654
3655	if (!smu_prv_buf || !smu_prv_buf_size)
3656		return -EINVAL;
3657
3658	return simple_read_from_buffer(buf, size, pos, smu_prv_buf,
3659				       smu_prv_buf_size);
3660}
3661
3662static const struct file_operations amdgpu_debugfs_pm_prv_buffer_fops = {
3663	.owner = THIS_MODULE,
3664	.open = simple_open,
3665	.read = amdgpu_pm_prv_buffer_read,
3666	.llseek = default_llseek,
3667};
3668
3669#endif
3670
3671void amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
3672{
3673#if defined(CONFIG_DEBUG_FS)
3674	struct drm_minor *minor = adev_to_drm(adev)->primary;
3675	struct dentry *root = minor->debugfs_root;
3676
3677	if (!adev->pm.dpm_enabled)
3678		return;
3679
3680	debugfs_create_file("amdgpu_pm_info", 0444, root, adev,
3681			    &amdgpu_debugfs_pm_info_fops);
3682
3683	if (adev->pm.smu_prv_buffer_size > 0)
3684		debugfs_create_file_size("amdgpu_pm_prv_buffer", 0444, root,
3685					 adev,
3686					 &amdgpu_debugfs_pm_prv_buffer_fops,
3687					 adev->pm.smu_prv_buffer_size);
3688
3689	amdgpu_dpm_stb_debug_fs_init(adev);
3690#endif
3691}
3692