cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

amdgpu_pm.c (104186B)


      1/*
      2 * Copyright 2017 Advanced Micro Devices, Inc.
      3 *
      4 * Permission is hereby granted, free of charge, to any person obtaining a
      5 * copy of this software and associated documentation files (the "Software"),
      6 * to deal in the Software without restriction, including without limitation
      7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
      8 * and/or sell copies of the Software, and to permit persons to whom the
      9 * Software is furnished to do so, subject to the following conditions:
     10 *
     11 * The above copyright notice and this permission notice shall be included in
     12 * all copies or substantial portions of the Software.
     13 *
     14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
     18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
     19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
     20 * OTHER DEALINGS IN THE SOFTWARE.
     21 *
     22 * Authors: Rafał Miłecki <zajec5@gmail.com>
     23 *          Alex Deucher <alexdeucher@gmail.com>
     24 */
     25
     26#include "amdgpu.h"
     27#include "amdgpu_drv.h"
     28#include "amdgpu_pm.h"
     29#include "amdgpu_dpm.h"
     30#include "atom.h"
     31#include <linux/pci.h>
     32#include <linux/hwmon.h>
     33#include <linux/hwmon-sysfs.h>
     34#include <linux/nospec.h>
     35#include <linux/pm_runtime.h>
     36#include <asm/processor.h>
     37
     38static const struct cg_flag_name clocks[] = {
     39	{AMD_CG_SUPPORT_GFX_FGCG, "Graphics Fine Grain Clock Gating"},
     40	{AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
     41	{AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
     42	{AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
     43	{AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
     44	{AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
     45	{AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
     46	{AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
     47	{AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
     48	{AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
     49	{AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
     50	{AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
     51	{AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
     52	{AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
     53	{AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
     54	{AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
     55	{AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
     56	{AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
     57	{AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
     58	{AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
     59	{AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
      60	{AMD_CG_SUPPORT_DRM_MGCG, "Digital Rights Management Medium Grain Clock Gating"},
      61	{AMD_CG_SUPPORT_DRM_LS, "Digital Rights Management Light Sleep"},
     62	{AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"},
     63	{AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},
     64	{AMD_CG_SUPPORT_VCN_MGCG, "VCN Medium Grain Clock Gating"},
     65	{AMD_CG_SUPPORT_HDP_DS, "Host Data Path Deep Sleep"},
     66	{AMD_CG_SUPPORT_HDP_SD, "Host Data Path Shutdown"},
     67	{AMD_CG_SUPPORT_IH_CG, "Interrupt Handler Clock Gating"},
     68	{AMD_CG_SUPPORT_JPEG_MGCG, "JPEG Medium Grain Clock Gating"},
     69	{AMD_CG_SUPPORT_REPEATER_FGCG, "Repeater Fine Grain Clock Gating"},
     70	{AMD_CG_SUPPORT_GFX_PERF_CLK, "Perfmon Clock Gating"},
     71	{AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"},
     72	{AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"},
     73	{0, NULL},
     74};
     75
     76static const struct hwmon_temp_label {
     77	enum PP_HWMON_TEMP channel;
     78	const char *label;
     79} temp_label[] = {
     80	{PP_TEMP_EDGE, "edge"},
     81	{PP_TEMP_JUNCTION, "junction"},
     82	{PP_TEMP_MEM, "mem"},
     83};
     84
     85const char * const amdgpu_pp_profile_name[] = {
     86	"BOOTUP_DEFAULT",
     87	"3D_FULL_SCREEN",
     88	"POWER_SAVING",
     89	"VIDEO",
     90	"VR",
     91	"COMPUTE",
     92	"CUSTOM",
     93	"WINDOW_3D",
     94};
     95
     96/**
     97 * DOC: power_dpm_state
     98 *
     99 * The power_dpm_state file is a legacy interface and is only provided for
    100 * backwards compatibility. The amdgpu driver provides a sysfs API for adjusting
    101 * certain power related parameters.  The file power_dpm_state is used for this.
    102 * It accepts the following arguments:
    103 *
    104 * - battery
    105 *
    106 * - balanced
    107 *
    108 * - performance
    109 *
    110 * battery
    111 *
    112 * On older GPUs, the vbios provided a special power state for battery
    113 * operation.  Selecting battery switched to this state.  This is no
    114 * longer provided on newer GPUs so the option does nothing in that case.
    115 *
    116 * balanced
    117 *
    118 * On older GPUs, the vbios provided a special power state for balanced
    119 * operation.  Selecting balanced switched to this state.  This is no
    120 * longer provided on newer GPUs so the option does nothing in that case.
    121 *
    122 * performance
    123 *
    124 * On older GPUs, the vbios provided a special power state for performance
    125 * operation.  Selecting performance switched to this state.  This is no
    126 * longer provided on newer GPUs so the option does nothing in that case.
    127 *
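         * For example, assuming the GPU is card0, the current state can be
         * read back and a new one requested through sysfs:
         *
         * .. code-block:: bash
         *
         *	cat /sys/class/drm/card0/device/power_dpm_state
         *	echo battery > /sys/class/drm/card0/device/power_dpm_state
         *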
    128 */
    129
    130static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
    131					  struct device_attribute *attr,
    132					  char *buf)
    133{
    134	struct drm_device *ddev = dev_get_drvdata(dev);
    135	struct amdgpu_device *adev = drm_to_adev(ddev);
    136	enum amd_pm_state_type pm;
    137	int ret;
    138
    139	if (amdgpu_in_reset(adev))
    140		return -EPERM;
    141	if (adev->in_suspend && !adev->in_runpm)
    142		return -EPERM;
    143
    144	ret = pm_runtime_get_sync(ddev->dev);
    145	if (ret < 0) {
    146		pm_runtime_put_autosuspend(ddev->dev);
    147		return ret;
    148	}
    149
    150	amdgpu_dpm_get_current_power_state(adev, &pm);
    151
    152	pm_runtime_mark_last_busy(ddev->dev);
    153	pm_runtime_put_autosuspend(ddev->dev);
    154
    155	return sysfs_emit(buf, "%s\n",
    156			  (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
    157			  (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
    158}
    159
    160static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
    161					  struct device_attribute *attr,
    162					  const char *buf,
    163					  size_t count)
    164{
    165	struct drm_device *ddev = dev_get_drvdata(dev);
    166	struct amdgpu_device *adev = drm_to_adev(ddev);
    167	enum amd_pm_state_type  state;
    168	int ret;
    169
    170	if (amdgpu_in_reset(adev))
    171		return -EPERM;
    172	if (adev->in_suspend && !adev->in_runpm)
    173		return -EPERM;
    174
    175	if (strncmp("battery", buf, strlen("battery")) == 0)
    176		state = POWER_STATE_TYPE_BATTERY;
    177	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
    178		state = POWER_STATE_TYPE_BALANCED;
    179	else if (strncmp("performance", buf, strlen("performance")) == 0)
    180		state = POWER_STATE_TYPE_PERFORMANCE;
    181	else
    182		return -EINVAL;
    183
    184	ret = pm_runtime_get_sync(ddev->dev);
    185	if (ret < 0) {
    186		pm_runtime_put_autosuspend(ddev->dev);
    187		return ret;
    188	}
    189
    190	amdgpu_dpm_set_power_state(adev, state);
    191
    192	pm_runtime_mark_last_busy(ddev->dev);
    193	pm_runtime_put_autosuspend(ddev->dev);
    194
    195	return count;
    196}
    197
    198
    199/**
    200 * DOC: power_dpm_force_performance_level
    201 *
    202 * The amdgpu driver provides a sysfs API for adjusting certain power
    203 * related parameters.  The file power_dpm_force_performance_level is
    204 * used for this.  It accepts the following arguments:
    205 *
    206 * - auto
    207 *
    208 * - low
    209 *
    210 * - high
    211 *
    212 * - manual
    213 *
    214 * - profile_standard
    215 *
    216 * - profile_min_sclk
    217 *
    218 * - profile_min_mclk
    219 *
    220 * - profile_peak
    221 *
    222 * auto
    223 *
    224 * When auto is selected, the driver will attempt to dynamically select
    225 * the optimal power profile for current conditions in the driver.
    226 *
    227 * low
    228 *
    229 * When low is selected, the clocks are forced to the lowest power state.
    230 *
    231 * high
    232 *
    233 * When high is selected, the clocks are forced to the highest power state.
    234 *
    235 * manual
    236 *
    237 * When manual is selected, the user can manually adjust which power states
    238 * are enabled for each clock domain via the sysfs pp_dpm_mclk, pp_dpm_sclk,
    239 * and pp_dpm_pcie files and adjust the power state transition heuristics
    240 * via the pp_power_profile_mode sysfs file.
    241 *
    242 * profile_standard
    243 * profile_min_sclk
    244 * profile_min_mclk
    245 * profile_peak
    246 *
    247 * When the profiling modes are selected, clock and power gating are
    248 * disabled and the clocks are set for different profiling cases. This
     249 * mode is recommended for profiling specific workloads where you do
     250 * not want clock or power gating or clock fluctuations to interfere
    251 * with your results. profile_standard sets the clocks to a fixed clock
    252 * level which varies from asic to asic.  profile_min_sclk forces the sclk
    253 * to the lowest level.  profile_min_mclk forces the mclk to the lowest level.
    254 * profile_peak sets all clocks (mclk, sclk, pcie) to the highest levels.
    255 *
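         * For example, a typical manual-tuning session starts like this
         * (assuming the GPU is card0):
         *
         * .. code-block:: bash
         *
         *	cd /sys/class/drm/card0/device
         *	echo manual > power_dpm_force_performance_level
         *	cat power_dpm_force_performance_level
         *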
    256 */
    257
    258static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
    259							    struct device_attribute *attr,
    260							    char *buf)
    261{
    262	struct drm_device *ddev = dev_get_drvdata(dev);
    263	struct amdgpu_device *adev = drm_to_adev(ddev);
    264	enum amd_dpm_forced_level level = 0xff;
    265	int ret;
    266
    267	if (amdgpu_in_reset(adev))
    268		return -EPERM;
    269	if (adev->in_suspend && !adev->in_runpm)
    270		return -EPERM;
    271
    272	ret = pm_runtime_get_sync(ddev->dev);
    273	if (ret < 0) {
    274		pm_runtime_put_autosuspend(ddev->dev);
    275		return ret;
    276	}
    277
    278	level = amdgpu_dpm_get_performance_level(adev);
    279
    280	pm_runtime_mark_last_busy(ddev->dev);
    281	pm_runtime_put_autosuspend(ddev->dev);
    282
    283	return sysfs_emit(buf, "%s\n",
    284			  (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
    285			  (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
    286			  (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
    287			  (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
    288			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
    289			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
    290			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
    291			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
    292			  (level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) ? "perf_determinism" :
    293			  "unknown");
    294}
    295
    296static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
    297							    struct device_attribute *attr,
    298							    const char *buf,
    299							    size_t count)
    300{
    301	struct drm_device *ddev = dev_get_drvdata(dev);
    302	struct amdgpu_device *adev = drm_to_adev(ddev);
    303	enum amd_dpm_forced_level level;
    304	int ret = 0;
    305
    306	if (amdgpu_in_reset(adev))
    307		return -EPERM;
    308	if (adev->in_suspend && !adev->in_runpm)
    309		return -EPERM;
    310
    311	if (strncmp("low", buf, strlen("low")) == 0) {
    312		level = AMD_DPM_FORCED_LEVEL_LOW;
    313	} else if (strncmp("high", buf, strlen("high")) == 0) {
    314		level = AMD_DPM_FORCED_LEVEL_HIGH;
    315	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
    316		level = AMD_DPM_FORCED_LEVEL_AUTO;
    317	} else if (strncmp("manual", buf, strlen("manual")) == 0) {
    318		level = AMD_DPM_FORCED_LEVEL_MANUAL;
    319	} else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) {
    320		level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
    321	} else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) {
    322		level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
    323	} else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) {
    324		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
    325	} else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) {
    326		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
    327	} else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
    328		level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
    329	} else if (strncmp("perf_determinism", buf, strlen("perf_determinism")) == 0) {
    330		level = AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM;
     331	} else {
    332		return -EINVAL;
    333	}
    334
    335	ret = pm_runtime_get_sync(ddev->dev);
    336	if (ret < 0) {
    337		pm_runtime_put_autosuspend(ddev->dev);
    338		return ret;
    339	}
    340
    341	mutex_lock(&adev->pm.stable_pstate_ctx_lock);
    342	if (amdgpu_dpm_force_performance_level(adev, level)) {
    343		pm_runtime_mark_last_busy(ddev->dev);
    344		pm_runtime_put_autosuspend(ddev->dev);
    345		mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
    346		return -EINVAL;
    347	}
    348	/* override whatever a user ctx may have set */
    349	adev->pm.stable_pstate_ctx = NULL;
    350	mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
    351
    352	pm_runtime_mark_last_busy(ddev->dev);
    353	pm_runtime_put_autosuspend(ddev->dev);
    354
    355	return count;
    356}
    357
    358static ssize_t amdgpu_get_pp_num_states(struct device *dev,
    359		struct device_attribute *attr,
    360		char *buf)
    361{
    362	struct drm_device *ddev = dev_get_drvdata(dev);
    363	struct amdgpu_device *adev = drm_to_adev(ddev);
    364	struct pp_states_info data;
    365	uint32_t i;
    366	int buf_len, ret;
    367
    368	if (amdgpu_in_reset(adev))
    369		return -EPERM;
    370	if (adev->in_suspend && !adev->in_runpm)
    371		return -EPERM;
    372
    373	ret = pm_runtime_get_sync(ddev->dev);
    374	if (ret < 0) {
    375		pm_runtime_put_autosuspend(ddev->dev);
    376		return ret;
    377	}
    378
    379	if (amdgpu_dpm_get_pp_num_states(adev, &data))
    380		memset(&data, 0, sizeof(data));
    381
    382	pm_runtime_mark_last_busy(ddev->dev);
    383	pm_runtime_put_autosuspend(ddev->dev);
    384
    385	buf_len = sysfs_emit(buf, "states: %d\n", data.nums);
    386	for (i = 0; i < data.nums; i++)
    387		buf_len += sysfs_emit_at(buf, buf_len, "%d %s\n", i,
    388				(data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
    389				(data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
    390				(data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
    391				(data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");
    392
    393	return buf_len;
    394}
    395
    396static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
    397		struct device_attribute *attr,
    398		char *buf)
    399{
    400	struct drm_device *ddev = dev_get_drvdata(dev);
    401	struct amdgpu_device *adev = drm_to_adev(ddev);
    402	struct pp_states_info data = {0};
    403	enum amd_pm_state_type pm = 0;
    404	int i = 0, ret = 0;
    405
    406	if (amdgpu_in_reset(adev))
    407		return -EPERM;
    408	if (adev->in_suspend && !adev->in_runpm)
    409		return -EPERM;
    410
    411	ret = pm_runtime_get_sync(ddev->dev);
    412	if (ret < 0) {
    413		pm_runtime_put_autosuspend(ddev->dev);
    414		return ret;
    415	}
    416
    417	amdgpu_dpm_get_current_power_state(adev, &pm);
    418
    419	ret = amdgpu_dpm_get_pp_num_states(adev, &data);
    420
    421	pm_runtime_mark_last_busy(ddev->dev);
    422	pm_runtime_put_autosuspend(ddev->dev);
    423
    424	if (ret)
    425		return ret;
    426
    427	for (i = 0; i < data.nums; i++) {
    428		if (pm == data.states[i])
    429			break;
    430	}
    431
    432	if (i == data.nums)
    433		i = -EINVAL;
    434
    435	return sysfs_emit(buf, "%d\n", i);
    436}
    437
    438static ssize_t amdgpu_get_pp_force_state(struct device *dev,
    439		struct device_attribute *attr,
    440		char *buf)
    441{
    442	struct drm_device *ddev = dev_get_drvdata(dev);
    443	struct amdgpu_device *adev = drm_to_adev(ddev);
    444
    445	if (amdgpu_in_reset(adev))
    446		return -EPERM;
    447	if (adev->in_suspend && !adev->in_runpm)
    448		return -EPERM;
    449
    450	if (adev->pm.pp_force_state_enabled)
    451		return amdgpu_get_pp_cur_state(dev, attr, buf);
    452	else
    453		return sysfs_emit(buf, "\n");
    454}
    455
    456static ssize_t amdgpu_set_pp_force_state(struct device *dev,
    457		struct device_attribute *attr,
    458		const char *buf,
    459		size_t count)
    460{
    461	struct drm_device *ddev = dev_get_drvdata(dev);
    462	struct amdgpu_device *adev = drm_to_adev(ddev);
    463	enum amd_pm_state_type state = 0;
    464	struct pp_states_info data;
    465	unsigned long idx;
    466	int ret;
    467
    468	if (amdgpu_in_reset(adev))
    469		return -EPERM;
    470	if (adev->in_suspend && !adev->in_runpm)
    471		return -EPERM;
    472
    473	adev->pm.pp_force_state_enabled = false;
    474
    475	if (strlen(buf) == 1)
    476		return count;
    477
    478	ret = kstrtoul(buf, 0, &idx);
    479	if (ret || idx >= ARRAY_SIZE(data.states))
    480		return -EINVAL;
    481
    482	idx = array_index_nospec(idx, ARRAY_SIZE(data.states));
    483
    484	ret = pm_runtime_get_sync(ddev->dev);
    485	if (ret < 0) {
    486		pm_runtime_put_autosuspend(ddev->dev);
    487		return ret;
    488	}
    489
    490	ret = amdgpu_dpm_get_pp_num_states(adev, &data);
    491	if (ret)
    492		goto err_out;
    493
    494	state = data.states[idx];
    495
    496	/* only set user selected power states */
    497	if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
    498	    state != POWER_STATE_TYPE_DEFAULT) {
    499		ret = amdgpu_dpm_dispatch_task(adev,
    500				AMD_PP_TASK_ENABLE_USER_STATE, &state);
    501		if (ret)
    502			goto err_out;
    503
    504		adev->pm.pp_force_state_enabled = true;
    505	}
    506
    507	pm_runtime_mark_last_busy(ddev->dev);
    508	pm_runtime_put_autosuspend(ddev->dev);
    509
    510	return count;
    511
    512err_out:
    513	pm_runtime_mark_last_busy(ddev->dev);
    514	pm_runtime_put_autosuspend(ddev->dev);
    515	return ret;
    516}
    517
    518/**
    519 * DOC: pp_table
    520 *
    521 * The amdgpu driver provides a sysfs API for uploading new powerplay
    522 * tables.  The file pp_table is used for this.  Reading the file
    523 * will dump the current power play table.  Writing to the file
    524 * will attempt to upload a new powerplay table and re-initialize
    525 * powerplay using that new table.
    526 *
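         * For example, the current table can be saved and a modified copy
         * uploaded later (assuming card0; the table is raw binary data):
         *
         * .. code-block:: bash
         *
         *	cat /sys/class/drm/card0/device/pp_table > saved_pp_table.bin
         *	cat modified_pp_table.bin > /sys/class/drm/card0/device/pp_table
         *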
    527 */
    528
    529static ssize_t amdgpu_get_pp_table(struct device *dev,
    530		struct device_attribute *attr,
    531		char *buf)
    532{
    533	struct drm_device *ddev = dev_get_drvdata(dev);
    534	struct amdgpu_device *adev = drm_to_adev(ddev);
    535	char *table = NULL;
    536	int size, ret;
    537
    538	if (amdgpu_in_reset(adev))
    539		return -EPERM;
    540	if (adev->in_suspend && !adev->in_runpm)
    541		return -EPERM;
    542
    543	ret = pm_runtime_get_sync(ddev->dev);
    544	if (ret < 0) {
    545		pm_runtime_put_autosuspend(ddev->dev);
    546		return ret;
    547	}
    548
    549	size = amdgpu_dpm_get_pp_table(adev, &table);
    550
    551	pm_runtime_mark_last_busy(ddev->dev);
    552	pm_runtime_put_autosuspend(ddev->dev);
    553
    554	if (size <= 0)
    555		return size;
    556
    557	if (size >= PAGE_SIZE)
    558		size = PAGE_SIZE - 1;
    559
    560	memcpy(buf, table, size);
    561
    562	return size;
    563}
    564
    565static ssize_t amdgpu_set_pp_table(struct device *dev,
    566		struct device_attribute *attr,
    567		const char *buf,
    568		size_t count)
    569{
    570	struct drm_device *ddev = dev_get_drvdata(dev);
    571	struct amdgpu_device *adev = drm_to_adev(ddev);
    572	int ret = 0;
    573
    574	if (amdgpu_in_reset(adev))
    575		return -EPERM;
    576	if (adev->in_suspend && !adev->in_runpm)
    577		return -EPERM;
    578
    579	ret = pm_runtime_get_sync(ddev->dev);
    580	if (ret < 0) {
    581		pm_runtime_put_autosuspend(ddev->dev);
    582		return ret;
    583	}
    584
    585	ret = amdgpu_dpm_set_pp_table(adev, buf, count);
    586
    587	pm_runtime_mark_last_busy(ddev->dev);
    588	pm_runtime_put_autosuspend(ddev->dev);
    589
    590	if (ret)
    591		return ret;
    592
    593	return count;
    594}
    595
    596/**
    597 * DOC: pp_od_clk_voltage
    598 *
    599 * The amdgpu driver provides a sysfs API for adjusting the clocks and voltages
    600 * in each power level within a power state.  The pp_od_clk_voltage is used for
    601 * this.
    602 *
     603 * Note that the actual memory controller clock rate is exposed, not
    604 * the effective memory clock of the DRAMs. To translate it, use the
    605 * following formula:
    606 *
     607 * Clock conversion (MHz):
    608 *
    609 * HBM: effective_memory_clock = memory_controller_clock * 1
    610 *
    611 * G5: effective_memory_clock = memory_controller_clock * 1
    612 *
    613 * G6: effective_memory_clock = memory_controller_clock * 2
    614 *
    615 * DRAM data rate (MT/s):
    616 *
    617 * HBM: effective_memory_clock * 2 = data_rate
    618 *
    619 * G5: effective_memory_clock * 4 = data_rate
    620 *
    621 * G6: effective_memory_clock * 8 = data_rate
    622 *
    623 * Bandwidth (MB/s):
    624 *
    625 * data_rate * vram_bit_width / 8 = memory_bandwidth
    626 *
    627 * Some examples:
    628 *
    629 * G5 on RX460:
    630 *
     631 * memory_controller_clock = 1750 MHz
     632 *
     633 * effective_memory_clock = 1750 MHz * 1 = 1750 MHz
    634 *
    635 * data rate = 1750 * 4 = 7000 MT/s
    636 *
    637 * memory_bandwidth = 7000 * 128 bits / 8 = 112000 MB/s
    638 *
    639 * G6 on RX5700:
    640 *
     641 * memory_controller_clock = 875 MHz
     642 *
     643 * effective_memory_clock = 875 MHz * 2 = 1750 MHz
    644 *
    645 * data rate = 1750 * 8 = 14000 MT/s
    646 *
    647 * memory_bandwidth = 14000 * 256 bits / 8 = 448000 MB/s
    648 *
    649 * < For Vega10 and previous ASICs >
    650 *
    651 * Reading the file will display:
    652 *
    653 * - a list of engine clock levels and voltages labeled OD_SCLK
    654 *
    655 * - a list of memory clock levels and voltages labeled OD_MCLK
    656 *
    657 * - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE
    658 *
    659 * To manually adjust these settings, first select manual using
    660 * power_dpm_force_performance_level. Enter a new value for each
    661 * level by writing a string that contains "s/m level clock voltage" to
    662 * the file.  E.g., "s 1 500 820" will update sclk level 1 to be 500 MHz
    663 * at 820 mV; "m 0 350 810" will update mclk level 0 to be 350 MHz at
    664 * 810 mV.  When you have edited all of the states as needed, write
    665 * "c" (commit) to the file to commit your changes.  If you want to reset to the
    666 * default power levels, write "r" (reset) to the file to reset them.
    667 *
    668 *
    669 * < For Vega20 and newer ASICs >
    670 *
    671 * Reading the file will display:
    672 *
    673 * - minimum and maximum engine clock labeled OD_SCLK
    674 *
     675 * - minimum (not available for Vega20 and Navi1x) and maximum memory
    676 *   clock labeled OD_MCLK
    677 *
    678 * - three <frequency, voltage> points labeled OD_VDDC_CURVE.
    679 *   They can be used to calibrate the sclk voltage curve.
    680 *
     681 * - voltage offset (in mV) applied to the target voltage calculation.
    682 *   This is available for Sienna Cichlid, Navy Flounder and Dimgrey
    683 *   Cavefish. For these ASICs, the target voltage calculation can be
    684 *   illustrated by "voltage = voltage calculated from v/f curve +
    685 *   overdrive vddgfx offset"
    686 *
    687 * - a list of valid ranges for sclk, mclk, and voltage curve points
    688 *   labeled OD_RANGE
    689 *
    690 * < For APUs >
    691 *
    692 * Reading the file will display:
    693 *
    694 * - minimum and maximum engine clock labeled OD_SCLK
    695 *
    696 * - a list of valid ranges for sclk labeled OD_RANGE
    697 *
    698 * < For VanGogh >
    699 *
    700 * Reading the file will display:
    701 *
    702 * - minimum and maximum engine clock labeled OD_SCLK
    703 * - minimum and maximum core clocks labeled OD_CCLK
    704 *
    705 * - a list of valid ranges for sclk and cclk labeled OD_RANGE
    706 *
    707 * To manually adjust these settings:
    708 *
    709 * - First select manual using power_dpm_force_performance_level
    710 *
     711 * - For clock frequency setting, enter a new value by writing a
     712 *   string that contains "s/m index clock" to the file. The index
     713 *   should be 0 to set the minimum clock and 1 to set the maximum
     714 *   clock. E.g., "s 0 500" will update the minimum sclk to 500 MHz.
     715 *   "m 1 800" will update the maximum mclk to 800 MHz. For core
     716 *   clocks on VanGogh, the string contains "p core index clock".
     717 *   E.g., "p 2 0 800" would set the minimum core clock on core
     718 *   2 to 800 MHz.
    719 *
     720 *   For the sclk voltage curve, enter the new values by writing a
     721 *   string that contains "vc point clock voltage" to the file. The
     722 *   points are indexed by 0, 1 and 2. E.g., "vc 0 300 600" will
     723 *   update the first point with the clock set to 300 MHz and the
     724 *   voltage to 600 mV; "vc 2 1000 1000" will update the third point
     725 *   with the clock set to 1000 MHz and the voltage to 1000 mV.
    726 *
     727 *   To update the voltage offset applied in the gfxclk/voltage calculation,
     728 *   enter the new value by writing a string that contains "vo offset".
     729 *   This is supported by Sienna Cichlid, Navy Flounder and Dimgrey Cavefish;
     730 *   the offset can be positive or negative.
    731 *
    732 * - When you have edited all of the states as needed, write "c" (commit)
    733 *   to the file to commit your changes
    734 *
    735 * - If you want to reset to the default power levels, write "r" (reset)
    736 *   to the file to reset them
    737 *
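         * For example, on a pre-Vega20 ASIC, raising sclk level 1 could look
         * like this (assuming card0; the clock and voltage values are
         * illustrative and must stay within OD_RANGE):
         *
         * .. code-block:: bash
         *
         *	cd /sys/class/drm/card0/device
         *	echo manual > power_dpm_force_performance_level
         *	echo "s 1 500 820" > pp_od_clk_voltage
         *	echo "c" > pp_od_clk_voltage
         *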
    738 */
    739
    740static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
    741		struct device_attribute *attr,
    742		const char *buf,
    743		size_t count)
    744{
    745	struct drm_device *ddev = dev_get_drvdata(dev);
    746	struct amdgpu_device *adev = drm_to_adev(ddev);
    747	int ret;
    748	uint32_t parameter_size = 0;
    749	long parameter[64];
    750	char buf_cpy[128];
    751	char *tmp_str;
    752	char *sub_str;
    753	const char delimiter[3] = {' ', '\n', '\0'};
    754	uint32_t type;
    755
    756	if (amdgpu_in_reset(adev))
    757		return -EPERM;
    758	if (adev->in_suspend && !adev->in_runpm)
    759		return -EPERM;
    760
    761	if (count > 127)
    762		return -EINVAL;
    763
    764	if (*buf == 's')
    765		type = PP_OD_EDIT_SCLK_VDDC_TABLE;
    766	else if (*buf == 'p')
    767		type = PP_OD_EDIT_CCLK_VDDC_TABLE;
    768	else if (*buf == 'm')
    769		type = PP_OD_EDIT_MCLK_VDDC_TABLE;
     770	else if (*buf == 'r')
    771		type = PP_OD_RESTORE_DEFAULT_TABLE;
    772	else if (*buf == 'c')
    773		type = PP_OD_COMMIT_DPM_TABLE;
    774	else if (!strncmp(buf, "vc", 2))
    775		type = PP_OD_EDIT_VDDC_CURVE;
    776	else if (!strncmp(buf, "vo", 2))
    777		type = PP_OD_EDIT_VDDGFX_OFFSET;
    778	else
    779		return -EINVAL;
    780
    781	memcpy(buf_cpy, buf, count+1);
    782
    783	tmp_str = buf_cpy;
    784
    785	if ((type == PP_OD_EDIT_VDDC_CURVE) ||
    786	     (type == PP_OD_EDIT_VDDGFX_OFFSET))
    787		tmp_str++;
    788	while (isspace(*++tmp_str));
    789
    790	while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
    791		if (strlen(sub_str) == 0)
    792			continue;
    793		ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
    794		if (ret)
    795			return -EINVAL;
    796		parameter_size++;
    797
    798		while (isspace(*tmp_str))
    799			tmp_str++;
    800	}
    801
    802	ret = pm_runtime_get_sync(ddev->dev);
    803	if (ret < 0) {
    804		pm_runtime_put_autosuspend(ddev->dev);
    805		return ret;
    806	}
    807
    808	if (amdgpu_dpm_set_fine_grain_clk_vol(adev,
    809					      type,
    810					      parameter,
    811					      parameter_size))
    812		goto err_out;
    813
    814	if (amdgpu_dpm_odn_edit_dpm_table(adev, type,
    815					  parameter, parameter_size))
    816		goto err_out;
    817
    818	if (type == PP_OD_COMMIT_DPM_TABLE) {
    819		if (amdgpu_dpm_dispatch_task(adev,
    820					     AMD_PP_TASK_READJUST_POWER_STATE,
    821					     NULL))
    822			goto err_out;
    823	}
    824
    825	pm_runtime_mark_last_busy(ddev->dev);
    826	pm_runtime_put_autosuspend(ddev->dev);
    827
    828	return count;
    829
    830err_out:
    831	pm_runtime_mark_last_busy(ddev->dev);
    832	pm_runtime_put_autosuspend(ddev->dev);
    833	return -EINVAL;
    834}
    835
    836static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
    837		struct device_attribute *attr,
    838		char *buf)
    839{
    840	struct drm_device *ddev = dev_get_drvdata(dev);
    841	struct amdgpu_device *adev = drm_to_adev(ddev);
    842	int size = 0;
    843	int ret;
    844	enum pp_clock_type od_clocks[6] = {
    845		OD_SCLK,
    846		OD_MCLK,
    847		OD_VDDC_CURVE,
    848		OD_RANGE,
    849		OD_VDDGFX_OFFSET,
    850		OD_CCLK,
    851	};
    852	uint clk_index;
    853
    854	if (amdgpu_in_reset(adev))
    855		return -EPERM;
    856	if (adev->in_suspend && !adev->in_runpm)
    857		return -EPERM;
    858
    859	ret = pm_runtime_get_sync(ddev->dev);
    860	if (ret < 0) {
    861		pm_runtime_put_autosuspend(ddev->dev);
    862		return ret;
    863	}
    864
     865	for (clk_index = 0; clk_index < 6; clk_index++) {
    866		ret = amdgpu_dpm_emit_clock_levels(adev, od_clocks[clk_index], buf, &size);
    867		if (ret)
    868			break;
    869	}
    870	if (ret == -ENOENT) {
    871		size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
    872		if (size > 0) {
    873			size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf + size);
    874			size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf + size);
    875			size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf + size);
    876			size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf + size);
    877			size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf + size);
    878		}
    879	}
    880
    881	if (size == 0)
    882		size = sysfs_emit(buf, "\n");
    883
    884	pm_runtime_mark_last_busy(ddev->dev);
    885	pm_runtime_put_autosuspend(ddev->dev);
    886
    887	return size;
    888}
    889
    890/**
    891 * DOC: pp_features
    892 *
     893 * The amdgpu driver provides a sysfs API for adjusting which powerplay
     894 * features are enabled. The file pp_features is used for this. It is
     895 * only available for Vega10 and later dGPUs.
     896 *
     897 * Reading back the file will show you the following:
     898 * - Current ppfeature masks
     899 * - List of all supported powerplay features with their names,
     900 *   bitmasks and enablement status ('Y'/'N' means "enabled"/"disabled").
    901 *
    902 * To manually enable or disable a specific feature, just set or clear
    903 * the corresponding bit from original ppfeature masks and input the
    904 * new ppfeature masks.
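         *
         * For example, to disable the feature at bit 10, clear that bit in
         * the mask read back from the file and write the result (the mask
         * below is illustrative):
         *
         * .. code-block:: bash
         *
         *	cat /sys/class/drm/card0/device/pp_features
         *	echo 0xfffffffffffffbff > /sys/class/drm/card0/device/pp_features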
    905 */
    906static ssize_t amdgpu_set_pp_features(struct device *dev,
    907				      struct device_attribute *attr,
    908				      const char *buf,
    909				      size_t count)
    910{
    911	struct drm_device *ddev = dev_get_drvdata(dev);
    912	struct amdgpu_device *adev = drm_to_adev(ddev);
    913	uint64_t featuremask;
    914	int ret;
    915
    916	if (amdgpu_in_reset(adev))
    917		return -EPERM;
    918	if (adev->in_suspend && !adev->in_runpm)
    919		return -EPERM;
    920
    921	ret = kstrtou64(buf, 0, &featuremask);
    922	if (ret)
    923		return -EINVAL;
    924
    925	ret = pm_runtime_get_sync(ddev->dev);
    926	if (ret < 0) {
    927		pm_runtime_put_autosuspend(ddev->dev);
    928		return ret;
    929	}
    930
    931	ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
    932
    933	pm_runtime_mark_last_busy(ddev->dev);
    934	pm_runtime_put_autosuspend(ddev->dev);
    935
    936	if (ret)
    937		return -EINVAL;
    938
    939	return count;
    940}
    941
    942static ssize_t amdgpu_get_pp_features(struct device *dev,
    943				      struct device_attribute *attr,
    944				      char *buf)
    945{
    946	struct drm_device *ddev = dev_get_drvdata(dev);
    947	struct amdgpu_device *adev = drm_to_adev(ddev);
    948	ssize_t size;
    949	int ret;
    950
    951	if (amdgpu_in_reset(adev))
    952		return -EPERM;
    953	if (adev->in_suspend && !adev->in_runpm)
    954		return -EPERM;
    955
    956	ret = pm_runtime_get_sync(ddev->dev);
    957	if (ret < 0) {
    958		pm_runtime_put_autosuspend(ddev->dev);
    959		return ret;
    960	}
    961
    962	size = amdgpu_dpm_get_ppfeature_status(adev, buf);
    963	if (size <= 0)
    964		size = sysfs_emit(buf, "\n");
    965
    966	pm_runtime_mark_last_busy(ddev->dev);
    967	pm_runtime_put_autosuspend(ddev->dev);
    968
    969	return size;
    970}
    971
    972/**
    973 * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie
    974 *
    975 * The amdgpu driver provides a sysfs API for adjusting what power levels
    976 * are enabled for a given power state.  The files pp_dpm_sclk, pp_dpm_mclk,
    977 * pp_dpm_socclk, pp_dpm_fclk, pp_dpm_dcefclk and pp_dpm_pcie are used for
    978 * this.
    979 *
    980 * pp_dpm_socclk and pp_dpm_dcefclk interfaces are only available for
    981 * Vega10 and later ASICs.
    982 * pp_dpm_fclk interface is only available for Vega20 and later ASICs.
    983 *
    984 * Reading back the files will show you the available power levels within
    985 * the power state and the clock information for those levels.
    986 *
    987 * To manually adjust these states, first select manual using
    988 * power_dpm_force_performance_level.
     989 * Secondly, enable the desired levels by writing a string of
     990 * space-separated level indices to pp_dpm_sclk/mclk/pcie.
    991 * E.g.,
    992 *
    993 * .. code-block:: bash
    994 *
    995 *	echo "4 5 6" > pp_dpm_sclk
    996 *
    997 * will enable sclk levels 4, 5, and 6.
    998 *
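         * Each line of the read-back output shows a level index and its
         * clock, with "*" marking the currently active level(s); the exact
         * format varies by ASIC. Illustrative output:
         *
         * .. code-block:: bash
         *
         *	$ cat pp_dpm_sclk
         *	0: 300Mhz
         *	1: 600Mhz *
         *	2: 1200Mhz
         *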
     999 * NOTE: changing the dcefclk max dpm level is not currently supported
   1000 */
   1001
   1002static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
   1003		enum pp_clock_type type,
   1004		char *buf)
   1005{
   1006	struct drm_device *ddev = dev_get_drvdata(dev);
   1007	struct amdgpu_device *adev = drm_to_adev(ddev);
   1008	int size = 0;
   1009	int ret = 0;
   1010
   1011	if (amdgpu_in_reset(adev))
   1012		return -EPERM;
   1013	if (adev->in_suspend && !adev->in_runpm)
   1014		return -EPERM;
   1015
   1016	ret = pm_runtime_get_sync(ddev->dev);
   1017	if (ret < 0) {
   1018		pm_runtime_put_autosuspend(ddev->dev);
   1019		return ret;
   1020	}
   1021
   1022	ret = amdgpu_dpm_emit_clock_levels(adev, type, buf, &size);
   1023	if (ret == -ENOENT)
   1024		size = amdgpu_dpm_print_clock_levels(adev, type, buf);
   1025
   1026	if (size == 0)
   1027		size = sysfs_emit(buf, "\n");
   1028
   1029	pm_runtime_mark_last_busy(ddev->dev);
   1030	pm_runtime_put_autosuspend(ddev->dev);
   1031
   1032	return size;
   1033}
   1034
   1035/*
   1036 * Worst case: 32 bits individually specified, in octal at 12 characters
   1037 * per line (+1 for \n).
   1038 */
   1039#define AMDGPU_MASK_BUF_MAX	(32 * 13)
   1040
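        /*
         * Parse a space-separated list of level indices from a sysfs write
         * into a bitmask, e.g. "4 5 6" -> 0x70. Indices above 31 (or
         * non-numeric tokens) yield -EINVAL.
         */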
   1041static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
   1042{
   1043	int ret;
   1044	unsigned long level;
   1045	char *sub_str = NULL;
   1046	char *tmp;
   1047	char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
   1048	const char delimiter[3] = {' ', '\n', '\0'};
   1049	size_t bytes;
   1050
   1051	*mask = 0;
   1052
   1053	bytes = min(count, sizeof(buf_cpy) - 1);
   1054	memcpy(buf_cpy, buf, bytes);
   1055	buf_cpy[bytes] = '\0';
   1056	tmp = buf_cpy;
   1057	while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
   1058		if (strlen(sub_str)) {
   1059			ret = kstrtoul(sub_str, 0, &level);
   1060			if (ret || level > 31)
   1061				return -EINVAL;
   1062			*mask |= 1 << level;
   1063		} else
   1064			break;
   1065	}
   1066
   1067	return 0;
   1068}
   1069
   1070static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev,
   1071		enum pp_clock_type type,
   1072		const char *buf,
   1073		size_t count)
   1074{
   1075	struct drm_device *ddev = dev_get_drvdata(dev);
   1076	struct amdgpu_device *adev = drm_to_adev(ddev);
   1077	int ret;
   1078	uint32_t mask = 0;
   1079
   1080	if (amdgpu_in_reset(adev))
   1081		return -EPERM;
   1082	if (adev->in_suspend && !adev->in_runpm)
   1083		return -EPERM;
   1084
   1085	ret = amdgpu_read_mask(buf, count, &mask);
   1086	if (ret)
   1087		return ret;
   1088
   1089	ret = pm_runtime_get_sync(ddev->dev);
   1090	if (ret < 0) {
   1091		pm_runtime_put_autosuspend(ddev->dev);
   1092		return ret;
   1093	}
   1094
   1095	ret = amdgpu_dpm_force_clock_level(adev, type, mask);
   1096
   1097	pm_runtime_mark_last_busy(ddev->dev);
   1098	pm_runtime_put_autosuspend(ddev->dev);
   1099
   1100	if (ret)
   1101		return -EINVAL;
   1102
   1103	return count;
   1104}
   1105
   1106static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
   1107		struct device_attribute *attr,
   1108		char *buf)
   1109{
   1110	return amdgpu_get_pp_dpm_clock(dev, PP_SCLK, buf);
   1111}
   1112
   1113static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
   1114		struct device_attribute *attr,
   1115		const char *buf,
   1116		size_t count)
   1117{
   1118	return amdgpu_set_pp_dpm_clock(dev, PP_SCLK, buf, count);
   1119}
   1120
   1121static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
   1122		struct device_attribute *attr,
   1123		char *buf)
   1124{
   1125	return amdgpu_get_pp_dpm_clock(dev, PP_MCLK, buf);
   1126}
   1127
   1128static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
   1129		struct device_attribute *attr,
   1130		const char *buf,
   1131		size_t count)
   1132{
   1133	return amdgpu_set_pp_dpm_clock(dev, PP_MCLK, buf, count);
   1134}
   1135
   1136static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
   1137		struct device_attribute *attr,
   1138		char *buf)
   1139{
   1140	return amdgpu_get_pp_dpm_clock(dev, PP_SOCCLK, buf);
   1141}
   1142
   1143static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
   1144		struct device_attribute *attr,
   1145		const char *buf,
   1146		size_t count)
   1147{
   1148	return amdgpu_set_pp_dpm_clock(dev, PP_SOCCLK, buf, count);
   1149}
   1150
   1151static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
   1152		struct device_attribute *attr,
   1153		char *buf)
   1154{
   1155	return amdgpu_get_pp_dpm_clock(dev, PP_FCLK, buf);
   1156}
   1157
   1158static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
   1159		struct device_attribute *attr,
   1160		const char *buf,
   1161		size_t count)
   1162{
   1163	return amdgpu_set_pp_dpm_clock(dev, PP_FCLK, buf, count);
   1164}
   1165
   1166static ssize_t amdgpu_get_pp_dpm_vclk(struct device *dev,
   1167		struct device_attribute *attr,
   1168		char *buf)
   1169{
   1170	return amdgpu_get_pp_dpm_clock(dev, PP_VCLK, buf);
   1171}
   1172
   1173static ssize_t amdgpu_set_pp_dpm_vclk(struct device *dev,
   1174		struct device_attribute *attr,
   1175		const char *buf,
   1176		size_t count)
   1177{
   1178	return amdgpu_set_pp_dpm_clock(dev, PP_VCLK, buf, count);
   1179}
   1180
   1181static ssize_t amdgpu_get_pp_dpm_dclk(struct device *dev,
   1182		struct device_attribute *attr,
   1183		char *buf)
   1184{
   1185	return amdgpu_get_pp_dpm_clock(dev, PP_DCLK, buf);
   1186}
   1187
   1188static ssize_t amdgpu_set_pp_dpm_dclk(struct device *dev,
   1189		struct device_attribute *attr,
   1190		const char *buf,
   1191		size_t count)
   1192{
   1193	return amdgpu_set_pp_dpm_clock(dev, PP_DCLK, buf, count);
   1194}
   1195
   1196static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
   1197		struct device_attribute *attr,
   1198		char *buf)
   1199{
   1200	return amdgpu_get_pp_dpm_clock(dev, PP_DCEFCLK, buf);
   1201}
   1202
   1203static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
   1204		struct device_attribute *attr,
   1205		const char *buf,
   1206		size_t count)
   1207{
   1208	return amdgpu_set_pp_dpm_clock(dev, PP_DCEFCLK, buf, count);
   1209}
   1210
   1211static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
   1212		struct device_attribute *attr,
   1213		char *buf)
   1214{
   1215	return amdgpu_get_pp_dpm_clock(dev, PP_PCIE, buf);
   1216}
   1217
   1218static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
   1219		struct device_attribute *attr,
   1220		const char *buf,
   1221		size_t count)
   1222{
   1223	return amdgpu_set_pp_dpm_clock(dev, PP_PCIE, buf, count);
   1224}
   1225
   1226static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
   1227		struct device_attribute *attr,
   1228		char *buf)
   1229{
   1230	struct drm_device *ddev = dev_get_drvdata(dev);
   1231	struct amdgpu_device *adev = drm_to_adev(ddev);
   1232	uint32_t value = 0;
   1233	int ret;
   1234
   1235	if (amdgpu_in_reset(adev))
   1236		return -EPERM;
   1237	if (adev->in_suspend && !adev->in_runpm)
   1238		return -EPERM;
   1239
   1240	ret = pm_runtime_get_sync(ddev->dev);
   1241	if (ret < 0) {
   1242		pm_runtime_put_autosuspend(ddev->dev);
   1243		return ret;
   1244	}
   1245
   1246	value = amdgpu_dpm_get_sclk_od(adev);
   1247
   1248	pm_runtime_mark_last_busy(ddev->dev);
   1249	pm_runtime_put_autosuspend(ddev->dev);
   1250
   1251	return sysfs_emit(buf, "%d\n", value);
   1252}
   1253
   1254static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
   1255		struct device_attribute *attr,
   1256		const char *buf,
   1257		size_t count)
   1258{
   1259	struct drm_device *ddev = dev_get_drvdata(dev);
   1260	struct amdgpu_device *adev = drm_to_adev(ddev);
   1261	int ret;
   1262	long int value;
   1263
   1264	if (amdgpu_in_reset(adev))
   1265		return -EPERM;
   1266	if (adev->in_suspend && !adev->in_runpm)
   1267		return -EPERM;
   1268
   1269	ret = kstrtol(buf, 0, &value);
   1270
   1271	if (ret)
   1272		return -EINVAL;
   1273
   1274	ret = pm_runtime_get_sync(ddev->dev);
   1275	if (ret < 0) {
   1276		pm_runtime_put_autosuspend(ddev->dev);
   1277		return ret;
   1278	}
   1279
   1280	amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
   1281
   1282	pm_runtime_mark_last_busy(ddev->dev);
   1283	pm_runtime_put_autosuspend(ddev->dev);
   1284
   1285	return count;
   1286}
   1287
   1288static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
   1289		struct device_attribute *attr,
   1290		char *buf)
   1291{
   1292	struct drm_device *ddev = dev_get_drvdata(dev);
   1293	struct amdgpu_device *adev = drm_to_adev(ddev);
   1294	uint32_t value = 0;
   1295	int ret;
   1296
   1297	if (amdgpu_in_reset(adev))
   1298		return -EPERM;
   1299	if (adev->in_suspend && !adev->in_runpm)
   1300		return -EPERM;
   1301
   1302	ret = pm_runtime_get_sync(ddev->dev);
   1303	if (ret < 0) {
   1304		pm_runtime_put_autosuspend(ddev->dev);
   1305		return ret;
   1306	}
   1307
   1308	value = amdgpu_dpm_get_mclk_od(adev);
   1309
   1310	pm_runtime_mark_last_busy(ddev->dev);
   1311	pm_runtime_put_autosuspend(ddev->dev);
   1312
   1313	return sysfs_emit(buf, "%d\n", value);
   1314}
   1315
   1316static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
   1317		struct device_attribute *attr,
   1318		const char *buf,
   1319		size_t count)
   1320{
   1321	struct drm_device *ddev = dev_get_drvdata(dev);
   1322	struct amdgpu_device *adev = drm_to_adev(ddev);
   1323	int ret;
   1324	long int value;
   1325
   1326	if (amdgpu_in_reset(adev))
   1327		return -EPERM;
   1328	if (adev->in_suspend && !adev->in_runpm)
   1329		return -EPERM;
   1330
   1331	ret = kstrtol(buf, 0, &value);
   1332
   1333	if (ret)
   1334		return -EINVAL;
   1335
   1336	ret = pm_runtime_get_sync(ddev->dev);
   1337	if (ret < 0) {
   1338		pm_runtime_put_autosuspend(ddev->dev);
   1339		return ret;
   1340	}
   1341
   1342	amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
   1343
   1344	pm_runtime_mark_last_busy(ddev->dev);
   1345	pm_runtime_put_autosuspend(ddev->dev);
   1346
   1347	return count;
   1348}
   1349
   1350/**
   1351 * DOC: pp_power_profile_mode
   1352 *
   1353 * The amdgpu driver provides a sysfs API for adjusting the heuristics
   1354 * related to switching between power levels in a power state.  The file
   1355 * pp_power_profile_mode is used for this.
   1356 *
   1357 * Reading this file outputs a list of all of the predefined power profiles
   1358 * and the relevant heuristics settings for that profile.
   1359 *
   1360 * To select a profile or create a custom profile, first select manual using
   1361 * power_dpm_force_performance_level.  Writing the number of a predefined
   1362 * profile to pp_power_profile_mode will enable those heuristics.  To
   1363 * create a custom set of heuristics, write a string of numbers to the file
   1364 * starting with the number of the custom profile along with a setting
   1365 * for each heuristic parameter.  Due to differences across asic families
   1366 * the heuristic parameters vary from family to family.
   1367 *
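         * For example, to switch to a predefined profile by its listed index
         * (assuming card0; profile numbering is taken from the file itself):
         *
         * .. code-block:: bash
         *
         *	cd /sys/class/drm/card0/device
         *	echo manual > power_dpm_force_performance_level
         *	cat pp_power_profile_mode
         *	echo 1 > pp_power_profile_mode
         *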
   1368 */
   1369
   1370static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
   1371		struct device_attribute *attr,
   1372		char *buf)
   1373{
   1374	struct drm_device *ddev = dev_get_drvdata(dev);
   1375	struct amdgpu_device *adev = drm_to_adev(ddev);
   1376	ssize_t size;
   1377	int ret;
   1378
   1379	if (amdgpu_in_reset(adev))
   1380		return -EPERM;
   1381	if (adev->in_suspend && !adev->in_runpm)
   1382		return -EPERM;
   1383
   1384	ret = pm_runtime_get_sync(ddev->dev);
   1385	if (ret < 0) {
   1386		pm_runtime_put_autosuspend(ddev->dev);
   1387		return ret;
   1388	}
   1389
   1390	size = amdgpu_dpm_get_power_profile_mode(adev, buf);
   1391	if (size <= 0)
   1392		size = sysfs_emit(buf, "\n");
   1393
   1394	pm_runtime_mark_last_busy(ddev->dev);
   1395	pm_runtime_put_autosuspend(ddev->dev);
   1396
   1397	return size;
   1398}
   1399
   1400
   1401static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
   1402		struct device_attribute *attr,
   1403		const char *buf,
   1404		size_t count)
   1405{
   1406	int ret;
   1407	struct drm_device *ddev = dev_get_drvdata(dev);
   1408	struct amdgpu_device *adev = drm_to_adev(ddev);
   1409	uint32_t parameter_size = 0;
   1410	long parameter[64];
   1411	char *sub_str, buf_cpy[128];
   1412	char *tmp_str;
   1413	uint32_t i = 0;
   1414	char tmp[2];
   1415	long int profile_mode = 0;
   1416	const char delimiter[3] = {' ', '\n', '\0'};
   1417
   1418	if (amdgpu_in_reset(adev))
   1419		return -EPERM;
   1420	if (adev->in_suspend && !adev->in_runpm)
   1421		return -EPERM;
   1422
   1423	tmp[0] = *(buf);
   1424	tmp[1] = '\0';
   1425	ret = kstrtol(tmp, 0, &profile_mode);
   1426	if (ret)
   1427		return -EINVAL;
   1428
   1429	if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
   1430		if (count < 2 || count > 127)
   1431			return -EINVAL;
   1432		while (isspace(*++buf))
   1433			i++;
   1434		memcpy(buf_cpy, buf, count-i);
   1435		tmp_str = buf_cpy;
   1436		while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
   1437			if (strlen(sub_str) == 0)
   1438				continue;
   1439			ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
   1440			if (ret)
   1441				return -EINVAL;
   1442			parameter_size++;
   1443			while (isspace(*tmp_str))
   1444				tmp_str++;
   1445		}
   1446	}
   1447	parameter[parameter_size] = profile_mode;
   1448
   1449	ret = pm_runtime_get_sync(ddev->dev);
   1450	if (ret < 0) {
   1451		pm_runtime_put_autosuspend(ddev->dev);
   1452		return ret;
   1453	}
   1454
   1455	ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
   1456
   1457	pm_runtime_mark_last_busy(ddev->dev);
   1458	pm_runtime_put_autosuspend(ddev->dev);
   1459
   1460	if (!ret)
   1461		return count;
   1462
   1463	return -EINVAL;
   1464}
   1465
   1466/**
   1467 * DOC: gpu_busy_percent
   1468 *
   1469 * The amdgpu driver provides a sysfs API for reading how busy the GPU
   1470 * is as a percentage.  The file gpu_busy_percent is used for this.
   1471 * The SMU firmware computes a percentage of load based on the
   1472 * aggregate activity level in the IP cores.
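         *
         * For example (assuming card0):
         *
         * .. code-block:: bash
         *
         *	cat /sys/class/drm/card0/device/gpu_busy_percent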
   1473 */
   1474static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
   1475					   struct device_attribute *attr,
   1476					   char *buf)
   1477{
   1478	struct drm_device *ddev = dev_get_drvdata(dev);
   1479	struct amdgpu_device *adev = drm_to_adev(ddev);
   1480	int r, value, size = sizeof(value);
   1481
   1482	if (amdgpu_in_reset(adev))
   1483		return -EPERM;
   1484	if (adev->in_suspend && !adev->in_runpm)
   1485		return -EPERM;
   1486
   1487	r = pm_runtime_get_sync(ddev->dev);
   1488	if (r < 0) {
   1489		pm_runtime_put_autosuspend(ddev->dev);
   1490		return r;
   1491	}
   1492
   1493	/* read the IP busy sensor */
   1494	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
   1495				   (void *)&value, &size);
   1496
   1497	pm_runtime_mark_last_busy(ddev->dev);
   1498	pm_runtime_put_autosuspend(ddev->dev);
   1499
   1500	if (r)
   1501		return r;
   1502
   1503	return sysfs_emit(buf, "%d\n", value);
   1504}
   1505
   1506/**
   1507 * DOC: mem_busy_percent
   1508 *
   1509 * The amdgpu driver provides a sysfs API for reading how busy the VRAM
   1510 * is as a percentage.  The file mem_busy_percent is used for this.
   1511 * The SMU firmware computes a percentage of load based on the
   1512 * aggregate activity level in the IP cores.
   1513 */
   1514static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
   1515					   struct device_attribute *attr,
   1516					   char *buf)
   1517{
   1518	struct drm_device *ddev = dev_get_drvdata(dev);
   1519	struct amdgpu_device *adev = drm_to_adev(ddev);
   1520	int r, value, size = sizeof(value);
   1521
   1522	if (amdgpu_in_reset(adev))
   1523		return -EPERM;
   1524	if (adev->in_suspend && !adev->in_runpm)
   1525		return -EPERM;
   1526
   1527	r = pm_runtime_get_sync(ddev->dev);
   1528	if (r < 0) {
   1529		pm_runtime_put_autosuspend(ddev->dev);
   1530		return r;
   1531	}
   1532
   1533	/* read the IP busy sensor */
   1534	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD,
   1535				   (void *)&value, &size);
   1536
   1537	pm_runtime_mark_last_busy(ddev->dev);
   1538	pm_runtime_put_autosuspend(ddev->dev);
   1539
   1540	if (r)
   1541		return r;
   1542
   1543	return sysfs_emit(buf, "%d\n", value);
   1544}
   1545
   1546/**
   1547 * DOC: pcie_bw
   1548 *
   1549 * The amdgpu driver provides a sysfs API for estimating how much data
   1550 * has been received and sent by the GPU in the last second through PCIe.
   1551 * The file pcie_bw is used for this.
   1552 * The Perf counters count the number of received and sent messages and return
   1553 * those values, as well as the maximum payload size of a PCIe packet (mps).
   1554 * Note that it is not possible to easily and quickly obtain the size of each
   1555 * packet transmitted, so we output the max payload size (mps) to allow for
    1556 * quick estimation of the PCIe bandwidth usage.
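         *
         * For example, an upper-bound estimate of the last second's traffic
         * in bytes is the message counts times mps (a sketch, assuming
         * card0):
         *
         * .. code-block:: bash
         *
         *	read count0 count1 mps < /sys/class/drm/card0/device/pcie_bw
         *	echo $(( (count0 + count1) * mps ))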
   1557 */
   1558static ssize_t amdgpu_get_pcie_bw(struct device *dev,
   1559		struct device_attribute *attr,
   1560		char *buf)
   1561{
   1562	struct drm_device *ddev = dev_get_drvdata(dev);
   1563	struct amdgpu_device *adev = drm_to_adev(ddev);
   1564	uint64_t count0 = 0, count1 = 0;
   1565	int ret;
   1566
   1567	if (amdgpu_in_reset(adev))
   1568		return -EPERM;
   1569	if (adev->in_suspend && !adev->in_runpm)
   1570		return -EPERM;
   1571
   1572	if (adev->flags & AMD_IS_APU)
   1573		return -ENODATA;
   1574
   1575	if (!adev->asic_funcs->get_pcie_usage)
   1576		return -ENODATA;
   1577
   1578	ret = pm_runtime_get_sync(ddev->dev);
   1579	if (ret < 0) {
   1580		pm_runtime_put_autosuspend(ddev->dev);
   1581		return ret;
   1582	}
   1583
   1584	amdgpu_asic_get_pcie_usage(adev, &count0, &count1);
   1585
   1586	pm_runtime_mark_last_busy(ddev->dev);
   1587	pm_runtime_put_autosuspend(ddev->dev);
   1588
   1589	return sysfs_emit(buf, "%llu %llu %i\n",
   1590			  count0, count1, pcie_get_mps(adev->pdev));
   1591}
   1592
   1593/**
   1594 * DOC: unique_id
   1595 *
    1596 * The amdgpu driver provides a sysfs API for providing a unique ID for the GPU.
    1597 * The file unique_id is used for this.
    1598 * This ID persists from machine to machine.
    1599 *
    1600 * NOTE: This will only work for GFX9 and newer. The file will be absent
    1601 * on unsupported ASICs (GFX8 and older).
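         *
         * For example (assuming card0):
         *
         * .. code-block:: bash
         *
         *	cat /sys/class/drm/card0/device/unique_id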
   1602 */
   1603static ssize_t amdgpu_get_unique_id(struct device *dev,
   1604		struct device_attribute *attr,
   1605		char *buf)
   1606{
   1607	struct drm_device *ddev = dev_get_drvdata(dev);
   1608	struct amdgpu_device *adev = drm_to_adev(ddev);
   1609
   1610	if (amdgpu_in_reset(adev))
   1611		return -EPERM;
   1612	if (adev->in_suspend && !adev->in_runpm)
   1613		return -EPERM;
   1614
   1615	if (adev->unique_id)
   1616		return sysfs_emit(buf, "%016llx\n", adev->unique_id);
   1617
   1618	return 0;
   1619}
   1620
   1621/**
   1622 * DOC: thermal_throttling_logging
   1623 *
    1624 * Thermal throttling pulls down the clock frequency and thus the performance.
    1625 * It's a useful mechanism to protect the chip from overheating. Since it
    1626 * impacts performance, the user controls whether throttling events are
    1627 * logged and, if so, how often.
    1628 *
    1629 * Reading back the file shows you the status (enabled or disabled) and
    1630 * the interval (in seconds) between thermal logging messages.
    1631 *
    1632 * Writing an integer to the file sets a new logging interval in seconds.
    1633 * The value should be between 1 and 3600. If the value is less than 1,
    1634 * thermal logging is disabled. Values greater than 3600 are rejected.
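         *
         * For example, to log at most once per minute, or to disable the
         * logging entirely (assuming card0):
         *
         * .. code-block:: bash
         *
         *	echo 60 > /sys/class/drm/card0/device/thermal_throttling_logging
         *	echo 0 > /sys/class/drm/card0/device/thermal_throttling_logging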
   1635 */
   1636static ssize_t amdgpu_get_thermal_throttling_logging(struct device *dev,
   1637						     struct device_attribute *attr,
   1638						     char *buf)
   1639{
   1640	struct drm_device *ddev = dev_get_drvdata(dev);
   1641	struct amdgpu_device *adev = drm_to_adev(ddev);
   1642
   1643	return sysfs_emit(buf, "%s: thermal throttling logging %s, with interval %d seconds\n",
   1644			  adev_to_drm(adev)->unique,
   1645			  atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled",
   1646			  adev->throttling_logging_rs.interval / HZ + 1);
   1647}
   1648
   1649static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
   1650						     struct device_attribute *attr,
   1651						     const char *buf,
   1652						     size_t count)
   1653{
   1654	struct drm_device *ddev = dev_get_drvdata(dev);
   1655	struct amdgpu_device *adev = drm_to_adev(ddev);
   1656	long throttling_logging_interval;
   1657	unsigned long flags;
   1658	int ret = 0;
   1659
   1660	ret = kstrtol(buf, 0, &throttling_logging_interval);
   1661	if (ret)
   1662		return ret;
   1663
   1664	if (throttling_logging_interval > 3600)
   1665		return -EINVAL;
   1666
   1667	if (throttling_logging_interval > 0) {
   1668		raw_spin_lock_irqsave(&adev->throttling_logging_rs.lock, flags);
   1669		/*
   1670		 * Reset the ratelimit timer internals.
   1671		 * This can effectively restart the timer.
   1672		 */
   1673		adev->throttling_logging_rs.interval =
   1674			(throttling_logging_interval - 1) * HZ;
   1675		adev->throttling_logging_rs.begin = 0;
   1676		adev->throttling_logging_rs.printed = 0;
   1677		adev->throttling_logging_rs.missed = 0;
   1678		raw_spin_unlock_irqrestore(&adev->throttling_logging_rs.lock, flags);
   1679
   1680		atomic_set(&adev->throttling_logging_enabled, 1);
   1681	} else {
   1682		atomic_set(&adev->throttling_logging_enabled, 0);
   1683	}
   1684
   1685	return count;
   1686}
   1687
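       /*
        * Example (illustrative only): enable throttling logging with a 60
        * second interval; writing "0" instead would disable it. The sysfs
        * path for card0 is an assumption:
        *
        *	FILE *f = fopen("/sys/class/drm/card0/device/"
        *			"thermal_throttling_logging", "w");
        *
        *	if (f) {
        *		fprintf(f, "60");
        *		fclose(f);
        *	}
        */
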
   1688/**
   1689 * DOC: gpu_metrics
   1690 *
   1691 * The amdgpu driver provides a sysfs API for retrieving current gpu
   1692 * metrics data. The file gpu_metrics is used for this. Reading the
   1693 * file will dump all the current gpu metrics data.
   1694 *
   1695 * The data includes temperature, frequency, engine utilization,
   1696 * power consumption, throttler status, fan speed and CPU core
   1697 * statistics (available for APUs only). That is, it gives a snapshot
   1698 * of all sensors at the same time.
   1699 */
   1700static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
   1701				      struct device_attribute *attr,
   1702				      char *buf)
   1703{
   1704	struct drm_device *ddev = dev_get_drvdata(dev);
   1705	struct amdgpu_device *adev = drm_to_adev(ddev);
   1706	void *gpu_metrics;
   1707	ssize_t size = 0;
   1708	int ret;
   1709
   1710	if (amdgpu_in_reset(adev))
   1711		return -EPERM;
   1712	if (adev->in_suspend && !adev->in_runpm)
   1713		return -EPERM;
   1714
   1715	ret = pm_runtime_get_sync(ddev->dev);
   1716	if (ret < 0) {
   1717		pm_runtime_put_autosuspend(ddev->dev);
   1718		return ret;
   1719	}
   1720
   1721	size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);
   1722	if (size <= 0)
   1723		goto out;
   1724
   1725	if (size >= PAGE_SIZE)
   1726		size = PAGE_SIZE - 1;
   1727
   1728	memcpy(buf, gpu_metrics, size);
   1729
   1730out:
   1731	pm_runtime_mark_last_busy(ddev->dev);
   1732	pm_runtime_put_autosuspend(ddev->dev);
   1733
   1734	return size;
   1735}
   1736
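       /*
        * Example (illustrative only): the file is a binary dump whose layout
        * depends on the metrics table version carried in its header, so it
        * is read here as opaque bytes. The sysfs path for card0 and the
        * <fcntl.h>/<unistd.h>/<stdio.h> includes are assumptions:
        *
        *	unsigned char metrics[4096];
        *	ssize_t n;
        *	int fd = open("/sys/class/drm/card0/device/gpu_metrics", O_RDONLY);
        *
        *	if (fd >= 0) {
        *		n = read(fd, metrics, sizeof(metrics));
        *		if (n > 0)
        *			printf("read %zd bytes of gpu metrics\n", n);
        *		close(fd);
        *	}
        */
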
   1737static int amdgpu_device_read_powershift(struct amdgpu_device *adev,
   1738						uint32_t *ss_power, bool dgpu_share)
   1739{
   1740	struct drm_device *ddev = adev_to_drm(adev);
   1741	uint32_t size;
   1742	int r = 0;
   1743
   1744	if (amdgpu_in_reset(adev))
   1745		return -EPERM;
   1746	if (adev->in_suspend && !adev->in_runpm)
   1747		return -EPERM;
   1748
   1749	r = pm_runtime_get_sync(ddev->dev);
   1750	if (r < 0) {
   1751		pm_runtime_put_autosuspend(ddev->dev);
   1752		return r;
   1753	}
   1754
   1755	if (dgpu_share)
   1756		r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE,
   1757				   (void *)ss_power, &size);
   1758	else
   1759		r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
   1760				   (void *)ss_power, &size);
   1761
   1762	pm_runtime_mark_last_busy(ddev->dev);
   1763	pm_runtime_put_autosuspend(ddev->dev);
   1764	return r;
   1765}
   1766
   1767static int amdgpu_show_powershift_percent(struct device *dev,
   1768					char *buf, bool dgpu_share)
   1769{
   1770	struct drm_device *ddev = dev_get_drvdata(dev);
   1771	struct amdgpu_device *adev = drm_to_adev(ddev);
   1772	uint32_t ss_power;
   1773	int r = 0, i;
   1774
   1775	r = amdgpu_device_read_powershift(adev, &ss_power, dgpu_share);
   1776	if (r == -EOPNOTSUPP) {
   1777		/* sensor not available on dGPU, try to read from APU */
   1778		adev = NULL;
   1779		mutex_lock(&mgpu_info.mutex);
   1780		for (i = 0; i < mgpu_info.num_gpu; i++) {
   1781			if (mgpu_info.gpu_ins[i].adev->flags & AMD_IS_APU) {
   1782				adev = mgpu_info.gpu_ins[i].adev;
   1783				break;
   1784			}
   1785		}
   1786		mutex_unlock(&mgpu_info.mutex);
   1787		if (adev)
   1788			r = amdgpu_device_read_powershift(adev, &ss_power, dgpu_share);
   1789	}
   1790
   1791	if (!r)
   1792		r = sysfs_emit(buf, "%u%%\n", ss_power);
   1793
   1794	return r;
   1795}
   1796/**
   1797 * DOC: smartshift_apu_power
   1798 *
   1799 * The amdgpu driver provides a sysfs API for reporting the APU power
   1800 * shift in percent if the platform supports smartshift. A value of 0 means
   1801 * that there is no power shift, and values in [1-100] mean that power
   1802 * is shifted to the APU; the boost percentage is relative to the APU power
   1803 * limit on the platform.
   1804 */
   1805
   1806static ssize_t amdgpu_get_smartshift_apu_power(struct device *dev, struct device_attribute *attr,
   1807					       char *buf)
   1808{
   1809	return amdgpu_show_powershift_percent(dev, buf, false);
   1810}
   1811
   1812/**
   1813 * DOC: smartshift_dgpu_power
   1814 *
   1815 * The amdgpu driver provides a sysfs API for reporting the dGPU power
   1816 * shift in percent if the platform supports smartshift. A value of 0 means
   1817 * that there is no power shift, and values in [1-100] mean that power is
   1818 * shifted to the dGPU; the boost percentage is relative to the dGPU power
   1819 * limit on the platform.
   1820 */
   1821
   1822static ssize_t amdgpu_get_smartshift_dgpu_power(struct device *dev, struct device_attribute *attr,
   1823						char *buf)
   1824{
   1825	return amdgpu_show_powershift_percent(dev, buf, true);
   1826}
   1827
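       /*
        * Example (illustrative only): both files report a percentage with a
        * trailing '%', so the value parses as below. The sysfs path for
        * card0 is an assumption; smartshift_dgpu_power works the same way:
        *
        *	unsigned int pct;
        *	FILE *f = fopen("/sys/class/drm/card0/device/"
        *			"smartshift_apu_power", "r");
        *
        *	if (f && fscanf(f, "%u%%", &pct) == 1)
        *		printf("power shifted to APU: %u%%\n", pct);
        *	if (f)
        *		fclose(f);
        */
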
   1828/**
   1829 * DOC: smartshift_bias
   1830 *
   1831 * The amdgpu driver provides a sysfs API for reporting the
   1832 * smartshift (SS2.0) bias level. The value ranges from -100 to 100
   1833 * and the default is 0. -100 sets maximum preference to the APU
   1834 * and 100 sets maximum preference to the dGPU.
   1835 */
   1836
   1837static ssize_t amdgpu_get_smartshift_bias(struct device *dev,
   1838					  struct device_attribute *attr,
   1839					  char *buf)
   1840{
   1841	int r = 0;
   1842
   1843	r = sysfs_emit(buf, "%d\n", amdgpu_smartshift_bias);
   1844
   1845	return r;
   1846}
   1847
   1848static ssize_t amdgpu_set_smartshift_bias(struct device *dev,
   1849					  struct device_attribute *attr,
   1850					  const char *buf, size_t count)
   1851{
   1852	struct drm_device *ddev = dev_get_drvdata(dev);
   1853	struct amdgpu_device *adev = drm_to_adev(ddev);
   1854	int r = 0;
   1855	int bias = 0;
   1856
   1857	if (amdgpu_in_reset(adev))
   1858		return -EPERM;
   1859	if (adev->in_suspend && !adev->in_runpm)
   1860		return -EPERM;
   1861
   1862	r = pm_runtime_get_sync(ddev->dev);
   1863	if (r < 0) {
   1864		pm_runtime_put_autosuspend(ddev->dev);
   1865		return r;
   1866	}
   1867
   1868	r = kstrtoint(buf, 10, &bias);
   1869	if (r)
   1870		goto out;
   1871
   1872	if (bias > AMDGPU_SMARTSHIFT_MAX_BIAS)
   1873		bias = AMDGPU_SMARTSHIFT_MAX_BIAS;
   1874	else if (bias < AMDGPU_SMARTSHIFT_MIN_BIAS)
   1875		bias = AMDGPU_SMARTSHIFT_MIN_BIAS;
   1876
   1877	amdgpu_smartshift_bias = bias;
   1878	r = count;
   1879
   1880	/* TODO: update bias level with SMU message */
   1881
   1882out:
   1883	pm_runtime_mark_last_busy(ddev->dev);
   1884	pm_runtime_put_autosuspend(ddev->dev);
   1885	return r;
   1886}
   1887
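       /*
        * Example (illustrative only): bias the platform fully towards the
        * APU by writing -100; out-of-range values are clamped by the store
        * handler above. The sysfs path for card0 is an assumption:
        *
        *	FILE *f = fopen("/sys/class/drm/card0/device/smartshift_bias", "w");
        *
        *	if (f) {
        *		fprintf(f, "-100");
        *		fclose(f);
        *	}
        */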
   1888
   1889static int ss_power_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
   1890				uint32_t mask, enum amdgpu_device_attr_states *states)
   1891{
   1892	if (!amdgpu_device_supports_smart_shift(adev_to_drm(adev)))
   1893		*states = ATTR_STATE_UNSUPPORTED;
   1894
   1895	return 0;
   1896}
   1897
   1898static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
   1899			       uint32_t mask, enum amdgpu_device_attr_states *states)
   1900{
   1901	uint32_t ss_power, size;
   1902
   1903	if (!amdgpu_device_supports_smart_shift(adev_to_drm(adev)))
   1904		*states = ATTR_STATE_UNSUPPORTED;
   1905	else if (amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
   1906		 (void *)&ss_power, &size))
   1907		*states = ATTR_STATE_UNSUPPORTED;
   1908	else if (amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE,
   1909		 (void *)&ss_power, &size))
   1910		*states = ATTR_STATE_UNSUPPORTED;
   1911
   1912	return 0;
   1913}
   1914
   1915static struct amdgpu_device_attr amdgpu_device_attrs[] = {
   1916	AMDGPU_DEVICE_ATTR_RW(power_dpm_state,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
   1917	AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level,	ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
   1918	AMDGPU_DEVICE_ATTR_RO(pp_num_states,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
   1919	AMDGPU_DEVICE_ATTR_RO(pp_cur_state,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
   1920	AMDGPU_DEVICE_ATTR_RW(pp_force_state,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
   1921	AMDGPU_DEVICE_ATTR_RW(pp_table,					ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
   1922	AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
   1923	AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
   1924	AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
   1925	AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
   1926	AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
   1927	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
   1928	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
   1929	AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
   1930	AMDGPU_DEVICE_ATTR_RW(pp_sclk_od,				ATTR_FLAG_BASIC),
   1931	AMDGPU_DEVICE_ATTR_RW(pp_mclk_od,				ATTR_FLAG_BASIC),
   1932	AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode,			ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
   1933	AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage,			ATTR_FLAG_BASIC),
   1934	AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
   1935	AMDGPU_DEVICE_ATTR_RO(mem_busy_percent,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
   1936	AMDGPU_DEVICE_ATTR_RO(pcie_bw,					ATTR_FLAG_BASIC),
   1937	AMDGPU_DEVICE_ATTR_RW(pp_features,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
   1938	AMDGPU_DEVICE_ATTR_RO(unique_id,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
   1939	AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging,		ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
   1940	AMDGPU_DEVICE_ATTR_RO(gpu_metrics,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
   1941	AMDGPU_DEVICE_ATTR_RO(smartshift_apu_power,			ATTR_FLAG_BASIC,
   1942			      .attr_update = ss_power_attr_update),
   1943	AMDGPU_DEVICE_ATTR_RO(smartshift_dgpu_power,			ATTR_FLAG_BASIC,
   1944			      .attr_update = ss_power_attr_update),
   1945	AMDGPU_DEVICE_ATTR_RW(smartshift_bias,				ATTR_FLAG_BASIC,
   1946			      .attr_update = ss_bias_attr_update),
   1947};
   1948
   1949static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
   1950			       uint32_t mask, enum amdgpu_device_attr_states *states)
   1951{
   1952	struct device_attribute *dev_attr = &attr->dev_attr;
   1953	uint32_t mp1_ver = adev->ip_versions[MP1_HWIP][0];
   1954	uint32_t gc_ver = adev->ip_versions[GC_HWIP][0];
   1955	const char *attr_name = dev_attr->attr.name;
   1956
   1957	if (!(attr->flags & mask)) {
   1958		*states = ATTR_STATE_UNSUPPORTED;
   1959		return 0;
   1960	}
   1961
   1962#define DEVICE_ATTR_IS(_name)	(!strcmp(attr_name, #_name))
   1963
   1964	if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
   1965		if (gc_ver < IP_VERSION(9, 0, 0))
   1966			*states = ATTR_STATE_UNSUPPORTED;
   1967	} else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
   1968		if (gc_ver < IP_VERSION(9, 0, 0) ||
   1969		    gc_ver == IP_VERSION(9, 4, 1) ||
   1970		    gc_ver == IP_VERSION(9, 4, 2))
   1971			*states = ATTR_STATE_UNSUPPORTED;
   1972	} else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
   1973		if (mp1_ver < IP_VERSION(10, 0, 0))
   1974			*states = ATTR_STATE_UNSUPPORTED;
   1975	} else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) {
   1976		*states = ATTR_STATE_UNSUPPORTED;
   1977		if (amdgpu_dpm_is_overdrive_supported(adev))
   1978			*states = ATTR_STATE_SUPPORTED;
   1979	} else if (DEVICE_ATTR_IS(mem_busy_percent)) {
   1980		if (adev->flags & AMD_IS_APU || gc_ver == IP_VERSION(9, 0, 1))
   1981			*states = ATTR_STATE_UNSUPPORTED;
   1982	} else if (DEVICE_ATTR_IS(pcie_bw)) {
   1983		/* PCIe Perf counters won't work on APU nodes */
   1984		if (adev->flags & AMD_IS_APU)
   1985			*states = ATTR_STATE_UNSUPPORTED;
   1986	} else if (DEVICE_ATTR_IS(unique_id)) {
   1987		switch (gc_ver) {
   1988		case IP_VERSION(9, 0, 1):
   1989		case IP_VERSION(9, 4, 0):
   1990		case IP_VERSION(9, 4, 1):
   1991		case IP_VERSION(9, 4, 2):
   1992		case IP_VERSION(10, 3, 0):
   1993		case IP_VERSION(11, 0, 0):
   1994			*states = ATTR_STATE_SUPPORTED;
   1995			break;
   1996		default:
   1997			*states = ATTR_STATE_UNSUPPORTED;
   1998		}
   1999	} else if (DEVICE_ATTR_IS(pp_features)) {
   2000		if (adev->flags & AMD_IS_APU || gc_ver < IP_VERSION(9, 0, 0))
   2001			*states = ATTR_STATE_UNSUPPORTED;
   2002	} else if (DEVICE_ATTR_IS(gpu_metrics)) {
   2003		if (gc_ver < IP_VERSION(9, 1, 0))
   2004			*states = ATTR_STATE_UNSUPPORTED;
   2005	} else if (DEVICE_ATTR_IS(pp_dpm_vclk)) {
   2006		if (!(gc_ver == IP_VERSION(10, 3, 1) ||
   2007		      gc_ver == IP_VERSION(10, 3, 0) ||
   2008		      gc_ver == IP_VERSION(10, 1, 2) ||
   2009		      gc_ver == IP_VERSION(11, 0, 0) ||
   2010		      gc_ver == IP_VERSION(11, 0, 2)))
   2011			*states = ATTR_STATE_UNSUPPORTED;
   2012	} else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
   2013		if (!(gc_ver == IP_VERSION(10, 3, 1) ||
   2014		      gc_ver == IP_VERSION(10, 3, 0) ||
   2015		      gc_ver == IP_VERSION(10, 1, 2) ||
   2016		      gc_ver == IP_VERSION(11, 0, 0) ||
   2017		      gc_ver == IP_VERSION(11, 0, 2)))
   2018			*states = ATTR_STATE_UNSUPPORTED;
   2019	} else if (DEVICE_ATTR_IS(pp_power_profile_mode)) {
   2020		if (amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP)
   2021			*states = ATTR_STATE_UNSUPPORTED;
   2022		else if (gc_ver == IP_VERSION(10, 3, 0) && amdgpu_sriov_vf(adev))
   2023			*states = ATTR_STATE_UNSUPPORTED;
   2024	}
   2025
   2026	switch (gc_ver) {
   2027	case IP_VERSION(9, 4, 1):
   2028	case IP_VERSION(9, 4, 2):
   2029		/* the MI series cards do not support standalone mclk/socclk/fclk level setting */
   2030		if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
   2031		    DEVICE_ATTR_IS(pp_dpm_socclk) ||
   2032		    DEVICE_ATTR_IS(pp_dpm_fclk)) {
   2033			dev_attr->attr.mode &= ~S_IWUGO;
   2034			dev_attr->store = NULL;
   2035		}
   2036		break;
   2037	case IP_VERSION(10, 3, 0):
   2038		if (DEVICE_ATTR_IS(power_dpm_force_performance_level) &&
   2039		    amdgpu_sriov_vf(adev)) {
   2040			dev_attr->attr.mode &= ~0222;
   2041			dev_attr->store = NULL;
   2042		}
   2043		break;
   2044	default:
   2045		break;
   2046	}
   2047
   2048	if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
   2049		/* SMU MP1 does not support dcefclk level setting on GC 10.0.0 and newer */
   2050		if (gc_ver >= IP_VERSION(10, 0, 0)) {
   2051			dev_attr->attr.mode &= ~S_IWUGO;
   2052			dev_attr->store = NULL;
   2053		}
   2054	}
   2055
   2056	/* setting should not be allowed from VF if not in one VF mode */
   2057	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
   2058		dev_attr->attr.mode &= ~S_IWUGO;
   2059		dev_attr->store = NULL;
   2060	}
   2061
   2062#undef DEVICE_ATTR_IS
   2063
   2064	return 0;
   2065}
   2066
   2067
   2068static int amdgpu_device_attr_create(struct amdgpu_device *adev,
   2069				     struct amdgpu_device_attr *attr,
   2070				     uint32_t mask, struct list_head *attr_list)
   2071{
   2072	int ret = 0;
   2073	struct device_attribute *dev_attr = &attr->dev_attr;
   2074	const char *name = dev_attr->attr.name;
   2075	enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED;
   2076	struct amdgpu_device_attr_entry *attr_entry;
   2077
   2078	int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
   2079			   uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update;
   2080
   2081	BUG_ON(!attr);
   2082
   2083	attr_update = attr->attr_update ? attr->attr_update : default_attr_update;
   2084
   2085	ret = attr_update(adev, attr, mask, &attr_states);
   2086	if (ret) {
   2087		dev_err(adev->dev, "failed to update device file %s, ret = %d\n",
   2088			name, ret);
   2089		return ret;
   2090	}
   2091
   2092	if (attr_states == ATTR_STATE_UNSUPPORTED)
   2093		return 0;
   2094
   2095	ret = device_create_file(adev->dev, dev_attr);
   2096	if (ret) {
   2097		dev_err(adev->dev, "failed to create device file %s, ret = %d\n",
   2098			name, ret);
   2099	}
   2100
   2101	attr_entry = kmalloc(sizeof(*attr_entry), GFP_KERNEL);
   2102	if (!attr_entry)
   2103		return -ENOMEM;
   2104
   2105	attr_entry->attr = attr;
   2106	INIT_LIST_HEAD(&attr_entry->entry);
   2107
   2108	list_add_tail(&attr_entry->entry, attr_list);
   2109
   2110	return ret;
   2111}
   2112
   2113static void amdgpu_device_attr_remove(struct amdgpu_device *adev, struct amdgpu_device_attr *attr)
   2114{
   2115	struct device_attribute *dev_attr = &attr->dev_attr;
   2116
   2117	device_remove_file(adev->dev, dev_attr);
   2118}
   2119
   2120static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
   2121					     struct list_head *attr_list);
   2122
   2123static int amdgpu_device_attr_create_groups(struct amdgpu_device *adev,
   2124					    struct amdgpu_device_attr *attrs,
   2125					    uint32_t counts,
   2126					    uint32_t mask,
   2127					    struct list_head *attr_list)
   2128{
   2129	int ret = 0;
   2130	uint32_t i = 0;
   2131
   2132	for (i = 0; i < counts; i++) {
   2133		ret = amdgpu_device_attr_create(adev, &attrs[i], mask, attr_list);
   2134		if (ret)
   2135			goto failed;
   2136	}
   2137
   2138	return 0;
   2139
   2140failed:
   2141	amdgpu_device_attr_remove_groups(adev, attr_list);
   2142
   2143	return ret;
   2144}
   2145
   2146static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
   2147					     struct list_head *attr_list)
   2148{
   2149	struct amdgpu_device_attr_entry *entry, *entry_tmp;
   2150
   2151	if (list_empty(attr_list))
   2152		return;
   2153
   2154	list_for_each_entry_safe(entry, entry_tmp, attr_list, entry) {
   2155		amdgpu_device_attr_remove(adev, entry->attr);
   2156		list_del(&entry->entry);
   2157		kfree(entry);
   2158	}
   2159}
   2160
   2161static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
   2162				      struct device_attribute *attr,
   2163				      char *buf)
   2164{
   2165	struct amdgpu_device *adev = dev_get_drvdata(dev);
   2166	int channel = to_sensor_dev_attr(attr)->index;
   2167	int r, temp = 0, size = sizeof(temp);
   2168
   2169	if (amdgpu_in_reset(adev))
   2170		return -EPERM;
   2171	if (adev->in_suspend && !adev->in_runpm)
   2172		return -EPERM;
   2173
   2174	if (channel >= PP_TEMP_MAX)
   2175		return -EINVAL;
   2176
   2177	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
   2178	if (r < 0) {
   2179		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
   2180		return r;
   2181	}
   2182
   2183	switch (channel) {
   2184	case PP_TEMP_JUNCTION:
   2185		/* get current junction temperature */
   2186		r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
   2187					   (void *)&temp, &size);
   2188		break;
   2189	case PP_TEMP_EDGE:
   2190		/* get current edge temperature */
   2191		r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
   2192					   (void *)&temp, &size);
   2193		break;
   2194	case PP_TEMP_MEM:
   2195		/* get current memory temperature */
   2196		r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
   2197					   (void *)&temp, &size);
   2198		break;
   2199	default:
   2200		r = -EINVAL;
   2201		break;
   2202	}
   2203
   2204	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
   2205	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
   2206
   2207	if (r)
   2208		return r;
   2209
   2210	return sysfs_emit(buf, "%d\n", temp);
   2211}
   2212
   2213static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
   2214					     struct device_attribute *attr,
   2215					     char *buf)
   2216{
   2217	struct amdgpu_device *adev = dev_get_drvdata(dev);
   2218	int hyst = to_sensor_dev_attr(attr)->index;
   2219	int temp;
   2220
   2221	if (hyst)
   2222		temp = adev->pm.dpm.thermal.min_temp;
   2223	else
   2224		temp = adev->pm.dpm.thermal.max_temp;
   2225
   2226	return sysfs_emit(buf, "%d\n", temp);
   2227}
   2228
   2229static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
   2230					     struct device_attribute *attr,
   2231					     char *buf)
   2232{
   2233	struct amdgpu_device *adev = dev_get_drvdata(dev);
   2234	int hyst = to_sensor_dev_attr(attr)->index;
   2235	int temp;
   2236
   2237	if (hyst)
   2238		temp = adev->pm.dpm.thermal.min_hotspot_temp;
   2239	else
   2240		temp = adev->pm.dpm.thermal.max_hotspot_crit_temp;
   2241
   2242	return sysfs_emit(buf, "%d\n", temp);
   2243}
   2244
   2245static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
   2246					     struct device_attribute *attr,
   2247					     char *buf)
   2248{
   2249	struct amdgpu_device *adev = dev_get_drvdata(dev);
   2250	int hyst = to_sensor_dev_attr(attr)->index;
   2251	int temp;
   2252
   2253	if (hyst)
   2254		temp = adev->pm.dpm.thermal.min_mem_temp;
   2255	else
   2256		temp = adev->pm.dpm.thermal.max_mem_crit_temp;
   2257
   2258	return sysfs_emit(buf, "%d\n", temp);
   2259}
   2260
   2261static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
   2262					     struct device_attribute *attr,
   2263					     char *buf)
   2264{
   2265	int channel = to_sensor_dev_attr(attr)->index;
   2266
   2267	if (channel >= PP_TEMP_MAX)
   2268		return -EINVAL;
   2269
   2270	return sysfs_emit(buf, "%s\n", temp_label[channel].label);
   2271}
   2272
   2273static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
   2274					     struct device_attribute *attr,
   2275					     char *buf)
   2276{
   2277	struct amdgpu_device *adev = dev_get_drvdata(dev);
   2278	int channel = to_sensor_dev_attr(attr)->index;
   2279	int temp = 0;
   2280
   2281	if (channel >= PP_TEMP_MAX)
   2282		return -EINVAL;
   2283
   2284	switch (channel) {
   2285	case PP_TEMP_JUNCTION:
   2286		temp = adev->pm.dpm.thermal.max_hotspot_emergency_temp;
   2287		break;
   2288	case PP_TEMP_EDGE:
   2289		temp = adev->pm.dpm.thermal.max_edge_emergency_temp;
   2290		break;
   2291	case PP_TEMP_MEM:
   2292		temp = adev->pm.dpm.thermal.max_mem_emergency_temp;
   2293		break;
   2294	}
   2295
   2296	return sysfs_emit(buf, "%d\n", temp);
   2297}
   2298
   2299static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
   2300					    struct device_attribute *attr,
   2301					    char *buf)
   2302{
   2303	struct amdgpu_device *adev = dev_get_drvdata(dev);
   2304	u32 pwm_mode = 0;
   2305	int ret;
   2306
   2307	if (amdgpu_in_reset(adev))
   2308		return -EPERM;
   2309	if (adev->in_suspend && !adev->in_runpm)
   2310		return -EPERM;
   2311
   2312	ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
   2313	if (ret < 0) {
   2314		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
   2315		return ret;
   2316	}
   2317
   2318	ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
   2319
   2320	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
   2321	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
   2322
   2323	if (ret)
   2324		return -EINVAL;
   2325
   2326	return sysfs_emit(buf, "%u\n", pwm_mode);
   2327}
   2328
   2329static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
   2330					    struct device_attribute *attr,
   2331					    const char *buf,
   2332					    size_t count)
   2333{
   2334	struct amdgpu_device *adev = dev_get_drvdata(dev);
   2335	int err, ret;
   2336	int value;
   2337
   2338	if (amdgpu_in_reset(adev))
   2339		return -EPERM;
   2340	if (adev->in_suspend && !adev->in_runpm)
   2341		return -EPERM;
   2342
   2343	err = kstrtoint(buf, 10, &value);
   2344	if (err)
   2345		return err;
   2346
   2347	ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
   2348	if (ret < 0) {
   2349		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
   2350		return ret;
   2351	}
   2352
   2353	ret = amdgpu_dpm_set_fan_control_mode(adev, value);
   2354
   2355	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
   2356	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
   2357
   2358	if (ret)
   2359		return -EINVAL;
   2360
   2361	return count;
   2362}
   2363
   2364static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
   2365					 struct device_attribute *attr,
   2366					 char *buf)
   2367{
   2368	return sysfs_emit(buf, "%i\n", 0);
   2369}
   2370
   2371static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
   2372					 struct device_attribute *attr,
   2373					 char *buf)
   2374{
   2375	return sysfs_emit(buf, "%i\n", 255);
   2376}
   2377
   2378static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
   2379				     struct device_attribute *attr,
   2380				     const char *buf, size_t count)
   2381{
   2382	struct amdgpu_device *adev = dev_get_drvdata(dev);
   2383	int err;
   2384	u32 value;
   2385	u32 pwm_mode;
   2386
   2387	if (amdgpu_in_reset(adev))
   2388		return -EPERM;
   2389	if (adev->in_suspend && !adev->in_runpm)
   2390		return -EPERM;
   2391
   2392	err = kstrtou32(buf, 10, &value);
   2393	if (err)
   2394		return err;
   2395
   2396	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
   2397	if (err < 0) {
   2398		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
   2399		return err;
   2400	}
   2401
   2402	err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
   2403	if (err)
   2404		goto out;
   2405
   2406	if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
   2407		pr_info("manual fan speed control should be enabled first\n");
   2408		err = -EINVAL;
   2409		goto out;
   2410	}
   2411
   2412	err = amdgpu_dpm_set_fan_speed_pwm(adev, value);
   2413
   2414out:
   2415	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
   2416	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
   2417
   2418	if (err)
   2419		return err;
   2420
   2421	return count;
   2422}
   2423
   2424static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
   2425				     struct device_attribute *attr,
   2426				     char *buf)
   2427{
   2428	struct amdgpu_device *adev = dev_get_drvdata(dev);
   2429	int err;
   2430	u32 speed = 0;
   2431
   2432	if (amdgpu_in_reset(adev))
   2433		return -EPERM;
   2434	if (adev->in_suspend && !adev->in_runpm)
   2435		return -EPERM;
   2436
   2437	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
   2438	if (err < 0) {
   2439		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
   2440		return err;
   2441	}
   2442
   2443	err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed);
   2444
   2445	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
   2446	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
   2447
   2448	if (err)
   2449		return err;
   2450
   2451	return sysfs_emit(buf, "%i\n", speed);
   2452}
   2453
   2454static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
   2455					   struct device_attribute *attr,
   2456					   char *buf)
   2457{
   2458	struct amdgpu_device *adev = dev_get_drvdata(dev);
   2459	int err;
   2460	u32 speed = 0;
   2461
   2462	if (amdgpu_in_reset(adev))
   2463		return -EPERM;
   2464	if (adev->in_suspend && !adev->in_runpm)
   2465		return -EPERM;
   2466
   2467	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
   2468	if (err < 0) {
   2469		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
   2470		return err;
   2471	}
   2472
   2473	err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
   2474
   2475	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
   2476	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
   2477
   2478	if (err)
   2479		return err;
   2480
   2481	return sysfs_emit(buf, "%i\n", speed);
   2482}
   2483
   2484static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
   2485					 struct device_attribute *attr,
   2486					 char *buf)
   2487{
   2488	struct amdgpu_device *adev = dev_get_drvdata(dev);
   2489	u32 min_rpm = 0;
   2490	u32 size = sizeof(min_rpm);
   2491	int r;
   2492
   2493	if (amdgpu_in_reset(adev))
   2494		return -EPERM;
   2495	if (adev->in_suspend && !adev->in_runpm)
   2496		return -EPERM;
   2497
   2498	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
   2499	if (r < 0) {
   2500		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
   2501		return r;
   2502	}
   2503
   2504	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
   2505				   (void *)&min_rpm, &size);
   2506
   2507	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
   2508	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
   2509
   2510	if (r)
   2511		return r;
   2512
   2513	return sysfs_emit(buf, "%d\n", min_rpm);
   2514}
   2515
   2516static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
   2517					 struct device_attribute *attr,
   2518					 char *buf)
   2519{
   2520	struct amdgpu_device *adev = dev_get_drvdata(dev);
   2521	u32 max_rpm = 0;
   2522	u32 size = sizeof(max_rpm);
   2523	int r;
   2524
   2525	if (amdgpu_in_reset(adev))
   2526		return -EPERM;
   2527	if (adev->in_suspend && !adev->in_runpm)
   2528		return -EPERM;
   2529
   2530	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
   2531	if (r < 0) {
   2532		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
   2533		return r;
   2534	}
   2535
   2536	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
   2537				   (void *)&max_rpm, &size);
   2538
   2539	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
   2540	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
   2541
   2542	if (r)
   2543		return r;
   2544
   2545	return sysfs_emit(buf, "%d\n", max_rpm);
   2546}
   2547
   2548static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
   2549					   struct device_attribute *attr,
   2550					   char *buf)
   2551{
   2552	struct amdgpu_device *adev = dev_get_drvdata(dev);
   2553	int err;
   2554	u32 rpm = 0;
   2555
   2556	if (amdgpu_in_reset(adev))
   2557		return -EPERM;
   2558	if (adev->in_suspend && !adev->in_runpm)
   2559		return -EPERM;
   2560
   2561	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
   2562	if (err < 0) {
   2563		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
   2564		return err;
   2565	}
   2566
   2567	err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
   2568
   2569	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
   2570	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
   2571
   2572	if (err)
   2573		return err;
   2574
   2575	return sysfs_emit(buf, "%i\n", rpm);
   2576}
   2577
   2578static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
   2579				     struct device_attribute *attr,
   2580				     const char *buf, size_t count)
   2581{
   2582	struct amdgpu_device *adev = dev_get_drvdata(dev);
   2583	int err;
   2584	u32 value;
   2585	u32 pwm_mode;
   2586
   2587	if (amdgpu_in_reset(adev))
   2588		return -EPERM;
   2589	if (adev->in_suspend && !adev->in_runpm)
   2590		return -EPERM;
   2591
   2592	err = kstrtou32(buf, 10, &value);
   2593	if (err)
   2594		return err;
   2595
   2596	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
   2597	if (err < 0) {
   2598		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
   2599		return err;
   2600	}
   2601
   2602	err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
   2603	if (err)
   2604		goto out;
   2605
   2606	if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
   2607		err = -ENODATA;
   2608		goto out;
   2609	}
   2610
   2611	err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
   2612
   2613out:
   2614	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
   2615	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
   2616
   2617	if (err)
   2618		return err;
   2619
   2620	return count;
   2621}
   2622
   2623static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
   2624					    struct device_attribute *attr,
   2625					    char *buf)
   2626{
   2627	struct amdgpu_device *adev = dev_get_drvdata(dev);
   2628	u32 pwm_mode = 0;
   2629	int ret;
   2630
   2631	if (amdgpu_in_reset(adev))
   2632		return -EPERM;
   2633	if (adev->in_suspend && !adev->in_runpm)
   2634		return -EPERM;
   2635
   2636	ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
   2637	if (ret < 0) {
   2638		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
   2639		return ret;
   2640	}
   2641
   2642	ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
   2643
   2644	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
   2645	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
   2646
   2647	if (ret)
   2648		return -EINVAL;
   2649
   2650	return sysfs_emit(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
   2651}
   2652
   2653static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
   2654					    struct device_attribute *attr,
   2655					    const char *buf,
   2656					    size_t count)
   2657{
   2658	struct amdgpu_device *adev = dev_get_drvdata(dev);
   2659	int err;
   2660	int value;
   2661	u32 pwm_mode;
   2662
   2663	if (amdgpu_in_reset(adev))
   2664		return -EPERM;
   2665	if (adev->in_suspend && !adev->in_runpm)
   2666		return -EPERM;
   2667
   2668	err = kstrtoint(buf, 10, &value);
   2669	if (err)
   2670		return err;
   2671
   2672	if (value == 0)
   2673		pwm_mode = AMD_FAN_CTRL_AUTO;
   2674	else if (value == 1)
   2675		pwm_mode = AMD_FAN_CTRL_MANUAL;
   2676	else
   2677		return -EINVAL;
   2678
   2679	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
   2680	if (err < 0) {
   2681		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
   2682		return err;
   2683	}
   2684
   2685	err = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
   2686
   2687	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
   2688	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
   2689
   2690	if (err)
   2691		return -EINVAL;
   2692
   2693	return count;
   2694}
   2695
   2696static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
   2697					struct device_attribute *attr,
   2698					char *buf)
   2699{
   2700	struct amdgpu_device *adev = dev_get_drvdata(dev);
   2701	u32 vddgfx;
   2702	int r, size = sizeof(vddgfx);
   2703
   2704	if (amdgpu_in_reset(adev))
   2705		return -EPERM;
   2706	if (adev->in_suspend && !adev->in_runpm)
   2707		return -EPERM;
   2708
   2709	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
   2710	if (r < 0) {
   2711		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
   2712		return r;
   2713	}
   2714
   2715	/* get the voltage */
   2716	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX,
   2717				   (void *)&vddgfx, &size);
   2718
   2719	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
   2720	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
   2721
   2722	if (r)
   2723		return r;
   2724
   2725	return sysfs_emit(buf, "%d\n", vddgfx);
   2726}
   2727
   2728static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
   2729					      struct device_attribute *attr,
   2730					      char *buf)
   2731{
   2732	return sysfs_emit(buf, "vddgfx\n");
   2733}
   2734
   2735static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
   2736				       struct device_attribute *attr,
   2737				       char *buf)
   2738{
   2739	struct amdgpu_device *adev = dev_get_drvdata(dev);
   2740	u32 vddnb;
   2741	int r, size = sizeof(vddnb);
   2742
   2743	if (amdgpu_in_reset(adev))
   2744		return -EPERM;
   2745	if (adev->in_suspend && !adev->in_runpm)
   2746		return -EPERM;
   2747
   2748	/* only APUs have vddnb */
   2749	if  (!(adev->flags & AMD_IS_APU))
   2750		return -EINVAL;
   2751
   2752	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
   2753	if (r < 0) {
   2754		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
   2755		return r;
   2756	}
   2757
   2758	/* get the voltage */
   2759	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
   2760				   (void *)&vddnb, &size);
   2761
   2762	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
   2763	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
   2764
   2765	if (r)
   2766		return r;
   2767
   2768	return sysfs_emit(buf, "%d\n", vddnb);
   2769}
   2770
   2771static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
   2772					      struct device_attribute *attr,
   2773					      char *buf)
   2774{
   2775	return sysfs_emit(buf, "vddnb\n");
   2776}
   2777
   2778static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
   2779					   struct device_attribute *attr,
   2780					   char *buf)
   2781{
   2782	struct amdgpu_device *adev = dev_get_drvdata(dev);
   2783	u32 query = 0;
   2784	int r, size = sizeof(u32);
   2785	unsigned uw;
   2786
   2787	if (amdgpu_in_reset(adev))
   2788		return -EPERM;
   2789	if (adev->in_suspend && !adev->in_runpm)
   2790		return -EPERM;
   2791
   2792	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
   2793	if (r < 0) {
   2794		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
   2795		return r;
   2796	}
   2797
   2798	/* get the average GPU power */
   2799	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
   2800				   (void *)&query, &size);
   2801
   2802	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
   2803	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
   2804
   2805	if (r)
   2806		return r;
   2807
   2808	/* convert to microwatts */
   2809	uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;
   2810
   2811	return sysfs_emit(buf, "%u\n", uw);
   2812}
   2813
   2814static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
   2815					 struct device_attribute *attr,
   2816					 char *buf)
   2817{
   2818	return sysfs_emit(buf, "%i\n", 0);
   2819}
   2820
   2821
   2822static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
   2823					struct device_attribute *attr,
   2824					char *buf,
   2825					enum pp_power_limit_level pp_limit_level)
   2826{
   2827	struct amdgpu_device *adev = dev_get_drvdata(dev);
   2828	enum pp_power_type power_type = to_sensor_dev_attr(attr)->index;
   2829	uint32_t limit;
   2830	ssize_t size;
   2831	int r;
   2832
   2833	if (amdgpu_in_reset(adev))
   2834		return -EPERM;
   2835	if (adev->in_suspend && !adev->in_runpm)
   2836		return -EPERM;
   2837
   2838	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
   2839	if (r < 0) {
   2840		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
   2841		return r;
   2842	}
   2843
   2844	r = amdgpu_dpm_get_power_limit(adev, &limit,
   2845				      pp_limit_level, power_type);
   2846
   2847	if (!r)
   2848		size = sysfs_emit(buf, "%u\n", limit * 1000000);
   2849	else
   2850		size = sysfs_emit(buf, "\n");
   2851
   2852	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
   2853	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
   2854
   2855	return size;
   2856}
   2857
   2858
   2859static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
   2860					 struct device_attribute *attr,
   2861					 char *buf)
   2862{
   2863	return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_MAX);
   2864
   2865}
   2866
   2867static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
   2868					 struct device_attribute *attr,
   2869					 char *buf)
   2870{
   2871	return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_CURRENT);
   2872
   2873}
   2874
   2875static ssize_t amdgpu_hwmon_show_power_cap_default(struct device *dev,
   2876					 struct device_attribute *attr,
   2877					 char *buf)
   2878{
   2879	return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_DEFAULT);
   2880
   2881}
   2882
   2883static ssize_t amdgpu_hwmon_show_power_label(struct device *dev,
   2884					 struct device_attribute *attr,
   2885					 char *buf)
   2886{
   2887	struct amdgpu_device *adev = dev_get_drvdata(dev);
   2888	uint32_t gc_ver = adev->ip_versions[GC_HWIP][0];
   2889
   2890	if (gc_ver == IP_VERSION(10, 3, 1))
   2891		return sysfs_emit(buf, "%s\n",
   2892				  to_sensor_dev_attr(attr)->index == PP_PWR_TYPE_FAST ?
   2893				  "fastPPT" : "slowPPT");
   2894	else
   2895		return sysfs_emit(buf, "PPT\n");
   2896}
   2897
   2898static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
   2899		struct device_attribute *attr,
   2900		const char *buf,
   2901		size_t count)
   2902{
   2903	struct amdgpu_device *adev = dev_get_drvdata(dev);
   2904	int limit_type = to_sensor_dev_attr(attr)->index;
   2905	int err;
   2906	u32 value;
   2907
   2908	if (amdgpu_in_reset(adev))
   2909		return -EPERM;
   2910	if (adev->in_suspend && !adev->in_runpm)
   2911		return -EPERM;
   2912
   2913	if (amdgpu_sriov_vf(adev))
   2914		return -EINVAL;
   2915
   2916	err = kstrtou32(buf, 10, &value);
   2917	if (err)
   2918		return err;
   2919
   2920	value = value / 1000000; /* convert to Watt */
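       	/* the requested limit type is encoded in the top byte of the value */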
   2921	value |= limit_type << 24;
   2922
   2923	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
   2924	if (err < 0) {
   2925		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
   2926		return err;
   2927	}
   2928
   2929	err = amdgpu_dpm_set_power_limit(adev, value);
   2930
   2931	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
   2932	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
   2933
   2934	if (err)
   2935		return err;
   2936
   2937	return count;
   2938}
   2939
   2940static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
   2941				      struct device_attribute *attr,
   2942				      char *buf)
   2943{
   2944	struct amdgpu_device *adev = dev_get_drvdata(dev);
   2945	uint32_t sclk;
   2946	int r, size = sizeof(sclk);
   2947
   2948	if (amdgpu_in_reset(adev))
   2949		return -EPERM;
   2950	if (adev->in_suspend && !adev->in_runpm)
   2951		return -EPERM;
   2952
   2953	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
   2954	if (r < 0) {
   2955		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
   2956		return r;
   2957	}
   2958
   2959	/* get the sclk (reported in 10 kHz units) */
   2960	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
   2961				   (void *)&sclk, &size);
   2962
   2963	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
   2964	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
   2965
   2966	if (r)
   2967		return r;
   2968
   2969	return sysfs_emit(buf, "%u\n", sclk * 10 * 1000);
   2970}
   2971
   2972static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
   2973					    struct device_attribute *attr,
   2974					    char *buf)
   2975{
   2976	return sysfs_emit(buf, "sclk\n");
   2977}
   2978
   2979static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
   2980				      struct device_attribute *attr,
   2981				      char *buf)
   2982{
   2983	struct amdgpu_device *adev = dev_get_drvdata(dev);
   2984	uint32_t mclk;
   2985	int r, size = sizeof(mclk);
   2986
   2987	if (amdgpu_in_reset(adev))
   2988		return -EPERM;
   2989	if (adev->in_suspend && !adev->in_runpm)
   2990		return -EPERM;
   2991
   2992	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
   2993	if (r < 0) {
   2994		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
   2995		return r;
   2996	}
   2997
   2998	/* get the mclk (reported in 10 kHz units) */
   2999	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
   3000				   (void *)&mclk, &size);
   3001
   3002	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
   3003	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
   3004
   3005	if (r)
   3006		return r;
   3007
   3008	return sysfs_emit(buf, "%u\n", mclk * 10 * 1000);
   3009}
   3010
   3011static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
   3012					    struct device_attribute *attr,
   3013					    char *buf)
   3014{
   3015	return sysfs_emit(buf, "mclk\n");
   3016}
   3017
   3018/**
   3019 * DOC: hwmon
   3020 *
   3021 * The amdgpu driver exposes the following sensor interfaces:
   3022 *
   3023 * - GPU temperature (via the on-die sensor)
   3024 *
   3025 * - GPU voltage
   3026 *
   3027 * - Northbridge voltage (APUs only)
   3028 *
   3029 * - GPU power
   3030 *
   3031 * - GPU fan
   3032 *
   3033 * - GPU gfx/compute engine clock
   3034 *
   3035 * - GPU memory clock (dGPU only)
   3036 *
   3037 * hwmon interfaces for GPU temperature:
   3038 *
   3039 * - temp[1-3]_input: the on-die GPU temperature in millidegrees Celsius
   3040 *   - temp2_input and temp3_input are supported on SOC15 dGPUs only
   3041 *
   3042 * - temp[1-3]_label: temperature channel label
   3043 *   - temp2_label and temp3_label are supported on SOC15 dGPUs only
   3044 *
   3045 * - temp[1-3]_crit: temperature critical max value in millidegrees Celsius
   3046 *   - temp2_crit and temp3_crit are supported on SOC15 dGPUs only
   3047 *
   3048 * - temp[1-3]_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius
   3049 *   - temp2_crit_hyst and temp3_crit_hyst are supported on SOC15 dGPUs only
   3050 *
   3051 * - temp[1-3]_emergency: temperature emergency max value (ASIC shutdown) in millidegrees Celsius
   3052 *   - these are supported on SOC15 dGPUs only
   3053 *
   3054 * hwmon interfaces for GPU voltage:
   3055 *
   3056 * - in0_input: the voltage on the GPU in millivolts
   3057 *
   3058 * - in1_input: the voltage on the Northbridge in millivolts
   3059 *
   3060 * hwmon interfaces for GPU power:
   3061 *
   3062 * - power1_average: average power used by the GPU in microWatts
   3063 *
   3064 * - power1_cap_min: minimum cap supported in microWatts
   3065 *
   3066 * - power1_cap_max: maximum cap supported in microWatts
   3067 *
   3068 * - power1_cap: selected power cap in microWatts
   3069 *
   3070 * hwmon interfaces for GPU fan:
   3071 *
   3072 * - pwm1: pulse width modulation fan level (0-255)
   3073 *
   3074 * - pwm1_enable: pulse width modulation fan control method (0: no fan speed control, 1: manual fan speed control using pwm interface, 2: automatic fan speed control)
   3075 *
   3076 * - pwm1_min: pulse width modulation fan control minimum level (0)
   3077 *
   3078 * - pwm1_max: pulse width modulation fan control maximum level (255)
   3079 *
   3080 * - fan1_min: minimum fan speed in revolutions/min (RPM)
   3081 *
   3082 * - fan1_max: maximum fan speed in revolutions/min (RPM)
   3083 *
   3084 * - fan1_input: fan speed in RPM
   3085 *
   3086 * - fan[1-\*]_target: desired fan speed in revolutions/min (RPM)
   3087 *
   3088 * - fan[1-\*]_enable: enable or disable the sensors. 1: enable, 0: disable
   3089 *
   3090 * NOTE: DO NOT set the fan speed via "pwm1" and "fan[1-\*]_target" interfaces at the same time.
   3091 *       Doing so will cause the former to be overridden.
   3092 *
   3093 * hwmon interfaces for GPU clocks:
   3094 *
   3095 * - freq1_input: the gfx/compute clock in hertz
   3096 *
   3097 * - freq2_input: the memory clock in hertz
   3098 *
   3099 * You can use hwmon tools like sensors to view this information on your system.
   3100 *
   3101 */
   3102
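       /*
        * Example (illustrative only): switch the fan to manual control at
        * roughly half speed and read the edge temperature. The hwmon0
        * directory is an assumption; resolve the real one via
        * /sys/class/drm/card0/device/hwmon/ on the target system:
        *
        *	int temp;
        *	FILE *f;
        *
        *	f = fopen("/sys/class/hwmon/hwmon0/pwm1_enable", "w");
        *	if (f) {
        *		fprintf(f, "1");
        *		fclose(f);
        *	}
        *	f = fopen("/sys/class/hwmon/hwmon0/pwm1", "w");
        *	if (f) {
        *		fprintf(f, "128");
        *		fclose(f);
        *	}
        *	f = fopen("/sys/class/hwmon/hwmon0/temp1_input", "r");
        *	if (f && fscanf(f, "%d", &temp) == 1)
        *		printf("edge temp: %d millidegrees C\n", temp);
        *	if (f)
        *		fclose(f);
        */
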
   3103static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_EDGE);
   3104static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
   3105static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
   3106static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_EDGE);
   3107static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_JUNCTION);
   3108static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 0);
   3109static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 1);
   3110static SENSOR_DEVICE_ATTR(temp2_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_JUNCTION);
   3111static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_MEM);
   3112static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 0);
   3113static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 1);
   3114static SENSOR_DEVICE_ATTR(temp3_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_MEM);
   3115static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_EDGE);
   3116static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_JUNCTION);
   3117static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_MEM);
   3118static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
   3119static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
   3120static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
   3121static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
   3122static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
   3123static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0);
   3124static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0);
   3125static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0);
   3126static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0);
   3127static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
   3128static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
   3129static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
   3130static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
   3131static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
   3132static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
   3133static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
   3134static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
   3135static SENSOR_DEVICE_ATTR(power1_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 0);
   3136static SENSOR_DEVICE_ATTR(power1_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 0);
   3137static SENSOR_DEVICE_ATTR(power2_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 1);
   3138static SENSOR_DEVICE_ATTR(power2_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 1);
   3139static SENSOR_DEVICE_ATTR(power2_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 1);
   3140static SENSOR_DEVICE_ATTR(power2_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 1);
   3141static SENSOR_DEVICE_ATTR(power2_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 1);
   3142static SENSOR_DEVICE_ATTR(power2_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 1);
   3143static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);
   3144static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0);
   3145static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0);
   3146static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0);
   3147
   3148static struct attribute *hwmon_attributes[] = {
   3149	&sensor_dev_attr_temp1_input.dev_attr.attr,
   3150	&sensor_dev_attr_temp1_crit.dev_attr.attr,
   3151	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
   3152	&sensor_dev_attr_temp2_input.dev_attr.attr,
   3153	&sensor_dev_attr_temp2_crit.dev_attr.attr,
   3154	&sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
   3155	&sensor_dev_attr_temp3_input.dev_attr.attr,
   3156	&sensor_dev_attr_temp3_crit.dev_attr.attr,
   3157	&sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
   3158	&sensor_dev_attr_temp1_emergency.dev_attr.attr,
   3159	&sensor_dev_attr_temp2_emergency.dev_attr.attr,
   3160	&sensor_dev_attr_temp3_emergency.dev_attr.attr,
   3161	&sensor_dev_attr_temp1_label.dev_attr.attr,
   3162	&sensor_dev_attr_temp2_label.dev_attr.attr,
   3163	&sensor_dev_attr_temp3_label.dev_attr.attr,
   3164	&sensor_dev_attr_pwm1.dev_attr.attr,
   3165	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
   3166	&sensor_dev_attr_pwm1_min.dev_attr.attr,
   3167	&sensor_dev_attr_pwm1_max.dev_attr.attr,
   3168	&sensor_dev_attr_fan1_input.dev_attr.attr,
   3169	&sensor_dev_attr_fan1_min.dev_attr.attr,
   3170	&sensor_dev_attr_fan1_max.dev_attr.attr,
   3171	&sensor_dev_attr_fan1_target.dev_attr.attr,
   3172	&sensor_dev_attr_fan1_enable.dev_attr.attr,
   3173	&sensor_dev_attr_in0_input.dev_attr.attr,
   3174	&sensor_dev_attr_in0_label.dev_attr.attr,
   3175	&sensor_dev_attr_in1_input.dev_attr.attr,
   3176	&sensor_dev_attr_in1_label.dev_attr.attr,
   3177	&sensor_dev_attr_power1_average.dev_attr.attr,
   3178	&sensor_dev_attr_power1_cap_max.dev_attr.attr,
   3179	&sensor_dev_attr_power1_cap_min.dev_attr.attr,
   3180	&sensor_dev_attr_power1_cap.dev_attr.attr,
   3181	&sensor_dev_attr_power1_cap_default.dev_attr.attr,
   3182	&sensor_dev_attr_power1_label.dev_attr.attr,
   3183	&sensor_dev_attr_power2_average.dev_attr.attr,
   3184	&sensor_dev_attr_power2_cap_max.dev_attr.attr,
   3185	&sensor_dev_attr_power2_cap_min.dev_attr.attr,
   3186	&sensor_dev_attr_power2_cap.dev_attr.attr,
   3187	&sensor_dev_attr_power2_cap_default.dev_attr.attr,
   3188	&sensor_dev_attr_power2_label.dev_attr.attr,
   3189	&sensor_dev_attr_freq1_input.dev_attr.attr,
   3190	&sensor_dev_attr_freq1_label.dev_attr.attr,
   3191	&sensor_dev_attr_freq2_input.dev_attr.attr,
   3192	&sensor_dev_attr_freq2_label.dev_attr.attr,
   3193	NULL
   3194};
   3195
   3196static umode_t hwmon_attributes_visible(struct kobject *kobj,
   3197					struct attribute *attr, int index)
   3198{
   3199	struct device *dev = kobj_to_dev(kobj);
   3200	struct amdgpu_device *adev = dev_get_drvdata(dev);
   3201	umode_t effective_mode = attr->mode;
   3202	uint32_t gc_ver = adev->ip_versions[GC_HWIP][0];
   3203
   3204	/* under multi-vf mode, none of the hwmon attributes are supported */
   3205	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
   3206		return 0;
   3207
   3208	/* under pp one vf mode, management of hwmon attributes is not supported */
   3209	if (amdgpu_sriov_is_pp_one_vf(adev))
   3210		effective_mode &= ~S_IWUSR;
   3211
   3212	/* Skip fan attributes if fan is not present */
   3213	if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
   3214	    attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
   3215	    attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
   3216	    attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
   3217	    attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
   3218	    attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
   3219	    attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
   3220	    attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
   3221	    attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
   3222		return 0;
   3223
   3224	/* Skip fan attributes on APU */
   3225	if ((adev->flags & AMD_IS_APU) &&
   3226	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
   3227	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
   3228	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
   3229	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
   3230	     attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
   3231	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
   3232	     attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
   3233	     attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
   3234	     attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
   3235		return 0;
   3236
   3237	/* Skip crit temp on APU */
   3238	if ((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ) &&
   3239	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
   3240	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
   3241		return 0;
   3242
   3243	/* Skip limit attributes if DPM is not enabled */
   3244	if (!adev->pm.dpm_enabled &&
   3245	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
   3246	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
   3247	     attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
   3248	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
   3249	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
   3250	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
   3251	     attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
   3252	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
   3253	     attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
   3254	     attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
   3255	     attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
   3256		return 0;
   3257
	/* mask fan attributes if this asic has no bindings to expose them */
   3259	if (((amdgpu_dpm_get_fan_speed_pwm(adev, NULL) == -EOPNOTSUPP) &&
   3260	      attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
   3261	    ((amdgpu_dpm_get_fan_control_mode(adev, NULL) == -EOPNOTSUPP) &&
   3262	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
   3263		effective_mode &= ~S_IRUGO;
   3264
   3265	if (((amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) == -EOPNOTSUPP) &&
   3266	      attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
   3267	      ((amdgpu_dpm_set_fan_control_mode(adev, U32_MAX) == -EOPNOTSUPP) &&
   3268	      attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
   3269		effective_mode &= ~S_IWUSR;
   3270
	/* not implemented yet for APUs other than GC 10.3.1 (vangogh) */
   3272	if (((adev->family == AMDGPU_FAMILY_SI) ||
   3273	     ((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(10, 3, 1)))) &&
   3274	    (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
   3275	     attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
   3276	     attr == &sensor_dev_attr_power1_cap.dev_attr.attr ||
   3277	     attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr))
   3278		return 0;
   3279
	/* not implemented yet for APUs having < GC 9.3.0 (Renoir) */
   3281	if (((adev->family == AMDGPU_FAMILY_SI) ||
   3282	     ((adev->flags & AMD_IS_APU) && (gc_ver < IP_VERSION(9, 3, 0)))) &&
   3283	    (attr == &sensor_dev_attr_power1_average.dev_attr.attr))
   3284		return 0;
   3285
	/* hide pwm max/min values if we can neither query nor manage the fan */
   3287	if (((amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) == -EOPNOTSUPP) &&
   3288	      (amdgpu_dpm_get_fan_speed_pwm(adev, NULL) == -EOPNOTSUPP) &&
   3289	      (amdgpu_dpm_set_fan_speed_rpm(adev, U32_MAX) == -EOPNOTSUPP) &&
   3290	      (amdgpu_dpm_get_fan_speed_rpm(adev, NULL) == -EOPNOTSUPP)) &&
   3291	    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
   3292	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
   3293		return 0;
   3294
   3295	if ((amdgpu_dpm_set_fan_speed_rpm(adev, U32_MAX) == -EOPNOTSUPP) &&
   3296	     (amdgpu_dpm_get_fan_speed_rpm(adev, NULL) == -EOPNOTSUPP) &&
   3297	     (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
   3298	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
   3299		return 0;
   3300
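	/*
	 * Note on the probing convention above: the dpm helpers are
	 * deliberately called with sentinel arguments (NULL buffers,
	 * U32_MAX speeds). A backend that does not implement the hook
	 * returns -EOPNOTSUPP, and backends that do are expected to
	 * reject the sentinel values as invalid, so the probe reveals
	 * capability without actually modifying fan state.
	 */
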
   3301	if ((adev->family == AMDGPU_FAMILY_SI ||	/* not implemented yet */
   3302	     adev->family == AMDGPU_FAMILY_KV) &&	/* not implemented yet */
   3303	    (attr == &sensor_dev_attr_in0_input.dev_attr.attr ||
   3304	     attr == &sensor_dev_attr_in0_label.dev_attr.attr))
   3305		return 0;
   3306
   3307	/* only APUs have vddnb */
   3308	if (!(adev->flags & AMD_IS_APU) &&
   3309	    (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
   3310	     attr == &sensor_dev_attr_in1_label.dev_attr.attr))
   3311		return 0;
   3312
   3313	/* no mclk on APUs */
   3314	if ((adev->flags & AMD_IS_APU) &&
   3315	    (attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
   3316	     attr == &sensor_dev_attr_freq2_label.dev_attr.attr))
   3317		return 0;
   3318
   3319	/* only SOC15 dGPUs support hotspot and mem temperatures */
   3320	if (((adev->flags & AMD_IS_APU) || gc_ver < IP_VERSION(9, 0, 0)) &&
   3321	    (attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
   3322	     attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
   3323	     attr == &sensor_dev_attr_temp3_crit.dev_attr.attr ||
   3324	     attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr ||
   3325	     attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
   3326	     attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
   3327	     attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr ||
   3328	     attr == &sensor_dev_attr_temp2_input.dev_attr.attr ||
   3329	     attr == &sensor_dev_attr_temp3_input.dev_attr.attr ||
   3330	     attr == &sensor_dev_attr_temp2_label.dev_attr.attr ||
   3331	     attr == &sensor_dev_attr_temp3_label.dev_attr.attr))
   3332		return 0;
   3333
   3334	/* only Vangogh has fast PPT limit and power labels */
	if ((gc_ver != IP_VERSION(10, 3, 1)) &&
   3336	    (attr == &sensor_dev_attr_power2_average.dev_attr.attr ||
   3337	     attr == &sensor_dev_attr_power2_cap_max.dev_attr.attr ||
   3338	     attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr ||
   3339	     attr == &sensor_dev_attr_power2_cap.dev_attr.attr ||
   3340	     attr == &sensor_dev_attr_power2_cap_default.dev_attr.attr ||
   3341	     attr == &sensor_dev_attr_power2_label.dev_attr.attr))
   3342		return 0;
   3343
   3344	return effective_mode;
   3345}
   3346
   3347static const struct attribute_group hwmon_attrgroup = {
   3348	.attrs = hwmon_attributes,
   3349	.is_visible = hwmon_attributes_visible,
   3350};
   3351
   3352static const struct attribute_group *hwmon_groups[] = {
   3353	&hwmon_attrgroup,
   3354	NULL
   3355};
   3356
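/*
 * Minimal sketch (hypothetical driver, not amdgpu code) of the
 * .is_visible contract wired up above: sysfs invokes the callback once
 * per attribute when the group is created; returning 0 hides the node,
 * and returning a mode (possibly with write bits stripped, as done for
 * the one-VF SRIOV case) selects its permissions.
 */
#if 0
static umode_t example_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	struct device *dev = kobj_to_dev(kobj);
	struct example_chip *chip = dev_get_drvdata(dev);

	if (!chip->has_fan && !strncmp(attr->name, "fan", 3))
		return 0;			/* hide entirely */

	return attr->mode & ~S_IWUSR;		/* expose read-only */
}
#endif
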
   3357int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
   3358{
   3359	int ret;
   3360	uint32_t mask = 0;
   3361
   3362	if (adev->pm.sysfs_initialized)
   3363		return 0;
   3364
   3365	if (adev->pm.dpm_enabled == 0)
   3366		return 0;
   3367
   3368	INIT_LIST_HEAD(&adev->pm.pm_attr_list);
   3369
   3370	adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
   3371								   DRIVER_NAME, adev,
   3372								   hwmon_groups);
   3373	if (IS_ERR(adev->pm.int_hwmon_dev)) {
   3374		ret = PTR_ERR(adev->pm.int_hwmon_dev);
   3375		dev_err(adev->dev,
   3376			"Unable to register hwmon device: %d\n", ret);
   3377		return ret;
   3378	}
   3379
   3380	switch (amdgpu_virt_get_sriov_vf_mode(adev)) {
   3381	case SRIOV_VF_MODE_ONE_VF:
   3382		mask = ATTR_FLAG_ONEVF;
   3383		break;
   3384	case SRIOV_VF_MODE_MULTI_VF:
   3385		mask = 0;
   3386		break;
   3387	case SRIOV_VF_MODE_BARE_METAL:
   3388	default:
   3389		mask = ATTR_FLAG_MASK_ALL;
   3390		break;
   3391	}
   3392
   3393	ret = amdgpu_device_attr_create_groups(adev,
   3394					       amdgpu_device_attrs,
   3395					       ARRAY_SIZE(amdgpu_device_attrs),
   3396					       mask,
   3397					       &adev->pm.pm_attr_list);
   3398	if (ret)
   3399		return ret;
   3400
   3401	adev->pm.sysfs_initialized = true;
   3402
   3403	return 0;
   3404}
   3405
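/*
 * For contrast, a self-contained sketch (hypothetical "example" driver,
 * not amdgpu code) of the same register/unregister pairing used by
 * amdgpu_pm_sysfs_init() above and amdgpu_pm_sysfs_fini() below:
 */
#if 0
static int example_hwmon_probe(struct device *dev, struct example_chip *chip)
{
	chip->hwmon_dev = hwmon_device_register_with_groups(dev, "example",
							    chip,
							    hwmon_groups);
	return PTR_ERR_OR_ZERO(chip->hwmon_dev);
}

static void example_hwmon_remove(struct example_chip *chip)
{
	hwmon_device_unregister(chip->hwmon_dev);
}
#endif
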
   3406void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
   3407{
   3408	if (adev->pm.dpm_enabled == 0)
   3409		return;
   3410
   3411	if (adev->pm.int_hwmon_dev)
   3412		hwmon_device_unregister(adev->pm.int_hwmon_dev);
   3413
   3414	amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
   3415}
   3416
   3417/*
   3418 * Debugfs info
   3419 */
   3420#if defined(CONFIG_DEBUG_FS)
   3421
static void amdgpu_debugfs_prints_cpu_info(struct seq_file *m,
					   struct amdgpu_device *adev)
{
	uint16_t *p_val;
	uint32_t size;
	int i;
	uint32_t num_cpu_cores = amdgpu_dpm_get_num_cpu_cores(adev);

	if (amdgpu_dpm_is_cclk_dpm_supported(adev)) {
		p_val = kcalloc(num_cpu_cores, sizeof(uint16_t),
				GFP_KERNEL);
		if (!p_val)
			return;

		/* read_sensor() takes the buffer size as an in/out argument */
		size = num_cpu_cores * sizeof(uint16_t);

		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_CPU_CLK,
					    (void *)p_val, &size)) {
			for (i = 0; i < num_cpu_cores; i++)
				seq_printf(m, "\t%u MHz (CPU%d)\n",
					   *(p_val + i), i);
		}

		kfree(p_val);
	}
}
   3443
   3444static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
   3445{
   3446	uint32_t mp1_ver = adev->ip_versions[MP1_HWIP][0];
   3447	uint32_t gc_ver = adev->ip_versions[GC_HWIP][0];
   3448	uint32_t value;
   3449	uint64_t value64 = 0;
   3450	uint32_t query = 0;
   3451	int size;
   3452
   3453	/* GPU Clocks */
   3454	size = sizeof(value);
   3455	seq_printf(m, "GFX Clocks and Power:\n");
   3456
   3457	amdgpu_debugfs_prints_cpu_info(m, adev);
   3458
   3459	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
   3460		seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
   3461	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
   3462		seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
   3463	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size))
   3464		seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100);
   3465	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size))
   3466		seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100);
   3467	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
   3468		seq_printf(m, "\t%u mV (VDDGFX)\n", value);
   3469	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
   3470		seq_printf(m, "\t%u mV (VDDNB)\n", value);
	size = sizeof(uint32_t);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size))
		/* query is 8.8 fixed point: scale the fractional byte to decimal */
		seq_printf(m, "\t%u.%03u W (average GPU)\n", query >> 8,
			   (query & 0xff) * 1000 / 256);
   3474	size = sizeof(value);
   3475	seq_printf(m, "\n");
   3476
   3477	/* GPU Temp */
   3478	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size))
   3479		seq_printf(m, "GPU Temperature: %u C\n", value/1000);
   3480
   3481	/* GPU Load */
   3482	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
   3483		seq_printf(m, "GPU Load: %u %%\n", value);
   3484	/* MEM Load */
   3485	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size))
   3486		seq_printf(m, "MEM Load: %u %%\n", value);
   3487
   3488	seq_printf(m, "\n");
   3489
   3490	/* SMC feature mask */
   3491	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
   3492		seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);
   3493
	/* ASICs newer than CHIP_VEGA20 support these sensors */
   3495	if (gc_ver != IP_VERSION(9, 4, 0) && mp1_ver > IP_VERSION(9, 0, 0)) {
   3496		/* VCN clocks */
   3497		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
   3498			if (!value) {
   3499				seq_printf(m, "VCN: Disabled\n");
   3500			} else {
   3501				seq_printf(m, "VCN: Enabled\n");
   3502				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
   3503					seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
   3504				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
   3505					seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
   3506			}
   3507		}
   3508		seq_printf(m, "\n");
   3509	} else {
   3510		/* UVD clocks */
   3511		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
   3512			if (!value) {
   3513				seq_printf(m, "UVD: Disabled\n");
   3514			} else {
   3515				seq_printf(m, "UVD: Enabled\n");
   3516				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
   3517					seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
   3518				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
   3519					seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
   3520			}
   3521		}
   3522		seq_printf(m, "\n");
   3523
   3524		/* VCE clocks */
   3525		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
   3526			if (!value) {
   3527				seq_printf(m, "VCE: Disabled\n");
   3528			} else {
   3529				seq_printf(m, "VCE: Enabled\n");
   3530				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
   3531					seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
   3532			}
   3533		}
   3534	}
   3535
   3536	return 0;
   3537}
   3538
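/*
 * Usage note: the report assembled above is what userspace sees when
 * reading the debugfs node created in amdgpu_debugfs_pm_init(), e.g.
 *
 *	cat /sys/kernel/debug/dri/0/amdgpu_pm_info
 *
 * (root and a mounted debugfs required; the "0" minor number is an
 * assumption that depends on DRM device enumeration).
 */
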
   3539static void amdgpu_parse_cg_state(struct seq_file *m, u64 flags)
   3540{
   3541	int i;
   3542
   3543	for (i = 0; clocks[i].flag; i++)
   3544		seq_printf(m, "\t%s: %s\n", clocks[i].name,
   3545			   (flags & clocks[i].flag) ? "On" : "Off");
   3546}
   3547
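/*
 * Worked example of the decode above: each clocks[] entry tests exactly
 * one bit of the mask, so a flags word with a single support bit set
 * prints "On" for that entry's row and "Off" for every other row. The
 * table is terminated by an entry whose .flag is 0, which is what stops
 * the loop.
 */
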
   3548static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
   3549{
   3550	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
   3551	struct drm_device *dev = adev_to_drm(adev);
   3552	u64 flags = 0;
   3553	int r;
   3554
   3555	if (amdgpu_in_reset(adev))
   3556		return -EPERM;
   3557	if (adev->in_suspend && !adev->in_runpm)
   3558		return -EPERM;
   3559
	r = pm_runtime_get_sync(dev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(dev->dev);
		return r;
	}

	/*
	 * pm_runtime_get_sync() returns 1 if the device was already
	 * active; seq_file discards the output of a show() callback
	 * that returns a positive value, so normalize to 0.
	 */
	r = 0;
   3565
   3566	if (amdgpu_dpm_debugfs_print_current_performance_level(adev, m)) {
   3567		r = amdgpu_debugfs_pm_info_pp(m, adev);
   3568		if (r)
   3569			goto out;
   3570	}
   3571
   3572	amdgpu_device_ip_get_clockgating_state(adev, &flags);
   3573
   3574	seq_printf(m, "Clock Gating Flags Mask: 0x%llx\n", flags);
   3575	amdgpu_parse_cg_state(m, flags);
   3576	seq_printf(m, "\n");
   3577
   3578out:
   3579	pm_runtime_mark_last_busy(dev->dev);
   3580	pm_runtime_put_autosuspend(dev->dev);
   3581
   3582	return r;
   3583}
   3584
   3585DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_pm_info);
   3586
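/*
 * DEFINE_SHOW_ATTRIBUTE() above generates the single_open() boilerplate
 * for amdgpu_debugfs_pm_info_fops; its expansion is approximately:
 */
#if 0
static int amdgpu_debugfs_pm_info_open(struct inode *inode,
				       struct file *file)
{
	return single_open(file, amdgpu_debugfs_pm_info_show,
			   inode->i_private);
}

static const struct file_operations amdgpu_debugfs_pm_info_fops = {
	.owner	 = THIS_MODULE,
	.open	 = amdgpu_debugfs_pm_info_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};
#endif
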
   3587/*
 * amdgpu_pm_prv_buffer_read - Read memory region allocated to FW
 *
 * Reads the debug memory region allocated to PMFW
   3591 */
   3592static ssize_t amdgpu_pm_prv_buffer_read(struct file *f, char __user *buf,
   3593					 size_t size, loff_t *pos)
   3594{
   3595	struct amdgpu_device *adev = file_inode(f)->i_private;
   3596	size_t smu_prv_buf_size;
   3597	void *smu_prv_buf;
   3598	int ret = 0;
   3599
   3600	if (amdgpu_in_reset(adev))
   3601		return -EPERM;
   3602	if (adev->in_suspend && !adev->in_runpm)
   3603		return -EPERM;
   3604
   3605	ret = amdgpu_dpm_get_smu_prv_buf_details(adev, &smu_prv_buf, &smu_prv_buf_size);
   3606	if (ret)
   3607		return ret;
   3608
   3609	if (!smu_prv_buf || !smu_prv_buf_size)
   3610		return -EINVAL;
   3611
   3612	return simple_read_from_buffer(buf, size, pos, smu_prv_buf,
   3613				       smu_prv_buf_size);
   3614}
   3615
   3616static const struct file_operations amdgpu_debugfs_pm_prv_buffer_fops = {
   3617	.owner = THIS_MODULE,
   3618	.open = simple_open,
   3619	.read = amdgpu_pm_prv_buffer_read,
   3620	.llseek = default_llseek,
   3621};
   3622
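/*
 * Usage note: when adev->pm.smu_prv_buffer_size is non-zero (see
 * amdgpu_debugfs_pm_init() below), the PMFW debug region can be dumped
 * with e.g.
 *
 *	dd if=/sys/kernel/debug/dri/0/amdgpu_pm_prv_buffer of=pmfw.bin
 *
 * (the "0" minor number again being enumeration-dependent).
 */
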
   3623#endif
   3624
   3625void amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
   3626{
   3627#if defined(CONFIG_DEBUG_FS)
   3628	struct drm_minor *minor = adev_to_drm(adev)->primary;
   3629	struct dentry *root = minor->debugfs_root;
   3630
   3631	if (!adev->pm.dpm_enabled)
   3632		return;
   3633
   3634	debugfs_create_file("amdgpu_pm_info", 0444, root, adev,
   3635			    &amdgpu_debugfs_pm_info_fops);
   3636
   3637	if (adev->pm.smu_prv_buffer_size > 0)
   3638		debugfs_create_file_size("amdgpu_pm_prv_buffer", 0444, root,
   3639					 adev,
   3640					 &amdgpu_debugfs_pm_prv_buffer_fops,
   3641					 adev->pm.smu_prv_buffer_size);
   3642
   3643	amdgpu_dpm_stb_debug_fs_init(adev);
   3644#endif
   3645}