cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

radeon_pm.c (58765B)


      1/*
      2 * Permission is hereby granted, free of charge, to any person obtaining a
      3 * copy of this software and associated documentation files (the "Software"),
      4 * to deal in the Software without restriction, including without limitation
      5 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
      6 * and/or sell copies of the Software, and to permit persons to whom the
      7 * Software is furnished to do so, subject to the following conditions:
      8 *
      9 * The above copyright notice and this permission notice shall be included in
     10 * all copies or substantial portions of the Software.
     11 *
     12 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     13 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     14 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     15 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
     16 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
     17 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
     18 * OTHER DEALINGS IN THE SOFTWARE.
     19 *
     20 * Authors: Rafał Miłecki <zajec5@gmail.com>
     21 *          Alex Deucher <alexdeucher@gmail.com>
     22 */
     23
     24#include <linux/hwmon-sysfs.h>
     25#include <linux/hwmon.h>
     26#include <linux/pci.h>
     27#include <linux/power_supply.h>
     28
     29#include <drm/drm_vblank.h>
     30
     31#include "atom.h"
     32#include "avivod.h"
     33#include "r600_dpm.h"
     34#include "radeon.h"
     35#include "radeon_pm.h"
     36
     37#define RADEON_IDLE_LOOP_MS 100
     38#define RADEON_RECLOCK_DELAY_MS 200
     39#define RADEON_WAIT_VBLANK_TIMEOUT 200
     40
     41static const char *radeon_pm_state_type_name[5] = {
     42	"",
     43	"Powersave",
     44	"Battery",
     45	"Balanced",
     46	"Performance",
     47};
     48
     49static void radeon_dynpm_idle_work_handler(struct work_struct *work);
     50static void radeon_debugfs_pm_init(struct radeon_device *rdev);
     51static bool radeon_pm_in_vbl(struct radeon_device *rdev);
     52static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
     53static void radeon_pm_update_profile(struct radeon_device *rdev);
     54static void radeon_pm_set_clocks(struct radeon_device *rdev);
     55
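        /* Return the index of the instance'th power state of the requested
         * type, or the default power state index when no such state exists.
         */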
     56int radeon_pm_get_type_index(struct radeon_device *rdev,
     57			     enum radeon_pm_state_type ps_type,
     58			     int instance)
     59{
     60	int i;
     61	int found_instance = -1;
     62
     63	for (i = 0; i < rdev->pm.num_power_states; i++) {
     64		if (rdev->pm.power_state[i].type == ps_type) {
     65			found_instance++;
     66			if (found_instance == instance)
     67				return i;
     68		}
     69	}
     70	/* return default if no match */
     71	return rdev->pm.default_power_state_index;
     72}
     73
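        /* ACPI AC/DC change notification: refresh dpm.ac_power (and BAPM on
         * ARUBA) under DPM, or re-evaluate the AUTO profile when using the
         * profile method.
         */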
     74void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
     75{
     76	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
     77		mutex_lock(&rdev->pm.mutex);
     78		if (power_supply_is_system_supplied() > 0)
     79			rdev->pm.dpm.ac_power = true;
     80		else
     81			rdev->pm.dpm.ac_power = false;
     82		if (rdev->family == CHIP_ARUBA) {
     83			if (rdev->asic->dpm.enable_bapm)
     84				radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
     85		}
     86		mutex_unlock(&rdev->pm.mutex);
     87	} else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
     88		if (rdev->pm.profile == PM_PROFILE_AUTO) {
     89			mutex_lock(&rdev->pm.mutex);
     90			radeon_pm_update_profile(rdev);
     91			radeon_pm_set_clocks(rdev);
     92			mutex_unlock(&rdev->pm.mutex);
     93		}
     94	}
     95}
     96
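        /* Map the selected profile (using AC power and the active crtc count
         * for PM_PROFILE_AUTO) to a profile index, then derive the requested
         * power state and clock mode for the dpms on/off case.
         */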
     97static void radeon_pm_update_profile(struct radeon_device *rdev)
     98{
     99	switch (rdev->pm.profile) {
    100	case PM_PROFILE_DEFAULT:
    101		rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX;
    102		break;
    103	case PM_PROFILE_AUTO:
    104		if (power_supply_is_system_supplied() > 0) {
    105			if (rdev->pm.active_crtc_count > 1)
    106				rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
    107			else
    108				rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
    109		} else {
    110			if (rdev->pm.active_crtc_count > 1)
    111				rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
    112			else
    113				rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
    114		}
    115		break;
    116	case PM_PROFILE_LOW:
    117		if (rdev->pm.active_crtc_count > 1)
    118			rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
    119		else
    120			rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
    121		break;
    122	case PM_PROFILE_MID:
    123		if (rdev->pm.active_crtc_count > 1)
    124			rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
    125		else
    126			rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
    127		break;
    128	case PM_PROFILE_HIGH:
    129		if (rdev->pm.active_crtc_count > 1)
    130			rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
    131		else
    132			rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
    133		break;
    134	}
    135
    136	if (rdev->pm.active_crtc_count == 0) {
    137		rdev->pm.requested_power_state_index =
    138			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx;
    139		rdev->pm.requested_clock_mode_index =
    140			rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx;
    141	} else {
    142		rdev->pm.requested_power_state_index =
    143			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx;
    144		rdev->pm.requested_clock_mode_index =
    145			rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx;
    146	}
    147}
    148
    149static void radeon_unmap_vram_bos(struct radeon_device *rdev)
    150{
    151	struct radeon_bo *bo, *n;
    152
    153	if (list_empty(&rdev->gem.objects))
    154		return;
    155
    156	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
    157		if (bo->tbo.resource->mem_type == TTM_PL_VRAM)
    158			ttm_bo_unmap_virtual(&bo->tbo);
    159	}
    160}
    161
    162static void radeon_sync_with_vblank(struct radeon_device *rdev)
    163{
    164	if (rdev->pm.active_crtcs) {
    165		rdev->pm.vblank_sync = false;
    166		wait_event_timeout(
    167			rdev->irq.vblank_queue, rdev->pm.vblank_sync,
    168			msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
    169	}
    170}
    171
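        /* Program the requested engine/memory clocks and voltage.  Clocks are
         * clamped to the defaults and only changed while the GUI is idle;
         * voltage is raised before upclocking and lowered after downclocking.
         */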
    172static void radeon_set_power_state(struct radeon_device *rdev)
    173{
    174	u32 sclk, mclk;
    175	bool misc_after = false;
    176
    177	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
    178	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
    179		return;
    180
    181	if (radeon_gui_idle(rdev)) {
    182		sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
    183			clock_info[rdev->pm.requested_clock_mode_index].sclk;
    184		if (sclk > rdev->pm.default_sclk)
    185			sclk = rdev->pm.default_sclk;
    186
    187		/* starting with BTC, there is one state that is used for both
     188		 * MH and SH.  The difference is that we always use the high clock index for
    189		 * mclk and vddci.
    190		 */
    191		if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
    192		    (rdev->family >= CHIP_BARTS) &&
    193		    rdev->pm.active_crtc_count &&
    194		    ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
    195		     (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
    196			mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
    197				clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].mclk;
    198		else
    199			mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
    200				clock_info[rdev->pm.requested_clock_mode_index].mclk;
    201
    202		if (mclk > rdev->pm.default_mclk)
    203			mclk = rdev->pm.default_mclk;
    204
    205		/* upvolt before raising clocks, downvolt after lowering clocks */
    206		if (sclk < rdev->pm.current_sclk)
    207			misc_after = true;
    208
    209		radeon_sync_with_vblank(rdev);
    210
    211		if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
    212			if (!radeon_pm_in_vbl(rdev))
    213				return;
    214		}
    215
    216		radeon_pm_prepare(rdev);
    217
    218		if (!misc_after)
    219			/* voltage, pcie lanes, etc.*/
    220			radeon_pm_misc(rdev);
    221
    222		/* set engine clock */
    223		if (sclk != rdev->pm.current_sclk) {
    224			radeon_pm_debug_check_in_vbl(rdev, false);
    225			radeon_set_engine_clock(rdev, sclk);
    226			radeon_pm_debug_check_in_vbl(rdev, true);
    227			rdev->pm.current_sclk = sclk;
    228			DRM_DEBUG_DRIVER("Setting: e: %d\n", sclk);
    229		}
    230
    231		/* set memory clock */
    232		if (rdev->asic->pm.set_memory_clock && (mclk != rdev->pm.current_mclk)) {
    233			radeon_pm_debug_check_in_vbl(rdev, false);
    234			radeon_set_memory_clock(rdev, mclk);
    235			radeon_pm_debug_check_in_vbl(rdev, true);
    236			rdev->pm.current_mclk = mclk;
    237			DRM_DEBUG_DRIVER("Setting: m: %d\n", mclk);
    238		}
    239
    240		if (misc_after)
    241			/* voltage, pcie lanes, etc.*/
    242			radeon_pm_misc(rdev);
    243
    244		radeon_pm_finish(rdev);
    245
    246		rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
    247		rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
    248	} else
    249		DRM_DEBUG_DRIVER("pm: GUI not idle!!!\n");
    250}
    251
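        /* Perform a reclock: take the mclk and ring locks, drain the rings,
         * unmap VRAM BOs and hold vblank references around
         * radeon_set_power_state(), then refresh the bandwidth/watermarks.
         */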
    252static void radeon_pm_set_clocks(struct radeon_device *rdev)
    253{
    254	struct drm_crtc *crtc;
    255	int i, r;
    256
    257	/* no need to take locks, etc. if nothing's going to change */
    258	if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
    259	    (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
    260		return;
    261
    262	down_write(&rdev->pm.mclk_lock);
    263	mutex_lock(&rdev->ring_lock);
    264
    265	/* wait for the rings to drain */
    266	for (i = 0; i < RADEON_NUM_RINGS; i++) {
    267		struct radeon_ring *ring = &rdev->ring[i];
    268		if (!ring->ready) {
    269			continue;
    270		}
    271		r = radeon_fence_wait_empty(rdev, i);
    272		if (r) {
     273			/* needs a GPU reset; don't reset here */
    274			mutex_unlock(&rdev->ring_lock);
    275			up_write(&rdev->pm.mclk_lock);
    276			return;
    277		}
    278	}
    279
    280	radeon_unmap_vram_bos(rdev);
    281
    282	if (rdev->irq.installed) {
    283		i = 0;
    284		drm_for_each_crtc(crtc, rdev->ddev) {
    285			if (rdev->pm.active_crtcs & (1 << i)) {
    286				/* This can fail if a modeset is in progress */
    287				if (drm_crtc_vblank_get(crtc) == 0)
    288					rdev->pm.req_vblank |= (1 << i);
    289				else
    290					DRM_DEBUG_DRIVER("crtc %d no vblank, can glitch\n",
    291							 i);
    292			}
    293			i++;
    294		}
    295	}
    296
    297	radeon_set_power_state(rdev);
    298
    299	if (rdev->irq.installed) {
    300		i = 0;
    301		drm_for_each_crtc(crtc, rdev->ddev) {
    302			if (rdev->pm.req_vblank & (1 << i)) {
    303				rdev->pm.req_vblank &= ~(1 << i);
    304				drm_crtc_vblank_put(crtc);
    305			}
    306			i++;
    307		}
    308	}
    309
    310	/* update display watermarks based on new power state */
    311	radeon_update_bandwidth_info(rdev);
    312	if (rdev->pm.active_crtc_count)
    313		radeon_bandwidth_update(rdev);
    314
    315	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
    316
    317	mutex_unlock(&rdev->ring_lock);
    318	up_write(&rdev->pm.mclk_lock);
    319}
    320
    321static void radeon_pm_print_states(struct radeon_device *rdev)
    322{
    323	int i, j;
    324	struct radeon_power_state *power_state;
    325	struct radeon_pm_clock_info *clock_info;
    326
    327	DRM_DEBUG_DRIVER("%d Power State(s)\n", rdev->pm.num_power_states);
    328	for (i = 0; i < rdev->pm.num_power_states; i++) {
    329		power_state = &rdev->pm.power_state[i];
    330		DRM_DEBUG_DRIVER("State %d: %s\n", i,
    331			radeon_pm_state_type_name[power_state->type]);
    332		if (i == rdev->pm.default_power_state_index)
    333			DRM_DEBUG_DRIVER("\tDefault");
    334		if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
    335			DRM_DEBUG_DRIVER("\t%d PCIE Lanes\n", power_state->pcie_lanes);
    336		if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
    337			DRM_DEBUG_DRIVER("\tSingle display only\n");
    338		DRM_DEBUG_DRIVER("\t%d Clock Mode(s)\n", power_state->num_clock_modes);
    339		for (j = 0; j < power_state->num_clock_modes; j++) {
    340			clock_info = &(power_state->clock_info[j]);
    341			if (rdev->flags & RADEON_IS_IGP)
    342				DRM_DEBUG_DRIVER("\t\t%d e: %d\n",
    343						 j,
    344						 clock_info->sclk * 10);
    345			else
    346				DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d\n",
    347						 j,
    348						 clock_info->sclk * 10,
    349						 clock_info->mclk * 10,
    350						 clock_info->voltage.voltage);
    351		}
    352	}
    353}
    354
    355static ssize_t radeon_get_pm_profile(struct device *dev,
    356				     struct device_attribute *attr,
    357				     char *buf)
    358{
    359	struct drm_device *ddev = dev_get_drvdata(dev);
    360	struct radeon_device *rdev = ddev->dev_private;
    361	int cp = rdev->pm.profile;
    362
    363	return sysfs_emit(buf, "%s\n", (cp == PM_PROFILE_AUTO) ? "auto" :
    364			  (cp == PM_PROFILE_LOW) ? "low" :
    365			  (cp == PM_PROFILE_MID) ? "mid" :
    366			  (cp == PM_PROFILE_HIGH) ? "high" : "default");
    367}
    368
    369static ssize_t radeon_set_pm_profile(struct device *dev,
    370				     struct device_attribute *attr,
    371				     const char *buf,
    372				     size_t count)
    373{
    374	struct drm_device *ddev = dev_get_drvdata(dev);
    375	struct radeon_device *rdev = ddev->dev_private;
    376
    377	/* Can't set profile when the card is off */
    378	if  ((rdev->flags & RADEON_IS_PX) &&
    379	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
    380		return -EINVAL;
    381
    382	mutex_lock(&rdev->pm.mutex);
    383	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
    384		if (strncmp("default", buf, strlen("default")) == 0)
    385			rdev->pm.profile = PM_PROFILE_DEFAULT;
    386		else if (strncmp("auto", buf, strlen("auto")) == 0)
    387			rdev->pm.profile = PM_PROFILE_AUTO;
    388		else if (strncmp("low", buf, strlen("low")) == 0)
    389			rdev->pm.profile = PM_PROFILE_LOW;
    390		else if (strncmp("mid", buf, strlen("mid")) == 0)
    391			rdev->pm.profile = PM_PROFILE_MID;
    392		else if (strncmp("high", buf, strlen("high")) == 0)
    393			rdev->pm.profile = PM_PROFILE_HIGH;
    394		else {
    395			count = -EINVAL;
    396			goto fail;
    397		}
    398		radeon_pm_update_profile(rdev);
    399		radeon_pm_set_clocks(rdev);
    400	} else
    401		count = -EINVAL;
    402
    403fail:
    404	mutex_unlock(&rdev->pm.mutex);
    405
    406	return count;
    407}
    408
    409static ssize_t radeon_get_pm_method(struct device *dev,
    410				    struct device_attribute *attr,
    411				    char *buf)
    412{
    413	struct drm_device *ddev = dev_get_drvdata(dev);
    414	struct radeon_device *rdev = ddev->dev_private;
    415	int pm = rdev->pm.pm_method;
    416
    417	return sysfs_emit(buf, "%s\n", (pm == PM_METHOD_DYNPM) ? "dynpm" :
    418			  (pm == PM_METHOD_PROFILE) ? "profile" : "dpm");
    419}
    420
    421static ssize_t radeon_set_pm_method(struct device *dev,
    422				    struct device_attribute *attr,
    423				    const char *buf,
    424				    size_t count)
    425{
    426	struct drm_device *ddev = dev_get_drvdata(dev);
    427	struct radeon_device *rdev = ddev->dev_private;
    428
    429	/* Can't set method when the card is off */
    430	if  ((rdev->flags & RADEON_IS_PX) &&
    431	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
    432		count = -EINVAL;
    433		goto fail;
    434	}
    435
    436	/* we don't support the legacy modes with dpm */
    437	if (rdev->pm.pm_method == PM_METHOD_DPM) {
    438		count = -EINVAL;
    439		goto fail;
    440	}
    441
    442	if (strncmp("dynpm", buf, strlen("dynpm")) == 0) {
    443		mutex_lock(&rdev->pm.mutex);
    444		rdev->pm.pm_method = PM_METHOD_DYNPM;
    445		rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
    446		rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
    447		mutex_unlock(&rdev->pm.mutex);
    448	} else if (strncmp("profile", buf, strlen("profile")) == 0) {
    449		mutex_lock(&rdev->pm.mutex);
    450		/* disable dynpm */
    451		rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
    452		rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
    453		rdev->pm.pm_method = PM_METHOD_PROFILE;
    454		mutex_unlock(&rdev->pm.mutex);
    455		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
    456	} else {
    457		count = -EINVAL;
    458		goto fail;
    459	}
    460	radeon_pm_compute_clocks(rdev);
    461fail:
    462	return count;
    463}
    464
    465static ssize_t radeon_get_dpm_state(struct device *dev,
    466				    struct device_attribute *attr,
    467				    char *buf)
    468{
    469	struct drm_device *ddev = dev_get_drvdata(dev);
    470	struct radeon_device *rdev = ddev->dev_private;
    471	enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
    472
    473	return sysfs_emit(buf, "%s\n",
    474			  (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
    475			  (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
    476}
    477
    478static ssize_t radeon_set_dpm_state(struct device *dev,
    479				    struct device_attribute *attr,
    480				    const char *buf,
    481				    size_t count)
    482{
    483	struct drm_device *ddev = dev_get_drvdata(dev);
    484	struct radeon_device *rdev = ddev->dev_private;
    485
    486	mutex_lock(&rdev->pm.mutex);
    487	if (strncmp("battery", buf, strlen("battery")) == 0)
    488		rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
    489	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
    490		rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
    491	else if (strncmp("performance", buf, strlen("performance")) == 0)
    492		rdev->pm.dpm.user_state = POWER_STATE_TYPE_PERFORMANCE;
    493	else {
    494		mutex_unlock(&rdev->pm.mutex);
    495		count = -EINVAL;
    496		goto fail;
    497	}
    498	mutex_unlock(&rdev->pm.mutex);
    499
    500	/* Can't set dpm state when the card is off */
    501	if (!(rdev->flags & RADEON_IS_PX) ||
    502	    (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
    503		radeon_pm_compute_clocks(rdev);
    504
    505fail:
    506	return count;
    507}
    508
    509static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev,
    510						       struct device_attribute *attr,
    511						       char *buf)
    512{
    513	struct drm_device *ddev = dev_get_drvdata(dev);
    514	struct radeon_device *rdev = ddev->dev_private;
    515	enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
    516
    517	if  ((rdev->flags & RADEON_IS_PX) &&
    518	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
    519		return sysfs_emit(buf, "off\n");
    520
    521	return sysfs_emit(buf, "%s\n",
    522			  (level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" :
    523			  (level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
    524}
    525
    526static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev,
    527						       struct device_attribute *attr,
    528						       const char *buf,
    529						       size_t count)
    530{
    531	struct drm_device *ddev = dev_get_drvdata(dev);
    532	struct radeon_device *rdev = ddev->dev_private;
    533	enum radeon_dpm_forced_level level;
    534	int ret = 0;
    535
    536	/* Can't force performance level when the card is off */
    537	if  ((rdev->flags & RADEON_IS_PX) &&
    538	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
    539		return -EINVAL;
    540
    541	mutex_lock(&rdev->pm.mutex);
    542	if (strncmp("low", buf, strlen("low")) == 0) {
    543		level = RADEON_DPM_FORCED_LEVEL_LOW;
    544	} else if (strncmp("high", buf, strlen("high")) == 0) {
    545		level = RADEON_DPM_FORCED_LEVEL_HIGH;
    546	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
    547		level = RADEON_DPM_FORCED_LEVEL_AUTO;
    548	} else {
    549		count = -EINVAL;
    550		goto fail;
    551	}
    552	if (rdev->asic->dpm.force_performance_level) {
    553		if (rdev->pm.dpm.thermal_active) {
    554			count = -EINVAL;
    555			goto fail;
    556		}
    557		ret = radeon_dpm_force_performance_level(rdev, level);
    558		if (ret)
    559			count = -EINVAL;
    560	}
    561fail:
    562	mutex_unlock(&rdev->pm.mutex);
    563
    564	return count;
    565}
    566
    567static ssize_t radeon_hwmon_get_pwm1_enable(struct device *dev,
    568					    struct device_attribute *attr,
    569					    char *buf)
    570{
    571	struct radeon_device *rdev = dev_get_drvdata(dev);
    572	u32 pwm_mode = 0;
    573
    574	if (rdev->asic->dpm.fan_ctrl_get_mode)
    575		pwm_mode = rdev->asic->dpm.fan_ctrl_get_mode(rdev);
    576
     577	/* never 0 (full speed); always fuse- or smc-controlled */
    578	return sprintf(buf, "%i\n", pwm_mode == FDO_PWM_MODE_STATIC ? 1 : 2);
    579}
    580
    581static ssize_t radeon_hwmon_set_pwm1_enable(struct device *dev,
    582					    struct device_attribute *attr,
    583					    const char *buf,
    584					    size_t count)
    585{
    586	struct radeon_device *rdev = dev_get_drvdata(dev);
    587	int err;
    588	int value;
    589
     590	if (!rdev->asic->dpm.fan_ctrl_set_mode)
    591		return -EINVAL;
    592
    593	err = kstrtoint(buf, 10, &value);
    594	if (err)
    595		return err;
    596
    597	switch (value) {
    598	case 1: /* manual, percent-based */
    599		rdev->asic->dpm.fan_ctrl_set_mode(rdev, FDO_PWM_MODE_STATIC);
    600		break;
    601	default: /* disable */
    602		rdev->asic->dpm.fan_ctrl_set_mode(rdev, 0);
    603		break;
    604	}
    605
    606	return count;
    607}
    608
    609static ssize_t radeon_hwmon_get_pwm1_min(struct device *dev,
    610					 struct device_attribute *attr,
    611					 char *buf)
    612{
    613	return sprintf(buf, "%i\n", 0);
    614}
    615
    616static ssize_t radeon_hwmon_get_pwm1_max(struct device *dev,
    617					 struct device_attribute *attr,
    618					 char *buf)
    619{
    620	return sprintf(buf, "%i\n", 255);
    621}
    622
    623static ssize_t radeon_hwmon_set_pwm1(struct device *dev,
    624				     struct device_attribute *attr,
    625				     const char *buf, size_t count)
    626{
    627	struct radeon_device *rdev = dev_get_drvdata(dev);
    628	int err;
    629	u32 value;
    630
    631	err = kstrtou32(buf, 10, &value);
    632	if (err)
    633		return err;
    634
    635	value = (value * 100) / 255;
    636
    637	err = rdev->asic->dpm.set_fan_speed_percent(rdev, value);
    638	if (err)
    639		return err;
    640
    641	return count;
    642}
    643
    644static ssize_t radeon_hwmon_get_pwm1(struct device *dev,
    645				     struct device_attribute *attr,
    646				     char *buf)
    647{
    648	struct radeon_device *rdev = dev_get_drvdata(dev);
    649	int err;
    650	u32 speed;
    651
    652	err = rdev->asic->dpm.get_fan_speed_percent(rdev, &speed);
    653	if (err)
    654		return err;
    655
    656	speed = (speed * 255) / 100;
    657
    658	return sprintf(buf, "%i\n", speed);
    659}
    660
    661static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile);
    662static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method);
    663static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, radeon_get_dpm_state, radeon_set_dpm_state);
    664static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
    665		   radeon_get_dpm_forced_performance_level,
    666		   radeon_set_dpm_forced_performance_level);
    667
    668static ssize_t radeon_hwmon_show_temp(struct device *dev,
    669				      struct device_attribute *attr,
    670				      char *buf)
    671{
    672	struct radeon_device *rdev = dev_get_drvdata(dev);
    673	struct drm_device *ddev = rdev->ddev;
    674	int temp;
    675
    676	/* Can't get temperature when the card is off */
    677	if  ((rdev->flags & RADEON_IS_PX) &&
    678	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
    679		return -EINVAL;
    680
    681	if (rdev->asic->pm.get_temperature)
    682		temp = radeon_get_temperature(rdev);
    683	else
    684		temp = 0;
    685
    686	return sysfs_emit(buf, "%d\n", temp);
    687}
    688
    689static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
    690					     struct device_attribute *attr,
    691					     char *buf)
    692{
    693	struct radeon_device *rdev = dev_get_drvdata(dev);
    694	int hyst = to_sensor_dev_attr(attr)->index;
    695	int temp;
    696
    697	if (hyst)
    698		temp = rdev->pm.dpm.thermal.min_temp;
    699	else
    700		temp = rdev->pm.dpm.thermal.max_temp;
    701
    702	return sysfs_emit(buf, "%d\n", temp);
    703}
    704
    705static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0);
    706static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0);
    707static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1);
    708static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, radeon_hwmon_get_pwm1, radeon_hwmon_set_pwm1, 0);
    709static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, radeon_hwmon_get_pwm1_enable, radeon_hwmon_set_pwm1_enable, 0);
    710static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, radeon_hwmon_get_pwm1_min, NULL, 0);
    711static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, radeon_hwmon_get_pwm1_max, NULL, 0);
    712
    713static ssize_t radeon_hwmon_show_sclk(struct device *dev,
    714				      struct device_attribute *attr, char *buf)
    715{
    716	struct radeon_device *rdev = dev_get_drvdata(dev);
    717	struct drm_device *ddev = rdev->ddev;
    718	u32 sclk = 0;
    719
    720	/* Can't get clock frequency when the card is off */
    721	if ((rdev->flags & RADEON_IS_PX) &&
    722	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
    723		return -EINVAL;
    724
    725	if (rdev->asic->dpm.get_current_sclk)
    726		sclk = radeon_dpm_get_current_sclk(rdev);
    727
     728	/* Value returned by dpm is in 10 kHz units;
     729	 * convert it to Hz for hwmon */
    730	sclk *= 10000;
    731
    732	return sysfs_emit(buf, "%u\n", sclk);
    733}
    734
    735static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, radeon_hwmon_show_sclk, NULL,
    736			  0);
    737
    738static ssize_t radeon_hwmon_show_vddc(struct device *dev,
    739				      struct device_attribute *attr, char *buf)
    740{
    741	struct radeon_device *rdev = dev_get_drvdata(dev);
    742	struct drm_device *ddev = rdev->ddev;
    743	u16 vddc = 0;
    744
    745	/* Can't get vddc when the card is off */
    746	if ((rdev->flags & RADEON_IS_PX) &&
    747		(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
    748		return -EINVAL;
    749
    750	if (rdev->asic->dpm.get_current_vddc)
    751		vddc = rdev->asic->dpm.get_current_vddc(rdev);
    752
    753	return sysfs_emit(buf, "%u\n", vddc);
    754}
    755
    756static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, radeon_hwmon_show_vddc, NULL,
    757			  0);
    758
    759static struct attribute *hwmon_attributes[] = {
    760	&sensor_dev_attr_temp1_input.dev_attr.attr,
    761	&sensor_dev_attr_temp1_crit.dev_attr.attr,
    762	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
    763	&sensor_dev_attr_pwm1.dev_attr.attr,
    764	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
    765	&sensor_dev_attr_pwm1_min.dev_attr.attr,
    766	&sensor_dev_attr_pwm1_max.dev_attr.attr,
    767	&sensor_dev_attr_freq1_input.dev_attr.attr,
    768	&sensor_dev_attr_in0_input.dev_attr.attr,
    769	NULL
    770};
    771
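        /* Hide hwmon attributes that do not apply: DPM-only sensors when DPM
         * is not in use, vddc without a get_current_vddc callback, and fan
         * controls when there is no fan or no fan callbacks for this asic.
         */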
    772static umode_t hwmon_attributes_visible(struct kobject *kobj,
    773					struct attribute *attr, int index)
    774{
    775	struct device *dev = kobj_to_dev(kobj);
    776	struct radeon_device *rdev = dev_get_drvdata(dev);
    777	umode_t effective_mode = attr->mode;
    778
    779	/* Skip attributes if DPM is not enabled */
    780	if (rdev->pm.pm_method != PM_METHOD_DPM &&
    781	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
    782	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
    783	     attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
    784	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
    785	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
    786	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
    787	     attr == &sensor_dev_attr_freq1_input.dev_attr.attr ||
    788	     attr == &sensor_dev_attr_in0_input.dev_attr.attr))
    789		return 0;
    790
    791	/* Skip vddc attribute if get_current_vddc is not implemented */
     792	if (attr == &sensor_dev_attr_in0_input.dev_attr.attr &&
     793	    !rdev->asic->dpm.get_current_vddc)
    794		return 0;
    795
    796	/* Skip fan attributes if fan is not present */
    797	if (rdev->pm.no_fan &&
    798	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
    799	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
    800	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
    801	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
    802		return 0;
    803
    804	/* mask fan attributes if we have no bindings for this asic to expose */
    805	if ((!rdev->asic->dpm.get_fan_speed_percent &&
    806	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
    807	    (!rdev->asic->dpm.fan_ctrl_get_mode &&
    808	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
    809		effective_mode &= ~S_IRUGO;
    810
    811	if ((!rdev->asic->dpm.set_fan_speed_percent &&
    812	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
    813	    (!rdev->asic->dpm.fan_ctrl_set_mode &&
    814	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
    815		effective_mode &= ~S_IWUSR;
    816
    817	/* hide max/min values if we can't both query and manage the fan */
    818	if ((!rdev->asic->dpm.set_fan_speed_percent &&
    819	     !rdev->asic->dpm.get_fan_speed_percent) &&
    820	    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
    821	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
    822		return 0;
    823
    824	return effective_mode;
    825}
    826
    827static const struct attribute_group hwmon_attrgroup = {
    828	.attrs = hwmon_attributes,
    829	.is_visible = hwmon_attributes_visible,
    830};
    831
    832static const struct attribute_group *hwmon_groups[] = {
    833	&hwmon_attrgroup,
    834	NULL
    835};
    836
    837static int radeon_hwmon_init(struct radeon_device *rdev)
    838{
    839	int err = 0;
    840
    841	switch (rdev->pm.int_thermal_type) {
    842	case THERMAL_TYPE_RV6XX:
    843	case THERMAL_TYPE_RV770:
    844	case THERMAL_TYPE_EVERGREEN:
    845	case THERMAL_TYPE_NI:
    846	case THERMAL_TYPE_SUMO:
    847	case THERMAL_TYPE_SI:
    848	case THERMAL_TYPE_CI:
    849	case THERMAL_TYPE_KV:
    850		if (rdev->asic->pm.get_temperature == NULL)
    851			return err;
    852		rdev->pm.int_hwmon_dev = hwmon_device_register_with_groups(rdev->dev,
    853									   "radeon", rdev,
    854									   hwmon_groups);
    855		if (IS_ERR(rdev->pm.int_hwmon_dev)) {
    856			err = PTR_ERR(rdev->pm.int_hwmon_dev);
    857			dev_err(rdev->dev,
    858				"Unable to register hwmon device: %d\n", err);
    859		}
    860		break;
    861	default:
    862		break;
    863	}
    864
    865	return err;
    866}
    867
    868static void radeon_hwmon_fini(struct radeon_device *rdev)
    869{
    870	if (rdev->pm.int_hwmon_dev)
    871		hwmon_device_unregister(rdev->pm.int_hwmon_dev);
    872}
    873
    874static void radeon_dpm_thermal_work_handler(struct work_struct *work)
    875{
    876	struct radeon_device *rdev =
    877		container_of(work, struct radeon_device,
    878			     pm.dpm.thermal.work);
    879	/* switch to the thermal state */
    880	enum radeon_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
    881
    882	if (!rdev->pm.dpm_enabled)
    883		return;
    884
    885	if (rdev->asic->pm.get_temperature) {
    886		int temp = radeon_get_temperature(rdev);
    887
    888		if (temp < rdev->pm.dpm.thermal.min_temp)
    889			/* switch back the user state */
    890			dpm_state = rdev->pm.dpm.user_state;
    891	} else {
    892		if (rdev->pm.dpm.thermal.high_to_low)
    893			/* switch back the user state */
    894			dpm_state = rdev->pm.dpm.user_state;
    895	}
    896	mutex_lock(&rdev->pm.mutex);
    897	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
    898		rdev->pm.dpm.thermal_active = true;
    899	else
    900		rdev->pm.dpm.thermal_active = false;
    901	rdev->pm.dpm.state = dpm_state;
    902	mutex_unlock(&rdev->pm.mutex);
    903
    904	radeon_pm_compute_clocks(rdev);
    905}
    906
    907static bool radeon_dpm_single_display(struct radeon_device *rdev)
    908{
     909	bool single_display =
     910		rdev->pm.dpm.new_active_crtc_count < 2;
    911
    912	/* check if the vblank period is too short to adjust the mclk */
    913	if (single_display && rdev->asic->dpm.vblank_too_short) {
    914		if (radeon_dpm_vblank_too_short(rdev))
    915			single_display = false;
    916	}
    917
     918	/* 120 Hz displays tend to be problematic even if they are under
     919	 * the vblank limit.
     920	 */
    921	if (single_display && (r600_dpm_get_vrefresh(rdev) >= 120))
    922		single_display = false;
    923
    924	return single_display;
    925}
    926
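        /* Find a radeon_ps matching the requested state type, honoring the
         * single-display-only capability; if nothing matches, fall through
         * the fallback ladder at the end of the search.
         */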
    927static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
    928						     enum radeon_pm_state_type dpm_state)
    929{
    930	int i;
    931	struct radeon_ps *ps;
    932	u32 ui_class;
    933	bool single_display = radeon_dpm_single_display(rdev);
    934
     935	/* certain older asics have a separate 3D performance state,
    936	 * so try that first if the user selected performance
    937	 */
    938	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
    939		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
    940	/* balanced states don't exist at the moment */
    941	if (dpm_state == POWER_STATE_TYPE_BALANCED)
    942		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
    943
    944restart_search:
    945	/* Pick the best power state based on current conditions */
    946	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
    947		ps = &rdev->pm.dpm.ps[i];
    948		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
    949		switch (dpm_state) {
    950		/* user states */
    951		case POWER_STATE_TYPE_BATTERY:
    952			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
    953				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
    954					if (single_display)
    955						return ps;
    956				} else
    957					return ps;
    958			}
    959			break;
    960		case POWER_STATE_TYPE_BALANCED:
    961			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
    962				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
    963					if (single_display)
    964						return ps;
    965				} else
    966					return ps;
    967			}
    968			break;
    969		case POWER_STATE_TYPE_PERFORMANCE:
    970			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
    971				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
    972					if (single_display)
    973						return ps;
    974				} else
    975					return ps;
    976			}
    977			break;
    978		/* internal states */
    979		case POWER_STATE_TYPE_INTERNAL_UVD:
    980			if (rdev->pm.dpm.uvd_ps)
    981				return rdev->pm.dpm.uvd_ps;
    982			else
    983				break;
    984		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
    985			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
    986				return ps;
    987			break;
    988		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
    989			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
    990				return ps;
    991			break;
    992		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
    993			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
    994				return ps;
    995			break;
    996		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
    997			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
    998				return ps;
    999			break;
   1000		case POWER_STATE_TYPE_INTERNAL_BOOT:
   1001			return rdev->pm.dpm.boot_ps;
   1002		case POWER_STATE_TYPE_INTERNAL_THERMAL:
   1003			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
   1004				return ps;
   1005			break;
   1006		case POWER_STATE_TYPE_INTERNAL_ACPI:
   1007			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
   1008				return ps;
   1009			break;
   1010		case POWER_STATE_TYPE_INTERNAL_ULV:
   1011			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
   1012				return ps;
   1013			break;
   1014		case POWER_STATE_TYPE_INTERNAL_3DPERF:
   1015			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
   1016				return ps;
   1017			break;
   1018		default:
   1019			break;
   1020		}
   1021	}
   1022	/* use a fallback state if we didn't match */
   1023	switch (dpm_state) {
   1024	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
   1025		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
   1026		goto restart_search;
   1027	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
   1028	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
   1029	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
   1030		if (rdev->pm.dpm.uvd_ps) {
   1031			return rdev->pm.dpm.uvd_ps;
   1032		} else {
   1033			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
   1034			goto restart_search;
   1035		}
   1036	case POWER_STATE_TYPE_INTERNAL_THERMAL:
   1037		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
   1038		goto restart_search;
   1039	case POWER_STATE_TYPE_INTERNAL_ACPI:
   1040		dpm_state = POWER_STATE_TYPE_BATTERY;
   1041		goto restart_search;
   1042	case POWER_STATE_TYPE_BATTERY:
   1043	case POWER_STATE_TYPE_BALANCED:
   1044	case POWER_STATE_TYPE_INTERNAL_3DPERF:
   1045		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
   1046		goto restart_search;
   1047	default:
   1048		break;
   1049	}
   1050
   1051	return NULL;
   1052}
   1053
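        /* Pick and program a new DPM power state.  If the picked state equals
         * the current one, only the display configuration is refreshed where
         * needed; otherwise the rings are drained and the full state switch is
         * done under the mclk and ring locks.
         */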
   1054static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
   1055{
   1056	int i;
   1057	struct radeon_ps *ps;
   1058	enum radeon_pm_state_type dpm_state;
   1059	int ret;
   1060	bool single_display = radeon_dpm_single_display(rdev);
   1061
   1062	/* if dpm init failed */
   1063	if (!rdev->pm.dpm_enabled)
   1064		return;
   1065
   1066	if (rdev->pm.dpm.user_state != rdev->pm.dpm.state) {
   1067		/* add other state override checks here */
   1068		if ((!rdev->pm.dpm.thermal_active) &&
   1069		    (!rdev->pm.dpm.uvd_active))
   1070			rdev->pm.dpm.state = rdev->pm.dpm.user_state;
   1071	}
   1072	dpm_state = rdev->pm.dpm.state;
   1073
   1074	ps = radeon_dpm_pick_power_state(rdev, dpm_state);
   1075	if (ps)
   1076		rdev->pm.dpm.requested_ps = ps;
   1077	else
   1078		return;
   1079
   1080	/* no need to reprogram if nothing changed unless we are on BTC+ */
   1081	if (rdev->pm.dpm.current_ps == rdev->pm.dpm.requested_ps) {
   1082		/* vce just modifies an existing state so force a change */
   1083		if (ps->vce_active != rdev->pm.dpm.vce_active)
   1084			goto force;
   1085		/* user has made a display change (such as timing) */
   1086		if (rdev->pm.dpm.single_display != single_display)
   1087			goto force;
   1088		if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) {
   1089			/* for pre-BTC and APUs if the num crtcs changed but state is the same,
   1090			 * all we need to do is update the display configuration.
   1091			 */
   1092			if (rdev->pm.dpm.new_active_crtcs != rdev->pm.dpm.current_active_crtcs) {
   1093				/* update display watermarks based on new power state */
   1094				radeon_bandwidth_update(rdev);
   1095				/* update displays */
   1096				radeon_dpm_display_configuration_changed(rdev);
   1097				rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
   1098				rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
   1099			}
   1100			return;
   1101		} else {
    1102			/* for BTC+, if the num crtcs hasn't changed and the state is
    1103			 * the same, there is nothing to do; if the num crtcs is > 1 and
    1104			 * the state is the same, only update the display configuration.
    1105			 */
   1106			if (rdev->pm.dpm.new_active_crtcs ==
   1107			    rdev->pm.dpm.current_active_crtcs) {
   1108				return;
   1109			} else {
   1110				if ((rdev->pm.dpm.current_active_crtc_count > 1) &&
   1111				    (rdev->pm.dpm.new_active_crtc_count > 1)) {
   1112					/* update display watermarks based on new power state */
   1113					radeon_bandwidth_update(rdev);
   1114					/* update displays */
   1115					radeon_dpm_display_configuration_changed(rdev);
   1116					rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
   1117					rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
   1118					return;
   1119				}
   1120			}
   1121		}
   1122	}
   1123
   1124force:
   1125	if (radeon_dpm == 1) {
   1126		printk("switching from power state:\n");
   1127		radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps);
   1128		printk("switching to power state:\n");
   1129		radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps);
   1130	}
   1131
   1132	down_write(&rdev->pm.mclk_lock);
   1133	mutex_lock(&rdev->ring_lock);
   1134
   1135	/* update whether vce is active */
   1136	ps->vce_active = rdev->pm.dpm.vce_active;
   1137
   1138	ret = radeon_dpm_pre_set_power_state(rdev);
   1139	if (ret)
   1140		goto done;
   1141
   1142	/* update display watermarks based on new power state */
   1143	radeon_bandwidth_update(rdev);
   1144	/* update displays */
   1145	radeon_dpm_display_configuration_changed(rdev);
   1146
   1147	/* wait for the rings to drain */
   1148	for (i = 0; i < RADEON_NUM_RINGS; i++) {
   1149		struct radeon_ring *ring = &rdev->ring[i];
   1150		if (ring->ready)
   1151			radeon_fence_wait_empty(rdev, i);
   1152	}
   1153
   1154	/* program the new power state */
   1155	radeon_dpm_set_power_state(rdev);
   1156
   1157	/* update current power state */
   1158	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps;
   1159
   1160	radeon_dpm_post_set_power_state(rdev);
   1161
   1162	rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
   1163	rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
   1164	rdev->pm.dpm.single_display = single_display;
   1165
   1166	if (rdev->asic->dpm.force_performance_level) {
   1167		if (rdev->pm.dpm.thermal_active) {
   1168			enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
   1169			/* force low perf level for thermal */
   1170			radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW);
   1171			/* save the user's level */
   1172			rdev->pm.dpm.forced_level = level;
   1173		} else {
   1174			/* otherwise, user selected level */
   1175			radeon_dpm_force_performance_level(rdev, rdev->pm.dpm.forced_level);
   1176		}
   1177	}
   1178
   1179done:
   1180	mutex_unlock(&rdev->ring_lock);
   1181	up_write(&rdev->pm.mclk_lock);
   1182}
   1183
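        /* UVD stream accounting: either powergate UVD directly (keeping it
         * powered while streams are active or paused) or switch the DPM state
         * to a UVD state and recompute clocks.
         */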
   1184void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
   1185{
   1186	enum radeon_pm_state_type dpm_state;
   1187
   1188	if (rdev->asic->dpm.powergate_uvd) {
   1189		mutex_lock(&rdev->pm.mutex);
    1190		/* don't powergate anything if we
    1191		 * have active but paused streams */
   1192		enable |= rdev->pm.dpm.sd > 0;
   1193		enable |= rdev->pm.dpm.hd > 0;
   1194		/* enable/disable UVD */
   1195		radeon_dpm_powergate_uvd(rdev, !enable);
   1196		mutex_unlock(&rdev->pm.mutex);
   1197	} else {
   1198		if (enable) {
   1199			mutex_lock(&rdev->pm.mutex);
   1200			rdev->pm.dpm.uvd_active = true;
   1201			/* disable this for now */
   1202#if 0
   1203			if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0))
   1204				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD;
   1205			else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0))
   1206				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
   1207			else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 1))
   1208				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
   1209			else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2))
   1210				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2;
   1211			else
   1212#endif
   1213				dpm_state = POWER_STATE_TYPE_INTERNAL_UVD;
   1214			rdev->pm.dpm.state = dpm_state;
   1215			mutex_unlock(&rdev->pm.mutex);
   1216		} else {
   1217			mutex_lock(&rdev->pm.mutex);
   1218			rdev->pm.dpm.uvd_active = false;
   1219			mutex_unlock(&rdev->pm.mutex);
   1220		}
   1221
   1222		radeon_pm_compute_clocks(rdev);
   1223	}
   1224}
   1225
   1226void radeon_dpm_enable_vce(struct radeon_device *rdev, bool enable)
   1227{
   1228	if (enable) {
   1229		mutex_lock(&rdev->pm.mutex);
   1230		rdev->pm.dpm.vce_active = true;
   1231		/* XXX select vce level based on ring/task */
   1232		rdev->pm.dpm.vce_level = RADEON_VCE_LEVEL_AC_ALL;
   1233		mutex_unlock(&rdev->pm.mutex);
   1234	} else {
   1235		mutex_lock(&rdev->pm.mutex);
   1236		rdev->pm.dpm.vce_active = false;
   1237		mutex_unlock(&rdev->pm.mutex);
   1238	}
   1239
   1240	radeon_pm_compute_clocks(rdev);
   1241}
   1242
   1243static void radeon_pm_suspend_old(struct radeon_device *rdev)
   1244{
   1245	mutex_lock(&rdev->pm.mutex);
   1246	if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
   1247		if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
   1248			rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
   1249	}
   1250	mutex_unlock(&rdev->pm.mutex);
   1251
   1252	cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
   1253}
   1254
   1255static void radeon_pm_suspend_dpm(struct radeon_device *rdev)
   1256{
   1257	mutex_lock(&rdev->pm.mutex);
   1258	/* disable dpm */
   1259	radeon_dpm_disable(rdev);
   1260	/* reset the power state */
   1261	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
   1262	rdev->pm.dpm_enabled = false;
   1263	mutex_unlock(&rdev->pm.mutex);
   1264}
   1265
   1266void radeon_pm_suspend(struct radeon_device *rdev)
   1267{
   1268	if (rdev->pm.pm_method == PM_METHOD_DPM)
   1269		radeon_pm_suspend_dpm(rdev);
   1270	else
   1271		radeon_pm_suspend_old(rdev);
   1272}
   1273
   1274static void radeon_pm_resume_old(struct radeon_device *rdev)
   1275{
   1276	/* set up the default clocks if the MC ucode is loaded */
   1277	if ((rdev->family >= CHIP_BARTS) &&
   1278	    (rdev->family <= CHIP_CAYMAN) &&
   1279	    rdev->mc_fw) {
   1280		if (rdev->pm.default_vddc)
   1281			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
   1282						SET_VOLTAGE_TYPE_ASIC_VDDC);
   1283		if (rdev->pm.default_vddci)
   1284			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
   1285						SET_VOLTAGE_TYPE_ASIC_VDDCI);
   1286		if (rdev->pm.default_sclk)
   1287			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
   1288		if (rdev->pm.default_mclk)
   1289			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
   1290	}
   1291	/* asic init will reset the default power state */
   1292	mutex_lock(&rdev->pm.mutex);
   1293	rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
   1294	rdev->pm.current_clock_mode_index = 0;
   1295	rdev->pm.current_sclk = rdev->pm.default_sclk;
   1296	rdev->pm.current_mclk = rdev->pm.default_mclk;
   1297	if (rdev->pm.power_state) {
   1298		rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
   1299		rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
   1300	}
    1301	if (rdev->pm.pm_method == PM_METHOD_DYNPM &&
    1302	    rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
   1303		rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
   1304		schedule_delayed_work(&rdev->pm.dynpm_idle_work,
   1305				      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
   1306	}
   1307	mutex_unlock(&rdev->pm.mutex);
   1308	radeon_pm_compute_clocks(rdev);
   1309}
   1310
   1311static void radeon_pm_resume_dpm(struct radeon_device *rdev)
   1312{
   1313	int ret;
   1314
   1315	/* asic init will reset to the boot state */
   1316	mutex_lock(&rdev->pm.mutex);
   1317	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
   1318	radeon_dpm_setup_asic(rdev);
   1319	ret = radeon_dpm_enable(rdev);
   1320	mutex_unlock(&rdev->pm.mutex);
   1321	if (ret)
   1322		goto dpm_resume_fail;
   1323	rdev->pm.dpm_enabled = true;
   1324	return;
   1325
   1326dpm_resume_fail:
   1327	DRM_ERROR("radeon: dpm resume failed\n");
   1328	if ((rdev->family >= CHIP_BARTS) &&
   1329	    (rdev->family <= CHIP_CAYMAN) &&
   1330	    rdev->mc_fw) {
   1331		if (rdev->pm.default_vddc)
   1332			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
   1333						SET_VOLTAGE_TYPE_ASIC_VDDC);
   1334		if (rdev->pm.default_vddci)
   1335			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
   1336						SET_VOLTAGE_TYPE_ASIC_VDDCI);
   1337		if (rdev->pm.default_sclk)
   1338			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
   1339		if (rdev->pm.default_mclk)
   1340			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
   1341	}
   1342}
   1343
   1344void radeon_pm_resume(struct radeon_device *rdev)
   1345{
   1346	if (rdev->pm.pm_method == PM_METHOD_DPM)
   1347		radeon_pm_resume_dpm(rdev);
   1348	else
   1349		radeon_pm_resume_old(rdev);
   1350}
   1351
   1352static int radeon_pm_init_old(struct radeon_device *rdev)
   1353{
   1354	int ret;
   1355
   1356	rdev->pm.profile = PM_PROFILE_DEFAULT;
   1357	rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
   1358	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
   1359	rdev->pm.dynpm_can_upclock = true;
   1360	rdev->pm.dynpm_can_downclock = true;
   1361	rdev->pm.default_sclk = rdev->clock.default_sclk;
   1362	rdev->pm.default_mclk = rdev->clock.default_mclk;
   1363	rdev->pm.current_sclk = rdev->clock.default_sclk;
   1364	rdev->pm.current_mclk = rdev->clock.default_mclk;
   1365	rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;
   1366
   1367	if (rdev->bios) {
   1368		if (rdev->is_atom_bios)
   1369			radeon_atombios_get_power_modes(rdev);
   1370		else
   1371			radeon_combios_get_power_modes(rdev);
   1372		radeon_pm_print_states(rdev);
   1373		radeon_pm_init_profile(rdev);
   1374		/* set up the default clocks if the MC ucode is loaded */
   1375		if ((rdev->family >= CHIP_BARTS) &&
   1376		    (rdev->family <= CHIP_CAYMAN) &&
   1377		    rdev->mc_fw) {
   1378			if (rdev->pm.default_vddc)
   1379				radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
   1380							SET_VOLTAGE_TYPE_ASIC_VDDC);
   1381			if (rdev->pm.default_vddci)
   1382				radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
   1383							SET_VOLTAGE_TYPE_ASIC_VDDCI);
   1384			if (rdev->pm.default_sclk)
   1385				radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
   1386			if (rdev->pm.default_mclk)
   1387				radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
   1388		}
   1389	}
   1390
   1391	/* set up the internal thermal sensor if applicable */
   1392	ret = radeon_hwmon_init(rdev);
   1393	if (ret)
   1394		return ret;
   1395
   1396	INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
   1397
   1398	if (rdev->pm.num_power_states > 1) {
   1399		radeon_debugfs_pm_init(rdev);
   1400		DRM_INFO("radeon: power management initialized\n");
   1401	}
   1402
   1403	return 0;
   1404}
   1405
   1406static void radeon_dpm_print_power_states(struct radeon_device *rdev)
   1407{
   1408	int i;
   1409
   1410	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
   1411		printk("== power state %d ==\n", i);
   1412		radeon_dpm_print_power_state(rdev, &rdev->pm.dpm.ps[i]);
   1413	}
   1414}
   1415
   1416static int radeon_pm_init_dpm(struct radeon_device *rdev)
   1417{
   1418	int ret;
   1419
   1420	/* default to balanced state */
   1421	rdev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
   1422	rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
   1423	rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
   1424	rdev->pm.default_sclk = rdev->clock.default_sclk;
   1425	rdev->pm.default_mclk = rdev->clock.default_mclk;
   1426	rdev->pm.current_sclk = rdev->clock.default_sclk;
   1427	rdev->pm.current_mclk = rdev->clock.default_mclk;
   1428	rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;
   1429
   1430	if (rdev->bios && rdev->is_atom_bios)
   1431		radeon_atombios_get_power_modes(rdev);
   1432	else
   1433		return -EINVAL;
   1434
   1435	/* set up the internal thermal sensor if applicable */
   1436	ret = radeon_hwmon_init(rdev);
   1437	if (ret)
   1438		return ret;
   1439
   1440	INIT_WORK(&rdev->pm.dpm.thermal.work, radeon_dpm_thermal_work_handler);
   1441	mutex_lock(&rdev->pm.mutex);
   1442	radeon_dpm_init(rdev);
   1443	rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
   1444	if (radeon_dpm == 1)
   1445		radeon_dpm_print_power_states(rdev);
   1446	radeon_dpm_setup_asic(rdev);
   1447	ret = radeon_dpm_enable(rdev);
   1448	mutex_unlock(&rdev->pm.mutex);
   1449	if (ret)
   1450		goto dpm_failed;
   1451	rdev->pm.dpm_enabled = true;
   1452
   1453	radeon_debugfs_pm_init(rdev);
   1454
   1455	DRM_INFO("radeon: dpm initialized\n");
   1456
   1457	return 0;
   1458
   1459dpm_failed:
   1460	rdev->pm.dpm_enabled = false;
   1461	if ((rdev->family >= CHIP_BARTS) &&
   1462	    (rdev->family <= CHIP_CAYMAN) &&
   1463	    rdev->mc_fw) {
   1464		if (rdev->pm.default_vddc)
   1465			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
   1466						SET_VOLTAGE_TYPE_ASIC_VDDC);
   1467		if (rdev->pm.default_vddci)
   1468			radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
   1469						SET_VOLTAGE_TYPE_ASIC_VDDCI);
   1470		if (rdev->pm.default_sclk)
   1471			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
   1472		if (rdev->pm.default_mclk)
   1473			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
   1474	}
   1475	DRM_ERROR("radeon: dpm initialization failed\n");
   1476	return ret;
   1477}
   1478
   1479struct radeon_dpm_quirk {
   1480	u32 chip_vendor;
   1481	u32 chip_device;
   1482	u32 subsys_vendor;
   1483	u32 subsys_device;
   1484};
   1485
   1486/* cards with dpm stability problems */
   1487static struct radeon_dpm_quirk radeon_dpm_quirk_list[] = {
   1488	/* TURKS - https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1386534 */
   1489	{ PCI_VENDOR_ID_ATI, 0x6759, 0x1682, 0x3195 },
   1490	/* TURKS - https://bugzilla.kernel.org/show_bug.cgi?id=83731 */
   1491	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1179, 0xfb81 },
   1492	{ 0, 0, 0, 0 },
   1493};
   1494
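        /* Choose the PM method for this asic: DPM requires the RLC firmware
         * (and SMC firmware on RV770+ dGPUs) and can be disabled by a quirk or
         * the radeon_dpm module parameter, in which case the legacy profile
         * method is used instead.
         */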
   1495int radeon_pm_init(struct radeon_device *rdev)
   1496{
   1497	struct radeon_dpm_quirk *p = radeon_dpm_quirk_list;
   1498	bool disable_dpm = false;
   1499
   1500	/* Apply dpm quirks */
   1501	while (p && p->chip_device != 0) {
   1502		if (rdev->pdev->vendor == p->chip_vendor &&
   1503		    rdev->pdev->device == p->chip_device &&
   1504		    rdev->pdev->subsystem_vendor == p->subsys_vendor &&
   1505		    rdev->pdev->subsystem_device == p->subsys_device) {
   1506			disable_dpm = true;
   1507			break;
   1508		}
   1509		++p;
   1510	}
   1511
   1512	/* enable dpm on rv6xx+ */
   1513	switch (rdev->family) {
   1514	case CHIP_RV610:
   1515	case CHIP_RV630:
   1516	case CHIP_RV620:
   1517	case CHIP_RV635:
   1518	case CHIP_RV670:
   1519	case CHIP_RS780:
   1520	case CHIP_RS880:
   1521	case CHIP_RV770:
   1522		/* DPM requires the RLC, RV770+ dGPU requires SMC */
   1523		if (!rdev->rlc_fw)
   1524			rdev->pm.pm_method = PM_METHOD_PROFILE;
   1525		else if ((rdev->family >= CHIP_RV770) &&
   1526			 (!(rdev->flags & RADEON_IS_IGP)) &&
   1527			 (!rdev->smc_fw))
   1528			rdev->pm.pm_method = PM_METHOD_PROFILE;
   1529		else if (radeon_dpm == 1)
   1530			rdev->pm.pm_method = PM_METHOD_DPM;
   1531		else
   1532			rdev->pm.pm_method = PM_METHOD_PROFILE;
   1533		break;
   1534	case CHIP_RV730:
   1535	case CHIP_RV710:
   1536	case CHIP_RV740:
   1537	case CHIP_CEDAR:
   1538	case CHIP_REDWOOD:
   1539	case CHIP_JUNIPER:
   1540	case CHIP_CYPRESS:
   1541	case CHIP_HEMLOCK:
   1542	case CHIP_PALM:
   1543	case CHIP_SUMO:
   1544	case CHIP_SUMO2:
   1545	case CHIP_BARTS:
   1546	case CHIP_TURKS:
   1547	case CHIP_CAICOS:
   1548	case CHIP_CAYMAN:
   1549	case CHIP_ARUBA:
   1550	case CHIP_TAHITI:
   1551	case CHIP_PITCAIRN:
   1552	case CHIP_VERDE:
   1553	case CHIP_OLAND:
   1554	case CHIP_HAINAN:
   1555	case CHIP_BONAIRE:
   1556	case CHIP_KABINI:
   1557	case CHIP_KAVERI:
   1558	case CHIP_HAWAII:
   1559	case CHIP_MULLINS:
   1560		/* DPM requires the RLC, RV770+ dGPU requires SMC */
   1561		if (!rdev->rlc_fw)
   1562			rdev->pm.pm_method = PM_METHOD_PROFILE;
   1563		else if ((rdev->family >= CHIP_RV770) &&
   1564			 (!(rdev->flags & RADEON_IS_IGP)) &&
   1565			 (!rdev->smc_fw))
   1566			rdev->pm.pm_method = PM_METHOD_PROFILE;
   1567		else if (disable_dpm && (radeon_dpm == -1))
   1568			rdev->pm.pm_method = PM_METHOD_PROFILE;
   1569		else if (radeon_dpm == 0)
   1570			rdev->pm.pm_method = PM_METHOD_PROFILE;
   1571		else
   1572			rdev->pm.pm_method = PM_METHOD_DPM;
   1573		break;
   1574	default:
   1575		/* default to profile method */
   1576		rdev->pm.pm_method = PM_METHOD_PROFILE;
   1577		break;
   1578	}
   1579
   1580	if (rdev->pm.pm_method == PM_METHOD_DPM)
   1581		return radeon_pm_init_dpm(rdev);
   1582	else
   1583		return radeon_pm_init_old(rdev);
   1584}
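
/*
 * Restating the selection above: DPM is only considered when the RLC
 * firmware is loaded, and on RV770+ dGPUs additionally when the SMC
 * firmware is loaded.  For the r6xx parts and RV770 in the first case
 * block, DPM must be requested explicitly with radeon.dpm=1; for the
 * remaining rv7xx parts and everything from evergreen onward it is the
 * default, unless radeon.dpm=0 is passed or the board is on the quirk
 * list while radeon.dpm is left at its -1 auto default.  All other
 * families fall back to the profile-based method.
 */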
   1585
   1586int radeon_pm_late_init(struct radeon_device *rdev)
   1587{
   1588	int ret = 0;
   1589
   1590	if (rdev->pm.pm_method == PM_METHOD_DPM) {
   1591		if (rdev->pm.dpm_enabled) {
   1592			if (!rdev->pm.sysfs_initialized) {
   1593				ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
   1594				if (ret)
   1595					DRM_ERROR("failed to create device file for dpm state\n");
   1596				ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
   1597				if (ret)
    1598					DRM_ERROR("failed to create device file for dpm force performance level\n");
   1599				/* XXX: these are noops for dpm but are here for backwards compat */
   1600				ret = device_create_file(rdev->dev, &dev_attr_power_profile);
   1601				if (ret)
   1602					DRM_ERROR("failed to create device file for power profile\n");
   1603				ret = device_create_file(rdev->dev, &dev_attr_power_method);
   1604				if (ret)
   1605					DRM_ERROR("failed to create device file for power method\n");
   1606				rdev->pm.sysfs_initialized = true;
   1607			}
   1608
   1609			mutex_lock(&rdev->pm.mutex);
   1610			ret = radeon_dpm_late_enable(rdev);
   1611			mutex_unlock(&rdev->pm.mutex);
   1612			if (ret) {
   1613				rdev->pm.dpm_enabled = false;
   1614				DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
   1615			} else {
   1616				/* set the dpm state for PX since there won't be
   1617				 * a modeset to call this.
   1618				 */
   1619				radeon_pm_compute_clocks(rdev);
   1620			}
   1621		}
   1622	} else {
   1623		if ((rdev->pm.num_power_states > 1) &&
   1624		    (!rdev->pm.sysfs_initialized)) {
   1625			/* where's the best place to put these? */
   1626			ret = device_create_file(rdev->dev, &dev_attr_power_profile);
   1627			if (ret)
   1628				DRM_ERROR("failed to create device file for power profile\n");
   1629			ret = device_create_file(rdev->dev, &dev_attr_power_method);
   1630			if (ret)
   1631				DRM_ERROR("failed to create device file for power method\n");
   1632			else
   1633				rdev->pm.sysfs_initialized = true;
   1634		}
   1635	}
   1636	return ret;
   1637}
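
/*
 * The sysfs files created above appear under the PCI device of the GPU.
 * Illustrative usage (paths and values shown as examples; the accepted
 * strings are defined by the store handlers earlier in this file):
 *
 *	echo performance > /sys/class/drm/card0/device/power_dpm_state
 *	echo high > /sys/class/drm/card0/device/power_dpm_force_performance_level
 *
 * In DPM mode, power_profile and power_method are kept only as no-ops
 * for backwards compatibility, as noted in the code above.
 */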
   1638
   1639static void radeon_pm_fini_old(struct radeon_device *rdev)
   1640{
   1641	if (rdev->pm.num_power_states > 1) {
   1642		mutex_lock(&rdev->pm.mutex);
   1643		if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
   1644			rdev->pm.profile = PM_PROFILE_DEFAULT;
   1645			radeon_pm_update_profile(rdev);
   1646			radeon_pm_set_clocks(rdev);
   1647		} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
   1648			/* reset default clocks */
   1649			rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
   1650			rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
   1651			radeon_pm_set_clocks(rdev);
   1652		}
   1653		mutex_unlock(&rdev->pm.mutex);
   1654
   1655		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
   1656
   1657		device_remove_file(rdev->dev, &dev_attr_power_profile);
   1658		device_remove_file(rdev->dev, &dev_attr_power_method);
   1659	}
   1660
   1661	radeon_hwmon_fini(rdev);
   1662	kfree(rdev->pm.power_state);
   1663}
   1664
   1665static void radeon_pm_fini_dpm(struct radeon_device *rdev)
   1666{
   1667	if (rdev->pm.num_power_states > 1) {
   1668		mutex_lock(&rdev->pm.mutex);
   1669		radeon_dpm_disable(rdev);
   1670		mutex_unlock(&rdev->pm.mutex);
   1671
   1672		device_remove_file(rdev->dev, &dev_attr_power_dpm_state);
   1673		device_remove_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
   1674		/* XXX backwards compat */
   1675		device_remove_file(rdev->dev, &dev_attr_power_profile);
   1676		device_remove_file(rdev->dev, &dev_attr_power_method);
   1677	}
   1678	radeon_dpm_fini(rdev);
   1679
   1680	radeon_hwmon_fini(rdev);
   1681	kfree(rdev->pm.power_state);
   1682}
   1683
   1684void radeon_pm_fini(struct radeon_device *rdev)
   1685{
   1686	if (rdev->pm.pm_method == PM_METHOD_DPM)
   1687		radeon_pm_fini_dpm(rdev);
   1688	else
   1689		radeon_pm_fini_old(rdev);
   1690}
   1691
   1692static void radeon_pm_compute_clocks_old(struct radeon_device *rdev)
   1693{
   1694	struct drm_device *ddev = rdev->ddev;
   1695	struct drm_crtc *crtc;
   1696	struct radeon_crtc *radeon_crtc;
   1697
   1698	if (rdev->pm.num_power_states < 2)
   1699		return;
   1700
   1701	mutex_lock(&rdev->pm.mutex);
   1702
   1703	rdev->pm.active_crtcs = 0;
   1704	rdev->pm.active_crtc_count = 0;
   1705	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
   1706		list_for_each_entry(crtc,
   1707				    &ddev->mode_config.crtc_list, head) {
   1708			radeon_crtc = to_radeon_crtc(crtc);
   1709			if (radeon_crtc->enabled) {
   1710				rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
   1711				rdev->pm.active_crtc_count++;
   1712			}
   1713		}
   1714	}
   1715
   1716	if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
   1717		radeon_pm_update_profile(rdev);
   1718		radeon_pm_set_clocks(rdev);
   1719	} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
   1720		if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) {
   1721			if (rdev->pm.active_crtc_count > 1) {
   1722				if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
   1723					cancel_delayed_work(&rdev->pm.dynpm_idle_work);
   1724
   1725					rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
   1726					rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
   1727					radeon_pm_get_dynpm_state(rdev);
   1728					radeon_pm_set_clocks(rdev);
   1729
   1730					DRM_DEBUG_DRIVER("radeon: dynamic power management deactivated\n");
   1731				}
   1732			} else if (rdev->pm.active_crtc_count == 1) {
   1733				/* TODO: Increase clocks if needed for current mode */
   1734
   1735				if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) {
   1736					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
   1737					rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK;
   1738					radeon_pm_get_dynpm_state(rdev);
   1739					radeon_pm_set_clocks(rdev);
   1740
   1741					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
   1742							      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
   1743				} else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
   1744					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
   1745					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
   1746							      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
   1747					DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n");
   1748				}
   1749			} else { /* count == 0 */
   1750				if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) {
   1751					cancel_delayed_work(&rdev->pm.dynpm_idle_work);
   1752
   1753					rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM;
   1754					rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM;
   1755					radeon_pm_get_dynpm_state(rdev);
   1756					radeon_pm_set_clocks(rdev);
   1757				}
   1758			}
   1759		}
   1760	}
   1761
   1762	mutex_unlock(&rdev->pm.mutex);
   1763}
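
/*
 * Rough dynpm state flow implemented above (descriptive only):
 *
 *	>1 active crtcs: ACTIVE  -> PAUSED  (idle work cancelled, default clocks)
 *	 1 active crtc:  MINIMUM -> ACTIVE  (upclock, idle work rescheduled)
 *	                 PAUSED  -> ACTIVE  (idle work rescheduled)
 *	 0 active crtcs: any     -> MINIMUM (lowest clocks, idle work cancelled)
 *
 * Actual reclocking waits for the vblank interval elsewhere to avoid
 * visible flicker; see radeon_pm_in_vbl() and the note in the idle work
 * handler below.
 */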
   1764
   1765static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
   1766{
   1767	struct drm_device *ddev = rdev->ddev;
   1768	struct drm_crtc *crtc;
   1769	struct radeon_crtc *radeon_crtc;
   1770	struct radeon_connector *radeon_connector;
   1771
   1772	if (!rdev->pm.dpm_enabled)
   1773		return;
   1774
   1775	mutex_lock(&rdev->pm.mutex);
   1776
   1777	/* update active crtc counts */
   1778	rdev->pm.dpm.new_active_crtcs = 0;
   1779	rdev->pm.dpm.new_active_crtc_count = 0;
   1780	rdev->pm.dpm.high_pixelclock_count = 0;
   1781	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
   1782		list_for_each_entry(crtc,
   1783				    &ddev->mode_config.crtc_list, head) {
   1784			radeon_crtc = to_radeon_crtc(crtc);
   1785			if (crtc->enabled) {
   1786				rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
   1787				rdev->pm.dpm.new_active_crtc_count++;
   1788				if (!radeon_crtc->connector)
   1789					continue;
   1790
   1791				radeon_connector = to_radeon_connector(radeon_crtc->connector);
   1792				if (radeon_connector->pixelclock_for_modeset > 297000)
   1793					rdev->pm.dpm.high_pixelclock_count++;
   1794			}
   1795		}
   1796	}
   1797
   1798	/* update battery/ac status */
   1799	if (power_supply_is_system_supplied() > 0)
   1800		rdev->pm.dpm.ac_power = true;
   1801	else
   1802		rdev->pm.dpm.ac_power = false;
   1803
   1804	radeon_dpm_change_power_state_locked(rdev);
   1805
   1806	mutex_unlock(&rdev->pm.mutex);
   1807
   1808}
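
/*
 * The 297000 kHz comparison above counts displays whose pixel clock
 * exceeds 297 MHz (roughly the HDMI 1.4 ceiling, so e.g. 4K@60 modes).
 * The new_active_crtc and high_pixelclock counts gathered here are
 * consumed by radeon_dpm_change_power_state_locked() and the per-ASIC
 * DPM code, which may restrict clock switching for such displays.
 */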
   1809
   1810void radeon_pm_compute_clocks(struct radeon_device *rdev)
   1811{
   1812	if (rdev->pm.pm_method == PM_METHOD_DPM)
   1813		radeon_pm_compute_clocks_dpm(rdev);
   1814	else
   1815		radeon_pm_compute_clocks_old(rdev);
   1816}
   1817
   1818static bool radeon_pm_in_vbl(struct radeon_device *rdev)
   1819{
   1820	int  crtc, vpos, hpos, vbl_status;
   1821	bool in_vbl = true;
   1822
    1823	/* Iterate over all active CRTCs. All of them must be in vblank,
    1824	 * otherwise return in_vbl == false.
   1825	 */
   1826	for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
   1827		if (rdev->pm.active_crtcs & (1 << crtc)) {
   1828			vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev,
   1829								crtc,
   1830								USE_REAL_VBLANKSTART,
   1831								&vpos, &hpos, NULL, NULL,
   1832								&rdev->mode_info.crtcs[crtc]->base.hwmode);
   1833			if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
   1834			    !(vbl_status & DRM_SCANOUTPOS_IN_VBLANK))
   1835				in_vbl = false;
   1836		}
   1837	}
   1838
   1839	return in_vbl;
   1840}
   1841
   1842static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
   1843{
   1844	u32 stat_crtc = 0;
   1845	bool in_vbl = radeon_pm_in_vbl(rdev);
   1846
   1847	if (!in_vbl)
   1848		DRM_DEBUG_DRIVER("not in vbl for pm change %08x at %s\n", stat_crtc,
   1849			 finish ? "exit" : "entry");
   1850	return in_vbl;
   1851}
   1852
   1853static void radeon_dynpm_idle_work_handler(struct work_struct *work)
   1854{
   1855	struct radeon_device *rdev;
   1856	int resched;
   1857	rdev = container_of(work, struct radeon_device,
   1858				pm.dynpm_idle_work.work);
   1859
   1860	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
   1861	mutex_lock(&rdev->pm.mutex);
   1862	if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
   1863		int not_processed = 0;
   1864		int i;
   1865
   1866		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
   1867			struct radeon_ring *ring = &rdev->ring[i];
   1868
   1869			if (ring->ready) {
   1870				not_processed += radeon_fence_count_emitted(rdev, i);
   1871				if (not_processed >= 3)
   1872					break;
   1873			}
   1874		}
   1875
   1876		if (not_processed >= 3) { /* should upclock */
   1877			if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) {
   1878				rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
   1879			} else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
   1880				   rdev->pm.dynpm_can_upclock) {
   1881				rdev->pm.dynpm_planned_action =
   1882					DYNPM_ACTION_UPCLOCK;
   1883				rdev->pm.dynpm_action_timeout = jiffies +
   1884				msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
   1885			}
   1886		} else if (not_processed == 0) { /* should downclock */
   1887			if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_UPCLOCK) {
   1888				rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
   1889			} else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
   1890				   rdev->pm.dynpm_can_downclock) {
   1891				rdev->pm.dynpm_planned_action =
   1892					DYNPM_ACTION_DOWNCLOCK;
   1893				rdev->pm.dynpm_action_timeout = jiffies +
   1894				msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
   1895			}
   1896		}
   1897
   1898		/* Note, radeon_pm_set_clocks is called with static_switch set
   1899		 * to false since we want to wait for vbl to avoid flicker.
   1900		 */
   1901		if (rdev->pm.dynpm_planned_action != DYNPM_ACTION_NONE &&
   1902		    jiffies > rdev->pm.dynpm_action_timeout) {
   1903			radeon_pm_get_dynpm_state(rdev);
   1904			radeon_pm_set_clocks(rdev);
   1905		}
   1906
   1907		schedule_delayed_work(&rdev->pm.dynpm_idle_work,
   1908				      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
   1909	}
   1910	mutex_unlock(&rdev->pm.mutex);
   1911	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
   1912}
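
/*
 * The heuristic above, restated: three or more fences still outstanding
 * across the rings means the GPU is busy, so an upclock is planned; zero
 * outstanding fences means it is idle, so a downclock is planned.  Either
 * action is armed with a RADEON_RECLOCK_DELAY_MS deadline and only
 * applied once jiffies passes it, which keeps the clocks from flapping
 * between states on bursty workloads; a pending action in the opposite
 * direction is simply cancelled.
 */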
   1913
   1914/*
   1915 * Debugfs info
   1916 */
   1917#if defined(CONFIG_DEBUG_FS)
   1918
   1919static int radeon_debugfs_pm_info_show(struct seq_file *m, void *unused)
   1920{
   1921	struct radeon_device *rdev = (struct radeon_device *)m->private;
   1922	struct drm_device *ddev = rdev->ddev;
   1923
    1924	if ((rdev->flags & RADEON_IS_PX) &&
   1925	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
   1926		seq_printf(m, "PX asic powered off\n");
   1927	} else if (rdev->pm.dpm_enabled) {
   1928		mutex_lock(&rdev->pm.mutex);
   1929		if (rdev->asic->dpm.debugfs_print_current_performance_level)
   1930			radeon_dpm_debugfs_print_current_performance_level(rdev, m);
   1931		else
   1932			seq_printf(m, "Debugfs support not implemented for this asic\n");
   1933		mutex_unlock(&rdev->pm.mutex);
   1934	} else {
   1935		seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
   1936		/* radeon_get_engine_clock is not reliable on APUs so just print the current clock */
   1937		if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP))
   1938			seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk);
   1939		else
   1940			seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
   1941		seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
   1942		if (rdev->asic->pm.get_memory_clock)
   1943			seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
   1944		if (rdev->pm.current_vddc)
   1945			seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc);
   1946		if (rdev->asic->pm.get_pcie_lanes)
   1947			seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));
   1948	}
   1949
   1950	return 0;
   1951}
   1952
   1953DEFINE_SHOW_ATTRIBUTE(radeon_debugfs_pm_info);
   1954#endif
   1955
   1956static void radeon_debugfs_pm_init(struct radeon_device *rdev)
   1957{
   1958#if defined(CONFIG_DEBUG_FS)
   1959	struct dentry *root = rdev->ddev->primary->debugfs_root;
   1960
   1961	debugfs_create_file("radeon_pm_info", 0444, root, rdev,
   1962			    &radeon_debugfs_pm_info_fops);
   1963
   1964#endif
   1965}
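
/*
 * With CONFIG_DEBUG_FS enabled, the file registered above can be read at
 * runtime, typically (assuming debugfs is mounted at /sys/kernel/debug
 * and the GPU is DRM minor 0) as:
 *
 *	cat /sys/kernel/debug/dri/0/radeon_pm_info
 *
 * It prints either the current DPM performance level or the legacy
 * clock/voltage readings from radeon_debugfs_pm_info_show() above.  Note
 * that sclk/mclk values are stored in 10 kHz units, hence the literal
 * "0" appended after %u in the format strings.
 */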