cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

amdgpu_dm_pp_smu.c (24518B)


/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */
#include <linux/string.h>
#include <linux/acpi.h>

#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include "dm_services.h"
#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"
#include "amdgpu_pm.h"
#include "dm_pp_smu.h"

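/*
 * Push DC's current display configuration into adev->pm.pm_display_cfg and
 * let DPM act on it. DC passes clocks in kHz while pplib stores them in
 * 10 kHz units, hence the divisions by 10 below.
 */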
bool dm_pp_apply_display_requirements(
		const struct dc_context *ctx,
		const struct dm_pp_display_configuration *pp_display_cfg)
{
	struct amdgpu_device *adev = ctx->driver_context;
	int i;

	if (adev->pm.dpm_enabled) {

		memset(&adev->pm.pm_display_cfg, 0,
				sizeof(adev->pm.pm_display_cfg));

		adev->pm.pm_display_cfg.cpu_cc6_disable =
			pp_display_cfg->cpu_cc6_disable;

		adev->pm.pm_display_cfg.cpu_pstate_disable =
			pp_display_cfg->cpu_pstate_disable;

		adev->pm.pm_display_cfg.cpu_pstate_separation_time =
			pp_display_cfg->cpu_pstate_separation_time;

		adev->pm.pm_display_cfg.nb_pstate_switch_disable =
			pp_display_cfg->nb_pstate_switch_disable;

		adev->pm.pm_display_cfg.num_display =
				pp_display_cfg->display_count;
		adev->pm.pm_display_cfg.num_path_including_non_display =
				pp_display_cfg->display_count;

		adev->pm.pm_display_cfg.min_core_set_clock =
				pp_display_cfg->min_engine_clock_khz/10;
		adev->pm.pm_display_cfg.min_core_set_clock_in_sr =
				pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
		adev->pm.pm_display_cfg.min_mem_set_clock =
				pp_display_cfg->min_memory_clock_khz/10;

		adev->pm.pm_display_cfg.min_dcef_deep_sleep_set_clk =
				pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
		adev->pm.pm_display_cfg.min_dcef_set_clk =
				pp_display_cfg->min_dcfclock_khz/10;

		adev->pm.pm_display_cfg.multi_monitor_in_sync =
				pp_display_cfg->all_displays_in_sync;
		adev->pm.pm_display_cfg.min_vblank_time =
				pp_display_cfg->avail_mclk_switch_time_us;

		adev->pm.pm_display_cfg.display_clk =
				pp_display_cfg->disp_clk_khz/10;

		adev->pm.pm_display_cfg.dce_tolerable_mclk_in_active_latency =
				pp_display_cfg->avail_mclk_switch_time_in_disp_active_us;

		adev->pm.pm_display_cfg.crtc_index = pp_display_cfg->crtc_index;
		adev->pm.pm_display_cfg.line_time_in_us =
				pp_display_cfg->line_time_in_us;

		adev->pm.pm_display_cfg.vrefresh = pp_display_cfg->disp_configs[0].v_refresh;
		adev->pm.pm_display_cfg.crossfire_display_index = -1;
		adev->pm.pm_display_cfg.min_bus_bandwidth = 0;

		for (i = 0; i < pp_display_cfg->display_count; i++) {
			const struct dm_pp_single_disp_config *dc_cfg =
						&pp_display_cfg->disp_configs[i];
			adev->pm.pm_display_cfg.displays[i].controller_id = dc_cfg->pipe_idx + 1;
		}

		amdgpu_dpm_display_configuration_change(adev, &adev->pm.pm_display_cfg);

		amdgpu_dpm_compute_clocks(adev);
	}

	return true;
}

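/* Fallback clock tables (in kHz) used when the pplib query fails. */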
static void get_default_clock_levels(
		enum dm_pp_clock_type clk_type,
		struct dm_pp_clock_levels *clks)
{
	uint32_t disp_clks_in_khz[6] = {
			300000, 400000, 496560, 626090, 685720, 757900 };
	uint32_t sclks_in_khz[6] = {
			300000, 360000, 423530, 514290, 626090, 720000 };
	uint32_t mclks_in_khz[2] = { 333000, 800000 };

	switch (clk_type) {
	case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
		clks->num_levels = 6;
		memmove(clks->clocks_in_khz, disp_clks_in_khz,
				sizeof(disp_clks_in_khz));
		break;
	case DM_PP_CLOCK_TYPE_ENGINE_CLK:
		clks->num_levels = 6;
		memmove(clks->clocks_in_khz, sclks_in_khz,
				sizeof(sclks_in_khz));
		break;
	case DM_PP_CLOCK_TYPE_MEMORY_CLK:
		clks->num_levels = 2;
		memmove(clks->clocks_in_khz, mclks_in_khz,
				sizeof(mclks_in_khz));
		break;
	default:
		clks->num_levels = 0;
		break;
	}
}

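/* Translate a DM clock type to the corresponding amd powerplay clock type. */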
static enum amd_pp_clock_type dc_to_pp_clock_type(
		enum dm_pp_clock_type dm_pp_clk_type)
{
	enum amd_pp_clock_type amd_pp_clk_type = 0;

	switch (dm_pp_clk_type) {
	case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
		amd_pp_clk_type = amd_pp_disp_clock;
		break;
	case DM_PP_CLOCK_TYPE_ENGINE_CLK:
		amd_pp_clk_type = amd_pp_sys_clock;
		break;
	case DM_PP_CLOCK_TYPE_MEMORY_CLK:
		amd_pp_clk_type = amd_pp_mem_clock;
		break;
	case DM_PP_CLOCK_TYPE_DCEFCLK:
		amd_pp_clk_type = amd_pp_dcef_clock;
		break;
	case DM_PP_CLOCK_TYPE_DCFCLK:
		amd_pp_clk_type = amd_pp_dcf_clock;
		break;
	case DM_PP_CLOCK_TYPE_PIXELCLK:
		amd_pp_clk_type = amd_pp_pixel_clock;
		break;
	case DM_PP_CLOCK_TYPE_FCLK:
		amd_pp_clk_type = amd_pp_f_clock;
		break;
	case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
		amd_pp_clk_type = amd_pp_phy_clock;
		break;
	case DM_PP_CLOCK_TYPE_DPPCLK:
		amd_pp_clk_type = amd_pp_dpp_clock;
		break;
	default:
		DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n",
				dm_pp_clk_type);
		break;
	}

	return amd_pp_clk_type;
}

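/* Map a pplib PP_DAL_POWERLEVEL onto the corresponding DM DPM state. */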
static enum dm_pp_clocks_state pp_to_dc_powerlevel_state(
			enum PP_DAL_POWERLEVEL max_clocks_state)
{
	switch (max_clocks_state) {
	case PP_DAL_POWERLEVEL_0:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_0;
	case PP_DAL_POWERLEVEL_1:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_1;
	case PP_DAL_POWERLEVEL_2:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_2;
	case PP_DAL_POWERLEVEL_3:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_3;
	case PP_DAL_POWERLEVEL_4:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_4;
	case PP_DAL_POWERLEVEL_5:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_5;
	case PP_DAL_POWERLEVEL_6:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_6;
	case PP_DAL_POWERLEVEL_7:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_7;
	default:
		DRM_ERROR("DM_PPLIB: invalid powerlevel state: %d!\n",
				max_clocks_state);
		return DM_PP_CLOCKS_STATE_INVALID;
	}
}

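/*
 * Copy pplib clock levels into the DM structure, clamping the level count
 * to DM_PP_MAX_CLOCK_LEVELS.
 */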
static void pp_to_dc_clock_levels(
		const struct amd_pp_clocks *pp_clks,
		struct dm_pp_clock_levels *dc_clks,
		enum dm_pp_clock_type dc_clk_type)
{
	uint32_t i;

	if (pp_clks->count > DM_PP_MAX_CLOCK_LEVELS) {
		DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
				DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
				pp_clks->count,
				DM_PP_MAX_CLOCK_LEVELS);

		dc_clks->num_levels = DM_PP_MAX_CLOCK_LEVELS;
	} else {
		dc_clks->num_levels = pp_clks->count;
	}

	DRM_INFO("DM_PPLIB: values for %s clock\n",
			DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));

	for (i = 0; i < dc_clks->num_levels; i++) {
		DRM_INFO("DM_PPLIB:\t %d\n", pp_clks->clock[i]);
		dc_clks->clocks_in_khz[i] = pp_clks->clock[i];
	}
}

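/* As above, but each level also carries its latency in microseconds. */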
static void pp_to_dc_clock_levels_with_latency(
		const struct pp_clock_levels_with_latency *pp_clks,
		struct dm_pp_clock_levels_with_latency *clk_level_info,
		enum dm_pp_clock_type dc_clk_type)
{
	uint32_t i;

	if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
		DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
				DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
				pp_clks->num_levels,
				DM_PP_MAX_CLOCK_LEVELS);

		clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
	} else {
		clk_level_info->num_levels = pp_clks->num_levels;
	}

	DRM_DEBUG("DM_PPLIB: values for %s clock\n",
			DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));

	for (i = 0; i < clk_level_info->num_levels; i++) {
		DRM_DEBUG("DM_PPLIB:\t %d in kHz\n", pp_clks->data[i].clocks_in_khz);
		clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
		clk_level_info->data[i].latency_in_us = pp_clks->data[i].latency_in_us;
	}
}

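/* As above, but each level also carries its voltage in millivolts. */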
static void pp_to_dc_clock_levels_with_voltage(
		const struct pp_clock_levels_with_voltage *pp_clks,
		struct dm_pp_clock_levels_with_voltage *clk_level_info,
		enum dm_pp_clock_type dc_clk_type)
{
	uint32_t i;

	if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
		DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
				DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
				pp_clks->num_levels,
				DM_PP_MAX_CLOCK_LEVELS);

		clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
	} else {
		clk_level_info->num_levels = pp_clks->num_levels;
	}

	DRM_INFO("DM_PPLIB: values for %s clock\n",
			DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));

	for (i = 0; i < clk_level_info->num_levels; i++) {
		DRM_INFO("DM_PPLIB:\t %d in kHz, %d in mV\n", pp_clks->data[i].clocks_in_khz,
			 pp_clks->data[i].voltage_in_mv);
		clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
		clk_level_info->data[i].voltage_in_mv = pp_clks->data[i].voltage_in_mv;
	}
}

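/*
 * Query pplib for the levels of the given clock type, falling back to the
 * defaults above on error. For engine and memory clocks the level list is
 * then trimmed to the highest level that passes display-mode validation.
 */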
bool dm_pp_get_clock_levels_by_type(
		const struct dc_context *ctx,
		enum dm_pp_clock_type clk_type,
		struct dm_pp_clock_levels *dc_clks)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct amd_pp_clocks pp_clks = { 0 };
	struct amd_pp_simple_clock_info validation_clks = { 0 };
	uint32_t i;

	if (amdgpu_dpm_get_clock_by_type(adev,
		dc_to_pp_clock_type(clk_type), &pp_clks)) {
		/* Error in pplib. Provide default values. */
		get_default_clock_levels(clk_type, dc_clks);
		return true;
	}

	pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type);

	if (amdgpu_dpm_get_display_mode_validation_clks(adev, &validation_clks)) {
		/* Error in pplib. Provide default values. */
		DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
		validation_clks.engine_max_clock = 72000;
		validation_clks.memory_max_clock = 80000;
		validation_clks.level = 0;
	}

	DRM_INFO("DM_PPLIB: Validation clocks:\n");
	DRM_INFO("DM_PPLIB:    engine_max_clock: %d\n",
			validation_clks.engine_max_clock);
	DRM_INFO("DM_PPLIB:    memory_max_clock: %d\n",
			validation_clks.memory_max_clock);
	DRM_INFO("DM_PPLIB:    level           : %d\n",
			validation_clks.level);

	/* Translate from 10 kHz units to kHz. */
	validation_clks.engine_max_clock *= 10;
	validation_clks.memory_max_clock *= 10;

	/* Determine the highest non-boosted level from the validation clocks. */
	if (clk_type == DM_PP_CLOCK_TYPE_ENGINE_CLK) {
		for (i = 0; i < dc_clks->num_levels; i++) {
			if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) {
				/* This clock is higher than the validation
				 * clock. That means the previous one is the
				 * highest non-boosted one.
				 */
				DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n",
						dc_clks->num_levels, i);
				dc_clks->num_levels = i > 0 ? i : 1;
				break;
			}
		}
	} else if (clk_type == DM_PP_CLOCK_TYPE_MEMORY_CLK) {
		for (i = 0; i < dc_clks->num_levels; i++) {
			if (dc_clks->clocks_in_khz[i] > validation_clks.memory_max_clock) {
				DRM_INFO("DM_PPLIB: reducing memory clock level from %d to %d\n",
						dc_clks->num_levels, i);
				dc_clks->num_levels = i > 0 ? i : 1;
				break;
			}
		}
	}

	return true;
}

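/* Query pplib for the levels of the given clock type plus their latencies. */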
bool dm_pp_get_clock_levels_by_type_with_latency(
	const struct dc_context *ctx,
	enum dm_pp_clock_type clk_type,
	struct dm_pp_clock_levels_with_latency *clk_level_info)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct pp_clock_levels_with_latency pp_clks = { 0 };
	int ret;

	ret = amdgpu_dpm_get_clock_by_type_with_latency(adev,
					dc_to_pp_clock_type(clk_type),
					&pp_clks);
	if (ret)
		return false;

	pp_to_dc_clock_levels_with_latency(&pp_clks, clk_level_info, clk_type);

	return true;
}

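/* Query pplib for the levels of the given clock type plus their voltages. */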
bool dm_pp_get_clock_levels_by_type_with_voltage(
	const struct dc_context *ctx,
	enum dm_pp_clock_type clk_type,
	struct dm_pp_clock_levels_with_voltage *clk_level_info)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct pp_clock_levels_with_voltage pp_clk_info = {0};
	int ret;

	ret = amdgpu_dpm_get_clock_by_type_with_voltage(adev,
					dc_to_pp_clock_type(clk_type),
					&pp_clk_info);
	if (ret)
		return false;

	pp_to_dc_clock_levels_with_voltage(&pp_clk_info, clk_level_info, clk_type);

	return true;
}

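/* Forward watermark clock ranges to pplib; Polaris-family ASICs only. */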
bool dm_pp_notify_wm_clock_changes(
	const struct dc_context *ctx,
	struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges)
{
	struct amdgpu_device *adev = ctx->driver_context;

	/*
	 * Limit this watermark setting for Polaris for now
	 * TODO: expand this to other ASICs
	 */
	if ((adev->asic_type >= CHIP_POLARIS10) &&
	    (adev->asic_type <= CHIP_VEGAM) &&
	    !amdgpu_dpm_set_watermarks_for_clocks_ranges(adev,
						(void *)wm_with_clock_ranges))
		return true;

	return false;
}

bool dm_pp_apply_power_level_change_request(
	const struct dc_context *ctx,
	struct dm_pp_power_level_change_request *level_change_req)
{
	/* TODO: to be implemented */
	return false;
}

bool dm_pp_apply_clock_for_voltage_request(
	const struct dc_context *ctx,
	struct dm_pp_clock_for_voltage_req *clock_for_voltage_req)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct pp_display_clock_request pp_clock_request = {0};
	int ret = 0;

	pp_clock_request.clock_type = dc_to_pp_clock_type(clock_for_voltage_req->clk_type);
	pp_clock_request.clock_freq_in_khz = clock_for_voltage_req->clocks_in_khz;

	if (!pp_clock_request.clock_type)
		return false;

	ret = amdgpu_dpm_display_clock_voltage_request(adev, &pp_clock_request);
	if (ret && (ret != -EOPNOTSUPP))
		return false;

	return true;
}

bool dm_pp_get_static_clocks(
	const struct dc_context *ctx,
	struct dm_pp_static_clock_info *static_clk_info)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct amd_pp_clock_info pp_clk_info = {0};

	if (amdgpu_dpm_get_current_clocks(adev, &pp_clk_info))
		return false;

	static_clk_info->max_clocks_state = pp_to_dc_powerlevel_state(pp_clk_info.max_clocks_state);
	static_clk_info->max_mclk_khz = pp_clk_info.max_memory_clock * 10;
	static_clk_info->max_sclk_khz = pp_clk_info.max_engine_clock * 10;

	return true;
}

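/*
 * pp_smu hooks for Raven (DCN 1.x): translate DC's watermark sets into the
 * SoC15 layout and forward display clock requests to DPM.
 */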
static void pp_rv_set_wm_ranges(struct pp_smu *pp,
		struct pp_smu_wm_range_sets *ranges)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges;
	struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks = wm_with_clock_ranges.wm_dmif_clocks_ranges;
	struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks = wm_with_clock_ranges.wm_mcif_clocks_ranges;
	int32_t i;

	wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets;
	wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets;

	for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) {
		if (ranges->reader_wm_sets[i].wm_inst > 3)
			wm_dce_clocks[i].wm_set_id = WM_SET_A;
		else
			wm_dce_clocks[i].wm_set_id =
					ranges->reader_wm_sets[i].wm_inst;
		wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz =
				ranges->reader_wm_sets[i].max_drain_clk_mhz * 1000;
		wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz =
				ranges->reader_wm_sets[i].min_drain_clk_mhz * 1000;
		wm_dce_clocks[i].wm_max_mem_clk_in_khz =
				ranges->reader_wm_sets[i].max_fill_clk_mhz * 1000;
		wm_dce_clocks[i].wm_min_mem_clk_in_khz =
				ranges->reader_wm_sets[i].min_fill_clk_mhz * 1000;
	}

	for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) {
		if (ranges->writer_wm_sets[i].wm_inst > 3)
			wm_soc_clocks[i].wm_set_id = WM_SET_A;
		else
			wm_soc_clocks[i].wm_set_id =
					ranges->writer_wm_sets[i].wm_inst;
		wm_soc_clocks[i].wm_max_socclk_clk_in_khz =
				ranges->writer_wm_sets[i].max_fill_clk_mhz * 1000;
		wm_soc_clocks[i].wm_min_socclk_clk_in_khz =
				ranges->writer_wm_sets[i].min_fill_clk_mhz * 1000;
		wm_soc_clocks[i].wm_max_mem_clk_in_khz =
				ranges->writer_wm_sets[i].max_drain_clk_mhz * 1000;
		wm_soc_clocks[i].wm_min_mem_clk_in_khz =
				ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000;
	}

	amdgpu_dpm_set_watermarks_for_clocks_ranges(adev,
						    &wm_with_clock_ranges);
}

static void pp_rv_set_pme_wa_enable(struct pp_smu *pp)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;

	amdgpu_dpm_notify_smu_enable_pwe(adev);
}

static void pp_rv_set_active_display_count(struct pp_smu *pp, int count)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;

	amdgpu_dpm_set_active_display_count(adev, count);
}

static void pp_rv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int clock)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;

	amdgpu_dpm_set_min_deep_sleep_dcefclk(adev, clock);
}

static void pp_rv_set_hard_min_dcefclk_by_freq(struct pp_smu *pp, int clock)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;

	amdgpu_dpm_set_hard_min_dcefclk_by_freq(adev, clock);
}

static void pp_rv_set_hard_min_fclk_by_freq(struct pp_smu *pp, int mhz)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;

	amdgpu_dpm_set_hard_min_fclk_by_freq(adev, mhz);
}

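/*
 * pp_smu hooks for Navi (DCN 2.0). Unlike the Raven hooks, these return a
 * pp_smu_status and map -EOPNOTSUPP from DPM to PP_SMU_RESULT_UNSUPPORTED.
 */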
static enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp,
		struct pp_smu_wm_range_sets *ranges)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;

	amdgpu_dpm_set_watermarks_for_clocks_ranges(adev, ranges);

	return PP_SMU_RESULT_OK;
}

static enum pp_smu_status pp_nv_set_display_count(struct pp_smu *pp, int count)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	int ret = 0;

	/* ret == 0: success, or smu.ppt_funcs->set_display_count == NULL;
	 * nonzero: failure
	 */
	ret = amdgpu_dpm_set_active_display_count(adev, count);
	if (ret == -EOPNOTSUPP)
		return PP_SMU_RESULT_UNSUPPORTED;
	else if (ret)
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}

static enum pp_smu_status
pp_nv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int mhz)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	int ret = 0;

	/* ret == 0: success, or smu.ppt_funcs->set_deep_sleep_dcefclk == NULL;
	 * nonzero: failure
	 */
	ret = amdgpu_dpm_set_min_deep_sleep_dcefclk(adev, mhz);
	if (ret == -EOPNOTSUPP)
		return PP_SMU_RESULT_UNSUPPORTED;
	else if (ret)
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}

static enum pp_smu_status pp_nv_set_hard_min_dcefclk_by_freq(
		struct pp_smu *pp, int mhz)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	struct pp_display_clock_request clock_req;
	int ret = 0;

	clock_req.clock_type = amd_pp_dcef_clock;
	clock_req.clock_freq_in_khz = mhz * 1000;

	/* ret == 0: success, or smu.ppt_funcs->display_clock_voltage_request == NULL;
	 * nonzero: failure
	 */
	ret = amdgpu_dpm_display_clock_voltage_request(adev, &clock_req);
	if (ret == -EOPNOTSUPP)
		return PP_SMU_RESULT_UNSUPPORTED;
	else if (ret)
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}

static enum pp_smu_status
pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	struct pp_display_clock_request clock_req;
	int ret = 0;

	clock_req.clock_type = amd_pp_mem_clock;
	clock_req.clock_freq_in_khz = mhz * 1000;

	/* ret == 0: success, or smu.ppt_funcs->display_clock_voltage_request == NULL;
	 * nonzero: failure
	 */
	ret = amdgpu_dpm_display_clock_voltage_request(adev, &clock_req);
	if (ret == -EOPNOTSUPP)
		return PP_SMU_RESULT_UNSUPPORTED;
	else if (ret)
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}

static enum pp_smu_status pp_nv_set_pstate_handshake_support(
	struct pp_smu *pp, bool pstate_handshake_supported)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;

	if (amdgpu_dpm_display_disable_memory_clock_switch(adev,
							  !pstate_handshake_supported))
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}

static enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp,
		enum pp_smu_nv_clock_id clock_id, int mhz)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	/* Zero-init so an unhandled clock_id does not pass stack garbage. */
	struct pp_display_clock_request clock_req = {0};
	int ret = 0;

	switch (clock_id) {
	case PP_SMU_NV_DISPCLK:
		clock_req.clock_type = amd_pp_disp_clock;
		break;
	case PP_SMU_NV_PHYCLK:
		clock_req.clock_type = amd_pp_phy_clock;
		break;
	case PP_SMU_NV_PIXELCLK:
		clock_req.clock_type = amd_pp_pixel_clock;
		break;
	default:
		break;
	}
	clock_req.clock_freq_in_khz = mhz * 1000;

	/* ret == 0: success, or smu.ppt_funcs->display_clock_voltage_request == NULL;
	 * nonzero: failure
	 */
	ret = amdgpu_dpm_display_clock_voltage_request(adev, &clock_req);
	if (ret == -EOPNOTSUPP)
		return PP_SMU_RESULT_UNSUPPORTED;
	else if (ret)
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}

static enum pp_smu_status pp_nv_get_maximum_sustainable_clocks(
		struct pp_smu *pp, struct pp_smu_nv_clock_table *max_clocks)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	int ret = 0;

	ret = amdgpu_dpm_get_max_sustainable_clocks_by_dc(adev,
							  max_clocks);
	if (ret == -EOPNOTSUPP)
		return PP_SMU_RESULT_UNSUPPORTED;
	else if (ret)
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}

static enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp,
		unsigned int *clock_values_in_khz, unsigned int *num_states)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	int ret = 0;

	ret = amdgpu_dpm_get_uclk_dpm_states(adev,
					     clock_values_in_khz,
					     num_states);
	if (ret == -EOPNOTSUPP)
		return PP_SMU_RESULT_UNSUPPORTED;
	else if (ret)
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}

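/* pp_smu hooks for Renoir (DCN 2.1). */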
static enum pp_smu_status pp_rn_get_dpm_clock_table(
		struct pp_smu *pp, struct dpm_clocks *clock_table)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	int ret = 0;

	ret = amdgpu_dpm_get_dpm_clock_table(adev, clock_table);
	if (ret == -EOPNOTSUPP)
		return PP_SMU_RESULT_UNSUPPORTED;
	else if (ret)
		return PP_SMU_RESULT_FAIL;

	return PP_SMU_RESULT_OK;
}

static enum pp_smu_status pp_rn_set_wm_ranges(struct pp_smu *pp,
		struct pp_smu_wm_range_sets *ranges)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;

	amdgpu_dpm_set_watermarks_for_clocks_ranges(adev, ranges);

	return PP_SMU_RESULT_OK;
}

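/*
 * Populate the pp_smu function table for the DCN version in use: Raven
 * (DCN 1.x), Navi (DCN 2.0) or Renoir (DCN 2.1).
 */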
void dm_pp_get_funcs(
		struct dc_context *ctx,
		struct pp_smu_funcs *funcs)
{
	switch (ctx->dce_version) {
	case DCN_VERSION_1_0:
	case DCN_VERSION_1_01:
		funcs->ctx.ver = PP_SMU_VER_RV;
		funcs->rv_funcs.pp_smu.dm = ctx;
		funcs->rv_funcs.set_wm_ranges = pp_rv_set_wm_ranges;
		funcs->rv_funcs.set_pme_wa_enable = pp_rv_set_pme_wa_enable;
		funcs->rv_funcs.set_display_count =
				pp_rv_set_active_display_count;
		funcs->rv_funcs.set_min_deep_sleep_dcfclk =
				pp_rv_set_min_deep_sleep_dcfclk;
		funcs->rv_funcs.set_hard_min_dcfclk_by_freq =
				pp_rv_set_hard_min_dcefclk_by_freq;
		funcs->rv_funcs.set_hard_min_fclk_by_freq =
				pp_rv_set_hard_min_fclk_by_freq;
		break;
	case DCN_VERSION_2_0:
		funcs->ctx.ver = PP_SMU_VER_NV;
		funcs->nv_funcs.pp_smu.dm = ctx;
		funcs->nv_funcs.set_display_count = pp_nv_set_display_count;
		funcs->nv_funcs.set_hard_min_dcfclk_by_freq =
				pp_nv_set_hard_min_dcefclk_by_freq;
		funcs->nv_funcs.set_min_deep_sleep_dcfclk =
				pp_nv_set_min_deep_sleep_dcfclk;
		funcs->nv_funcs.set_voltage_by_freq =
				pp_nv_set_voltage_by_freq;
		funcs->nv_funcs.set_wm_ranges = pp_nv_set_wm_ranges;

		/* TODO: set_pme_wa_enable causes a 4k@60Hz display to not light up */
		funcs->nv_funcs.set_pme_wa_enable = NULL;
		/* TODO: debug warning message */
		funcs->nv_funcs.set_hard_min_uclk_by_freq = pp_nv_set_hard_min_uclk_by_freq;
		/* TODO: compare data with the Windows driver */
		funcs->nv_funcs.get_maximum_sustainable_clocks = pp_nv_get_maximum_sustainable_clocks;
		/* TODO: compare data with the Windows driver */
		funcs->nv_funcs.get_uclk_dpm_states = pp_nv_get_uclk_dpm_states;
		funcs->nv_funcs.set_pstate_handshake_support = pp_nv_set_pstate_handshake_support;
		break;

	case DCN_VERSION_2_1:
		funcs->ctx.ver = PP_SMU_VER_RN;
		funcs->rn_funcs.pp_smu.dm = ctx;
		funcs->rn_funcs.set_wm_ranges = pp_rn_set_wm_ranges;
		funcs->rn_funcs.get_dpm_clock_table = pp_rn_get_dpm_clock_table;
		break;
	default:
		DRM_ERROR("SMU version is not supported!\n");
		break;
	}
}