dce_clk_mgr.c
/*
 * Copyright 2012-16 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <linux/slab.h>

#include "dce_clk_mgr.h"

#include "reg_helper.h"
#include "dmcu.h"
#include "core_types.h"
#include "dal_asic_id.h"

#define TO_DCE_CLK_MGR(clocks)\
	container_of(clocks, struct dce_clk_mgr, base)

#define REG(reg) \
	(clk_mgr_dce->regs->reg)

#undef FN
#define FN(reg_name, field_name) \
	clk_mgr_dce->clk_mgr_shift->field_name, clk_mgr_dce->clk_mgr_mask->field_name

#define CTX \
	clk_mgr_dce->base.ctx
#define DC_LOGGER \
	clk_mgr->ctx->logger

/* Max clock values for each state indexed by "enum clocks_state": */
static const struct state_dependent_clocks dce80_max_clks_by_state[] = {
/* ClocksStateInvalid - should not be used */
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/* ClocksStateUltraLow - not expected to be used for DCE 8.0 */
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/* ClocksStateLow */
{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
/* ClocksStateNominal */
{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 },
/* ClocksStatePerformance */
{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 } };

static const struct state_dependent_clocks dce110_max_clks_by_state[] = {
/* ClocksStateInvalid - should not be used */
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/* ClocksStateUltraLow - currently not supposed to be used, per the HW design team */
{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
/* ClocksStateLow */
{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
/* ClocksStateNominal */
{ .display_clk_khz = 467000, .pixel_clk_khz = 400000 },
/* ClocksStatePerformance */
{ .display_clk_khz = 643000, .pixel_clk_khz = 400000 } };

static const struct state_dependent_clocks dce112_max_clks_by_state[] = {
/* ClocksStateInvalid - should not be used */
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/* ClocksStateUltraLow - currently not supposed to be used, per the HW design team */
{ .display_clk_khz = 389189, .pixel_clk_khz = 346672 },
/* ClocksStateLow */
{ .display_clk_khz = 459000, .pixel_clk_khz = 400000 },
/* ClocksStateNominal */
{ .display_clk_khz = 667000, .pixel_clk_khz = 600000 },
/* ClocksStatePerformance */
{ .display_clk_khz = 1132000, .pixel_clk_khz = 600000 } };
static const struct state_dependent_clocks dce120_max_clks_by_state[] = {
/* ClocksStateInvalid - should not be used */
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/* ClocksStateUltraLow - currently not supposed to be used, per the HW design team */
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/* ClocksStateLow */
{ .display_clk_khz = 460000, .pixel_clk_khz = 400000 },
/* ClocksStateNominal */
{ .display_clk_khz = 670000, .pixel_clk_khz = 600000 },
/* ClocksStatePerformance */
{ .display_clk_khz = 1133000, .pixel_clk_khz = 600000 } };

int dentist_get_divider_from_did(int did)
{
	if (did < DENTIST_BASE_DID_1)
		did = DENTIST_BASE_DID_1;
	if (did > DENTIST_MAX_DID)
		did = DENTIST_MAX_DID;

	if (did < DENTIST_BASE_DID_2) {
		return DENTIST_DIVIDER_RANGE_1_START + DENTIST_DIVIDER_RANGE_1_STEP
			* (did - DENTIST_BASE_DID_1);
	} else if (did < DENTIST_BASE_DID_3) {
		return DENTIST_DIVIDER_RANGE_2_START + DENTIST_DIVIDER_RANGE_2_STEP
			* (did - DENTIST_BASE_DID_2);
	} else if (did < DENTIST_BASE_DID_4) {
		return DENTIST_DIVIDER_RANGE_3_START + DENTIST_DIVIDER_RANGE_3_STEP
			* (did - DENTIST_BASE_DID_3);
	} else {
		return DENTIST_DIVIDER_RANGE_4_START + DENTIST_DIVIDER_RANGE_4_STEP
			* (did - DENTIST_BASE_DID_4);
	}
}

/* SW will adjust the DP REF Clock average value for all purposes
 * (DP DTO / DP Audio DTO and DP GTC) if the clock is spread, for all cases:
 * - SS enabled on the DP Ref clock and HW de-spreading enabled with SW
 *   calculations for DS_INCR/DS_MODULO (this is planned to be the default)
 * - SS enabled on the DP Ref clock and HW de-spreading enabled with HW
 *   calculations (not planned to be used, but the average clock should
 *   still be valid)
 * - SS enabled on the DP Ref clock and HW de-spreading disabled (should not
 *   be the case with CIK), in which case SW should program all rates
 *   according to the average value (as with previous ASICs)
 */
static int clk_mgr_adjust_dp_ref_freq_for_ss(struct dce_clk_mgr *clk_mgr_dce, int dp_ref_clk_khz)
{
	if (clk_mgr_dce->ss_on_dprefclk && clk_mgr_dce->dprefclk_ss_divider != 0) {
		struct fixed31_32 ss_percentage = dc_fixpt_div_int(
				dc_fixpt_from_fraction(clk_mgr_dce->dprefclk_ss_percentage,
						clk_mgr_dce->dprefclk_ss_divider), 200);
		struct fixed31_32 adj_dp_ref_clk_khz;

		ss_percentage = dc_fixpt_sub(dc_fixpt_one, ss_percentage);
		adj_dp_ref_clk_khz = dc_fixpt_mul_int(ss_percentage, dp_ref_clk_khz);
		dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
	}
	return dp_ref_clk_khz;
}

static int dce_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr)
{
	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
	int dprefclk_wdivider;
	int dprefclk_src_sel;
	int dp_ref_clk_khz = 600000;
	int target_div;

	/* ASSERT that the DP Reference Clock source is from DFS */
	REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel);
	ASSERT(dprefclk_src_sel == 0);

	/* Read mmDENTIST_DISPCLK_CNTL to get the currently
	 * programmed DID DENTIST_DPREFCLK_WDIVIDER */
	REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider);

	/* Convert DENTIST_DPREFCLK_WDIVIDER to an actual divider */
	target_div = dentist_get_divider_from_did(dprefclk_wdivider);

	/* Calculate the current DFS clock, in kHz. */
	dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
		* clk_mgr_dce->dentist_vco_freq_khz) / target_div;

	return clk_mgr_adjust_dp_ref_freq_for_ss(clk_mgr_dce, dp_ref_clk_khz);
}

int dce12_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr)
{
	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);

	return clk_mgr_adjust_dp_ref_freq_for_ss(clk_mgr_dce, clk_mgr_dce->dprefclk_khz);
}
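/* Worked example for the two conversions above (the DENTIST_* constants
 * live in dce_clk_mgr.h; the concrete values below are illustrative
 * assumptions): with a 3600000 kHz DENTIST VCO and a WDIVIDER DID that
 * decodes to a divider value of 30, and assuming
 * DENTIST_DIVIDER_RANGE_SCALE_FACTOR == 4 (the divider is stored in 0.25
 * steps, so 30 means "divide by 7.5"):
 *	dp_ref_clk_khz = 4 * 3600000 / 30 = 480000 kHz
 * If VBIOS reports a 0.5% downspread as dprefclk_ss_percentage = 500 with
 * dprefclk_ss_divider = 1000, clk_mgr_adjust_dp_ref_freq_for_ss() applies
 * the average correction of (500 / 1000) / 200 = 0.25%:
 *	floor(480000 * 0.9975) = 478800 kHz
 */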
/* unit: in_khz. Before mode set, get the pixel clock from the context; the
 * ASIC registers may not be programmed yet
 */
static uint32_t get_max_pixel_clock_for_all_paths(struct dc_state *context)
{
	uint32_t max_pix_clk = 0;
	int i;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		if (pipe_ctx->stream == NULL)
			continue;

		/* do not check underlay pipes */
		if (pipe_ctx->top_pipe)
			continue;

		if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz / 10 > max_pix_clk)
			max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz / 10;

		/* raise the clock state for HBR3/2 if required. Confirmed with HW
		 * DCE/DPCS: the logic for HBR3 still needs Nominal (0.8V) on the
		 * VDDC rail
		 */
		if (dc_is_dp_signal(pipe_ctx->stream->signal) &&
				pipe_ctx->stream_res.pix_clk_params.requested_sym_clk > max_pix_clk)
			max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_sym_clk;
	}

	return max_pix_clk;
}

static enum dm_pp_clocks_state dce_get_required_clocks_state(
	struct clk_mgr *clk_mgr,
	struct dc_state *context)
{
	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
	int i;
	enum dm_pp_clocks_state low_req_clk;
	int max_pix_clk = get_max_pixel_clock_for_all_paths(context);

	/* Iterate from the highest supported to the lowest valid state, and
	 * update lowest RequiredState with the lowest state that satisfies
	 * all required clocks
	 */
	for (i = clk_mgr_dce->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--)
		if (context->bw_ctx.bw.dce.dispclk_khz >
				clk_mgr_dce->max_clks_by_state[i].display_clk_khz
			|| max_pix_clk >
				clk_mgr_dce->max_clks_by_state[i].pixel_clk_khz)
			break;

	low_req_clk = i + 1;
	if (low_req_clk > clk_mgr_dce->max_clks_state) {
		/* set the max clock state for a high phyclk; if even that state
		 * cannot satisfy the display clock, the request is invalid */
		if (clk_mgr_dce->max_clks_by_state[clk_mgr_dce->max_clks_state].display_clk_khz
				< context->bw_ctx.bw.dce.dispclk_khz)
			low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
		else
			low_req_clk = clk_mgr_dce->max_clks_state;
	}

	return low_req_clk;
}

static int dce_set_clock(
	struct clk_mgr *clk_mgr,
	int requested_clk_khz)
{
	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
	struct bp_pixel_clock_parameters pxl_clk_params = { 0 };
	struct dc_bios *bp = clk_mgr->ctx->dc_bios;
	int actual_clock = requested_clk_khz;
	struct dmcu *dmcu = clk_mgr_dce->base.ctx->dc->res_pool->dmcu;

	/* Make sure the requested clock isn't lower than the minimum threshold */
	if (requested_clk_khz > 0)
		requested_clk_khz = max(requested_clk_khz,
				clk_mgr_dce->dentist_vco_freq_khz / 64);

	/* Prepare to program the display clock */
	pxl_clk_params.target_pixel_clock_100hz = requested_clk_khz * 10;
	pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;

	if (clk_mgr_dce->dfs_bypass_active)
		pxl_clk_params.flags.SET_DISPCLK_DFS_BYPASS = true;

	bp->funcs->program_display_engine_pll(bp, &pxl_clk_params);

	if (clk_mgr_dce->dfs_bypass_active) {
		/* Cache the fixed display clock */
		clk_mgr_dce->dfs_bypass_disp_clk =
			pxl_clk_params.dfs_bypass_display_clock;
		actual_clock = pxl_clk_params.dfs_bypass_display_clock;
	}

	/* Coming out of power down (HWReset), we need to mark the clock state
	 * as ClocksStateNominal so that on resume we will call the pplib
	 * voltage regulator. */
	if (requested_clk_khz == 0)
		clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;

	if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu))
		dmcu->funcs->set_psr_wait_loop(dmcu, actual_clock / 1000 / 7);

	return actual_clock;
}
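/* Illustrative numbers for the helpers above: on DCE 11.2
 * (dce112_max_clks_by_state), a context needing dispclk = 533000 kHz with
 * max_pix_clk = 450000 kHz fails ClocksStateLow (459000/400000) but fits
 * ClocksStateNominal (667000/600000), so dce_get_required_clocks_state()
 * returns DM_PP_CLOCKS_STATE_NOMINAL. In dce_set_clock(), a 3600000 kHz
 * VCO gives a minimum threshold of 3600000 / 64 = 56250 kHz, and an actual
 * clock of 600000 kHz programs a PSR wait loop of 600000 / 1000 / 7 = 85.
 */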
int dce112_set_clock(struct clk_mgr *clk_mgr, int requested_clk_khz)
{
	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
	struct bp_set_dce_clock_parameters dce_clk_params;
	struct dc_bios *bp = clk_mgr->ctx->dc_bios;
	struct dc *core_dc = clk_mgr->ctx->dc;
	struct dmcu *dmcu = core_dc->res_pool->dmcu;
	int actual_clock = requested_clk_khz;

	/* Prepare to program the display clock */
	memset(&dce_clk_params, 0, sizeof(dce_clk_params));

	/* Make sure the requested clock isn't lower than the minimum threshold */
	if (requested_clk_khz > 0)
		requested_clk_khz = max(requested_clk_khz,
				clk_mgr_dce->dentist_vco_freq_khz / 62);

	dce_clk_params.target_clock_frequency = requested_clk_khz;
	dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
	dce_clk_params.clock_type = DCECLOCK_TYPE_DISPLAY_CLOCK;

	bp->funcs->set_dce_clock(bp, &dce_clk_params);
	actual_clock = dce_clk_params.target_clock_frequency;

	/* Coming out of power down (HWReset), we need to mark the clock state
	 * as ClocksStateNominal so that on resume we will call the pplib
	 * voltage regulator. */
	if (requested_clk_khz == 0)
		clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;

	/* Program the DP ref clock. VBIOS will determine the DPREFCLK
	 * frequency, so we don't set it */
	dce_clk_params.target_clock_frequency = 0;
	dce_clk_params.clock_type = DCECLOCK_TYPE_DPREFCLK;

	if (!((clk_mgr->ctx->asic_id.chip_family == FAMILY_AI) &&
			ASICREV_IS_VEGA20_P(clk_mgr->ctx->asic_id.hw_internal_rev)))
		dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK =
			(dce_clk_params.pll_id ==
					CLOCK_SOURCE_COMBO_DISPLAY_PLL0);
	else
		dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = false;

	bp->funcs->set_dce_clock(bp, &dce_clk_params);

	if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
		if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
			if (clk_mgr_dce->dfs_bypass_disp_clk != actual_clock)
				dmcu->funcs->set_psr_wait_loop(dmcu,
						actual_clock / 1000 / 7);
		}
	}

	clk_mgr_dce->dfs_bypass_disp_clk = actual_clock;
	return actual_clock;
}
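/* Differences from dce_set_clock() worth noting: the minimum-clock floor
 * here is VCO/62 rather than VCO/64 (e.g. 3600000 / 62 = 58064 kHz), both
 * DISPCLK and DPREFCLK are programmed through the VBIOS set_dce_clock
 * command table, and the DMCU PSR wait loop is only reprogrammed when the
 * actual clock changed since the previous call.
 */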
static void dce_clock_read_integrated_info(struct dce_clk_mgr *clk_mgr_dce)
{
	struct dc_debug_options *debug = &clk_mgr_dce->base.ctx->dc->debug;
	struct dc_bios *bp = clk_mgr_dce->base.ctx->dc_bios;
	struct integrated_info info = { { { 0 } } };
	struct dc_firmware_info fw_info = { { 0 } };
	int i;

	if (bp->integrated_info)
		info = *bp->integrated_info;

	clk_mgr_dce->dentist_vco_freq_khz = info.dentist_vco_freq;
	if (clk_mgr_dce->dentist_vco_freq_khz == 0) {
		bp->funcs->get_firmware_info(bp, &fw_info);
		clk_mgr_dce->dentist_vco_freq_khz =
			fw_info.smu_gpu_pll_output_freq;
		if (clk_mgr_dce->dentist_vco_freq_khz == 0)
			clk_mgr_dce->dentist_vco_freq_khz = 3600000;
	}

	/* update the maximum display clock for each power state */
	for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
		enum dm_pp_clocks_state clk_state = DM_PP_CLOCKS_STATE_INVALID;

		switch (i) {
		case 0:
			clk_state = DM_PP_CLOCKS_STATE_ULTRA_LOW;
			break;

		case 1:
			clk_state = DM_PP_CLOCKS_STATE_LOW;
			break;

		case 2:
			clk_state = DM_PP_CLOCKS_STATE_NOMINAL;
			break;

		case 3:
			clk_state = DM_PP_CLOCKS_STATE_PERFORMANCE;
			break;

		default:
			clk_state = DM_PP_CLOCKS_STATE_INVALID;
			break;
		}

		/* Do not allow a bad VBIOS/SBIOS to override with invalid
		 * values; check for > 100MHz */
		if (info.disp_clk_voltage[i].max_supported_clk >= 100000)
			clk_mgr_dce->max_clks_by_state[clk_state].display_clk_khz =
				info.disp_clk_voltage[i].max_supported_clk;
	}

	if (!debug->disable_dfs_bypass && bp->integrated_info)
		if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
			clk_mgr_dce->dfs_bypass_enabled = true;
}

void dce_clock_read_ss_info(struct dce_clk_mgr *clk_mgr_dce)
{
	struct dc_bios *bp = clk_mgr_dce->base.ctx->dc_bios;
	int ss_info_num = bp->funcs->get_ss_entry_number(
			bp, AS_SIGNAL_TYPE_GPU_PLL);

	if (ss_info_num) {
		struct spread_spectrum_info info = { { 0 } };
		enum bp_result result = bp->funcs->get_spread_spectrum_info(
				bp, AS_SIGNAL_TYPE_GPU_PLL, 0, &info);

		/* The VBIOS keeps an entry for GPU PLL SS even if SS is not
		 * enabled; in that case a non-zero
		 * SSInfo.spreadSpectrumPercentage is the sign that SS is
		 * enabled
		 */
		if (result == BP_RESULT_OK &&
				info.spread_spectrum_percentage != 0) {
			clk_mgr_dce->ss_on_dprefclk = true;
			clk_mgr_dce->dprefclk_ss_divider = info.spread_percentage_divider;

			if (info.type.CENTER_MODE == 0) {
				/* TODO: Currently for the DP Reference clock
				 * we only need the SS percentage for
				 * downspread */
				clk_mgr_dce->dprefclk_ss_percentage =
					info.spread_spectrum_percentage;
			}

			return;
		}

		result = bp->funcs->get_spread_spectrum_info(
				bp, AS_SIGNAL_TYPE_DISPLAY_PORT, 0, &info);

		/* The VBIOS keeps an entry for DPREFCLK SS even if SS is not
		 * enabled; in that case a non-zero
		 * SSInfo.spreadSpectrumPercentage is the sign that SS is
		 * enabled
		 */
		if (result == BP_RESULT_OK &&
				info.spread_spectrum_percentage != 0) {
			clk_mgr_dce->ss_on_dprefclk = true;
			clk_mgr_dce->dprefclk_ss_divider = info.spread_percentage_divider;

			if (info.type.CENTER_MODE == 0) {
				/* Currently for the DP Reference clock we
				 * only need the SS percentage for
				 * downspread */
				clk_mgr_dce->dprefclk_ss_percentage =
					info.spread_spectrum_percentage;
			}
			if (clk_mgr_dce->base.ctx->dc->debug.ignore_dpref_ss)
				clk_mgr_dce->dprefclk_ss_percentage = 0;
		}
	}
}
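/* Lookup order in dce_clock_read_ss_info(): the GPU PLL SS entry is
 * preferred; only when it reports a zero spread (or the query fails) does
 * the code fall back to the DISPLAY_PORT entry. In both cases only
 * downspread (CENTER_MODE == 0) is recorded, since center-spread does not
 * shift the average frequency that the DP DTOs depend on.
 */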
/**
 * dce121_clock_patch_xgmi_ss_info() - Save XGMI spread spectrum info
 * @clk_mgr: clock manager base structure
 *
 * Reads the XGMI spread spectrum info from VBIOS and saves it within
 * the dce clock manager. This operation will overwrite the existing dprefclk
 * SS values if the VBIOS query succeeds. Otherwise, it does nothing. It also
 * sets the ->xgmi_enabled flag.
 */
void dce121_clock_patch_xgmi_ss_info(struct clk_mgr *clk_mgr)
{
	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
	enum bp_result result;
	struct spread_spectrum_info info = { { 0 } };
	struct dc_bios *bp = clk_mgr_dce->base.ctx->dc_bios;

	clk_mgr_dce->xgmi_enabled = false;

	result = bp->funcs->get_spread_spectrum_info(bp, AS_SIGNAL_TYPE_XGMI,
			0, &info);
	if (result == BP_RESULT_OK && info.spread_spectrum_percentage != 0) {
		clk_mgr_dce->xgmi_enabled = true;
		clk_mgr_dce->ss_on_dprefclk = true;
		clk_mgr_dce->dprefclk_ss_divider =
				info.spread_percentage_divider;

		if (info.type.CENTER_MODE == 0) {
			/* Currently for the DP Reference clock we only need
			 * the SS percentage for downspread */
			clk_mgr_dce->dprefclk_ss_percentage =
					info.spread_spectrum_percentage;
		}
	}
}

void dce110_fill_display_configs(
	const struct dc_state *context,
	struct dm_pp_display_configuration *pp_display_cfg)
{
	int j;
	int num_cfgs = 0;

	for (j = 0; j < context->stream_count; j++) {
		int k;

		const struct dc_stream_state *stream = context->streams[j];
		struct dm_pp_single_disp_config *cfg =
			&pp_display_cfg->disp_configs[num_cfgs];
		const struct pipe_ctx *pipe_ctx = NULL;

		for (k = 0; k < MAX_PIPES; k++)
			if (stream == context->res_ctx.pipe_ctx[k].stream) {
				pipe_ctx = &context->res_ctx.pipe_ctx[k];
				break;
			}

		ASSERT(pipe_ctx != NULL);

		/* only notify active streams */
		if (stream->dpms_off)
			continue;

		num_cfgs++;
		cfg->signal = pipe_ctx->stream->signal;
		cfg->pipe_idx = pipe_ctx->stream_res.tg->inst;
		cfg->src_height = stream->src.height;
		cfg->src_width = stream->src.width;
		cfg->ddi_channel_mapping =
			stream->link->ddi_channel_mapping.raw;
		cfg->transmitter =
			stream->link->link_enc->transmitter;
		cfg->link_settings.lane_count =
			stream->link->cur_link_settings.lane_count;
		cfg->link_settings.link_rate =
			stream->link->cur_link_settings.link_rate;
		cfg->link_settings.link_spread =
			stream->link->cur_link_settings.link_spread;
		cfg->sym_clock = stream->phy_pix_clk;
		/* Round v_refresh */
		cfg->v_refresh = stream->timing.pix_clk_100hz * 100;
		cfg->v_refresh /= stream->timing.h_total;
		cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2)
				/ stream->timing.v_total;
	}

	pp_display_cfg->display_count = num_cfgs;
}

static uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context)
{
	uint8_t j;
	uint32_t min_vertical_blank_time = -1;

	for (j = 0; j < context->stream_count; j++) {
		struct dc_stream_state *stream = context->streams[j];
		uint32_t vertical_blank_in_pixels = 0;
		uint32_t vertical_blank_time = 0;

		vertical_blank_in_pixels = stream->timing.h_total *
			(stream->timing.v_total
			 - stream->timing.v_addressable);

		vertical_blank_time = vertical_blank_in_pixels
			* 10000 / stream->timing.pix_clk_100hz;

		if (min_vertical_blank_time > vertical_blank_time)
			min_vertical_blank_time = vertical_blank_time;
	}

	return min_vertical_blank_time;
}
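/* Worked example for the two helpers above with CEA-861 1080p60 timing
 * (pix_clk_100hz = 1485000, h_total = 2200, v_total = 1125,
 * v_addressable = 1080):
 * - v_refresh: 1485000 * 100 / 2200 = 67500, then
 *   (67500 + 1125/2) / 1125 = 60 Hz after rounding;
 * - min vblank: 2200 * (1125 - 1080) = 99000 pixels, so
 *   99000 * 10000 / 1485000 = 666 us available for an mclk switch.
 */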
static int determine_sclk_from_bounding_box(
	const struct dc *dc,
	int required_sclk)
{
	int i;

	/*
	 * Some asics do not give us sclk levels, so we just report the actual
	 * required sclk
	 */
	if (dc->sclk_lvls.num_levels == 0)
		return required_sclk;

	for (i = 0; i < dc->sclk_lvls.num_levels; i++) {
		if (dc->sclk_lvls.clocks_in_khz[i] >= required_sclk)
			return dc->sclk_lvls.clocks_in_khz[i];
	}
	/*
	 * Even the maximum level could not satisfy the requirement; this is
	 * unexpected at this stage and should have been caught at validation
	 * time
	 */
	ASSERT(0);
	return dc->sclk_lvls.clocks_in_khz[dc->sclk_lvls.num_levels - 1];
}

static void dce_pplib_apply_display_requirements(
	struct dc *dc,
	struct dc_state *context)
{
	struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;

	pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context);

	dce110_fill_display_configs(context, pp_display_cfg);

	if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
		dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
}
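/* determine_sclk_from_bounding_box() snaps the requirement up to the next
 * supported level: with levels { 300000, 600000, 900000 } kHz, a required
 * sclk of 450000 returns 600000. The memcmp() in
 * dce_pplib_apply_display_requirements() avoids re-notifying pplib when
 * the display configuration is unchanged from the current state.
 */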
static void dce11_pplib_apply_display_requirements(
	struct dc *dc,
	struct dc_state *context)
{
	struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;

	pp_display_cfg->all_displays_in_sync =
		context->bw_ctx.bw.dce.all_displays_in_sync;
	pp_display_cfg->nb_pstate_switch_disable =
		context->bw_ctx.bw.dce.nbp_state_change_enable == false;
	pp_display_cfg->cpu_cc6_disable =
		context->bw_ctx.bw.dce.cpuc_state_change_enable == false;
	pp_display_cfg->cpu_pstate_disable =
		context->bw_ctx.bw.dce.cpup_state_change_enable == false;
	pp_display_cfg->cpu_pstate_separation_time =
		context->bw_ctx.bw.dce.blackout_recovery_time_us;

	pp_display_cfg->min_memory_clock_khz = context->bw_ctx.bw.dce.yclk_khz
		/ MEMORY_TYPE_MULTIPLIER_CZ;

	pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box(
			dc,
			context->bw_ctx.bw.dce.sclk_khz);

	/*
	 * As a workaround for >4x4K lightup, set dcfclock to the
	 * min_engine_clock value. This is not required for fewer than five
	 * displays, so don't request dcfclk in dc in that case, to avoid
	 * hurting power saving.
	 */
	pp_display_cfg->min_dcfclock_khz = (context->stream_count > 4) ?
			pp_display_cfg->min_engine_clock_khz : 0;

	pp_display_cfg->min_engine_clock_deep_sleep_khz
		= context->bw_ctx.bw.dce.sclk_deep_sleep_khz;

	pp_display_cfg->avail_mclk_switch_time_us =
		dce110_get_min_vblank_time_us(context);
	/* TODO: dce11.2 */
	pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;

	pp_display_cfg->disp_clk_khz = dc->res_pool->clk_mgr->clks.dispclk_khz;

	dce110_fill_display_configs(context, pp_display_cfg);

	/* TODO: is this still applicable? */
	if (pp_display_cfg->display_count == 1) {
		const struct dc_crtc_timing *timing =
			&context->streams[0]->timing;

		pp_display_cfg->crtc_index =
			pp_display_cfg->disp_configs[0].pipe_idx;
		pp_display_cfg->line_time_in_us = timing->h_total * 10000 / timing->pix_clk_100hz;
	}

	if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
		dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
}

static void dce_update_clocks(struct clk_mgr *clk_mgr,
	struct dc_state *context,
	bool safe_to_lower)
{
	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
	struct dm_pp_power_level_change_request level_change_req;
	int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;

	/* TODO: W/A for dal3 linux, investigate why this works */
	if (!clk_mgr_dce->dfs_bypass_active)
		patched_disp_clk = patched_disp_clk * 115 / 100;

	level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context);
	/* get max clock state from PPLIB */
	if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower)
			|| level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) {
		if (dm_pp_apply_power_level_change_request(clk_mgr->ctx, &level_change_req))
			clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
	}

	if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr->clks.dispclk_khz)) {
		patched_disp_clk = dce_set_clock(clk_mgr, patched_disp_clk);
		clk_mgr->clks.dispclk_khz = patched_disp_clk;
	}
	dce_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
}

static void dce11_update_clocks(struct clk_mgr *clk_mgr,
	struct dc_state *context,
	bool safe_to_lower)
{
	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
	struct dm_pp_power_level_change_request level_change_req;
	int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;

	/* TODO: W/A for dal3 linux, investigate why this works */
	if (!clk_mgr_dce->dfs_bypass_active)
		patched_disp_clk = patched_disp_clk * 115 / 100;

	level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context);
	/* get max clock state from PPLIB */
	if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower)
			|| level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) {
		if (dm_pp_apply_power_level_change_request(clk_mgr->ctx, &level_change_req))
			clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
	}

	if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr->clks.dispclk_khz)) {
		context->bw_ctx.bw.dce.dispclk_khz = dce_set_clock(clk_mgr, patched_disp_clk);
		clk_mgr->clks.dispclk_khz = patched_disp_clk;
	}
	dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
}
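/* The "* 115 / 100" bump is the dal3 Linux workaround flagged in the TODOs
 * above: e.g. a calculated dispclk of 400000 kHz is requested as 460000 kHz
 * whenever DFS bypass is inactive. should_set_clock() (defined with the
 * clk_mgr interface) is what lets clocks ramp up immediately but drop only
 * when safe_to_lower is set, i.e. once the new state has actually been
 * committed to the hardware.
 */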
static void dce112_update_clocks(struct clk_mgr *clk_mgr,
	struct dc_state *context,
	bool safe_to_lower)
{
	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
	struct dm_pp_power_level_change_request level_change_req;
	int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;

	/* TODO: W/A for dal3 linux, investigate why this works */
	if (!clk_mgr_dce->dfs_bypass_active)
		patched_disp_clk = patched_disp_clk * 115 / 100;

	level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context);
	/* get max clock state from PPLIB */
	if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower)
			|| level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) {
		if (dm_pp_apply_power_level_change_request(clk_mgr->ctx, &level_change_req))
			clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
	}

	if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr->clks.dispclk_khz)) {
		patched_disp_clk = dce112_set_clock(clk_mgr, patched_disp_clk);
		clk_mgr->clks.dispclk_khz = patched_disp_clk;
	}
	dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
}
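/* dce12_update_clocks() below differs from the variants above: instead of
 * a dm_pp power-level change request it issues explicit per-clock voltage
 * requests (DM_PP_CLOCK_TYPE_DISPLAY_CLK and DM_PP_CLOCK_TYPE_DISPLAYPHYCLK),
 * and on xGMI-enabled parts it first derates the requested dispclk by the
 * WAFL link's SS percentage via clk_mgr_adjust_dp_ref_freq_for_ss().
 */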
static void dce12_update_clocks(struct clk_mgr *clk_mgr,
	struct dc_state *context,
	bool safe_to_lower)
{
	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
	struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
	int max_pix_clk = get_max_pixel_clock_for_all_paths(context);
	int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;

	/* TODO: W/A for dal3 linux, investigate why this works */
	if (!clk_mgr_dce->dfs_bypass_active)
		patched_disp_clk = patched_disp_clk * 115 / 100;

	if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr->clks.dispclk_khz)) {
		clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
		/*
		 * When xGMI is enabled, the display clk needs to be adjusted
		 * with the WAFL link's SS percentage.
		 */
		if (clk_mgr_dce->xgmi_enabled)
			patched_disp_clk = clk_mgr_adjust_dp_ref_freq_for_ss(
					clk_mgr_dce, patched_disp_clk);
		clock_voltage_req.clocks_in_khz = patched_disp_clk;
		clk_mgr->clks.dispclk_khz = dce112_set_clock(clk_mgr, patched_disp_clk);

		dm_pp_apply_clock_for_voltage_request(clk_mgr->ctx, &clock_voltage_req);
	}

	if (should_set_clock(safe_to_lower, max_pix_clk, clk_mgr->clks.phyclk_khz)) {
		clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAYPHYCLK;
		clock_voltage_req.clocks_in_khz = max_pix_clk;
		clk_mgr->clks.phyclk_khz = max_pix_clk;

		dm_pp_apply_clock_for_voltage_request(clk_mgr->ctx, &clock_voltage_req);
	}
	dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
}

static const struct clk_mgr_funcs dce120_funcs = {
	.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
	.update_clocks = dce12_update_clocks
};

static const struct clk_mgr_funcs dce112_funcs = {
	.get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
	.update_clocks = dce112_update_clocks
};

static const struct clk_mgr_funcs dce110_funcs = {
	.get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
	.update_clocks = dce11_update_clocks,
};

static const struct clk_mgr_funcs dce_funcs = {
	.get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
	.update_clocks = dce_update_clocks
};

static void dce_clk_mgr_construct(
	struct dce_clk_mgr *clk_mgr_dce,
	struct dc_context *ctx,
	const struct clk_mgr_registers *regs,
	const struct clk_mgr_shift *clk_shift,
	const struct clk_mgr_mask *clk_mask)
{
	struct clk_mgr *base = &clk_mgr_dce->base;
	struct dm_pp_static_clock_info static_clk_info = {0};

	base->ctx = ctx;
	base->funcs = &dce_funcs;

	clk_mgr_dce->regs = regs;
	clk_mgr_dce->clk_mgr_shift = clk_shift;
	clk_mgr_dce->clk_mgr_mask = clk_mask;

	clk_mgr_dce->dfs_bypass_disp_clk = 0;

	clk_mgr_dce->dprefclk_ss_percentage = 0;
	clk_mgr_dce->dprefclk_ss_divider = 1000;
	clk_mgr_dce->ss_on_dprefclk = false;

	if (dm_pp_get_static_clocks(ctx, &static_clk_info))
		clk_mgr_dce->max_clks_state = static_clk_info.max_clocks_state;
	else
		clk_mgr_dce->max_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
	clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_INVALID;

	dce_clock_read_integrated_info(clk_mgr_dce);
	dce_clock_read_ss_info(clk_mgr_dce);
}

struct clk_mgr *dce_clk_mgr_create(
	struct dc_context *ctx,
	const struct clk_mgr_registers *regs,
	const struct clk_mgr_shift *clk_shift,
	const struct clk_mgr_mask *clk_mask)
{
	struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);

	if (clk_mgr_dce == NULL) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	memcpy(clk_mgr_dce->max_clks_by_state,
		dce80_max_clks_by_state,
		sizeof(dce80_max_clks_by_state));

	dce_clk_mgr_construct(
		clk_mgr_dce, ctx, regs, clk_shift, clk_mask);

	return &clk_mgr_dce->base;
}
struct clk_mgr *dce110_clk_mgr_create(
	struct dc_context *ctx,
	const struct clk_mgr_registers *regs,
	const struct clk_mgr_shift *clk_shift,
	const struct clk_mgr_mask *clk_mask)
{
	struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);

	if (clk_mgr_dce == NULL) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	memcpy(clk_mgr_dce->max_clks_by_state,
		dce110_max_clks_by_state,
		sizeof(dce110_max_clks_by_state));

	dce_clk_mgr_construct(
		clk_mgr_dce, ctx, regs, clk_shift, clk_mask);

	clk_mgr_dce->base.funcs = &dce110_funcs;

	return &clk_mgr_dce->base;
}

struct clk_mgr *dce112_clk_mgr_create(
	struct dc_context *ctx,
	const struct clk_mgr_registers *regs,
	const struct clk_mgr_shift *clk_shift,
	const struct clk_mgr_mask *clk_mask)
{
	struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);

	if (clk_mgr_dce == NULL) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	memcpy(clk_mgr_dce->max_clks_by_state,
		dce112_max_clks_by_state,
		sizeof(dce112_max_clks_by_state));

	dce_clk_mgr_construct(
		clk_mgr_dce, ctx, regs, clk_shift, clk_mask);

	clk_mgr_dce->base.funcs = &dce112_funcs;

	return &clk_mgr_dce->base;
}

struct clk_mgr *dce120_clk_mgr_create(struct dc_context *ctx)
{
	struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);

	if (clk_mgr_dce == NULL) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	memcpy(clk_mgr_dce->max_clks_by_state,
		dce120_max_clks_by_state,
		sizeof(dce120_max_clks_by_state));

	dce_clk_mgr_construct(
		clk_mgr_dce, ctx, NULL, NULL, NULL);

	clk_mgr_dce->dprefclk_khz = 600000;
	clk_mgr_dce->base.funcs = &dce120_funcs;

	return &clk_mgr_dce->base;
}

struct clk_mgr *dce121_clk_mgr_create(struct dc_context *ctx)
{
	struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce),
			GFP_KERNEL);

	if (clk_mgr_dce == NULL) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	memcpy(clk_mgr_dce->max_clks_by_state, dce120_max_clks_by_state,
			sizeof(dce120_max_clks_by_state));

	dce_clk_mgr_construct(clk_mgr_dce, ctx, NULL, NULL, NULL);

	clk_mgr_dce->dprefclk_khz = 625000;
	clk_mgr_dce->base.funcs = &dce120_funcs;

	return &clk_mgr_dce->base;
}

void dce_clk_mgr_destroy(struct clk_mgr **clk_mgr)
{
	struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(*clk_mgr);

	kfree(clk_mgr_dce);
	*clk_mgr = NULL;
}
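/* Hypothetical usage sketch (not part of this file): a DCE 11.2 resource
 * pool would create the clock manager during pool construction and release
 * it on teardown. The disp_clk_regs/shift/mask names are placeholders for
 * the per-ASIC register lists that resource files normally define:
 *
 *	struct clk_mgr *clk_mgr = dce112_clk_mgr_create(ctx,
 *			&disp_clk_regs, &disp_clk_shift, &disp_clk_mask);
 *	if (clk_mgr == NULL)
 *		return false;
 *	...
 *	dce_clk_mgr_destroy(&clk_mgr);	// also clears the caller's pointer
 */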