yellow_carp_ppt.c
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define SWSMU_CODE_LAYER_L2

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_v13_0.h"
#include "smu13_driver_if_yellow_carp.h"
#include "yellow_carp_ppt.h"
#include "smu_v13_0_1_ppsmc.h"
#include "smu_v13_0_1_pmfw.h"
#include "smu_cmn.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

#define FEATURE_MASK(feature) (1ULL << feature)
#define SMC_DPM_FEATURE ( \
	FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_VCN_DPM_BIT) | \
	FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_MP0CLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_LCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_SHUBCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_GFX_DPM_BIT))

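/*
 * Mapping tables that translate the generic SMU message, feature and table
 * enums used by the common swSMU code into the Yellow Carp specific indices
 * defined in smu_v13_0_1_ppsmc.h and smu13_driver_if_yellow_carp.h.
 */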
static struct cmn2asic_msg_mapping yellow_carp_message_map[SMU_MSG_MAX_COUNT] = {
	MSG_MAP(TestMessage,			PPSMC_MSG_TestMessage,			1),
	MSG_MAP(GetSmuVersion,			PPSMC_MSG_GetSmuVersion,		1),
	MSG_MAP(GetDriverIfVersion,		PPSMC_MSG_GetDriverIfVersion,		1),
	MSG_MAP(EnableGfxOff,			PPSMC_MSG_EnableGfxOff,			1),
	MSG_MAP(AllowGfxOff,			PPSMC_MSG_AllowGfxOff,			1),
	MSG_MAP(DisallowGfxOff,			PPSMC_MSG_DisallowGfxOff,		1),
	MSG_MAP(PowerDownVcn,			PPSMC_MSG_PowerDownVcn,			1),
	MSG_MAP(PowerUpVcn,			PPSMC_MSG_PowerUpVcn,			1),
	MSG_MAP(SetHardMinVcn,			PPSMC_MSG_SetHardMinVcn,		1),
	MSG_MAP(PrepareMp1ForUnload,		PPSMC_MSG_PrepareMp1ForUnload,		1),
	MSG_MAP(SetDriverDramAddrHigh,		PPSMC_MSG_SetDriverDramAddrHigh,	1),
	MSG_MAP(SetDriverDramAddrLow,		PPSMC_MSG_SetDriverDramAddrLow,		1),
	MSG_MAP(TransferTableSmu2Dram,		PPSMC_MSG_TransferTableSmu2Dram,	1),
	MSG_MAP(TransferTableDram2Smu,		PPSMC_MSG_TransferTableDram2Smu,	1),
	MSG_MAP(GfxDeviceDriverReset,		PPSMC_MSG_GfxDeviceDriverReset,		1),
	MSG_MAP(GetEnabledSmuFeatures,		PPSMC_MSG_GetEnabledSmuFeatures,	1),
	MSG_MAP(SetHardMinSocclkByFreq,		PPSMC_MSG_SetHardMinSocclkByFreq,	1),
	MSG_MAP(SetSoftMinVcn,			PPSMC_MSG_SetSoftMinVcn,		1),
	MSG_MAP(GetGfxclkFrequency,		PPSMC_MSG_GetGfxclkFrequency,		1),
	MSG_MAP(GetFclkFrequency,		PPSMC_MSG_GetFclkFrequency,		1),
	MSG_MAP(SetSoftMaxGfxClk,		PPSMC_MSG_SetSoftMaxGfxClk,		1),
	MSG_MAP(SetHardMinGfxClk,		PPSMC_MSG_SetHardMinGfxClk,		1),
	MSG_MAP(SetSoftMaxSocclkByFreq,		PPSMC_MSG_SetSoftMaxSocclkByFreq,	1),
	MSG_MAP(SetSoftMaxFclkByFreq,		PPSMC_MSG_SetSoftMaxFclkByFreq,		1),
	MSG_MAP(SetSoftMaxVcn,			PPSMC_MSG_SetSoftMaxVcn,		1),
	MSG_MAP(SetPowerLimitPercentage,	PPSMC_MSG_SetPowerLimitPercentage,	1),
	MSG_MAP(PowerDownJpeg,			PPSMC_MSG_PowerDownJpeg,		1),
	MSG_MAP(PowerUpJpeg,			PPSMC_MSG_PowerUpJpeg,			1),
	MSG_MAP(SetHardMinFclkByFreq,		PPSMC_MSG_SetHardMinFclkByFreq,		1),
	MSG_MAP(SetSoftMinSocclkByFreq,		PPSMC_MSG_SetSoftMinSocclkByFreq,	1),
};

static struct cmn2asic_mapping yellow_carp_feature_mask_map[SMU_FEATURE_COUNT] = {
	FEA_MAP(CCLK_DPM),
	FEA_MAP(FAN_CONTROLLER),
	FEA_MAP(PPT),
	FEA_MAP(TDC),
	FEA_MAP(THERMAL),
	FEA_MAP(ULV),
	FEA_MAP(VCN_DPM),
	FEA_MAP_REVERSE(FCLK),
	FEA_MAP_REVERSE(SOCCLK),
	FEA_MAP(LCLK_DPM),
	FEA_MAP(SHUBCLK_DPM),
	FEA_MAP(DCFCLK_DPM),
	FEA_MAP_HALF_REVERSE(GFX),
	FEA_MAP(DS_GFXCLK),
	FEA_MAP(DS_SOCCLK),
	FEA_MAP(DS_LCLK),
	FEA_MAP(DS_DCFCLK),
	FEA_MAP(DS_FCLK),
	FEA_MAP(DS_MP1CLK),
	FEA_MAP(DS_MP0CLK),
	FEA_MAP(GFX_DEM),
	FEA_MAP(PSI),
	FEA_MAP(PROCHOT),
	FEA_MAP(CPUOFF),
	FEA_MAP(STAPM),
	FEA_MAP(S0I3),
	FEA_MAP(PERF_LIMIT),
	FEA_MAP(CORE_DLDO),
	FEA_MAP(RSMU_LOW_POWER),
	FEA_MAP(SMN_LOW_POWER),
	FEA_MAP(THM_LOW_POWER),
	FEA_MAP(SMUIO_LOW_POWER),
	FEA_MAP(MP1_LOW_POWER),
	FEA_MAP(DS_VCN),
	FEA_MAP(CPPC),
	FEA_MAP(DF_CSTATES),
	FEA_MAP(MSMU_LOW_POWER),
	FEA_MAP(ATHUB_PG),
};

static struct cmn2asic_mapping yellow_carp_table_map[SMU_TABLE_COUNT] = {
	TAB_MAP_VALID(WATERMARKS),
	TAB_MAP_VALID(SMU_METRICS),
	TAB_MAP_VALID(CUSTOM_DPM),
	TAB_MAP_VALID(DPMCLOCKS),
};

static int yellow_carp_init_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;

	SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
		PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t),
		PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
		PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);

	smu_table->clocks_table = kzalloc(sizeof(DpmClocks_t), GFP_KERNEL);
	if (!smu_table->clocks_table)
		goto err0_out;

	smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
	if (!smu_table->metrics_table)
		goto err1_out;
	smu_table->metrics_time = 0;

	smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
	if (!smu_table->watermarks_table)
		goto err2_out;

	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_1);
	smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
	if (!smu_table->gpu_metrics_table)
		goto err3_out;

	return 0;

err3_out:
	kfree(smu_table->watermarks_table);
err2_out:
	kfree(smu_table->metrics_table);
err1_out:
	kfree(smu_table->clocks_table);
err0_out:
	return -ENOMEM;
}

static int yellow_carp_fini_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;

	kfree(smu_table->clocks_table);
	smu_table->clocks_table = NULL;

	kfree(smu_table->metrics_table);
	smu_table->metrics_table = NULL;

	kfree(smu_table->watermarks_table);
	smu_table->watermarks_table = NULL;

	kfree(smu_table->gpu_metrics_table);
	smu_table->gpu_metrics_table = NULL;

	return 0;
}

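/*
 * On feature disablement (driver tear-down), notify the PMFW through
 * PrepareMp1ForUnload; this is intentionally skipped for s0ix suspend.
 */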
static int yellow_carp_system_features_control(struct smu_context *smu, bool en)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (!en && !adev->in_s0ix)
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);

	return ret;
}

static int yellow_carp_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
{
	int ret = 0;

	/* vcn dpm on is a prerequisite for vcn power gate messages */
	if (enable)
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn,
						      0, NULL);
	else
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn,
						      0, NULL);

	return ret;
}

static int yellow_carp_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
{
	int ret = 0;

	if (enable)
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg,
						      0, NULL);
	else
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_PowerDownJpeg, 0,
						      NULL);

	return ret;
}

static bool yellow_carp_is_dpm_running(struct smu_context *smu)
{
	int ret = 0;
	uint64_t feature_enabled;

	ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);

	if (ret)
		return false;

	return !!(feature_enabled & SMC_DPM_FEATURE);
}

static int yellow_carp_post_smu_init(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	/* allow message will be sent after enable message on Yellow Carp */
	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_EnableGfxOff, NULL);
	if (ret)
		dev_err(adev->dev, "Failed to Enable GfxOff!\n");
	return ret;
}

static int yellow_carp_mode_reset(struct smu_context *smu, int type)
{
	int ret = 0;

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, type, NULL);
	if (ret)
		dev_err(smu->adev->dev, "Failed to mode reset!\n");

	return ret;
}

static int yellow_carp_mode2_reset(struct smu_context *smu)
{
	return yellow_carp_mode_reset(smu, SMU_RESET_MODE_2);
}

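/*
 * Derive the SmartShift boost shares: how far the APU and dGPU power draw
 * exceed their respective STAPM limits, expressed as a percentage capped at
 * 100. Only the larger of the two shares is reported; the other is zeroed.
 */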
static void yellow_carp_get_ss_power_percent(SmuMetrics_t *metrics,
					     uint32_t *apu_percent, uint32_t *dgpu_percent)
{
	uint32_t apu_boost = 0;
	uint32_t dgpu_boost = 0;
	uint16_t apu_limit = 0;
	uint16_t dgpu_limit = 0;
	uint16_t apu_power = 0;
	uint16_t dgpu_power = 0;

	/* APU and dGPU power values are reported in milli Watts
	 * and STAPM power limits are in Watts */
	apu_power = metrics->ApuPower/1000;
	apu_limit = metrics->StapmOpnLimit;
	if (apu_power > apu_limit && apu_limit != 0)
		apu_boost = ((apu_power - apu_limit) * 100) / apu_limit;
	apu_boost = (apu_boost > 100) ? 100 : apu_boost;

	dgpu_power = metrics->dGpuPower/1000;
	if (metrics->StapmCurrentLimit > metrics->StapmOpnLimit)
		dgpu_limit = metrics->StapmCurrentLimit - metrics->StapmOpnLimit;
	if (dgpu_power > dgpu_limit && dgpu_limit != 0)
		dgpu_boost = ((dgpu_power - dgpu_limit) * 100) / dgpu_limit;
	dgpu_boost = (dgpu_boost > 100) ? 100 : dgpu_boost;

	if (dgpu_boost >= apu_boost)
		apu_boost = 0;
	else
		dgpu_boost = 0;

	*apu_percent = apu_boost;
	*dgpu_percent = dgpu_boost;
}

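/*
 * Read a single field out of the cached SmuMetrics_t table, refreshing the
 * cache first. Some fields are rescaled on the way out: activity is divided
 * by 100, temperatures are scaled via SMU_TEMPERATURE_UNITS_PER_CENTIGRADES,
 * and socket power is converted into the format the sensor layer expects.
 */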
static int yellow_carp_get_smu_metrics_data(struct smu_context *smu,
					    MetricsMember_t member,
					    uint32_t *value)
{
	struct smu_table_context *smu_table = &smu->smu_table;

	SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
	int ret = 0;
	uint32_t apu_percent = 0;
	uint32_t dgpu_percent = 0;

	ret = smu_cmn_get_metrics_table(smu, NULL, false);
	if (ret)
		return ret;

	switch (member) {
	case METRICS_AVERAGE_GFXCLK:
		*value = metrics->GfxclkFrequency;
		break;
	case METRICS_AVERAGE_SOCCLK:
		*value = metrics->SocclkFrequency;
		break;
	case METRICS_AVERAGE_VCLK:
		*value = metrics->VclkFrequency;
		break;
	case METRICS_AVERAGE_DCLK:
		*value = metrics->DclkFrequency;
		break;
	case METRICS_AVERAGE_UCLK:
		*value = metrics->MemclkFrequency;
		break;
	case METRICS_AVERAGE_GFXACTIVITY:
		*value = metrics->GfxActivity / 100;
		break;
	case METRICS_AVERAGE_VCNACTIVITY:
		*value = metrics->UvdActivity;
		break;
	case METRICS_AVERAGE_SOCKETPOWER:
		*value = (metrics->CurrentSocketPower << 8) / 1000;
		break;
	case METRICS_TEMPERATURE_EDGE:
		*value = metrics->GfxTemperature / 100 *
			 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_HOTSPOT:
		*value = metrics->SocTemperature / 100 *
			 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_THROTTLER_STATUS:
		*value = metrics->ThrottlerStatus;
		break;
	case METRICS_VOLTAGE_VDDGFX:
		*value = metrics->Voltage[0];
		break;
	case METRICS_VOLTAGE_VDDSOC:
		*value = metrics->Voltage[1];
		break;
	case METRICS_SS_APU_SHARE:
		/* return the percentage of APU power boost
		 * with respect to APU's power limit.
		 */
		yellow_carp_get_ss_power_percent(metrics, &apu_percent, &dgpu_percent);
		*value = apu_percent;
		break;
	case METRICS_SS_DGPU_SHARE:
		/* return the percentage of dGPU power boost
		 * with respect to dGPU's power limit.
		 */
		yellow_carp_get_ss_power_percent(metrics, &apu_percent, &dgpu_percent);
		*value = dgpu_percent;
		break;
	default:
		*value = UINT_MAX;
		break;
	}

	return ret;
}

static int yellow_carp_read_sensor(struct smu_context *smu,
				   enum amd_pp_sensors sensor,
				   void *data, uint32_t *size)
{
	int ret = 0;

	if (!data || !size)
		return -EINVAL;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		ret = yellow_carp_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_GFXACTIVITY,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_POWER:
		ret = yellow_carp_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_SOCKETPOWER,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_EDGE_TEMP:
		ret = yellow_carp_get_smu_metrics_data(smu,
						       METRICS_TEMPERATURE_EDGE,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
		ret = yellow_carp_get_smu_metrics_data(smu,
						       METRICS_TEMPERATURE_HOTSPOT,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		ret = yellow_carp_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_UCLK,
						       (uint32_t *)data);
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		ret = yellow_carp_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_GFXCLK,
						       (uint32_t *)data);
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VDDGFX:
		ret = yellow_carp_get_smu_metrics_data(smu,
						       METRICS_VOLTAGE_VDDGFX,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VDDNB:
		ret = yellow_carp_get_smu_metrics_data(smu,
						       METRICS_VOLTAGE_VDDSOC,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_SS_APU_SHARE:
		ret = yellow_carp_get_smu_metrics_data(smu,
						       METRICS_SS_APU_SHARE,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_SS_DGPU_SHARE:
		ret = yellow_carp_get_smu_metrics_data(smu,
						       METRICS_SS_DGPU_SHARE,
						       (uint32_t *)data);
		*size = 4;
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

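/*
 * Copy the display driver's watermark ranges into the shared Watermarks_t
 * table (reader sets into the WM_DCFCLK rows, writer sets into the WM_SOCCLK
 * rows) and upload it to the SMU once, the first time valid ranges arrive.
 */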
static int yellow_carp_set_watermarks_table(struct smu_context *smu,
					    struct pp_smu_wm_range_sets *clock_ranges)
{
	int i;
	int ret = 0;
	Watermarks_t *table = smu->smu_table.watermarks_table;

	if (!table || !clock_ranges)
		return -EINVAL;

	if (clock_ranges) {
		if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES ||
		    clock_ranges->num_writer_wm_sets > NUM_WM_RANGES)
			return -EINVAL;

		for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) {
			table->WatermarkRow[WM_DCFCLK][i].MinClock =
				clock_ranges->reader_wm_sets[i].min_drain_clk_mhz;
			table->WatermarkRow[WM_DCFCLK][i].MaxClock =
				clock_ranges->reader_wm_sets[i].max_drain_clk_mhz;
			table->WatermarkRow[WM_DCFCLK][i].MinMclk =
				clock_ranges->reader_wm_sets[i].min_fill_clk_mhz;
			table->WatermarkRow[WM_DCFCLK][i].MaxMclk =
				clock_ranges->reader_wm_sets[i].max_fill_clk_mhz;

			table->WatermarkRow[WM_DCFCLK][i].WmSetting =
				clock_ranges->reader_wm_sets[i].wm_inst;
		}

		for (i = 0; i < clock_ranges->num_writer_wm_sets; i++) {
			table->WatermarkRow[WM_SOCCLK][i].MinClock =
				clock_ranges->writer_wm_sets[i].min_fill_clk_mhz;
			table->WatermarkRow[WM_SOCCLK][i].MaxClock =
				clock_ranges->writer_wm_sets[i].max_fill_clk_mhz;
			table->WatermarkRow[WM_SOCCLK][i].MinMclk =
				clock_ranges->writer_wm_sets[i].min_drain_clk_mhz;
			table->WatermarkRow[WM_SOCCLK][i].MaxMclk =
				clock_ranges->writer_wm_sets[i].max_drain_clk_mhz;

			table->WatermarkRow[WM_SOCCLK][i].WmSetting =
				clock_ranges->writer_wm_sets[i].wm_inst;
		}

		smu->watermarks_bitmap |= WATERMARKS_EXIST;
	}

	/* pass data to smu controller */
	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
	    !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
		ret = smu_cmn_write_watermarks_table(smu);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to update WMTABLE!");
			return ret;
		}
		smu->watermarks_bitmap |= WATERMARKS_LOADED;
	}

	return 0;
}

static ssize_t yellow_carp_get_gpu_metrics(struct smu_context *smu,
					   void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v2_1 *gpu_metrics =
		(struct gpu_metrics_v2_1 *)smu_table->gpu_metrics_table;
	SmuMetrics_t metrics;
	int ret = 0;

	ret = smu_cmn_get_metrics_table(smu, &metrics, true);
	if (ret)
		return ret;

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 1);

	gpu_metrics->temperature_gfx = metrics.GfxTemperature;
	gpu_metrics->temperature_soc = metrics.SocTemperature;
	memcpy(&gpu_metrics->temperature_core[0],
	       &metrics.CoreTemperature[0],
	       sizeof(uint16_t) * 8);
	gpu_metrics->temperature_l3[0] = metrics.L3Temperature;

	gpu_metrics->average_gfx_activity = metrics.GfxActivity;
	gpu_metrics->average_mm_activity = metrics.UvdActivity;

	gpu_metrics->average_socket_power = metrics.CurrentSocketPower;
	gpu_metrics->average_gfx_power = metrics.Power[0];
	gpu_metrics->average_soc_power = metrics.Power[1];
	memcpy(&gpu_metrics->average_core_power[0],
	       &metrics.CorePower[0],
	       sizeof(uint16_t) * 8);

	gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency;
	gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency;
	gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency;
	gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency;
	gpu_metrics->average_vclk_frequency = metrics.VclkFrequency;
	gpu_metrics->average_dclk_frequency = metrics.DclkFrequency;

	memcpy(&gpu_metrics->current_coreclk[0],
	       &metrics.CoreFrequency[0],
	       sizeof(uint16_t) * 8);
	gpu_metrics->current_l3clk[0] = metrics.L3Frequency;

	gpu_metrics->throttle_status = metrics.ThrottlerStatus;

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v2_1);
}

static int yellow_carp_set_default_dpm_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;

	return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
}

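/*
 * Fine-grain gfxclk control through the OD (overdrive) interface, allowed
 * only at the manual performance level: index 0 stages a new hard minimum,
 * index 1 a new soft maximum, and the commit command validates the pair and
 * sends SetHardMinGfxClk/SetSoftMaxGfxClk to the PMFW.
 */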
static int yellow_carp_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type,
					 long input[], uint32_t size)
{
	struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
	int ret = 0;

	/* Only allowed in manual mode */
	if (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		return -EINVAL;

	switch (type) {
	case PP_OD_EDIT_SCLK_VDDC_TABLE:
		if (size != 2) {
			dev_err(smu->adev->dev, "Input parameter number not correct\n");
			return -EINVAL;
		}

		if (input[0] == 0) {
			if (input[1] < smu->gfx_default_hard_min_freq) {
				dev_warn(smu->adev->dev,
					 "Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
					 input[1], smu->gfx_default_hard_min_freq);
				return -EINVAL;
			}
			smu->gfx_actual_hard_min_freq = input[1];
		} else if (input[0] == 1) {
			if (input[1] > smu->gfx_default_soft_max_freq) {
				dev_warn(smu->adev->dev,
					 "Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
					 input[1], smu->gfx_default_soft_max_freq);
				return -EINVAL;
			}
			smu->gfx_actual_soft_max_freq = input[1];
		} else {
			return -EINVAL;
		}
		break;
	case PP_OD_RESTORE_DEFAULT_TABLE:
		if (size != 0) {
			dev_err(smu->adev->dev, "Input parameter number not correct\n");
			return -EINVAL;
		} else {
			smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
			smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
		}
		break;
	case PP_OD_COMMIT_DPM_TABLE:
		if (size != 0) {
			dev_err(smu->adev->dev, "Input parameter number not correct\n");
			return -EINVAL;
		} else {
			if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
				dev_err(smu->adev->dev,
					"The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
					smu->gfx_actual_hard_min_freq,
					smu->gfx_actual_soft_max_freq);
				return -EINVAL;
			}

			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
							      smu->gfx_actual_hard_min_freq, NULL);
			if (ret) {
				dev_err(smu->adev->dev, "Set hard min sclk failed!");
				return ret;
			}

			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
							      smu->gfx_actual_soft_max_freq, NULL);
			if (ret) {
				dev_err(smu->adev->dev, "Set soft max sclk failed!");
				return ret;
			}
		}
		break;
	default:
		return -ENOSYS;
	}

	return ret;
}

static int yellow_carp_get_current_clk_freq(struct smu_context *smu,
					    enum smu_clk_type clk_type,
					    uint32_t *value)
{
	MetricsMember_t member_type;

	switch (clk_type) {
	case SMU_SOCCLK:
		member_type = METRICS_AVERAGE_SOCCLK;
		break;
	case SMU_VCLK:
		member_type = METRICS_AVERAGE_VCLK;
		break;
	case SMU_DCLK:
		member_type = METRICS_AVERAGE_DCLK;
		break;
	case SMU_MCLK:
		member_type = METRICS_AVERAGE_UCLK;
		break;
	case SMU_FCLK:
		return smu_cmn_send_smc_msg_with_param(smu,
				SMU_MSG_GetFclkFrequency, 0, value);
	case SMU_GFXCLK:
	case SMU_SCLK:
		return smu_cmn_send_smc_msg_with_param(smu,
				SMU_MSG_GetGfxclkFrequency, 0, value);
	default:
		return -EINVAL;
	}

	return yellow_carp_get_smu_metrics_data(smu, member_type, value);
}

static int yellow_carp_get_dpm_level_count(struct smu_context *smu,
					   enum smu_clk_type clk_type,
					   uint32_t *count)
{
	DpmClocks_t *clk_table = smu->smu_table.clocks_table;

	switch (clk_type) {
	case SMU_SOCCLK:
		*count = clk_table->NumSocClkLevelsEnabled;
		break;
	case SMU_VCLK:
		*count = clk_table->VcnClkLevelsEnabled;
		break;
	case SMU_DCLK:
		*count = clk_table->VcnClkLevelsEnabled;
		break;
	case SMU_MCLK:
		*count = clk_table->NumDfPstatesEnabled;
		break;
	case SMU_FCLK:
		*count = clk_table->NumDfPstatesEnabled;
		break;
	default:
		break;
	}

	return 0;
}

static int yellow_carp_get_dpm_freq_by_index(struct smu_context *smu,
					     enum smu_clk_type clk_type,
					     uint32_t dpm_level,
					     uint32_t *freq)
{
	DpmClocks_t *clk_table = smu->smu_table.clocks_table;

	if (!clk_table || clk_type >= SMU_CLK_COUNT)
		return -EINVAL;

	switch (clk_type) {
	case SMU_SOCCLK:
		if (dpm_level >= clk_table->NumSocClkLevelsEnabled)
			return -EINVAL;
		*freq = clk_table->SocClocks[dpm_level];
		break;
	case SMU_VCLK:
		if (dpm_level >= clk_table->VcnClkLevelsEnabled)
			return -EINVAL;
		*freq = clk_table->VClocks[dpm_level];
		break;
	case SMU_DCLK:
		if (dpm_level >= clk_table->VcnClkLevelsEnabled)
			return -EINVAL;
		*freq = clk_table->DClocks[dpm_level];
		break;
	case SMU_UCLK:
	case SMU_MCLK:
		if (dpm_level >= clk_table->NumDfPstatesEnabled)
			return -EINVAL;
		*freq = clk_table->DfPstateTable[dpm_level].MemClk;
		break;
	case SMU_FCLK:
		if (dpm_level >= clk_table->NumDfPstatesEnabled)
			return -EINVAL;
		*freq = clk_table->DfPstateTable[dpm_level].FClk;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static bool yellow_carp_clk_dpm_is_enabled(struct smu_context *smu,
					   enum smu_clk_type clk_type)
{
	enum smu_feature_mask feature_id = 0;

	switch (clk_type) {
	case SMU_MCLK:
	case SMU_UCLK:
	case SMU_FCLK:
		feature_id = SMU_FEATURE_DPM_FCLK_BIT;
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
		break;
	case SMU_SOCCLK:
		feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
		break;
	case SMU_VCLK:
	case SMU_DCLK:
		feature_id = SMU_FEATURE_VCN_DPM_BIT;
		break;
	default:
		return true;
	}

	return smu_cmn_feature_is_enabled(smu, feature_id);
}

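/*
 * Report the lowest and highest reachable frequency for a clock domain. If
 * DPM for that domain is disabled, fall back to the VBIOS boot values
 * (converted to MHz); otherwise use the firmware DPM clock table. Note the
 * DF p-state table is ordered fastest first, so level 0 is the maximum and
 * the last enabled level is the minimum.
 */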
static int yellow_carp_get_dpm_ultimate_freq(struct smu_context *smu,
					     enum smu_clk_type clk_type,
					     uint32_t *min,
					     uint32_t *max)
{
	DpmClocks_t *clk_table = smu->smu_table.clocks_table;
	uint32_t clock_limit;
	uint32_t max_dpm_level, min_dpm_level;
	int ret = 0;

	if (!yellow_carp_clk_dpm_is_enabled(smu, clk_type)) {
		switch (clk_type) {
		case SMU_MCLK:
		case SMU_UCLK:
			clock_limit = smu->smu_table.boot_values.uclk;
			break;
		case SMU_FCLK:
			clock_limit = smu->smu_table.boot_values.fclk;
			break;
		case SMU_GFXCLK:
		case SMU_SCLK:
			clock_limit = smu->smu_table.boot_values.gfxclk;
			break;
		case SMU_SOCCLK:
			clock_limit = smu->smu_table.boot_values.socclk;
			break;
		case SMU_VCLK:
			clock_limit = smu->smu_table.boot_values.vclk;
			break;
		case SMU_DCLK:
			clock_limit = smu->smu_table.boot_values.dclk;
			break;
		default:
			clock_limit = 0;
			break;
		}

		/* clock in Mhz unit */
		if (min)
			*min = clock_limit / 100;
		if (max)
			*max = clock_limit / 100;

		return 0;
	}

	if (max) {
		switch (clk_type) {
		case SMU_GFXCLK:
		case SMU_SCLK:
			*max = clk_table->MaxGfxClk;
			break;
		case SMU_MCLK:
		case SMU_UCLK:
		case SMU_FCLK:
			max_dpm_level = 0;
			break;
		case SMU_SOCCLK:
			max_dpm_level = clk_table->NumSocClkLevelsEnabled - 1;
			break;
		case SMU_VCLK:
		case SMU_DCLK:
			max_dpm_level = clk_table->VcnClkLevelsEnabled - 1;
			break;
		default:
			ret = -EINVAL;
			goto failed;
		}

		if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) {
			ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, max_dpm_level, max);
			if (ret)
				goto failed;
		}
	}

	if (min) {
		switch (clk_type) {
		case SMU_GFXCLK:
		case SMU_SCLK:
			*min = clk_table->MinGfxClk;
			break;
		case SMU_MCLK:
		case SMU_UCLK:
		case SMU_FCLK:
			min_dpm_level = clk_table->NumDfPstatesEnabled - 1;
			break;
		case SMU_SOCCLK:
			min_dpm_level = 0;
			break;
		case SMU_VCLK:
		case SMU_DCLK:
			min_dpm_level = 0;
			break;
		default:
			ret = -EINVAL;
			goto failed;
		}

		if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) {
			ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, min_dpm_level, min);
			if (ret)
				goto failed;
		}
	}

failed:
	return ret;
}

static int yellow_carp_set_soft_freq_limited_range(struct smu_context *smu,
						   enum smu_clk_type clk_type,
						   uint32_t min,
						   uint32_t max)
{
	enum smu_message_type msg_set_min, msg_set_max;
	int ret = 0;

	if (!yellow_carp_clk_dpm_is_enabled(smu, clk_type))
		return -EINVAL;

	switch (clk_type) {
	case SMU_GFXCLK:
	case SMU_SCLK:
		msg_set_min = SMU_MSG_SetHardMinGfxClk;
		msg_set_max = SMU_MSG_SetSoftMaxGfxClk;
		break;
	case SMU_FCLK:
		msg_set_min = SMU_MSG_SetHardMinFclkByFreq;
		msg_set_max = SMU_MSG_SetSoftMaxFclkByFreq;
		break;
	case SMU_SOCCLK:
		msg_set_min = SMU_MSG_SetHardMinSocclkByFreq;
		msg_set_max = SMU_MSG_SetSoftMaxSocclkByFreq;
		break;
	case SMU_VCLK:
	case SMU_DCLK:
		msg_set_min = SMU_MSG_SetHardMinVcn;
		msg_set_max = SMU_MSG_SetSoftMaxVcn;
		break;
	default:
		return -EINVAL;
	}

	ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_min, min, NULL);
	if (ret)
		goto out;

	ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_max, max, NULL);
	if (ret)
		goto out;

out:
	return ret;
}

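/*
 * sysfs clock level reporting. SOCCLK/VCLK/DCLK/MCLK/FCLK print their real
 * DPM table entries; GFXCLK/SCLK is fine-grained, so it is shown as three
 * pseudo levels (hard min, a middle level that is either the current clock
 * or the default UMD pstate value, and soft max), with '*' marking the level
 * that matches the current frequency.
 */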
static int yellow_carp_print_clk_levels(struct smu_context *smu,
					enum smu_clk_type clk_type, char *buf)
{
	int i, size = 0, ret = 0;
	uint32_t cur_value = 0, value = 0, count = 0;
	uint32_t min, max;

	smu_cmn_get_sysfs_buf(&buf, &size);

	switch (clk_type) {
	case SMU_OD_SCLK:
		size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
		size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
		(smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
		size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
		(smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
		break;
	case SMU_OD_RANGE:
		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
		size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
				      smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
		break;
	case SMU_SOCCLK:
	case SMU_VCLK:
	case SMU_DCLK:
	case SMU_MCLK:
	case SMU_FCLK:
		ret = yellow_carp_get_current_clk_freq(smu, clk_type, &cur_value);
		if (ret)
			goto print_clk_out;

		ret = yellow_carp_get_dpm_level_count(smu, clk_type, &count);
		if (ret)
			goto print_clk_out;

		for (i = 0; i < count; i++) {
			ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, i, &value);
			if (ret)
				goto print_clk_out;

			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value,
					      cur_value == value ? "*" : "");
		}
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		ret = yellow_carp_get_current_clk_freq(smu, clk_type, &cur_value);
		if (ret)
			goto print_clk_out;
		min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq;
		max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq;
		if (cur_value == max)
			i = 2;
		else if (cur_value == min)
			i = 0;
		else
			i = 1;
		size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min,
				      i == 0 ? "*" : "");
		size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
				      i == 1 ? cur_value : YELLOW_CARP_UMD_PSTATE_GFXCLK,
				      i == 1 ? "*" : "");
		size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max,
				      i == 2 ? "*" : "");
		break;
	default:
		break;
	}

print_clk_out:
	return size;
}

static int yellow_carp_force_clk_levels(struct smu_context *smu,
					enum smu_clk_type clk_type, uint32_t mask)
{
	uint32_t soft_min_level = 0, soft_max_level = 0;
	uint32_t min_freq = 0, max_freq = 0;
	int ret = 0;

	soft_min_level = mask ? (ffs(mask) - 1) : 0;
	soft_max_level = mask ? (fls(mask) - 1) : 0;

	switch (clk_type) {
	case SMU_SOCCLK:
	case SMU_FCLK:
	case SMU_VCLK:
	case SMU_DCLK:
		ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
		if (ret)
			goto force_level_out;

		ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq);
		if (ret)
			goto force_level_out;

		ret = yellow_carp_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
		if (ret)
			goto force_level_out;
		break;
	default:
		ret = -EINVAL;
		break;
	}

force_level_out:
	return ret;
}

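/*
 * Apply a forced performance level by clamping sclk, fclk and socclk:
 * "high" and "low" pin both limits to the domain's maximum or minimum,
 * "auto" restores the full range, and the profiling levels are currently
 * no-ops since no tuned clocks have been provided for this ASIC yet.
 */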
static int yellow_carp_set_performance_level(struct smu_context *smu,
					     enum amd_dpm_forced_level level)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t sclk_min = 0, sclk_max = 0;
	uint32_t fclk_min = 0, fclk_max = 0;
	uint32_t socclk_min = 0, socclk_max = 0;
	int ret = 0;

	switch (level) {
	case AMD_DPM_FORCED_LEVEL_HIGH:
		yellow_carp_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &sclk_max);
		yellow_carp_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_max);
		yellow_carp_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_max);
		sclk_min = sclk_max;
		fclk_min = fclk_max;
		socclk_min = socclk_max;
		break;
	case AMD_DPM_FORCED_LEVEL_LOW:
		yellow_carp_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, NULL);
		yellow_carp_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, NULL);
		yellow_carp_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, NULL);
		sclk_max = sclk_min;
		fclk_max = fclk_min;
		socclk_max = socclk_min;
		break;
	case AMD_DPM_FORCED_LEVEL_AUTO:
		yellow_carp_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, &sclk_max);
		yellow_carp_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, &fclk_max);
		yellow_carp_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, &socclk_max);
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		/* Temporarily do nothing since the optimal clocks haven't been provided yet */
		break;
	case AMD_DPM_FORCED_LEVEL_MANUAL:
	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
		return 0;
	default:
		dev_err(adev->dev, "Invalid performance level %d\n", level);
		return -EINVAL;
	}

	if (sclk_min && sclk_max) {
		ret = yellow_carp_set_soft_freq_limited_range(smu,
							      SMU_SCLK,
							      sclk_min,
							      sclk_max);
		if (ret)
			return ret;

		smu->gfx_actual_hard_min_freq = sclk_min;
		smu->gfx_actual_soft_max_freq = sclk_max;
	}

	if (fclk_min && fclk_max) {
		ret = yellow_carp_set_soft_freq_limited_range(smu,
							      SMU_FCLK,
							      fclk_min,
							      fclk_max);
		if (ret)
			return ret;
	}

	if (socclk_min && socclk_max) {
		ret = yellow_carp_set_soft_freq_limited_range(smu,
							      SMU_SOCCLK,
							      socclk_min,
							      socclk_max);
		if (ret)
			return ret;
	}

	return ret;
}

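/*
 * Seed the fine-grain (OD) gfxclk bounds from the firmware clock table; the
 * "actual" values start at zero, meaning no user override is in effect.
 */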
static int yellow_carp_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
{
	DpmClocks_t *clk_table = smu->smu_table.clocks_table;

	smu->gfx_default_hard_min_freq = clk_table->MinGfxClk;
	smu->gfx_default_soft_max_freq = clk_table->MaxGfxClk;
	smu->gfx_actual_hard_min_freq = 0;
	smu->gfx_actual_soft_max_freq = 0;

	return 0;
}

static const struct pptable_funcs yellow_carp_ppt_funcs = {
	.check_fw_status = smu_v13_0_check_fw_status,
	.check_fw_version = smu_v13_0_check_fw_version,
	.init_smc_tables = yellow_carp_init_smc_tables,
	.fini_smc_tables = yellow_carp_fini_smc_tables,
	.get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values,
	.system_features_control = yellow_carp_system_features_control,
	.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
	.send_smc_msg = smu_cmn_send_smc_msg,
	.dpm_set_vcn_enable = yellow_carp_dpm_set_vcn_enable,
	.dpm_set_jpeg_enable = yellow_carp_dpm_set_jpeg_enable,
	.set_default_dpm_table = yellow_carp_set_default_dpm_tables,
	.read_sensor = yellow_carp_read_sensor,
	.is_dpm_running = yellow_carp_is_dpm_running,
	.set_watermarks_table = yellow_carp_set_watermarks_table,
	.get_gpu_metrics = yellow_carp_get_gpu_metrics,
	.get_enabled_mask = smu_cmn_get_enabled_mask,
	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
	.set_driver_table_location = smu_v13_0_set_driver_table_location,
	.gfx_off_control = smu_v13_0_gfx_off_control,
	.post_init = yellow_carp_post_smu_init,
	.mode2_reset = yellow_carp_mode2_reset,
	.get_dpm_ultimate_freq = yellow_carp_get_dpm_ultimate_freq,
	.od_edit_dpm_table = yellow_carp_od_edit_dpm_table,
	.print_clk_levels = yellow_carp_print_clk_levels,
	.force_clk_levels = yellow_carp_force_clk_levels,
	.set_performance_level = yellow_carp_set_performance_level,
	.set_fine_grain_gfx_freq_parameters = yellow_carp_set_fine_grain_gfx_freq_parameters,
};

void yellow_carp_set_ppt_funcs(struct smu_context *smu)
{
	smu->ppt_funcs = &yellow_carp_ppt_funcs;
	smu->message_map = yellow_carp_message_map;
	smu->feature_map = yellow_carp_feature_mask_map;
	smu->table_map = yellow_carp_table_map;
	smu->is_apu = true;
}