smu_v11_0.c (59824B)
1/* 2 * Copyright 2019 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 */ 22 23#include <linux/firmware.h> 24#include <linux/module.h> 25#include <linux/pci.h> 26#include <linux/reboot.h> 27 28#define SMU_11_0_PARTIAL_PPTABLE 29#define SWSMU_CODE_LAYER_L3 30 31#include "amdgpu.h" 32#include "amdgpu_smu.h" 33#include "atomfirmware.h" 34#include "amdgpu_atomfirmware.h" 35#include "amdgpu_atombios.h" 36#include "smu_v11_0.h" 37#include "soc15_common.h" 38#include "atom.h" 39#include "amdgpu_ras.h" 40#include "smu_cmn.h" 41 42#include "asic_reg/thm/thm_11_0_2_offset.h" 43#include "asic_reg/thm/thm_11_0_2_sh_mask.h" 44#include "asic_reg/mp/mp_11_0_offset.h" 45#include "asic_reg/mp/mp_11_0_sh_mask.h" 46#include "asic_reg/smuio/smuio_11_0_0_offset.h" 47#include "asic_reg/smuio/smuio_11_0_0_sh_mask.h" 48 49/* 50 * DO NOT use these for err/warn/info/debug messages. 51 * Use dev_err, dev_warn, dev_info and dev_dbg instead. 52 * They are more MGPU friendly. 53 */ 54#undef pr_err 55#undef pr_warn 56#undef pr_info 57#undef pr_debug 58 59MODULE_FIRMWARE("amdgpu/arcturus_smc.bin"); 60MODULE_FIRMWARE("amdgpu/navi10_smc.bin"); 61MODULE_FIRMWARE("amdgpu/navi14_smc.bin"); 62MODULE_FIRMWARE("amdgpu/navi12_smc.bin"); 63MODULE_FIRMWARE("amdgpu/sienna_cichlid_smc.bin"); 64MODULE_FIRMWARE("amdgpu/navy_flounder_smc.bin"); 65MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_smc.bin"); 66MODULE_FIRMWARE("amdgpu/beige_goby_smc.bin"); 67 68#define SMU11_VOLTAGE_SCALE 4 69 70#define SMU11_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms 71 72#define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288 73#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK 0x00000070L 74#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT 0x4 75#define smnPCIE_LC_SPEED_CNTL 0x11140290 76#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xC000 77#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0xE 78 79#define mmTHM_BACO_CNTL_ARCT 0xA7 80#define mmTHM_BACO_CNTL_ARCT_BASE_IDX 0 81 82int smu_v11_0_init_microcode(struct smu_context *smu) 83{ 84 struct amdgpu_device *adev = smu->adev; 85 const char *chip_name; 86 char fw_name[SMU_FW_NAME_LEN]; 87 int err = 0; 88 const struct smc_firmware_header_v1_0 *hdr; 89 const struct common_firmware_header *header; 90 struct amdgpu_firmware_info *ucode = NULL; 91 92 if (amdgpu_sriov_vf(adev) && 93 ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 9)) || 94 (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)))) 95 return 0; 96 97 switch (adev->ip_versions[MP1_HWIP][0]) { 98 case IP_VERSION(11, 0, 0): 99 chip_name = "navi10"; 100 break; 101 case IP_VERSION(11, 0, 5): 102 chip_name = "navi14"; 103 break; 104 case IP_VERSION(11, 0, 9): 105 chip_name = "navi12"; 106 break; 107 case IP_VERSION(11, 0, 7): 108 chip_name = "sienna_cichlid"; 109 break; 110 case IP_VERSION(11, 0, 11): 111 chip_name = "navy_flounder"; 112 break; 113 case IP_VERSION(11, 0, 12): 114 chip_name = "dimgrey_cavefish"; 115 break; 116 case IP_VERSION(11, 0, 13): 117 chip_name = "beige_goby"; 118 break; 119 case IP_VERSION(11, 0, 2): 120 chip_name = "arcturus"; 121 break; 122 default: 123 dev_err(adev->dev, "Unsupported IP version 0x%x\n", 124 adev->ip_versions[MP1_HWIP][0]); 125 return -EINVAL; 126 } 127 128 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name); 129 130 err = request_firmware(&adev->pm.fw, fw_name, adev->dev); 131 if (err) 132 goto out; 133 err = amdgpu_ucode_validate(adev->pm.fw); 134 if (err) 135 goto out; 136 137 hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data; 138 amdgpu_ucode_print_smc_hdr(&hdr->header); 139 adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version); 140 141 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { 142 ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC]; 143 ucode->ucode_id = AMDGPU_UCODE_ID_SMC; 144 ucode->fw = adev->pm.fw; 145 header = (const struct common_firmware_header *)ucode->fw->data; 146 adev->firmware.fw_size += 147 ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); 148 } 149 150out: 151 if (err) { 152 DRM_ERROR("smu_v11_0: Failed to load firmware \"%s\"\n", 153 fw_name); 154 release_firmware(adev->pm.fw); 155 adev->pm.fw = NULL; 156 } 157 return err; 158} 159 160void smu_v11_0_fini_microcode(struct smu_context *smu) 161{ 162 struct amdgpu_device *adev = smu->adev; 163 164 release_firmware(adev->pm.fw); 165 adev->pm.fw = NULL; 166 adev->pm.fw_version = 0; 167} 168 169int smu_v11_0_load_microcode(struct smu_context *smu) 170{ 171 struct amdgpu_device *adev = smu->adev; 172 const uint32_t *src; 173 const struct smc_firmware_header_v1_0 *hdr; 174 uint32_t addr_start = MP1_SRAM; 175 uint32_t i; 176 uint32_t smc_fw_size; 177 uint32_t mp1_fw_flags; 178 179 hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data; 180 src = (const uint32_t *)(adev->pm.fw->data + 181 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 182 smc_fw_size = hdr->header.ucode_size_bytes; 183 184 for (i = 1; i < smc_fw_size/4 - 1; i++) { 185 WREG32_PCIE(addr_start, src[i]); 186 addr_start += 4; 187 } 188 189 WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff), 190 1 & MP1_SMN_PUB_CTRL__RESET_MASK); 191 WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff), 192 1 & ~MP1_SMN_PUB_CTRL__RESET_MASK); 193 194 for (i = 0; i < adev->usec_timeout; i++) { 195 mp1_fw_flags = RREG32_PCIE(MP1_Public | 196 (smnMP1_FIRMWARE_FLAGS & 0xffffffff)); 197 if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >> 198 MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT) 199 break; 200 udelay(1); 201 } 202 203 if (i == adev->usec_timeout) 204 return -ETIME; 205 206 return 0; 207} 208 209int smu_v11_0_check_fw_status(struct smu_context *smu) 210{ 211 struct amdgpu_device *adev = smu->adev; 212 uint32_t mp1_fw_flags; 213 214 mp1_fw_flags = RREG32_PCIE(MP1_Public | 215 (smnMP1_FIRMWARE_FLAGS & 0xffffffff)); 216 217 if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >> 218 MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT) 219 return 0; 220 221 return -EIO; 222} 223 224int smu_v11_0_check_fw_version(struct smu_context *smu) 225{ 226 struct amdgpu_device *adev = smu->adev; 227 uint32_t if_version = 0xff, smu_version = 0xff; 228 uint8_t smu_program, smu_major, smu_minor, smu_debug; 229 int ret = 0; 230 231 ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version); 232 if (ret) 233 return ret; 234 235 smu_program = (smu_version >> 24) & 0xff; 236 smu_major = (smu_version >> 16) & 0xff; 237 smu_minor = (smu_version >> 8) & 0xff; 238 smu_debug = (smu_version >> 0) & 0xff; 239 if (smu->is_apu) 240 adev->pm.fw_version = smu_version; 241 242 switch (adev->ip_versions[MP1_HWIP][0]) { 243 case IP_VERSION(11, 0, 0): 244 smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV10; 245 break; 246 case IP_VERSION(11, 0, 9): 247 smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV12; 248 break; 249 case IP_VERSION(11, 0, 5): 250 smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV14; 251 break; 252 case IP_VERSION(11, 0, 7): 253 smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Sienna_Cichlid; 254 break; 255 case IP_VERSION(11, 0, 11): 256 smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Navy_Flounder; 257 break; 258 case IP_VERSION(11, 5, 0): 259 smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_VANGOGH; 260 break; 261 case IP_VERSION(11, 0, 12): 262 smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish; 263 break; 264 case IP_VERSION(11, 0, 13): 265 smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Beige_Goby; 266 break; 267 case IP_VERSION(11, 0, 8): 268 smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Cyan_Skillfish; 269 break; 270 case IP_VERSION(11, 0, 2): 271 smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_ARCT; 272 break; 273 default: 274 dev_err(smu->adev->dev, "smu unsupported IP version: 0x%x.\n", 275 adev->ip_versions[MP1_HWIP][0]); 276 smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_INV; 277 break; 278 } 279 280 /* 281 * 1. if_version mismatch is not critical as our fw is designed 282 * to be backward compatible. 283 * 2. New fw usually brings some optimizations. But that's visible 284 * only on the paired driver. 285 * Considering above, we just leave user a warning message instead 286 * of halt driver loading. 287 */ 288 if (if_version != smu->smc_driver_if_version) { 289 dev_info(smu->adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, " 290 "smu fw program = %d, version = 0x%08x (%d.%d.%d)\n", 291 smu->smc_driver_if_version, if_version, 292 smu_program, smu_version, smu_major, smu_minor, smu_debug); 293 dev_warn(smu->adev->dev, "SMU driver if version not matched\n"); 294 } 295 296 return ret; 297} 298 299static int smu_v11_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size) 300{ 301 struct amdgpu_device *adev = smu->adev; 302 uint32_t ppt_offset_bytes; 303 const struct smc_firmware_header_v2_0 *v2; 304 305 v2 = (const struct smc_firmware_header_v2_0 *) adev->pm.fw->data; 306 307 ppt_offset_bytes = le32_to_cpu(v2->ppt_offset_bytes); 308 *size = le32_to_cpu(v2->ppt_size_bytes); 309 *table = (uint8_t *)v2 + ppt_offset_bytes; 310 311 return 0; 312} 313 314static int smu_v11_0_set_pptable_v2_1(struct smu_context *smu, void **table, 315 uint32_t *size, uint32_t pptable_id) 316{ 317 struct amdgpu_device *adev = smu->adev; 318 const struct smc_firmware_header_v2_1 *v2_1; 319 struct smc_soft_pptable_entry *entries; 320 uint32_t pptable_count = 0; 321 int i = 0; 322 323 v2_1 = (const struct smc_firmware_header_v2_1 *) adev->pm.fw->data; 324 entries = (struct smc_soft_pptable_entry *) 325 ((uint8_t *)v2_1 + le32_to_cpu(v2_1->pptable_entry_offset)); 326 pptable_count = le32_to_cpu(v2_1->pptable_count); 327 for (i = 0; i < pptable_count; i++) { 328 if (le32_to_cpu(entries[i].id) == pptable_id) { 329 *table = ((uint8_t *)v2_1 + le32_to_cpu(entries[i].ppt_offset_bytes)); 330 *size = le32_to_cpu(entries[i].ppt_size_bytes); 331 break; 332 } 333 } 334 335 if (i == pptable_count) 336 return -EINVAL; 337 338 return 0; 339} 340 341int smu_v11_0_setup_pptable(struct smu_context *smu) 342{ 343 struct amdgpu_device *adev = smu->adev; 344 const struct smc_firmware_header_v1_0 *hdr; 345 int ret, index; 346 uint32_t size = 0; 347 uint16_t atom_table_size; 348 uint8_t frev, crev; 349 void *table; 350 uint16_t version_major, version_minor; 351 352 if (!amdgpu_sriov_vf(adev)) { 353 hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data; 354 version_major = le16_to_cpu(hdr->header.header_version_major); 355 version_minor = le16_to_cpu(hdr->header.header_version_minor); 356 if (version_major == 2 && smu->smu_table.boot_values.pp_table_id > 0) { 357 dev_info(adev->dev, "use driver provided pptable %d\n", smu->smu_table.boot_values.pp_table_id); 358 switch (version_minor) { 359 case 0: 360 ret = smu_v11_0_set_pptable_v2_0(smu, &table, &size); 361 break; 362 case 1: 363 ret = smu_v11_0_set_pptable_v2_1(smu, &table, &size, 364 smu->smu_table.boot_values.pp_table_id); 365 break; 366 default: 367 ret = -EINVAL; 368 break; 369 } 370 if (ret) 371 return ret; 372 goto out; 373 } 374 } 375 376 dev_info(adev->dev, "use vbios provided pptable\n"); 377 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, 378 powerplayinfo); 379 380 ret = amdgpu_atombios_get_data_table(adev, index, &atom_table_size, &frev, &crev, 381 (uint8_t **)&table); 382 if (ret) 383 return ret; 384 size = atom_table_size; 385 386out: 387 if (!smu->smu_table.power_play_table) 388 smu->smu_table.power_play_table = table; 389 if (!smu->smu_table.power_play_table_size) 390 smu->smu_table.power_play_table_size = size; 391 392 return 0; 393} 394 395int smu_v11_0_init_smc_tables(struct smu_context *smu) 396{ 397 struct smu_table_context *smu_table = &smu->smu_table; 398 struct smu_table *tables = smu_table->tables; 399 int ret = 0; 400 401 smu_table->driver_pptable = 402 kzalloc(tables[SMU_TABLE_PPTABLE].size, GFP_KERNEL); 403 if (!smu_table->driver_pptable) { 404 ret = -ENOMEM; 405 goto err0_out; 406 } 407 408 smu_table->max_sustainable_clocks = 409 kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks), GFP_KERNEL); 410 if (!smu_table->max_sustainable_clocks) { 411 ret = -ENOMEM; 412 goto err1_out; 413 } 414 415 /* Arcturus does not support OVERDRIVE */ 416 if (tables[SMU_TABLE_OVERDRIVE].size) { 417 smu_table->overdrive_table = 418 kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL); 419 if (!smu_table->overdrive_table) { 420 ret = -ENOMEM; 421 goto err2_out; 422 } 423 424 smu_table->boot_overdrive_table = 425 kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL); 426 if (!smu_table->boot_overdrive_table) { 427 ret = -ENOMEM; 428 goto err3_out; 429 } 430 431 smu_table->user_overdrive_table = 432 kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL); 433 if (!smu_table->user_overdrive_table) { 434 ret = -ENOMEM; 435 goto err4_out; 436 } 437 438 } 439 440 return 0; 441 442err4_out: 443 kfree(smu_table->boot_overdrive_table); 444err3_out: 445 kfree(smu_table->overdrive_table); 446err2_out: 447 kfree(smu_table->max_sustainable_clocks); 448err1_out: 449 kfree(smu_table->driver_pptable); 450err0_out: 451 return ret; 452} 453 454int smu_v11_0_fini_smc_tables(struct smu_context *smu) 455{ 456 struct smu_table_context *smu_table = &smu->smu_table; 457 struct smu_dpm_context *smu_dpm = &smu->smu_dpm; 458 459 kfree(smu_table->gpu_metrics_table); 460 kfree(smu_table->user_overdrive_table); 461 kfree(smu_table->boot_overdrive_table); 462 kfree(smu_table->overdrive_table); 463 kfree(smu_table->max_sustainable_clocks); 464 kfree(smu_table->driver_pptable); 465 kfree(smu_table->clocks_table); 466 smu_table->gpu_metrics_table = NULL; 467 smu_table->user_overdrive_table = NULL; 468 smu_table->boot_overdrive_table = NULL; 469 smu_table->overdrive_table = NULL; 470 smu_table->max_sustainable_clocks = NULL; 471 smu_table->driver_pptable = NULL; 472 smu_table->clocks_table = NULL; 473 kfree(smu_table->hardcode_pptable); 474 smu_table->hardcode_pptable = NULL; 475 476 kfree(smu_table->driver_smu_config_table); 477 kfree(smu_table->ecc_table); 478 kfree(smu_table->metrics_table); 479 kfree(smu_table->watermarks_table); 480 smu_table->driver_smu_config_table = NULL; 481 smu_table->ecc_table = NULL; 482 smu_table->metrics_table = NULL; 483 smu_table->watermarks_table = NULL; 484 smu_table->metrics_time = 0; 485 486 kfree(smu_dpm->dpm_context); 487 kfree(smu_dpm->golden_dpm_context); 488 kfree(smu_dpm->dpm_current_power_state); 489 kfree(smu_dpm->dpm_request_power_state); 490 smu_dpm->dpm_context = NULL; 491 smu_dpm->golden_dpm_context = NULL; 492 smu_dpm->dpm_context_size = 0; 493 smu_dpm->dpm_current_power_state = NULL; 494 smu_dpm->dpm_request_power_state = NULL; 495 496 return 0; 497} 498 499int smu_v11_0_init_power(struct smu_context *smu) 500{ 501 struct amdgpu_device *adev = smu->adev; 502 struct smu_power_context *smu_power = &smu->smu_power; 503 size_t size = adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 5, 0) ? 504 sizeof(struct smu_11_5_power_context) : 505 sizeof(struct smu_11_0_power_context); 506 507 smu_power->power_context = kzalloc(size, GFP_KERNEL); 508 if (!smu_power->power_context) 509 return -ENOMEM; 510 smu_power->power_context_size = size; 511 512 return 0; 513} 514 515int smu_v11_0_fini_power(struct smu_context *smu) 516{ 517 struct smu_power_context *smu_power = &smu->smu_power; 518 519 kfree(smu_power->power_context); 520 smu_power->power_context = NULL; 521 smu_power->power_context_size = 0; 522 523 return 0; 524} 525 526static int smu_v11_0_atom_get_smu_clockinfo(struct amdgpu_device *adev, 527 uint8_t clk_id, 528 uint8_t syspll_id, 529 uint32_t *clk_freq) 530{ 531 struct atom_get_smu_clock_info_parameters_v3_1 input = {0}; 532 struct atom_get_smu_clock_info_output_parameters_v3_1 *output; 533 int ret, index; 534 535 input.clk_id = clk_id; 536 input.syspll_id = syspll_id; 537 input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ; 538 index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1, 539 getsmuclockinfo); 540 541 ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index, 542 (uint32_t *)&input); 543 if (ret) 544 return -EINVAL; 545 546 output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input; 547 *clk_freq = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000; 548 549 return 0; 550} 551 552int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu) 553{ 554 int ret, index; 555 uint16_t size; 556 uint8_t frev, crev; 557 struct atom_common_table_header *header; 558 struct atom_firmware_info_v3_3 *v_3_3; 559 struct atom_firmware_info_v3_1 *v_3_1; 560 561 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, 562 firmwareinfo); 563 564 ret = amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev, 565 (uint8_t **)&header); 566 if (ret) 567 return ret; 568 569 if (header->format_revision != 3) { 570 dev_err(smu->adev->dev, "unknown atom_firmware_info version! for smu11\n"); 571 return -EINVAL; 572 } 573 574 switch (header->content_revision) { 575 case 0: 576 case 1: 577 case 2: 578 v_3_1 = (struct atom_firmware_info_v3_1 *)header; 579 smu->smu_table.boot_values.revision = v_3_1->firmware_revision; 580 smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz; 581 smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz; 582 smu->smu_table.boot_values.socclk = 0; 583 smu->smu_table.boot_values.dcefclk = 0; 584 smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv; 585 smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv; 586 smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv; 587 smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv; 588 smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id; 589 smu->smu_table.boot_values.pp_table_id = 0; 590 smu->smu_table.boot_values.firmware_caps = v_3_1->firmware_capability; 591 break; 592 case 3: 593 case 4: 594 default: 595 v_3_3 = (struct atom_firmware_info_v3_3 *)header; 596 smu->smu_table.boot_values.revision = v_3_3->firmware_revision; 597 smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz; 598 smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz; 599 smu->smu_table.boot_values.socclk = 0; 600 smu->smu_table.boot_values.dcefclk = 0; 601 smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv; 602 smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv; 603 smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv; 604 smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv; 605 smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id; 606 smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id; 607 smu->smu_table.boot_values.firmware_caps = v_3_3->firmware_capability; 608 } 609 610 smu->smu_table.boot_values.format_revision = header->format_revision; 611 smu->smu_table.boot_values.content_revision = header->content_revision; 612 613 smu_v11_0_atom_get_smu_clockinfo(smu->adev, 614 (uint8_t)SMU11_SYSPLL0_SOCCLK_ID, 615 (uint8_t)0, 616 &smu->smu_table.boot_values.socclk); 617 618 smu_v11_0_atom_get_smu_clockinfo(smu->adev, 619 (uint8_t)SMU11_SYSPLL0_DCEFCLK_ID, 620 (uint8_t)0, 621 &smu->smu_table.boot_values.dcefclk); 622 623 smu_v11_0_atom_get_smu_clockinfo(smu->adev, 624 (uint8_t)SMU11_SYSPLL0_ECLK_ID, 625 (uint8_t)0, 626 &smu->smu_table.boot_values.eclk); 627 628 smu_v11_0_atom_get_smu_clockinfo(smu->adev, 629 (uint8_t)SMU11_SYSPLL0_VCLK_ID, 630 (uint8_t)0, 631 &smu->smu_table.boot_values.vclk); 632 633 smu_v11_0_atom_get_smu_clockinfo(smu->adev, 634 (uint8_t)SMU11_SYSPLL0_DCLK_ID, 635 (uint8_t)0, 636 &smu->smu_table.boot_values.dclk); 637 638 if ((smu->smu_table.boot_values.format_revision == 3) && 639 (smu->smu_table.boot_values.content_revision >= 2)) 640 smu_v11_0_atom_get_smu_clockinfo(smu->adev, 641 (uint8_t)SMU11_SYSPLL1_0_FCLK_ID, 642 (uint8_t)SMU11_SYSPLL1_2_ID, 643 &smu->smu_table.boot_values.fclk); 644 645 smu_v11_0_atom_get_smu_clockinfo(smu->adev, 646 (uint8_t)SMU11_SYSPLL3_1_LCLK_ID, 647 (uint8_t)SMU11_SYSPLL3_1_ID, 648 &smu->smu_table.boot_values.lclk); 649 650 return 0; 651} 652 653int smu_v11_0_notify_memory_pool_location(struct smu_context *smu) 654{ 655 struct smu_table_context *smu_table = &smu->smu_table; 656 struct smu_table *memory_pool = &smu_table->memory_pool; 657 int ret = 0; 658 uint64_t address; 659 uint32_t address_low, address_high; 660 661 if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL) 662 return ret; 663 664 address = (uintptr_t)memory_pool->cpu_addr; 665 address_high = (uint32_t)upper_32_bits(address); 666 address_low = (uint32_t)lower_32_bits(address); 667 668 ret = smu_cmn_send_smc_msg_with_param(smu, 669 SMU_MSG_SetSystemVirtualDramAddrHigh, 670 address_high, 671 NULL); 672 if (ret) 673 return ret; 674 ret = smu_cmn_send_smc_msg_with_param(smu, 675 SMU_MSG_SetSystemVirtualDramAddrLow, 676 address_low, 677 NULL); 678 if (ret) 679 return ret; 680 681 address = memory_pool->mc_address; 682 address_high = (uint32_t)upper_32_bits(address); 683 address_low = (uint32_t)lower_32_bits(address); 684 685 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh, 686 address_high, NULL); 687 if (ret) 688 return ret; 689 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow, 690 address_low, NULL); 691 if (ret) 692 return ret; 693 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize, 694 (uint32_t)memory_pool->size, NULL); 695 if (ret) 696 return ret; 697 698 return ret; 699} 700 701int smu_v11_0_set_min_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk) 702{ 703 int ret; 704 705 ret = smu_cmn_send_smc_msg_with_param(smu, 706 SMU_MSG_SetMinDeepSleepDcefclk, clk, NULL); 707 if (ret) 708 dev_err(smu->adev->dev, "SMU11 attempt to set divider for DCEFCLK Failed!"); 709 710 return ret; 711} 712 713int smu_v11_0_set_driver_table_location(struct smu_context *smu) 714{ 715 struct smu_table *driver_table = &smu->smu_table.driver_table; 716 int ret = 0; 717 718 if (driver_table->mc_address) { 719 ret = smu_cmn_send_smc_msg_with_param(smu, 720 SMU_MSG_SetDriverDramAddrHigh, 721 upper_32_bits(driver_table->mc_address), 722 NULL); 723 if (!ret) 724 ret = smu_cmn_send_smc_msg_with_param(smu, 725 SMU_MSG_SetDriverDramAddrLow, 726 lower_32_bits(driver_table->mc_address), 727 NULL); 728 } 729 730 return ret; 731} 732 733int smu_v11_0_set_tool_table_location(struct smu_context *smu) 734{ 735 int ret = 0; 736 struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG]; 737 738 if (tool_table->mc_address) { 739 ret = smu_cmn_send_smc_msg_with_param(smu, 740 SMU_MSG_SetToolsDramAddrHigh, 741 upper_32_bits(tool_table->mc_address), 742 NULL); 743 if (!ret) 744 ret = smu_cmn_send_smc_msg_with_param(smu, 745 SMU_MSG_SetToolsDramAddrLow, 746 lower_32_bits(tool_table->mc_address), 747 NULL); 748 } 749 750 return ret; 751} 752 753int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count) 754{ 755 struct amdgpu_device *adev = smu->adev; 756 757 /* Navy_Flounder/Dimgrey_Cavefish do not support to change 758 * display num currently 759 */ 760 if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 11) || 761 adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 5, 0) || 762 adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 12) || 763 adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13)) 764 return 0; 765 766 return smu_cmn_send_smc_msg_with_param(smu, 767 SMU_MSG_NumOfDisplays, 768 count, 769 NULL); 770} 771 772 773int smu_v11_0_set_allowed_mask(struct smu_context *smu) 774{ 775 struct smu_feature *feature = &smu->smu_feature; 776 int ret = 0; 777 uint32_t feature_mask[2]; 778 779 if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64) { 780 ret = -EINVAL; 781 goto failed; 782 } 783 784 bitmap_to_arr32(feature_mask, feature->allowed, 64); 785 786 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh, 787 feature_mask[1], NULL); 788 if (ret) 789 goto failed; 790 791 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow, 792 feature_mask[0], NULL); 793 if (ret) 794 goto failed; 795 796failed: 797 return ret; 798} 799 800int smu_v11_0_system_features_control(struct smu_context *smu, 801 bool en) 802{ 803 return smu_cmn_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures : 804 SMU_MSG_DisableAllSmuFeatures), NULL); 805} 806 807int smu_v11_0_notify_display_change(struct smu_context *smu) 808{ 809 int ret = 0; 810 811 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) && 812 smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM) 813 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL); 814 815 return ret; 816} 817 818static int 819smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock, 820 enum smu_clk_type clock_select) 821{ 822 int ret = 0; 823 int clk_id; 824 825 if ((smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetDcModeMaxDpmFreq) < 0) || 826 (smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetMaxDpmFreq) < 0)) 827 return 0; 828 829 clk_id = smu_cmn_to_asic_specific_index(smu, 830 CMN2ASIC_MAPPING_CLK, 831 clock_select); 832 if (clk_id < 0) 833 return -EINVAL; 834 835 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq, 836 clk_id << 16, clock); 837 if (ret) { 838 dev_err(smu->adev->dev, "[GetMaxSustainableClock] Failed to get max DC clock from SMC!"); 839 return ret; 840 } 841 842 if (*clock != 0) 843 return 0; 844 845 /* if DC limit is zero, return AC limit */ 846 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, 847 clk_id << 16, clock); 848 if (ret) { 849 dev_err(smu->adev->dev, "[GetMaxSustainableClock] failed to get max AC clock from SMC!"); 850 return ret; 851 } 852 853 return 0; 854} 855 856int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu) 857{ 858 struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks = 859 smu->smu_table.max_sustainable_clocks; 860 int ret = 0; 861 862 max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100; 863 max_sustainable_clocks->soc_clock = smu->smu_table.boot_values.socclk / 100; 864 max_sustainable_clocks->dcef_clock = smu->smu_table.boot_values.dcefclk / 100; 865 max_sustainable_clocks->display_clock = 0xFFFFFFFF; 866 max_sustainable_clocks->phy_clock = 0xFFFFFFFF; 867 max_sustainable_clocks->pixel_clock = 0xFFFFFFFF; 868 869 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) { 870 ret = smu_v11_0_get_max_sustainable_clock(smu, 871 &(max_sustainable_clocks->uclock), 872 SMU_UCLK); 873 if (ret) { 874 dev_err(smu->adev->dev, "[%s] failed to get max UCLK from SMC!", 875 __func__); 876 return ret; 877 } 878 } 879 880 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) { 881 ret = smu_v11_0_get_max_sustainable_clock(smu, 882 &(max_sustainable_clocks->soc_clock), 883 SMU_SOCCLK); 884 if (ret) { 885 dev_err(smu->adev->dev, "[%s] failed to get max SOCCLK from SMC!", 886 __func__); 887 return ret; 888 } 889 } 890 891 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) { 892 ret = smu_v11_0_get_max_sustainable_clock(smu, 893 &(max_sustainable_clocks->dcef_clock), 894 SMU_DCEFCLK); 895 if (ret) { 896 dev_err(smu->adev->dev, "[%s] failed to get max DCEFCLK from SMC!", 897 __func__); 898 return ret; 899 } 900 901 ret = smu_v11_0_get_max_sustainable_clock(smu, 902 &(max_sustainable_clocks->display_clock), 903 SMU_DISPCLK); 904 if (ret) { 905 dev_err(smu->adev->dev, "[%s] failed to get max DISPCLK from SMC!", 906 __func__); 907 return ret; 908 } 909 ret = smu_v11_0_get_max_sustainable_clock(smu, 910 &(max_sustainable_clocks->phy_clock), 911 SMU_PHYCLK); 912 if (ret) { 913 dev_err(smu->adev->dev, "[%s] failed to get max PHYCLK from SMC!", 914 __func__); 915 return ret; 916 } 917 ret = smu_v11_0_get_max_sustainable_clock(smu, 918 &(max_sustainable_clocks->pixel_clock), 919 SMU_PIXCLK); 920 if (ret) { 921 dev_err(smu->adev->dev, "[%s] failed to get max PIXCLK from SMC!", 922 __func__); 923 return ret; 924 } 925 } 926 927 if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock) 928 max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock; 929 930 return 0; 931} 932 933int smu_v11_0_get_current_power_limit(struct smu_context *smu, 934 uint32_t *power_limit) 935{ 936 int power_src; 937 int ret = 0; 938 939 if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) 940 return -EINVAL; 941 942 power_src = smu_cmn_to_asic_specific_index(smu, 943 CMN2ASIC_MAPPING_PWR, 944 smu->adev->pm.ac_power ? 945 SMU_POWER_SOURCE_AC : 946 SMU_POWER_SOURCE_DC); 947 if (power_src < 0) 948 return -EINVAL; 949 950 /* 951 * BIT 24-31: ControllerId (only PPT0 is supported for now) 952 * BIT 16-23: PowerSource 953 */ 954 ret = smu_cmn_send_smc_msg_with_param(smu, 955 SMU_MSG_GetPptLimit, 956 (0 << 24) | (power_src << 16), 957 power_limit); 958 if (ret) 959 dev_err(smu->adev->dev, "[%s] get PPT limit failed!", __func__); 960 961 return ret; 962} 963 964int smu_v11_0_set_power_limit(struct smu_context *smu, 965 enum smu_ppt_limit_type limit_type, 966 uint32_t limit) 967{ 968 int power_src; 969 int ret = 0; 970 uint32_t limit_param; 971 972 if (limit_type != SMU_DEFAULT_PPT_LIMIT) 973 return -EINVAL; 974 975 if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) { 976 dev_err(smu->adev->dev, "Setting new power limit is not supported!\n"); 977 return -EOPNOTSUPP; 978 } 979 980 power_src = smu_cmn_to_asic_specific_index(smu, 981 CMN2ASIC_MAPPING_PWR, 982 smu->adev->pm.ac_power ? 983 SMU_POWER_SOURCE_AC : 984 SMU_POWER_SOURCE_DC); 985 if (power_src < 0) 986 return -EINVAL; 987 988 /* 989 * BIT 24-31: ControllerId (only PPT0 is supported for now) 990 * BIT 16-23: PowerSource 991 * BIT 0-15: PowerLimit 992 */ 993 limit_param = (limit & 0xFFFF); 994 limit_param |= 0 << 24; 995 limit_param |= (power_src) << 16; 996 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, limit_param, NULL); 997 if (ret) { 998 dev_err(smu->adev->dev, "[%s] Set power limit Failed!\n", __func__); 999 return ret; 1000 } 1001 1002 smu->current_power_limit = limit; 1003 1004 return 0; 1005} 1006 1007static int smu_v11_0_ack_ac_dc_interrupt(struct smu_context *smu) 1008{ 1009 return smu_cmn_send_smc_msg(smu, 1010 SMU_MSG_ReenableAcDcInterrupt, 1011 NULL); 1012} 1013 1014static int smu_v11_0_process_pending_interrupt(struct smu_context *smu) 1015{ 1016 int ret = 0; 1017 1018 if (smu->dc_controlled_by_gpio && 1019 smu_cmn_feature_is_enabled(smu, SMU_FEATURE_ACDC_BIT)) 1020 ret = smu_v11_0_ack_ac_dc_interrupt(smu); 1021 1022 return ret; 1023} 1024 1025void smu_v11_0_interrupt_work(struct smu_context *smu) 1026{ 1027 if (smu_v11_0_ack_ac_dc_interrupt(smu)) 1028 dev_err(smu->adev->dev, "Ack AC/DC interrupt Failed!\n"); 1029} 1030 1031int smu_v11_0_enable_thermal_alert(struct smu_context *smu) 1032{ 1033 int ret = 0; 1034 1035 if (smu->smu_table.thermal_controller_type) { 1036 ret = amdgpu_irq_get(smu->adev, &smu->irq_source, 0); 1037 if (ret) 1038 return ret; 1039 } 1040 1041 /* 1042 * After init there might have been missed interrupts triggered 1043 * before driver registers for interrupt (Ex. AC/DC). 1044 */ 1045 return smu_v11_0_process_pending_interrupt(smu); 1046} 1047 1048int smu_v11_0_disable_thermal_alert(struct smu_context *smu) 1049{ 1050 return amdgpu_irq_put(smu->adev, &smu->irq_source, 0); 1051} 1052 1053static uint16_t convert_to_vddc(uint8_t vid) 1054{ 1055 return (uint16_t) ((6200 - (vid * 25)) / SMU11_VOLTAGE_SCALE); 1056} 1057 1058int smu_v11_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value) 1059{ 1060 struct amdgpu_device *adev = smu->adev; 1061 uint32_t vdd = 0, val_vid = 0; 1062 1063 if (!value) 1064 return -EINVAL; 1065 val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_TEL_PLANE0) & 1066 SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >> 1067 SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT; 1068 1069 vdd = (uint32_t)convert_to_vddc((uint8_t)val_vid); 1070 1071 *value = vdd; 1072 1073 return 0; 1074 1075} 1076 1077int 1078smu_v11_0_display_clock_voltage_request(struct smu_context *smu, 1079 struct pp_display_clock_request 1080 *clock_req) 1081{ 1082 enum amd_pp_clock_type clk_type = clock_req->clock_type; 1083 int ret = 0; 1084 enum smu_clk_type clk_select = 0; 1085 uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000; 1086 1087 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) || 1088 smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) { 1089 switch (clk_type) { 1090 case amd_pp_dcef_clock: 1091 clk_select = SMU_DCEFCLK; 1092 break; 1093 case amd_pp_disp_clock: 1094 clk_select = SMU_DISPCLK; 1095 break; 1096 case amd_pp_pixel_clock: 1097 clk_select = SMU_PIXCLK; 1098 break; 1099 case amd_pp_phy_clock: 1100 clk_select = SMU_PHYCLK; 1101 break; 1102 case amd_pp_mem_clock: 1103 clk_select = SMU_UCLK; 1104 break; 1105 default: 1106 dev_info(smu->adev->dev, "[%s] Invalid Clock Type!", __func__); 1107 ret = -EINVAL; 1108 break; 1109 } 1110 1111 if (ret) 1112 goto failed; 1113 1114 if (clk_select == SMU_UCLK && smu->disable_uclk_switch) 1115 return 0; 1116 1117 ret = smu_v11_0_set_hard_freq_limited_range(smu, clk_select, clk_freq, 0); 1118 1119 if(clk_select == SMU_UCLK) 1120 smu->hard_min_uclk_req_from_dal = clk_freq; 1121 } 1122 1123failed: 1124 return ret; 1125} 1126 1127int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable) 1128{ 1129 int ret = 0; 1130 struct amdgpu_device *adev = smu->adev; 1131 1132 switch (adev->ip_versions[MP1_HWIP][0]) { 1133 case IP_VERSION(11, 0, 0): 1134 case IP_VERSION(11, 0, 5): 1135 case IP_VERSION(11, 0, 9): 1136 case IP_VERSION(11, 0, 7): 1137 case IP_VERSION(11, 0, 11): 1138 case IP_VERSION(11, 0, 12): 1139 case IP_VERSION(11, 0, 13): 1140 case IP_VERSION(11, 5, 0): 1141 if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) 1142 return 0; 1143 if (enable) 1144 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL); 1145 else 1146 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL); 1147 break; 1148 default: 1149 break; 1150 } 1151 1152 return ret; 1153} 1154 1155uint32_t 1156smu_v11_0_get_fan_control_mode(struct smu_context *smu) 1157{ 1158 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT)) 1159 return AMD_FAN_CTRL_AUTO; 1160 else 1161 return smu->user_dpm_profile.fan_mode; 1162} 1163 1164static int 1165smu_v11_0_auto_fan_control(struct smu_context *smu, bool auto_fan_control) 1166{ 1167 int ret = 0; 1168 1169 if (!smu_cmn_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT)) 1170 return 0; 1171 1172 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, auto_fan_control); 1173 if (ret) 1174 dev_err(smu->adev->dev, "[%s]%s smc FAN CONTROL feature failed!", 1175 __func__, (auto_fan_control ? "Start" : "Stop")); 1176 1177 return ret; 1178} 1179 1180static int 1181smu_v11_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode) 1182{ 1183 struct amdgpu_device *adev = smu->adev; 1184 1185 WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2, 1186 REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2), 1187 CG_FDO_CTRL2, TMIN, 0)); 1188 WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2, 1189 REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2), 1190 CG_FDO_CTRL2, FDO_PWM_MODE, mode)); 1191 1192 return 0; 1193} 1194 1195int 1196smu_v11_0_set_fan_speed_pwm(struct smu_context *smu, uint32_t speed) 1197{ 1198 struct amdgpu_device *adev = smu->adev; 1199 uint32_t duty100, duty; 1200 uint64_t tmp64; 1201 1202 speed = MIN(speed, 255); 1203 1204 duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1), 1205 CG_FDO_CTRL1, FMAX_DUTY100); 1206 if (!duty100) 1207 return -EINVAL; 1208 1209 tmp64 = (uint64_t)speed * duty100; 1210 do_div(tmp64, 255); 1211 duty = (uint32_t)tmp64; 1212 1213 WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0, 1214 REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0), 1215 CG_FDO_CTRL0, FDO_STATIC_DUTY, duty)); 1216 1217 return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC); 1218} 1219 1220int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu, 1221 uint32_t speed) 1222{ 1223 struct amdgpu_device *adev = smu->adev; 1224 /* 1225 * crystal_clock_freq used for fan speed rpm calculation is 1226 * always 25Mhz. So, hardcode it as 2500(in 10K unit). 1227 */ 1228 uint32_t crystal_clock_freq = 2500; 1229 uint32_t tach_period; 1230 1231 /* 1232 * To prevent from possible overheat, some ASICs may have requirement 1233 * for minimum fan speed: 1234 * - For some NV10 SKU, the fan speed cannot be set lower than 1235 * 700 RPM. 1236 * - For some Sienna Cichlid SKU, the fan speed cannot be set 1237 * lower than 500 RPM. 1238 */ 1239 tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed); 1240 WREG32_SOC15(THM, 0, mmCG_TACH_CTRL, 1241 REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL), 1242 CG_TACH_CTRL, TARGET_PERIOD, 1243 tach_period)); 1244 1245 return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM); 1246} 1247 1248int smu_v11_0_get_fan_speed_pwm(struct smu_context *smu, 1249 uint32_t *speed) 1250{ 1251 struct amdgpu_device *adev = smu->adev; 1252 uint32_t duty100, duty; 1253 uint64_t tmp64; 1254 1255 /* 1256 * For pre Sienna Cichlid ASICs, the 0 RPM may be not correctly 1257 * detected via register retrieving. To workaround this, we will 1258 * report the fan speed as 0 PWM if user just requested such. 1259 */ 1260 if ((smu->user_dpm_profile.flags & SMU_CUSTOM_FAN_SPEED_PWM) 1261 && !smu->user_dpm_profile.fan_speed_pwm) { 1262 *speed = 0; 1263 return 0; 1264 } 1265 1266 duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1), 1267 CG_FDO_CTRL1, FMAX_DUTY100); 1268 duty = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_THERMAL_STATUS), 1269 CG_THERMAL_STATUS, FDO_PWM_DUTY); 1270 if (!duty100) 1271 return -EINVAL; 1272 1273 tmp64 = (uint64_t)duty * 255; 1274 do_div(tmp64, duty100); 1275 *speed = MIN((uint32_t)tmp64, 255); 1276 1277 return 0; 1278} 1279 1280int smu_v11_0_get_fan_speed_rpm(struct smu_context *smu, 1281 uint32_t *speed) 1282{ 1283 struct amdgpu_device *adev = smu->adev; 1284 uint32_t crystal_clock_freq = 2500; 1285 uint32_t tach_status; 1286 uint64_t tmp64; 1287 1288 /* 1289 * For pre Sienna Cichlid ASICs, the 0 RPM may be not correctly 1290 * detected via register retrieving. To workaround this, we will 1291 * report the fan speed as 0 RPM if user just requested such. 1292 */ 1293 if ((smu->user_dpm_profile.flags & SMU_CUSTOM_FAN_SPEED_RPM) 1294 && !smu->user_dpm_profile.fan_speed_rpm) { 1295 *speed = 0; 1296 return 0; 1297 } 1298 1299 tmp64 = (uint64_t)crystal_clock_freq * 60 * 10000; 1300 1301 tach_status = RREG32_SOC15(THM, 0, mmCG_TACH_STATUS); 1302 if (tach_status) { 1303 do_div(tmp64, tach_status); 1304 *speed = (uint32_t)tmp64; 1305 } else { 1306 dev_warn_once(adev->dev, "Got zero output on CG_TACH_STATUS reading!\n"); 1307 *speed = 0; 1308 } 1309 1310 return 0; 1311} 1312 1313int 1314smu_v11_0_set_fan_control_mode(struct smu_context *smu, 1315 uint32_t mode) 1316{ 1317 int ret = 0; 1318 1319 switch (mode) { 1320 case AMD_FAN_CTRL_NONE: 1321 ret = smu_v11_0_auto_fan_control(smu, 0); 1322 if (!ret) 1323 ret = smu_v11_0_set_fan_speed_pwm(smu, 255); 1324 break; 1325 case AMD_FAN_CTRL_MANUAL: 1326 ret = smu_v11_0_auto_fan_control(smu, 0); 1327 break; 1328 case AMD_FAN_CTRL_AUTO: 1329 ret = smu_v11_0_auto_fan_control(smu, 1); 1330 break; 1331 default: 1332 break; 1333 } 1334 1335 if (ret) { 1336 dev_err(smu->adev->dev, "[%s]Set fan control mode failed!", __func__); 1337 return -EINVAL; 1338 } 1339 1340 return ret; 1341} 1342 1343int smu_v11_0_set_xgmi_pstate(struct smu_context *smu, 1344 uint32_t pstate) 1345{ 1346 return smu_cmn_send_smc_msg_with_param(smu, 1347 SMU_MSG_SetXgmiMode, 1348 pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3, 1349 NULL); 1350} 1351 1352static int smu_v11_0_set_irq_state(struct amdgpu_device *adev, 1353 struct amdgpu_irq_src *source, 1354 unsigned tyep, 1355 enum amdgpu_interrupt_state state) 1356{ 1357 struct smu_context *smu = adev->powerplay.pp_handle; 1358 uint32_t low, high; 1359 uint32_t val = 0; 1360 1361 switch (state) { 1362 case AMDGPU_IRQ_STATE_DISABLE: 1363 /* For THM irqs */ 1364 val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL); 1365 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 1); 1366 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 1); 1367 WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val); 1368 1369 WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, 0); 1370 1371 /* For MP1 SW irqs */ 1372 val = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL); 1373 val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1); 1374 WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL, val); 1375 1376 break; 1377 case AMDGPU_IRQ_STATE_ENABLE: 1378 /* For THM irqs */ 1379 low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP, 1380 smu->thermal_range.min / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES); 1381 high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP, 1382 smu->thermal_range.software_shutdown_temp); 1383 1384 val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL); 1385 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5); 1386 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1); 1387 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0); 1388 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0); 1389 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 0xff)); 1390 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 0xff)); 1391 val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK); 1392 WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val); 1393 1394 val = (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT); 1395 val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT); 1396 val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT); 1397 WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, val); 1398 1399 /* For MP1 SW irqs */ 1400 val = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT); 1401 val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE); 1402 val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0); 1403 WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT, val); 1404 1405 val = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL); 1406 val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0); 1407 WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL, val); 1408 1409 break; 1410 default: 1411 break; 1412 } 1413 1414 return 0; 1415} 1416 1417#define THM_11_0__SRCID__THM_DIG_THERM_L2H 0 /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */ 1418#define THM_11_0__SRCID__THM_DIG_THERM_H2L 1 /* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */ 1419 1420#define SMUIO_11_0__SRCID__SMUIO_GPIO19 83 1421 1422static int smu_v11_0_irq_process(struct amdgpu_device *adev, 1423 struct amdgpu_irq_src *source, 1424 struct amdgpu_iv_entry *entry) 1425{ 1426 struct smu_context *smu = adev->powerplay.pp_handle; 1427 uint32_t client_id = entry->client_id; 1428 uint32_t src_id = entry->src_id; 1429 /* 1430 * ctxid is used to distinguish different 1431 * events for SMCToHost interrupt. 1432 */ 1433 uint32_t ctxid = entry->src_data[0]; 1434 uint32_t data; 1435 1436 if (client_id == SOC15_IH_CLIENTID_THM) { 1437 switch (src_id) { 1438 case THM_11_0__SRCID__THM_DIG_THERM_L2H: 1439 dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n"); 1440 /* 1441 * SW CTF just occurred. 1442 * Try to do a graceful shutdown to prevent further damage. 1443 */ 1444 dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n"); 1445 orderly_poweroff(true); 1446 break; 1447 case THM_11_0__SRCID__THM_DIG_THERM_H2L: 1448 dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n"); 1449 break; 1450 default: 1451 dev_emerg(adev->dev, "ERROR: GPU under temperature range unknown src id (%d)\n", 1452 src_id); 1453 break; 1454 } 1455 } else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO) { 1456 dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n"); 1457 /* 1458 * HW CTF just occurred. Shutdown to prevent further damage. 1459 */ 1460 dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n"); 1461 orderly_poweroff(true); 1462 } else if (client_id == SOC15_IH_CLIENTID_MP1) { 1463 if (src_id == 0xfe) { 1464 /* ACK SMUToHost interrupt */ 1465 data = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL); 1466 data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1); 1467 WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL, data); 1468 1469 switch (ctxid) { 1470 case 0x3: 1471 dev_dbg(adev->dev, "Switched to AC mode!\n"); 1472 schedule_work(&smu->interrupt_work); 1473 break; 1474 case 0x4: 1475 dev_dbg(adev->dev, "Switched to DC mode!\n"); 1476 schedule_work(&smu->interrupt_work); 1477 break; 1478 case 0x7: 1479 /* 1480 * Increment the throttle interrupt counter 1481 */ 1482 atomic64_inc(&smu->throttle_int_counter); 1483 1484 if (!atomic_read(&adev->throttling_logging_enabled)) 1485 return 0; 1486 1487 if (__ratelimit(&adev->throttling_logging_rs)) 1488 schedule_work(&smu->throttling_logging_work); 1489 1490 break; 1491 } 1492 } 1493 } 1494 1495 return 0; 1496} 1497 1498static const struct amdgpu_irq_src_funcs smu_v11_0_irq_funcs = 1499{ 1500 .set = smu_v11_0_set_irq_state, 1501 .process = smu_v11_0_irq_process, 1502}; 1503 1504int smu_v11_0_register_irq_handler(struct smu_context *smu) 1505{ 1506 struct amdgpu_device *adev = smu->adev; 1507 struct amdgpu_irq_src *irq_src = &smu->irq_source; 1508 int ret = 0; 1509 1510 irq_src->num_types = 1; 1511 irq_src->funcs = &smu_v11_0_irq_funcs; 1512 1513 ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM, 1514 THM_11_0__SRCID__THM_DIG_THERM_L2H, 1515 irq_src); 1516 if (ret) 1517 return ret; 1518 1519 ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM, 1520 THM_11_0__SRCID__THM_DIG_THERM_H2L, 1521 irq_src); 1522 if (ret) 1523 return ret; 1524 1525 /* Register CTF(GPIO_19) interrupt */ 1526 ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_ROM_SMUIO, 1527 SMUIO_11_0__SRCID__SMUIO_GPIO19, 1528 irq_src); 1529 if (ret) 1530 return ret; 1531 1532 ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1, 1533 0xfe, 1534 irq_src); 1535 if (ret) 1536 return ret; 1537 1538 return ret; 1539} 1540 1541int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu, 1542 struct pp_smu_nv_clock_table *max_clocks) 1543{ 1544 struct smu_table_context *table_context = &smu->smu_table; 1545 struct smu_11_0_max_sustainable_clocks *sustainable_clocks = NULL; 1546 1547 if (!max_clocks || !table_context->max_sustainable_clocks) 1548 return -EINVAL; 1549 1550 sustainable_clocks = table_context->max_sustainable_clocks; 1551 1552 max_clocks->dcfClockInKhz = 1553 (unsigned int) sustainable_clocks->dcef_clock * 1000; 1554 max_clocks->displayClockInKhz = 1555 (unsigned int) sustainable_clocks->display_clock * 1000; 1556 max_clocks->phyClockInKhz = 1557 (unsigned int) sustainable_clocks->phy_clock * 1000; 1558 max_clocks->pixelClockInKhz = 1559 (unsigned int) sustainable_clocks->pixel_clock * 1000; 1560 max_clocks->uClockInKhz = 1561 (unsigned int) sustainable_clocks->uclock * 1000; 1562 max_clocks->socClockInKhz = 1563 (unsigned int) sustainable_clocks->soc_clock * 1000; 1564 max_clocks->dscClockInKhz = 0; 1565 max_clocks->dppClockInKhz = 0; 1566 max_clocks->fabricClockInKhz = 0; 1567 1568 return 0; 1569} 1570 1571int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu) 1572{ 1573 return smu_cmn_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME, NULL); 1574} 1575 1576int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, 1577 enum smu_v11_0_baco_seq baco_seq) 1578{ 1579 return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq, NULL); 1580} 1581 1582bool smu_v11_0_baco_is_support(struct smu_context *smu) 1583{ 1584 struct smu_baco_context *smu_baco = &smu->smu_baco; 1585 1586 if (amdgpu_sriov_vf(smu->adev) || !smu_baco->platform_support) 1587 return false; 1588 1589 /* Arcturus does not support this bit mask */ 1590 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) && 1591 !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) 1592 return false; 1593 1594 return true; 1595} 1596 1597enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu) 1598{ 1599 struct smu_baco_context *smu_baco = &smu->smu_baco; 1600 1601 return smu_baco->state; 1602} 1603 1604#define D3HOT_BACO_SEQUENCE 0 1605#define D3HOT_BAMACO_SEQUENCE 2 1606 1607int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state) 1608{ 1609 struct smu_baco_context *smu_baco = &smu->smu_baco; 1610 struct amdgpu_device *adev = smu->adev; 1611 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); 1612 uint32_t data; 1613 int ret = 0; 1614 1615 if (smu_v11_0_baco_get_state(smu) == state) 1616 return 0; 1617 1618 if (state == SMU_BACO_STATE_ENTER) { 1619 switch (adev->ip_versions[MP1_HWIP][0]) { 1620 case IP_VERSION(11, 0, 7): 1621 case IP_VERSION(11, 0, 11): 1622 case IP_VERSION(11, 0, 12): 1623 case IP_VERSION(11, 0, 13): 1624 if (amdgpu_runtime_pm == 2) 1625 ret = smu_cmn_send_smc_msg_with_param(smu, 1626 SMU_MSG_EnterBaco, 1627 D3HOT_BAMACO_SEQUENCE, 1628 NULL); 1629 else 1630 ret = smu_cmn_send_smc_msg_with_param(smu, 1631 SMU_MSG_EnterBaco, 1632 D3HOT_BACO_SEQUENCE, 1633 NULL); 1634 break; 1635 default: 1636 if (!ras || !adev->ras_enabled || 1637 adev->gmc.xgmi.pending_reset) { 1638 if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 2)) { 1639 data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL_ARCT); 1640 data |= 0x80000000; 1641 WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL_ARCT, data); 1642 } else { 1643 data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL); 1644 data |= 0x80000000; 1645 WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data); 1646 } 1647 1648 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0, NULL); 1649 } else { 1650 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 1, NULL); 1651 } 1652 break; 1653 } 1654 1655 } else { 1656 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_ExitBaco, NULL); 1657 if (ret) 1658 return ret; 1659 1660 /* clear vbios scratch 6 and 7 for coming asic reinit */ 1661 WREG32(adev->bios_scratch_reg_offset + 6, 0); 1662 WREG32(adev->bios_scratch_reg_offset + 7, 0); 1663 } 1664 1665 if (!ret) 1666 smu_baco->state = state; 1667 1668 return ret; 1669} 1670 1671int smu_v11_0_baco_enter(struct smu_context *smu) 1672{ 1673 int ret = 0; 1674 1675 ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_ENTER); 1676 if (ret) 1677 return ret; 1678 1679 msleep(10); 1680 1681 return ret; 1682} 1683 1684int smu_v11_0_baco_exit(struct smu_context *smu) 1685{ 1686 return smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_EXIT); 1687} 1688 1689int smu_v11_0_mode1_reset(struct smu_context *smu) 1690{ 1691 int ret = 0; 1692 1693 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL); 1694 if (!ret) 1695 msleep(SMU11_MODE1_RESET_WAIT_TIME_IN_MS); 1696 1697 return ret; 1698} 1699 1700int smu_v11_0_handle_passthrough_sbr(struct smu_context *smu, bool enable) 1701{ 1702 int ret = 0; 1703 1704 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_LightSBR, enable ? 1 : 0, NULL); 1705 1706 return ret; 1707} 1708 1709 1710int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type, 1711 uint32_t *min, uint32_t *max) 1712{ 1713 int ret = 0, clk_id = 0; 1714 uint32_t param = 0; 1715 uint32_t clock_limit; 1716 1717 if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) { 1718 switch (clk_type) { 1719 case SMU_MCLK: 1720 case SMU_UCLK: 1721 clock_limit = smu->smu_table.boot_values.uclk; 1722 break; 1723 case SMU_GFXCLK: 1724 case SMU_SCLK: 1725 clock_limit = smu->smu_table.boot_values.gfxclk; 1726 break; 1727 case SMU_SOCCLK: 1728 clock_limit = smu->smu_table.boot_values.socclk; 1729 break; 1730 default: 1731 clock_limit = 0; 1732 break; 1733 } 1734 1735 /* clock in Mhz unit */ 1736 if (min) 1737 *min = clock_limit / 100; 1738 if (max) 1739 *max = clock_limit / 100; 1740 1741 return 0; 1742 } 1743 1744 clk_id = smu_cmn_to_asic_specific_index(smu, 1745 CMN2ASIC_MAPPING_CLK, 1746 clk_type); 1747 if (clk_id < 0) { 1748 ret = -EINVAL; 1749 goto failed; 1750 } 1751 param = (clk_id & 0xffff) << 16; 1752 1753 if (max) { 1754 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param, max); 1755 if (ret) 1756 goto failed; 1757 } 1758 1759 if (min) { 1760 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param, min); 1761 if (ret) 1762 goto failed; 1763 } 1764 1765failed: 1766 return ret; 1767} 1768 1769int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, 1770 enum smu_clk_type clk_type, 1771 uint32_t min, 1772 uint32_t max) 1773{ 1774 int ret = 0, clk_id = 0; 1775 uint32_t param; 1776 1777 if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) 1778 return 0; 1779 1780 clk_id = smu_cmn_to_asic_specific_index(smu, 1781 CMN2ASIC_MAPPING_CLK, 1782 clk_type); 1783 if (clk_id < 0) 1784 return clk_id; 1785 1786 if (max > 0) { 1787 param = (uint32_t)((clk_id << 16) | (max & 0xffff)); 1788 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq, 1789 param, NULL); 1790 if (ret) 1791 goto out; 1792 } 1793 1794 if (min > 0) { 1795 param = (uint32_t)((clk_id << 16) | (min & 0xffff)); 1796 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq, 1797 param, NULL); 1798 if (ret) 1799 goto out; 1800 } 1801 1802out: 1803 return ret; 1804} 1805 1806int smu_v11_0_set_hard_freq_limited_range(struct smu_context *smu, 1807 enum smu_clk_type clk_type, 1808 uint32_t min, 1809 uint32_t max) 1810{ 1811 int ret = 0, clk_id = 0; 1812 uint32_t param; 1813 1814 if (min <= 0 && max <= 0) 1815 return -EINVAL; 1816 1817 if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) 1818 return 0; 1819 1820 clk_id = smu_cmn_to_asic_specific_index(smu, 1821 CMN2ASIC_MAPPING_CLK, 1822 clk_type); 1823 if (clk_id < 0) 1824 return clk_id; 1825 1826 if (max > 0) { 1827 param = (uint32_t)((clk_id << 16) | (max & 0xffff)); 1828 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq, 1829 param, NULL); 1830 if (ret) 1831 return ret; 1832 } 1833 1834 if (min > 0) { 1835 param = (uint32_t)((clk_id << 16) | (min & 0xffff)); 1836 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq, 1837 param, NULL); 1838 if (ret) 1839 return ret; 1840 } 1841 1842 return ret; 1843} 1844 1845int smu_v11_0_set_performance_level(struct smu_context *smu, 1846 enum amd_dpm_forced_level level) 1847{ 1848 struct smu_11_0_dpm_context *dpm_context = 1849 smu->smu_dpm.dpm_context; 1850 struct smu_11_0_dpm_table *gfx_table = 1851 &dpm_context->dpm_tables.gfx_table; 1852 struct smu_11_0_dpm_table *mem_table = 1853 &dpm_context->dpm_tables.uclk_table; 1854 struct smu_11_0_dpm_table *soc_table = 1855 &dpm_context->dpm_tables.soc_table; 1856 struct smu_umd_pstate_table *pstate_table = 1857 &smu->pstate_table; 1858 struct amdgpu_device *adev = smu->adev; 1859 uint32_t sclk_min = 0, sclk_max = 0; 1860 uint32_t mclk_min = 0, mclk_max = 0; 1861 uint32_t socclk_min = 0, socclk_max = 0; 1862 int ret = 0; 1863 1864 switch (level) { 1865 case AMD_DPM_FORCED_LEVEL_HIGH: 1866 sclk_min = sclk_max = gfx_table->max; 1867 mclk_min = mclk_max = mem_table->max; 1868 socclk_min = socclk_max = soc_table->max; 1869 break; 1870 case AMD_DPM_FORCED_LEVEL_LOW: 1871 sclk_min = sclk_max = gfx_table->min; 1872 mclk_min = mclk_max = mem_table->min; 1873 socclk_min = socclk_max = soc_table->min; 1874 break; 1875 case AMD_DPM_FORCED_LEVEL_AUTO: 1876 sclk_min = gfx_table->min; 1877 sclk_max = gfx_table->max; 1878 mclk_min = mem_table->min; 1879 mclk_max = mem_table->max; 1880 socclk_min = soc_table->min; 1881 socclk_max = soc_table->max; 1882 break; 1883 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: 1884 sclk_min = sclk_max = pstate_table->gfxclk_pstate.standard; 1885 mclk_min = mclk_max = pstate_table->uclk_pstate.standard; 1886 socclk_min = socclk_max = pstate_table->socclk_pstate.standard; 1887 break; 1888 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: 1889 sclk_min = sclk_max = pstate_table->gfxclk_pstate.min; 1890 break; 1891 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: 1892 mclk_min = mclk_max = pstate_table->uclk_pstate.min; 1893 break; 1894 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: 1895 sclk_min = sclk_max = pstate_table->gfxclk_pstate.peak; 1896 mclk_min = mclk_max = pstate_table->uclk_pstate.peak; 1897 socclk_min = socclk_max = pstate_table->socclk_pstate.peak; 1898 break; 1899 case AMD_DPM_FORCED_LEVEL_MANUAL: 1900 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: 1901 return 0; 1902 default: 1903 dev_err(adev->dev, "Invalid performance level %d\n", level); 1904 return -EINVAL; 1905 } 1906 1907 /* 1908 * Separate MCLK and SOCCLK soft min/max settings are not allowed 1909 * on Arcturus. 1910 */ 1911 if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 2)) { 1912 mclk_min = mclk_max = 0; 1913 socclk_min = socclk_max = 0; 1914 } 1915 1916 if (sclk_min && sclk_max) { 1917 ret = smu_v11_0_set_soft_freq_limited_range(smu, 1918 SMU_GFXCLK, 1919 sclk_min, 1920 sclk_max); 1921 if (ret) 1922 return ret; 1923 } 1924 1925 if (mclk_min && mclk_max) { 1926 ret = smu_v11_0_set_soft_freq_limited_range(smu, 1927 SMU_MCLK, 1928 mclk_min, 1929 mclk_max); 1930 if (ret) 1931 return ret; 1932 } 1933 1934 if (socclk_min && socclk_max) { 1935 ret = smu_v11_0_set_soft_freq_limited_range(smu, 1936 SMU_SOCCLK, 1937 socclk_min, 1938 socclk_max); 1939 if (ret) 1940 return ret; 1941 } 1942 1943 return ret; 1944} 1945 1946int smu_v11_0_set_power_source(struct smu_context *smu, 1947 enum smu_power_src_type power_src) 1948{ 1949 int pwr_source; 1950 1951 pwr_source = smu_cmn_to_asic_specific_index(smu, 1952 CMN2ASIC_MAPPING_PWR, 1953 (uint32_t)power_src); 1954 if (pwr_source < 0) 1955 return -EINVAL; 1956 1957 return smu_cmn_send_smc_msg_with_param(smu, 1958 SMU_MSG_NotifyPowerSource, 1959 pwr_source, 1960 NULL); 1961} 1962 1963int smu_v11_0_get_dpm_freq_by_index(struct smu_context *smu, 1964 enum smu_clk_type clk_type, 1965 uint16_t level, 1966 uint32_t *value) 1967{ 1968 int ret = 0, clk_id = 0; 1969 uint32_t param; 1970 1971 if (!value) 1972 return -EINVAL; 1973 1974 if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) 1975 return 0; 1976 1977 clk_id = smu_cmn_to_asic_specific_index(smu, 1978 CMN2ASIC_MAPPING_CLK, 1979 clk_type); 1980 if (clk_id < 0) 1981 return clk_id; 1982 1983 param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff)); 1984 1985 ret = smu_cmn_send_smc_msg_with_param(smu, 1986 SMU_MSG_GetDpmFreqByIndex, 1987 param, 1988 value); 1989 if (ret) 1990 return ret; 1991 1992 /* 1993 * BIT31: 0 - Fine grained DPM, 1 - Dicrete DPM 1994 * now, we un-support it 1995 */ 1996 *value = *value & 0x7fffffff; 1997 1998 return ret; 1999} 2000 2001int smu_v11_0_get_dpm_level_count(struct smu_context *smu, 2002 enum smu_clk_type clk_type, 2003 uint32_t *value) 2004{ 2005 return smu_v11_0_get_dpm_freq_by_index(smu, 2006 clk_type, 2007 0xff, 2008 value); 2009} 2010 2011int smu_v11_0_set_single_dpm_table(struct smu_context *smu, 2012 enum smu_clk_type clk_type, 2013 struct smu_11_0_dpm_table *single_dpm_table) 2014{ 2015 int ret = 0; 2016 uint32_t clk; 2017 int i; 2018 2019 ret = smu_v11_0_get_dpm_level_count(smu, 2020 clk_type, 2021 &single_dpm_table->count); 2022 if (ret) { 2023 dev_err(smu->adev->dev, "[%s] failed to get dpm levels!\n", __func__); 2024 return ret; 2025 } 2026 2027 for (i = 0; i < single_dpm_table->count; i++) { 2028 ret = smu_v11_0_get_dpm_freq_by_index(smu, 2029 clk_type, 2030 i, 2031 &clk); 2032 if (ret) { 2033 dev_err(smu->adev->dev, "[%s] failed to get dpm freq by index!\n", __func__); 2034 return ret; 2035 } 2036 2037 single_dpm_table->dpm_levels[i].value = clk; 2038 single_dpm_table->dpm_levels[i].enabled = true; 2039 2040 if (i == 0) 2041 single_dpm_table->min = clk; 2042 else if (i == single_dpm_table->count - 1) 2043 single_dpm_table->max = clk; 2044 } 2045 2046 return 0; 2047} 2048 2049int smu_v11_0_get_dpm_level_range(struct smu_context *smu, 2050 enum smu_clk_type clk_type, 2051 uint32_t *min_value, 2052 uint32_t *max_value) 2053{ 2054 uint32_t level_count = 0; 2055 int ret = 0; 2056 2057 if (!min_value && !max_value) 2058 return -EINVAL; 2059 2060 if (min_value) { 2061 /* by default, level 0 clock value as min value */ 2062 ret = smu_v11_0_get_dpm_freq_by_index(smu, 2063 clk_type, 2064 0, 2065 min_value); 2066 if (ret) 2067 return ret; 2068 } 2069 2070 if (max_value) { 2071 ret = smu_v11_0_get_dpm_level_count(smu, 2072 clk_type, 2073 &level_count); 2074 if (ret) 2075 return ret; 2076 2077 ret = smu_v11_0_get_dpm_freq_by_index(smu, 2078 clk_type, 2079 level_count - 1, 2080 max_value); 2081 if (ret) 2082 return ret; 2083 } 2084 2085 return ret; 2086} 2087 2088int smu_v11_0_get_current_pcie_link_width_level(struct smu_context *smu) 2089{ 2090 struct amdgpu_device *adev = smu->adev; 2091 2092 return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) & 2093 PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK) 2094 >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT; 2095} 2096 2097uint16_t smu_v11_0_get_current_pcie_link_width(struct smu_context *smu) 2098{ 2099 uint32_t width_level; 2100 2101 width_level = smu_v11_0_get_current_pcie_link_width_level(smu); 2102 if (width_level > LINK_WIDTH_MAX) 2103 width_level = 0; 2104 2105 return link_width[width_level]; 2106} 2107 2108int smu_v11_0_get_current_pcie_link_speed_level(struct smu_context *smu) 2109{ 2110 struct amdgpu_device *adev = smu->adev; 2111 2112 return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) & 2113 PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) 2114 >> PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT; 2115} 2116 2117uint16_t smu_v11_0_get_current_pcie_link_speed(struct smu_context *smu) 2118{ 2119 uint32_t speed_level; 2120 2121 speed_level = smu_v11_0_get_current_pcie_link_speed_level(smu); 2122 if (speed_level > LINK_SPEED_MAX) 2123 speed_level = 0; 2124 2125 return link_speed[speed_level]; 2126} 2127 2128int smu_v11_0_gfx_ulv_control(struct smu_context *smu, 2129 bool enablement) 2130{ 2131 int ret = 0; 2132 2133 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_GFX_ULV_BIT)) 2134 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_GFX_ULV_BIT, enablement); 2135 2136 return ret; 2137} 2138 2139int smu_v11_0_deep_sleep_control(struct smu_context *smu, 2140 bool enablement) 2141{ 2142 struct amdgpu_device *adev = smu->adev; 2143 int ret = 0; 2144 2145 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_GFXCLK_BIT)) { 2146 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_GFXCLK_BIT, enablement); 2147 if (ret) { 2148 dev_err(adev->dev, "Failed to %s GFXCLK DS!\n", enablement ? "enable" : "disable"); 2149 return ret; 2150 } 2151 } 2152 2153 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_UCLK_BIT)) { 2154 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_UCLK_BIT, enablement); 2155 if (ret) { 2156 dev_err(adev->dev, "Failed to %s UCLK DS!\n", enablement ? "enable" : "disable"); 2157 return ret; 2158 } 2159 } 2160 2161 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_FCLK_BIT)) { 2162 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_FCLK_BIT, enablement); 2163 if (ret) { 2164 dev_err(adev->dev, "Failed to %s FCLK DS!\n", enablement ? "enable" : "disable"); 2165 return ret; 2166 } 2167 } 2168 2169 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_SOCCLK_BIT)) { 2170 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_SOCCLK_BIT, enablement); 2171 if (ret) { 2172 dev_err(adev->dev, "Failed to %s SOCCLK DS!\n", enablement ? "enable" : "disable"); 2173 return ret; 2174 } 2175 } 2176 2177 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_LCLK_BIT)) { 2178 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_LCLK_BIT, enablement); 2179 if (ret) { 2180 dev_err(adev->dev, "Failed to %s LCLK DS!\n", enablement ? "enable" : "disable"); 2181 return ret; 2182 } 2183 } 2184 2185 return ret; 2186} 2187 2188int smu_v11_0_restore_user_od_settings(struct smu_context *smu) 2189{ 2190 struct smu_table_context *table_context = &smu->smu_table; 2191 void *user_od_table = table_context->user_overdrive_table; 2192 int ret = 0; 2193 2194 ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)user_od_table, true); 2195 if (ret) 2196 dev_err(smu->adev->dev, "Failed to import overdrive table!\n"); 2197 2198 return ret; 2199}