smu_v13_0.c
1/* 2 * Copyright 2020 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 */ 22 23#include <linux/firmware.h> 24#include <linux/module.h> 25#include <linux/pci.h> 26#include <linux/reboot.h> 27 28#define SMU_13_0_PARTIAL_PPTABLE 29#define SWSMU_CODE_LAYER_L3 30 31#include "amdgpu.h" 32#include "amdgpu_smu.h" 33#include "atomfirmware.h" 34#include "amdgpu_atomfirmware.h" 35#include "amdgpu_atombios.h" 36#include "smu_v13_0.h" 37#include "soc15_common.h" 38#include "atom.h" 39#include "amdgpu_ras.h" 40#include "smu_cmn.h" 41 42#include "asic_reg/thm/thm_13_0_2_offset.h" 43#include "asic_reg/thm/thm_13_0_2_sh_mask.h" 44#include "asic_reg/mp/mp_13_0_2_offset.h" 45#include "asic_reg/mp/mp_13_0_2_sh_mask.h" 46#include "asic_reg/smuio/smuio_13_0_2_offset.h" 47#include "asic_reg/smuio/smuio_13_0_2_sh_mask.h" 48 49/* 50 * DO NOT use these for err/warn/info/debug messages. 51 * Use dev_err, dev_warn, dev_info and dev_dbg instead. 52 * They are more MGPU friendly. 
53 */ 54#undef pr_err 55#undef pr_warn 56#undef pr_info 57#undef pr_debug 58 59MODULE_FIRMWARE("amdgpu/aldebaran_smc.bin"); 60MODULE_FIRMWARE("amdgpu/smu_13_0_0.bin"); 61MODULE_FIRMWARE("amdgpu/smu_13_0_7.bin"); 62 63#define SMU13_VOLTAGE_SCALE 4 64 65#define LINK_WIDTH_MAX 6 66#define LINK_SPEED_MAX 3 67 68#define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288 69#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK 0x00000070L 70#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT 0x4 71#define smnPCIE_LC_SPEED_CNTL 0x11140290 72#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xC000 73#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0xE 74 75static const int link_width[] = {0, 1, 2, 4, 8, 12, 16}; 76static const int link_speed[] = {25, 50, 80, 160}; 77 78static int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu, void **table, uint32_t *size, 79 uint32_t pptable_id); 80 81int smu_v13_0_init_microcode(struct smu_context *smu) 82{ 83 struct amdgpu_device *adev = smu->adev; 84 const char *chip_name; 85 char fw_name[30]; 86 char ucode_prefix[30]; 87 int err = 0; 88 const struct smc_firmware_header_v1_0 *hdr; 89 const struct common_firmware_header *header; 90 struct amdgpu_firmware_info *ucode = NULL; 91 92 /* doesn't need to load smu firmware in IOV mode */ 93 if (amdgpu_sriov_vf(adev)) 94 return 0; 95 96 switch (adev->ip_versions[MP1_HWIP][0]) { 97 case IP_VERSION(13, 0, 2): 98 chip_name = "aldebaran_smc"; 99 break; 100 default: 101 amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix)); 102 chip_name = ucode_prefix; 103 } 104 105 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", chip_name); 106 107 err = request_firmware(&adev->pm.fw, fw_name, adev->dev); 108 if (err) 109 goto out; 110 err = amdgpu_ucode_validate(adev->pm.fw); 111 if (err) 112 goto out; 113 114 hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data; 115 amdgpu_ucode_print_smc_hdr(&hdr->header); 116 adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version); 117 118 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { 119 ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC]; 120 ucode->ucode_id = AMDGPU_UCODE_ID_SMC; 121 ucode->fw = adev->pm.fw; 122 header = (const struct common_firmware_header *)ucode->fw->data; 123 adev->firmware.fw_size += 124 ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); 125 } 126 127out: 128 if (err) { 129 DRM_ERROR("smu_v13_0: Failed to load firmware \"%s\"\n", 130 fw_name); 131 release_firmware(adev->pm.fw); 132 adev->pm.fw = NULL; 133 } 134 return err; 135} 136 137void smu_v13_0_fini_microcode(struct smu_context *smu) 138{ 139 struct amdgpu_device *adev = smu->adev; 140 141 release_firmware(adev->pm.fw); 142 adev->pm.fw = NULL; 143 adev->pm.fw_version = 0; 144} 145 146int smu_v13_0_load_microcode(struct smu_context *smu) 147{ 148#if 0 149 struct amdgpu_device *adev = smu->adev; 150 const uint32_t *src; 151 const struct smc_firmware_header_v1_0 *hdr; 152 uint32_t addr_start = MP1_SRAM; 153 uint32_t i; 154 uint32_t smc_fw_size; 155 uint32_t mp1_fw_flags; 156 157 hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data; 158 src = (const uint32_t *)(adev->pm.fw->data + 159 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 160 smc_fw_size = hdr->header.ucode_size_bytes; 161 162 for (i = 1; i < smc_fw_size/4 - 1; i++) { 163 WREG32_PCIE(addr_start, src[i]); 164 addr_start += 4; 165 } 166 167 WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff), 168 1 & MP1_SMN_PUB_CTRL__RESET_MASK); 169 WREG32_PCIE(MP1_Public | 
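/*
 * Illustrative sketch (not part of the driver): how the SMC ucode payload
 * is located from the common v1.0 firmware header, matching the (disabled)
 * loader around this point. The helper name is hypothetical:
 *
 *   static const uint32_t *smc_ucode_payload(const struct firmware *fw,
 *                                            uint32_t *size_bytes)
 *   {
 *       const struct smc_firmware_header_v1_0 *hdr =
 *           (const struct smc_firmware_header_v1_0 *)fw->data;
 *
 *       *size_bytes = le32_to_cpu(hdr->header.ucode_size_bytes);
 *       return (const uint32_t *)(fw->data +
 *               le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 *   }
 *
 * The payload is then written to MP1 SRAM one DWORD at a time before the
 * MP1 reset bit is toggled, as the surrounding code does.
 */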
(smnMP1_PUB_CTRL & 0xffffffff), 170 1 & ~MP1_SMN_PUB_CTRL__RESET_MASK); 171 172 for (i = 0; i < adev->usec_timeout; i++) { 173 mp1_fw_flags = RREG32_PCIE(MP1_Public | 174 (smnMP1_FIRMWARE_FLAGS & 0xffffffff)); 175 if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >> 176 MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT) 177 break; 178 udelay(1); 179 } 180 181 if (i == adev->usec_timeout) 182 return -ETIME; 183#endif 184 185 return 0; 186} 187 188int smu_v13_0_init_pptable_microcode(struct smu_context *smu) 189{ 190 struct amdgpu_device *adev = smu->adev; 191 struct amdgpu_firmware_info *ucode = NULL; 192 uint32_t size = 0, pptable_id = 0; 193 int ret = 0; 194 void *table; 195 196 /* doesn't need to load smu firmware in IOV mode */ 197 if (amdgpu_sriov_vf(adev)) 198 return 0; 199 200 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) 201 return 0; 202 203 if (!adev->scpm_enabled) 204 return 0; 205 206 /* override pptable_id from driver parameter */ 207 if (amdgpu_smu_pptable_id >= 0) { 208 pptable_id = amdgpu_smu_pptable_id; 209 dev_info(adev->dev, "override pptable id %d\n", pptable_id); 210 } else { 211 pptable_id = smu->smu_table.boot_values.pp_table_id; 212 213 if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 7) && 214 pptable_id == 3667) 215 pptable_id = 36671; 216 217 if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 7) && 218 pptable_id == 3688) 219 pptable_id = 36881; 220 /* 221 * Temporary solution for SMU V13.0.0 with SCPM enabled: 222 * - use 36831 signed pptable when pp_table_id is 3683 223 * - use 36641 signed pptable when pp_table_id is 3664 or 0 224 * TODO: drop these when the pptable carried in vbios is ready. 225 */ 226 if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)) { 227 switch (pptable_id) { 228 case 0: 229 case 3664: 230 pptable_id = 36641; 231 break; 232 case 3683: 233 pptable_id = 36831; 234 break; 235 default: 236 dev_err(adev->dev, "Unsupported pptable id %d\n", pptable_id); 237 return -EINVAL; 238 } 239 } 240 } 241 242 /* "pptable_id == 0" means vbios carries the pptable. 
*/ 243 if (!pptable_id) 244 return 0; 245 246 ret = smu_v13_0_get_pptable_from_firmware(smu, &table, &size, pptable_id); 247 if (ret) 248 return ret; 249 250 smu->pptable_firmware.data = table; 251 smu->pptable_firmware.size = size; 252 253 ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_PPTABLE]; 254 ucode->ucode_id = AMDGPU_UCODE_ID_PPTABLE; 255 ucode->fw = &smu->pptable_firmware; 256 adev->firmware.fw_size += 257 ALIGN(smu->pptable_firmware.size, PAGE_SIZE); 258 259 return 0; 260} 261 262int smu_v13_0_check_fw_status(struct smu_context *smu) 263{ 264 struct amdgpu_device *adev = smu->adev; 265 uint32_t mp1_fw_flags; 266 267 mp1_fw_flags = RREG32_PCIE(MP1_Public | 268 (smnMP1_FIRMWARE_FLAGS & 0xffffffff)); 269 270 if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >> 271 MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT) 272 return 0; 273 274 return -EIO; 275} 276 277int smu_v13_0_check_fw_version(struct smu_context *smu) 278{ 279 struct amdgpu_device *adev = smu->adev; 280 uint32_t if_version = 0xff, smu_version = 0xff; 281 uint8_t smu_program, smu_major, smu_minor, smu_debug; 282 int ret = 0; 283 284 ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version); 285 if (ret) 286 return ret; 287 288 smu_program = (smu_version >> 24) & 0xff; 289 smu_major = (smu_version >> 16) & 0xff; 290 smu_minor = (smu_version >> 8) & 0xff; 291 smu_debug = (smu_version >> 0) & 0xff; 292 if (smu->is_apu) 293 adev->pm.fw_version = smu_version; 294 295 switch (adev->ip_versions[MP1_HWIP][0]) { 296 case IP_VERSION(13, 0, 2): 297 smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_ALDE; 298 break; 299 case IP_VERSION(13, 0, 0): 300 smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_0; 301 break; 302 case IP_VERSION(13, 0, 7): 303 smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_7; 304 break; 305 case IP_VERSION(13, 0, 1): 306 case IP_VERSION(13, 0, 3): 307 case IP_VERSION(13, 0, 8): 308 smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_YELLOW_CARP; 309 break; 310 case IP_VERSION(13, 0, 4): 311 smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_4; 312 break; 313 case IP_VERSION(13, 0, 5): 314 smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_5; 315 break; 316 default: 317 dev_err(adev->dev, "smu unsupported IP version: 0x%x.\n", 318 adev->ip_versions[MP1_HWIP][0]); 319 smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_INV; 320 break; 321 } 322 323 /* only for dGPU w/ SMU13 */ 324 if (adev->pm.fw) 325 dev_dbg(smu->adev->dev, "smu fw reported program %d, version = 0x%08x (%d.%d.%d)\n", 326 smu_program, smu_version, smu_major, smu_minor, smu_debug); 327 328 /* 329 * 1. if_version mismatch is not critical as our fw is designed 330 * to be backward compatible. 331 * 2. New fw usually brings some optimizations. But that's visible 332 * only on the paired driver. 333 * Considering the above, we just leave the user a warning message instead 334 * of halting driver loading.
335 */ 336 if (if_version != smu->smc_driver_if_version) { 337 dev_info(adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, " 338 "smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n", 339 smu->smc_driver_if_version, if_version, 340 smu_program, smu_version, smu_major, smu_minor, smu_debug); 341 dev_warn(adev->dev, "SMU driver if version not matched\n"); 342 } 343 344 return ret; 345} 346 347static int smu_v13_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size) 348{ 349 struct amdgpu_device *adev = smu->adev; 350 uint32_t ppt_offset_bytes; 351 const struct smc_firmware_header_v2_0 *v2; 352 353 v2 = (const struct smc_firmware_header_v2_0 *) adev->pm.fw->data; 354 355 ppt_offset_bytes = le32_to_cpu(v2->ppt_offset_bytes); 356 *size = le32_to_cpu(v2->ppt_size_bytes); 357 *table = (uint8_t *)v2 + ppt_offset_bytes; 358 359 return 0; 360} 361 362static int smu_v13_0_set_pptable_v2_1(struct smu_context *smu, void **table, 363 uint32_t *size, uint32_t pptable_id) 364{ 365 struct amdgpu_device *adev = smu->adev; 366 const struct smc_firmware_header_v2_1 *v2_1; 367 struct smc_soft_pptable_entry *entries; 368 uint32_t pptable_count = 0; 369 int i = 0; 370 371 v2_1 = (const struct smc_firmware_header_v2_1 *) adev->pm.fw->data; 372 entries = (struct smc_soft_pptable_entry *) 373 ((uint8_t *)v2_1 + le32_to_cpu(v2_1->pptable_entry_offset)); 374 pptable_count = le32_to_cpu(v2_1->pptable_count); 375 for (i = 0; i < pptable_count; i++) { 376 if (le32_to_cpu(entries[i].id) == pptable_id) { 377 *table = ((uint8_t *)v2_1 + le32_to_cpu(entries[i].ppt_offset_bytes)); 378 *size = le32_to_cpu(entries[i].ppt_size_bytes); 379 break; 380 } 381 } 382 383 if (i == pptable_count) 384 return -EINVAL; 385 386 return 0; 387} 388 389static int smu_v13_0_get_pptable_from_vbios(struct smu_context *smu, void **table, uint32_t *size) 390{ 391 struct amdgpu_device *adev = smu->adev; 392 uint16_t atom_table_size; 393 uint8_t frev, crev; 394 int ret, index; 395 396 dev_info(adev->dev, "use vbios provided pptable\n"); 397 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, 398 powerplayinfo); 399 400 ret = amdgpu_atombios_get_data_table(adev, index, &atom_table_size, &frev, &crev, 401 (uint8_t **)table); 402 if (ret) 403 return ret; 404 405 if (size) 406 *size = atom_table_size; 407 408 return 0; 409} 410 411static int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu, void **table, uint32_t *size, 412 uint32_t pptable_id) 413{ 414 const struct smc_firmware_header_v1_0 *hdr; 415 struct amdgpu_device *adev = smu->adev; 416 uint16_t version_major, version_minor; 417 int ret; 418 419 hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data; 420 if (!hdr) 421 return -EINVAL; 422 423 dev_info(adev->dev, "use driver provided pptable %d\n", pptable_id); 424 425 version_major = le16_to_cpu(hdr->header.header_version_major); 426 version_minor = le16_to_cpu(hdr->header.header_version_minor); 427 if (version_major != 2) { 428 dev_err(adev->dev, "Unsupported smu firmware version %d.%d\n", 429 version_major, version_minor); 430 return -EINVAL; 431 } 432 433 switch (version_minor) { 434 case 0: 435 ret = smu_v13_0_set_pptable_v2_0(smu, table, size); 436 break; 437 case 1: 438 ret = smu_v13_0_set_pptable_v2_1(smu, table, size, pptable_id); 439 break; 440 default: 441 ret = -EINVAL; 442 break; 443 } 444 445 return ret; 446} 447 448int smu_v13_0_setup_pptable(struct smu_context *smu) 449{ 450 struct amdgpu_device *adev = smu->adev; 451 uint32_t size = 0, 
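/*
 * Illustrative sketch (not part of the driver): the pptable source
 * selection made further down in this function, condensed into a
 * hypothetical helper:
 *
 *   static bool use_vbios_pptable(struct amdgpu_device *adev,
 *                                 uint32_t pptable_id)
 *   {
 *       // SRIOV and the "pptable_id == 0" case take the vbios copy,
 *       // except under emulation, which always uses the firmware copy
 *       return (amdgpu_sriov_vf(adev) || !pptable_id) &&
 *              (amdgpu_emu_mode != 1);
 *   }
 */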
pptable_id = 0; 452 void *table; 453 int ret = 0; 454 455 /* override pptable_id from driver parameter */ 456 if (amdgpu_smu_pptable_id >= 0) { 457 pptable_id = amdgpu_smu_pptable_id; 458 dev_info(adev->dev, "override pptable id %d\n", pptable_id); 459 } else { 460 pptable_id = smu->smu_table.boot_values.pp_table_id; 461 462 /* 463 * Temporary solution for SMU V13.0.0 with SCPM disabled: 464 * - use 3664 or 3683 on request 465 * - use 3664 when pptable_id is 0 466 * TODO: drop these when the pptable carried in vbios is ready. 467 */ 468 if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)) { 469 switch (pptable_id) { 470 case 0: 471 pptable_id = 3664; 472 break; 473 case 3664: 474 case 3683: 475 break; 476 default: 477 dev_err(adev->dev, "Unsupported pptable id %d\n", pptable_id); 478 return -EINVAL; 479 } 480 } 481 } 482 483 /* force using vbios pptable in sriov mode */ 484 if ((amdgpu_sriov_vf(adev) || !pptable_id) && (amdgpu_emu_mode != 1)) 485 ret = smu_v13_0_get_pptable_from_vbios(smu, &table, &size); 486 else 487 ret = smu_v13_0_get_pptable_from_firmware(smu, &table, &size, pptable_id); 488 489 if (ret) 490 return ret; 491 492 if (!smu->smu_table.power_play_table) 493 smu->smu_table.power_play_table = table; 494 if (!smu->smu_table.power_play_table_size) 495 smu->smu_table.power_play_table_size = size; 496 497 return 0; 498} 499 500int smu_v13_0_init_smc_tables(struct smu_context *smu) 501{ 502 struct smu_table_context *smu_table = &smu->smu_table; 503 struct smu_table *tables = smu_table->tables; 504 int ret = 0; 505 506 smu_table->driver_pptable = 507 kzalloc(tables[SMU_TABLE_PPTABLE].size, GFP_KERNEL); 508 if (!smu_table->driver_pptable) { 509 ret = -ENOMEM; 510 goto err0_out; 511 } 512 513 smu_table->max_sustainable_clocks = 514 kzalloc(sizeof(struct smu_13_0_max_sustainable_clocks), GFP_KERNEL); 515 if (!smu_table->max_sustainable_clocks) { 516 ret = -ENOMEM; 517 goto err1_out; 518 } 519 520 /* Aldebaran does not support OVERDRIVE */ 521 if (tables[SMU_TABLE_OVERDRIVE].size) { 522 smu_table->overdrive_table = 523 kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL); 524 if (!smu_table->overdrive_table) { 525 ret = -ENOMEM; 526 goto err2_out; 527 } 528 529 smu_table->boot_overdrive_table = 530 kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL); 531 if (!smu_table->boot_overdrive_table) { 532 ret = -ENOMEM; 533 goto err3_out; 534 } 535 } 536 537 smu_table->combo_pptable = 538 kzalloc(tables[SMU_TABLE_COMBO_PPTABLE].size, GFP_KERNEL); 539 if (!smu_table->combo_pptable) { 540 ret = -ENOMEM; 541 goto err4_out; 542 } 543 544 return 0; 545 546err4_out: 547 kfree(smu_table->boot_overdrive_table); 548err3_out: 549 kfree(smu_table->overdrive_table); 550err2_out: 551 kfree(smu_table->max_sustainable_clocks); 552err1_out: 553 kfree(smu_table->driver_pptable); 554err0_out: 555 return ret; 556} 557 558int smu_v13_0_fini_smc_tables(struct smu_context *smu) 559{ 560 struct smu_table_context *smu_table = &smu->smu_table; 561 struct smu_dpm_context *smu_dpm = &smu->smu_dpm; 562 563 kfree(smu_table->gpu_metrics_table); 564 kfree(smu_table->combo_pptable); 565 kfree(smu_table->boot_overdrive_table); 566 kfree(smu_table->overdrive_table); 567 kfree(smu_table->max_sustainable_clocks); 568 kfree(smu_table->driver_pptable); 569 smu_table->gpu_metrics_table = NULL; 570 smu_table->combo_pptable = NULL; 571 smu_table->boot_overdrive_table = NULL; 572 smu_table->overdrive_table = NULL; 573 smu_table->max_sustainable_clocks = NULL; 574 smu_table->driver_pptable = NULL; 575 
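/*
 * Note (illustrative): this teardown mirrors smu_v13_0_init_smc_tables()
 * above, freeing in roughly reverse allocation order. Each pointer is
 * cleared after kfree() so a repeated fini, or a fini after a partially
 * failed init, stays safe (kfree(NULL) is a no-op):
 *
 *   kfree(obj->buf);
 *   obj->buf = NULL;   // guards against a double free on re-entry
 */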
kfree(smu_table->hardcode_pptable); 576 smu_table->hardcode_pptable = NULL; 577 578 kfree(smu_table->ecc_table); 579 kfree(smu_table->metrics_table); 580 kfree(smu_table->watermarks_table); 581 smu_table->ecc_table = NULL; 582 smu_table->metrics_table = NULL; 583 smu_table->watermarks_table = NULL; 584 smu_table->metrics_time = 0; 585 586 kfree(smu_dpm->dpm_context); 587 kfree(smu_dpm->golden_dpm_context); 588 kfree(smu_dpm->dpm_current_power_state); 589 kfree(smu_dpm->dpm_request_power_state); 590 smu_dpm->dpm_context = NULL; 591 smu_dpm->golden_dpm_context = NULL; 592 smu_dpm->dpm_context_size = 0; 593 smu_dpm->dpm_current_power_state = NULL; 594 smu_dpm->dpm_request_power_state = NULL; 595 596 return 0; 597} 598 599int smu_v13_0_init_power(struct smu_context *smu) 600{ 601 struct smu_power_context *smu_power = &smu->smu_power; 602 603 if (smu_power->power_context || smu_power->power_context_size != 0) 604 return -EINVAL; 605 606 smu_power->power_context = kzalloc(sizeof(struct smu_13_0_dpm_context), 607 GFP_KERNEL); 608 if (!smu_power->power_context) 609 return -ENOMEM; 610 smu_power->power_context_size = sizeof(struct smu_13_0_dpm_context); 611 612 return 0; 613} 614 615int smu_v13_0_fini_power(struct smu_context *smu) 616{ 617 struct smu_power_context *smu_power = &smu->smu_power; 618 619 if (!smu_power->power_context || smu_power->power_context_size == 0) 620 return -EINVAL; 621 622 kfree(smu_power->power_context); 623 smu_power->power_context = NULL; 624 smu_power->power_context_size = 0; 625 626 return 0; 627} 628 629int smu_v13_0_get_vbios_bootup_values(struct smu_context *smu) 630{ 631 int ret, index; 632 uint16_t size; 633 uint8_t frev, crev; 634 struct atom_common_table_header *header; 635 struct atom_firmware_info_v3_4 *v_3_4; 636 struct atom_firmware_info_v3_3 *v_3_3; 637 struct atom_firmware_info_v3_1 *v_3_1; 638 struct atom_smu_info_v3_6 *smu_info_v3_6; 639 struct atom_smu_info_v4_0 *smu_info_v4_0; 640 641 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, 642 firmwareinfo); 643 644 ret = amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev, 645 (uint8_t **)&header); 646 if (ret) 647 return ret; 648 649 if (header->format_revision != 3) { 650 dev_err(smu->adev->dev, "unknown atom_firmware_info version! 
for smu13\n"); 651 return -EINVAL; 652 } 653 654 switch (header->content_revision) { 655 case 0: 656 case 1: 657 case 2: 658 v_3_1 = (struct atom_firmware_info_v3_1 *)header; 659 smu->smu_table.boot_values.revision = v_3_1->firmware_revision; 660 smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz; 661 smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz; 662 smu->smu_table.boot_values.socclk = 0; 663 smu->smu_table.boot_values.dcefclk = 0; 664 smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv; 665 smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv; 666 smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv; 667 smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv; 668 smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id; 669 smu->smu_table.boot_values.pp_table_id = 0; 670 break; 671 case 3: 672 v_3_3 = (struct atom_firmware_info_v3_3 *)header; 673 smu->smu_table.boot_values.revision = v_3_3->firmware_revision; 674 smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz; 675 smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz; 676 smu->smu_table.boot_values.socclk = 0; 677 smu->smu_table.boot_values.dcefclk = 0; 678 smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv; 679 smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv; 680 smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv; 681 smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv; 682 smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id; 683 smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id; 684 break; 685 case 4: 686 default: 687 v_3_4 = (struct atom_firmware_info_v3_4 *)header; 688 smu->smu_table.boot_values.revision = v_3_4->firmware_revision; 689 smu->smu_table.boot_values.gfxclk = v_3_4->bootup_sclk_in10khz; 690 smu->smu_table.boot_values.uclk = v_3_4->bootup_mclk_in10khz; 691 smu->smu_table.boot_values.socclk = 0; 692 smu->smu_table.boot_values.dcefclk = 0; 693 smu->smu_table.boot_values.vddc = v_3_4->bootup_vddc_mv; 694 smu->smu_table.boot_values.vddci = v_3_4->bootup_vddci_mv; 695 smu->smu_table.boot_values.mvddc = v_3_4->bootup_mvddc_mv; 696 smu->smu_table.boot_values.vdd_gfx = v_3_4->bootup_vddgfx_mv; 697 smu->smu_table.boot_values.cooling_id = v_3_4->coolingsolution_id; 698 smu->smu_table.boot_values.pp_table_id = v_3_4->pplib_pptable_id; 699 break; 700 } 701 702 smu->smu_table.boot_values.format_revision = header->format_revision; 703 smu->smu_table.boot_values.content_revision = header->content_revision; 704 705 index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, 706 smu_info); 707 if (!amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev, 708 (uint8_t **)&header)) { 709 710 if ((frev == 3) && (crev == 6)) { 711 smu_info_v3_6 = (struct atom_smu_info_v3_6 *)header; 712 713 smu->smu_table.boot_values.socclk = smu_info_v3_6->bootup_socclk_10khz; 714 smu->smu_table.boot_values.vclk = smu_info_v3_6->bootup_vclk_10khz; 715 smu->smu_table.boot_values.dclk = smu_info_v3_6->bootup_dclk_10khz; 716 smu->smu_table.boot_values.fclk = smu_info_v3_6->bootup_fclk_10khz; 717 } else if ((frev == 4) && (crev == 0)) { 718 smu_info_v4_0 = (struct atom_smu_info_v4_0 *)header; 719 720 smu->smu_table.boot_values.socclk = smu_info_v4_0->bootup_socclk_10khz; 721 smu->smu_table.boot_values.dcefclk = smu_info_v4_0->bootup_dcefclk_10khz; 722 smu->smu_table.boot_values.vclk = smu_info_v4_0->bootup_vclk0_10khz; 723 smu->smu_table.boot_values.dclk = smu_info_v4_0->bootup_dclk0_10khz; 724 
smu->smu_table.boot_values.fclk = smu_info_v4_0->bootup_fclk_10khz; 725 } else { 726 dev_warn(smu->adev->dev, "Unexpected and unhandled version: %d.%d\n", 727 (uint32_t)frev, (uint32_t)crev); 728 } 729 } 730 731 return 0; 732} 733 734 735int smu_v13_0_notify_memory_pool_location(struct smu_context *smu) 736{ 737 struct smu_table_context *smu_table = &smu->smu_table; 738 struct smu_table *memory_pool = &smu_table->memory_pool; 739 int ret = 0; 740 uint64_t address; 741 uint32_t address_low, address_high; 742 743 if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL) 744 return ret; 745 746 address = memory_pool->mc_address; 747 address_high = (uint32_t)upper_32_bits(address); 748 address_low = (uint32_t)lower_32_bits(address); 749 750 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh, 751 address_high, NULL); 752 if (ret) 753 return ret; 754 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow, 755 address_low, NULL); 756 if (ret) 757 return ret; 758 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize, 759 (uint32_t)memory_pool->size, NULL); 760 if (ret) 761 return ret; 762 763 return ret; 764} 765 766int smu_v13_0_set_min_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk) 767{ 768 int ret; 769 770 ret = smu_cmn_send_smc_msg_with_param(smu, 771 SMU_MSG_SetMinDeepSleepDcefclk, clk, NULL); 772 if (ret) 773 dev_err(smu->adev->dev, "SMU13 attempt to set divider for DCEFCLK failed!"); 774 775 return ret; 776} 777 778int smu_v13_0_set_driver_table_location(struct smu_context *smu) 779{ 780 struct smu_table *driver_table = &smu->smu_table.driver_table; 781 int ret = 0; 782 783 if (driver_table->mc_address) { 784 ret = smu_cmn_send_smc_msg_with_param(smu, 785 SMU_MSG_SetDriverDramAddrHigh, 786 upper_32_bits(driver_table->mc_address), 787 NULL); 788 if (!ret) 789 ret = smu_cmn_send_smc_msg_with_param(smu, 790 SMU_MSG_SetDriverDramAddrLow, 791 lower_32_bits(driver_table->mc_address), 792 NULL); 793 } 794 795 return ret; 796} 797 798int smu_v13_0_set_tool_table_location(struct smu_context *smu) 799{ 800 int ret = 0; 801 struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG]; 802 803 if (tool_table->mc_address) { 804 ret = smu_cmn_send_smc_msg_with_param(smu, 805 SMU_MSG_SetToolsDramAddrHigh, 806 upper_32_bits(tool_table->mc_address), 807 NULL); 808 if (!ret) 809 ret = smu_cmn_send_smc_msg_with_param(smu, 810 SMU_MSG_SetToolsDramAddrLow, 811 lower_32_bits(tool_table->mc_address), 812 NULL); 813 } 814 815 return ret; 816} 817 818int smu_v13_0_init_display_count(struct smu_context *smu, uint32_t count) 819{ 820 int ret = 0; 821 822 if (!smu->pm_enabled) 823 return ret; 824 825 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count, NULL); 826 827 return ret; 828} 829 830int smu_v13_0_set_allowed_mask(struct smu_context *smu) 831{ 832 struct smu_feature *feature = &smu->smu_feature; 833 int ret = 0; 834 uint32_t feature_mask[2]; 835 836 if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || 837 feature->feature_num < 64) 838 return -EINVAL; 839 840 bitmap_to_arr32(feature_mask, feature->allowed, 64); 841 842 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh, 843 feature_mask[1], NULL); 844 if (ret) 845 return ret; 846 847 return smu_cmn_send_smc_msg_with_param(smu, 848 SMU_MSG_SetAllowedFeaturesMaskLow, 849 feature_mask[0], 850 NULL); 851} 852 853int smu_v13_0_gfx_off_control(struct smu_context *smu, bool enable) 854{ 855 int ret = 0; 856 struct amdgpu_device *adev
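/*
 * Illustrative sketch (not part of the driver): an SMU message carries a
 * single 32-bit argument, so the 64-bit quantities above (DRAM addresses,
 * the 64-bit allowed-feature mask) are always sent as High/Low pairs:
 *
 *   uint64_t mc_address = 0x123456789000ULL;   // hypothetical value
 *   uint32_t hi = upper_32_bits(mc_address);   // 0x1234
 *   uint32_t lo = lower_32_bits(mc_address);   // 0x56789000
 *   // hi goes with SMU_MSG_SetDriverDramAddrHigh,
 *   // lo with SMU_MSG_SetDriverDramAddrLow
 */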
= smu->adev; 857 858 switch (adev->ip_versions[MP1_HWIP][0]) { 859 case IP_VERSION(13, 0, 0): 860 case IP_VERSION(13, 0, 1): 861 case IP_VERSION(13, 0, 3): 862 case IP_VERSION(13, 0, 4): 863 case IP_VERSION(13, 0, 5): 864 case IP_VERSION(13, 0, 7): 865 case IP_VERSION(13, 0, 8): 866 if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) 867 return 0; 868 if (enable) 869 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL); 870 else 871 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL); 872 break; 873 default: 874 break; 875 } 876 877 return ret; 878} 879 880int smu_v13_0_system_features_control(struct smu_context *smu, 881 bool en) 882{ 883 return smu_cmn_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures : 884 SMU_MSG_DisableAllSmuFeatures), NULL); 885} 886 887int smu_v13_0_notify_display_change(struct smu_context *smu) 888{ 889 int ret = 0; 890 891 if (!smu->pm_enabled) 892 return ret; 893 894 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) && 895 smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM) 896 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL); 897 898 return ret; 899} 900 901 static int 902smu_v13_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock, 903 enum smu_clk_type clock_select) 904{ 905 int ret = 0; 906 int clk_id; 907 908 if ((smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetDcModeMaxDpmFreq) < 0) || 909 (smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetMaxDpmFreq) < 0)) 910 return 0; 911 912 clk_id = smu_cmn_to_asic_specific_index(smu, 913 CMN2ASIC_MAPPING_CLK, 914 clock_select); 915 if (clk_id < 0) 916 return -EINVAL; 917 918 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq, 919 clk_id << 16, clock); 920 if (ret) { 921 dev_err(smu->adev->dev, "[GetMaxSustainableClock] Failed to get max DC clock from SMC!"); 922 return ret; 923 } 924 925 if (*clock != 0) 926 return 0; 927 928 /* if DC limit is zero, return AC limit */ 929 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, 930 clk_id << 16, clock); 931 if (ret) { 932 dev_err(smu->adev->dev, "[GetMaxSustainableClock] failed to get max AC clock from SMC!"); 933 return ret; 934 } 935 936 return 0; 937} 938 939int smu_v13_0_init_max_sustainable_clocks(struct smu_context *smu) 940{ 941 struct smu_13_0_max_sustainable_clocks *max_sustainable_clocks = 942 smu->smu_table.max_sustainable_clocks; 943 int ret = 0; 944 945 max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100; 946 max_sustainable_clocks->soc_clock = smu->smu_table.boot_values.socclk / 100; 947 max_sustainable_clocks->dcef_clock = smu->smu_table.boot_values.dcefclk / 100; 948 max_sustainable_clocks->display_clock = 0xFFFFFFFF; 949 max_sustainable_clocks->phy_clock = 0xFFFFFFFF; 950 max_sustainable_clocks->pixel_clock = 0xFFFFFFFF; 951 952 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) { 953 ret = smu_v13_0_get_max_sustainable_clock(smu, 954 &(max_sustainable_clocks->uclock), 955 SMU_UCLK); 956 if (ret) { 957 dev_err(smu->adev->dev, "[%s] failed to get max UCLK from SMC!", 958 __func__); 959 return ret; 960 } 961 } 962 963 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) { 964 ret = smu_v13_0_get_max_sustainable_clock(smu, 965 &(max_sustainable_clocks->soc_clock), 966 SMU_SOCCLK); 967 if (ret) { 968 dev_err(smu->adev->dev, "[%s] failed to get max SOCCLK from SMC!", 969 __func__); 970 return ret; 971 } 972 } 973 974 if (smu_cmn_feature_is_enabled(smu, 
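/*
 * Note (illustrative): smu_v13_0_get_max_sustainable_clock() above packs
 * the clock id into the upper 16 bits of the message argument, asks for
 * the DC-mode limit first and only falls back to the AC limit when the
 * DC reading is 0:
 *
 *   // clk_id = 2            ->  param = 2 << 16 = 0x00020000
 *   // SMU_MSG_GetDcModeMaxDpmFreq(param) returns 0
 *   //                       ->  retry SMU_MSG_GetMaxDpmFreq(param)
 */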
SMU_FEATURE_DPM_DCEFCLK_BIT)) { 975 ret = smu_v13_0_get_max_sustainable_clock(smu, 976 &(max_sustainable_clocks->dcef_clock), 977 SMU_DCEFCLK); 978 if (ret) { 979 dev_err(smu->adev->dev, "[%s] failed to get max DCEFCLK from SMC!", 980 __func__); 981 return ret; 982 } 983 984 ret = smu_v13_0_get_max_sustainable_clock(smu, 985 &(max_sustainable_clocks->display_clock), 986 SMU_DISPCLK); 987 if (ret) { 988 dev_err(smu->adev->dev, "[%s] failed to get max DISPCLK from SMC!", 989 __func__); 990 return ret; 991 } 992 ret = smu_v13_0_get_max_sustainable_clock(smu, 993 &(max_sustainable_clocks->phy_clock), 994 SMU_PHYCLK); 995 if (ret) { 996 dev_err(smu->adev->dev, "[%s] failed to get max PHYCLK from SMC!", 997 __func__); 998 return ret; 999 } 1000 ret = smu_v13_0_get_max_sustainable_clock(smu, 1001 &(max_sustainable_clocks->pixel_clock), 1002 SMU_PIXCLK); 1003 if (ret) { 1004 dev_err(smu->adev->dev, "[%s] failed to get max PIXCLK from SMC!", 1005 __func__); 1006 return ret; 1007 } 1008 } 1009 1010 if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock) 1011 max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock; 1012 1013 return 0; 1014} 1015 1016int smu_v13_0_get_current_power_limit(struct smu_context *smu, 1017 uint32_t *power_limit) 1018{ 1019 int power_src; 1020 int ret = 0; 1021 1022 if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) 1023 return -EINVAL; 1024 1025 power_src = smu_cmn_to_asic_specific_index(smu, 1026 CMN2ASIC_MAPPING_PWR, 1027 smu->adev->pm.ac_power ? 1028 SMU_POWER_SOURCE_AC : 1029 SMU_POWER_SOURCE_DC); 1030 if (power_src < 0) 1031 return -EINVAL; 1032 1033 ret = smu_cmn_send_smc_msg_with_param(smu, 1034 SMU_MSG_GetPptLimit, 1035 power_src << 16, 1036 power_limit); 1037 if (ret) 1038 dev_err(smu->adev->dev, "[%s] get PPT limit failed!", __func__); 1039 1040 return ret; 1041} 1042 1043int smu_v13_0_set_power_limit(struct smu_context *smu, 1044 enum smu_ppt_limit_type limit_type, 1045 uint32_t limit) 1046{ 1047 int ret = 0; 1048 1049 if (limit_type != SMU_DEFAULT_PPT_LIMIT) 1050 return -EINVAL; 1051 1052 if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) { 1053 dev_err(smu->adev->dev, "Setting new power limit is not supported!\n"); 1054 return -EOPNOTSUPP; 1055 } 1056 1057 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, limit, NULL); 1058 if (ret) { 1059 dev_err(smu->adev->dev, "[%s] Set power limit Failed!\n", __func__); 1060 return ret; 1061 } 1062 1063 smu->current_power_limit = limit; 1064 1065 return 0; 1066} 1067 1068int smu_v13_0_enable_thermal_alert(struct smu_context *smu) 1069{ 1070 if (smu->smu_table.thermal_controller_type) 1071 return amdgpu_irq_get(smu->adev, &smu->irq_source, 0); 1072 1073 return 0; 1074} 1075 1076int smu_v13_0_disable_thermal_alert(struct smu_context *smu) 1077{ 1078 return amdgpu_irq_put(smu->adev, &smu->irq_source, 0); 1079} 1080 1081static uint16_t convert_to_vddc(uint8_t vid) 1082{ 1083 return (uint16_t) ((6200 - (vid * 25)) / SMU13_VOLTAGE_SCALE); 1084} 1085 1086int smu_v13_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value) 1087{ 1088 struct amdgpu_device *adev = smu->adev; 1089 uint32_t vdd = 0, val_vid = 0; 1090 1091 if (!value) 1092 return -EINVAL; 1093 val_vid = (RREG32_SOC15(SMUIO, 0, regSMUSVI0_TEL_PLANE0) & 1094 SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >> 1095 SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT; 1096 1097 vdd = (uint32_t)convert_to_vddc((uint8_t)val_vid); 1098 1099 *value = vdd; 1100 1101 return 0; 1102 1103} 1104 1105int 
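/*
 * Worked example (illustrative) for convert_to_vddc() above: the SVI0
 * telemetry VID decodes as (6200 - vid * 25) / SMU13_VOLTAGE_SCALE mV,
 * i.e. 1.55 V minus 6.25 mV per VID step:
 *
 *   convert_to_vddc(0)    == 6200 / 4          == 1550 mV
 *   convert_to_vddc(0x30) == (6200 - 1200) / 4 == 1250 mV
 */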
1106smu_v13_0_display_clock_voltage_request(struct smu_context *smu, 1107 struct pp_display_clock_request 1108 *clock_req) 1109{ 1110 enum amd_pp_clock_type clk_type = clock_req->clock_type; 1111 int ret = 0; 1112 enum smu_clk_type clk_select = 0; 1113 uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000; 1114 1115 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) || 1116 smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) { 1117 switch (clk_type) { 1118 case amd_pp_dcef_clock: 1119 clk_select = SMU_DCEFCLK; 1120 break; 1121 case amd_pp_disp_clock: 1122 clk_select = SMU_DISPCLK; 1123 break; 1124 case amd_pp_pixel_clock: 1125 clk_select = SMU_PIXCLK; 1126 break; 1127 case amd_pp_phy_clock: 1128 clk_select = SMU_PHYCLK; 1129 break; 1130 case amd_pp_mem_clock: 1131 clk_select = SMU_UCLK; 1132 break; 1133 default: 1134 dev_info(smu->adev->dev, "[%s] Invalid Clock Type!", __func__); 1135 ret = -EINVAL; 1136 break; 1137 } 1138 1139 if (ret) 1140 goto failed; 1141 1142 if (clk_select == SMU_UCLK && smu->disable_uclk_switch) 1143 return 0; 1144 1145 ret = smu_v13_0_set_hard_freq_limited_range(smu, clk_select, clk_freq, 0); 1146 1147 if (clk_select == SMU_UCLK) 1148 smu->hard_min_uclk_req_from_dal = clk_freq; 1149 } 1150 1151failed: 1152 return ret; 1153} 1154 1155uint32_t smu_v13_0_get_fan_control_mode(struct smu_context *smu) 1156{ 1157 if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT)) 1158 return AMD_FAN_CTRL_MANUAL; 1159 else 1160 return AMD_FAN_CTRL_AUTO; 1161} 1162 1163 static int 1164smu_v13_0_auto_fan_control(struct smu_context *smu, bool auto_fan_control) 1165{ 1166 int ret = 0; 1167 1168 if (!smu_cmn_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT)) 1169 return 0; 1170 1171 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, auto_fan_control); 1172 if (ret) 1173 dev_err(smu->adev->dev, "[%s] %s smc FAN CONTROL feature failed!", 1174 __func__, (auto_fan_control ?
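/*
 * Usage sketch (illustrative, hypothetical values): a display-stack
 * request for an 800 MHz memory clock floor travels through
 * smu_v13_0_display_clock_voltage_request() above as:
 *
 *   struct pp_display_clock_request req = {
 *       .clock_type = amd_pp_mem_clock,
 *       .clock_freq_in_khz = 800000,   // -> clk_freq = 800 MHz
 *   };
 *   // maps to SMU_UCLK and becomes
 *   // smu_v13_0_set_hard_freq_limited_range(smu, SMU_UCLK, 800, 0)
 */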
"Start" : "Stop")); 1175 1176 return ret; 1177} 1178 1179 static int 1180smu_v13_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode) 1181{ 1182 struct amdgpu_device *adev = smu->adev; 1183 1184 WREG32_SOC15(THM, 0, regCG_FDO_CTRL2, 1185 REG_SET_FIELD(RREG32_SOC15(THM, 0, regCG_FDO_CTRL2), 1186 CG_FDO_CTRL2, TMIN, 0)); 1187 WREG32_SOC15(THM, 0, regCG_FDO_CTRL2, 1188 REG_SET_FIELD(RREG32_SOC15(THM, 0, regCG_FDO_CTRL2), 1189 CG_FDO_CTRL2, FDO_PWM_MODE, mode)); 1190 1191 return 0; 1192} 1193 1194int smu_v13_0_set_fan_speed_pwm(struct smu_context *smu, 1195 uint32_t speed) 1196{ 1197 struct amdgpu_device *adev = smu->adev; 1198 uint32_t duty100, duty; 1199 uint64_t tmp64; 1200 1201 speed = MIN(speed, 255); 1202 1203 if (smu_v13_0_auto_fan_control(smu, 0)) 1204 return -EINVAL; 1205 1206 duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, regCG_FDO_CTRL1), 1207 CG_FDO_CTRL1, FMAX_DUTY100); 1208 if (!duty100) 1209 return -EINVAL; 1210 1211 tmp64 = (uint64_t)speed * duty100; 1212 do_div(tmp64, 255); 1213 duty = (uint32_t)tmp64; 1214 1215 WREG32_SOC15(THM, 0, regCG_FDO_CTRL0, 1216 REG_SET_FIELD(RREG32_SOC15(THM, 0, regCG_FDO_CTRL0), 1217 CG_FDO_CTRL0, FDO_STATIC_DUTY, duty)); 1218 1219 return smu_v13_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC); 1220} 1221 1222 int 1223smu_v13_0_set_fan_control_mode(struct smu_context *smu, 1224 uint32_t mode) 1225{ 1226 int ret = 0; 1227 1228 switch (mode) { 1229 case AMD_FAN_CTRL_NONE: 1230 ret = smu_v13_0_set_fan_speed_pwm(smu, 255); 1231 break; 1232 case AMD_FAN_CTRL_MANUAL: 1233 ret = smu_v13_0_auto_fan_control(smu, 0); 1234 break; 1235 case AMD_FAN_CTRL_AUTO: 1236 ret = smu_v13_0_auto_fan_control(smu, 1); 1237 break; 1238 default: 1239 break; 1240 } 1241 1242 if (ret) { 1243 dev_err(smu->adev->dev, "[%s]Set fan control mode failed!", __func__); 1244 return -EINVAL; 1245 } 1246 1247 return ret; 1248} 1249 1250int smu_v13_0_set_fan_speed_rpm(struct smu_context *smu, 1251 uint32_t speed) 1252{ 1253 struct amdgpu_device *adev = smu->adev; 1254 uint32_t tach_period, crystal_clock_freq; 1255 int ret; 1256 1257 if (!speed) 1258 return -EINVAL; 1259 1260 ret = smu_v13_0_auto_fan_control(smu, 0); 1261 if (ret) 1262 return ret; 1263 1264 crystal_clock_freq = amdgpu_asic_get_xclk(adev); 1265 tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed); 1266 WREG32_SOC15(THM, 0, regCG_TACH_CTRL, 1267 REG_SET_FIELD(RREG32_SOC15(THM, 0, regCG_TACH_CTRL), 1268 CG_TACH_CTRL, TARGET_PERIOD, 1269 tach_period)); 1270 1271 return smu_v13_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM); 1272} 1273 1274int smu_v13_0_set_xgmi_pstate(struct smu_context *smu, 1275 uint32_t pstate) 1276{ 1277 int ret = 0; 1278 ret = smu_cmn_send_smc_msg_with_param(smu, 1279 SMU_MSG_SetXgmiMode, 1280 pstate ? 
XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3, 1281 NULL); 1282 return ret; 1283} 1284 1285static int smu_v13_0_set_irq_state(struct amdgpu_device *adev, 1286 struct amdgpu_irq_src *source, 1287 unsigned type, 1288 enum amdgpu_interrupt_state state) 1289{ 1290 struct smu_context *smu = adev->powerplay.pp_handle; 1291 uint32_t low, high; 1292 uint32_t val = 0; 1293 1294 switch (state) { 1295 case AMDGPU_IRQ_STATE_DISABLE: 1296 /* For THM irqs */ 1297 val = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL); 1298 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 1); 1299 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 1); 1300 WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, val); 1301 1302 WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_ENA, 0); 1303 1304 /* For MP1 SW irqs */ 1305 val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL); 1306 val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1); 1307 WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val); 1308 1309 break; 1310 case AMDGPU_IRQ_STATE_ENABLE: 1311 /* For THM irqs */ 1312 low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP, 1313 smu->thermal_range.min / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES); 1314 high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP, 1315 smu->thermal_range.software_shutdown_temp); 1316 1317 val = RREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL); 1318 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5); 1319 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1); 1320 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0); 1321 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0); 1322 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 0xff)); 1323 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 0xff)); 1324 val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK); 1325 WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_CTRL, val); 1326 1327 val = (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT); 1328 val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT); 1329 val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT); 1330 WREG32_SOC15(THM, 0, regTHM_THERMAL_INT_ENA, val); 1331 1332 /* For MP1 SW irqs */ 1333 val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT); 1334 val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE); 1335 val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0); 1336 WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT, val); 1337 1338 val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL); 1339 val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0); 1340 WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val); 1341 1342 break; 1343 default: 1344 break; 1345 } 1346 1347 return 0; 1348} 1349 1350static int smu_v13_0_ack_ac_dc_interrupt(struct smu_context *smu) 1351{ 1352 return smu_cmn_send_smc_msg(smu, 1353 SMU_MSG_ReenableAcDcInterrupt, 1354 NULL); 1355} 1356 1357#define THM_11_0__SRCID__THM_DIG_THERM_L2H 0 /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */ 1358#define THM_11_0__SRCID__THM_DIG_THERM_H2L 1 /* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */ 1359#define SMUIO_11_0__SRCID__SMUIO_GPIO19 83 1360 1361static int smu_v13_0_irq_process(struct amdgpu_device *adev, 1362 struct amdgpu_irq_src *source, 1363 struct amdgpu_iv_entry *entry) 1364{ 1365 struct smu_context *smu = adev->powerplay.pp_handle; 1366 uint32_t client_id = entry->client_id; 1367 uint32_t src_id = entry->src_id; 1368 /* 1369 * ctxid is used to distinguish different 1370 * events for SMCToHost interrupt.
1371 */ 1372 uint32_t ctxid = entry->src_data[0]; 1373 uint32_t data; 1374 1375 if (client_id == SOC15_IH_CLIENTID_THM) { 1376 switch (src_id) { 1377 case THM_11_0__SRCID__THM_DIG_THERM_L2H: 1378 dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n"); 1379 /* 1380 * SW CTF just occurred. 1381 * Try to do a graceful shutdown to prevent further damage. 1382 */ 1383 dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n"); 1384 orderly_poweroff(true); 1385 break; 1386 case THM_11_0__SRCID__THM_DIG_THERM_H2L: 1387 dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n"); 1388 break; 1389 default: 1390 dev_emerg(adev->dev, "ERROR: GPU under temperature range unknown src id (%d)\n", 1391 src_id); 1392 break; 1393 } 1394 } else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO) { 1395 dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n"); 1396 /* 1397 * HW CTF just occurred. Shutdown to prevent further damage. 1398 */ 1399 dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n"); 1400 orderly_poweroff(true); 1401 } else if (client_id == SOC15_IH_CLIENTID_MP1) { 1402 if (src_id == 0xfe) { 1403 /* ACK SMUToHost interrupt */ 1404 data = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL); 1405 data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1); 1406 WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, data); 1407 1408 switch (ctxid) { 1409 case 0x3: 1410 dev_dbg(adev->dev, "Switched to AC mode!\n"); 1411 smu_v13_0_ack_ac_dc_interrupt(smu); 1412 break; 1413 case 0x4: 1414 dev_dbg(adev->dev, "Switched to DC mode!\n"); 1415 smu_v13_0_ack_ac_dc_interrupt(smu); 1416 break; 1417 case 0x7: 1418 /* 1419 * Increment the throttle interrupt counter 1420 */ 1421 atomic64_inc(&smu->throttle_int_counter); 1422 1423 if (!atomic_read(&adev->throttling_logging_enabled)) 1424 return 0; 1425 1426 if (__ratelimit(&adev->throttling_logging_rs)) 1427 schedule_work(&smu->throttling_logging_work); 1428 1429 break; 1430 } 1431 } 1432 } 1433 1434 return 0; 1435} 1436 1437static const struct amdgpu_irq_src_funcs smu_v13_0_irq_funcs = 1438{ 1439 .set = smu_v13_0_set_irq_state, 1440 .process = smu_v13_0_irq_process, 1441}; 1442 1443int smu_v13_0_register_irq_handler(struct smu_context *smu) 1444{ 1445 struct amdgpu_device *adev = smu->adev; 1446 struct amdgpu_irq_src *irq_src = &smu->irq_source; 1447 int ret = 0; 1448 1449 irq_src->num_types = 1; 1450 irq_src->funcs = &smu_v13_0_irq_funcs; 1451 1452 ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM, 1453 THM_11_0__SRCID__THM_DIG_THERM_L2H, 1454 irq_src); 1455 if (ret) 1456 return ret; 1457 1458 ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM, 1459 THM_11_0__SRCID__THM_DIG_THERM_H2L, 1460 irq_src); 1461 if (ret) 1462 return ret; 1463 1464 /* Register CTF(GPIO_19) interrupt */ 1465 ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_ROM_SMUIO, 1466 SMUIO_11_0__SRCID__SMUIO_GPIO19, 1467 irq_src); 1468 if (ret) 1469 return ret; 1470 1471 ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1, 1472 0xfe, 1473 irq_src); 1474 if (ret) 1475 return ret; 1476 1477 return ret; 1478} 1479 1480int smu_v13_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu, 1481 struct pp_smu_nv_clock_table *max_clocks) 1482{ 1483 struct smu_table_context *table_context = &smu->smu_table; 1484 struct smu_13_0_max_sustainable_clocks *sustainable_clocks = NULL; 1485 1486 if (!max_clocks || !table_context->max_sustainable_clocks) 1487 return -EINVAL; 1488 1489 sustainable_clocks = 
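/*
 * Note (illustrative): the MP1 SMCToHost interrupt (src_id 0xfe) handled
 * in smu_v13_0_irq_process() above multiplexes events through ctxid; the
 * values used there, as a hypothetical summary enum (not in the code):
 *
 *   enum smu_v13_0_ctxid_example {
 *       CTXID_AC_MODE    = 0x3,  // switched to AC power
 *       CTXID_DC_MODE    = 0x4,  // switched to DC power
 *       CTXID_THROTTLING = 0x7,  // throttler event, rate-limited logging
 *   };
 */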
table_context->max_sustainable_clocks; 1490 1491 max_clocks->dcfClockInKhz = 1492 (unsigned int) sustainable_clocks->dcef_clock * 1000; 1493 max_clocks->displayClockInKhz = 1494 (unsigned int) sustainable_clocks->display_clock * 1000; 1495 max_clocks->phyClockInKhz = 1496 (unsigned int) sustainable_clocks->phy_clock * 1000; 1497 max_clocks->pixelClockInKhz = 1498 (unsigned int) sustainable_clocks->pixel_clock * 1000; 1499 max_clocks->uClockInKhz = 1500 (unsigned int) sustainable_clocks->uclock * 1000; 1501 max_clocks->socClockInKhz = 1502 (unsigned int) sustainable_clocks->soc_clock * 1000; 1503 max_clocks->dscClockInKhz = 0; 1504 max_clocks->dppClockInKhz = 0; 1505 max_clocks->fabricClockInKhz = 0; 1506 1507 return 0; 1508} 1509 1510int smu_v13_0_set_azalia_d3_pme(struct smu_context *smu) 1511{ 1512 int ret = 0; 1513 1514 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME, NULL); 1515 1516 return ret; 1517} 1518 1519static int smu_v13_0_wait_for_reset_complete(struct smu_context *smu, 1520 uint64_t event_arg) 1521{ 1522 int ret = 0; 1523 1524 dev_dbg(smu->adev->dev, "waiting for smu reset complete\n"); 1525 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GfxDriverResetRecovery, NULL); 1526 1527 return ret; 1528} 1529 1530int smu_v13_0_wait_for_event(struct smu_context *smu, enum smu_event_type event, 1531 uint64_t event_arg) 1532{ 1533 int ret = -EINVAL; 1534 1535 switch (event) { 1536 case SMU_EVENT_RESET_COMPLETE: 1537 ret = smu_v13_0_wait_for_reset_complete(smu, event_arg); 1538 break; 1539 default: 1540 break; 1541 } 1542 1543 return ret; 1544} 1545 1546int smu_v13_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type, 1547 uint32_t *min, uint32_t *max) 1548{ 1549 int ret = 0, clk_id = 0; 1550 uint32_t param = 0; 1551 uint32_t clock_limit; 1552 1553 if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) { 1554 switch (clk_type) { 1555 case SMU_MCLK: 1556 case SMU_UCLK: 1557 clock_limit = smu->smu_table.boot_values.uclk; 1558 break; 1559 case SMU_GFXCLK: 1560 case SMU_SCLK: 1561 clock_limit = smu->smu_table.boot_values.gfxclk; 1562 break; 1563 case SMU_SOCCLK: 1564 clock_limit = smu->smu_table.boot_values.socclk; 1565 break; 1566 default: 1567 clock_limit = 0; 1568 break; 1569 } 1570 1571 /* clock in MHz unit */ 1572 if (min) 1573 *min = clock_limit / 100; 1574 if (max) 1575 *max = clock_limit / 100; 1576 1577 return 0; 1578 } 1579 1580 clk_id = smu_cmn_to_asic_specific_index(smu, 1581 CMN2ASIC_MAPPING_CLK, 1582 clk_type); 1583 if (clk_id < 0) { 1584 ret = -EINVAL; 1585 goto failed; 1586 } 1587 param = (clk_id & 0xffff) << 16; 1588 1589 if (max) { 1590 if (smu->adev->pm.ac_power) 1591 ret = smu_cmn_send_smc_msg_with_param(smu, 1592 SMU_MSG_GetMaxDpmFreq, 1593 param, 1594 max); 1595 else 1596 ret = smu_cmn_send_smc_msg_with_param(smu, 1597 SMU_MSG_GetDcModeMaxDpmFreq, 1598 param, 1599 max); 1600 if (ret) 1601 goto failed; 1602 } 1603 1604 if (min) { 1605 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param, min); 1606 if (ret) 1607 goto failed; 1608 } 1609 1610failed: 1611 return ret; 1612} 1613 1614int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu, 1615 enum smu_clk_type clk_type, 1616 uint32_t min, 1617 uint32_t max) 1618{ 1619 int ret = 0, clk_id = 0; 1620 uint32_t param; 1621 1622 if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) 1623 return 0; 1624 1625 clk_id = smu_cmn_to_asic_specific_index(smu, 1626 CMN2ASIC_MAPPING_CLK, 1627 clk_type); 1628 if (clk_id < 0) 1629 return clk_id; 1630 1631 if (max > 0) { 1632 param = (uint32_t)((clk_id <<
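/*
 * Illustrative sketch (not part of the driver): the soft/hard freq-limit
 * messages pack the clock id and a MHz value into one 32-bit argument,
 * upper and lower halves respectively. The helper name is hypothetical:
 *
 *   static inline uint32_t pack_clk_freq(int clk_id, uint32_t mhz)
 *   {
 *       return (uint32_t)((clk_id << 16) | (mhz & 0xffff));
 *   }
 *
 *   // clk_id = 2, mhz = 1800  ->  0x00020708
 */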
16) | (max & 0xffff)); 1633 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq, 1634 param, NULL); 1635 if (ret) 1636 goto out; 1637 } 1638 1639 if (min > 0) { 1640 param = (uint32_t)((clk_id << 16) | (min & 0xffff)); 1641 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq, 1642 param, NULL); 1643 if (ret) 1644 goto out; 1645 } 1646 1647out: 1648 return ret; 1649} 1650 1651int smu_v13_0_set_hard_freq_limited_range(struct smu_context *smu, 1652 enum smu_clk_type clk_type, 1653 uint32_t min, 1654 uint32_t max) 1655{ 1656 int ret = 0, clk_id = 0; 1657 uint32_t param; 1658 1659 if (min <= 0 && max <= 0) 1660 return -EINVAL; 1661 1662 if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) 1663 return 0; 1664 1665 clk_id = smu_cmn_to_asic_specific_index(smu, 1666 CMN2ASIC_MAPPING_CLK, 1667 clk_type); 1668 if (clk_id < 0) 1669 return clk_id; 1670 1671 if (max > 0) { 1672 param = (uint32_t)((clk_id << 16) | (max & 0xffff)); 1673 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq, 1674 param, NULL); 1675 if (ret) 1676 return ret; 1677 } 1678 1679 if (min > 0) { 1680 param = (uint32_t)((clk_id << 16) | (min & 0xffff)); 1681 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq, 1682 param, NULL); 1683 if (ret) 1684 return ret; 1685 } 1686 1687 return ret; 1688} 1689 1690int smu_v13_0_set_performance_level(struct smu_context *smu, 1691 enum amd_dpm_forced_level level) 1692{ 1693 struct smu_13_0_dpm_context *dpm_context = 1694 smu->smu_dpm.dpm_context; 1695 struct smu_13_0_dpm_table *gfx_table = 1696 &dpm_context->dpm_tables.gfx_table; 1697 struct smu_13_0_dpm_table *mem_table = 1698 &dpm_context->dpm_tables.uclk_table; 1699 struct smu_13_0_dpm_table *soc_table = 1700 &dpm_context->dpm_tables.soc_table; 1701 struct smu_13_0_dpm_table *vclk_table = 1702 &dpm_context->dpm_tables.vclk_table; 1703 struct smu_13_0_dpm_table *dclk_table = 1704 &dpm_context->dpm_tables.dclk_table; 1705 struct smu_13_0_dpm_table *fclk_table = 1706 &dpm_context->dpm_tables.fclk_table; 1707 struct smu_umd_pstate_table *pstate_table = 1708 &smu->pstate_table; 1709 struct amdgpu_device *adev = smu->adev; 1710 uint32_t sclk_min = 0, sclk_max = 0; 1711 uint32_t mclk_min = 0, mclk_max = 0; 1712 uint32_t socclk_min = 0, socclk_max = 0; 1713 uint32_t vclk_min = 0, vclk_max = 0; 1714 uint32_t dclk_min = 0, dclk_max = 0; 1715 uint32_t fclk_min = 0, fclk_max = 0; 1716 int ret = 0, i; 1717 1718 switch (level) { 1719 case AMD_DPM_FORCED_LEVEL_HIGH: 1720 sclk_min = sclk_max = gfx_table->max; 1721 mclk_min = mclk_max = mem_table->max; 1722 socclk_min = socclk_max = soc_table->max; 1723 vclk_min = vclk_max = vclk_table->max; 1724 dclk_min = dclk_max = dclk_table->max; 1725 fclk_min = fclk_max = fclk_table->max; 1726 break; 1727 case AMD_DPM_FORCED_LEVEL_LOW: 1728 sclk_min = sclk_max = gfx_table->min; 1729 mclk_min = mclk_max = mem_table->min; 1730 socclk_min = socclk_max = soc_table->min; 1731 vclk_min = vclk_max = vclk_table->min; 1732 dclk_min = dclk_max = dclk_table->min; 1733 fclk_min = fclk_max = fclk_table->min; 1734 break; 1735 case AMD_DPM_FORCED_LEVEL_AUTO: 1736 sclk_min = gfx_table->min; 1737 sclk_max = gfx_table->max; 1738 mclk_min = mem_table->min; 1739 mclk_max = mem_table->max; 1740 socclk_min = soc_table->min; 1741 socclk_max = soc_table->max; 1742 vclk_min = vclk_table->min; 1743 vclk_max = vclk_table->max; 1744 dclk_min = dclk_table->min; 1745 dclk_max = dclk_table->max; 1746 fclk_min = fclk_table->min; 1747 fclk_max = fclk_table->max; 1748 break; 1749 case 
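/*
 * Note (illustrative): the level-to-range policy of this switch is:
 *   HIGH         min = max = dpm table max (pin each domain to its peak)
 *   LOW          min = max = dpm table min
 *   AUTO         [table min, table max] (full range, SMU decides)
 *   PROFILE_*    min = max = the matching pstate_table entry
 *   MANUAL/EXIT  no change, return 0
 */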
AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: 1750 sclk_min = sclk_max = pstate_table->gfxclk_pstate.standard; 1751 mclk_min = mclk_max = pstate_table->uclk_pstate.standard; 1752 socclk_min = socclk_max = pstate_table->socclk_pstate.standard; 1753 vclk_min = vclk_max = pstate_table->vclk_pstate.standard; 1754 dclk_min = dclk_max = pstate_table->dclk_pstate.standard; 1755 fclk_min = fclk_max = pstate_table->fclk_pstate.standard; 1756 break; 1757 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: 1758 sclk_min = sclk_max = pstate_table->gfxclk_pstate.min; 1759 break; 1760 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: 1761 mclk_min = mclk_max = pstate_table->uclk_pstate.min; 1762 break; 1763 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: 1764 sclk_min = sclk_max = pstate_table->gfxclk_pstate.peak; 1765 mclk_min = mclk_max = pstate_table->uclk_pstate.peak; 1766 socclk_min = socclk_max = pstate_table->socclk_pstate.peak; 1767 vclk_min = vclk_max = pstate_table->vclk_pstate.peak; 1768 dclk_min = dclk_max = pstate_table->dclk_pstate.peak; 1769 fclk_min = fclk_max = pstate_table->fclk_pstate.peak; 1770 break; 1771 case AMD_DPM_FORCED_LEVEL_MANUAL: 1772 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: 1773 return 0; 1774 default: 1775 dev_err(adev->dev, "Invalid performance level %d\n", level); 1776 return -EINVAL; 1777 } 1778 1779 /* 1780 * Unset those settings for SMU 13.0.2. As soft limits settings 1781 * for those clock domains are not supported. 1782 */ 1783 if (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2)) { 1784 mclk_min = mclk_max = 0; 1785 socclk_min = socclk_max = 0; 1786 vclk_min = vclk_max = 0; 1787 dclk_min = dclk_max = 0; 1788 fclk_min = fclk_max = 0; 1789 } 1790 1791 if (sclk_min && sclk_max) { 1792 ret = smu_v13_0_set_soft_freq_limited_range(smu, 1793 SMU_GFXCLK, 1794 sclk_min, 1795 sclk_max); 1796 if (ret) 1797 return ret; 1798 1799 pstate_table->gfxclk_pstate.curr.min = sclk_min; 1800 pstate_table->gfxclk_pstate.curr.max = sclk_max; 1801 } 1802 1803 if (mclk_min && mclk_max) { 1804 ret = smu_v13_0_set_soft_freq_limited_range(smu, 1805 SMU_MCLK, 1806 mclk_min, 1807 mclk_max); 1808 if (ret) 1809 return ret; 1810 1811 pstate_table->uclk_pstate.curr.min = mclk_min; 1812 pstate_table->uclk_pstate.curr.max = mclk_max; 1813 } 1814 1815 if (socclk_min && socclk_max) { 1816 ret = smu_v13_0_set_soft_freq_limited_range(smu, 1817 SMU_SOCCLK, 1818 socclk_min, 1819 socclk_max); 1820 if (ret) 1821 return ret; 1822 1823 pstate_table->socclk_pstate.curr.min = socclk_min; 1824 pstate_table->socclk_pstate.curr.max = socclk_max; 1825 } 1826 1827 if (vclk_min && vclk_max) { 1828 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { 1829 if (adev->vcn.harvest_config & (1 << i)) 1830 continue; 1831 ret = smu_v13_0_set_soft_freq_limited_range(smu, 1832 i ? SMU_VCLK1 : SMU_VCLK, 1833 vclk_min, 1834 vclk_max); 1835 if (ret) 1836 return ret; 1837 } 1838 pstate_table->vclk_pstate.curr.min = vclk_min; 1839 pstate_table->vclk_pstate.curr.max = vclk_max; 1840 } 1841 1842 if (dclk_min && dclk_max) { 1843 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { 1844 if (adev->vcn.harvest_config & (1 << i)) 1845 continue; 1846 ret = smu_v13_0_set_soft_freq_limited_range(smu, 1847 i ? 
SMU_DCLK1 : SMU_DCLK, 1848 dclk_min, 1849 dclk_max); 1850 if (ret) 1851 return ret; 1852 } 1853 pstate_table->dclk_pstate.curr.min = dclk_min; 1854 pstate_table->dclk_pstate.curr.max = dclk_max; 1855 } 1856 1857 if (fclk_min && fclk_max) { 1858 ret = smu_v13_0_set_soft_freq_limited_range(smu, 1859 SMU_FCLK, 1860 fclk_min, 1861 fclk_max); 1862 if (ret) 1863 return ret; 1864 1865 pstate_table->fclk_pstate.curr.min = fclk_min; 1866 pstate_table->fclk_pstate.curr.max = fclk_max; 1867 } 1868 1869 return ret; 1870} 1871 1872int smu_v13_0_set_power_source(struct smu_context *smu, 1873 enum smu_power_src_type power_src) 1874{ 1875 int pwr_source; 1876 1877 pwr_source = smu_cmn_to_asic_specific_index(smu, 1878 CMN2ASIC_MAPPING_PWR, 1879 (uint32_t)power_src); 1880 if (pwr_source < 0) 1881 return -EINVAL; 1882 1883 return smu_cmn_send_smc_msg_with_param(smu, 1884 SMU_MSG_NotifyPowerSource, 1885 pwr_source, 1886 NULL); 1887} 1888 1889static int smu_v13_0_get_dpm_freq_by_index(struct smu_context *smu, 1890 enum smu_clk_type clk_type, 1891 uint16_t level, 1892 uint32_t *value) 1893{ 1894 int ret = 0, clk_id = 0; 1895 uint32_t param; 1896 1897 if (!value) 1898 return -EINVAL; 1899 1900 if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) 1901 return 0; 1902 1903 clk_id = smu_cmn_to_asic_specific_index(smu, 1904 CMN2ASIC_MAPPING_CLK, 1905 clk_type); 1906 if (clk_id < 0) 1907 return clk_id; 1908 1909 param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff)); 1910 1911 ret = smu_cmn_send_smc_msg_with_param(smu, 1912 SMU_MSG_GetDpmFreqByIndex, 1913 param, 1914 value); 1915 if (ret) 1916 return ret; 1917 1918 *value = *value & 0x7fffffff; 1919 1920 return ret; 1921} 1922 1923static int smu_v13_0_get_dpm_level_count(struct smu_context *smu, 1924 enum smu_clk_type clk_type, 1925 uint32_t *value) 1926{ 1927 int ret; 1928 1929 ret = smu_v13_0_get_dpm_freq_by_index(smu, clk_type, 0xff, value); 1930 /* SMU v13.0.2 FW returns 0 based max level, increment by one for it */ 1931 if ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2)) && (!ret && value)) 1932 ++(*value); 1933 1934 return ret; 1935} 1936 1937static int smu_v13_0_get_fine_grained_status(struct smu_context *smu, 1938 enum smu_clk_type clk_type, 1939 bool *is_fine_grained_dpm) 1940{ 1941 int ret = 0, clk_id = 0; 1942 uint32_t param; 1943 uint32_t value; 1944 1945 if (!is_fine_grained_dpm) 1946 return -EINVAL; 1947 1948 if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) 1949 return 0; 1950 1951 clk_id = smu_cmn_to_asic_specific_index(smu, 1952 CMN2ASIC_MAPPING_CLK, 1953 clk_type); 1954 if (clk_id < 0) 1955 return clk_id; 1956 1957 param = (uint32_t)(((clk_id & 0xffff) << 16) | 0xff); 1958 1959 ret = smu_cmn_send_smc_msg_with_param(smu, 1960 SMU_MSG_GetDpmFreqByIndex, 1961 param, 1962 &value); 1963 if (ret) 1964 return ret; 1965 1966 /* 1967 * BIT31: 1 - Fine grained DPM, 0 - Discrete DPM 1968 * for now, we do not support it 1969 */ 1970 *is_fine_grained_dpm = value & 0x80000000; 1971 1972 return 0; 1973} 1974 1975int smu_v13_0_set_single_dpm_table(struct smu_context *smu, 1976 enum smu_clk_type clk_type, 1977 struct smu_13_0_dpm_table *single_dpm_table) 1978{ 1979 int ret = 0; 1980 uint32_t clk; 1981 int i; 1982 1983 ret = smu_v13_0_get_dpm_level_count(smu, 1984 clk_type, 1985 &single_dpm_table->count); 1986 if (ret) { 1987 dev_err(smu->adev->dev, "[%s] failed to get dpm levels!\n", __func__); 1988 return ret; 1989 } 1990 1991 if (smu->adev->ip_versions[MP1_HWIP][0] != IP_VERSION(13, 0, 2)) { 1992 ret = smu_v13_0_get_fine_grained_status(smu, 1993 clk_type,
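/*
 * Illustrative sketch (not part of the driver): SMU_MSG_GetDpmFreqByIndex
 * above doubles as a level-count and fine-grained query:
 *
 *   // param = ((clk_id & 0xffff) << 16) | level
 *   // level 0xff  -> reply is the number of levels (13.0.2 FW returns a
 *   //                0-based max level instead, hence the +1 above)
 *   // reply BIT31 -> fine-grained DPM; a real frequency is recovered
 *   //                with reply & 0x7fffffff (MHz)
 */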
1994 &single_dpm_table->is_fine_grained); 1995 if (ret) { 1996 dev_err(smu->adev->dev, "[%s] failed to get fine grained status!\n", __func__); 1997 return ret; 1998 } 1999 } 2000 2001 for (i = 0; i < single_dpm_table->count; i++) { 2002 ret = smu_v13_0_get_dpm_freq_by_index(smu, 2003 clk_type, 2004 i, 2005 &clk); 2006 if (ret) { 2007 dev_err(smu->adev->dev, "[%s] failed to get dpm freq by index!\n", __func__); 2008 return ret; 2009 } 2010 2011 single_dpm_table->dpm_levels[i].value = clk; 2012 single_dpm_table->dpm_levels[i].enabled = true; 2013 2014 if (i == 0) 2015 single_dpm_table->min = clk; 2016 else if (i == single_dpm_table->count - 1) 2017 single_dpm_table->max = clk; 2018 } 2019 2020 return 0; 2021} 2022 2023int smu_v13_0_get_dpm_level_range(struct smu_context *smu, 2024 enum smu_clk_type clk_type, 2025 uint32_t *min_value, 2026 uint32_t *max_value) 2027{ 2028 uint32_t level_count = 0; 2029 int ret = 0; 2030 2031 if (!min_value && !max_value) 2032 return -EINVAL; 2033 2034 if (min_value) { 2035 /* by default, use the level 0 clock value as the min value */ 2036 ret = smu_v13_0_get_dpm_freq_by_index(smu, 2037 clk_type, 2038 0, 2039 min_value); 2040 if (ret) 2041 return ret; 2042 } 2043 2044 if (max_value) { 2045 ret = smu_v13_0_get_dpm_level_count(smu, 2046 clk_type, 2047 &level_count); 2048 if (ret) 2049 return ret; 2050 2051 ret = smu_v13_0_get_dpm_freq_by_index(smu, 2052 clk_type, 2053 level_count - 1, 2054 max_value); 2055 if (ret) 2056 return ret; 2057 } 2058 2059 return ret; 2060} 2061 2062int smu_v13_0_get_current_pcie_link_width_level(struct smu_context *smu) 2063{ 2064 struct amdgpu_device *adev = smu->adev; 2065 2066 return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) & 2067 PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK) 2068 >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT; 2069} 2070 2071int smu_v13_0_get_current_pcie_link_width(struct smu_context *smu) 2072{ 2073 uint32_t width_level; 2074 2075 width_level = smu_v13_0_get_current_pcie_link_width_level(smu); 2076 if (width_level > LINK_WIDTH_MAX) 2077 width_level = 0; 2078 2079 return link_width[width_level]; 2080} 2081 2082int smu_v13_0_get_current_pcie_link_speed_level(struct smu_context *smu) 2083{ 2084 struct amdgpu_device *adev = smu->adev; 2085 2086 return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) & 2087 PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) 2088 >> PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT; 2089} 2090 2091int smu_v13_0_get_current_pcie_link_speed(struct smu_context *smu) 2092{ 2093 uint32_t speed_level; 2094 2095 speed_level = smu_v13_0_get_current_pcie_link_speed_level(smu); 2096 if (speed_level > LINK_SPEED_MAX) 2097 speed_level = 0; 2098 2099 return link_speed[speed_level]; 2100} 2101 2102int smu_v13_0_set_vcn_enable(struct smu_context *smu, 2103 bool enable) 2104{ 2105 struct amdgpu_device *adev = smu->adev; 2106 int i, ret = 0; 2107 2108 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { 2109 if (adev->vcn.harvest_config & (1 << i)) 2110 continue; 2111 2112 ret = smu_cmn_send_smc_msg_with_param(smu, enable ? 2113 SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn, 2114 i << 16U, NULL); /* VCN instance index in the upper 16 bits */ 2115 if (ret) 2116 return ret; 2117 } 2118 2119 return ret; 2120} 2121 2122int smu_v13_0_set_jpeg_enable(struct smu_context *smu, 2123 bool enable) 2124{ 2125 return smu_cmn_send_smc_msg_with_param(smu, enable ? 
2126 SMU_MSG_PowerUpJpeg : SMU_MSG_PowerDownJpeg, 2127 0, NULL); 2128} 2129 2130int smu_v13_0_run_btc(struct smu_context *smu) 2131{ 2132 int res; 2133 2134 res = smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL); 2135 if (res) 2136 dev_err(smu->adev->dev, "RunDcBtc failed!\n"); 2137 2138 return res; 2139} 2140 2141int smu_v13_0_deep_sleep_control(struct smu_context *smu, 2142 bool enablement) 2143{ 2144 struct amdgpu_device *adev = smu->adev; 2145 int ret = 0; 2146 2147 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_GFXCLK_BIT)) { 2148 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_GFXCLK_BIT, enablement); 2149 if (ret) { 2150 dev_err(adev->dev, "Failed to %s GFXCLK DS!\n", enablement ? "enable" : "disable"); 2151 return ret; 2152 } 2153 } 2154 2155 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_UCLK_BIT)) { 2156 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_UCLK_BIT, enablement); 2157 if (ret) { 2158 dev_err(adev->dev, "Failed to %s UCLK DS!\n", enablement ? "enable" : "disable"); 2159 return ret; 2160 } 2161 } 2162 2163 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_FCLK_BIT)) { 2164 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_FCLK_BIT, enablement); 2165 if (ret) { 2166 dev_err(adev->dev, "Failed to %s FCLK DS!\n", enablement ? "enable" : "disable"); 2167 return ret; 2168 } 2169 } 2170 2171 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_SOCCLK_BIT)) { 2172 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_SOCCLK_BIT, enablement); 2173 if (ret) { 2174 dev_err(adev->dev, "Failed to %s SOCCLK DS!\n", enablement ? "enable" : "disable"); 2175 return ret; 2176 } 2177 } 2178 2179 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_LCLK_BIT)) { 2180 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_LCLK_BIT, enablement); 2181 if (ret) { 2182 dev_err(adev->dev, "Failed to %s LCLK DS!\n", enablement ? "enable" : "disable"); 2183 return ret; 2184 } 2185 } 2186 2187 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_VCN_BIT)) { 2188 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_VCN_BIT, enablement); 2189 if (ret) { 2190 dev_err(adev->dev, "Failed to %s VCN DS!\n", enablement ? "enable" : "disable"); 2191 return ret; 2192 } 2193 } 2194 2195 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_MP0CLK_BIT)) { 2196 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_MP0CLK_BIT, enablement); 2197 if (ret) { 2198 dev_err(adev->dev, "Failed to %s MP0/MPIOCLK DS!\n", enablement ? "enable" : "disable"); 2199 return ret; 2200 } 2201 } 2202 2203 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_MP1CLK_BIT)) { 2204 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_MP1CLK_BIT, enablement); 2205 if (ret) { 2206 dev_err(adev->dev, "Failed to %s MP1CLK DS!\n", enablement ? 
"enable" : "disable"); 2207 return ret; 2208 } 2209 } 2210 2211 return ret; 2212} 2213 2214int smu_v13_0_gfx_ulv_control(struct smu_context *smu, 2215 bool enablement) 2216{ 2217 int ret = 0; 2218 2219 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_GFX_ULV_BIT)) 2220 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_GFX_ULV_BIT, enablement); 2221 2222 return ret; 2223} 2224 2225bool smu_v13_0_baco_is_support(struct smu_context *smu) 2226{ 2227 struct smu_baco_context *smu_baco = &smu->smu_baco; 2228 2229 if (amdgpu_sriov_vf(smu->adev) || 2230 !smu_baco->platform_support) 2231 return false; 2232 2233 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) && 2234 !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) 2235 return false; 2236 2237 return true; 2238} 2239 2240enum smu_baco_state smu_v13_0_baco_get_state(struct smu_context *smu) 2241{ 2242 struct smu_baco_context *smu_baco = &smu->smu_baco; 2243 2244 return smu_baco->state; 2245} 2246 2247int smu_v13_0_baco_set_state(struct smu_context *smu, 2248 enum smu_baco_state state) 2249{ 2250 struct smu_baco_context *smu_baco = &smu->smu_baco; 2251 struct amdgpu_device *adev = smu->adev; 2252 int ret = 0; 2253 2254 if (smu_v13_0_baco_get_state(smu) == state) 2255 return 0; 2256 2257 if (state == SMU_BACO_STATE_ENTER) { 2258 ret = smu_cmn_send_smc_msg_with_param(smu, 2259 SMU_MSG_EnterBaco, 2260 0, 2261 NULL); 2262 } else { 2263 ret = smu_cmn_send_smc_msg(smu, 2264 SMU_MSG_ExitBaco, 2265 NULL); 2266 if (ret) 2267 return ret; 2268 2269 /* clear vbios scratch 6 and 7 for coming asic reinit */ 2270 WREG32(adev->bios_scratch_reg_offset + 6, 0); 2271 WREG32(adev->bios_scratch_reg_offset + 7, 0); 2272 } 2273 2274 if (!ret) 2275 smu_baco->state = state; 2276 2277 return ret; 2278} 2279 2280int smu_v13_0_baco_enter(struct smu_context *smu) 2281{ 2282 int ret = 0; 2283 2284 ret = smu_v13_0_baco_set_state(smu, 2285 SMU_BACO_STATE_ENTER); 2286 if (ret) 2287 return ret; 2288 2289 msleep(10); 2290 2291 return ret; 2292} 2293 2294int smu_v13_0_baco_exit(struct smu_context *smu) 2295{ 2296 return smu_v13_0_baco_set_state(smu, 2297 SMU_BACO_STATE_EXIT); 2298} 2299 2300int smu_v13_0_od_edit_dpm_table(struct smu_context *smu, 2301 enum PP_OD_DPM_TABLE_COMMAND type, 2302 long input[], uint32_t size) 2303{ 2304 struct smu_dpm_context *smu_dpm = &(smu->smu_dpm); 2305 int ret = 0; 2306 2307 /* Only allowed in manual mode */ 2308 if (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) 2309 return -EINVAL; 2310 2311 switch (type) { 2312 case PP_OD_EDIT_SCLK_VDDC_TABLE: 2313 if (size != 2) { 2314 dev_err(smu->adev->dev, "Input parameter number not correct\n"); 2315 return -EINVAL; 2316 } 2317 2318 if (input[0] == 0) { 2319 if (input[1] < smu->gfx_default_hard_min_freq) { 2320 dev_warn(smu->adev->dev, 2321 "Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n", 2322 input[1], smu->gfx_default_hard_min_freq); 2323 return -EINVAL; 2324 } 2325 smu->gfx_actual_hard_min_freq = input[1]; 2326 } else if (input[0] == 1) { 2327 if (input[1] > smu->gfx_default_soft_max_freq) { 2328 dev_warn(smu->adev->dev, 2329 "Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n", 2330 input[1], smu->gfx_default_soft_max_freq); 2331 return -EINVAL; 2332 } 2333 smu->gfx_actual_soft_max_freq = input[1]; 2334 } else { 2335 return -EINVAL; 2336 } 2337 break; 2338 case PP_OD_RESTORE_DEFAULT_TABLE: 2339 if (size != 0) { 2340 dev_err(smu->adev->dev, "Input parameter number not correct\n"); 2341 return -EINVAL; 2342 } 
2343 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq; 2344 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq; 2345 break; 2346 case PP_OD_COMMIT_DPM_TABLE: 2347 if (size != 0) { 2348 dev_err(smu->adev->dev, "Incorrect number of input parameters\n"); 2349 return -EINVAL; 2350 } 2351 if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) { 2352 dev_err(smu->adev->dev, 2353 "The requested minimum sclk (%d MHz) is greater than the requested maximum sclk (%d MHz)\n", 2354 smu->gfx_actual_hard_min_freq, 2355 smu->gfx_actual_soft_max_freq); 2356 return -EINVAL; 2357 } 2358 2359 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, 2360 smu->gfx_actual_hard_min_freq, 2361 NULL); 2362 if (ret) { 2363 dev_err(smu->adev->dev, "Set hard min sclk failed!\n"); 2364 return ret; 2365 } 2366 2367 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, 2368 smu->gfx_actual_soft_max_freq, 2369 NULL); 2370 if (ret) { 2371 dev_err(smu->adev->dev, "Set soft max sclk failed!\n"); 2372 return ret; 2373 } 2374 break; 2375 default: 2376 return -ENOSYS; 2377 } 2378 2379 return ret; 2380} 2381 2382int smu_v13_0_set_default_dpm_tables(struct smu_context *smu) 2383{ 2384 struct smu_table_context *smu_table = &smu->smu_table; 2385 2386 return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, 2387 smu_table->clocks_table, false); 2388}
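/*
 * Illustrative sketch only, not part of the driver: a hypothetical in-file
 * helper showing how the static DPM query helpers above compose.
 * smu_v13_0_get_dpm_level_count() queries level 0xff (the highest level,
 * with the +1 fixup applied on SMU v13.0.2), and
 * smu_v13_0_get_dpm_freq_by_index() packs the ASIC clock id into the upper
 * 16 bits of the message argument and the level index into the lower 16
 * bits, masking BIT31 (the fine grained DPM flag) off the reply. The
 * function name below is invented for illustration and the values printed
 * are the clock levels as reported by the firmware; the block is guarded
 * by #if 0, matching this file's convention for dormant code.
 */
#if 0
static void example_dump_gfxclk_levels(struct smu_context *smu)
{
	uint32_t count = 0, freq = 0, i;

	/* number of GFXCLK DPM levels exposed by the firmware */
	if (smu_v13_0_get_dpm_level_count(smu, SMU_GFXCLK, &count))
		return;

	for (i = 0; i < count; i++) {
		/* freq comes back with the fine grained flag already stripped */
		if (smu_v13_0_get_dpm_freq_by_index(smu, SMU_GFXCLK, i, &freq))
			return;
		dev_info(smu->adev->dev, "GFXCLK level %u: %u\n", i, freq);
	}
}
#endif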