amdgpu_dm_helpers.c (25382B)
1/* 2 * Copyright 2015 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 
 *
 * Authors: AMD
 *
 */

#include <linux/string.h>
#include <linux/acpi.h>
#include <linux/i2c.h>

#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_edid.h>

#include "dm_services.h"
#include "amdgpu.h"
#include "dc.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"
#include "amdgpu_dm_mst_types.h"

#include "dm_helpers.h"
#include "ddc_service_types.h"

/* One entry of the per-monitor quirk table: a (manufacturer, product) pair
 * and the patch callback to run on the parsed EDID caps for that panel.
 */
struct monitor_patch_info {
	unsigned int manufacturer_id;
	unsigned int product_id;
	void (*patch_func)(struct dc_edid_caps *edid_caps, unsigned int param);
	unsigned int patch_param;
};
static void set_max_dsc_bpp_limit(struct dc_edid_caps *edid_caps, unsigned int param);

/* Known panels that need a DSC target-bpp cap (IDs as read from the EDID). */
static const struct monitor_patch_info monitor_patch_table[] = {
{0x6D1E, 0x5BBF, set_max_dsc_bpp_limit, 15},
{0x6D1E, 0x5B9A, set_max_dsc_bpp_limit, 15},
};

/* Quirk callback: clamp the panel's max DSC target bpp to @param. */
static void set_max_dsc_bpp_limit(struct dc_edid_caps *edid_caps, unsigned int param)
{
	if (edid_caps)
		edid_caps->panel_patch.max_dsc_target_bpp_limit = param;
}

/* Apply every quirk-table entry matching this EDID's manufacturer/product id.
 * Returns the number of patches applied (0 if the panel is not in the table).
 */
static int amdgpu_dm_patch_edid_caps(struct dc_edid_caps *edid_caps)
{
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(monitor_patch_table); i++)
		if ((edid_caps->manufacturer_id == monitor_patch_table[i].manufacturer_id)
			&& (edid_caps->product_id == monitor_patch_table[i].product_id)) {
			monitor_patch_table[i].patch_func(edid_caps, monitor_patch_table[i].patch_param);
			ret++;
		}

	return ret;
}

/* dm_helpers_parse_edid_caps
 *
 * Parse edid caps
 *
 * @edid: [in] pointer to edid
 * edid_caps: [in] pointer to edid caps
 * @return
 * void
 * */
enum dc_edid_status dm_helpers_parse_edid_caps(
		struct dc_link *link,
		const struct dc_edid *edid,
		struct dc_edid_caps *edid_caps)
{
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct drm_connector *connector = &aconnector->base;
	struct edid *edid_buf = edid ? (struct edid *) edid->raw_edid : NULL;
	struct cea_sad *sads;
	int sad_count = -1;
	int sadb_count = -1;
	int i = 0;
	uint8_t *sadb = NULL;

	enum dc_edid_status result = EDID_OK;

	if (!edid_caps || !edid)
		return EDID_BAD_INPUT;

	/* A bad checksum is remembered but parsing continues; the caller
	 * (dm_helpers_read_local_edid) uses the status to decide on a retry.
	 */
	if (!drm_edid_is_valid(edid_buf))
		result = EDID_BAD_CHECKSUM;

	/* EDID stores these ids little-endian, byte-by-byte. */
	edid_caps->manufacturer_id = (uint16_t) edid_buf->mfg_id[0] |
			((uint16_t) edid_buf->mfg_id[1])<<8;
	edid_caps->product_id = (uint16_t) edid_buf->prod_code[0] |
			((uint16_t) edid_buf->prod_code[1])<<8;
	edid_caps->serial_number = edid_buf->serial;
	edid_caps->manufacture_week = edid_buf->mfg_week;
	edid_caps->manufacture_year = edid_buf->mfg_year;

	drm_edid_get_monitor_name(edid_buf,
			edid_caps->display_name,
			AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);

	edid_caps->edid_hdmi = connector->display_info.is_hdmi;

	/* Short Audio Descriptors: allocated by the DRM helper, freed below. */
	sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
	if (sad_count <= 0)
		return result;

	edid_caps->audio_mode_count = sad_count < DC_MAX_AUDIO_DESC_COUNT ? sad_count : DC_MAX_AUDIO_DESC_COUNT;
	for (i = 0; i < edid_caps->audio_mode_count; ++i) {
		struct cea_sad *sad = &sads[i];

		edid_caps->audio_modes[i].format_code = sad->format;
		edid_caps->audio_modes[i].channel_count = sad->channels + 1;
		edid_caps->audio_modes[i].sample_rate = sad->freq;
		edid_caps->audio_modes[i].sample_size = sad->byte2;
	}

	/* Speaker Allocation Data Block; fall back to default placement. */
	sadb_count = drm_edid_to_speaker_allocation((struct edid *) edid->raw_edid, &sadb);

	if (sadb_count < 0) {
		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sadb_count);
		sadb_count = 0;
	}

	if (sadb_count)
		edid_caps->speaker_flags = sadb[0];
	else
		edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION;

	kfree(sads);
	kfree(sadb);

	amdgpu_dm_patch_edid_caps(edid_caps);

	return result;
}

/* Copy the DRM MST payload table (LOCAL/REMOTE entries only) into the DC
 * proposed_table, preserving the slot allocation order, under payload_lock.
 */
static void get_payload_table(
		struct amdgpu_dm_connector *aconnector,
		struct dp_mst_stream_allocation_table *proposed_table)
{
	int i;
	struct drm_dp_mst_topology_mgr *mst_mgr =
			&aconnector->mst_port->mst_mgr;

	mutex_lock(&mst_mgr->payload_lock);

	proposed_table->stream_count = 0;

	/* number of active streams */
	for (i = 0; i < mst_mgr->max_payloads; i++) {
		if (mst_mgr->payloads[i].num_slots == 0)
			break; /* end of vcp_id table */

		ASSERT(mst_mgr->payloads[i].payload_state !=
				DP_PAYLOAD_DELETE_LOCAL);

		if (mst_mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL ||
			mst_mgr->payloads[i].payload_state ==
					DP_PAYLOAD_REMOTE) {

			struct dp_mst_stream_allocation *sa =
					&proposed_table->stream_allocations[
						proposed_table->stream_count];

			sa->slot_count = mst_mgr->payloads[i].num_slots;
			sa->vcp_id = mst_mgr->proposed_vcpis[i]->vcpi;
			proposed_table->stream_count++;
		}
	}

	mutex_unlock(&mst_mgr->payload_lock);
}

/* Intentionally empty: no branch-info bookkeeping needed on this platform. */
void dm_helpers_dp_update_branch_info(
	struct dc_context *ctx,
	const struct dc_link *link)
{}
197 198/* 199 * Writes payload allocation table in immediate downstream device. 200 */ 201bool dm_helpers_dp_mst_write_payload_allocation_table( 202 struct dc_context *ctx, 203 const struct dc_stream_state *stream, 204 struct dp_mst_stream_allocation_table *proposed_table, 205 bool enable) 206{ 207 struct amdgpu_dm_connector *aconnector; 208 struct dm_connector_state *dm_conn_state; 209 struct drm_dp_mst_topology_mgr *mst_mgr; 210 struct drm_dp_mst_port *mst_port; 211 bool ret; 212 u8 link_coding_cap = DP_8b_10b_ENCODING; 213 214 aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; 215 /* Accessing the connector state is required for vcpi_slots allocation 216 * and directly relies on behaviour in commit check 217 * that blocks before commit guaranteeing that the state 218 * is not gonna be swapped while still in use in commit tail */ 219 220 if (!aconnector || !aconnector->mst_port) 221 return false; 222 223 dm_conn_state = to_dm_connector_state(aconnector->base.state); 224 225 mst_mgr = &aconnector->mst_port->mst_mgr; 226 227 if (!mst_mgr->mst_state) 228 return false; 229 230 mst_port = aconnector->port; 231 232#if defined(CONFIG_DRM_AMD_DC_DCN) 233 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link); 234#endif 235 236 if (enable) { 237 238 ret = drm_dp_mst_allocate_vcpi(mst_mgr, mst_port, 239 dm_conn_state->pbn, 240 dm_conn_state->vcpi_slots); 241 if (!ret) 242 return false; 243 244 } else { 245 drm_dp_mst_reset_vcpi_slots(mst_mgr, mst_port); 246 } 247 248 /* It's OK for this to fail */ 249 drm_dp_update_payload_part1(mst_mgr, (link_coding_cap == DP_CAP_ANSI_128B132B) ? 0:1); 250 251 /* mst_mgr->->payloads are VC payload notify MST branch using DPCD or 252 * AUX message. The sequence is slot 1-63 allocated sequence for each 253 * stream. AMD ASIC stream slot allocation should follow the same 254 * sequence. 
copy DRM MST allocation to dc */ 255 256 get_payload_table(aconnector, proposed_table); 257 258 return true; 259} 260 261/* 262 * poll pending down reply 263 */ 264void dm_helpers_dp_mst_poll_pending_down_reply( 265 struct dc_context *ctx, 266 const struct dc_link *link) 267{} 268 269/* 270 * Clear payload allocation table before enable MST DP link. 271 */ 272void dm_helpers_dp_mst_clear_payload_allocation_table( 273 struct dc_context *ctx, 274 const struct dc_link *link) 275{} 276 277/* 278 * Polls for ACT (allocation change trigger) handled and sends 279 * ALLOCATE_PAYLOAD message. 280 */ 281enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger( 282 struct dc_context *ctx, 283 const struct dc_stream_state *stream) 284{ 285 struct amdgpu_dm_connector *aconnector; 286 struct drm_dp_mst_topology_mgr *mst_mgr; 287 int ret; 288 289 aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; 290 291 if (!aconnector || !aconnector->mst_port) 292 return ACT_FAILED; 293 294 mst_mgr = &aconnector->mst_port->mst_mgr; 295 296 if (!mst_mgr->mst_state) 297 return ACT_FAILED; 298 299 ret = drm_dp_check_act_status(mst_mgr); 300 301 if (ret) 302 return ACT_FAILED; 303 304 return ACT_SUCCESS; 305} 306 307bool dm_helpers_dp_mst_send_payload_allocation( 308 struct dc_context *ctx, 309 const struct dc_stream_state *stream, 310 bool enable) 311{ 312 struct amdgpu_dm_connector *aconnector; 313 struct drm_dp_mst_topology_mgr *mst_mgr; 314 struct drm_dp_mst_port *mst_port; 315 316 aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; 317 318 if (!aconnector || !aconnector->mst_port) 319 return false; 320 321 mst_port = aconnector->port; 322 323 mst_mgr = &aconnector->mst_port->mst_mgr; 324 325 if (!mst_mgr->mst_state) 326 return false; 327 328 /* It's OK for this to fail */ 329 drm_dp_update_payload_part2(mst_mgr); 330 331 if (!enable) 332 drm_dp_mst_deallocate_vcpi(mst_mgr, mst_port); 333 334 return true; 335} 336 337void 
dm_dtn_log_begin(struct dc_context *ctx, 338 struct dc_log_buffer_ctx *log_ctx) 339{ 340 static const char msg[] = "[dtn begin]\n"; 341 342 if (!log_ctx) { 343 pr_info("%s", msg); 344 return; 345 } 346 347 dm_dtn_log_append_v(ctx, log_ctx, "%s", msg); 348} 349 350__printf(3, 4) 351void dm_dtn_log_append_v(struct dc_context *ctx, 352 struct dc_log_buffer_ctx *log_ctx, 353 const char *msg, ...) 354{ 355 va_list args; 356 size_t total; 357 int n; 358 359 if (!log_ctx) { 360 /* No context, redirect to dmesg. */ 361 struct va_format vaf; 362 363 vaf.fmt = msg; 364 vaf.va = &args; 365 366 va_start(args, msg); 367 pr_info("%pV", &vaf); 368 va_end(args); 369 370 return; 371 } 372 373 /* Measure the output. */ 374 va_start(args, msg); 375 n = vsnprintf(NULL, 0, msg, args); 376 va_end(args); 377 378 if (n <= 0) 379 return; 380 381 /* Reallocate the string buffer as needed. */ 382 total = log_ctx->pos + n + 1; 383 384 if (total > log_ctx->size) { 385 char *buf = (char *)kvcalloc(total, sizeof(char), GFP_KERNEL); 386 387 if (buf) { 388 memcpy(buf, log_ctx->buf, log_ctx->pos); 389 kfree(log_ctx->buf); 390 391 log_ctx->buf = buf; 392 log_ctx->size = total; 393 } 394 } 395 396 if (!log_ctx->buf) 397 return; 398 399 /* Write the formatted string to the log buffer. 
*/ 400 va_start(args, msg); 401 n = vscnprintf( 402 log_ctx->buf + log_ctx->pos, 403 log_ctx->size - log_ctx->pos, 404 msg, 405 args); 406 va_end(args); 407 408 if (n > 0) 409 log_ctx->pos += n; 410} 411 412void dm_dtn_log_end(struct dc_context *ctx, 413 struct dc_log_buffer_ctx *log_ctx) 414{ 415 static const char msg[] = "[dtn end]\n"; 416 417 if (!log_ctx) { 418 pr_info("%s", msg); 419 return; 420 } 421 422 dm_dtn_log_append_v(ctx, log_ctx, "%s", msg); 423} 424 425bool dm_helpers_dp_mst_start_top_mgr( 426 struct dc_context *ctx, 427 const struct dc_link *link, 428 bool boot) 429{ 430 struct amdgpu_dm_connector *aconnector = link->priv; 431 432 if (!aconnector) { 433 DRM_ERROR("Failed to find connector for link!"); 434 return false; 435 } 436 437 if (boot) { 438 DRM_INFO("DM_MST: Differing MST start on aconnector: %p [id: %d]\n", 439 aconnector, aconnector->base.base.id); 440 return true; 441 } 442 443 DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n", 444 aconnector, aconnector->base.base.id); 445 446 return (drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true) == 0); 447} 448 449bool dm_helpers_dp_mst_stop_top_mgr( 450 struct dc_context *ctx, 451 struct dc_link *link) 452{ 453 struct amdgpu_dm_connector *aconnector = link->priv; 454 uint8_t i; 455 456 if (!aconnector) { 457 DRM_ERROR("Failed to find connector for link!"); 458 return false; 459 } 460 461 DRM_INFO("DM_MST: stopping TM on aconnector: %p [id: %d]\n", 462 aconnector, aconnector->base.base.id); 463 464 if (aconnector->mst_mgr.mst_state == true) { 465 drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, false); 466 467 for (i = 0; i < MAX_SINKS_PER_LINK; i++) { 468 if (link->remote_sinks[i] == NULL) 469 continue; 470 471 if (link->remote_sinks[i]->sink_signal == 472 SIGNAL_TYPE_DISPLAY_PORT_MST) { 473 dc_link_remove_remote_sink(link, link->remote_sinks[i]); 474 475 if (aconnector->dc_sink) { 476 dc_sink_release(aconnector->dc_sink); 477 aconnector->dc_sink = NULL; 478 
aconnector->dc_link->cur_link_settings.lane_count = 0; 479 } 480 } 481 } 482 } 483 484 return false; 485} 486 487bool dm_helpers_dp_read_dpcd( 488 struct dc_context *ctx, 489 const struct dc_link *link, 490 uint32_t address, 491 uint8_t *data, 492 uint32_t size) 493{ 494 495 struct amdgpu_dm_connector *aconnector = link->priv; 496 497 if (!aconnector) { 498 DC_LOG_DC("Failed to find connector for link!\n"); 499 return false; 500 } 501 502 return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address, 503 data, size) > 0; 504} 505 506bool dm_helpers_dp_write_dpcd( 507 struct dc_context *ctx, 508 const struct dc_link *link, 509 uint32_t address, 510 const uint8_t *data, 511 uint32_t size) 512{ 513 struct amdgpu_dm_connector *aconnector = link->priv; 514 515 if (!aconnector) { 516 DRM_ERROR("Failed to find connector for link!"); 517 return false; 518 } 519 520 return drm_dp_dpcd_write(&aconnector->dm_dp_aux.aux, 521 address, (uint8_t *)data, size) > 0; 522} 523 524bool dm_helpers_submit_i2c( 525 struct dc_context *ctx, 526 const struct dc_link *link, 527 struct i2c_command *cmd) 528{ 529 struct amdgpu_dm_connector *aconnector = link->priv; 530 struct i2c_msg *msgs; 531 int i = 0; 532 int num = cmd->number_of_payloads; 533 bool result; 534 535 if (!aconnector) { 536 DRM_ERROR("Failed to find connector for link!"); 537 return false; 538 } 539 540 msgs = kcalloc(num, sizeof(struct i2c_msg), GFP_KERNEL); 541 542 if (!msgs) 543 return false; 544 545 for (i = 0; i < num; i++) { 546 msgs[i].flags = cmd->payloads[i].write ? 
0 : I2C_M_RD; 547 msgs[i].addr = cmd->payloads[i].address; 548 msgs[i].len = cmd->payloads[i].length; 549 msgs[i].buf = cmd->payloads[i].data; 550 } 551 552 result = i2c_transfer(&aconnector->i2c->base, msgs, num) == num; 553 554 kfree(msgs); 555 556 return result; 557} 558 559#if defined(CONFIG_DRM_AMD_DC_DCN) 560static bool execute_synaptics_rc_command(struct drm_dp_aux *aux, 561 bool is_write_cmd, 562 unsigned char cmd, 563 unsigned int length, 564 unsigned int offset, 565 unsigned char *data) 566{ 567 bool success = false; 568 unsigned char rc_data[16] = {0}; 569 unsigned char rc_offset[4] = {0}; 570 unsigned char rc_length[2] = {0}; 571 unsigned char rc_cmd = 0; 572 unsigned char rc_result = 0xFF; 573 unsigned char i = 0; 574 uint8_t ret = 0; 575 576 if (is_write_cmd) { 577 // write rc data 578 memmove(rc_data, data, length); 579 ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_DATA, rc_data, sizeof(rc_data)); 580 } 581 582 // write rc offset 583 rc_offset[0] = (unsigned char) offset & 0xFF; 584 rc_offset[1] = (unsigned char) (offset >> 8) & 0xFF; 585 rc_offset[2] = (unsigned char) (offset >> 16) & 0xFF; 586 rc_offset[3] = (unsigned char) (offset >> 24) & 0xFF; 587 ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_OFFSET, rc_offset, sizeof(rc_offset)); 588 589 // write rc length 590 rc_length[0] = (unsigned char) length & 0xFF; 591 rc_length[1] = (unsigned char) (length >> 8) & 0xFF; 592 ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_LENGTH, rc_length, sizeof(rc_length)); 593 594 // write rc cmd 595 rc_cmd = cmd | 0x80; 596 ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd)); 597 598 if (ret < 0) { 599 DRM_ERROR(" execute_synaptics_rc_command - write cmd ..., err = %d\n", ret); 600 return false; 601 } 602 603 // poll until active is 0 604 for (i = 0; i < 10; i++) { 605 drm_dp_dpcd_read(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd)); 606 if (rc_cmd == cmd) 607 // active is 0 608 break; 609 msleep(10); 610 } 611 612 // read rc result 613 
drm_dp_dpcd_read(aux, SYNAPTICS_RC_RESULT, &rc_result, sizeof(rc_result)); 614 success = (rc_result == 0); 615 616 if (success && !is_write_cmd) { 617 // read rc data 618 drm_dp_dpcd_read(aux, SYNAPTICS_RC_DATA, data, length); 619 } 620 621 DC_LOG_DC(" execute_synaptics_rc_command - success = %d\n", success); 622 623 return success; 624} 625 626static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux) 627{ 628 unsigned char data[16] = {0}; 629 630 DC_LOG_DC("Start apply_synaptics_fifo_reset_wa\n"); 631 632 // Step 2 633 data[0] = 'P'; 634 data[1] = 'R'; 635 data[2] = 'I'; 636 data[3] = 'U'; 637 data[4] = 'S'; 638 639 if (!execute_synaptics_rc_command(aux, true, 0x01, 5, 0, data)) 640 return; 641 642 // Step 3 and 4 643 if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220998, data)) 644 return; 645 646 data[0] &= (~(1 << 1)); // set bit 1 to 0 647 if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220998, data)) 648 return; 649 650 if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220D98, data)) 651 return; 652 653 data[0] &= (~(1 << 1)); // set bit 1 to 0 654 if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220D98, data)) 655 return; 656 657 if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x221198, data)) 658 return; 659 660 data[0] &= (~(1 << 1)); // set bit 1 to 0 661 if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x221198, data)) 662 return; 663 664 // Step 3 and 5 665 if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220998, data)) 666 return; 667 668 data[0] |= (1 << 1); // set bit 1 to 1 669 if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220998, data)) 670 return; 671 672 if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220D98, data)) 673 return; 674 675 data[0] |= (1 << 1); // set bit 1 to 1 676 return; 677 678 if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x221198, data)) 679 return; 680 681 data[0] |= (1 << 1); // set bit 1 to 1 682 if (!execute_synaptics_rc_command(aux, 
true, 0x21, 4, 0x221198, data)) 683 return; 684 685 // Step 6 686 if (!execute_synaptics_rc_command(aux, true, 0x02, 0, 0, NULL)) 687 return; 688 689 DC_LOG_DC("Done apply_synaptics_fifo_reset_wa\n"); 690} 691 692static uint8_t write_dsc_enable_synaptics_non_virtual_dpcd_mst( 693 struct drm_dp_aux *aux, 694 const struct dc_stream_state *stream, 695 bool enable) 696{ 697 uint8_t ret = 0; 698 699 DC_LOG_DC("Configure DSC to non-virtual dpcd synaptics\n"); 700 701 if (enable) { 702 /* When DSC is enabled on previous boot and reboot with the hub, 703 * there is a chance that Synaptics hub gets stuck during reboot sequence. 704 * Applying a workaround to reset Synaptics SDP fifo before enabling the first stream 705 */ 706 if (!stream->link->link_status.link_active && 707 memcmp(stream->link->dpcd_caps.branch_dev_name, 708 (int8_t *)SYNAPTICS_DEVICE_ID, 4) == 0) 709 apply_synaptics_fifo_reset_wa(aux); 710 711 ret = drm_dp_dpcd_write(aux, DP_DSC_ENABLE, &enable, 1); 712 DRM_INFO("Send DSC enable to synaptics\n"); 713 714 } else { 715 /* Synaptics hub not support virtual dpcd, 716 * external monitor occur garbage while disable DSC, 717 * Disable DSC only when entire link status turn to false, 718 */ 719 if (!stream->link->link_status.link_active) { 720 ret = drm_dp_dpcd_write(aux, DP_DSC_ENABLE, &enable, 1); 721 DRM_INFO("Send DSC disable to synaptics\n"); 722 } 723 } 724 725 return ret; 726} 727#endif 728 729bool dm_helpers_dp_write_dsc_enable( 730 struct dc_context *ctx, 731 const struct dc_stream_state *stream, 732 bool enable) 733{ 734 uint8_t enable_dsc = enable ? 
1 : 0; 735 struct amdgpu_dm_connector *aconnector; 736 uint8_t ret = 0; 737 738 if (!stream) 739 return false; 740 741 if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { 742 aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; 743 744 if (!aconnector->dsc_aux) 745 return false; 746 747#if defined(CONFIG_DRM_AMD_DC_DCN) 748 // apply w/a to synaptics 749 if (needs_dsc_aux_workaround(aconnector->dc_link) && 750 (aconnector->mst_downstream_port_present.byte & 0x7) != 0x3) 751 return write_dsc_enable_synaptics_non_virtual_dpcd_mst( 752 aconnector->dsc_aux, stream, enable_dsc); 753#endif 754 755 ret = drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1); 756 DC_LOG_DC("Send DSC %s to MST RX\n", enable_dsc ? "enable" : "disable"); 757 } 758 759 if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || stream->signal == SIGNAL_TYPE_EDP) { 760#if defined(CONFIG_DRM_AMD_DC_DCN) 761 if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) { 762#endif 763 ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1); 764 DC_LOG_DC("Send DSC %s to SST RX\n", enable_dsc ? "enable" : "disable"); 765#if defined(CONFIG_DRM_AMD_DC_DCN) 766 } else if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) { 767 ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1); 768 DC_LOG_DC("Send DSC %s to DP-HDMI PCON\n", enable_dsc ? 
"enable" : "disable"); 769 } 770#endif 771 } 772 773 return (ret > 0); 774} 775 776bool dm_helpers_is_dp_sink_present(struct dc_link *link) 777{ 778 bool dp_sink_present; 779 struct amdgpu_dm_connector *aconnector = link->priv; 780 781 if (!aconnector) { 782 BUG_ON("Failed to find connector for link!"); 783 return true; 784 } 785 786 mutex_lock(&aconnector->dm_dp_aux.aux.hw_mutex); 787 dp_sink_present = dc_link_is_dp_sink_present(link); 788 mutex_unlock(&aconnector->dm_dp_aux.aux.hw_mutex); 789 return dp_sink_present; 790} 791 792enum dc_edid_status dm_helpers_read_local_edid( 793 struct dc_context *ctx, 794 struct dc_link *link, 795 struct dc_sink *sink) 796{ 797 struct amdgpu_dm_connector *aconnector = link->priv; 798 struct drm_connector *connector = &aconnector->base; 799 struct i2c_adapter *ddc; 800 int retry = 3; 801 enum dc_edid_status edid_status; 802 struct edid *edid; 803 804 if (link->aux_mode) 805 ddc = &aconnector->dm_dp_aux.aux.ddc; 806 else 807 ddc = &aconnector->i2c->base; 808 809 /* some dongles read edid incorrectly the first time, 810 * do check sum and retry to make sure read correct edid. 
811 */ 812 do { 813 814 edid = drm_get_edid(&aconnector->base, ddc); 815 816 /* DP Compliance Test 4.2.2.6 */ 817 if (link->aux_mode && connector->edid_corrupt) 818 drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, connector->real_edid_checksum); 819 820 if (!edid && connector->edid_corrupt) { 821 connector->edid_corrupt = false; 822 return EDID_BAD_CHECKSUM; 823 } 824 825 if (!edid) 826 return EDID_NO_RESPONSE; 827 828 sink->dc_edid.length = EDID_LENGTH * (edid->extensions + 1); 829 memmove(sink->dc_edid.raw_edid, (uint8_t *)edid, sink->dc_edid.length); 830 831 /* We don't need the original edid anymore */ 832 kfree(edid); 833 834 edid_status = dm_helpers_parse_edid_caps( 835 link, 836 &sink->dc_edid, 837 &sink->edid_caps); 838 839 } while (edid_status == EDID_BAD_CHECKSUM && --retry > 0); 840 841 if (edid_status != EDID_OK) 842 DRM_ERROR("EDID err: %d, on connector: %s", 843 edid_status, 844 aconnector->base.name); 845 846 /* DP Compliance Test 4.2.2.3 */ 847 if (link->aux_mode) 848 drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, sink->dc_edid.raw_edid[sink->dc_edid.length-1]); 849 850 return edid_status; 851} 852int dm_helper_dmub_aux_transfer_sync( 853 struct dc_context *ctx, 854 const struct dc_link *link, 855 struct aux_payload *payload, 856 enum aux_return_code_type *operation_result) 857{ 858 return amdgpu_dm_process_dmub_aux_transfer_sync(true, ctx, 859 link->link_index, (void *)payload, 860 (void *)operation_result); 861} 862 863int dm_helpers_dmub_set_config_sync(struct dc_context *ctx, 864 const struct dc_link *link, 865 struct set_config_cmd_payload *payload, 866 enum set_config_status *operation_result) 867{ 868 return amdgpu_dm_process_dmub_aux_transfer_sync(false, ctx, 869 link->link_index, (void *)payload, 870 (void *)operation_result); 871} 872 873void dm_set_dcn_clocks(struct dc_context *ctx, struct dc_clocks *clks) 874{ 875 /* TODO: something */ 876} 877 878void dm_helpers_smu_timeout(struct dc_context *ctx, unsigned int 
msg_id, unsigned int param, unsigned int timeout_us) 879{ 880 // TODO: 881 //amdgpu_device_gpu_recover(dc_context->driver-context, NULL); 882} 883 884void *dm_helpers_allocate_gpu_mem( 885 struct dc_context *ctx, 886 enum dc_gpu_mem_alloc_type type, 887 size_t size, 888 long long *addr) 889{ 890 struct amdgpu_device *adev = ctx->driver_context; 891 struct dal_allocation *da; 892 u32 domain = (type == DC_MEM_ALLOC_TYPE_GART) ? 893 AMDGPU_GEM_DOMAIN_GTT : AMDGPU_GEM_DOMAIN_VRAM; 894 int ret; 895 896 da = kzalloc(sizeof(struct dal_allocation), GFP_KERNEL); 897 if (!da) 898 return NULL; 899 900 ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE, 901 domain, &da->bo, 902 &da->gpu_addr, &da->cpu_ptr); 903 904 *addr = da->gpu_addr; 905 906 if (ret) { 907 kfree(da); 908 return NULL; 909 } 910 911 /* add da to list in dm */ 912 list_add(&da->list, &adev->dm.da_list); 913 914 return da->cpu_ptr; 915} 916 917void dm_helpers_free_gpu_mem( 918 struct dc_context *ctx, 919 enum dc_gpu_mem_alloc_type type, 920 void *pvMem) 921{ 922 struct amdgpu_device *adev = ctx->driver_context; 923 struct dal_allocation *da; 924 925 /* walk the da list in DM */ 926 list_for_each_entry(da, &adev->dm.da_list, list) { 927 if (pvMem == da->cpu_ptr) { 928 amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr); 929 list_del(&da->list); 930 kfree(da); 931 break; 932 } 933 } 934} 935 936bool dm_helpers_dmub_outbox_interrupt_control(struct dc_context *ctx, bool enable) 937{ 938 enum dc_irq_source irq_source; 939 bool ret; 940 941 irq_source = DC_IRQ_SOURCE_DMCUB_OUTBOX; 942 943 ret = dc_interrupt_set(ctx->dc, irq_source, enable); 944 945 DRM_DEBUG_DRIVER("Dmub trace irq %sabling: r=%d\n", 946 enable ? 
"en" : "dis", ret); 947 return ret; 948} 949 950void dm_helpers_mst_enable_stream_features(const struct dc_stream_state *stream) 951{ 952 /* TODO: virtual DPCD */ 953 struct dc_link *link = stream->link; 954 union down_spread_ctrl old_downspread; 955 union down_spread_ctrl new_downspread; 956 957 if (link->aux_access_disabled) 958 return; 959 960 if (!dm_helpers_dp_read_dpcd(link->ctx, link, DP_DOWNSPREAD_CTRL, 961 &old_downspread.raw, 962 sizeof(old_downspread))) 963 return; 964 965 new_downspread.raw = old_downspread.raw; 966 new_downspread.bits.IGNORE_MSA_TIMING_PARAM = 967 (stream->ignore_msa_timing_param) ? 1 : 0; 968 969 if (new_downspread.raw != old_downspread.raw) 970 dm_helpers_dp_write_dpcd(link->ctx, link, DP_DOWNSPREAD_CTRL, 971 &new_downspread.raw, 972 sizeof(new_downspread)); 973} 974 975void dm_set_phyd32clk(struct dc_context *ctx, int freq_khz) 976{ 977 // TODO 978} 979 980void dm_helpers_enable_periodic_detection(struct dc_context *ctx, bool enable) 981{ 982 /* TODO: add periodic detection implementation */ 983}