dc.c (114658B)
1/* 2 * Copyright 2015 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: AMD 23 */ 24 25#include <linux/slab.h> 26#include <linux/mm.h> 27 28#include "dm_services.h" 29 30#include "dc.h" 31 32#include "core_status.h" 33#include "core_types.h" 34#include "hw_sequencer.h" 35#include "dce/dce_hwseq.h" 36 37#include "resource.h" 38 39#include "clk_mgr.h" 40#include "clock_source.h" 41#include "dc_bios_types.h" 42 43#include "bios_parser_interface.h" 44#include "bios/bios_parser_helper.h" 45#include "include/irq_service_interface.h" 46#include "transform.h" 47#include "dmcu.h" 48#include "dpp.h" 49#include "timing_generator.h" 50#include "abm.h" 51#include "virtual/virtual_link_encoder.h" 52#include "hubp.h" 53 54#include "link_hwss.h" 55#include "link_encoder.h" 56#include "link_enc_cfg.h" 57 58#include "dc_link.h" 59#include "dc_link_ddc.h" 60#include "dm_helpers.h" 61#include "mem_input.h" 62 63#include "dc_link_dp.h" 64#include "dc_dmub_srv.h" 65 66#include "dsc.h" 67 68#include "vm_helper.h" 69 70#include "dce/dce_i2c.h" 71 72#include "dmub/dmub_srv.h" 73 74#include "i2caux_interface.h" 75#include "dce/dmub_hw_lock_mgr.h" 76 77#include "dc_trace.h" 78 79#include "dce/dmub_outbox.h" 80 81#define CTX \ 82 dc->ctx 83 84#define DC_LOGGER \ 85 dc->ctx->logger 86 87static const char DC_BUILD_ID[] = "production-build"; 88 89/** 90 * DOC: Overview 91 * 92 * DC is the OS-agnostic component of the amdgpu DC driver. 93 * 94 * DC maintains and validates a set of structs representing the state of the 95 * driver and writes that state to AMD hardware 96 * 97 * Main DC HW structs: 98 * 99 * struct dc - The central struct. One per driver. Created on driver load, 100 * destroyed on driver unload. 101 * 102 * struct dc_context - One per driver. 103 * Used as a backpointer by most other structs in dc. 104 * 105 * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP 106 * plugpoints). Created on driver load, destroyed on driver unload. 107 * 108 * struct dc_sink - One per display. Created on boot or hotplug. 109 * Destroyed on shutdown or hotunplug. A dc_link can have a local sink 110 * (the display directly attached). It may also have one or more remote 111 * sinks (in the Multi-Stream Transport case) 112 * 113 * struct resource_pool - One per driver. Represents the hw blocks not in the 114 * main pipeline. Not directly accessible by dm. 115 * 116 * Main dc state structs: 117 * 118 * These structs can be created and destroyed as needed. There is a full set of 119 * these structs in dc->current_state representing the currently programmed state. 120 * 121 * struct dc_state - The global DC state to track global state information, 122 * such as bandwidth values. 123 * 124 * struct dc_stream_state - Represents the hw configuration for the pipeline from 125 * a framebuffer to a display. Maps one-to-one with dc_sink. 126 * 127 * struct dc_plane_state - Represents a framebuffer. Each stream has at least one, 128 * and may have more in the Multi-Plane Overlay case. 129 * 130 * struct resource_context - Represents the programmable state of everything in 131 * the resource_pool. Not directly accessible by dm. 132 * 133 * struct pipe_ctx - A member of struct resource_context. Represents the 134 * internal hardware pipeline components. Each dc_plane_state has either 135 * one or two (in the pipe-split case). 136 */ 137 138/******************************************************************************* 139 * Private functions 140 ******************************************************************************/ 141 142static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new) 143{ 144 if (new > *original) 145 *original = new; 146} 147 148static void destroy_links(struct dc *dc) 149{ 150 uint32_t i; 151 152 for (i = 0; i < dc->link_count; i++) { 153 if (NULL != dc->links[i]) 154 link_destroy(&dc->links[i]); 155 } 156} 157 158static uint32_t get_num_of_internal_disp(struct dc_link **links, uint32_t num_links) 159{ 160 int i; 161 uint32_t count = 0; 162 163 for (i = 0; i < num_links; i++) { 164 if (links[i]->connector_signal == SIGNAL_TYPE_EDP || 165 links[i]->is_internal_display) 166 count++; 167 } 168 169 return count; 170} 171 172static int get_seamless_boot_stream_count(struct dc_state *ctx) 173{ 174 uint8_t i; 175 uint8_t seamless_boot_stream_count = 0; 176 177 for (i = 0; i < ctx->stream_count; i++) 178 if (ctx->streams[i]->apply_seamless_boot_optimization) 179 seamless_boot_stream_count++; 180 181 return seamless_boot_stream_count; 182} 183 184static bool create_links( 185 struct dc *dc, 186 uint32_t num_virtual_links) 187{ 188 int i; 189 int connectors_num; 190 struct dc_bios *bios = dc->ctx->dc_bios; 191 192 dc->link_count = 0; 193 194 connectors_num = bios->funcs->get_connectors_number(bios); 195 196 DC_LOG_DC("BIOS object table - number of connectors: %d", connectors_num); 197 198 if (connectors_num > ENUM_ID_COUNT) { 199 dm_error( 200 "DC: Number of connectors %d exceeds maximum of %d!\n", 201 connectors_num, 202 ENUM_ID_COUNT); 203 return false; 204 } 205 206 dm_output_to_console( 207 "DC: %s: connectors_num: physical:%d, virtual:%d\n", 208 __func__, 209 connectors_num, 210 num_virtual_links); 211 212 for (i = 0; i < connectors_num; i++) { 213 struct link_init_data link_init_params = {0}; 214 struct dc_link *link; 215 216 DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count); 217 218 link_init_params.ctx = dc->ctx; 219 /* next BIOS object table connector */ 220 link_init_params.connector_index = i; 221 link_init_params.link_index = dc->link_count; 222 link_init_params.dc = dc; 223 link = link_create(&link_init_params); 224 225 if (link) { 226 dc->links[dc->link_count] = link; 227 link->dc = dc; 228 ++dc->link_count; 229 } 230 } 231 232 DC_LOG_DC("BIOS object table - end"); 233 234 /* Create a link for each usb4 dpia port */ 235 for (i = 0; i < dc->res_pool->usb4_dpia_count; i++) { 236 struct link_init_data link_init_params = {0}; 237 struct dc_link *link; 238 239 link_init_params.ctx = dc->ctx; 240 link_init_params.connector_index = i; 241 link_init_params.link_index = dc->link_count; 242 link_init_params.dc = dc; 243 link_init_params.is_dpia_link = true; 244 245 link = link_create(&link_init_params); 246 if (link) { 247 dc->links[dc->link_count] = link; 248 link->dc = dc; 249 ++dc->link_count; 250 } 251 } 252 253 for (i = 0; i < num_virtual_links; i++) { 254 struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL); 255 struct encoder_init_data enc_init = {0}; 256 257 if (link == NULL) { 258 BREAK_TO_DEBUGGER(); 259 goto failed_alloc; 260 } 261 262 link->link_index = dc->link_count; 263 dc->links[dc->link_count] = link; 264 dc->link_count++; 265 266 link->ctx = dc->ctx; 267 link->dc = dc; 268 link->connector_signal = SIGNAL_TYPE_VIRTUAL; 269 link->link_id.type = OBJECT_TYPE_CONNECTOR; 270 link->link_id.id = CONNECTOR_ID_VIRTUAL; 271 link->link_id.enum_id = ENUM_ID_1; 272 link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL); 273 274 if (!link->link_enc) { 275 BREAK_TO_DEBUGGER(); 276 goto failed_alloc; 277 } 278 279 link->link_status.dpcd_caps = &link->dpcd_caps; 280 281 enc_init.ctx = dc->ctx; 282 enc_init.channel = CHANNEL_ID_UNKNOWN; 283 enc_init.hpd_source = HPD_SOURCEID_UNKNOWN; 284 enc_init.transmitter = TRANSMITTER_UNKNOWN; 285 enc_init.connector = link->link_id; 286 enc_init.encoder.type = OBJECT_TYPE_ENCODER; 287 enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL; 288 enc_init.encoder.enum_id = ENUM_ID_1; 289 virtual_link_encoder_construct(link->link_enc, &enc_init); 290 } 291 292 dc->caps.num_of_internal_disp = get_num_of_internal_disp(dc->links, dc->link_count); 293 294 return true; 295 296failed_alloc: 297 return false; 298} 299 300/* Create additional DIG link encoder objects if fewer than the platform 301 * supports were created during link construction. This can happen if the 302 * number of physical connectors is less than the number of DIGs. 303 */ 304static bool create_link_encoders(struct dc *dc) 305{ 306 bool res = true; 307 unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia; 308 unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc; 309 int i; 310 311 /* A platform without USB4 DPIA endpoints has a fixed mapping between DIG 312 * link encoders and physical display endpoints and does not require 313 * additional link encoder objects. 314 */ 315 if (num_usb4_dpia == 0) 316 return res; 317 318 /* Create as many link encoder objects as the platform supports. DPIA 319 * endpoints can be programmably mapped to any DIG. 320 */ 321 if (num_dig_link_enc > dc->res_pool->dig_link_enc_count) { 322 for (i = 0; i < num_dig_link_enc; i++) { 323 struct link_encoder *link_enc = dc->res_pool->link_encoders[i]; 324 325 if (!link_enc && dc->res_pool->funcs->link_enc_create_minimal) { 326 link_enc = dc->res_pool->funcs->link_enc_create_minimal(dc->ctx, 327 (enum engine_id)(ENGINE_ID_DIGA + i)); 328 if (link_enc) { 329 dc->res_pool->link_encoders[i] = link_enc; 330 dc->res_pool->dig_link_enc_count++; 331 } else { 332 res = false; 333 } 334 } 335 } 336 } 337 338 return res; 339} 340 341/* Destroy any additional DIG link encoder objects created by 342 * create_link_encoders(). 343 * NB: Must only be called after destroy_links(). 344 */ 345static void destroy_link_encoders(struct dc *dc) 346{ 347 unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia; 348 unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc; 349 int i; 350 351 /* A platform without USB4 DPIA endpoints has a fixed mapping between DIG 352 * link encoders and physical display endpoints and does not require 353 * additional link encoder objects. 354 */ 355 if (num_usb4_dpia == 0) 356 return; 357 358 for (i = 0; i < num_dig_link_enc; i++) { 359 struct link_encoder *link_enc = dc->res_pool->link_encoders[i]; 360 361 if (link_enc) { 362 link_enc->funcs->destroy(&link_enc); 363 dc->res_pool->link_encoders[i] = NULL; 364 dc->res_pool->dig_link_enc_count--; 365 } 366 } 367} 368 369static struct dc_perf_trace *dc_perf_trace_create(void) 370{ 371 return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL); 372} 373 374static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace) 375{ 376 kfree(*perf_trace); 377 *perf_trace = NULL; 378} 379 380/** 381 * dc_stream_adjust_vmin_vmax: 382 * 383 * Looks up the pipe context of dc_stream_state and updates the 384 * vertical_total_min and vertical_total_max of the DRR, Dynamic Refresh 385 * Rate, which is a power-saving feature that targets reducing panel 386 * refresh rate while the screen is static 387 * 388 * @dc: dc reference 389 * @stream: Initial dc stream state 390 * @adjust: Updated parameters for vertical_total_min and vertical_total_max 391 */ 392bool dc_stream_adjust_vmin_vmax(struct dc *dc, 393 struct dc_stream_state *stream, 394 struct dc_crtc_timing_adjust *adjust) 395{ 396 int i; 397 bool ret = false; 398 399 stream->adjust.v_total_max = adjust->v_total_max; 400 stream->adjust.v_total_mid = adjust->v_total_mid; 401 stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num; 402 stream->adjust.v_total_min = adjust->v_total_min; 403 404 for (i = 0; i < MAX_PIPES; i++) { 405 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 406 407 if (pipe->stream == stream && pipe->stream_res.tg) { 408 dc->hwss.set_drr(&pipe, 409 1, 410 *adjust); 411 412 ret = true; 413 } 414 } 415 return ret; 416} 417 418/** 419 ***************************************************************************** 420 * Function: dc_stream_get_last_vrr_vtotal 421 * 422 * @brief 423 * Looks up the pipe context of dc_stream_state and gets the 424 * last VTOTAL used by DRR (Dynamic Refresh Rate) 425 * 426 * @param [in] dc: dc reference 427 * @param [in] stream: Initial dc stream state 428 * @param [in] adjust: Updated parameters for vertical_total_min and 429 * vertical_total_max 430 ***************************************************************************** 431 */ 432bool dc_stream_get_last_used_drr_vtotal(struct dc *dc, 433 struct dc_stream_state *stream, 434 uint32_t *refresh_rate) 435{ 436 bool status = false; 437 438 int i = 0; 439 440 for (i = 0; i < MAX_PIPES; i++) { 441 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 442 443 if (pipe->stream == stream && pipe->stream_res.tg) { 444 /* Only execute if a function pointer has been defined for 445 * the DC version in question 446 */ 447 if (pipe->stream_res.tg->funcs->get_last_used_drr_vtotal) { 448 pipe->stream_res.tg->funcs->get_last_used_drr_vtotal(pipe->stream_res.tg, refresh_rate); 449 450 status = true; 451 452 break; 453 } 454 } 455 } 456 457 return status; 458} 459 460bool dc_stream_get_crtc_position(struct dc *dc, 461 struct dc_stream_state **streams, int num_streams, 462 unsigned int *v_pos, unsigned int *nom_v_pos) 463{ 464 /* TODO: Support multiple streams */ 465 const struct dc_stream_state *stream = streams[0]; 466 int i; 467 bool ret = false; 468 struct crtc_position position; 469 470 for (i = 0; i < MAX_PIPES; i++) { 471 struct pipe_ctx *pipe = 472 &dc->current_state->res_ctx.pipe_ctx[i]; 473 474 if (pipe->stream == stream && pipe->stream_res.stream_enc) { 475 dc->hwss.get_position(&pipe, 1, &position); 476 477 *v_pos = position.vertical_count; 478 *nom_v_pos = position.nominal_vcount; 479 ret = true; 480 } 481 } 482 return ret; 483} 484 485#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 486bool dc_stream_forward_dmcu_crc_window(struct dc *dc, struct dc_stream_state *stream, 487 struct crc_params *crc_window) 488{ 489 int i; 490 struct dmcu *dmcu = dc->res_pool->dmcu; 491 struct pipe_ctx *pipe; 492 struct crc_region tmp_win, *crc_win; 493 struct otg_phy_mux mapping_tmp, *mux_mapping; 494 495 /*crc window can't be null*/ 496 if (!crc_window) 497 return false; 498 499 if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu))) { 500 crc_win = &tmp_win; 501 mux_mapping = &mapping_tmp; 502 /*set crc window*/ 503 tmp_win.x_start = crc_window->windowa_x_start; 504 tmp_win.y_start = crc_window->windowa_y_start; 505 tmp_win.x_end = crc_window->windowa_x_end; 506 tmp_win.y_end = crc_window->windowa_y_end; 507 508 for (i = 0; i < MAX_PIPES; i++) { 509 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 510 if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe) 511 break; 512 } 513 514 /* Stream not found */ 515 if (i == MAX_PIPES) 516 return false; 517 518 519 /*set mux routing info*/ 520 mapping_tmp.phy_output_num = stream->link->link_enc_hw_inst; 521 mapping_tmp.otg_output_num = pipe->stream_res.tg->inst; 522 523 dmcu->funcs->forward_crc_window(dmcu, crc_win, mux_mapping); 524 } else { 525 DC_LOG_DC("dmcu is not initialized"); 526 return false; 527 } 528 529 return true; 530} 531 532bool dc_stream_stop_dmcu_crc_win_update(struct dc *dc, struct dc_stream_state *stream) 533{ 534 int i; 535 struct dmcu *dmcu = dc->res_pool->dmcu; 536 struct pipe_ctx *pipe; 537 struct otg_phy_mux mapping_tmp, *mux_mapping; 538 539 if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu))) { 540 mux_mapping = &mapping_tmp; 541 542 for (i = 0; i < MAX_PIPES; i++) { 543 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 544 if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe) 545 break; 546 } 547 548 /* Stream not found */ 549 if (i == MAX_PIPES) 550 return false; 551 552 553 /*set mux routing info*/ 554 mapping_tmp.phy_output_num = stream->link->link_enc_hw_inst; 555 mapping_tmp.otg_output_num = pipe->stream_res.tg->inst; 556 557 dmcu->funcs->stop_crc_win_update(dmcu, mux_mapping); 558 } else { 559 DC_LOG_DC("dmcu is not initialized"); 560 return false; 561 } 562 563 return true; 564} 565#endif 566 567/** 568 * dc_stream_configure_crc() - Configure CRC capture for the given stream. 569 * @dc: DC Object 570 * @stream: The stream to configure CRC on. 571 * @enable: Enable CRC if true, disable otherwise. 572 * @crc_window: CRC window (x/y start/end) information 573 * @continuous: Capture CRC on every frame if true. Otherwise, only capture 574 * once. 575 * 576 * By default, only CRC0 is configured, and the entire frame is used to 577 * calculate the crc. 578 */ 579bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream, 580 struct crc_params *crc_window, bool enable, bool continuous) 581{ 582 int i; 583 struct pipe_ctx *pipe; 584 struct crc_params param; 585 struct timing_generator *tg; 586 587 for (i = 0; i < MAX_PIPES; i++) { 588 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 589 if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe) 590 break; 591 } 592 /* Stream not found */ 593 if (i == MAX_PIPES) 594 return false; 595 596 /* By default, capture the full frame */ 597 param.windowa_x_start = 0; 598 param.windowa_y_start = 0; 599 param.windowa_x_end = pipe->stream->timing.h_addressable; 600 param.windowa_y_end = pipe->stream->timing.v_addressable; 601 param.windowb_x_start = 0; 602 param.windowb_y_start = 0; 603 param.windowb_x_end = pipe->stream->timing.h_addressable; 604 param.windowb_y_end = pipe->stream->timing.v_addressable; 605 606 if (crc_window) { 607 param.windowa_x_start = crc_window->windowa_x_start; 608 param.windowa_y_start = crc_window->windowa_y_start; 609 param.windowa_x_end = crc_window->windowa_x_end; 610 param.windowa_y_end = crc_window->windowa_y_end; 611 param.windowb_x_start = crc_window->windowb_x_start; 612 param.windowb_y_start = crc_window->windowb_y_start; 613 param.windowb_x_end = crc_window->windowb_x_end; 614 param.windowb_y_end = crc_window->windowb_y_end; 615 } 616 617 param.dsc_mode = pipe->stream->timing.flags.DSC ? 1:0; 618 param.odm_mode = pipe->next_odm_pipe ? 1:0; 619 620 /* Default to the union of both windows */ 621 param.selection = UNION_WINDOW_A_B; 622 param.continuous_mode = continuous; 623 param.enable = enable; 624 625 tg = pipe->stream_res.tg; 626 627 /* Only call if supported */ 628 if (tg->funcs->configure_crc) 629 return tg->funcs->configure_crc(tg, ¶m); 630 DC_LOG_WARNING("CRC capture not supported."); 631 return false; 632} 633 634/** 635 * dc_stream_get_crc() - Get CRC values for the given stream. 636 * @dc: DC object 637 * @stream: The DC stream state of the stream to get CRCs from. 638 * @r_cr: CRC value for the first of the 3 channels stored here. 639 * @g_y: CRC value for the second of the 3 channels stored here. 640 * @b_cb: CRC value for the third of the 3 channels stored here. 641 * 642 * dc_stream_configure_crc needs to be called beforehand to enable CRCs. 643 * Return false if stream is not found, or if CRCs are not enabled. 644 */ 645bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream, 646 uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb) 647{ 648 int i; 649 struct pipe_ctx *pipe; 650 struct timing_generator *tg; 651 652 for (i = 0; i < MAX_PIPES; i++) { 653 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 654 if (pipe->stream == stream) 655 break; 656 } 657 /* Stream not found */ 658 if (i == MAX_PIPES) 659 return false; 660 661 tg = pipe->stream_res.tg; 662 663 if (tg->funcs->get_crc) 664 return tg->funcs->get_crc(tg, r_cr, g_y, b_cb); 665 DC_LOG_WARNING("CRC capture not supported."); 666 return false; 667} 668 669void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream, 670 enum dc_dynamic_expansion option) 671{ 672 /* OPP FMT dyn expansion updates*/ 673 int i; 674 struct pipe_ctx *pipe_ctx; 675 676 for (i = 0; i < MAX_PIPES; i++) { 677 if (dc->current_state->res_ctx.pipe_ctx[i].stream 678 == stream) { 679 pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; 680 pipe_ctx->stream_res.opp->dyn_expansion = option; 681 pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion( 682 pipe_ctx->stream_res.opp, 683 COLOR_SPACE_YCBCR601, 684 stream->timing.display_color_depth, 685 stream->signal); 686 } 687 } 688} 689 690void dc_stream_set_dither_option(struct dc_stream_state *stream, 691 enum dc_dither_option option) 692{ 693 struct bit_depth_reduction_params params; 694 struct dc_link *link = stream->link; 695 struct pipe_ctx *pipes = NULL; 696 int i; 697 698 for (i = 0; i < MAX_PIPES; i++) { 699 if (link->dc->current_state->res_ctx.pipe_ctx[i].stream == 700 stream) { 701 pipes = &link->dc->current_state->res_ctx.pipe_ctx[i]; 702 break; 703 } 704 } 705 706 if (!pipes) 707 return; 708 if (option > DITHER_OPTION_MAX) 709 return; 710 711 stream->dither_option = option; 712 713 memset(¶ms, 0, sizeof(params)); 714 resource_build_bit_depth_reduction_params(stream, ¶ms); 715 stream->bit_depth_params = params; 716 717 if (pipes->plane_res.xfm && 718 pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) { 719 pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth( 720 pipes->plane_res.xfm, 721 pipes->plane_res.scl_data.lb_params.depth, 722 &stream->bit_depth_params); 723 } 724 725 pipes->stream_res.opp->funcs-> 726 opp_program_bit_depth_reduction(pipes->stream_res.opp, ¶ms); 727} 728 729bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream) 730{ 731 int i; 732 bool ret = false; 733 struct pipe_ctx *pipes; 734 735 for (i = 0; i < MAX_PIPES; i++) { 736 if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) { 737 pipes = &dc->current_state->res_ctx.pipe_ctx[i]; 738 dc->hwss.program_gamut_remap(pipes); 739 ret = true; 740 } 741 } 742 743 return ret; 744} 745 746bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream) 747{ 748 int i; 749 bool ret = false; 750 struct pipe_ctx *pipes; 751 752 for (i = 0; i < MAX_PIPES; i++) { 753 if (dc->current_state->res_ctx.pipe_ctx[i].stream 754 == stream) { 755 756 pipes = &dc->current_state->res_ctx.pipe_ctx[i]; 757 dc->hwss.program_output_csc(dc, 758 pipes, 759 stream->output_color_space, 760 stream->csc_color_matrix.matrix, 761 pipes->stream_res.opp->inst); 762 ret = true; 763 } 764 } 765 766 return ret; 767} 768 769void dc_stream_set_static_screen_params(struct dc *dc, 770 struct dc_stream_state **streams, 771 int num_streams, 772 const struct dc_static_screen_params *params) 773{ 774 int i, j; 775 struct pipe_ctx *pipes_affected[MAX_PIPES]; 776 int num_pipes_affected = 0; 777 778 for (i = 0; i < num_streams; i++) { 779 struct dc_stream_state *stream = streams[i]; 780 781 for (j = 0; j < MAX_PIPES; j++) { 782 if (dc->current_state->res_ctx.pipe_ctx[j].stream 783 == stream) { 784 pipes_affected[num_pipes_affected++] = 785 &dc->current_state->res_ctx.pipe_ctx[j]; 786 } 787 } 788 } 789 790 dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params); 791} 792 793static void dc_destruct(struct dc *dc) 794{ 795 // reset link encoder assignment table on destruct 796 if (dc->res_pool && dc->res_pool->funcs->link_encs_assign) 797 link_enc_cfg_init(dc, dc->current_state); 798 799 if (dc->current_state) { 800 dc_release_state(dc->current_state); 801 dc->current_state = NULL; 802 } 803 804 destroy_links(dc); 805 806 destroy_link_encoders(dc); 807 808 if (dc->clk_mgr) { 809 dc_destroy_clk_mgr(dc->clk_mgr); 810 dc->clk_mgr = NULL; 811 } 812 813 dc_destroy_resource_pool(dc); 814 815 if (dc->ctx->gpio_service) 816 dal_gpio_service_destroy(&dc->ctx->gpio_service); 817 818 if (dc->ctx->created_bios) 819 dal_bios_parser_destroy(&dc->ctx->dc_bios); 820 821 dc_perf_trace_destroy(&dc->ctx->perf_trace); 822 823 kfree(dc->ctx); 824 dc->ctx = NULL; 825 826 kfree(dc->bw_vbios); 827 dc->bw_vbios = NULL; 828 829 kfree(dc->bw_dceip); 830 dc->bw_dceip = NULL; 831 832 kfree(dc->dcn_soc); 833 dc->dcn_soc = NULL; 834 835 kfree(dc->dcn_ip); 836 dc->dcn_ip = NULL; 837 838 kfree(dc->vm_helper); 839 dc->vm_helper = NULL; 840 841} 842 843static bool dc_construct_ctx(struct dc *dc, 844 const struct dc_init_data *init_params) 845{ 846 struct dc_context *dc_ctx; 847 enum dce_version dc_version = DCE_VERSION_UNKNOWN; 848 849 dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL); 850 if (!dc_ctx) 851 return false; 852 853 dc_ctx->cgs_device = init_params->cgs_device; 854 dc_ctx->driver_context = init_params->driver; 855 dc_ctx->dc = dc; 856 dc_ctx->asic_id = init_params->asic_id; 857 dc_ctx->dc_sink_id_count = 0; 858 dc_ctx->dc_stream_id_count = 0; 859 dc_ctx->dce_environment = init_params->dce_environment; 860 861 /* Create logger */ 862 863 dc_version = resource_parse_asic_id(init_params->asic_id); 864 dc_ctx->dce_version = dc_version; 865 866 dc_ctx->perf_trace = dc_perf_trace_create(); 867 if (!dc_ctx->perf_trace) { 868 ASSERT_CRITICAL(false); 869 return false; 870 } 871 872 dc->ctx = dc_ctx; 873 874 return true; 875} 876 877static bool dc_construct(struct dc *dc, 878 const struct dc_init_data *init_params) 879{ 880 struct dc_context *dc_ctx; 881 struct bw_calcs_dceip *dc_dceip; 882 struct bw_calcs_vbios *dc_vbios; 883 struct dcn_soc_bounding_box *dcn_soc; 884 struct dcn_ip_params *dcn_ip; 885 886 dc->config = init_params->flags; 887 888 // Allocate memory for the vm_helper 889 dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL); 890 if (!dc->vm_helper) { 891 dm_error("%s: failed to create dc->vm_helper\n", __func__); 892 goto fail; 893 } 894 895 memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides)); 896 897 dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL); 898 if (!dc_dceip) { 899 dm_error("%s: failed to create dceip\n", __func__); 900 goto fail; 901 } 902 903 dc->bw_dceip = dc_dceip; 904 905 dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL); 906 if (!dc_vbios) { 907 dm_error("%s: failed to create vbios\n", __func__); 908 goto fail; 909 } 910 911 dc->bw_vbios = dc_vbios; 912 dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL); 913 if (!dcn_soc) { 914 dm_error("%s: failed to create dcn_soc\n", __func__); 915 goto fail; 916 } 917 918 dc->dcn_soc = dcn_soc; 919 920 dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL); 921 if (!dcn_ip) { 922 dm_error("%s: failed to create dcn_ip\n", __func__); 923 goto fail; 924 } 925 926 dc->dcn_ip = dcn_ip; 927 928 if (!dc_construct_ctx(dc, init_params)) { 929 dm_error("%s: failed to create ctx\n", __func__); 930 goto fail; 931 } 932 933 dc_ctx = dc->ctx; 934 935 /* Resource should construct all asic specific resources. 936 * This should be the only place where we need to parse the asic id 937 */ 938 if (init_params->vbios_override) 939 dc_ctx->dc_bios = init_params->vbios_override; 940 else { 941 /* Create BIOS parser */ 942 struct bp_init_data bp_init_data; 943 944 bp_init_data.ctx = dc_ctx; 945 bp_init_data.bios = init_params->asic_id.atombios_base_address; 946 947 dc_ctx->dc_bios = dal_bios_parser_create( 948 &bp_init_data, dc_ctx->dce_version); 949 950 if (!dc_ctx->dc_bios) { 951 ASSERT_CRITICAL(false); 952 goto fail; 953 } 954 955 dc_ctx->created_bios = true; 956 } 957 958 dc->vendor_signature = init_params->vendor_signature; 959 960 /* Create GPIO service */ 961 dc_ctx->gpio_service = dal_gpio_service_create( 962 dc_ctx->dce_version, 963 dc_ctx->dce_environment, 964 dc_ctx); 965 966 if (!dc_ctx->gpio_service) { 967 ASSERT_CRITICAL(false); 968 goto fail; 969 } 970 971 dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version); 972 if (!dc->res_pool) 973 goto fail; 974 975 /* set i2c speed if not done by the respective dcnxxx__resource.c */ 976 if (dc->caps.i2c_speed_in_khz_hdcp == 0) 977 dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz; 978 979 dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg); 980 if (!dc->clk_mgr) 981 goto fail; 982#ifdef CONFIG_DRM_AMD_DC_DCN 983 dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present; 984 985 if (dc->res_pool->funcs->update_bw_bounding_box) { 986 DC_FP_START(); 987 dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params); 988 DC_FP_END(); 989 } 990#endif 991 992 /* Creation of current_state must occur after dc->dml 993 * is initialized in dc_create_resource_pool because 994 * on creation it copies the contents of dc->dml 995 */ 996 997 dc->current_state = dc_create_state(dc); 998 999 if (!dc->current_state) { 1000 dm_error("%s: failed to create validate ctx\n", __func__); 1001 goto fail; 1002 } 1003 1004 if (!create_links(dc, init_params->num_virtual_links)) 1005 goto fail; 1006 1007 /* Create additional DIG link encoder objects if fewer than the platform 1008 * supports were created during link construction. 1009 */ 1010 if (!create_link_encoders(dc)) 1011 goto fail; 1012 1013 dc_resource_state_construct(dc, dc->current_state); 1014 1015 return true; 1016 1017fail: 1018 return false; 1019} 1020 1021static void disable_all_writeback_pipes_for_stream( 1022 const struct dc *dc, 1023 struct dc_stream_state *stream, 1024 struct dc_state *context) 1025{ 1026 int i; 1027 1028 for (i = 0; i < stream->num_wb_info; i++) 1029 stream->writeback_info[i].wb_enabled = false; 1030} 1031 1032static void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context, 1033 struct dc_stream_state *stream, bool lock) 1034{ 1035 int i; 1036 1037 /* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */ 1038 if (dc->hwss.interdependent_update_lock) 1039 dc->hwss.interdependent_update_lock(dc, context, lock); 1040 else { 1041 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1042 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 1043 struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; 1044 1045 // Copied conditions that were previously in dce110_apply_ctx_for_surface 1046 if (stream == pipe_ctx->stream) { 1047 if (!pipe_ctx->top_pipe && 1048 (pipe_ctx->plane_state || old_pipe_ctx->plane_state)) 1049 dc->hwss.pipe_control_lock(dc, pipe_ctx, lock); 1050 } 1051 } 1052 } 1053} 1054 1055static void disable_dangling_plane(struct dc *dc, struct dc_state *context) 1056{ 1057 int i, j; 1058 struct dc_state *dangling_context = dc_create_state(dc); 1059 struct dc_state *current_ctx; 1060 1061 if (dangling_context == NULL) 1062 return; 1063 1064 dc_resource_state_copy_construct(dc->current_state, dangling_context); 1065 1066 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1067 struct dc_stream_state *old_stream = 1068 dc->current_state->res_ctx.pipe_ctx[i].stream; 1069 bool should_disable = true; 1070 bool pipe_split_change = 1071 context->res_ctx.pipe_ctx[i].top_pipe != dc->current_state->res_ctx.pipe_ctx[i].top_pipe; 1072 1073 for (j = 0; j < context->stream_count; j++) { 1074 if (old_stream == context->streams[j]) { 1075 should_disable = false; 1076 break; 1077 } 1078 } 1079 if (!should_disable && pipe_split_change && 1080 dc->current_state->stream_count != context->stream_count) 1081 should_disable = true; 1082 1083 if (should_disable && old_stream) { 1084 dc_rem_all_planes_for_stream(dc, old_stream, dangling_context); 1085 disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context); 1086 1087 if (dc->hwss.apply_ctx_for_surface) { 1088 apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true); 1089 dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context); 1090 apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false); 1091 dc->hwss.post_unlock_program_front_end(dc, dangling_context); 1092 } 1093 if (dc->hwss.program_front_end_for_ctx) { 1094 dc->hwss.interdependent_update_lock(dc, dc->current_state, true); 1095 dc->hwss.program_front_end_for_ctx(dc, dangling_context); 1096 dc->hwss.interdependent_update_lock(dc, dc->current_state, false); 1097 dc->hwss.post_unlock_program_front_end(dc, dangling_context); 1098 } 1099 } 1100 } 1101 1102 current_ctx = dc->current_state; 1103 dc->current_state = dangling_context; 1104 dc_release_state(current_ctx); 1105} 1106 1107static void disable_vbios_mode_if_required( 1108 struct dc *dc, 1109 struct dc_state *context) 1110{ 1111 unsigned int i, j; 1112 1113 /* check if timing_changed, disable stream*/ 1114 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1115 struct dc_stream_state *stream = NULL; 1116 struct dc_link *link = NULL; 1117 struct pipe_ctx *pipe = NULL; 1118 1119 pipe = &context->res_ctx.pipe_ctx[i]; 1120 stream = pipe->stream; 1121 if (stream == NULL) 1122 continue; 1123 1124 // only looking for first odm pipe 1125 if (pipe->prev_odm_pipe) 1126 continue; 1127 1128 if (stream->link->local_sink && 1129 stream->link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { 1130 link = stream->link; 1131 } 1132 1133 if (link != NULL && link->link_enc->funcs->is_dig_enabled(link->link_enc)) { 1134 unsigned int enc_inst, tg_inst = 0; 1135 unsigned int pix_clk_100hz; 1136 1137 enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc); 1138 if (enc_inst != ENGINE_ID_UNKNOWN) { 1139 for (j = 0; j < dc->res_pool->stream_enc_count; j++) { 1140 if (dc->res_pool->stream_enc[j]->id == enc_inst) { 1141 tg_inst = dc->res_pool->stream_enc[j]->funcs->dig_source_otg( 1142 dc->res_pool->stream_enc[j]); 1143 break; 1144 } 1145 } 1146 1147 dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz( 1148 dc->res_pool->dp_clock_source, 1149 tg_inst, &pix_clk_100hz); 1150 1151 if (link->link_status.link_active) { 1152 uint32_t requested_pix_clk_100hz = 1153 pipe->stream_res.pix_clk_params.requested_pix_clk_100hz; 1154 1155 if (pix_clk_100hz != requested_pix_clk_100hz) { 1156 core_link_disable_stream(pipe); 1157 pipe->stream->dpms_off = false; 1158 } 1159 } 1160 } 1161 } 1162 } 1163} 1164 1165static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context) 1166{ 1167 int i; 1168 PERF_TRACE(); 1169 for (i = 0; i < MAX_PIPES; i++) { 1170 int count = 0; 1171 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 1172 1173 if (!pipe->plane_state) 1174 continue; 1175 1176 /* Timeout 100 ms */ 1177 while (count < 100000) { 1178 /* Must set to false to start with, due to OR in update function */ 1179 pipe->plane_state->status.is_flip_pending = false; 1180 dc->hwss.update_pending_status(pipe); 1181 if (!pipe->plane_state->status.is_flip_pending) 1182 break; 1183 udelay(1); 1184 count++; 1185 } 1186 ASSERT(!pipe->plane_state->status.is_flip_pending); 1187 } 1188 PERF_TRACE(); 1189} 1190 1191/******************************************************************************* 1192 * Public functions 1193 ******************************************************************************/ 1194 1195struct dc *dc_create(const struct dc_init_data *init_params) 1196{ 1197 struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL); 1198 unsigned int full_pipe_count; 1199 1200 if (!dc) 1201 return NULL; 1202 1203 if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) { 1204 if (!dc_construct_ctx(dc, init_params)) 1205 goto destruct_dc; 1206 } else { 1207 if (!dc_construct(dc, init_params)) 1208 goto destruct_dc; 1209 1210 full_pipe_count = dc->res_pool->pipe_count; 1211 if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE) 1212 full_pipe_count--; 1213 dc->caps.max_streams = min( 1214 full_pipe_count, 1215 dc->res_pool->stream_enc_count); 1216 1217 dc->caps.max_links = dc->link_count; 1218 dc->caps.max_audios = dc->res_pool->audio_count; 1219 dc->caps.linear_pitch_alignment = 64; 1220 1221 dc->caps.max_dp_protocol_version = DP_VERSION_1_4; 1222 1223 dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator; 1224 1225 if (dc->res_pool->dmcu != NULL) 1226 dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version; 1227 } 1228 1229 /* Populate versioning information */ 1230 dc->versions.dc_ver = DC_VER; 1231 1232 dc->build_id = DC_BUILD_ID; 1233 1234 DC_LOG_DC("Display Core initialized\n"); 1235 1236 1237 1238 return dc; 1239 1240destruct_dc: 1241 dc_destruct(dc); 1242 kfree(dc); 1243 return NULL; 1244} 1245 1246static void detect_edp_presence(struct dc *dc) 1247{ 1248 struct dc_link *edp_links[MAX_NUM_EDP]; 1249 struct dc_link *edp_link = NULL; 1250 enum dc_connection_type type; 1251 int i; 1252 int edp_num; 1253 1254 get_edp_links(dc, edp_links, &edp_num); 1255 if (!edp_num) 1256 return; 1257 1258 for (i = 0; i < edp_num; i++) { 1259 edp_link = edp_links[i]; 1260 if (dc->config.edp_not_connected) { 1261 edp_link->edp_sink_present = false; 1262 } else { 1263 dc_link_detect_sink(edp_link, &type); 1264 edp_link->edp_sink_present = (type != dc_connection_none); 1265 } 1266 } 1267} 1268 1269void dc_hardware_init(struct dc *dc) 1270{ 1271 1272 detect_edp_presence(dc); 1273 if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW) 1274 dc->hwss.init_hw(dc); 1275} 1276 1277void dc_init_callbacks(struct dc *dc, 1278 const struct dc_callback_init *init_params) 1279{ 1280#ifdef CONFIG_DRM_AMD_DC_HDCP 1281 dc->ctx->cp_psp = init_params->cp_psp; 1282#endif 1283} 1284 1285void dc_deinit_callbacks(struct dc *dc) 1286{ 1287#ifdef CONFIG_DRM_AMD_DC_HDCP 1288 memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp)); 1289#endif 1290} 1291 1292void dc_destroy(struct dc **dc) 1293{ 1294 dc_destruct(*dc); 1295 kfree(*dc); 1296 *dc = NULL; 1297} 1298 1299static void enable_timing_multisync( 1300 struct dc *dc, 1301 struct dc_state *ctx) 1302{ 1303 int i, multisync_count = 0; 1304 int pipe_count = dc->res_pool->pipe_count; 1305 struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL }; 1306 1307 for (i = 0; i < pipe_count; i++) { 1308 if (!ctx->res_ctx.pipe_ctx[i].stream || 1309 !ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled) 1310 continue; 1311 if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source) 1312 continue; 1313 multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i]; 1314 multisync_count++; 1315 } 1316 1317 if (multisync_count > 0) { 1318 dc->hwss.enable_per_frame_crtc_position_reset( 1319 dc, multisync_count, multisync_pipes); 1320 } 1321} 1322 1323static void program_timing_sync( 1324 struct dc *dc, 1325 struct dc_state *ctx) 1326{ 1327 int i, j, k; 1328 int group_index = 0; 1329 int num_group = 0; 1330 int pipe_count = dc->res_pool->pipe_count; 1331 struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL }; 1332 1333 for (i = 0; i < pipe_count; i++) { 1334 if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe) 1335 continue; 1336 1337 unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i]; 1338 } 1339 1340 for (i = 0; i < pipe_count; i++) { 1341 int group_size = 1; 1342 enum timing_synchronization_type sync_type = NOT_SYNCHRONIZABLE; 1343 struct pipe_ctx *pipe_set[MAX_PIPES]; 1344 1345 if (!unsynced_pipes[i]) 1346 continue; 1347 1348 pipe_set[0] = unsynced_pipes[i]; 1349 unsynced_pipes[i] = NULL; 1350 1351 /* Add tg to the set, search rest of the tg's for ones with 1352 * same timing, add all tgs with same timing to the group 1353 */ 1354 for (j = i + 1; j < pipe_count; j++) { 1355 if (!unsynced_pipes[j]) 1356 continue; 1357 if (sync_type != TIMING_SYNCHRONIZABLE && 1358 dc->hwss.enable_vblanks_synchronization && 1359 unsynced_pipes[j]->stream_res.tg->funcs->align_vblanks && 1360 resource_are_vblanks_synchronizable( 1361 unsynced_pipes[j]->stream, 1362 pipe_set[0]->stream)) { 1363 sync_type = VBLANK_SYNCHRONIZABLE; 1364 pipe_set[group_size] = unsynced_pipes[j]; 1365 unsynced_pipes[j] = NULL; 1366 group_size++; 1367 } else 1368 if (sync_type != VBLANK_SYNCHRONIZABLE && 1369 resource_are_streams_timing_synchronizable( 1370 unsynced_pipes[j]->stream, 1371 pipe_set[0]->stream)) { 1372 sync_type = TIMING_SYNCHRONIZABLE; 1373 pipe_set[group_size] = unsynced_pipes[j]; 1374 unsynced_pipes[j] = NULL; 1375 group_size++; 1376 } 1377 } 1378 1379 /* set first unblanked pipe as master */ 1380 for (j = 0; j < group_size; j++) { 1381 bool is_blanked; 1382 1383 if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked) 1384 is_blanked = 1385 pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp); 1386 else 1387 is_blanked = 1388 pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg); 1389 if (!is_blanked) { 1390 if (j == 0) 1391 break; 1392 1393 swap(pipe_set[0], pipe_set[j]); 1394 break; 1395 } 1396 } 1397 1398 for (k = 0; k < group_size; k++) { 1399 struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream); 1400 1401 status->timing_sync_info.group_id = num_group; 1402 status->timing_sync_info.group_size = group_size; 1403 if (k == 0) 1404 status->timing_sync_info.master = true; 1405 else 1406 status->timing_sync_info.master = false; 1407 1408 } 1409 1410 /* remove any other pipes that are already been synced */ 1411 if (dc->config.use_pipe_ctx_sync_logic) { 1412 /* check pipe's syncd to decide which pipe to be removed */ 1413 for (j = 1; j < group_size; j++) { 1414 if (pipe_set[j]->pipe_idx_syncd == pipe_set[0]->pipe_idx_syncd) { 1415 group_size--; 1416 pipe_set[j] = pipe_set[group_size]; 1417 j--; 1418 } else 1419 /* link slave pipe's syncd with master pipe */ 1420 pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd; 1421 } 1422 } else { 1423 for (j = j + 1; j < group_size; j++) { 1424 bool is_blanked; 1425 1426 if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked) 1427 is_blanked = 1428 pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp); 1429 else 1430 is_blanked = 1431 pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg); 1432 if (!is_blanked) { 1433 group_size--; 1434 pipe_set[j] = pipe_set[group_size]; 1435 j--; 1436 } 1437 } 1438 } 1439 1440 if (group_size > 1) { 1441 if (sync_type == TIMING_SYNCHRONIZABLE) { 1442 dc->hwss.enable_timing_synchronization( 1443 dc, group_index, group_size, pipe_set); 1444 } else 1445 if (sync_type == VBLANK_SYNCHRONIZABLE) { 1446 dc->hwss.enable_vblanks_synchronization( 1447 dc, group_index, group_size, pipe_set); 1448 } 1449 group_index++; 1450 } 1451 num_group++; 1452 } 1453} 1454 1455static bool context_changed( 1456 struct dc *dc, 1457 struct dc_state *context) 1458{ 1459 uint8_t i; 1460 1461 if (context->stream_count != dc->current_state->stream_count) 1462 return true; 1463 1464 for (i = 0; i < dc->current_state->stream_count; i++) { 1465 if (dc->current_state->streams[i] != context->streams[i]) 1466 return true; 1467 } 1468 1469 return false; 1470} 1471 1472bool dc_validate_boot_timing(const struct dc *dc, 1473 const struct dc_sink *sink, 1474 struct dc_crtc_timing *crtc_timing) 1475{ 1476 struct timing_generator *tg; 1477 struct stream_encoder *se = NULL; 1478 1479 struct dc_crtc_timing hw_crtc_timing = {0}; 1480 1481 struct dc_link *link = sink->link; 1482 unsigned int i, enc_inst, tg_inst = 0; 1483 1484 /* Support seamless boot on EDP displays only */ 1485 if (sink->sink_signal != SIGNAL_TYPE_EDP) { 1486 return false; 1487 } 1488 1489 /* Check for enabled DIG to identify enabled display */ 1490 if (!link->link_enc->funcs->is_dig_enabled(link->link_enc)) 1491 return false; 1492 1493 enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc); 1494 1495 if (enc_inst == ENGINE_ID_UNKNOWN) 1496 return false; 1497 1498 for (i = 0; i < dc->res_pool->stream_enc_count; i++) { 1499 if (dc->res_pool->stream_enc[i]->id == enc_inst) { 1500 1501 se = dc->res_pool->stream_enc[i]; 1502 1503 tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg( 1504 dc->res_pool->stream_enc[i]); 1505 break; 1506 } 1507 } 1508 1509 // tg_inst not found 1510 if (i == dc->res_pool->stream_enc_count) 1511 return false; 1512 1513 if (tg_inst >= dc->res_pool->timing_generator_count) 1514 return false; 1515 1516 tg = dc->res_pool->timing_generators[tg_inst]; 1517 1518 if (!tg->funcs->get_hw_timing) 1519 return false; 1520 1521 if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing)) 1522 return false; 1523 1524 if (crtc_timing->h_total != hw_crtc_timing.h_total) 1525 return false; 1526 1527 if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left) 1528 return false; 1529 1530 if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable) 1531 return false; 1532 1533 if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right) 1534 return false; 1535 1536 if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch) 1537 return false; 1538 1539 if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width) 1540 return false; 1541 1542 if (crtc_timing->v_total != hw_crtc_timing.v_total) 1543 return false; 1544 1545 if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top) 1546 return false; 1547 1548 if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable) 1549 return false; 1550 1551 if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom) 1552 return false; 1553 1554 if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch) 1555 return false; 1556 1557 if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width) 1558 return false; 1559 1560 /* block DSC for now, as VBIOS does not currently support DSC timings */ 1561 if (crtc_timing->flags.DSC) 1562 return false; 1563 1564 if (dc_is_dp_signal(link->connector_signal)) { 1565 unsigned int pix_clk_100hz; 1566 uint32_t numOdmPipes = 1; 1567 uint32_t id_src[4] = {0}; 1568 1569 dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz( 1570 dc->res_pool->dp_clock_source, 1571 tg_inst, &pix_clk_100hz); 1572 1573 if (tg->funcs->get_optc_source) 1574 tg->funcs->get_optc_source(tg, 1575 &numOdmPipes, &id_src[0], &id_src[1]); 1576 1577 if (numOdmPipes == 2) 1578 pix_clk_100hz *= 2; 1579 if (numOdmPipes == 4) 1580 pix_clk_100hz *= 4; 1581 1582 // Note: In rare cases, HW pixclk may differ from crtc's pixclk 1583 // slightly due to rounding issues in 10 kHz units. 1584 if (crtc_timing->pix_clk_100hz != pix_clk_100hz) 1585 return false; 1586 1587 if (!se->funcs->dp_get_pixel_format) 1588 return false; 1589 1590 if (!se->funcs->dp_get_pixel_format( 1591 se, 1592 &hw_crtc_timing.pixel_encoding, 1593 &hw_crtc_timing.display_color_depth)) 1594 return false; 1595 1596 if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth) 1597 return false; 1598 1599 if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding) 1600 return false; 1601 } 1602 1603 if (link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) { 1604 return false; 1605 } 1606 1607 if (is_edp_ilr_optimization_required(link, crtc_timing)) { 1608 DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n"); 1609 return false; 1610 } 1611 1612 return true; 1613} 1614 1615static inline bool should_update_pipe_for_stream( 1616 struct dc_state *context, 1617 struct pipe_ctx *pipe_ctx, 1618 struct dc_stream_state *stream) 1619{ 1620 return (pipe_ctx->stream && pipe_ctx->stream == stream); 1621} 1622 1623static inline bool should_update_pipe_for_plane( 1624 struct dc_state *context, 1625 struct pipe_ctx *pipe_ctx, 1626 struct dc_plane_state *plane_state) 1627{ 1628 return (pipe_ctx->plane_state == plane_state); 1629} 1630 1631void dc_enable_stereo( 1632 struct dc *dc, 1633 struct dc_state *context, 1634 struct dc_stream_state *streams[], 1635 uint8_t stream_count) 1636{ 1637 int i, j; 1638 struct pipe_ctx *pipe; 1639 1640 for (i = 0; i < MAX_PIPES; i++) { 1641 if (context != NULL) { 1642 pipe = &context->res_ctx.pipe_ctx[i]; 1643 } else { 1644 context = dc->current_state; 1645 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 1646 } 1647 1648 for (j = 0; pipe && j < stream_count; j++) { 1649 if (should_update_pipe_for_stream(context, pipe, streams[j]) && 1650 dc->hwss.setup_stereo) 1651 dc->hwss.setup_stereo(pipe, dc); 1652 } 1653 } 1654} 1655 1656void dc_trigger_sync(struct dc *dc, struct dc_state *context) 1657{ 1658 if (context->stream_count > 1 && !dc->debug.disable_timing_sync) { 1659 enable_timing_multisync(dc, context); 1660 program_timing_sync(dc, context); 1661 } 1662} 1663 1664static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context) 1665{ 1666 int i; 1667 unsigned int stream_mask = 0; 1668 1669 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1670 if (context->res_ctx.pipe_ctx[i].stream) 1671 stream_mask |= 1 << i; 1672 } 1673 1674 return stream_mask; 1675} 1676 1677void dc_z10_restore(const struct dc *dc) 1678{ 1679 if (dc->hwss.z10_restore) 1680 dc->hwss.z10_restore(dc); 1681} 1682 1683void dc_z10_save_init(struct dc *dc) 1684{ 1685 if (dc->hwss.z10_save_init) 1686 dc->hwss.z10_save_init(dc); 1687} 1688 1689/* 1690 * Applies given context to HW and copy it into current context. 1691 * It's up to the user to release the src context afterwards. 1692 */ 1693static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context) 1694{ 1695 struct dc_bios *dcb = dc->ctx->dc_bios; 1696 enum dc_status result = DC_ERROR_UNEXPECTED; 1697 struct pipe_ctx *pipe; 1698 int i, k, l; 1699 struct dc_stream_state *dc_streams[MAX_STREAMS] = {0}; 1700 struct dc_state *old_state; 1701 1702 dc_z10_restore(dc); 1703 dc_allow_idle_optimizations(dc, false); 1704 1705 for (i = 0; i < context->stream_count; i++) 1706 dc_streams[i] = context->streams[i]; 1707 1708 if (!dcb->funcs->is_accelerated_mode(dcb)) { 1709 disable_vbios_mode_if_required(dc, context); 1710 dc->hwss.enable_accelerated_mode(dc, context); 1711 } 1712 1713 if (context->stream_count > get_seamless_boot_stream_count(context) || 1714 context->stream_count == 0) 1715 dc->hwss.prepare_bandwidth(dc, context); 1716 1717 disable_dangling_plane(dc, context); 1718 /* re-program planes for existing stream, in case we need to 1719 * free up plane resource for later use 1720 */ 1721 if (dc->hwss.apply_ctx_for_surface) { 1722 for (i = 0; i < context->stream_count; i++) { 1723 if (context->streams[i]->mode_changed) 1724 continue; 1725 apply_ctx_interdependent_lock(dc, context, context->streams[i], true); 1726 dc->hwss.apply_ctx_for_surface( 1727 dc, context->streams[i], 1728 context->stream_status[i].plane_count, 1729 context); /* use new pipe config in new context */ 1730 apply_ctx_interdependent_lock(dc, context, context->streams[i], false); 1731 dc->hwss.post_unlock_program_front_end(dc, context); 1732 } 1733 } 1734 1735 /* Program hardware */ 1736 for (i = 0; i < dc->res_pool->pipe_count; i++) { 1737 pipe = &context->res_ctx.pipe_ctx[i]; 1738 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe); 1739 } 1740 1741 result = dc->hwss.apply_ctx_to_hw(dc, context); 1742 1743 if (result != DC_OK) 1744 return result; 1745 1746 dc_trigger_sync(dc, context); 1747 1748 /* Program all planes within new context*/ 1749 if (dc->hwss.program_front_end_for_ctx) { 1750 dc->hwss.interdependent_update_lock(dc, context, true); 1751 dc->hwss.program_front_end_for_ctx(dc, context); 1752 dc->hwss.interdependent_update_lock(dc, context, false); 1753 dc->hwss.post_unlock_program_front_end(dc, context); 1754 } 1755 for (i = 0; i < context->stream_count; i++) { 1756 const struct dc_link *link = context->streams[i]->link; 1757 1758 if (!context->streams[i]->mode_changed) 1759 continue; 1760 1761 if (dc->hwss.apply_ctx_for_surface) { 1762 apply_ctx_interdependent_lock(dc, context, context->streams[i], true); 1763 dc->hwss.apply_ctx_for_surface( 1764 dc, context->streams[i], 1765 context->stream_status[i].plane_count, 1766 context); 1767 apply_ctx_interdependent_lock(dc, context, context->streams[i], false); 1768 dc->hwss.post_unlock_program_front_end(dc, context); 1769 } 1770 1771 /* 1772 * enable stereo 1773 * TODO rework dc_enable_stereo call to work with validation sets? 1774 */ 1775 for (k = 0; k < MAX_PIPES; k++) { 1776 pipe = &context->res_ctx.pipe_ctx[k]; 1777 1778 for (l = 0 ; pipe && l < context->stream_count; l++) { 1779 if (context->streams[l] && 1780 context->streams[l] == pipe->stream && 1781 dc->hwss.setup_stereo) 1782 dc->hwss.setup_stereo(pipe, dc); 1783 } 1784 } 1785 1786 CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}", 1787 context->streams[i]->timing.h_addressable, 1788 context->streams[i]->timing.v_addressable, 1789 context->streams[i]->timing.h_total, 1790 context->streams[i]->timing.v_total, 1791 context->streams[i]->timing.pix_clk_100hz / 10); 1792 } 1793 1794 dc_enable_stereo(dc, context, dc_streams, context->stream_count); 1795 1796 if (context->stream_count > get_seamless_boot_stream_count(context) || 1797 context->stream_count == 0) { 1798 /* Must wait for no flips to be pending before doing optimize bw */ 1799 wait_for_no_pipes_pending(dc, context); 1800 /* pplib is notified if disp_num changed */ 1801 dc->hwss.optimize_bandwidth(dc, context); 1802 } 1803 1804 if (dc->ctx->dce_version >= DCE_VERSION_MAX) 1805 TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk); 1806 else 1807 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce); 1808 1809 context->stream_mask = get_stream_mask(dc, context); 1810 1811 if (context->stream_mask != dc->current_state->stream_mask) 1812 dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, context->stream_mask); 1813 1814 for (i = 0; i < context->stream_count; i++) 1815 context->streams[i]->mode_changed = false; 1816 1817 old_state = dc->current_state; 1818 dc->current_state = context; 1819 1820 dc_release_state(old_state); 1821 1822 dc_retain_state(dc->current_state); 1823 1824 return result; 1825} 1826 1827bool dc_commit_state(struct dc *dc, struct dc_state *context) 1828{ 1829 enum dc_status result = DC_ERROR_UNEXPECTED; 1830 int i; 1831 1832 if (!context_changed(dc, context)) 1833 return DC_OK; 1834 1835 DC_LOG_DC("%s: %d streams\n", 1836 __func__, context->stream_count); 1837 1838 for (i = 0; i < context->stream_count; i++) { 1839 struct dc_stream_state *stream = context->streams[i]; 1840 1841 dc_stream_log(dc, stream); 1842 } 1843 1844 /* 1845 * Previous validation was perfomred with fast_validation = true and 1846 * the full DML state required for hardware programming was skipped. 1847 * 1848 * Re-validate here to calculate these parameters / watermarks. 1849 */ 1850 result = dc_validate_global_state(dc, context, false); 1851 if (result != DC_OK) { 1852 DC_LOG_ERROR("DC commit global validation failure: %s (%d)", 1853 dc_status_to_str(result), result); 1854 return result; 1855 } 1856 1857 result = dc_commit_state_no_check(dc, context); 1858 1859 return (result == DC_OK); 1860} 1861 1862bool dc_acquire_release_mpc_3dlut( 1863 struct dc *dc, bool acquire, 1864 struct dc_stream_state *stream, 1865 struct dc_3dlut **lut, 1866 struct dc_transfer_func **shaper) 1867{ 1868 int pipe_idx; 1869 bool ret = false; 1870 bool found_pipe_idx = false; 1871 const struct resource_pool *pool = dc->res_pool; 1872 struct resource_context *res_ctx = &dc->current_state->res_ctx; 1873 int mpcc_id = 0; 1874 1875 if (pool && res_ctx) { 1876 if (acquire) { 1877 /*find pipe idx for the given stream*/ 1878 for (pipe_idx = 0; pipe_idx < pool->pipe_count; pipe_idx++) { 1879 if (res_ctx->pipe_ctx[pipe_idx].stream == stream) { 1880 found_pipe_idx = true; 1881 mpcc_id = res_ctx->pipe_ctx[pipe_idx].plane_res.hubp->inst; 1882 break; 1883 } 1884 } 1885 } else 1886 found_pipe_idx = true;/*for release pipe_idx is not required*/ 1887 1888 if (found_pipe_idx) { 1889 if (acquire && pool->funcs->acquire_post_bldn_3dlut) 1890 ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper); 1891 else if (!acquire && pool->funcs->release_post_bldn_3dlut) 1892 ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper); 1893 } 1894 } 1895 return ret; 1896} 1897 1898static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context) 1899{ 1900 int i; 1901 struct pipe_ctx *pipe; 1902 1903 for (i = 0; i < MAX_PIPES; i++) { 1904 pipe = &context->res_ctx.pipe_ctx[i]; 1905 1906 if (!pipe->plane_state) 1907 continue; 1908 1909 /* Must set to false to start with, due to OR in update function */ 1910 pipe->plane_state->status.is_flip_pending = false; 1911 dc->hwss.update_pending_status(pipe); 1912 if (pipe->plane_state->status.is_flip_pending) 1913 return true; 1914 } 1915 return false; 1916} 1917 1918/* Perform updates here which need to be deferred until next vupdate 1919 * 1920 * i.e. blnd lut, 3dlut, and shaper lut bypass regs are double buffered 1921 * but forcing lut memory to shutdown state is immediate. This causes 1922 * single frame corruption as lut gets disabled mid-frame unless shutdown 1923 * is deferred until after entering bypass. 1924 */ 1925static void process_deferred_updates(struct dc *dc) 1926{ 1927 int i = 0; 1928 1929 if (dc->debug.enable_mem_low_power.bits.cm) { 1930 ASSERT(dc->dcn_ip->max_num_dpp); 1931 for (i = 0; i < dc->dcn_ip->max_num_dpp; i++) 1932 if (dc->res_pool->dpps[i]->funcs->dpp_deferred_update) 1933 dc->res_pool->dpps[i]->funcs->dpp_deferred_update(dc->res_pool->dpps[i]); 1934 } 1935} 1936 1937void dc_post_update_surfaces_to_stream(struct dc *dc) 1938{ 1939 int i; 1940 struct dc_state *context = dc->current_state; 1941 1942 if ((!dc->optimized_required) || get_seamless_boot_stream_count(context) > 0) 1943 return; 1944 1945 post_surface_trace(dc); 1946 1947 if (dc->ctx->dce_version >= DCE_VERSION_MAX) 1948 TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk); 1949 else 1950 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce); 1951 1952 if (is_flip_pending_in_pipes(dc, context)) 1953 return; 1954 1955 for (i = 0; i < dc->res_pool->pipe_count; i++) 1956 if (context->res_ctx.pipe_ctx[i].stream == NULL || 1957 context->res_ctx.pipe_ctx[i].plane_state == NULL) { 1958 context->res_ctx.pipe_ctx[i].pipe_idx = i; 1959 dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]); 1960 } 1961 1962 process_deferred_updates(dc); 1963 1964 dc->hwss.optimize_bandwidth(dc, context); 1965 1966 dc->optimized_required = false; 1967 dc->wm_optimized_required = false; 1968} 1969 1970static void init_state(struct dc *dc, struct dc_state *context) 1971{ 1972 /* Each context must have their own instance of VBA and in order to 1973 * initialize and obtain IP and SOC the base DML instance from DC is 1974 * initially copied into every context 1975 */ 1976 memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib)); 1977} 1978 1979struct dc_state *dc_create_state(struct dc *dc) 1980{ 1981 struct dc_state *context = kvzalloc(sizeof(struct dc_state), 1982 GFP_KERNEL); 1983 1984 if (!context) 1985 return NULL; 1986 1987 init_state(dc, context); 1988 1989 kref_init(&context->refcount); 1990 1991 return context; 1992} 1993 1994struct dc_state *dc_copy_state(struct dc_state *src_ctx) 1995{ 1996 int i, j; 1997 struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL); 1998 1999 if (!new_ctx) 2000 return NULL; 2001 memcpy(new_ctx, src_ctx, sizeof(struct dc_state)); 2002 2003 for (i = 0; i < MAX_PIPES; i++) { 2004 struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i]; 2005 2006 if (cur_pipe->top_pipe) 2007 cur_pipe->top_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx]; 2008 2009 if (cur_pipe->bottom_pipe) 2010 cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx]; 2011 2012 if (cur_pipe->prev_odm_pipe) 2013 cur_pipe->prev_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx]; 2014 2015 if (cur_pipe->next_odm_pipe) 2016 cur_pipe->next_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx]; 2017 2018 } 2019 2020 for (i = 0; i < new_ctx->stream_count; i++) { 2021 dc_stream_retain(new_ctx->streams[i]); 2022 for (j = 0; j < new_ctx->stream_status[i].plane_count; j++) 2023 dc_plane_state_retain( 2024 new_ctx->stream_status[i].plane_states[j]); 2025 } 2026 2027 kref_init(&new_ctx->refcount); 2028 2029 return new_ctx; 2030} 2031 2032void dc_retain_state(struct dc_state *context) 2033{ 2034 kref_get(&context->refcount); 2035} 2036 2037static void dc_state_free(struct kref *kref) 2038{ 2039 struct dc_state *context = container_of(kref, struct dc_state, refcount); 2040 dc_resource_state_destruct(context); 2041 kvfree(context); 2042} 2043 2044void dc_release_state(struct dc_state *context) 2045{ 2046 kref_put(&context->refcount, dc_state_free); 2047} 2048 2049bool dc_set_generic_gpio_for_stereo(bool enable, 2050 struct gpio_service *gpio_service) 2051{ 2052 enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR; 2053 struct gpio_pin_info pin_info; 2054 struct gpio *generic; 2055 struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config), 2056 GFP_KERNEL); 2057 2058 if (!config) 2059 return false; 2060 pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0); 2061 2062 if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) { 2063 kfree(config); 2064 return false; 2065 } else { 2066 generic = dal_gpio_service_create_generic_mux( 2067 gpio_service, 2068 pin_info.offset, 2069 pin_info.mask); 2070 } 2071 2072 if (!generic) { 2073 kfree(config); 2074 return false; 2075 } 2076 2077 gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT); 2078 2079 config->enable_output_from_mux = enable; 2080 config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC; 2081 2082 if (gpio_result == GPIO_RESULT_OK) 2083 gpio_result = dal_mux_setup_config(generic, config); 2084 2085 if (gpio_result == GPIO_RESULT_OK) { 2086 dal_gpio_close(generic); 2087 dal_gpio_destroy_generic_mux(&generic); 2088 kfree(config); 2089 return true; 2090 } else { 2091 dal_gpio_close(generic); 2092 dal_gpio_destroy_generic_mux(&generic); 2093 kfree(config); 2094 return false; 2095 } 2096} 2097 2098static bool is_surface_in_context( 2099 const struct dc_state *context, 2100 const struct dc_plane_state *plane_state) 2101{ 2102 int j; 2103 2104 for (j = 0; j < MAX_PIPES; j++) { 2105 const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 2106 2107 if (plane_state == pipe_ctx->plane_state) { 2108 return true; 2109 } 2110 } 2111 2112 return false; 2113} 2114 2115static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u) 2116{ 2117 union surface_update_flags *update_flags = &u->surface->update_flags; 2118 enum surface_update_type update_type = UPDATE_TYPE_FAST; 2119 2120 if (!u->plane_info) 2121 return UPDATE_TYPE_FAST; 2122 2123 if (u->plane_info->color_space != u->surface->color_space) { 2124 update_flags->bits.color_space_change = 1; 2125 elevate_update_type(&update_type, UPDATE_TYPE_MED); 2126 } 2127 2128 if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) { 2129 update_flags->bits.horizontal_mirror_change = 1; 2130 elevate_update_type(&update_type, UPDATE_TYPE_MED); 2131 } 2132 2133 if (u->plane_info->rotation != u->surface->rotation) { 2134 update_flags->bits.rotation_change = 1; 2135 elevate_update_type(&update_type, UPDATE_TYPE_FULL); 2136 } 2137 2138 if (u->plane_info->format != u->surface->format) { 2139 update_flags->bits.pixel_format_change = 1; 2140 elevate_update_type(&update_type, UPDATE_TYPE_FULL); 2141 } 2142 2143 if (u->plane_info->stereo_format != u->surface->stereo_format) { 2144 update_flags->bits.stereo_format_change = 1; 2145 elevate_update_type(&update_type, UPDATE_TYPE_FULL); 2146 } 2147 2148 if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) { 2149 update_flags->bits.per_pixel_alpha_change = 1; 2150 elevate_update_type(&update_type, UPDATE_TYPE_MED); 2151 } 2152 2153 if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) { 2154 update_flags->bits.global_alpha_change = 1; 2155 elevate_update_type(&update_type, UPDATE_TYPE_MED); 2156 } 2157 2158 if (u->plane_info->dcc.enable != u->surface->dcc.enable 2159 || u->plane_info->dcc.dcc_ind_blk != u->surface->dcc.dcc_ind_blk 2160 || u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) { 2161 /* During DCC on/off, stutter period is calculated before 2162 * DCC has fully transitioned. This results in incorrect 2163 * stutter period calculation. Triggering a full update will 2164 * recalculate stutter period. 2165 */ 2166 update_flags->bits.dcc_change = 1; 2167 elevate_update_type(&update_type, UPDATE_TYPE_FULL); 2168 } 2169 2170 if (resource_pixel_format_to_bpp(u->plane_info->format) != 2171 resource_pixel_format_to_bpp(u->surface->format)) { 2172 /* different bytes per element will require full bandwidth 2173 * and DML calculation 2174 */ 2175 update_flags->bits.bpp_change = 1; 2176 elevate_update_type(&update_type, UPDATE_TYPE_FULL); 2177 } 2178 2179 if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch 2180 || u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) { 2181 update_flags->bits.plane_size_change = 1; 2182 elevate_update_type(&update_type, UPDATE_TYPE_MED); 2183 } 2184 2185 2186 if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info, 2187 sizeof(union dc_tiling_info)) != 0) { 2188 update_flags->bits.swizzle_change = 1; 2189 elevate_update_type(&update_type, UPDATE_TYPE_MED); 2190 2191 /* todo: below are HW dependent, we should add a hook to 2192 * DCE/N resource and validated there. 2193 */ 2194 if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) { 2195 /* swizzled mode requires RQ to be setup properly, 2196 * thus need to run DML to calculate RQ settings 2197 */ 2198 update_flags->bits.bandwidth_change = 1; 2199 elevate_update_type(&update_type, UPDATE_TYPE_FULL); 2200 } 2201 } 2202 2203 /* This should be UPDATE_TYPE_FAST if nothing has changed. */ 2204 return update_type; 2205} 2206 2207static enum surface_update_type get_scaling_info_update_type( 2208 const struct dc_surface_update *u) 2209{ 2210 union surface_update_flags *update_flags = &u->surface->update_flags; 2211 2212 if (!u->scaling_info) 2213 return UPDATE_TYPE_FAST; 2214 2215 if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width 2216 || u->scaling_info->clip_rect.height != u->surface->clip_rect.height 2217 || u->scaling_info->dst_rect.width != u->surface->dst_rect.width 2218 || u->scaling_info->dst_rect.height != u->surface->dst_rect.height 2219 || u->scaling_info->scaling_quality.integer_scaling != 2220 u->surface->scaling_quality.integer_scaling 2221 ) { 2222 update_flags->bits.scaling_change = 1; 2223 2224 if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width 2225 || u->scaling_info->dst_rect.height < u->surface->dst_rect.height) 2226 && (u->scaling_info->dst_rect.width < u->surface->src_rect.width 2227 || u->scaling_info->dst_rect.height < u->surface->src_rect.height)) 2228 /* Making dst rect smaller requires a bandwidth change */ 2229 update_flags->bits.bandwidth_change = 1; 2230 } 2231 2232 if (u->scaling_info->src_rect.width != u->surface->src_rect.width 2233 || u->scaling_info->src_rect.height != u->surface->src_rect.height) { 2234 2235 update_flags->bits.scaling_change = 1; 2236 if (u->scaling_info->src_rect.width > u->surface->src_rect.width 2237 || u->scaling_info->src_rect.height > u->surface->src_rect.height) 2238 /* Making src rect bigger requires a bandwidth change */ 2239 update_flags->bits.clock_change = 1; 2240 } 2241 2242 if (u->scaling_info->src_rect.x != u->surface->src_rect.x 2243 || u->scaling_info->src_rect.y != u->surface->src_rect.y 2244 || u->scaling_info->clip_rect.x != u->surface->clip_rect.x 2245 || u->scaling_info->clip_rect.y != u->surface->clip_rect.y 2246 || u->scaling_info->dst_rect.x != u->surface->dst_rect.x 2247 || u->scaling_info->dst_rect.y != u->surface->dst_rect.y) 2248 update_flags->bits.position_change = 1; 2249 2250 if (update_flags->bits.clock_change 2251 || update_flags->bits.bandwidth_change 2252 || update_flags->bits.scaling_change) 2253 return UPDATE_TYPE_FULL; 2254 2255 if (update_flags->bits.position_change) 2256 return UPDATE_TYPE_MED; 2257 2258 return UPDATE_TYPE_FAST; 2259} 2260 2261static enum surface_update_type det_surface_update(const struct dc *dc, 2262 const struct dc_surface_update *u) 2263{ 2264 const struct dc_state *context = dc->current_state; 2265 enum surface_update_type type; 2266 enum surface_update_type overall_type = UPDATE_TYPE_FAST; 2267 union surface_update_flags *update_flags = &u->surface->update_flags; 2268 2269 if (u->flip_addr) 2270 update_flags->bits.addr_update = 1; 2271 2272 if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) { 2273 update_flags->raw = 0xFFFFFFFF; 2274 return UPDATE_TYPE_FULL; 2275 } 2276 2277 update_flags->raw = 0; // Reset all flags 2278 2279 type = get_plane_info_update_type(u); 2280 elevate_update_type(&overall_type, type); 2281 2282 type = get_scaling_info_update_type(u); 2283 elevate_update_type(&overall_type, type); 2284 2285 if (u->flip_addr) 2286 update_flags->bits.addr_update = 1; 2287 2288 if (u->in_transfer_func) 2289 update_flags->bits.in_transfer_func_change = 1; 2290 2291 if (u->input_csc_color_matrix) 2292 update_flags->bits.input_csc_change = 1; 2293 2294 if (u->coeff_reduction_factor) 2295 update_flags->bits.coeff_reduction_change = 1; 2296 2297 if (u->gamut_remap_matrix) 2298 update_flags->bits.gamut_remap_change = 1; 2299 2300 if (u->gamma) { 2301 enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN; 2302 2303 if (u->plane_info) 2304 format = u->plane_info->format; 2305 else if (u->surface) 2306 format = u->surface->format; 2307 2308 if (dce_use_lut(format)) 2309 update_flags->bits.gamma_change = 1; 2310 } 2311 2312 if (u->lut3d_func || u->func_shaper) 2313 update_flags->bits.lut_3d = 1; 2314 2315 if (u->hdr_mult.value) 2316 if (u->hdr_mult.value != u->surface->hdr_mult.value) { 2317 update_flags->bits.hdr_mult = 1; 2318 elevate_update_type(&overall_type, UPDATE_TYPE_MED); 2319 } 2320 2321 if (update_flags->bits.in_transfer_func_change) { 2322 type = UPDATE_TYPE_MED; 2323 elevate_update_type(&overall_type, type); 2324 } 2325 2326 if (update_flags->bits.input_csc_change 2327 || update_flags->bits.coeff_reduction_change 2328 || update_flags->bits.lut_3d 2329 || update_flags->bits.gamma_change 2330 || update_flags->bits.gamut_remap_change) { 2331 type = UPDATE_TYPE_FULL; 2332 elevate_update_type(&overall_type, type); 2333 } 2334 2335 return overall_type; 2336} 2337 2338static enum surface_update_type check_update_surfaces_for_stream( 2339 struct dc *dc, 2340 struct dc_surface_update *updates, 2341 int surface_count, 2342 struct dc_stream_update *stream_update, 2343 const struct dc_stream_status *stream_status) 2344{ 2345 int i; 2346 enum surface_update_type overall_type = UPDATE_TYPE_FAST; 2347 2348 if (dc->idle_optimizations_allowed) 2349 overall_type = UPDATE_TYPE_FULL; 2350 2351 if (stream_status == NULL || stream_status->plane_count != surface_count) 2352 overall_type = UPDATE_TYPE_FULL; 2353 2354 if (stream_update && stream_update->pending_test_pattern) { 2355 overall_type = UPDATE_TYPE_FULL; 2356 } 2357 2358 /* some stream updates require passive update */ 2359 if (stream_update) { 2360 union stream_update_flags *su_flags = &stream_update->stream->update_flags; 2361 2362 if ((stream_update->src.height != 0 && stream_update->src.width != 0) || 2363 (stream_update->dst.height != 0 && stream_update->dst.width != 0) || 2364 stream_update->integer_scaling_update) 2365 su_flags->bits.scaling = 1; 2366 2367 if (stream_update->out_transfer_func) 2368 su_flags->bits.out_tf = 1; 2369 2370 if (stream_update->abm_level) 2371 su_flags->bits.abm_level = 1; 2372 2373 if (stream_update->dpms_off) 2374 su_flags->bits.dpms_off = 1; 2375 2376 if (stream_update->gamut_remap) 2377 su_flags->bits.gamut_remap = 1; 2378 2379 if (stream_update->wb_update) 2380 su_flags->bits.wb_update = 1; 2381 2382 if (stream_update->dsc_config) 2383 su_flags->bits.dsc_changed = 1; 2384 2385 if (stream_update->mst_bw_update) 2386 su_flags->bits.mst_bw = 1; 2387 if (stream_update->crtc_timing_adjust && dc_extended_blank_supported(dc)) 2388 su_flags->bits.crtc_timing_adjust = 1; 2389 2390 if (su_flags->raw != 0) 2391 overall_type = UPDATE_TYPE_FULL; 2392 2393 if (stream_update->output_csc_transform || stream_update->output_color_space) 2394 su_flags->bits.out_csc = 1; 2395 } 2396 2397 for (i = 0 ; i < surface_count; i++) { 2398 enum surface_update_type type = 2399 det_surface_update(dc, &updates[i]); 2400 2401 elevate_update_type(&overall_type, type); 2402 } 2403 2404 return overall_type; 2405} 2406 2407/* 2408 * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full) 2409 * 2410 * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types 2411 */ 2412enum surface_update_type dc_check_update_surfaces_for_stream( 2413 struct dc *dc, 2414 struct dc_surface_update *updates, 2415 int surface_count, 2416 struct dc_stream_update *stream_update, 2417 const struct dc_stream_status *stream_status) 2418{ 2419 int i; 2420 enum surface_update_type type; 2421 2422 if (stream_update) 2423 stream_update->stream->update_flags.raw = 0; 2424 for (i = 0; i < surface_count; i++) 2425 updates[i].surface->update_flags.raw = 0; 2426 2427 type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status); 2428 if (type == UPDATE_TYPE_FULL) { 2429 if (stream_update) { 2430 uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed; 2431 stream_update->stream->update_flags.raw = 0xFFFFFFFF; 2432 stream_update->stream->update_flags.bits.dsc_changed = dsc_changed; 2433 } 2434 for (i = 0; i < surface_count; i++) 2435 updates[i].surface->update_flags.raw = 0xFFFFFFFF; 2436 } 2437 2438 if (type == UPDATE_TYPE_FAST) { 2439 // If there's an available clock comparator, we use that. 2440 if (dc->clk_mgr->funcs->are_clock_states_equal) { 2441 if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk)) 2442 dc->optimized_required = true; 2443 // Else we fallback to mem compare. 2444 } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) { 2445 dc->optimized_required = true; 2446 } 2447 2448 dc->optimized_required |= dc->wm_optimized_required; 2449 } 2450 2451 return type; 2452} 2453 2454static struct dc_stream_status *stream_get_status( 2455 struct dc_state *ctx, 2456 struct dc_stream_state *stream) 2457{ 2458 uint8_t i; 2459 2460 for (i = 0; i < ctx->stream_count; i++) { 2461 if (stream == ctx->streams[i]) { 2462 return &ctx->stream_status[i]; 2463 } 2464 } 2465 2466 return NULL; 2467} 2468 2469static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL; 2470 2471static void copy_surface_update_to_plane( 2472 struct dc_plane_state *surface, 2473 struct dc_surface_update *srf_update) 2474{ 2475 if (srf_update->flip_addr) { 2476 surface->address = srf_update->flip_addr->address; 2477 surface->flip_immediate = 2478 srf_update->flip_addr->flip_immediate; 2479 surface->time.time_elapsed_in_us[surface->time.index] = 2480 srf_update->flip_addr->flip_timestamp_in_us - 2481 surface->time.prev_update_time_in_us; 2482 surface->time.prev_update_time_in_us = 2483 srf_update->flip_addr->flip_timestamp_in_us; 2484 surface->time.index++; 2485 if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX) 2486 surface->time.index = 0; 2487 2488 surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips; 2489 } 2490 2491 if (srf_update->scaling_info) { 2492 surface->scaling_quality = 2493 srf_update->scaling_info->scaling_quality; 2494 surface->dst_rect = 2495 srf_update->scaling_info->dst_rect; 2496 surface->src_rect = 2497 srf_update->scaling_info->src_rect; 2498 surface->clip_rect = 2499 srf_update->scaling_info->clip_rect; 2500 } 2501 2502 if (srf_update->plane_info) { 2503 surface->color_space = 2504 srf_update->plane_info->color_space; 2505 surface->format = 2506 srf_update->plane_info->format; 2507 surface->plane_size = 2508 srf_update->plane_info->plane_size; 2509 surface->rotation = 2510 srf_update->plane_info->rotation; 2511 surface->horizontal_mirror = 2512 srf_update->plane_info->horizontal_mirror; 2513 surface->stereo_format = 2514 srf_update->plane_info->stereo_format; 2515 surface->tiling_info = 2516 srf_update->plane_info->tiling_info; 2517 surface->visible = 2518 srf_update->plane_info->visible; 2519 surface->per_pixel_alpha = 2520 srf_update->plane_info->per_pixel_alpha; 2521 surface->global_alpha = 2522 srf_update->plane_info->global_alpha; 2523 surface->global_alpha_value = 2524 srf_update->plane_info->global_alpha_value; 2525 surface->dcc = 2526 srf_update->plane_info->dcc; 2527 surface->layer_index = 2528 srf_update->plane_info->layer_index; 2529 } 2530 2531 if (srf_update->gamma && 2532 (surface->gamma_correction != 2533 srf_update->gamma)) { 2534 memcpy(&surface->gamma_correction->entries, 2535 &srf_update->gamma->entries, 2536 sizeof(struct dc_gamma_entries)); 2537 surface->gamma_correction->is_identity = 2538 srf_update->gamma->is_identity; 2539 surface->gamma_correction->num_entries = 2540 srf_update->gamma->num_entries; 2541 surface->gamma_correction->type = 2542 srf_update->gamma->type; 2543 } 2544 2545 if (srf_update->in_transfer_func && 2546 (surface->in_transfer_func != 2547 srf_update->in_transfer_func)) { 2548 surface->in_transfer_func->sdr_ref_white_level = 2549 srf_update->in_transfer_func->sdr_ref_white_level; 2550 surface->in_transfer_func->tf = 2551 srf_update->in_transfer_func->tf; 2552 surface->in_transfer_func->type = 2553 srf_update->in_transfer_func->type; 2554 memcpy(&surface->in_transfer_func->tf_pts, 2555 &srf_update->in_transfer_func->tf_pts, 2556 sizeof(struct dc_transfer_func_distributed_points)); 2557 } 2558 2559 if (srf_update->func_shaper && 2560 (surface->in_shaper_func != 2561 srf_update->func_shaper)) 2562 memcpy(surface->in_shaper_func, srf_update->func_shaper, 2563 sizeof(*surface->in_shaper_func)); 2564 2565 if (srf_update->lut3d_func && 2566 (surface->lut3d_func != 2567 srf_update->lut3d_func)) 2568 memcpy(surface->lut3d_func, srf_update->lut3d_func, 2569 sizeof(*surface->lut3d_func)); 2570 2571 if (srf_update->hdr_mult.value) 2572 surface->hdr_mult = 2573 srf_update->hdr_mult; 2574 2575 if (srf_update->blend_tf && 2576 (surface->blend_tf != 2577 srf_update->blend_tf)) 2578 memcpy(surface->blend_tf, srf_update->blend_tf, 2579 sizeof(*surface->blend_tf)); 2580 2581 if (srf_update->input_csc_color_matrix) 2582 surface->input_csc_color_matrix = 2583 *srf_update->input_csc_color_matrix; 2584 2585 if (srf_update->coeff_reduction_factor) 2586 surface->coeff_reduction_factor = 2587 *srf_update->coeff_reduction_factor; 2588 2589 if (srf_update->gamut_remap_matrix) 2590 surface->gamut_remap_matrix = 2591 *srf_update->gamut_remap_matrix; 2592} 2593 2594static void copy_stream_update_to_stream(struct dc *dc, 2595 struct dc_state *context, 2596 struct dc_stream_state *stream, 2597 struct dc_stream_update *update) 2598{ 2599 struct dc_context *dc_ctx = dc->ctx; 2600 2601 if (update == NULL || stream == NULL) 2602 return; 2603 2604 if (update->src.height && update->src.width) 2605 stream->src = update->src; 2606 2607 if (update->dst.height && update->dst.width) 2608 stream->dst = update->dst; 2609 2610 if (update->out_transfer_func && 2611 stream->out_transfer_func != update->out_transfer_func) { 2612 stream->out_transfer_func->sdr_ref_white_level = 2613 update->out_transfer_func->sdr_ref_white_level; 2614 stream->out_transfer_func->tf = update->out_transfer_func->tf; 2615 stream->out_transfer_func->type = 2616 update->out_transfer_func->type; 2617 memcpy(&stream->out_transfer_func->tf_pts, 2618 &update->out_transfer_func->tf_pts, 2619 sizeof(struct dc_transfer_func_distributed_points)); 2620 } 2621 2622 if (update->hdr_static_metadata) 2623 stream->hdr_static_metadata = *update->hdr_static_metadata; 2624 2625 if (update->abm_level) 2626 stream->abm_level = *update->abm_level; 2627 2628 if (update->periodic_interrupt0) 2629 stream->periodic_interrupt0 = *update->periodic_interrupt0; 2630 2631 if (update->periodic_interrupt1) 2632 stream->periodic_interrupt1 = *update->periodic_interrupt1; 2633 2634 if (update->gamut_remap) 2635 stream->gamut_remap_matrix = *update->gamut_remap; 2636 2637 /* Note: this being updated after mode set is currently not a use case 2638 * however if it arises OCSC would need to be reprogrammed at the 2639 * minimum 2640 */ 2641 if (update->output_color_space) 2642 stream->output_color_space = *update->output_color_space; 2643 2644 if (update->output_csc_transform) 2645 stream->csc_color_matrix = *update->output_csc_transform; 2646 2647 if (update->vrr_infopacket) 2648 stream->vrr_infopacket = *update->vrr_infopacket; 2649 2650 if (update->crtc_timing_adjust) 2651 stream->adjust = *update->crtc_timing_adjust; 2652 2653 if (update->dpms_off) 2654 stream->dpms_off = *update->dpms_off; 2655 2656 if (update->vsc_infopacket) 2657 stream->vsc_infopacket = *update->vsc_infopacket; 2658 2659 if (update->vsp_infopacket) 2660 stream->vsp_infopacket = *update->vsp_infopacket; 2661 2662 if (update->dither_option) 2663 stream->dither_option = *update->dither_option; 2664 2665 if (update->pending_test_pattern) 2666 stream->test_pattern = *update->pending_test_pattern; 2667 /* update current stream with writeback info */ 2668 if (update->wb_update) { 2669 int i; 2670 2671 stream->num_wb_info = update->wb_update->num_wb_info; 2672 ASSERT(stream->num_wb_info <= MAX_DWB_PIPES); 2673 for (i = 0; i < stream->num_wb_info; i++) 2674 stream->writeback_info[i] = 2675 update->wb_update->writeback_info[i]; 2676 } 2677 if (update->dsc_config) { 2678 struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg; 2679 uint32_t old_dsc_enabled = stream->timing.flags.DSC; 2680 uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 && 2681 update->dsc_config->num_slices_v != 0); 2682 2683 /* Use temporarry context for validating new DSC config */ 2684 struct dc_state *dsc_validate_context = dc_create_state(dc); 2685 2686 if (dsc_validate_context) { 2687 dc_resource_state_copy_construct(dc->current_state, dsc_validate_context); 2688 2689 stream->timing.dsc_cfg = *update->dsc_config; 2690 stream->timing.flags.DSC = enable_dsc; 2691 if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) { 2692 stream->timing.dsc_cfg = old_dsc_cfg; 2693 stream->timing.flags.DSC = old_dsc_enabled; 2694 update->dsc_config = NULL; 2695 } 2696 2697 dc_release_state(dsc_validate_context); 2698 } else { 2699 DC_ERROR("Failed to allocate new validate context for DSC change\n"); 2700 update->dsc_config = NULL; 2701 } 2702 } 2703} 2704 2705static void commit_planes_do_stream_update(struct dc *dc, 2706 struct dc_stream_state *stream, 2707 struct dc_stream_update *stream_update, 2708 enum surface_update_type update_type, 2709 struct dc_state *context) 2710{ 2711 int j; 2712 2713 // Stream updates 2714 for (j = 0; j < dc->res_pool->pipe_count; j++) { 2715 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 2716 2717 if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->stream == stream) { 2718 2719 if (stream_update->periodic_interrupt0 && 2720 dc->hwss.setup_periodic_interrupt) 2721 dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE0); 2722 2723 if (stream_update->periodic_interrupt1 && 2724 dc->hwss.setup_periodic_interrupt) 2725 dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE1); 2726 2727 if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) || 2728 stream_update->vrr_infopacket || 2729 stream_update->vsc_infopacket || 2730 stream_update->vsp_infopacket) { 2731 resource_build_info_frame(pipe_ctx); 2732 dc->hwss.update_info_frame(pipe_ctx); 2733 2734 if (dc_is_dp_signal(pipe_ctx->stream->signal)) 2735 dp_source_sequence_trace(pipe_ctx->stream->link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME); 2736 } 2737 2738 if (stream_update->hdr_static_metadata && 2739 stream->use_dynamic_meta && 2740 dc->hwss.set_dmdata_attributes && 2741 pipe_ctx->stream->dmdata_address.quad_part != 0) 2742 dc->hwss.set_dmdata_attributes(pipe_ctx); 2743 2744 if (stream_update->gamut_remap) 2745 dc_stream_set_gamut_remap(dc, stream); 2746 2747 if (stream_update->output_csc_transform) 2748 dc_stream_program_csc_matrix(dc, stream); 2749 2750 if (stream_update->dither_option) { 2751 struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe; 2752 resource_build_bit_depth_reduction_params(pipe_ctx->stream, 2753 &pipe_ctx->stream->bit_depth_params); 2754 pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp, 2755 &stream->bit_depth_params, 2756 &stream->clamping); 2757 while (odm_pipe) { 2758 odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp, 2759 &stream->bit_depth_params, 2760 &stream->clamping); 2761 odm_pipe = odm_pipe->next_odm_pipe; 2762 } 2763 } 2764 2765 2766 /* Full fe update*/ 2767 if (update_type == UPDATE_TYPE_FAST) 2768 continue; 2769 2770 if (stream_update->dsc_config) 2771 dp_update_dsc_config(pipe_ctx); 2772 2773 if (stream_update->mst_bw_update) { 2774 if (stream_update->mst_bw_update->is_increase) 2775 dc_link_increase_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw); 2776 else 2777 dc_link_reduce_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw); 2778 } 2779 2780 if (stream_update->pending_test_pattern) { 2781 dc_link_dp_set_test_pattern(stream->link, 2782 stream->test_pattern.type, 2783 stream->test_pattern.color_space, 2784 stream->test_pattern.p_link_settings, 2785 stream->test_pattern.p_custom_pattern, 2786 stream->test_pattern.cust_pattern_size); 2787 } 2788 2789 if (stream_update->dpms_off) { 2790 if (*stream_update->dpms_off) { 2791 core_link_disable_stream(pipe_ctx); 2792 /* for dpms, keep acquired resources*/ 2793 if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only) 2794 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio); 2795 2796 dc->optimized_required = true; 2797 2798 } else { 2799 if (get_seamless_boot_stream_count(context) == 0) 2800 dc->hwss.prepare_bandwidth(dc, dc->current_state); 2801 2802 core_link_enable_stream(dc->current_state, pipe_ctx); 2803 } 2804 } 2805 2806 if (stream_update->abm_level && pipe_ctx->stream_res.abm) { 2807 bool should_program_abm = true; 2808 2809 // if otg funcs defined check if blanked before programming 2810 if (pipe_ctx->stream_res.tg->funcs->is_blanked) 2811 if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) 2812 should_program_abm = false; 2813 2814 if (should_program_abm) { 2815 if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) { 2816 dc->hwss.set_abm_immediate_disable(pipe_ctx); 2817 } else { 2818 pipe_ctx->stream_res.abm->funcs->set_abm_level( 2819 pipe_ctx->stream_res.abm, stream->abm_level); 2820 } 2821 } 2822 } 2823 } 2824 } 2825} 2826 2827static void commit_planes_for_stream(struct dc *dc, 2828 struct dc_surface_update *srf_updates, 2829 int surface_count, 2830 struct dc_stream_state *stream, 2831 struct dc_stream_update *stream_update, 2832 enum surface_update_type update_type, 2833 struct dc_state *context) 2834{ 2835 int i, j; 2836 struct pipe_ctx *top_pipe_to_program = NULL; 2837 bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST); 2838 2839 dc_z10_restore(dc); 2840 2841 if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) { 2842 /* Optimize seamless boot flag keeps clocks and watermarks high until 2843 * first flip. After first flip, optimization is required to lower 2844 * bandwidth. Important to note that it is expected UEFI will 2845 * only light up a single display on POST, therefore we only expect 2846 * one stream with seamless boot flag set. 2847 */ 2848 if (stream->apply_seamless_boot_optimization) { 2849 stream->apply_seamless_boot_optimization = false; 2850 2851 if (get_seamless_boot_stream_count(context) == 0) 2852 dc->optimized_required = true; 2853 } 2854 } 2855 2856 if (update_type == UPDATE_TYPE_FULL) { 2857 dc_allow_idle_optimizations(dc, false); 2858 2859 if (get_seamless_boot_stream_count(context) == 0) 2860 dc->hwss.prepare_bandwidth(dc, context); 2861 2862 context_clock_trace(dc, context); 2863 } 2864 2865 for (j = 0; j < dc->res_pool->pipe_count; j++) { 2866 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 2867 2868 if (!pipe_ctx->top_pipe && 2869 !pipe_ctx->prev_odm_pipe && 2870 pipe_ctx->stream && 2871 pipe_ctx->stream == stream) { 2872 top_pipe_to_program = pipe_ctx; 2873 } 2874 } 2875 2876 if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) { 2877 struct pipe_ctx *mpcc_pipe; 2878 struct pipe_ctx *odm_pipe; 2879 2880 for (mpcc_pipe = top_pipe_to_program; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe) 2881 for (odm_pipe = mpcc_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) 2882 odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU; 2883 } 2884 2885 if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) 2886 if (top_pipe_to_program && 2887 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) { 2888 if (should_use_dmub_lock(stream->link)) { 2889 union dmub_hw_lock_flags hw_locks = { 0 }; 2890 struct dmub_hw_lock_inst_flags inst_flags = { 0 }; 2891 2892 hw_locks.bits.lock_dig = 1; 2893 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst; 2894 2895 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv, 2896 true, 2897 &hw_locks, 2898 &inst_flags); 2899 } else 2900 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable( 2901 top_pipe_to_program->stream_res.tg); 2902 } 2903 2904 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { 2905 dc->hwss.interdependent_update_lock(dc, context, true); 2906 } else { 2907 /* Lock the top pipe while updating plane addrs, since freesync requires 2908 * plane addr update event triggers to be synchronized. 2909 * top_pipe_to_program is expected to never be NULL 2910 */ 2911 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true); 2912 } 2913 2914 // Stream updates 2915 if (stream_update) 2916 commit_planes_do_stream_update(dc, stream, stream_update, update_type, context); 2917 2918 if (surface_count == 0) { 2919 /* 2920 * In case of turning off screen, no need to program front end a second time. 2921 * just return after program blank. 2922 */ 2923 if (dc->hwss.apply_ctx_for_surface) 2924 dc->hwss.apply_ctx_for_surface(dc, stream, 0, context); 2925 if (dc->hwss.program_front_end_for_ctx) 2926 dc->hwss.program_front_end_for_ctx(dc, context); 2927 2928 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { 2929 dc->hwss.interdependent_update_lock(dc, context, false); 2930 } else { 2931 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false); 2932 } 2933 dc->hwss.post_unlock_program_front_end(dc, context); 2934 return; 2935 } 2936 2937 if (!IS_DIAG_DC(dc->ctx->dce_environment)) { 2938 for (i = 0; i < surface_count; i++) { 2939 struct dc_plane_state *plane_state = srf_updates[i].surface; 2940 /*set logical flag for lock/unlock use*/ 2941 for (j = 0; j < dc->res_pool->pipe_count; j++) { 2942 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 2943 if (!pipe_ctx->plane_state) 2944 continue; 2945 if (should_update_pipe_for_plane(context, pipe_ctx, plane_state)) 2946 continue; 2947 pipe_ctx->plane_state->triplebuffer_flips = false; 2948 if (update_type == UPDATE_TYPE_FAST && 2949 dc->hwss.program_triplebuffer != NULL && 2950 !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) { 2951 /*triple buffer for VUpdate only*/ 2952 pipe_ctx->plane_state->triplebuffer_flips = true; 2953 } 2954 } 2955 if (update_type == UPDATE_TYPE_FULL) { 2956 /* force vsync flip when reconfiguring pipes to prevent underflow */ 2957 plane_state->flip_immediate = false; 2958 } 2959 } 2960 } 2961 2962 // Update Type FULL, Surface updates 2963 for (j = 0; j < dc->res_pool->pipe_count; j++) { 2964 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 2965 2966 if (!pipe_ctx->top_pipe && 2967 !pipe_ctx->prev_odm_pipe && 2968 should_update_pipe_for_stream(context, pipe_ctx, stream)) { 2969 struct dc_stream_status *stream_status = NULL; 2970 2971 if (!pipe_ctx->plane_state) 2972 continue; 2973 2974 /* Full fe update*/ 2975 if (update_type == UPDATE_TYPE_FAST) 2976 continue; 2977 2978 ASSERT(!pipe_ctx->plane_state->triplebuffer_flips); 2979 2980 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) { 2981 /*turn off triple buffer for full update*/ 2982 dc->hwss.program_triplebuffer( 2983 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips); 2984 } 2985 stream_status = 2986 stream_get_status(context, pipe_ctx->stream); 2987 2988 if (dc->hwss.apply_ctx_for_surface) 2989 dc->hwss.apply_ctx_for_surface( 2990 dc, pipe_ctx->stream, stream_status->plane_count, context); 2991 } 2992 } 2993 if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) { 2994 dc->hwss.program_front_end_for_ctx(dc, context); 2995 if (dc->debug.validate_dml_output) { 2996 for (i = 0; i < dc->res_pool->pipe_count; i++) { 2997 struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i]; 2998 if (cur_pipe->stream == NULL) 2999 continue; 3000 3001 cur_pipe->plane_res.hubp->funcs->validate_dml_output( 3002 cur_pipe->plane_res.hubp, dc->ctx, 3003 &context->res_ctx.pipe_ctx[i].rq_regs, 3004 &context->res_ctx.pipe_ctx[i].dlg_regs, 3005 &context->res_ctx.pipe_ctx[i].ttu_regs); 3006 } 3007 } 3008 } 3009 3010 // Update Type FAST, Surface updates 3011 if (update_type == UPDATE_TYPE_FAST) { 3012 if (dc->hwss.set_flip_control_gsl) 3013 for (i = 0; i < surface_count; i++) { 3014 struct dc_plane_state *plane_state = srf_updates[i].surface; 3015 3016 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3017 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3018 3019 if (!should_update_pipe_for_stream(context, pipe_ctx, stream)) 3020 continue; 3021 3022 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state)) 3023 continue; 3024 3025 // GSL has to be used for flip immediate 3026 dc->hwss.set_flip_control_gsl(pipe_ctx, 3027 pipe_ctx->plane_state->flip_immediate); 3028 } 3029 } 3030 3031 /* Perform requested Updates */ 3032 for (i = 0; i < surface_count; i++) { 3033 struct dc_plane_state *plane_state = srf_updates[i].surface; 3034 3035 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3036 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3037 3038 if (!should_update_pipe_for_stream(context, pipe_ctx, stream)) 3039 continue; 3040 3041 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state)) 3042 continue; 3043 3044 /*program triple buffer after lock based on flip type*/ 3045 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) { 3046 /*only enable triplebuffer for fast_update*/ 3047 dc->hwss.program_triplebuffer( 3048 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips); 3049 } 3050 if (pipe_ctx->plane_state->update_flags.bits.addr_update) 3051 dc->hwss.update_plane_addr(dc, pipe_ctx); 3052 } 3053 } 3054 3055 } 3056 3057 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) { 3058 dc->hwss.interdependent_update_lock(dc, context, false); 3059 } else { 3060 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false); 3061 } 3062 3063 if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) 3064 if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) { 3065 top_pipe_to_program->stream_res.tg->funcs->wait_for_state( 3066 top_pipe_to_program->stream_res.tg, 3067 CRTC_STATE_VACTIVE); 3068 top_pipe_to_program->stream_res.tg->funcs->wait_for_state( 3069 top_pipe_to_program->stream_res.tg, 3070 CRTC_STATE_VBLANK); 3071 top_pipe_to_program->stream_res.tg->funcs->wait_for_state( 3072 top_pipe_to_program->stream_res.tg, 3073 CRTC_STATE_VACTIVE); 3074 3075 if (stream && should_use_dmub_lock(stream->link)) { 3076 union dmub_hw_lock_flags hw_locks = { 0 }; 3077 struct dmub_hw_lock_inst_flags inst_flags = { 0 }; 3078 3079 hw_locks.bits.lock_dig = 1; 3080 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst; 3081 3082 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv, 3083 false, 3084 &hw_locks, 3085 &inst_flags); 3086 } else 3087 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable( 3088 top_pipe_to_program->stream_res.tg); 3089 } 3090 3091 if (update_type != UPDATE_TYPE_FAST) 3092 dc->hwss.post_unlock_program_front_end(dc, context); 3093 3094 // Fire manual trigger only when bottom plane is flipped 3095 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3096 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 3097 3098 if (!pipe_ctx->plane_state) 3099 continue; 3100 3101 if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe || 3102 !pipe_ctx->stream || !should_update_pipe_for_stream(context, pipe_ctx, stream) || 3103 !pipe_ctx->plane_state->update_flags.bits.addr_update || 3104 pipe_ctx->plane_state->skip_manual_trigger) 3105 continue; 3106 3107 if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger) 3108 pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg); 3109 } 3110} 3111 3112void dc_commit_updates_for_stream(struct dc *dc, 3113 struct dc_surface_update *srf_updates, 3114 int surface_count, 3115 struct dc_stream_state *stream, 3116 struct dc_stream_update *stream_update, 3117 struct dc_state *state) 3118{ 3119 const struct dc_stream_status *stream_status; 3120 enum surface_update_type update_type; 3121 struct dc_state *context; 3122 struct dc_context *dc_ctx = dc->ctx; 3123 int i, j; 3124 3125 stream_status = dc_stream_get_status(stream); 3126 context = dc->current_state; 3127 3128 update_type = dc_check_update_surfaces_for_stream( 3129 dc, srf_updates, surface_count, stream_update, stream_status); 3130 3131 if (update_type >= update_surface_trace_level) 3132 update_surface_trace(dc, srf_updates, surface_count); 3133 3134 3135 if (update_type >= UPDATE_TYPE_FULL) { 3136 3137 /* initialize scratch memory for building context */ 3138 context = dc_create_state(dc); 3139 if (context == NULL) { 3140 DC_ERROR("Failed to allocate new validate context!\n"); 3141 return; 3142 } 3143 3144 dc_resource_state_copy_construct(state, context); 3145 3146 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3147 struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; 3148 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 3149 3150 if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state) 3151 new_pipe->plane_state->force_full_update = true; 3152 } 3153 } else if (update_type == UPDATE_TYPE_FAST && dc_ctx->dce_version >= DCE_VERSION_MAX) { 3154 /* 3155 * Previous frame finished and HW is ready for optimization. 3156 * 3157 * Only relevant for DCN behavior where we can guarantee the optimization 3158 * is safe to apply - retain the legacy behavior for DCE. 3159 */ 3160 dc_post_update_surfaces_to_stream(dc); 3161 } 3162 3163 3164 for (i = 0; i < surface_count; i++) { 3165 struct dc_plane_state *surface = srf_updates[i].surface; 3166 3167 copy_surface_update_to_plane(surface, &srf_updates[i]); 3168 3169 if (update_type >= UPDATE_TYPE_MED) { 3170 for (j = 0; j < dc->res_pool->pipe_count; j++) { 3171 struct pipe_ctx *pipe_ctx = 3172 &context->res_ctx.pipe_ctx[j]; 3173 3174 if (pipe_ctx->plane_state != surface) 3175 continue; 3176 3177 resource_build_scaling_params(pipe_ctx); 3178 } 3179 } 3180 } 3181 3182 copy_stream_update_to_stream(dc, context, stream, stream_update); 3183 3184 if (update_type >= UPDATE_TYPE_FULL) { 3185 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) { 3186 DC_ERROR("Mode validation failed for stream update!\n"); 3187 dc_release_state(context); 3188 return; 3189 } 3190 } 3191 3192 TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES); 3193 3194 commit_planes_for_stream( 3195 dc, 3196 srf_updates, 3197 surface_count, 3198 stream, 3199 stream_update, 3200 update_type, 3201 context); 3202 /*update current_State*/ 3203 if (dc->current_state != context) { 3204 3205 struct dc_state *old = dc->current_state; 3206 3207 dc->current_state = context; 3208 dc_release_state(old); 3209 3210 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3211 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 3212 3213 if (pipe_ctx->plane_state && pipe_ctx->stream == stream) 3214 pipe_ctx->plane_state->force_full_update = false; 3215 } 3216 } 3217 3218 /* Legacy optimization path for DCE. */ 3219 if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) { 3220 dc_post_update_surfaces_to_stream(dc); 3221 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce); 3222 } 3223 3224 return; 3225 3226} 3227 3228uint8_t dc_get_current_stream_count(struct dc *dc) 3229{ 3230 return dc->current_state->stream_count; 3231} 3232 3233struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i) 3234{ 3235 if (i < dc->current_state->stream_count) 3236 return dc->current_state->streams[i]; 3237 return NULL; 3238} 3239 3240struct dc_stream_state *dc_stream_find_from_link(const struct dc_link *link) 3241{ 3242 uint8_t i; 3243 struct dc_context *ctx = link->ctx; 3244 3245 for (i = 0; i < ctx->dc->current_state->stream_count; i++) { 3246 if (ctx->dc->current_state->streams[i]->link == link) 3247 return ctx->dc->current_state->streams[i]; 3248 } 3249 3250 return NULL; 3251} 3252 3253enum dc_irq_source dc_interrupt_to_irq_source( 3254 struct dc *dc, 3255 uint32_t src_id, 3256 uint32_t ext_id) 3257{ 3258 return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id); 3259} 3260 3261/* 3262 * dc_interrupt_set() - Enable/disable an AMD hw interrupt source 3263 */ 3264bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable) 3265{ 3266 3267 if (dc == NULL) 3268 return false; 3269 3270 return dal_irq_service_set(dc->res_pool->irqs, src, enable); 3271} 3272 3273void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src) 3274{ 3275 dal_irq_service_ack(dc->res_pool->irqs, src); 3276} 3277 3278void dc_power_down_on_boot(struct dc *dc) 3279{ 3280 if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW && 3281 dc->hwss.power_down_on_boot) 3282 dc->hwss.power_down_on_boot(dc); 3283} 3284 3285void dc_set_power_state( 3286 struct dc *dc, 3287 enum dc_acpi_cm_power_state power_state) 3288{ 3289 struct kref refcount; 3290 struct display_mode_lib *dml; 3291 3292 if (!dc->current_state) 3293 return; 3294 3295 switch (power_state) { 3296 case DC_ACPI_CM_POWER_STATE_D0: 3297 dc_resource_state_construct(dc, dc->current_state); 3298 3299 dc_z10_restore(dc); 3300 3301 if (dc->ctx->dmub_srv) 3302 dc_dmub_srv_wait_phy_init(dc->ctx->dmub_srv); 3303 3304 dc->hwss.init_hw(dc); 3305 3306 if (dc->hwss.init_sys_ctx != NULL && 3307 dc->vm_pa_config.valid) { 3308 dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config); 3309 } 3310 3311 break; 3312 default: 3313 ASSERT(dc->current_state->stream_count == 0); 3314 /* Zero out the current context so that on resume we start with 3315 * clean state, and dc hw programming optimizations will not 3316 * cause any trouble. 3317 */ 3318 dml = kzalloc(sizeof(struct display_mode_lib), 3319 GFP_KERNEL); 3320 3321 ASSERT(dml); 3322 if (!dml) 3323 return; 3324 3325 /* Preserve refcount */ 3326 refcount = dc->current_state->refcount; 3327 /* Preserve display mode lib */ 3328 memcpy(dml, &dc->current_state->bw_ctx.dml, sizeof(struct display_mode_lib)); 3329 3330 dc_resource_state_destruct(dc->current_state); 3331 memset(dc->current_state, 0, 3332 sizeof(*dc->current_state)); 3333 3334 dc->current_state->refcount = refcount; 3335 dc->current_state->bw_ctx.dml = *dml; 3336 3337 kfree(dml); 3338 3339 break; 3340 } 3341} 3342 3343void dc_resume(struct dc *dc) 3344{ 3345 uint32_t i; 3346 3347 for (i = 0; i < dc->link_count; i++) 3348 core_link_resume(dc->links[i]); 3349} 3350 3351bool dc_is_dmcu_initialized(struct dc *dc) 3352{ 3353 struct dmcu *dmcu = dc->res_pool->dmcu; 3354 3355 if (dmcu) 3356 return dmcu->funcs->is_dmcu_initialized(dmcu); 3357 return false; 3358} 3359 3360bool dc_is_oem_i2c_device_present( 3361 struct dc *dc, 3362 size_t slave_address) 3363{ 3364 if (dc->res_pool->oem_device) 3365 return dce_i2c_oem_device_present( 3366 dc->res_pool, 3367 dc->res_pool->oem_device, 3368 slave_address); 3369 3370 return false; 3371} 3372 3373bool dc_submit_i2c( 3374 struct dc *dc, 3375 uint32_t link_index, 3376 struct i2c_command *cmd) 3377{ 3378 3379 struct dc_link *link = dc->links[link_index]; 3380 struct ddc_service *ddc = link->ddc; 3381 return dce_i2c_submit_command( 3382 dc->res_pool, 3383 ddc->ddc_pin, 3384 cmd); 3385} 3386 3387bool dc_submit_i2c_oem( 3388 struct dc *dc, 3389 struct i2c_command *cmd) 3390{ 3391 struct ddc_service *ddc = dc->res_pool->oem_device; 3392 return dce_i2c_submit_command( 3393 dc->res_pool, 3394 ddc->ddc_pin, 3395 cmd); 3396} 3397 3398static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink) 3399{ 3400 if (dc_link->sink_count >= MAX_SINKS_PER_LINK) { 3401 BREAK_TO_DEBUGGER(); 3402 return false; 3403 } 3404 3405 dc_sink_retain(sink); 3406 3407 dc_link->remote_sinks[dc_link->sink_count] = sink; 3408 dc_link->sink_count++; 3409 3410 return true; 3411} 3412 3413/* 3414 * dc_link_add_remote_sink() - Create a sink and attach it to an existing link 3415 * 3416 * EDID length is in bytes 3417 */ 3418struct dc_sink *dc_link_add_remote_sink( 3419 struct dc_link *link, 3420 const uint8_t *edid, 3421 int len, 3422 struct dc_sink_init_data *init_data) 3423{ 3424 struct dc_sink *dc_sink; 3425 enum dc_edid_status edid_status; 3426 3427 if (len > DC_MAX_EDID_BUFFER_SIZE) { 3428 dm_error("Max EDID buffer size breached!\n"); 3429 return NULL; 3430 } 3431 3432 if (!init_data) { 3433 BREAK_TO_DEBUGGER(); 3434 return NULL; 3435 } 3436 3437 if (!init_data->link) { 3438 BREAK_TO_DEBUGGER(); 3439 return NULL; 3440 } 3441 3442 dc_sink = dc_sink_create(init_data); 3443 3444 if (!dc_sink) 3445 return NULL; 3446 3447 memmove(dc_sink->dc_edid.raw_edid, edid, len); 3448 dc_sink->dc_edid.length = len; 3449 3450 if (!link_add_remote_sink_helper( 3451 link, 3452 dc_sink)) 3453 goto fail_add_sink; 3454 3455 edid_status = dm_helpers_parse_edid_caps( 3456 link, 3457 &dc_sink->dc_edid, 3458 &dc_sink->edid_caps); 3459 3460 /* 3461 * Treat device as no EDID device if EDID 3462 * parsing fails 3463 */ 3464 if (edid_status != EDID_OK) { 3465 dc_sink->dc_edid.length = 0; 3466 dm_error("Bad EDID, status%d!\n", edid_status); 3467 } 3468 3469 return dc_sink; 3470 3471fail_add_sink: 3472 dc_sink_release(dc_sink); 3473 return NULL; 3474} 3475 3476/* 3477 * dc_link_remove_remote_sink() - Remove a remote sink from a dc_link 3478 * 3479 * Note that this just removes the struct dc_sink - it doesn't 3480 * program hardware or alter other members of dc_link 3481 */ 3482void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink) 3483{ 3484 int i; 3485 3486 if (!link->sink_count) { 3487 BREAK_TO_DEBUGGER(); 3488 return; 3489 } 3490 3491 for (i = 0; i < link->sink_count; i++) { 3492 if (link->remote_sinks[i] == sink) { 3493 dc_sink_release(sink); 3494 link->remote_sinks[i] = NULL; 3495 3496 /* shrink array to remove empty place */ 3497 while (i < link->sink_count - 1) { 3498 link->remote_sinks[i] = link->remote_sinks[i+1]; 3499 i++; 3500 } 3501 link->remote_sinks[i] = NULL; 3502 link->sink_count--; 3503 return; 3504 } 3505 } 3506} 3507 3508void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info) 3509{ 3510 info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz; 3511 info->engineClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz; 3512 info->memoryClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz; 3513 info->maxSupportedDppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz; 3514 info->dppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz; 3515 info->socClock = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz; 3516 info->dcfClockDeepSleep = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz; 3517 info->fClock = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz; 3518 info->phyClock = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz; 3519} 3520enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping) 3521{ 3522 if (dc->hwss.set_clock) 3523 return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping); 3524 return DC_ERROR_UNEXPECTED; 3525} 3526void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg) 3527{ 3528 if (dc->hwss.get_clock) 3529 dc->hwss.get_clock(dc, clock_type, clock_cfg); 3530} 3531 3532/* enable/disable eDP PSR without specify stream for eDP */ 3533bool dc_set_psr_allow_active(struct dc *dc, bool enable) 3534{ 3535 int i; 3536 bool allow_active; 3537 3538 for (i = 0; i < dc->current_state->stream_count ; i++) { 3539 struct dc_link *link; 3540 struct dc_stream_state *stream = dc->current_state->streams[i]; 3541 3542 link = stream->link; 3543 if (!link) 3544 continue; 3545 3546 if (link->psr_settings.psr_feature_enabled) { 3547 if (enable && !link->psr_settings.psr_allow_active) { 3548 allow_active = true; 3549 if (!dc_link_set_psr_allow_active(link, &allow_active, false, false, NULL)) 3550 return false; 3551 } else if (!enable && link->psr_settings.psr_allow_active) { 3552 allow_active = false; 3553 if (!dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL)) 3554 return false; 3555 } 3556 } 3557 } 3558 3559 return true; 3560} 3561 3562void dc_allow_idle_optimizations(struct dc *dc, bool allow) 3563{ 3564 if (dc->debug.disable_idle_power_optimizations) 3565 return; 3566 3567 if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->is_smu_present) 3568 if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr)) 3569 return; 3570 3571 if (allow == dc->idle_optimizations_allowed) 3572 return; 3573 3574 if (dc->hwss.apply_idle_power_optimizations && dc->hwss.apply_idle_power_optimizations(dc, allow)) 3575 dc->idle_optimizations_allowed = allow; 3576} 3577 3578/* 3579 * blank all streams, and set min and max memory clock to 3580 * lowest and highest DPM level, respectively 3581 */ 3582void dc_unlock_memory_clock_frequency(struct dc *dc) 3583{ 3584 unsigned int i; 3585 3586 for (i = 0; i < MAX_PIPES; i++) 3587 if (dc->current_state->res_ctx.pipe_ctx[i].plane_state) 3588 core_link_disable_stream(&dc->current_state->res_ctx.pipe_ctx[i]); 3589 3590 dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, false); 3591 dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr); 3592} 3593 3594/* 3595 * set min memory clock to the min required for current mode, 3596 * max to maxDPM, and unblank streams 3597 */ 3598void dc_lock_memory_clock_frequency(struct dc *dc) 3599{ 3600 unsigned int i; 3601 3602 dc->clk_mgr->funcs->get_memclk_states_from_smu(dc->clk_mgr); 3603 dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, true); 3604 dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr); 3605 3606 for (i = 0; i < MAX_PIPES; i++) 3607 if (dc->current_state->res_ctx.pipe_ctx[i].plane_state) 3608 core_link_enable_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]); 3609} 3610 3611static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memclk_mhz) 3612{ 3613 struct dc_state *context = dc->current_state; 3614 struct hubp *hubp; 3615 struct pipe_ctx *pipe; 3616 int i; 3617 3618 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3619 pipe = &context->res_ctx.pipe_ctx[i]; 3620 3621 if (pipe->stream != NULL) { 3622 dc->hwss.disable_pixel_data(dc, pipe, true); 3623 3624 // wait for double buffer 3625 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE); 3626 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK); 3627 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE); 3628 3629 hubp = pipe->plane_res.hubp; 3630 hubp->funcs->set_blank_regs(hubp, true); 3631 } 3632 } 3633 3634 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, memclk_mhz); 3635 dc->clk_mgr->funcs->set_min_memclk(dc->clk_mgr, memclk_mhz); 3636 3637 for (i = 0; i < dc->res_pool->pipe_count; i++) { 3638 pipe = &context->res_ctx.pipe_ctx[i]; 3639 3640 if (pipe->stream != NULL) { 3641 dc->hwss.disable_pixel_data(dc, pipe, false); 3642 3643 hubp = pipe->plane_res.hubp; 3644 hubp->funcs->set_blank_regs(hubp, false); 3645 } 3646 } 3647} 3648 3649 3650/** 3651 * dc_enable_dcmode_clk_limit() - lower clocks in dc (battery) mode 3652 * @dc: pointer to dc of the dm calling this 3653 * @enable: True = transition to DC mode, false = transition back to AC mode 3654 * 3655 * Some SoCs define additional clock limits when in DC mode, DM should 3656 * invoke this function when the platform undergoes a power source transition 3657 * so DC can apply/unapply the limit. This interface may be disruptive to 3658 * the onscreen content. 3659 * 3660 * Context: Triggered by OS through DM interface, or manually by escape calls. 3661 * Need to hold a dclock when doing so. 3662 * 3663 * Return: none (void function) 3664 * 3665 */ 3666void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable) 3667{ 3668 uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev; 3669 unsigned int softMax, maxDPM, funcMin; 3670 bool p_state_change_support; 3671 3672 if (!ASICREV_IS_BEIGE_GOBY_P(hw_internal_rev)) 3673 return; 3674 3675 softMax = dc->clk_mgr->bw_params->dc_mode_softmax_memclk; 3676 maxDPM = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz; 3677 funcMin = (dc->clk_mgr->clks.dramclk_khz + 999) / 1000; 3678 p_state_change_support = dc->clk_mgr->clks.p_state_change_support; 3679 3680 if (enable && !dc->clk_mgr->dc_mode_softmax_enabled) { 3681 if (p_state_change_support) { 3682 if (funcMin <= softMax) 3683 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, softMax); 3684 // else: No-Op 3685 } else { 3686 if (funcMin <= softMax) 3687 blank_and_force_memclk(dc, true, softMax); 3688 // else: No-Op 3689 } 3690 } else if (!enable && dc->clk_mgr->dc_mode_softmax_enabled) { 3691 if (p_state_change_support) { 3692 if (funcMin <= softMax) 3693 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, maxDPM); 3694 // else: No-Op 3695 } else { 3696 if (funcMin <= softMax) 3697 blank_and_force_memclk(dc, true, maxDPM); 3698 // else: No-Op 3699 } 3700 } 3701 dc->clk_mgr->dc_mode_softmax_enabled = enable; 3702} 3703bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane, 3704 struct dc_cursor_attributes *cursor_attr) 3705{ 3706 if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, plane, cursor_attr)) 3707 return true; 3708 return false; 3709} 3710 3711/* cleanup on driver unload */ 3712void dc_hardware_release(struct dc *dc) 3713{ 3714 if (dc->hwss.hardware_release) 3715 dc->hwss.hardware_release(dc); 3716} 3717 3718/* 3719 ***************************************************************************** 3720 * Function: dc_is_dmub_outbox_supported - 3721 * 3722 * @brief 3723 * Checks whether DMUB FW supports outbox notifications, if supported 3724 * DM should register outbox interrupt prior to actually enabling interrupts 3725 * via dc_enable_dmub_outbox 3726 * 3727 * @param 3728 * [in] dc: dc structure 3729 * 3730 * @return 3731 * True if DMUB FW supports outbox notifications, False otherwise 3732 ***************************************************************************** 3733 */ 3734bool dc_is_dmub_outbox_supported(struct dc *dc) 3735{ 3736 /* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */ 3737 if (dc->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP && 3738 dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 && 3739 !dc->debug.dpia_debug.bits.disable_dpia) 3740 return true; 3741 3742 /* dmub aux needs dmub notifications to be enabled */ 3743 return dc->debug.enable_dmub_aux_for_legacy_ddc; 3744} 3745 3746/* 3747 ***************************************************************************** 3748 * Function: dc_enable_dmub_notifications 3749 * 3750 * @brief 3751 * Calls dc_is_dmub_outbox_supported to check if dmub fw supports outbox 3752 * notifications. All DMs shall switch to dc_is_dmub_outbox_supported. 3753 * This API shall be removed after switching. 3754 * 3755 * @param 3756 * [in] dc: dc structure 3757 * 3758 * @return 3759 * True if DMUB FW supports outbox notifications, False otherwise 3760 ***************************************************************************** 3761 */ 3762bool dc_enable_dmub_notifications(struct dc *dc) 3763{ 3764 return dc_is_dmub_outbox_supported(dc); 3765} 3766 3767/** 3768 ***************************************************************************** 3769 * Function: dc_enable_dmub_outbox 3770 * 3771 * @brief 3772 * Enables DMUB unsolicited notifications to x86 via outbox 3773 * 3774 * @param 3775 * [in] dc: dc structure 3776 * 3777 * @return 3778 * None 3779 ***************************************************************************** 3780 */ 3781void dc_enable_dmub_outbox(struct dc *dc) 3782{ 3783 struct dc_context *dc_ctx = dc->ctx; 3784 3785 dmub_enable_outbox_notification(dc_ctx->dmub_srv); 3786} 3787 3788/** 3789 * dc_process_dmub_aux_transfer_async - Submits aux command to dmub via inbox message 3790 * Sets port index appropriately for legacy DDC 3791 * @dc: dc structure 3792 * @link_index: link index 3793 * @payload: aux payload 3794 * 3795 * Returns: True if successful, False if failure 3796 */ 3797bool dc_process_dmub_aux_transfer_async(struct dc *dc, 3798 uint32_t link_index, 3799 struct aux_payload *payload) 3800{ 3801 uint8_t action; 3802 union dmub_rb_cmd cmd = {0}; 3803 struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv; 3804 3805 ASSERT(payload->length <= 16); 3806 3807 cmd.dp_aux_access.header.type = DMUB_CMD__DP_AUX_ACCESS; 3808 cmd.dp_aux_access.header.payload_bytes = 0; 3809 /* For dpia, ddc_pin is set to NULL */ 3810 if (!dc->links[link_index]->ddc->ddc_pin) 3811 cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_DPIA; 3812 else 3813 cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_LEGACY_DDC; 3814 3815 cmd.dp_aux_access.aux_control.instance = dc->links[link_index]->ddc_hw_inst; 3816 cmd.dp_aux_access.aux_control.sw_crc_enabled = 0; 3817 cmd.dp_aux_access.aux_control.timeout = 0; 3818 cmd.dp_aux_access.aux_control.dpaux.address = payload->address; 3819 cmd.dp_aux_access.aux_control.dpaux.is_i2c_over_aux = payload->i2c_over_aux; 3820 cmd.dp_aux_access.aux_control.dpaux.length = payload->length; 3821 3822 /* set aux action */ 3823 if (payload->i2c_over_aux) { 3824 if (payload->write) { 3825 if (payload->mot) 3826 action = DP_AUX_REQ_ACTION_I2C_WRITE_MOT; 3827 else 3828 action = DP_AUX_REQ_ACTION_I2C_WRITE; 3829 } else { 3830 if (payload->mot) 3831 action = DP_AUX_REQ_ACTION_I2C_READ_MOT; 3832 else 3833 action = DP_AUX_REQ_ACTION_I2C_READ; 3834 } 3835 } else { 3836 if (payload->write) 3837 action = DP_AUX_REQ_ACTION_DPCD_WRITE; 3838 else 3839 action = DP_AUX_REQ_ACTION_DPCD_READ; 3840 } 3841 3842 cmd.dp_aux_access.aux_control.dpaux.action = action; 3843 3844 if (payload->length && payload->write) { 3845 memcpy(cmd.dp_aux_access.aux_control.dpaux.data, 3846 payload->data, 3847 payload->length 3848 ); 3849 } 3850 3851 dc_dmub_srv_cmd_queue(dmub_srv, &cmd); 3852 dc_dmub_srv_cmd_execute(dmub_srv); 3853 dc_dmub_srv_wait_idle(dmub_srv); 3854 3855 return true; 3856} 3857 3858uint8_t get_link_index_from_dpia_port_index(const struct dc *dc, 3859 uint8_t dpia_port_index) 3860{ 3861 uint8_t index, link_index = 0xFF; 3862 3863 for (index = 0; index < dc->link_count; index++) { 3864 /* ddc_hw_inst has dpia port index for dpia links 3865 * and ddc instance for legacy links 3866 */ 3867 if (!dc->links[index]->ddc->ddc_pin) { 3868 if (dc->links[index]->ddc_hw_inst == dpia_port_index) { 3869 link_index = index; 3870 break; 3871 } 3872 } 3873 } 3874 ASSERT(link_index != 0xFF); 3875 return link_index; 3876} 3877 3878/** 3879 ***************************************************************************** 3880 * Function: dc_process_dmub_set_config_async 3881 * 3882 * @brief 3883 * Submits set_config command to dmub via inbox message 3884 * 3885 * @param 3886 * [in] dc: dc structure 3887 * [in] link_index: link index 3888 * [in] payload: aux payload 3889 * [out] notify: set_config immediate reply 3890 * 3891 * @return 3892 * True if successful, False if failure 3893 ***************************************************************************** 3894 */ 3895bool dc_process_dmub_set_config_async(struct dc *dc, 3896 uint32_t link_index, 3897 struct set_config_cmd_payload *payload, 3898 struct dmub_notification *notify) 3899{ 3900 union dmub_rb_cmd cmd = {0}; 3901 struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv; 3902 bool is_cmd_complete = true; 3903 3904 /* prepare SET_CONFIG command */ 3905 cmd.set_config_access.header.type = DMUB_CMD__DPIA; 3906 cmd.set_config_access.header.sub_type = DMUB_CMD__DPIA_SET_CONFIG_ACCESS; 3907 3908 cmd.set_config_access.set_config_control.instance = dc->links[link_index]->ddc_hw_inst; 3909 cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type; 3910 cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data; 3911 3912 if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd)) { 3913 /* command is not processed by dmub */ 3914 notify->sc_status = SET_CONFIG_UNKNOWN_ERROR; 3915 return is_cmd_complete; 3916 } 3917 3918 /* command processed by dmub, if ret_status is 1, it is completed instantly */ 3919 if (cmd.set_config_access.header.ret_status == 1) 3920 notify->sc_status = cmd.set_config_access.set_config_control.immed_status; 3921 else 3922 /* cmd pending, will receive notification via outbox */ 3923 is_cmd_complete = false; 3924 3925 return is_cmd_complete; 3926} 3927 3928/** 3929 ***************************************************************************** 3930 * Function: dc_process_dmub_set_mst_slots 3931 * 3932 * @brief 3933 * Submits mst slot allocation command to dmub via inbox message 3934 * 3935 * @param 3936 * [in] dc: dc structure 3937 * [in] link_index: link index 3938 * [in] mst_alloc_slots: mst slots to be allotted 3939 * [out] mst_slots_in_use: mst slots in use returned in failure case 3940 * 3941 * @return 3942 * DC_OK if successful, DC_ERROR if failure 3943 ***************************************************************************** 3944 */ 3945enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc, 3946 uint32_t link_index, 3947 uint8_t mst_alloc_slots, 3948 uint8_t *mst_slots_in_use) 3949{ 3950 union dmub_rb_cmd cmd = {0}; 3951 struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv; 3952 3953 /* prepare MST_ALLOC_SLOTS command */ 3954 cmd.set_mst_alloc_slots.header.type = DMUB_CMD__DPIA; 3955 cmd.set_mst_alloc_slots.header.sub_type = DMUB_CMD__DPIA_MST_ALLOC_SLOTS; 3956 3957 cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst; 3958 cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots; 3959 3960 if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd)) 3961 /* command is not processed by dmub */ 3962 return DC_ERROR_UNEXPECTED; 3963 3964 /* command processed by dmub, if ret_status is 1 */ 3965 if (cmd.set_config_access.header.ret_status != 1) 3966 /* command processing error */ 3967 return DC_ERROR_UNEXPECTED; 3968 3969 /* command processed and we have a status of 2, mst not enabled in dpia */ 3970 if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 2) 3971 return DC_FAIL_UNSUPPORTED_1; 3972 3973 /* previously configured mst alloc and used slots did not match */ 3974 if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 3) { 3975 *mst_slots_in_use = cmd.set_mst_alloc_slots.mst_slots_control.mst_slots_in_use; 3976 return DC_NOT_SUPPORTED; 3977 } 3978 3979 return DC_OK; 3980} 3981 3982/** 3983 * dc_disable_accelerated_mode - disable accelerated mode 3984 * @dc: dc structure 3985 */ 3986void dc_disable_accelerated_mode(struct dc *dc) 3987{ 3988 bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 0); 3989} 3990 3991 3992/** 3993 ***************************************************************************** 3994 * dc_notify_vsync_int_state() - notifies vsync enable/disable state 3995 * @dc: dc structure 3996 * @stream: stream where vsync int state changed 3997 * @enable: whether vsync is enabled or disabled 3998 * 3999 * Called when vsync is enabled/disabled 4000 * Will notify DMUB to start/stop ABM interrupts after steady state is reached 4001 * 4002 ***************************************************************************** 4003 */ 4004void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable) 4005{ 4006 int i; 4007 int edp_num; 4008 struct pipe_ctx *pipe = NULL; 4009 struct dc_link *link = stream->sink->link; 4010 struct dc_link *edp_links[MAX_NUM_EDP]; 4011 4012 4013 if (link->psr_settings.psr_feature_enabled) 4014 return; 4015 4016 /*find primary pipe associated with stream*/ 4017 for (i = 0; i < MAX_PIPES; i++) { 4018 pipe = &dc->current_state->res_ctx.pipe_ctx[i]; 4019 4020 if (pipe->stream == stream && pipe->stream_res.tg) 4021 break; 4022 } 4023 4024 if (i == MAX_PIPES) { 4025 ASSERT(0); 4026 return; 4027 } 4028 4029 get_edp_links(dc, edp_links, &edp_num); 4030 4031 /* Determine panel inst */ 4032 for (i = 0; i < edp_num; i++) { 4033 if (edp_links[i] == link) 4034 break; 4035 } 4036 4037 if (i == edp_num) { 4038 return; 4039 } 4040 4041 if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause) 4042 pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst); 4043} 4044/* 4045 * dc_extended_blank_supported: Decide whether extended blank is supported 4046 * 4047 * Extended blank is a freesync optimization feature to be enabled in the future. 4048 * During the extra vblank period gained from freesync, we have the ability to enter z9/z10. 4049 * 4050 * @param [in] dc: Current DC state 4051 * @return: Indicate whether extended blank is supported (true or false) 4052 */ 4053bool dc_extended_blank_supported(struct dc *dc) 4054{ 4055 return dc->debug.extended_blank_optimization && !dc->debug.disable_z10 4056 && dc->caps.zstate_support && dc->caps.is_apu; 4057}