cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

dcn10_hw_sequencer.c (119225B)


      1/*
      2 * Copyright 2016 Advanced Micro Devices, Inc.
      3 *
      4 * Permission is hereby granted, free of charge, to any person obtaining a
      5 * copy of this software and associated documentation files (the "Software"),
      6 * to deal in the Software without restriction, including without limitation
      7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
      8 * and/or sell copies of the Software, and to permit persons to whom the
      9 * Software is furnished to do so, subject to the following conditions:
     10 *
     11 * The above copyright notice and this permission notice shall be included in
     12 * all copies or substantial portions of the Software.
     13 *
     14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
     18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
     19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
     20 * OTHER DEALINGS IN THE SOFTWARE.
     21 *
     22 * Authors: AMD
     23 *
     24 */
     25
     26#include <linux/delay.h>
     27#include "dm_services.h"
     28#include "basics/dc_common.h"
     29#include "core_types.h"
     30#include "resource.h"
     31#include "custom_float.h"
     32#include "dcn10_hw_sequencer.h"
     33#include "dcn10_hw_sequencer_debug.h"
     34#include "dce/dce_hwseq.h"
     35#include "abm.h"
     36#include "dmcu.h"
     37#include "dcn10_optc.h"
     38#include "dcn10_dpp.h"
     39#include "dcn10_mpc.h"
     40#include "timing_generator.h"
     41#include "opp.h"
     42#include "ipp.h"
     43#include "mpc.h"
     44#include "reg_helper.h"
     45#include "dcn10_hubp.h"
     46#include "dcn10_hubbub.h"
     47#include "dcn10_cm_common.h"
     48#include "dc_link_dp.h"
     49#include "dccg.h"
     50#include "clk_mgr.h"
     51#include "link_hwss.h"
     52#include "dpcd_defs.h"
     53#include "dsc.h"
     54#include "dce/dmub_hw_lock_mgr.h"
     55#include "dc_trace.h"
     56#include "dce/dmub_outbox.h"
     57#include "inc/dc_link_dp.h"
     58#include "inc/link_dpcd.h"
     59
     60#define DC_LOGGER_INIT(logger)
     61
     62#define CTX \
     63	hws->ctx
     64#define REG(reg)\
     65	hws->regs->reg
     66
     67#undef FN
     68#define FN(reg_name, field_name) \
     69	hws->shifts->field_name, hws->masks->field_name
     70
     71/*print is 17 wide, first two characters are spaces*/
     72#define DTN_INFO_MICRO_SEC(ref_cycle) \
     73	print_microsec(dc_ctx, log_ctx, ref_cycle)
     74
     75#define GAMMA_HW_POINTS_NUM 256
     76
     77#define PGFSM_POWER_ON 0
     78#define PGFSM_POWER_OFF 2
     79
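/*
 * Convert a DCHUB reference clock cycle count to microseconds and print it
 * with three decimal places; the reference clock rate is taken from the
 * resource pool's dchub_ref_clock_inKhz.
 */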
     80static void print_microsec(struct dc_context *dc_ctx,
     81			   struct dc_log_buffer_ctx *log_ctx,
     82			   uint32_t ref_cycle)
     83{
     84	const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
     85	static const unsigned int frac = 1000;
     86	uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz;
     87
     88	DTN_INFO("  %11d.%03d",
     89			us_x10 / frac,
     90			us_x10 % frac);
     91}
     92
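/*
 * Grab or release the pipe control lock on every enabled top pipe of the
 * given state; bottom pipes and disabled timing generators are skipped.
 */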
     93void dcn10_lock_all_pipes(struct dc *dc,
     94	struct dc_state *context,
     95	bool lock)
     96{
     97	struct pipe_ctx *pipe_ctx;
     98	struct timing_generator *tg;
     99	int i;
    100
    101	for (i = 0; i < dc->res_pool->pipe_count; i++) {
    102		pipe_ctx = &context->res_ctx.pipe_ctx[i];
    103		tg = pipe_ctx->stream_res.tg;
    104
    105		/*
    106		 * Only lock the top pipe's tg to prevent redundant
    107		 * (un)locking. Also skip if pipe is disabled.
    108		 */
    109		if (pipe_ctx->top_pipe ||
    110		    !pipe_ctx->stream ||
    111		    !tg->funcs->is_tg_enabled(tg))
    112			continue;
    113
    114		if (lock)
    115			dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
    116		else
    117			dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
    118	}
    119}
    120
    121static void log_mpc_crc(struct dc *dc,
    122	struct dc_log_buffer_ctx *log_ctx)
    123{
    124	struct dc_context *dc_ctx = dc->ctx;
    125	struct dce_hwseq *hws = dc->hwseq;
    126
    127	if (REG(MPC_CRC_RESULT_GB))
    128		DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
    129		REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
    130	if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
    131		DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
    132		REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
    133}
    134
    135static void dcn10_log_hubbub_state(struct dc *dc,
    136				   struct dc_log_buffer_ctx *log_ctx)
    137{
    138	struct dc_context *dc_ctx = dc->ctx;
    139	struct dcn_hubbub_wm wm;
    140	int i;
    141
    142	memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
    143	dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);
    144
    145	DTN_INFO("HUBBUB WM:      data_urgent  pte_meta_urgent"
    146			"         sr_enter          sr_exit  dram_clk_change\n");
    147
    148	for (i = 0; i < 4; i++) {
    149		struct dcn_hubbub_wm_set *s;
    150
    151		s = &wm.sets[i];
    152		DTN_INFO("WM_Set[%d]:", s->wm_set);
    153		DTN_INFO_MICRO_SEC(s->data_urgent);
    154		DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
    155		DTN_INFO_MICRO_SEC(s->sr_enter);
    156		DTN_INFO_MICRO_SEC(s->sr_exit);
    157		DTN_INFO_MICRO_SEC(s->dram_clk_chanage);
    158		DTN_INFO("\n");
    159	}
    160
    161	DTN_INFO("\n");
    162}
    163
    164static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
    165{
    166	struct dc_context *dc_ctx = dc->ctx;
    167	struct resource_pool *pool = dc->res_pool;
    168	int i;
    169
    170	DTN_INFO(
    171		"HUBP:  format  addr_hi  width  height  rot  mir  sw_mode  dcc_en  blank_en  clock_en  ttu_dis  underflow   min_ttu_vblank       qos_low_wm      qos_high_wm\n");
    172	for (i = 0; i < pool->pipe_count; i++) {
    173		struct hubp *hubp = pool->hubps[i];
    174		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
    175
    176		hubp->funcs->hubp_read_state(hubp);
    177
    178		if (!s->blank_en) {
    179			DTN_INFO("[%2d]:  %5xh  %6xh  %5d  %6d  %2xh  %2xh  %6xh  %6d  %8d  %8d  %7d  %8xh",
    180					hubp->inst,
    181					s->pixel_format,
    182					s->inuse_addr_hi,
    183					s->viewport_width,
    184					s->viewport_height,
    185					s->rotation_angle,
    186					s->h_mirror_en,
    187					s->sw_mode,
    188					s->dcc_en,
    189					s->blank_en,
    190					s->clock_en,
    191					s->ttu_disable,
    192					s->underflow_status);
    193			DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
    194			DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
    195			DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
    196			DTN_INFO("\n");
    197		}
    198	}
    199
    200	DTN_INFO("\n=========RQ========\n");
    201	DTN_INFO("HUBP:  drq_exp_m  prq_exp_m  mrq_exp_m  crq_exp_m  plane1_ba  L:chunk_s  min_chu_s  meta_ch_s"
    202		"  min_m_c_s  dpte_gr_s  mpte_gr_s  swath_hei  pte_row_h  C:chunk_s  min_chu_s  meta_ch_s"
    203		"  min_m_c_s  dpte_gr_s  mpte_gr_s  swath_hei  pte_row_h\n");
    204	for (i = 0; i < pool->pipe_count; i++) {
    205		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
    206		struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
    207
    208		if (!s->blank_en)
    209			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
    210				pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
    211				rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
    212				rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
    213				rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
    214				rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
    215				rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
    216				rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
    217				rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
    218				rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
    219	}
    220
    221	DTN_INFO("========DLG========\n");
    222	DTN_INFO("HUBP:  rc_hbe     dlg_vbe    min_d_y_n  rc_per_ht  rc_x_a_s "
    223			"  dst_y_a_s  dst_y_pf   dst_y_vvb  dst_y_rvb  dst_y_vfl  dst_y_rfl  rf_pix_fq"
    224			"  vratio_pf  vrat_pf_c  rc_pg_vbl  rc_pg_vbc  rc_mc_vbl  rc_mc_vbc  rc_pg_fll"
    225			"  rc_pg_flc  rc_mc_fll  rc_mc_flc  pr_nom_l   pr_nom_c   rc_pg_nl   rc_pg_nc "
    226			"  mr_nom_l   mr_nom_c   rc_mc_nl   rc_mc_nc   rc_ld_pl   rc_ld_pc   rc_ld_l  "
    227			"  rc_ld_c    cha_cur0   ofst_cur1  cha_cur1   vr_af_vc0  ddrq_limt  x_rt_dlay"
    228			"  x_rp_dlay  x_rr_sfl\n");
    229	for (i = 0; i < pool->pipe_count; i++) {
    230		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
    231		struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
    232
    233		if (!s->blank_en)
    234			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh"
    235				"  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh"
    236				"  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
    237				pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
    238				dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
    239				dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
    240				dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
    241				dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
    242				dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
    243				dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
    244				dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
    245				dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
    246				dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
    247				dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
    248				dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
    249				dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
    250				dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
    251				dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
    252				dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
    253				dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
    254				dlg_regs->xfc_reg_remote_surface_flip_latency);
    255	}
    256
    257	DTN_INFO("========TTU========\n");
    258	DTN_INFO("HUBP:  qos_ll_wm  qos_lh_wm  mn_ttu_vb  qos_l_flp  rc_rd_p_l  rc_rd_l    rc_rd_p_c"
    259			"  rc_rd_c    rc_rd_c0   rc_rd_pc0  rc_rd_c1   rc_rd_pc1  qos_lf_l   qos_rds_l"
    260			"  qos_lf_c   qos_rds_c  qos_lf_c0  qos_rds_c0 qos_lf_c1  qos_rds_c1\n");
    261	for (i = 0; i < pool->pipe_count; i++) {
    262		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
    263		struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;
    264
    265		if (!s->blank_en)
    266			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
    267				pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
    268				ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
    269				ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
    270				ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
    271				ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
    272				ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
    273				ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
    274	}
    275	DTN_INFO("\n");
    276}
    277
    278void dcn10_log_hw_state(struct dc *dc,
    279	struct dc_log_buffer_ctx *log_ctx)
    280{
    281	struct dc_context *dc_ctx = dc->ctx;
    282	struct resource_pool *pool = dc->res_pool;
    283	int i;
    284
    285	DTN_INFO_BEGIN();
    286
    287	dcn10_log_hubbub_state(dc, log_ctx);
    288
    289	dcn10_log_hubp_states(dc, log_ctx);
    290
    291	DTN_INFO("DPP:    IGAM format  IGAM mode    DGAM mode    RGAM mode"
    292			"  GAMUT mode  C11 C12   C13 C14   C21 C22   C23 C24   "
    293			"C31 C32   C33 C34\n");
    294	for (i = 0; i < pool->pipe_count; i++) {
    295		struct dpp *dpp = pool->dpps[i];
    296		struct dcn_dpp_state s = {0};
    297
    298		dpp->funcs->dpp_read_state(dpp, &s);
    299
    300		if (!s.is_enabled)
    301			continue;
    302
    303		DTN_INFO("[%2d]:  %11xh  %-11s  %-11s  %-11s"
    304				"%8x    %08xh %08xh %08xh %08xh %08xh %08xh",
    305				dpp->inst,
    306				s.igam_input_format,
    307				(s.igam_lut_mode == 0) ? "BypassFixed" :
    308					((s.igam_lut_mode == 1) ? "BypassFloat" :
    309					((s.igam_lut_mode == 2) ? "RAM" :
    310					((s.igam_lut_mode == 3) ? "RAM" :
    311								 "Unknown"))),
    312				(s.dgam_lut_mode == 0) ? "Bypass" :
    313					((s.dgam_lut_mode == 1) ? "sRGB" :
    314					((s.dgam_lut_mode == 2) ? "Ycc" :
    315					((s.dgam_lut_mode == 3) ? "RAM" :
    316					((s.dgam_lut_mode == 4) ? "RAM" :
    317								 "Unknown")))),
    318				(s.rgam_lut_mode == 0) ? "Bypass" :
    319					((s.rgam_lut_mode == 1) ? "sRGB" :
    320					((s.rgam_lut_mode == 2) ? "Ycc" :
    321					((s.rgam_lut_mode == 3) ? "RAM" :
    322					((s.rgam_lut_mode == 4) ? "RAM" :
    323								 "Unknown")))),
    324				s.gamut_remap_mode,
    325				s.gamut_remap_c11_c12,
    326				s.gamut_remap_c13_c14,
    327				s.gamut_remap_c21_c22,
    328				s.gamut_remap_c23_c24,
    329				s.gamut_remap_c31_c32,
    330				s.gamut_remap_c33_c34);
    331		DTN_INFO("\n");
    332	}
    333	DTN_INFO("\n");
    334
    335	DTN_INFO("MPCC:  OPP  DPP  MPCCBOT  MODE  ALPHA_MODE  PREMULT  OVERLAP_ONLY  IDLE\n");
    336	for (i = 0; i < pool->pipe_count; i++) {
    337		struct mpcc_state s = {0};
    338
    339		pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
    340		if (s.opp_id != 0xf)
    341			DTN_INFO("[%2d]:  %2xh  %2xh  %6xh  %4d  %10d  %7d  %12d  %4d\n",
    342				i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
    343				s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
    344				s.idle);
    345	}
    346	DTN_INFO("\n");
    347
    348	DTN_INFO("OTG:  v_bs  v_be  v_ss  v_se  vpol  vmax  vmin  vmax_sel  vmin_sel  h_bs  h_be  h_ss  h_se  hpol  htot  vtot  underflow blank_en\n");
    349
    350	for (i = 0; i < pool->timing_generator_count; i++) {
    351		struct timing_generator *tg = pool->timing_generators[i];
    352		struct dcn_otg_state s = {0};
    353		/* Read shared OTG state registers for all DCNx */
    354		optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
    355
    356		/*
    357		 * For DCN2 and greater, a register on the OPP is used to
    358		 * determine if the CRTC is blanked instead of the OTG. So use
    359		 * dpg_is_blanked() if exists, otherwise fallback on otg.
    360		 *
    361		 * TODO: Implement DCN-specific read_otg_state hooks.
    362		 */
    363		if (pool->opps[i]->funcs->dpg_is_blanked)
    364			s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]);
    365		else
    366			s.blank_enabled = tg->funcs->is_blanked(tg);
    367
    368		//only print if OTG master is enabled
    369		if ((s.otg_enabled & 1) == 0)
    370			continue;
    371
    372		DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d  %9d %8d\n",
    373				tg->inst,
    374				s.v_blank_start,
    375				s.v_blank_end,
    376				s.v_sync_a_start,
    377				s.v_sync_a_end,
    378				s.v_sync_a_pol,
    379				s.v_total_max,
    380				s.v_total_min,
    381				s.v_total_max_sel,
    382				s.v_total_min_sel,
    383				s.h_blank_start,
    384				s.h_blank_end,
    385				s.h_sync_a_start,
    386				s.h_sync_a_end,
    387				s.h_sync_a_pol,
    388				s.h_total,
    389				s.v_total,
    390				s.underflow_occurred_status,
    391				s.blank_enabled);
    392
    393		// Clear underflow for debug purposes
    394		// We want to keep underflow sticky bit on for the longevity tests outside of test environment.
    395		// This function is called only from Windows or Diags test environment, hence it's safe to clear
    396		// it from here without affecting the original intent.
    397		tg->funcs->clear_optc_underflow(tg);
    398	}
    399	DTN_INFO("\n");
    400
    401	// dcn_dsc_state struct field bytes_per_pixel was renamed to bits_per_pixel
    402	// TODO: Update golden log header to reflect this name change
    403	DTN_INFO("DSC: CLOCK_EN  SLICE_WIDTH  Bytes_pp\n");
    404	for (i = 0; i < pool->res_cap->num_dsc; i++) {
    405		struct display_stream_compressor *dsc = pool->dscs[i];
    406		struct dcn_dsc_state s = {0};
    407
    408		dsc->funcs->dsc_read_state(dsc, &s);
    409		DTN_INFO("[%d]: %-9d %-12d %-10d\n",
     410			dsc->inst,
    411			s.dsc_clock_en,
    412			s.dsc_slice_width,
    413			s.dsc_bits_per_pixel);
    414		DTN_INFO("\n");
    415	}
    416	DTN_INFO("\n");
    417
    418	DTN_INFO("S_ENC: DSC_MODE  SEC_GSP7_LINE_NUM"
    419			"  VBID6_LINE_REFERENCE  VBID6_LINE_NUM  SEC_GSP7_ENABLE  SEC_STREAM_ENABLE\n");
    420	for (i = 0; i < pool->stream_enc_count; i++) {
    421		struct stream_encoder *enc = pool->stream_enc[i];
    422		struct enc_state s = {0};
    423
    424		if (enc->funcs->enc_read_state) {
    425			enc->funcs->enc_read_state(enc, &s);
    426			DTN_INFO("[%-3d]: %-9d %-18d %-21d %-15d %-16d %-17d\n",
    427				enc->id,
    428				s.dsc_mode,
    429				s.sec_gsp_pps_line_num,
    430				s.vbid6_line_reference,
    431				s.vbid6_line_num,
    432				s.sec_gsp_pps_enable,
    433				s.sec_stream_enable);
    434			DTN_INFO("\n");
    435		}
    436	}
    437	DTN_INFO("\n");
    438
    439	DTN_INFO("L_ENC: DPHY_FEC_EN  DPHY_FEC_READY_SHADOW  DPHY_FEC_ACTIVE_STATUS  DP_LINK_TRAINING_COMPLETE\n");
    440	for (i = 0; i < dc->link_count; i++) {
    441		struct link_encoder *lenc = dc->links[i]->link_enc;
    442
    443		struct link_enc_state s = {0};
    444
    445		if (lenc->funcs->read_state) {
    446			lenc->funcs->read_state(lenc, &s);
    447			DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n",
    448				i,
    449				s.dphy_fec_en,
    450				s.dphy_fec_ready_shadow,
    451				s.dphy_fec_active_status,
    452				s.dp_link_training_complete);
    453			DTN_INFO("\n");
    454		}
    455	}
    456	DTN_INFO("\n");
    457
    458	DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d  dcfclk_deep_sleep_khz:%d  dispclk_khz:%d\n"
    459		"dppclk_khz:%d  max_supported_dppclk_khz:%d  fclk_khz:%d  socclk_khz:%d\n\n",
    460			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
    461			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
    462			dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
    463			dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
    464			dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
    465			dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
    466			dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);
    467
    468	log_mpc_crc(dc, log_ctx);
    469
    470	{
    471		if (pool->hpo_dp_stream_enc_count > 0) {
    472			DTN_INFO("DP HPO S_ENC:  Enabled  OTG   Format   Depth   Vid   SDP   Compressed  Link\n");
    473			for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) {
    474				struct hpo_dp_stream_encoder_state hpo_dp_se_state = {0};
    475				struct hpo_dp_stream_encoder *hpo_dp_stream_enc = pool->hpo_dp_stream_enc[i];
    476
    477				if (hpo_dp_stream_enc && hpo_dp_stream_enc->funcs->read_state) {
    478					hpo_dp_stream_enc->funcs->read_state(hpo_dp_stream_enc, &hpo_dp_se_state);
    479
    480					DTN_INFO("[%d]:                 %d    %d   %6s       %d     %d     %d            %d     %d\n",
    481							hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0,
    482							hpo_dp_se_state.stream_enc_enabled,
    483							hpo_dp_se_state.otg_inst,
    484							(hpo_dp_se_state.pixel_encoding == 0) ? "4:4:4" :
    485									((hpo_dp_se_state.pixel_encoding == 1) ? "4:2:2" :
    486									(hpo_dp_se_state.pixel_encoding == 2) ? "4:2:0" : "Y-Only"),
    487							(hpo_dp_se_state.component_depth == 0) ? 6 :
    488									((hpo_dp_se_state.component_depth == 1) ? 8 :
    489									(hpo_dp_se_state.component_depth == 2) ? 10 : 12),
    490							hpo_dp_se_state.vid_stream_enabled,
    491							hpo_dp_se_state.sdp_enabled,
    492							hpo_dp_se_state.compressed_format,
    493							hpo_dp_se_state.mapped_to_link_enc);
    494				}
    495			}
    496
    497			DTN_INFO("\n");
    498		}
    499
    500		/* log DP HPO L_ENC section if any hpo_dp_link_enc exists */
    501		if (pool->hpo_dp_link_enc_count) {
    502			DTN_INFO("DP HPO L_ENC:  Enabled  Mode   Lanes   Stream  Slots   VC Rate X    VC Rate Y\n");
    503
    504			for (i = 0; i < pool->hpo_dp_link_enc_count; i++) {
    505				struct hpo_dp_link_encoder *hpo_dp_link_enc = pool->hpo_dp_link_enc[i];
    506				struct hpo_dp_link_enc_state hpo_dp_le_state = {0};
    507
    508				if (hpo_dp_link_enc->funcs->read_state) {
    509					hpo_dp_link_enc->funcs->read_state(hpo_dp_link_enc, &hpo_dp_le_state);
    510					DTN_INFO("[%d]:                 %d  %6s     %d        %d      %d     %d     %d\n",
    511							hpo_dp_link_enc->inst,
    512							hpo_dp_le_state.link_enc_enabled,
    513							(hpo_dp_le_state.link_mode == 0) ? "TPS1" :
    514									(hpo_dp_le_state.link_mode == 1) ? "TPS2" :
    515									(hpo_dp_le_state.link_mode == 2) ? "ACTIVE" : "TEST",
    516							hpo_dp_le_state.lane_count,
    517							hpo_dp_le_state.stream_src[0],
    518							hpo_dp_le_state.slot_count[0],
    519							hpo_dp_le_state.vc_rate_x[0],
    520							hpo_dp_le_state.vc_rate_y[0]);
    521					DTN_INFO("\n");
    522				}
    523			}
    524
    525			DTN_INFO("\n");
    526		}
    527	}
    528
    529	DTN_INFO_END();
    530}
    531
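/*
 * Report whether the OTG or the HUBP of this pipe has latched an underflow;
 * the sticky status bit is cleared as a side effect when it was set.
 */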
    532bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)
    533{
    534	struct hubp *hubp = pipe_ctx->plane_res.hubp;
    535	struct timing_generator *tg = pipe_ctx->stream_res.tg;
    536
    537	if (tg->funcs->is_optc_underflow_occurred(tg)) {
    538		tg->funcs->clear_optc_underflow(tg);
    539		return true;
    540	}
    541
    542	if (hubp->funcs->hubp_get_underflow_status(hubp)) {
    543		hubp->funcs->hubp_clear_underflow(hubp);
    544		return true;
    545	}
    546	return false;
    547}
    548
    549void dcn10_enable_power_gating_plane(
    550	struct dce_hwseq *hws,
    551	bool enable)
    552{
    553	bool force_on = true; /* disable power gating */
    554
    555	if (enable)
    556		force_on = false;
    557
    558	/* DCHUBP0/1/2/3 */
    559	REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
    560	REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
    561	REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
    562	REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);
    563
    564	/* DPP0/1/2/3 */
    565	REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
    566	REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
    567	REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
    568	REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
    569}
    570
    571void dcn10_disable_vga(
    572	struct dce_hwseq *hws)
    573{
    574	unsigned int in_vga1_mode = 0;
    575	unsigned int in_vga2_mode = 0;
    576	unsigned int in_vga3_mode = 0;
    577	unsigned int in_vga4_mode = 0;
    578
    579	REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
    580	REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
    581	REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
    582	REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);
    583
    584	if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
    585			in_vga3_mode == 0 && in_vga4_mode == 0)
    586		return;
    587
    588	REG_WRITE(D1VGA_CONTROL, 0);
    589	REG_WRITE(D2VGA_CONTROL, 0);
    590	REG_WRITE(D3VGA_CONTROL, 0);
    591	REG_WRITE(D4VGA_CONTROL, 0);
    592
    593	/* HW Engineer's Notes:
    594	 *  During switch from vga->extended, if we set the VGA_TEST_ENABLE and
    595	 *  then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
    596	 *
    597	 *  Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
    598	 *  VGA_TEST_ENABLE, to leave it in the same state as before.
    599	 */
    600	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
    601	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
    602}
    603
    604/**
    605 * dcn10_dpp_pg_control - DPP power gate control.
    606 *
    607 * @hws: dce_hwseq reference.
    608 * @dpp_inst: DPP instance reference.
     609 * @power_on: true to power the DPP on (un-gate it), false to power-gate it.
     610 *
     611 * Power the specified DPP instance on or off via its power-gate domain.
    612 */
    613void dcn10_dpp_pg_control(
    614		struct dce_hwseq *hws,
    615		unsigned int dpp_inst,
    616		bool power_on)
    617{
    618	uint32_t power_gate = power_on ? 0 : 1;
    619	uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;
    620
    621	if (hws->ctx->dc->debug.disable_dpp_power_gate)
    622		return;
    623	if (REG(DOMAIN1_PG_CONFIG) == 0)
    624		return;
    625
    626	switch (dpp_inst) {
    627	case 0: /* DPP0 */
    628		REG_UPDATE(DOMAIN1_PG_CONFIG,
    629				DOMAIN1_POWER_GATE, power_gate);
    630
    631		REG_WAIT(DOMAIN1_PG_STATUS,
    632				DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
    633				1, 1000);
    634		break;
    635	case 1: /* DPP1 */
    636		REG_UPDATE(DOMAIN3_PG_CONFIG,
    637				DOMAIN3_POWER_GATE, power_gate);
    638
    639		REG_WAIT(DOMAIN3_PG_STATUS,
    640				DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
    641				1, 1000);
    642		break;
    643	case 2: /* DPP2 */
    644		REG_UPDATE(DOMAIN5_PG_CONFIG,
    645				DOMAIN5_POWER_GATE, power_gate);
    646
    647		REG_WAIT(DOMAIN5_PG_STATUS,
    648				DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
    649				1, 1000);
    650		break;
    651	case 3: /* DPP3 */
    652		REG_UPDATE(DOMAIN7_PG_CONFIG,
    653				DOMAIN7_POWER_GATE, power_gate);
    654
    655		REG_WAIT(DOMAIN7_PG_STATUS,
    656				DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
    657				1, 1000);
    658		break;
    659	default:
    660		BREAK_TO_DEBUGGER();
    661		break;
    662	}
    663}
    664
    665/**
    666 * dcn10_hubp_pg_control - HUBP power gate control.
    667 *
    668 * @hws: dce_hwseq reference.
     669 * @hubp_inst: HUBP instance reference.
     670 * @power_on: true to power the HUBP on (un-gate it), false to power-gate it.
     671 *
     672 * Power the specified HUBP instance on or off via its power-gate domain.
    673 */
    674void dcn10_hubp_pg_control(
    675		struct dce_hwseq *hws,
    676		unsigned int hubp_inst,
    677		bool power_on)
    678{
    679	uint32_t power_gate = power_on ? 0 : 1;
    680	uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;
    681
    682	if (hws->ctx->dc->debug.disable_hubp_power_gate)
    683		return;
    684	if (REG(DOMAIN0_PG_CONFIG) == 0)
    685		return;
    686
    687	switch (hubp_inst) {
    688	case 0: /* DCHUBP0 */
    689		REG_UPDATE(DOMAIN0_PG_CONFIG,
    690				DOMAIN0_POWER_GATE, power_gate);
    691
    692		REG_WAIT(DOMAIN0_PG_STATUS,
    693				DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
    694				1, 1000);
    695		break;
    696	case 1: /* DCHUBP1 */
    697		REG_UPDATE(DOMAIN2_PG_CONFIG,
    698				DOMAIN2_POWER_GATE, power_gate);
    699
    700		REG_WAIT(DOMAIN2_PG_STATUS,
    701				DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
    702				1, 1000);
    703		break;
    704	case 2: /* DCHUBP2 */
    705		REG_UPDATE(DOMAIN4_PG_CONFIG,
    706				DOMAIN4_POWER_GATE, power_gate);
    707
    708		REG_WAIT(DOMAIN4_PG_STATUS,
    709				DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
    710				1, 1000);
    711		break;
    712	case 3: /* DCHUBP3 */
    713		REG_UPDATE(DOMAIN6_PG_CONFIG,
    714				DOMAIN6_POWER_GATE, power_gate);
    715
    716		REG_WAIT(DOMAIN6_PG_STATUS,
    717				DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
    718				1, 1000);
    719		break;
    720	default:
    721		BREAK_TO_DEBUGGER();
    722		break;
    723	}
    724}
    725
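/*
 * Un-gate the front end for plane_id: assert IP_REQUEST_EN, power on the
 * DPP and HUBP power domains through the hwseq hooks, then de-assert the
 * request again.
 */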
    726static void power_on_plane(
    727	struct dce_hwseq *hws,
    728	int plane_id)
    729{
    730	DC_LOGGER_INIT(hws->ctx->logger);
    731	if (REG(DC_IP_REQUEST_CNTL)) {
    732		REG_SET(DC_IP_REQUEST_CNTL, 0,
    733				IP_REQUEST_EN, 1);
    734
    735		if (hws->funcs.dpp_pg_control)
    736			hws->funcs.dpp_pg_control(hws, plane_id, true);
    737
    738		if (hws->funcs.hubp_pg_control)
    739			hws->funcs.hubp_pg_control(hws, plane_id, true);
    740
    741		REG_SET(DC_IP_REQUEST_CNTL, 0,
    742				IP_REQUEST_EN, 0);
    743		DC_LOG_DEBUG(
    744				"Un-gated front end for pipe %d\n", plane_id);
    745	}
    746}
    747
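/*
 * DEGVIDCN10_253 workaround: when every HUBP ends up power gated, HUBP0 is
 * powered back on and un-blanked so stutter can still be enabled (see
 * apply_DEGVIDCN10_253_wa() below); this helper reverses that state.
 */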
    748static void undo_DEGVIDCN10_253_wa(struct dc *dc)
    749{
    750	struct dce_hwseq *hws = dc->hwseq;
    751	struct hubp *hubp = dc->res_pool->hubps[0];
    752
    753	if (!hws->wa_state.DEGVIDCN10_253_applied)
    754		return;
    755
    756	hubp->funcs->set_blank(hubp, true);
    757
    758	REG_SET(DC_IP_REQUEST_CNTL, 0,
    759			IP_REQUEST_EN, 1);
    760
    761	hws->funcs.hubp_pg_control(hws, 0, false);
    762	REG_SET(DC_IP_REQUEST_CNTL, 0,
    763			IP_REQUEST_EN, 0);
    764
    765	hws->wa_state.DEGVIDCN10_253_applied = false;
    766}
    767
    768static void apply_DEGVIDCN10_253_wa(struct dc *dc)
    769{
    770	struct dce_hwseq *hws = dc->hwseq;
    771	struct hubp *hubp = dc->res_pool->hubps[0];
    772	int i;
    773
    774	if (dc->debug.disable_stutter)
    775		return;
    776
    777	if (!hws->wa.DEGVIDCN10_253)
    778		return;
    779
    780	for (i = 0; i < dc->res_pool->pipe_count; i++) {
    781		if (!dc->res_pool->hubps[i]->power_gated)
    782			return;
    783	}
    784
     785	/* All pipes are power gated; apply the workaround to enable stutter. */
    786
    787	REG_SET(DC_IP_REQUEST_CNTL, 0,
    788			IP_REQUEST_EN, 1);
    789
    790	hws->funcs.hubp_pg_control(hws, 0, true);
    791	REG_SET(DC_IP_REQUEST_CNTL, 0,
    792			IP_REQUEST_EN, 0);
    793
    794	hubp->funcs->set_hubp_blank_en(hubp, false);
    795	hws->wa_state.DEGVIDCN10_253_applied = true;
    796}
    797
    798void dcn10_bios_golden_init(struct dc *dc)
    799{
    800	struct dce_hwseq *hws = dc->hwseq;
    801	struct dc_bios *bp = dc->ctx->dc_bios;
    802	int i;
    803	bool allow_self_fresh_force_enable = true;
    804
    805	if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc))
    806		return;
    807
    808	if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
    809		allow_self_fresh_force_enable =
    810				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);
    811
    812
     813	/* WA to let DF sleep when idle after resume from S0i3.
     814	 * The command table sets DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE
     815	 * to 1. If DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE was 0 before
     816	 * the command table was called and changed to 1 afterwards, it should
     817	 * be set back to 0.
     818	 */
    819
    820	/* initialize dcn global */
    821	bp->funcs->enable_disp_power_gating(bp,
    822			CONTROLLER_ID_D0, ASIC_PIPE_INIT);
    823
    824	for (i = 0; i < dc->res_pool->pipe_count; i++) {
    825		/* initialize dcn per pipe */
    826		bp->funcs->enable_disp_power_gating(bp,
    827				CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
    828	}
    829
    830	if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
    831		if (allow_self_fresh_force_enable == false &&
    832				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
    833			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
    834										!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
    835
    836}
    837
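/*
 * Workaround for a false OPTC underflow indication: record whether an
 * underflow was already latched, wait for pending MPCC disconnects on this
 * stream's pipes, enable blank-data double buffering, and clear the
 * underflow bit only if it newly appeared during this sequence.
 */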
    838static void false_optc_underflow_wa(
    839		struct dc *dc,
    840		const struct dc_stream_state *stream,
    841		struct timing_generator *tg)
    842{
    843	int i;
    844	bool underflow;
    845
    846	if (!dc->hwseq->wa.false_optc_underflow)
    847		return;
    848
    849	underflow = tg->funcs->is_optc_underflow_occurred(tg);
    850
    851	for (i = 0; i < dc->res_pool->pipe_count; i++) {
    852		struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
    853
    854		if (old_pipe_ctx->stream != stream)
    855			continue;
    856
    857		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, old_pipe_ctx);
    858	}
    859
    860	if (tg->funcs->set_blank_data_double_buffer)
    861		tg->funcs->set_blank_data_double_buffer(tg, true);
    862
    863	if (tg->funcs->is_optc_underflow_occurred(tg) && !underflow)
    864		tg->funcs->clear_optc_underflow(tg);
    865}
    866
    867enum dc_status dcn10_enable_stream_timing(
    868		struct pipe_ctx *pipe_ctx,
    869		struct dc_state *context,
    870		struct dc *dc)
    871{
    872	struct dc_stream_state *stream = pipe_ctx->stream;
    873	enum dc_color_space color_space;
    874	struct tg_color black_color = {0};
    875
     876	/* The caller's loop visits pipe0, the parent pipe, first and the back
     877	 * end is set up for pipe0. Child pipes share the back end with pipe0,
     878	 * so no programming is needed for them.
     879	 */
    880	if (pipe_ctx->top_pipe != NULL)
    881		return DC_OK;
    882
    883	/* TODO check if timing_changed, disable stream if timing changed */
    884
     885	/* The HW programming guide assumes the display was already disabled
     886	 * by the unplug sequence and that the OTG is stopped.
     887	 */
    888	pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);
    889
    890	if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
    891			pipe_ctx->clock_source,
    892			&pipe_ctx->stream_res.pix_clk_params,
    893			&pipe_ctx->pll_settings)) {
    894		BREAK_TO_DEBUGGER();
    895		return DC_ERROR_UNEXPECTED;
    896	}
    897
    898	pipe_ctx->stream_res.tg->funcs->program_timing(
    899			pipe_ctx->stream_res.tg,
    900			&stream->timing,
    901			pipe_ctx->pipe_dlg_param.vready_offset,
    902			pipe_ctx->pipe_dlg_param.vstartup_start,
    903			pipe_ctx->pipe_dlg_param.vupdate_offset,
    904			pipe_ctx->pipe_dlg_param.vupdate_width,
    905			pipe_ctx->stream->signal,
    906			true);
    907
    908#if 0 /* move to after enable_crtc */
    909	/* TODO: OPP FMT, ABM. etc. should be done here. */
    910	/* or FPGA now. instance 0 only. TODO: move to opp.c */
    911
    912	inst_offset = reg_offsets[pipe_ctx->stream_res.tg->inst].fmt;
    913
    914	pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
    915				pipe_ctx->stream_res.opp,
    916				&stream->bit_depth_params,
    917				&stream->clamping);
    918#endif
    919	/* program otg blank color */
    920	color_space = stream->output_color_space;
    921	color_space_to_black_color(dc, color_space, &black_color);
    922
     923	/*
     924	 * The way 4:2:0 is packed, 2 channels carry the Y component and 1
     925	 * channel alternates between Cb and Cr, so both channels need the
     926	 * pixel value for Y.
     927	 */
    928	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
    929		black_color.color_r_cr = black_color.color_g_y;
    930
    931	if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
    932		pipe_ctx->stream_res.tg->funcs->set_blank_color(
    933				pipe_ctx->stream_res.tg,
    934				&black_color);
    935
    936	if (pipe_ctx->stream_res.tg->funcs->is_blanked &&
    937			!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {
    938		pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
    939		hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
    940		false_optc_underflow_wa(dc, pipe_ctx->stream, pipe_ctx->stream_res.tg);
    941	}
    942
    943	/* VTG is  within DCHUB command block. DCFCLK is always on */
    944	if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
    945		BREAK_TO_DEBUGGER();
    946		return DC_ERROR_UNEXPECTED;
    947	}
    948
    949	/* TODO program crtc source select for non-virtual signal*/
    950	/* TODO program FMT */
    951	/* TODO setup link_enc */
    952	/* TODO set stream attributes */
    953	/* TODO program audio */
    954	/* TODO enable stream if timing changed */
    955	/* TODO unblank stream if DP */
    956
    957	return DC_OK;
    958}
    959
    960static void dcn10_reset_back_end_for_pipe(
    961		struct dc *dc,
    962		struct pipe_ctx *pipe_ctx,
    963		struct dc_state *context)
    964{
    965	int i;
    966	struct dc_link *link;
    967	DC_LOGGER_INIT(dc->ctx->logger);
    968	if (pipe_ctx->stream_res.stream_enc == NULL) {
    969		pipe_ctx->stream = NULL;
    970		return;
    971	}
    972
    973	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
    974		link = pipe_ctx->stream->link;
     975		/* DPMS may already have disabled the stream, or the
     976		 * dpms_off status may be incorrect due to the fastboot
     977		 * feature: when the system resumes from S4 with only the
     978		 * second screen attached, dpms_off is true even though
     979		 * VBIOS lit up the eDP, so check the link status too.
     980		 */
    981		if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
    982			core_link_disable_stream(pipe_ctx);
    983		else if (pipe_ctx->stream_res.audio)
    984			dc->hwss.disable_audio_stream(pipe_ctx);
    985
    986		if (pipe_ctx->stream_res.audio) {
    987			/*disable az_endpoint*/
    988			pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
    989
    990			/*free audio*/
    991			if (dc->caps.dynamic_audio == true) {
    992				/*we have to dynamic arbitrate the audio endpoints*/
    993				/*we free the resource, need reset is_audio_acquired*/
    994				update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
    995						pipe_ctx->stream_res.audio, false);
    996				pipe_ctx->stream_res.audio = NULL;
    997			}
    998		}
    999	}
   1000
    1001	/* The caller's loop resets the parent pipe, pipe0, last. The back
    1002	 * end is shared by all pipes and is disabled only when the parent
    1003	 * pipe is disabled.
    1004	 */
   1005	if (pipe_ctx->top_pipe == NULL) {
   1006
   1007		if (pipe_ctx->stream_res.abm)
   1008			dc->hwss.set_abm_immediate_disable(pipe_ctx);
   1009
   1010		pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
   1011
   1012		pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
   1013		if (pipe_ctx->stream_res.tg->funcs->set_drr)
   1014			pipe_ctx->stream_res.tg->funcs->set_drr(
   1015					pipe_ctx->stream_res.tg, NULL);
   1016	}
   1017
   1018	for (i = 0; i < dc->res_pool->pipe_count; i++)
   1019		if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
   1020			break;
   1021
   1022	if (i == dc->res_pool->pipe_count)
   1023		return;
   1024
   1025	pipe_ctx->stream = NULL;
   1026	DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
   1027					pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
   1028}
   1029
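/*
 * Forced recovery workaround: when a HUBP reports underflow, blank every
 * HUBP, assert the DCHUBBUB global soft reset, pulse HUBP_DISABLE, release
 * the reset and un-blank again (the full register sequence is listed
 * inside). Returns true if the recovery sequence was executed.
 */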
   1030static bool dcn10_hw_wa_force_recovery(struct dc *dc)
   1031{
    1032	struct hubp *hubp;
   1033	unsigned int i;
    1034	bool need_recover = false;
   1035
   1036	if (!dc->debug.recovery_enabled)
   1037		return false;
   1038
   1039	for (i = 0; i < dc->res_pool->pipe_count; i++) {
   1040		struct pipe_ctx *pipe_ctx =
   1041			&dc->current_state->res_ctx.pipe_ctx[i];
   1042		if (pipe_ctx != NULL) {
   1043			hubp = pipe_ctx->plane_res.hubp;
   1044			if (hubp != NULL && hubp->funcs->hubp_get_underflow_status) {
   1045				if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) {
   1046					/* one pipe underflow, we will reset all the pipes*/
   1047					need_recover = true;
   1048				}
   1049			}
   1050		}
   1051	}
   1052	if (!need_recover)
   1053		return false;
   1054	/*
   1055	DCHUBP_CNTL:HUBP_BLANK_EN=1
   1056	DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1
   1057	DCHUBP_CNTL:HUBP_DISABLE=1
   1058	DCHUBP_CNTL:HUBP_DISABLE=0
   1059	DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0
   1060	DCSURF_PRIMARY_SURFACE_ADDRESS
   1061	DCHUBP_CNTL:HUBP_BLANK_EN=0
   1062	*/
   1063
   1064	for (i = 0; i < dc->res_pool->pipe_count; i++) {
   1065		struct pipe_ctx *pipe_ctx =
   1066			&dc->current_state->res_ctx.pipe_ctx[i];
   1067		if (pipe_ctx != NULL) {
   1068			hubp = pipe_ctx->plane_res.hubp;
   1069			/*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
   1070			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
   1071				hubp->funcs->set_hubp_blank_en(hubp, true);
   1072		}
   1073	}
   1074	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/
   1075	hubbub1_soft_reset(dc->res_pool->hubbub, true);
   1076
   1077	for (i = 0; i < dc->res_pool->pipe_count; i++) {
   1078		struct pipe_ctx *pipe_ctx =
   1079			&dc->current_state->res_ctx.pipe_ctx[i];
   1080		if (pipe_ctx != NULL) {
   1081			hubp = pipe_ctx->plane_res.hubp;
   1082			/*DCHUBP_CNTL:HUBP_DISABLE=1*/
   1083			if (hubp != NULL && hubp->funcs->hubp_disable_control)
   1084				hubp->funcs->hubp_disable_control(hubp, true);
   1085		}
   1086	}
   1087	for (i = 0; i < dc->res_pool->pipe_count; i++) {
   1088		struct pipe_ctx *pipe_ctx =
   1089			&dc->current_state->res_ctx.pipe_ctx[i];
   1090		if (pipe_ctx != NULL) {
   1091			hubp = pipe_ctx->plane_res.hubp;
   1092			/*DCHUBP_CNTL:HUBP_DISABLE=0*/
   1093			if (hubp != NULL && hubp->funcs->hubp_disable_control)
    1094				hubp->funcs->hubp_disable_control(hubp, false);
   1095		}
   1096	}
   1097	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/
   1098	hubbub1_soft_reset(dc->res_pool->hubbub, false);
   1099	for (i = 0; i < dc->res_pool->pipe_count; i++) {
   1100		struct pipe_ctx *pipe_ctx =
   1101			&dc->current_state->res_ctx.pipe_ctx[i];
   1102		if (pipe_ctx != NULL) {
   1103			hubp = pipe_ctx->plane_res.hubp;
   1104			/*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
   1105			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
    1106				hubp->funcs->set_hubp_blank_en(hubp, false);
   1107		}
   1108	}
   1109	return true;
   1110
   1111}
   1112
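/*
 * Sanity check that DCHUBBUB still reports p-state changes as allowed; if
 * not, optionally dump the HW state, break to the debugger, run the forced
 * recovery workaround above and check again.
 */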
   1113void dcn10_verify_allow_pstate_change_high(struct dc *dc)
   1114{
   1115	struct hubbub *hubbub = dc->res_pool->hubbub;
   1116	static bool should_log_hw_state; /* prevent hw state log by default */
   1117
   1118	if (!hubbub->funcs->verify_allow_pstate_change_high)
   1119		return;
   1120
   1121	if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) {
   1122		int i = 0;
   1123
   1124		if (should_log_hw_state)
   1125			dcn10_log_hw_state(dc, NULL);
   1126
   1127		TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
   1128		BREAK_TO_DEBUGGER();
   1129		if (dcn10_hw_wa_force_recovery(dc)) {
   1130			/*check again*/
   1131			if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub))
   1132				BREAK_TO_DEBUGGER();
   1133		}
   1134	}
   1135}
   1136
    1137/* Trigger HW to start disconnecting the plane from the stream on the next vsync */
   1138void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
   1139{
   1140	struct dce_hwseq *hws = dc->hwseq;
   1141	struct hubp *hubp = pipe_ctx->plane_res.hubp;
   1142	int dpp_id = pipe_ctx->plane_res.dpp->inst;
   1143	struct mpc *mpc = dc->res_pool->mpc;
   1144	struct mpc_tree *mpc_tree_params;
   1145	struct mpcc *mpcc_to_remove = NULL;
   1146	struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
   1147
   1148	mpc_tree_params = &(opp->mpc_tree_params);
   1149	mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);
   1150
   1151	/*Already reset*/
   1152	if (mpcc_to_remove == NULL)
   1153		return;
   1154
   1155	mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
   1156	if (opp != NULL)
   1157		opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
   1158
   1159	dc->optimized_required = true;
   1160
   1161	if (hubp->funcs->hubp_disconnect)
   1162		hubp->funcs->hubp_disconnect(hubp);
   1163
   1164	if (dc->debug.sanity_checks)
   1165		hws->funcs.verify_allow_pstate_change_high(dc);
   1166}
   1167
   1168/**
   1169 * dcn10_plane_atomic_power_down - Power down plane components.
   1170 *
    1171 * @dc: dc struct reference, used to grab the hwseq.
   1172 * @dpp: dpp struct reference.
   1173 * @hubp: hubp struct reference.
   1174 *
    1175 * Keep in mind that this operation requires a power gate configuration;
    1176 * however, power gate requests are precisely controlled to avoid problems,
    1177 * so the power gate request is normally kept disabled. This function first
    1178 * enables the power gate request, then powers down the DPP and HUBP, and
    1179 * finally disables the power gate request again.
   1180 */
   1181void dcn10_plane_atomic_power_down(struct dc *dc,
   1182		struct dpp *dpp,
   1183		struct hubp *hubp)
   1184{
   1185	struct dce_hwseq *hws = dc->hwseq;
   1186	DC_LOGGER_INIT(dc->ctx->logger);
   1187
   1188	if (REG(DC_IP_REQUEST_CNTL)) {
   1189		REG_SET(DC_IP_REQUEST_CNTL, 0,
   1190				IP_REQUEST_EN, 1);
   1191
   1192		if (hws->funcs.dpp_pg_control)
   1193			hws->funcs.dpp_pg_control(hws, dpp->inst, false);
   1194
   1195		if (hws->funcs.hubp_pg_control)
   1196			hws->funcs.hubp_pg_control(hws, hubp->inst, false);
   1197
   1198		dpp->funcs->dpp_reset(dpp);
   1199		REG_SET(DC_IP_REQUEST_CNTL, 0,
   1200				IP_REQUEST_EN, 0);
   1201		DC_LOG_DEBUG(
   1202				"Power gated front end %d\n", hubp->inst);
   1203	}
   1204}
   1205
    1206/* Disable the HW used by the plane.
    1207 * Note: cannot disable until the disconnect is complete.
    1208 */
   1209void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
   1210{
   1211	struct dce_hwseq *hws = dc->hwseq;
   1212	struct hubp *hubp = pipe_ctx->plane_res.hubp;
   1213	struct dpp *dpp = pipe_ctx->plane_res.dpp;
   1214	int opp_id = hubp->opp_id;
   1215
   1216	dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
   1217
   1218	hubp->funcs->hubp_clk_cntl(hubp, false);
   1219
   1220	dpp->funcs->dpp_dppclk_control(dpp, false, false);
   1221
   1222	if (opp_id != 0xf && pipe_ctx->stream_res.opp->mpc_tree_params.opp_list == NULL)
   1223		pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
   1224				pipe_ctx->stream_res.opp,
   1225				false);
   1226
   1227	hubp->power_gated = true;
   1228	dc->optimized_required = false; /* We're powering off, no need to optimize */
   1229
   1230	hws->funcs.plane_atomic_power_down(dc,
   1231			pipe_ctx->plane_res.dpp,
   1232			pipe_ctx->plane_res.hubp);
   1233
   1234	pipe_ctx->stream = NULL;
   1235	memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
   1236	memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
   1237	pipe_ctx->top_pipe = NULL;
   1238	pipe_ctx->bottom_pipe = NULL;
   1239	pipe_ctx->plane_state = NULL;
   1240}
   1241
   1242void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
   1243{
   1244	struct dce_hwseq *hws = dc->hwseq;
   1245	DC_LOGGER_INIT(dc->ctx->logger);
   1246
   1247	if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
   1248		return;
   1249
   1250	hws->funcs.plane_atomic_disable(dc, pipe_ctx);
   1251
   1252	apply_DEGVIDCN10_253_wa(dc);
   1253
   1254	DC_LOG_DC("Power down front end %d\n",
   1255					pipe_ctx->pipe_idx);
   1256}
   1257
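/*
 * Bring the pipes into a known state during init: blank and lock any
 * enabled OTG, reset DET sizes and the MPC muxes, disconnect and power down
 * every front end that does not belong to a seamless-boot stream, and power
 * gate all DSCs except the one attached to a running OPTC.
 */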
   1258void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
   1259{
   1260	int i;
   1261	struct dce_hwseq *hws = dc->hwseq;
   1262	struct hubbub *hubbub = dc->res_pool->hubbub;
   1263	bool can_apply_seamless_boot = false;
   1264
   1265	for (i = 0; i < context->stream_count; i++) {
   1266		if (context->streams[i]->apply_seamless_boot_optimization) {
   1267			can_apply_seamless_boot = true;
   1268			break;
   1269		}
   1270	}
   1271
   1272	for (i = 0; i < dc->res_pool->pipe_count; i++) {
   1273		struct timing_generator *tg = dc->res_pool->timing_generators[i];
   1274		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
   1275
    1276		/* The assumption is that pipe_ctx does not map irregularly to a
    1277		 * non-preferred front end. If pipe_ctx->stream is not NULL, the
    1278		 * pipe will be used, so don't disable it.
    1279		 */
   1280		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
   1281			continue;
   1282
   1283		/* Blank controller using driver code instead of
   1284		 * command table.
   1285		 */
   1286		if (tg->funcs->is_tg_enabled(tg)) {
   1287			if (hws->funcs.init_blank != NULL) {
   1288				hws->funcs.init_blank(dc, tg);
   1289				tg->funcs->lock(tg);
   1290			} else {
   1291				tg->funcs->lock(tg);
   1292				tg->funcs->set_blank(tg, true);
   1293				hwss_wait_for_blank_complete(tg);
   1294			}
   1295		}
   1296	}
   1297
   1298	/* Reset det size */
   1299	for (i = 0; i < dc->res_pool->pipe_count; i++) {
   1300		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
   1301		struct hubp *hubp = dc->res_pool->hubps[i];
   1302
   1303		/* Do not need to reset for seamless boot */
   1304		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
   1305			continue;
   1306
   1307		if (hubbub && hubp) {
   1308			if (hubbub->funcs->program_det_size)
   1309				hubbub->funcs->program_det_size(hubbub, hubp->inst, 0);
   1310		}
   1311	}
   1312
   1313	/* num_opp will be equal to number of mpcc */
   1314	for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
   1315		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
   1316
   1317		/* Cannot reset the MPC mux if seamless boot */
   1318		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
   1319			continue;
   1320
   1321		dc->res_pool->mpc->funcs->mpc_init_single_inst(
   1322				dc->res_pool->mpc, i);
   1323	}
   1324
   1325	for (i = 0; i < dc->res_pool->pipe_count; i++) {
   1326		struct timing_generator *tg = dc->res_pool->timing_generators[i];
   1327		struct hubp *hubp = dc->res_pool->hubps[i];
   1328		struct dpp *dpp = dc->res_pool->dpps[i];
   1329		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
   1330
    1331		/* The assumption is that pipe_ctx does not map irregularly to a
    1332		 * non-preferred front end. If pipe_ctx->stream is not NULL, the
    1333		 * pipe will be used, so don't disable it.
    1334		 */
   1335		if (can_apply_seamless_boot &&
   1336			pipe_ctx->stream != NULL &&
   1337			pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
   1338				pipe_ctx->stream_res.tg)) {
   1339			// Enable double buffering for OTG_BLANK no matter if
   1340			// seamless boot is enabled or not to suppress global sync
   1341			// signals when OTG blanked. This is to prevent pipe from
   1342			// requesting data while in PSR.
   1343			tg->funcs->tg_init(tg);
   1344			hubp->power_gated = true;
   1345			continue;
   1346		}
   1347
   1348		/* Disable on the current state so the new one isn't cleared. */
   1349		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
   1350
   1351		dpp->funcs->dpp_reset(dpp);
   1352
   1353		pipe_ctx->stream_res.tg = tg;
   1354		pipe_ctx->pipe_idx = i;
   1355
   1356		pipe_ctx->plane_res.hubp = hubp;
   1357		pipe_ctx->plane_res.dpp = dpp;
   1358		pipe_ctx->plane_res.mpcc_inst = dpp->inst;
   1359		hubp->mpcc_id = dpp->inst;
   1360		hubp->opp_id = OPP_ID_INVALID;
   1361		hubp->power_gated = false;
   1362
   1363		dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
   1364		dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
   1365		dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
   1366		pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
   1367
   1368		hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);
   1369
   1370		if (tg->funcs->is_tg_enabled(tg))
   1371			tg->funcs->unlock(tg);
   1372
   1373		dc->hwss.disable_plane(dc, pipe_ctx);
   1374
   1375		pipe_ctx->stream_res.tg = NULL;
   1376		pipe_ctx->plane_res.hubp = NULL;
   1377
   1378		if (tg->funcs->is_tg_enabled(tg)) {
   1379			if (tg->funcs->init_odm)
   1380				tg->funcs->init_odm(tg);
   1381		}
   1382
   1383		tg->funcs->tg_init(tg);
   1384	}
   1385
   1386	/* Power gate DSCs */
   1387	if (hws->funcs.dsc_pg_control != NULL) {
   1388		uint32_t num_opps = 0;
   1389		uint32_t opp_id_src0 = OPP_ID_INVALID;
   1390		uint32_t opp_id_src1 = OPP_ID_INVALID;
   1391
    1392		// Step 1: Find out which OPTC is running and has DSC enabled.
    1393		// We can't use res_pool->res_cap->num_timing_generator to check,
    1394		// because it records the default pipe count built into the driver,
    1395		// not the pipe count of the current chip; some ASICs have fewer
    1396		// display pipes fused than the default setting. The driver obtains
    1397		// the real information in the dcnxx_resource_construct function.
   1398		for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
   1399			uint32_t optc_dsc_state = 0;
   1400			struct timing_generator *tg = dc->res_pool->timing_generators[i];
   1401
   1402			if (tg->funcs->is_tg_enabled(tg)) {
   1403				if (tg->funcs->get_dsc_status)
   1404					tg->funcs->get_dsc_status(tg, &optc_dsc_state);
    1405				// Only one OPTC can have DSC on, so exit the loop on the first hit.
    1406				// A non-zero value means DSC is enabled.
   1407				if (optc_dsc_state != 0) {
   1408					tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
   1409					break;
   1410				}
   1411			}
   1412		}
   1413
    1414		// Step 2: Power down every DSC except the one used by the running OPTC
   1415		for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
   1416			struct dcn_dsc_state s  = {0};
   1417
   1418			dc->res_pool->dscs[i]->funcs->dsc_read_state(dc->res_pool->dscs[i], &s);
   1419
   1420			if ((s.dsc_opp_source == opp_id_src0 || s.dsc_opp_source == opp_id_src1) &&
   1421				s.dsc_clock_en && s.dsc_fw_en)
   1422				continue;
   1423
   1424			hws->funcs.dsc_pg_control(hws, dc->res_pool->dscs[i]->inst, false);
   1425		}
   1426	}
   1427}
   1428
   1429void dcn10_init_hw(struct dc *dc)
   1430{
   1431	int i;
   1432	struct abm *abm = dc->res_pool->abm;
   1433	struct dmcu *dmcu = dc->res_pool->dmcu;
   1434	struct dce_hwseq *hws = dc->hwseq;
   1435	struct dc_bios *dcb = dc->ctx->dc_bios;
   1436	struct resource_pool *res_pool = dc->res_pool;
   1437	uint32_t backlight = MAX_BACKLIGHT_LEVEL;
   1438	bool   is_optimized_init_done = false;
   1439
   1440	if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
   1441		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
   1442
   1443	/* Align bw context with hw config when system resume. */
   1444	if (dc->clk_mgr->clks.dispclk_khz != 0 && dc->clk_mgr->clks.dppclk_khz != 0) {
   1445		dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz = dc->clk_mgr->clks.dispclk_khz;
   1446		dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz = dc->clk_mgr->clks.dppclk_khz;
   1447	}
   1448
   1449	// Initialize the dccg
   1450	if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init)
   1451		dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg);
   1452
   1453	if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
   1454
   1455		REG_WRITE(REFCLK_CNTL, 0);
   1456		REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
   1457		REG_WRITE(DIO_MEM_PWR_CTRL, 0);
   1458
   1459		if (!dc->debug.disable_clock_gate) {
   1460			/* enable all DCN clock gating */
   1461			REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
   1462
   1463			REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
   1464
   1465			REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
   1466		}
   1467
   1468		//Enable ability to power gate / don't force power on permanently
   1469		if (hws->funcs.enable_power_gating_plane)
   1470			hws->funcs.enable_power_gating_plane(hws, true);
   1471
   1472		return;
   1473	}
   1474
   1475	if (!dcb->funcs->is_accelerated_mode(dcb))
   1476		hws->funcs.disable_vga(dc->hwseq);
   1477
   1478	hws->funcs.bios_golden_init(dc);
   1479
   1480	if (dc->ctx->dc_bios->fw_info_valid) {
   1481		res_pool->ref_clocks.xtalin_clock_inKhz =
   1482				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
   1483
   1484		if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
   1485			if (res_pool->dccg && res_pool->hubbub) {
   1486
   1487				(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
   1488						dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
   1489						&res_pool->ref_clocks.dccg_ref_clock_inKhz);
   1490
   1491				(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
   1492						res_pool->ref_clocks.dccg_ref_clock_inKhz,
   1493						&res_pool->ref_clocks.dchub_ref_clock_inKhz);
   1494			} else {
   1495				// Not all ASICs have DCCG sw component
   1496				res_pool->ref_clocks.dccg_ref_clock_inKhz =
   1497						res_pool->ref_clocks.xtalin_clock_inKhz;
   1498				res_pool->ref_clocks.dchub_ref_clock_inKhz =
   1499						res_pool->ref_clocks.xtalin_clock_inKhz;
   1500			}
   1501		}
   1502	} else
   1503		ASSERT_CRITICAL(false);
   1504
   1505	for (i = 0; i < dc->link_count; i++) {
   1506		/* Power up AND update implementation according to the
   1507		 * required signal (which may be different from the
   1508		 * default signal on connector).
   1509		 */
   1510		struct dc_link *link = dc->links[i];
   1511
   1512		if (!is_optimized_init_done)
   1513			link->link_enc->funcs->hw_init(link->link_enc);
   1514
   1515		/* Check for enabled DIG to identify enabled display */
   1516		if (link->link_enc->funcs->is_dig_enabled &&
   1517			link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
   1518			link->link_status.link_active = true;
   1519			if (link->link_enc->funcs->fec_is_active &&
   1520					link->link_enc->funcs->fec_is_active(link->link_enc))
   1521				link->fec_state = dc_link_fec_enabled;
   1522		}
   1523	}
   1524
   1525	/* we want to turn off all dp displays before doing detection */
   1526	dc_link_blank_all_dp_displays(dc);
   1527
   1528	if (hws->funcs.enable_power_gating_plane)
   1529		hws->funcs.enable_power_gating_plane(dc->hwseq, true);
   1530
    1531	/* If taking over control from VBIOS, we may want to optimize our first
   1532	 * mode set, so we need to skip powering down pipes until we know which
   1533	 * pipes we want to use.
   1534	 * Otherwise, if taking control is not possible, we need to power
   1535	 * everything down.
   1536	 */
   1537	if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {
   1538		if (!is_optimized_init_done) {
   1539			hws->funcs.init_pipes(dc, dc->current_state);
   1540			if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
   1541				dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
   1542						!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
   1543		}
   1544	}
   1545
   1546	if (!is_optimized_init_done) {
   1547
   1548		for (i = 0; i < res_pool->audio_count; i++) {
   1549			struct audio *audio = res_pool->audios[i];
   1550
   1551			audio->funcs->hw_init(audio);
   1552		}
   1553
   1554		for (i = 0; i < dc->link_count; i++) {
   1555			struct dc_link *link = dc->links[i];
   1556
   1557			if (link->panel_cntl)
   1558				backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
   1559		}
   1560
   1561		if (abm != NULL)
   1562			abm->funcs->abm_init(abm, backlight);
   1563
   1564		if (dmcu != NULL && !dmcu->auto_load_dmcu)
   1565			dmcu->funcs->dmcu_init(dmcu);
   1566	}
   1567
   1568	if (abm != NULL && dmcu != NULL)
   1569		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
   1570
    1571	/* Power AFMT HDMI memory. TODO: may move to output enable/disable to save power */
   1572	if (!is_optimized_init_done)
   1573		REG_WRITE(DIO_MEM_PWR_CTRL, 0);
   1574
   1575	if (!dc->debug.disable_clock_gate) {
   1576		/* enable all DCN clock gating */
   1577		REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
   1578
   1579		REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
   1580
   1581		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
   1582	}
   1583
   1584	if (dc->clk_mgr->funcs->notify_wm_ranges)
   1585		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
   1586}
   1587
    1588 /* In headless boot cases, DIG may be turned
    1589  * on, which causes HW/SW discrepancies.
    1590  * To avoid this, power down the hardware on boot
    1591  * if DIG is turned on.
    1592  */
   1593void dcn10_power_down_on_boot(struct dc *dc)
   1594{
   1595	struct dc_link *edp_links[MAX_NUM_EDP];
   1596	struct dc_link *edp_link = NULL;
   1597	int edp_num;
   1598	int i = 0;
   1599
   1600	get_edp_links(dc, edp_links, &edp_num);
   1601	if (edp_num)
   1602		edp_link = edp_links[0];
   1603
   1604	if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
   1605			edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
   1606			dc->hwseq->funcs.edp_backlight_control &&
   1607			dc->hwss.power_down &&
   1608			dc->hwss.edp_power_control) {
   1609		dc->hwseq->funcs.edp_backlight_control(edp_link, false);
   1610		dc->hwss.power_down(dc);
   1611		dc->hwss.edp_power_control(edp_link, false);
   1612	} else {
   1613		for (i = 0; i < dc->link_count; i++) {
   1614			struct dc_link *link = dc->links[i];
   1615
   1616			if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
   1617					link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
   1618					dc->hwss.power_down) {
   1619				dc->hwss.power_down(dc);
   1620				break;
   1621			}
   1622
   1623		}
   1624	}
   1625
   1626	/*
   1627	 * Call update_clocks with empty context
   1628	 * to send DISPLAY_OFF
   1629	 * Otherwise DISPLAY_OFF may not be asserted
   1630	 */
   1631	if (dc->clk_mgr->funcs->set_low_power_state)
   1632		dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
   1633}
   1634
   1635void dcn10_reset_hw_ctx_wrap(
   1636		struct dc *dc,
   1637		struct dc_state *context)
   1638{
   1639	int i;
   1640	struct dce_hwseq *hws = dc->hwseq;
   1641
   1642	/* Reset Back End*/
   1643	for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
   1644		struct pipe_ctx *pipe_ctx_old =
   1645			&dc->current_state->res_ctx.pipe_ctx[i];
   1646		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
   1647
   1648		if (!pipe_ctx_old->stream)
   1649			continue;
   1650
   1651		if (pipe_ctx_old->top_pipe)
   1652			continue;
   1653
   1654		if (!pipe_ctx->stream ||
   1655				pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
   1656			struct clock_source *old_clk = pipe_ctx_old->clock_source;
   1657
   1658			dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
   1659			if (hws->funcs.enable_stream_gating)
   1660				hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
   1661			if (old_clk)
   1662				old_clk->funcs->cs_power_down(old_clk);
   1663		}
   1664	}
   1665}
   1666
   1667static bool patch_address_for_sbs_tb_stereo(
   1668		struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
   1669{
   1670	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
   1671	bool sec_split = pipe_ctx->top_pipe &&
   1672			pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
   1673	if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
   1674		(pipe_ctx->stream->timing.timing_3d_format ==
   1675		 TIMING_3D_FORMAT_SIDE_BY_SIDE ||
   1676		 pipe_ctx->stream->timing.timing_3d_format ==
   1677		 TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
   1678		*addr = plane_state->address.grph_stereo.left_addr;
   1679		plane_state->address.grph_stereo.left_addr =
   1680		plane_state->address.grph_stereo.right_addr;
   1681		return true;
   1682	} else {
   1683		if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
   1684			plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
   1685			plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
   1686			plane_state->address.grph_stereo.right_addr =
   1687			plane_state->address.grph_stereo.left_addr;
   1688			plane_state->address.grph_stereo.right_meta_addr =
   1689			plane_state->address.grph_stereo.left_meta_addr;
   1690		}
   1691	}
   1692	return false;
   1693}
   1694
   1695void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
   1696{
   1697	bool addr_patched = false;
   1698	PHYSICAL_ADDRESS_LOC addr;
   1699	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
   1700
   1701	if (plane_state == NULL)
   1702		return;
   1703
   1704	addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
   1705
   1706	pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
   1707			pipe_ctx->plane_res.hubp,
   1708			&plane_state->address,
   1709			plane_state->flip_immediate);
   1710
   1711	plane_state->status.requested_address = plane_state->address;
   1712
   1713	if (plane_state->flip_immediate)
   1714		plane_state->status.current_address = plane_state->address;
   1715
   1716	if (addr_patched)
   1717		pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
   1718}
   1719
   1720bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
   1721			const struct dc_plane_state *plane_state)
   1722{
   1723	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
   1724	const struct dc_transfer_func *tf = NULL;
   1725	bool result = true;
   1726
   1727	if (dpp_base == NULL)
   1728		return false;
   1729
   1730	if (plane_state->in_transfer_func)
   1731		tf = plane_state->in_transfer_func;
   1732
   1733	if (plane_state->gamma_correction &&
   1734		!dpp_base->ctx->dc->debug.always_use_regamma
   1735		&& !plane_state->gamma_correction->is_identity
   1736			&& dce_use_lut(plane_state->format))
   1737		dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);
   1738
   1739	if (tf == NULL)
   1740		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
   1741	else if (tf->type == TF_TYPE_PREDEFINED) {
   1742		switch (tf->tf) {
   1743		case TRANSFER_FUNCTION_SRGB:
   1744			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_sRGB);
   1745			break;
   1746		case TRANSFER_FUNCTION_BT709:
   1747			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_xvYCC);
   1748			break;
   1749		case TRANSFER_FUNCTION_LINEAR:
   1750			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
   1751			break;
   1752		case TRANSFER_FUNCTION_PQ:
   1753			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
   1754			cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
   1755			dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
   1756			result = true;
   1757			break;
   1758		default:
   1759			result = false;
   1760			break;
   1761		}
   1762	} else if (tf->type == TF_TYPE_BYPASS) {
   1763		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
   1764	} else {
   1765		cm_helper_translate_curve_to_degamma_hw_format(tf,
   1766					&dpp_base->degamma_params);
   1767		dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
   1768				&dpp_base->degamma_params);
   1769		result = true;
   1770	}
   1771
   1772	return result;
   1773}
   1774
   1775#define MAX_NUM_HW_POINTS 0x200
   1776
   1777static void log_tf(struct dc_context *ctx,
   1778				struct dc_transfer_func *tf, uint32_t hw_points_num)
   1779{
    1780	// DC_LOG_GAMMA is the default logging of all hw points
    1781	// DC_LOG_ALL_GAMMA logs all points, not only hw points
    1782	// DC_LOG_ALL_TF_CHANNELS logs all channels of the tf
   1783	int i = 0;
   1784
   1785	DC_LOGGER_INIT(ctx->logger);
   1786	DC_LOG_GAMMA("Gamma Correction TF");
   1787	DC_LOG_ALL_GAMMA("Logging all tf points...");
   1788	DC_LOG_ALL_TF_CHANNELS("Logging all channels...");
   1789
   1790	for (i = 0; i < hw_points_num; i++) {
   1791		DC_LOG_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
   1792		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
   1793		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
   1794	}
   1795
   1796	for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) {
   1797		DC_LOG_ALL_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
   1798		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
   1799		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
   1800	}
   1801}
   1802
   1803bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
   1804				const struct dc_stream_state *stream)
   1805{
   1806	struct dpp *dpp = pipe_ctx->plane_res.dpp;
   1807
   1808	if (dpp == NULL)
   1809		return false;
   1810
   1811	dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;
   1812
   1813	if (stream->out_transfer_func &&
   1814	    stream->out_transfer_func->type == TF_TYPE_PREDEFINED &&
   1815	    stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB)
   1816		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB);
   1817
    1818	/* dcn10_translate_regamma_to_hw_format takes 750us, so only do it on a
    1819	 * full update.
    1820	 */
   1821	else if (cm_helper_translate_curve_to_hw_format(
   1822			stream->out_transfer_func,
   1823			&dpp->regamma_params, false)) {
   1824		dpp->funcs->dpp_program_regamma_pwl(
   1825				dpp,
   1826				&dpp->regamma_params, OPP_REGAMMA_USER);
   1827	} else
   1828		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);
   1829
   1830	if (stream != NULL && stream->ctx != NULL &&
   1831			stream->out_transfer_func != NULL) {
   1832		log_tf(stream->ctx,
   1833				stream->out_transfer_func,
   1834				dpp->regamma_params.hw_points_num);
   1835	}
   1836
   1837	return true;
   1838}
   1839
   1840void dcn10_pipe_control_lock(
   1841	struct dc *dc,
   1842	struct pipe_ctx *pipe,
   1843	bool lock)
   1844{
   1845	struct dce_hwseq *hws = dc->hwseq;
   1846
    1847	/* Use the TG master update lock to lock everything on the TG;
    1848	 * therefore only the top pipe needs to be locked.
    1849	 */
   1850	if (!pipe || pipe->top_pipe)
   1851		return;
   1852
   1853	if (dc->debug.sanity_checks)
   1854		hws->funcs.verify_allow_pstate_change_high(dc);
   1855
   1856	if (lock)
   1857		pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
   1858	else
   1859		pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
   1860
   1861	if (dc->debug.sanity_checks)
   1862		hws->funcs.verify_allow_pstate_change_high(dc);
   1863}
   1864
   1865/**
   1866 * delay_cursor_until_vupdate() - Delay cursor update if too close to VUPDATE.
   1867 *
   1868 * Software keepout workaround to prevent cursor update locking from stalling
    1869  * out cursor updates indefinitely, or old values from being retained in
   1870 * the case where the viewport changes in the same frame as the cursor.
   1871 *
   1872 * The idea is to calculate the remaining time from VPOS to VUPDATE. If it's
   1873 * too close to VUPDATE, then stall out until VUPDATE finishes.
   1874 *
   1875 * TODO: Optimize cursor programming to be once per frame before VUPDATE
   1876 *       to avoid the need for this workaround.
   1877 */
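/*
 * Illustrative note (not from the original source): a worked example of the
 * timing math below, assuming a standard CEA-861 1080p60 timing with
 * h_total = 2200 and pix_clk_100hz = 1485000 (148.5 MHz):
 *   us_per_line = 2200 * 10000 / 1485000              = 14 us (integer math)
 *   4 lines left => us_to_vupdate = 4 * 14 = 56 us   <= 70 us threshold,
 * so the cursor update is delayed past the VUPDATE window instead of racing it.
 */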
   1878static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
   1879{
   1880	struct dc_stream_state *stream = pipe_ctx->stream;
   1881	struct crtc_position position;
   1882	uint32_t vupdate_start, vupdate_end;
   1883	unsigned int lines_to_vupdate, us_to_vupdate, vpos;
   1884	unsigned int us_per_line, us_vupdate;
   1885
   1886	if (!dc->hwss.calc_vupdate_position || !dc->hwss.get_position)
   1887		return;
   1888
   1889	if (!pipe_ctx->stream_res.stream_enc || !pipe_ctx->stream_res.tg)
   1890		return;
   1891
   1892	dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
   1893				       &vupdate_end);
   1894
   1895	dc->hwss.get_position(&pipe_ctx, 1, &position);
   1896	vpos = position.vertical_count;
   1897
   1898	/* Avoid wraparound calculation issues */
   1899	vupdate_start += stream->timing.v_total;
   1900	vupdate_end += stream->timing.v_total;
   1901	vpos += stream->timing.v_total;
   1902
   1903	if (vpos <= vupdate_start) {
   1904		/* VPOS is in VACTIVE or back porch. */
   1905		lines_to_vupdate = vupdate_start - vpos;
   1906	} else if (vpos > vupdate_end) {
   1907		/* VPOS is in the front porch. */
   1908		return;
   1909	} else {
   1910		/* VPOS is in VUPDATE. */
   1911		lines_to_vupdate = 0;
   1912	}
   1913
   1914	/* Calculate time until VUPDATE in microseconds. */
   1915	us_per_line =
   1916		stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
   1917	us_to_vupdate = lines_to_vupdate * us_per_line;
   1918
    1919	/* 70 us is a conservative estimate of cursor update time */
   1920	if (us_to_vupdate > 70)
   1921		return;
   1922
   1923	/* Stall out until the cursor update completes. */
   1924	if (vupdate_end < vupdate_start)
   1925		vupdate_end += stream->timing.v_total;
   1926	us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
   1927	udelay(us_to_vupdate + us_vupdate);
   1928}
   1929
   1930void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
   1931{
   1932	/* cursor lock is per MPCC tree, so only need to lock one pipe per stream */
   1933	if (!pipe || pipe->top_pipe)
   1934		return;
   1935
   1936	/* Prevent cursor lock from stalling out cursor updates. */
   1937	if (lock)
   1938		delay_cursor_until_vupdate(dc, pipe);
   1939
   1940	if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
   1941		union dmub_hw_lock_flags hw_locks = { 0 };
   1942		struct dmub_hw_lock_inst_flags inst_flags = { 0 };
   1943
   1944		hw_locks.bits.lock_cursor = 1;
   1945		inst_flags.opp_inst = pipe->stream_res.opp->inst;
   1946
   1947		dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
   1948					lock,
   1949					&hw_locks,
   1950					&inst_flags);
   1951	} else
   1952		dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
   1953				pipe->stream_res.opp->inst, lock);
   1954}
   1955
   1956static bool wait_for_reset_trigger_to_occur(
   1957	struct dc_context *dc_ctx,
   1958	struct timing_generator *tg)
   1959{
   1960	bool rc = false;
   1961
    1962	/* To avoid an endless loop, we wait at most
    1963	 * frames_to_wait_on_triggered_reset frames for the reset to occur. */
   1964	const uint32_t frames_to_wait_on_triggered_reset = 10;
   1965	int i;
   1966
   1967	for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {
   1968
   1969		if (!tg->funcs->is_counter_moving(tg)) {
   1970			DC_ERROR("TG counter is not moving!\n");
   1971			break;
   1972		}
   1973
   1974		if (tg->funcs->did_triggered_reset_occur(tg)) {
   1975			rc = true;
   1976			/* usually occurs at i=1 */
   1977			DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",
   1978					i);
   1979			break;
   1980		}
   1981
   1982		/* Wait for one frame. */
   1983		tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
   1984		tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
   1985	}
   1986
   1987	if (false == rc)
   1988		DC_ERROR("GSL: Timeout on reset trigger!\n");
   1989
   1990	return rc;
   1991}
   1992
   1993static uint64_t reduceSizeAndFraction(uint64_t *numerator,
   1994				      uint64_t *denominator,
    1995				      bool checkUint32Boundary)
    1996	{
    1997		int i;
    1998		bool ret = checkUint32Boundary == false;
   1999	uint64_t max_int32 = 0xffffffff;
   2000	uint64_t num, denom;
   2001	static const uint16_t prime_numbers[] = {
   2002		2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43,
   2003		47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103,
   2004		107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163,
   2005		167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227,
   2006		229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
   2007		283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353,
   2008		359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421,
   2009		431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487,
   2010		491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
   2011		571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631,
   2012		641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
   2013		709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773,
   2014		787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857,
   2015		859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
   2016		941, 947, 953, 967, 971, 977, 983, 991, 997};
   2017	int count = ARRAY_SIZE(prime_numbers);
   2018
   2019	num = *numerator;
   2020	denom = *denominator;
   2021	for (i = 0; i < count; i++) {
   2022		uint32_t num_remainder, denom_remainder;
   2023		uint64_t num_result, denom_result;
    2024		if (checkUint32Boundary &&
   2025			num <= max_int32 && denom <= max_int32) {
   2026			ret = true;
   2027			break;
   2028		}
   2029		do {
   2030			num_result = div_u64_rem(num, prime_numbers[i], &num_remainder);
   2031			denom_result = div_u64_rem(denom, prime_numbers[i], &denom_remainder);
   2032			if (num_remainder == 0 && denom_remainder == 0) {
   2033				num = num_result;
   2034				denom = denom_result;
   2035			}
   2036		} while (num_remainder == 0 && denom_remainder == 0);
   2037	}
   2038	*numerator = num;
   2039	*denominator = denom;
   2040	return ret;
   2041}
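/*
 * Illustrative note (not from the original source): reduceSizeAndFraction()
 * strips common small prime factors from numerator/denominator, e.g.
 *   600/900 -> /2 -> 300/450 -> /2 -> 150/225 -> /3 -> 50/75 -> /5 -> 10/15 -> /5 -> 2/3,
 * and with the uint32 boundary check enabled it stops early once both values
 * fit in 32 bits, which is all the DTO programming below needs.
 */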
   2042
   2043static bool is_low_refresh_rate(struct pipe_ctx *pipe)
   2044{
   2045	uint32_t master_pipe_refresh_rate =
   2046		pipe->stream->timing.pix_clk_100hz * 100 /
   2047		pipe->stream->timing.h_total /
   2048		pipe->stream->timing.v_total;
   2049	return master_pipe_refresh_rate <= 30;
   2050}
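/*
 * Illustrative note (not from the original source): the refresh rate above is
 * pix_clk_100hz * 100 / h_total / v_total. For a 1080p60 CEA timing
 * (pix_clk_100hz = 1485000, h_total = 2200, v_total = 1125):
 *   1485000 * 100 / 2200 / 1125 = 60 Hz, so it is not treated as low refresh.
 */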
   2051
   2052static uint8_t get_clock_divider(struct pipe_ctx *pipe,
   2053				 bool account_low_refresh_rate)
   2054{
   2055	uint32_t clock_divider = 1;
   2056	uint32_t numpipes = 1;
   2057
   2058	if (account_low_refresh_rate && is_low_refresh_rate(pipe))
   2059		clock_divider *= 2;
   2060
   2061	if (pipe->stream_res.pix_clk_params.pixel_encoding == PIXEL_ENCODING_YCBCR420)
   2062		clock_divider *= 2;
   2063
   2064	while (pipe->next_odm_pipe) {
   2065		pipe = pipe->next_odm_pipe;
   2066		numpipes++;
   2067	}
   2068	clock_divider *= numpipes;
   2069
   2070	return clock_divider;
   2071}
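/*
 * Illustrative note (not from the original source): for a YCbCr 4:2:0 stream
 * split across two ODM pipes, the divider above is 1 * 2 (420) * 2 (pipes) = 4;
 * is_low_refresh_rate() only doubles it further when account_low_refresh_rate
 * is set by the caller.
 */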
   2072
   2073static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
   2074				    struct pipe_ctx *grouped_pipes[])
   2075{
   2076	struct dc_context *dc_ctx = dc->ctx;
   2077	int i, master = -1, embedded = -1;
   2078	struct dc_crtc_timing *hw_crtc_timing;
   2079	uint64_t phase[MAX_PIPES];
   2080	uint64_t modulo[MAX_PIPES];
   2081	unsigned int pclk;
   2082
   2083	uint32_t embedded_pix_clk_100hz;
   2084	uint16_t embedded_h_total;
   2085	uint16_t embedded_v_total;
   2086	uint32_t dp_ref_clk_100hz =
   2087		dc->res_pool->dp_clock_source->ctx->dc->clk_mgr->dprefclk_khz*10;
   2088
   2089	hw_crtc_timing = kcalloc(MAX_PIPES, sizeof(*hw_crtc_timing), GFP_KERNEL);
   2090	if (!hw_crtc_timing)
   2091		return master;
   2092
   2093	if (dc->config.vblank_alignment_dto_params &&
   2094		dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk) {
   2095		embedded_h_total =
   2096			(dc->config.vblank_alignment_dto_params >> 32) & 0x7FFF;
   2097		embedded_v_total =
   2098			(dc->config.vblank_alignment_dto_params >> 48) & 0x7FFF;
   2099		embedded_pix_clk_100hz =
   2100			dc->config.vblank_alignment_dto_params & 0xFFFFFFFF;
   2101
   2102		for (i = 0; i < group_size; i++) {
   2103			grouped_pipes[i]->stream_res.tg->funcs->get_hw_timing(
   2104					grouped_pipes[i]->stream_res.tg,
   2105					&hw_crtc_timing[i]);
   2106			dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
   2107				dc->res_pool->dp_clock_source,
   2108				grouped_pipes[i]->stream_res.tg->inst,
   2109				&pclk);
   2110			hw_crtc_timing[i].pix_clk_100hz = pclk;
   2111			if (dc_is_embedded_signal(
   2112					grouped_pipes[i]->stream->signal)) {
   2113				embedded = i;
   2114				master = i;
   2115				phase[i] = embedded_pix_clk_100hz*100;
   2116				modulo[i] = dp_ref_clk_100hz*100;
   2117			} else {
   2118
   2119				phase[i] = (uint64_t)embedded_pix_clk_100hz*
   2120					hw_crtc_timing[i].h_total*
   2121					hw_crtc_timing[i].v_total;
   2122				phase[i] = div_u64(phase[i], get_clock_divider(grouped_pipes[i], true));
   2123				modulo[i] = (uint64_t)dp_ref_clk_100hz*
   2124					embedded_h_total*
   2125					embedded_v_total;
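				/*
				 * Illustrative note (not from the original source), assuming the
				 * usual DTO relation pix_clk = dprefclk * phase / modulo: with the
				 * phase/modulo chosen above, each pipe's effective pixel clock
				 * becomes embedded_pix_clk * (h_total_i * v_total_i) /
				 * (embedded_h_total * embedded_v_total), i.e. every pipe in the
				 * group ends up with the same frame rate as the embedded panel.
				 */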
   2126
   2127				if (reduceSizeAndFraction(&phase[i],
   2128						&modulo[i], true) == false) {
    2129					/*
    2130					 * This stops this timing from being
    2131					 * reported as synchronizable.
    2132					 */
   2133					DC_SYNC_INFO("Failed to reduce DTO parameters\n");
   2134					grouped_pipes[i]->stream->has_non_synchronizable_pclk = true;
   2135				}
   2136			}
   2137		}
   2138
   2139		for (i = 0; i < group_size; i++) {
   2140			if (i != embedded && !grouped_pipes[i]->stream->has_non_synchronizable_pclk) {
   2141				dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk(
   2142					dc->res_pool->dp_clock_source,
   2143					grouped_pipes[i]->stream_res.tg->inst,
   2144					phase[i], modulo[i]);
   2145				dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
   2146					dc->res_pool->dp_clock_source,
   2147					grouped_pipes[i]->stream_res.tg->inst, &pclk);
   2148					grouped_pipes[i]->stream->timing.pix_clk_100hz =
   2149						pclk*get_clock_divider(grouped_pipes[i], false);
   2150				if (master == -1)
   2151					master = i;
   2152			}
   2153		}
   2154
   2155	}
   2156
   2157	kfree(hw_crtc_timing);
   2158	return master;
   2159}
   2160
   2161void dcn10_enable_vblanks_synchronization(
   2162	struct dc *dc,
   2163	int group_index,
   2164	int group_size,
   2165	struct pipe_ctx *grouped_pipes[])
   2166{
   2167	struct dc_context *dc_ctx = dc->ctx;
   2168	struct output_pixel_processor *opp;
   2169	struct timing_generator *tg;
   2170	int i, width, height, master;
   2171
   2172	for (i = 1; i < group_size; i++) {
   2173		opp = grouped_pipes[i]->stream_res.opp;
   2174		tg = grouped_pipes[i]->stream_res.tg;
   2175		tg->funcs->get_otg_active_size(tg, &width, &height);
   2176		if (opp->funcs->opp_program_dpg_dimensions)
   2177			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
   2178	}
   2179
   2180	for (i = 0; i < group_size; i++) {
   2181		if (grouped_pipes[i]->stream == NULL)
   2182			continue;
   2183		grouped_pipes[i]->stream->vblank_synchronized = false;
   2184		grouped_pipes[i]->stream->has_non_synchronizable_pclk = false;
   2185	}
   2186
   2187	DC_SYNC_INFO("Aligning DP DTOs\n");
   2188
   2189	master = dcn10_align_pixel_clocks(dc, group_size, grouped_pipes);
   2190
   2191	DC_SYNC_INFO("Synchronizing VBlanks\n");
   2192
   2193	if (master >= 0) {
   2194		for (i = 0; i < group_size; i++) {
   2195			if (i != master && !grouped_pipes[i]->stream->has_non_synchronizable_pclk)
   2196			grouped_pipes[i]->stream_res.tg->funcs->align_vblanks(
   2197				grouped_pipes[master]->stream_res.tg,
   2198				grouped_pipes[i]->stream_res.tg,
   2199				grouped_pipes[master]->stream->timing.pix_clk_100hz,
   2200				grouped_pipes[i]->stream->timing.pix_clk_100hz,
   2201				get_clock_divider(grouped_pipes[master], false),
   2202				get_clock_divider(grouped_pipes[i], false));
   2203				grouped_pipes[i]->stream->vblank_synchronized = true;
   2204		}
   2205		grouped_pipes[master]->stream->vblank_synchronized = true;
   2206		DC_SYNC_INFO("Sync complete\n");
   2207	}
   2208
   2209	for (i = 1; i < group_size; i++) {
   2210		opp = grouped_pipes[i]->stream_res.opp;
   2211		tg = grouped_pipes[i]->stream_res.tg;
   2212		tg->funcs->get_otg_active_size(tg, &width, &height);
   2213		if (opp->funcs->opp_program_dpg_dimensions)
   2214			opp->funcs->opp_program_dpg_dimensions(opp, width, height);
   2215	}
   2216}
   2217
   2218void dcn10_enable_timing_synchronization(
   2219	struct dc *dc,
   2220	int group_index,
   2221	int group_size,
   2222	struct pipe_ctx *grouped_pipes[])
   2223{
   2224	struct dc_context *dc_ctx = dc->ctx;
   2225	struct output_pixel_processor *opp;
   2226	struct timing_generator *tg;
   2227	int i, width, height;
   2228
   2229	DC_SYNC_INFO("Setting up OTG reset trigger\n");
   2230
   2231	for (i = 1; i < group_size; i++) {
   2232		opp = grouped_pipes[i]->stream_res.opp;
   2233		tg = grouped_pipes[i]->stream_res.tg;
   2234		tg->funcs->get_otg_active_size(tg, &width, &height);
   2235		if (opp->funcs->opp_program_dpg_dimensions)
   2236			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
   2237	}
   2238
   2239	for (i = 0; i < group_size; i++) {
   2240		if (grouped_pipes[i]->stream == NULL)
   2241			continue;
   2242		grouped_pipes[i]->stream->vblank_synchronized = false;
   2243	}
   2244
   2245	for (i = 1; i < group_size; i++)
   2246		grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
   2247				grouped_pipes[i]->stream_res.tg,
   2248				grouped_pipes[0]->stream_res.tg->inst);
   2249
   2250	DC_SYNC_INFO("Waiting for trigger\n");
   2251
    2252	/* Only need to check one pipe for the reset having occurred, as all the
    2253	 * others are synchronized to it. Look at the last pipe programmed to reset.
    2254	 */
   2255
   2256	wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
   2257	for (i = 1; i < group_size; i++)
   2258		grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
   2259				grouped_pipes[i]->stream_res.tg);
   2260
   2261	for (i = 1; i < group_size; i++) {
   2262		opp = grouped_pipes[i]->stream_res.opp;
   2263		tg = grouped_pipes[i]->stream_res.tg;
   2264		tg->funcs->get_otg_active_size(tg, &width, &height);
   2265		if (opp->funcs->opp_program_dpg_dimensions)
   2266			opp->funcs->opp_program_dpg_dimensions(opp, width, height);
   2267	}
   2268
   2269	DC_SYNC_INFO("Sync complete\n");
   2270}
   2271
   2272void dcn10_enable_per_frame_crtc_position_reset(
   2273	struct dc *dc,
   2274	int group_size,
   2275	struct pipe_ctx *grouped_pipes[])
   2276{
   2277	struct dc_context *dc_ctx = dc->ctx;
   2278	int i;
   2279
   2280	DC_SYNC_INFO("Setting up\n");
   2281	for (i = 0; i < group_size; i++)
   2282		if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
   2283			grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
   2284					grouped_pipes[i]->stream_res.tg,
   2285					0,
   2286					&grouped_pipes[i]->stream->triggered_crtc_reset);
   2287
   2288	DC_SYNC_INFO("Waiting for trigger\n");
   2289
   2290	for (i = 0; i < group_size; i++)
   2291		wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);
   2292
   2293	DC_SYNC_INFO("Multi-display sync is complete\n");
   2294}
   2295
   2296static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
   2297		struct vm_system_aperture_param *apt,
   2298		struct dce_hwseq *hws)
   2299{
   2300	PHYSICAL_ADDRESS_LOC physical_page_number;
   2301	uint32_t logical_addr_low;
   2302	uint32_t logical_addr_high;
   2303
   2304	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
   2305			PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
   2306	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
   2307			PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);
   2308
   2309	REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
   2310			LOGICAL_ADDR, &logical_addr_low);
   2311
   2312	REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
   2313			LOGICAL_ADDR, &logical_addr_high);
   2314
   2315	apt->sys_default.quad_part =  physical_page_number.quad_part << 12;
   2316	apt->sys_low.quad_part =  (int64_t)logical_addr_low << 18;
   2317	apt->sys_high.quad_part =  (int64_t)logical_addr_high << 18;
   2318}
   2319
    2320	/* Temporarily read settings here; in the future the values will come from KMD directly */
   2321static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
   2322		struct vm_context0_param *vm0,
   2323		struct dce_hwseq *hws)
   2324{
   2325	PHYSICAL_ADDRESS_LOC fb_base;
   2326	PHYSICAL_ADDRESS_LOC fb_offset;
   2327	uint32_t fb_base_value;
   2328	uint32_t fb_offset_value;
   2329
   2330	REG_GET(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, &fb_base_value);
   2331	REG_GET(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, &fb_offset_value);
   2332
   2333	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
   2334			PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
   2335	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
   2336			PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);
   2337
   2338	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
   2339			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
   2340	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
   2341			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);
   2342
   2343	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
   2344			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
   2345	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
   2346			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);
   2347
   2348	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
   2349			PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
   2350	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
   2351			PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);
   2352
   2353	/*
    2354	 * The values in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR are in UMA space.
   2355	 * Therefore we need to do
   2356	 * DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
   2357	 * - DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
   2358	 */
   2359	fb_base.quad_part = (uint64_t)fb_base_value << 24;
   2360	fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
   2361	vm0->pte_base.quad_part += fb_base.quad_part;
   2362	vm0->pte_base.quad_part -= fb_offset.quad_part;
   2363}
   2364
   2365
   2366static void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
   2367{
   2368	struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
   2369	struct vm_system_aperture_param apt = {0};
   2370	struct vm_context0_param vm0 = {0};
   2371
   2372	mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
   2373	mmhub_read_vm_context0_settings(hubp1, &vm0, hws);
   2374
   2375	hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
   2376	hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
   2377}
   2378
   2379static void dcn10_enable_plane(
   2380	struct dc *dc,
   2381	struct pipe_ctx *pipe_ctx,
   2382	struct dc_state *context)
   2383{
   2384	struct dce_hwseq *hws = dc->hwseq;
   2385
   2386	if (dc->debug.sanity_checks) {
   2387		hws->funcs.verify_allow_pstate_change_high(dc);
   2388	}
   2389
   2390	undo_DEGVIDCN10_253_wa(dc);
   2391
   2392	power_on_plane(dc->hwseq,
   2393		pipe_ctx->plane_res.hubp->inst);
   2394
    2395	/* enable DCFCLK for the current DCHUB */
   2396	pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
   2397
   2398	/* make sure OPP_PIPE_CLOCK_EN = 1 */
   2399	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
   2400			pipe_ctx->stream_res.opp,
   2401			true);
   2402
   2403	if (dc->config.gpu_vm_support)
   2404		dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);
   2405
   2406	if (dc->debug.sanity_checks) {
   2407		hws->funcs.verify_allow_pstate_change_high(dc);
   2408	}
   2409
   2410	if (!pipe_ctx->top_pipe
   2411		&& pipe_ctx->plane_state
   2412		&& pipe_ctx->plane_state->flip_int_enabled
   2413		&& pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
   2414			pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);
   2415
   2416}
   2417
   2418void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
   2419{
   2420	int i = 0;
   2421	struct dpp_grph_csc_adjustment adjust;
   2422	memset(&adjust, 0, sizeof(adjust));
   2423	adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
   2424
   2425
   2426	if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
   2427		adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
   2428		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
   2429			adjust.temperature_matrix[i] =
   2430				pipe_ctx->stream->gamut_remap_matrix.matrix[i];
   2431	} else if (pipe_ctx->plane_state &&
   2432		   pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
   2433		adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
   2434		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
   2435			adjust.temperature_matrix[i] =
   2436				pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
   2437	}
   2438
   2439	pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
   2440}
   2441
   2442
   2443static bool dcn10_is_rear_mpo_fix_required(struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace)
   2444{
   2445	if (pipe_ctx->plane_state && pipe_ctx->plane_state->layer_index > 0 && is_rgb_cspace(colorspace)) {
   2446		if (pipe_ctx->top_pipe) {
   2447			struct pipe_ctx *top = pipe_ctx->top_pipe;
   2448
   2449			while (top->top_pipe)
   2450				top = top->top_pipe; // Traverse to top pipe_ctx
   2451			if (top->plane_state && top->plane_state->layer_index == 0)
   2452				return true; // Front MPO plane not hidden
   2453		}
   2454	}
   2455	return false;
   2456}
   2457
   2458static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint16_t *matrix)
   2459{
   2460	// Override rear plane RGB bias to fix MPO brightness
   2461	uint16_t rgb_bias = matrix[3];
   2462
   2463	matrix[3] = 0;
   2464	matrix[7] = 0;
   2465	matrix[11] = 0;
   2466	pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
   2467	matrix[3] = rgb_bias;
   2468	matrix[7] = rgb_bias;
   2469	matrix[11] = rgb_bias;
   2470}
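/*
 * Illustrative note (not from the original source): in the 3x4 output CSC
 * matrix passed here, entries [3], [7] and [11] are the per-channel offset
 * (bias) terms of the R, G and B rows, so zeroing just those three removes
 * the brightness bias on this rear plane while leaving the 3x3 gain part
 * intact (and the original bias is restored into the caller's matrix after
 * programming).
 */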
   2471
   2472void dcn10_program_output_csc(struct dc *dc,
   2473		struct pipe_ctx *pipe_ctx,
   2474		enum dc_color_space colorspace,
   2475		uint16_t *matrix,
   2476		int opp_id)
   2477{
   2478	if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
   2479		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) {
   2480
    2481			/* MPO is broken with RGB colorspaces when the OCSC matrix
    2482			 * brightness offset is >= 0 on DCN1, because OCSC sits before MPC:
    2483			 * blending adds the offsets from front + rear to the rear plane.
    2484			 *
    2485			 * The fix is to set the RGB bias to 0 on the rear plane; the top
    2486			 * plane's black-value pixels then add the offset instead of rear + front.
    2487			 */
   2488
   2489			int16_t rgb_bias = matrix[3];
   2490			// matrix[3/7/11] are all the same offset value
   2491
   2492			if (rgb_bias > 0 && dcn10_is_rear_mpo_fix_required(pipe_ctx, colorspace)) {
   2493				dcn10_set_csc_adjustment_rgb_mpo_fix(pipe_ctx, matrix);
   2494			} else {
   2495				pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
   2496			}
   2497		}
   2498	} else {
   2499		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
   2500			pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
   2501	}
   2502}
   2503
   2504static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
   2505{
   2506	struct dc_bias_and_scale bns_params = {0};
   2507
   2508	// program the input csc
   2509	dpp->funcs->dpp_setup(dpp,
   2510			plane_state->format,
   2511			EXPANSION_MODE_ZERO,
   2512			plane_state->input_csc_color_matrix,
   2513			plane_state->color_space,
   2514			NULL);
   2515
   2516	//set scale and bias registers
   2517	build_prescale_params(&bns_params, plane_state);
   2518	if (dpp->funcs->dpp_program_bias_and_scale)
   2519		dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
   2520}
   2521
   2522void dcn10_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx, struct tg_color *color, int mpcc_id)
   2523{
   2524	struct mpc *mpc = dc->res_pool->mpc;
   2525
   2526	if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR)
   2527		get_hdr_visual_confirm_color(pipe_ctx, color);
   2528	else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
   2529		get_surface_visual_confirm_color(pipe_ctx, color);
   2530	else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE)
   2531		get_surface_tile_visual_confirm_color(pipe_ctx, color);
   2532	else
   2533		color_space_to_black_color(
   2534				dc, pipe_ctx->stream->output_color_space, color);
   2535
   2536	if (mpc->funcs->set_bg_color)
   2537		mpc->funcs->set_bg_color(mpc, color, mpcc_id);
   2538}
   2539
   2540void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
   2541{
   2542	struct hubp *hubp = pipe_ctx->plane_res.hubp;
   2543	struct mpcc_blnd_cfg blnd_cfg = {0};
   2544	bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
   2545	int mpcc_id;
   2546	struct mpcc *new_mpcc;
   2547	struct mpc *mpc = dc->res_pool->mpc;
   2548	struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
   2549
   2550	blnd_cfg.overlap_only = false;
   2551	blnd_cfg.global_gain = 0xff;
   2552
   2553	if (per_pixel_alpha) {
   2554		/* DCN1.0 has output CM before MPC which seems to screw with
   2555		 * pre-multiplied alpha.
   2556		 */
   2557		blnd_cfg.pre_multiplied_alpha = (is_rgb_cspace(
   2558				pipe_ctx->stream->output_color_space)
   2559						&& pipe_ctx->plane_state->pre_multiplied_alpha);
   2560		if (pipe_ctx->plane_state->global_alpha) {
   2561			blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
   2562			blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
   2563		} else {
   2564			blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
   2565		}
   2566	} else {
   2567		blnd_cfg.pre_multiplied_alpha = false;
   2568		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
   2569	}
   2570
   2571	if (pipe_ctx->plane_state->global_alpha)
   2572		blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
   2573	else
   2574		blnd_cfg.global_alpha = 0xff;
   2575
   2576	/*
   2577	 * TODO: remove hack
   2578	 * Note: currently there is a bug in init_hw such that
   2579	 * on resume from hibernate, BIOS sets up MPCC0, and
   2580	 * we do mpcc_remove but the mpcc cannot go to idle
    2581	 * after remove. This causes us to pick mpcc1 here,
    2582	 * which causes a pstate hang for a yet-unknown reason.
   2583	 */
   2584	mpcc_id = hubp->inst;
   2585
    2586	/* If there is no full update, we don't need to touch the MPC tree */
   2587	if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
   2588		mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
   2589		dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);
   2590		return;
   2591	}
   2592
   2593	/* check if this MPCC is already being used */
   2594	new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
   2595	/* remove MPCC if being used */
   2596	if (new_mpcc != NULL)
   2597		mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
   2598	else
   2599		if (dc->debug.sanity_checks)
   2600			mpc->funcs->assert_mpcc_idle_before_connect(
   2601					dc->res_pool->mpc, mpcc_id);
   2602
   2603	/* Call MPC to insert new plane */
   2604	new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
   2605			mpc_tree_params,
   2606			&blnd_cfg,
   2607			NULL,
   2608			NULL,
   2609			hubp->inst,
   2610			mpcc_id);
   2611	dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);
   2612
   2613	ASSERT(new_mpcc != NULL);
   2614
   2615	hubp->opp_id = pipe_ctx->stream_res.opp->inst;
   2616	hubp->mpcc_id = mpcc_id;
   2617}
   2618
   2619static void update_scaler(struct pipe_ctx *pipe_ctx)
   2620{
   2621	bool per_pixel_alpha =
   2622			pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
   2623
   2624	pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
   2625	pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
   2626	/* scaler configuration */
   2627	pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
   2628			pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
   2629}
   2630
   2631static void dcn10_update_dchubp_dpp(
   2632	struct dc *dc,
   2633	struct pipe_ctx *pipe_ctx,
   2634	struct dc_state *context)
   2635{
   2636	struct dce_hwseq *hws = dc->hwseq;
   2637	struct hubp *hubp = pipe_ctx->plane_res.hubp;
   2638	struct dpp *dpp = pipe_ctx->plane_res.dpp;
   2639	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
   2640	struct plane_size size = plane_state->plane_size;
   2641	unsigned int compat_level = 0;
   2642	bool should_divided_by_2 = false;
   2643
    2644	/* Depending on the DML calculation, the DPP clock value may change dynamically. */
    2645	/* If the requested max dpp clk is lower than the current dispclk, there is
    2646	 * no need to divide by 2.
    2647	 */
   2648	if (plane_state->update_flags.bits.full_update) {
   2649
    2650		/* The newly calculated dispclk and dppclk are stored in
    2651		 * context->bw_ctx.bw.dcn.clk.dispclk_khz / dppclk_khz. The current
    2652		 * dispclk and dppclk come from dc->clk_mgr->clks.dispclk_khz.
    2653		 * dcn10_validate_bandwidth computes the new dispclk and dppclk.
    2654		 * dispclk is put in use after optimize_bandwidth, when
    2655		 * ramp_up_dispclk_with_dpp is called.
    2656		 * There are two places where dppclk is put in use. One location
    2657		 * is the same as the location for dispclk. The other is within
    2658		 * update_dchubp_dpp, which happens between prepare_bandwidth and
    2659		 * optimize_bandwidth.
    2660		 * A dppclk updated within update_dchubp_dpp means the new
    2661		 * clock values of dispclk and dppclk are not in use at the same
    2662		 * time. When clocks are decreased, this may leave dppclk
    2663		 * lower than the previous configuration and leave the pipe stuck.
    2664		 * For example, with eDP + external DP, change the resolution of DP from
    2665		 * 1920x1080x144hz to 1280x960x60hz:
    2666		 * before the change: dispclk = 337889, dppclk = 337889
    2667		 * on the mode change, dcn10_validate_bandwidth calculates
    2668		 *                dispclk = 143122, dppclk = 143122
    2669		 * update_dchubp_dpp is executed before dispclk is updated,
    2670		 * so dispclk = 337889, but dppclk uses the new value dispclk / 2 =
    2671		 * 168944. This causes a pipe pstate warning issue.
    2672		 * Solution: between prepare_bandwidth and optimize_bandwidth, while
    2673		 * dispclk is going to be decreased, keep dppclk = dispclk.
    2674		 **/
   2675		if (context->bw_ctx.bw.dcn.clk.dispclk_khz <
   2676				dc->clk_mgr->clks.dispclk_khz)
   2677			should_divided_by_2 = false;
   2678		else
   2679			should_divided_by_2 =
   2680					context->bw_ctx.bw.dcn.clk.dppclk_khz <=
   2681					dc->clk_mgr->clks.dispclk_khz / 2;
   2682
   2683		dpp->funcs->dpp_dppclk_control(
   2684				dpp,
   2685				should_divided_by_2,
   2686				true);
   2687
   2688		if (dc->res_pool->dccg)
   2689			dc->res_pool->dccg->funcs->update_dpp_dto(
   2690					dc->res_pool->dccg,
   2691					dpp->inst,
   2692					pipe_ctx->plane_res.bw.dppclk_khz);
   2693		else
   2694			dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
   2695						dc->clk_mgr->clks.dispclk_khz / 2 :
   2696							dc->clk_mgr->clks.dispclk_khz;
   2697	}
   2698
    2699	/* TODO: Need an input parameter to tell which OTG the current DCHUB pipe ties to.
    2700	 * VTG is within DCHUBBUB, which is a common block shared by each pipe HUBP.
    2701	 * VTG has a 1:1 mapping with OTG. Each pipe HUBP selects which VTG to use.
    2702	 */
   2703	if (plane_state->update_flags.bits.full_update) {
   2704		hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);
   2705
   2706		hubp->funcs->hubp_setup(
   2707			hubp,
   2708			&pipe_ctx->dlg_regs,
   2709			&pipe_ctx->ttu_regs,
   2710			&pipe_ctx->rq_regs,
   2711			&pipe_ctx->pipe_dlg_param);
   2712		hubp->funcs->hubp_setup_interdependent(
   2713			hubp,
   2714			&pipe_ctx->dlg_regs,
   2715			&pipe_ctx->ttu_regs);
   2716	}
   2717
   2718	size.surface_size = pipe_ctx->plane_res.scl_data.viewport;
   2719
   2720	if (plane_state->update_flags.bits.full_update ||
   2721		plane_state->update_flags.bits.bpp_change)
   2722		dcn10_update_dpp(dpp, plane_state);
   2723
   2724	if (plane_state->update_flags.bits.full_update ||
   2725		plane_state->update_flags.bits.per_pixel_alpha_change ||
   2726		plane_state->update_flags.bits.global_alpha_change)
   2727		hws->funcs.update_mpcc(dc, pipe_ctx);
   2728
   2729	if (plane_state->update_flags.bits.full_update ||
   2730		plane_state->update_flags.bits.per_pixel_alpha_change ||
   2731		plane_state->update_flags.bits.global_alpha_change ||
   2732		plane_state->update_flags.bits.scaling_change ||
   2733		plane_state->update_flags.bits.position_change) {
   2734		update_scaler(pipe_ctx);
   2735	}
   2736
   2737	if (plane_state->update_flags.bits.full_update ||
   2738		plane_state->update_flags.bits.scaling_change ||
   2739		plane_state->update_flags.bits.position_change) {
   2740		hubp->funcs->mem_program_viewport(
   2741			hubp,
   2742			&pipe_ctx->plane_res.scl_data.viewport,
   2743			&pipe_ctx->plane_res.scl_data.viewport_c);
   2744	}
   2745
   2746	if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
   2747		dc->hwss.set_cursor_position(pipe_ctx);
   2748		dc->hwss.set_cursor_attribute(pipe_ctx);
   2749
   2750		if (dc->hwss.set_cursor_sdr_white_level)
   2751			dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
   2752	}
   2753
   2754	if (plane_state->update_flags.bits.full_update) {
   2755		/*gamut remap*/
   2756		dc->hwss.program_gamut_remap(pipe_ctx);
   2757
   2758		dc->hwss.program_output_csc(dc,
   2759				pipe_ctx,
   2760				pipe_ctx->stream->output_color_space,
   2761				pipe_ctx->stream->csc_color_matrix.matrix,
   2762				pipe_ctx->stream_res.opp->inst);
   2763	}
   2764
   2765	if (plane_state->update_flags.bits.full_update ||
   2766		plane_state->update_flags.bits.pixel_format_change ||
   2767		plane_state->update_flags.bits.horizontal_mirror_change ||
   2768		plane_state->update_flags.bits.rotation_change ||
   2769		plane_state->update_flags.bits.swizzle_change ||
   2770		plane_state->update_flags.bits.dcc_change ||
   2771		plane_state->update_flags.bits.bpp_change ||
   2772		plane_state->update_flags.bits.scaling_change ||
   2773		plane_state->update_flags.bits.plane_size_change) {
   2774		hubp->funcs->hubp_program_surface_config(
   2775			hubp,
   2776			plane_state->format,
   2777			&plane_state->tiling_info,
   2778			&size,
   2779			plane_state->rotation,
   2780			&plane_state->dcc,
   2781			plane_state->horizontal_mirror,
   2782			compat_level);
   2783	}
   2784
   2785	hubp->power_gated = false;
   2786
   2787	hws->funcs.update_plane_addr(dc, pipe_ctx);
   2788
   2789	if (is_pipe_tree_visible(pipe_ctx))
   2790		hubp->funcs->set_blank(hubp, false);
   2791}
   2792
   2793void dcn10_blank_pixel_data(
   2794		struct dc *dc,
   2795		struct pipe_ctx *pipe_ctx,
   2796		bool blank)
   2797{
   2798	enum dc_color_space color_space;
   2799	struct tg_color black_color = {0};
   2800	struct stream_resource *stream_res = &pipe_ctx->stream_res;
   2801	struct dc_stream_state *stream = pipe_ctx->stream;
   2802
   2803	/* program otg blank color */
   2804	color_space = stream->output_color_space;
   2805	color_space_to_black_color(dc, color_space, &black_color);
   2806
   2807	/*
    2808	 * The way 420 is packed, 2 channels carry the Y component and 1 channel
    2809	 * alternates between Cb and Cr, so both channels need the pixel
    2810	 * value for Y.
   2811	 */
   2812	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
   2813		black_color.color_r_cr = black_color.color_g_y;
   2814
   2815
   2816	if (stream_res->tg->funcs->set_blank_color)
   2817		stream_res->tg->funcs->set_blank_color(
   2818				stream_res->tg,
   2819				&black_color);
   2820
   2821	if (!blank) {
   2822		if (stream_res->tg->funcs->set_blank)
   2823			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
   2824		if (stream_res->abm) {
   2825			dc->hwss.set_pipe(pipe_ctx);
   2826			stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
   2827		}
   2828	} else if (blank) {
   2829		dc->hwss.set_abm_immediate_disable(pipe_ctx);
   2830		if (stream_res->tg->funcs->set_blank) {
   2831			stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK);
   2832			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
   2833		}
   2834	}
   2835}
   2836
   2837void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
   2838{
   2839	struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
   2840	uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
   2841	struct custom_float_format fmt;
   2842
   2843	fmt.exponenta_bits = 6;
   2844	fmt.mantissa_bits = 12;
   2845	fmt.sign = true;
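	/*
	 * Illustrative note (not from the original source), assuming the usual
	 * biased-exponent layout used by convert_to_custom_float_format: with
	 * 6 exponent bits the bias is 2^5 - 1 = 31, so 1.0 packs as exponent 31,
	 * mantissa 0 => 31 << 12 = 0x1f000, matching the default hw_mult above.
	 */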
   2846
   2847
   2848	if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
   2849		convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
   2850
   2851	pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
   2852			pipe_ctx->plane_res.dpp, hw_mult);
   2853}
   2854
   2855void dcn10_program_pipe(
   2856		struct dc *dc,
   2857		struct pipe_ctx *pipe_ctx,
   2858		struct dc_state *context)
   2859{
   2860	struct dce_hwseq *hws = dc->hwseq;
   2861
   2862	if (pipe_ctx->top_pipe == NULL) {
   2863		bool blank = !is_pipe_tree_visible(pipe_ctx);
   2864
   2865		pipe_ctx->stream_res.tg->funcs->program_global_sync(
   2866				pipe_ctx->stream_res.tg,
   2867				pipe_ctx->pipe_dlg_param.vready_offset,
   2868				pipe_ctx->pipe_dlg_param.vstartup_start,
   2869				pipe_ctx->pipe_dlg_param.vupdate_offset,
   2870				pipe_ctx->pipe_dlg_param.vupdate_width);
   2871
   2872		pipe_ctx->stream_res.tg->funcs->set_vtg_params(
   2873				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
   2874
   2875		if (hws->funcs.setup_vupdate_interrupt)
   2876			hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
   2877
   2878		hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
   2879	}
   2880
   2881	if (pipe_ctx->plane_state->update_flags.bits.full_update)
   2882		dcn10_enable_plane(dc, pipe_ctx, context);
   2883
   2884	dcn10_update_dchubp_dpp(dc, pipe_ctx, context);
   2885
   2886	hws->funcs.set_hdr_multiplier(pipe_ctx);
   2887
   2888	if (pipe_ctx->plane_state->update_flags.bits.full_update ||
   2889			pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
   2890			pipe_ctx->plane_state->update_flags.bits.gamma_change)
   2891		hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
   2892
    2893	/* dcn10_translate_regamma_to_hw_format takes 750us to finish,
    2894	 * so only do gamma programming for a full update.
    2895	 * TODO: This can be further optimized/cleaned up.
    2896	 * Always call this for now since it does a memcmp inside before
    2897	 * doing the heavy calculation and programming.
    2898	 */
   2899	if (pipe_ctx->plane_state->update_flags.bits.full_update)
   2900		hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
   2901}
   2902
   2903void dcn10_wait_for_pending_cleared(struct dc *dc,
   2904		struct dc_state *context)
   2905{
   2906		struct pipe_ctx *pipe_ctx;
   2907		struct timing_generator *tg;
   2908		int i;
   2909
   2910		for (i = 0; i < dc->res_pool->pipe_count; i++) {
   2911			pipe_ctx = &context->res_ctx.pipe_ctx[i];
   2912			tg = pipe_ctx->stream_res.tg;
   2913
   2914			/*
    2915			 * Only wait for the top pipe's tg pending bit.
    2916			 * Also skip if the pipe is disabled.
   2917			 */
   2918			if (pipe_ctx->top_pipe ||
   2919			    !pipe_ctx->stream || !pipe_ctx->plane_state ||
   2920			    !tg->funcs->is_tg_enabled(tg))
   2921				continue;
   2922
   2923			/*
   2924			 * Wait for VBLANK then VACTIVE to ensure we get VUPDATE.
   2925			 * For some reason waiting for OTG_UPDATE_PENDING cleared
   2926			 * seems to not trigger the update right away, and if we
    2927			 * lock again before VUPDATE then we don't get a separate
   2928			 * operation.
   2929			 */
   2930			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
   2931			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
   2932		}
   2933}
   2934
   2935void dcn10_post_unlock_program_front_end(
   2936		struct dc *dc,
   2937		struct dc_state *context)
   2938{
   2939	int i;
   2940
   2941	DC_LOGGER_INIT(dc->ctx->logger);
   2942
   2943	for (i = 0; i < dc->res_pool->pipe_count; i++) {
   2944		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
   2945
   2946		if (!pipe_ctx->top_pipe &&
   2947			!pipe_ctx->prev_odm_pipe &&
   2948			pipe_ctx->stream) {
   2949			struct timing_generator *tg = pipe_ctx->stream_res.tg;
   2950
   2951			if (context->stream_status[i].plane_count == 0)
   2952				false_optc_underflow_wa(dc, pipe_ctx->stream, tg);
   2953		}
   2954	}
   2955
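        	/* Disable any planes removed by this state update, then, if at
        	 * least one pipe was disabled, let bandwidth be optimized once.
        	 */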
   2956	for (i = 0; i < dc->res_pool->pipe_count; i++)
   2957		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
   2958			dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
   2959
   2960	for (i = 0; i < dc->res_pool->pipe_count; i++)
   2961		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
   2962			dc->hwss.optimize_bandwidth(dc, context);
   2963			break;
   2964		}
   2965
   2966	if (dc->hwseq->wa.DEGVIDCN10_254)
   2967		hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
   2968}
   2969
   2970static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *context)
   2971{
   2972	uint8_t i;
   2973
   2974	for (i = 0; i < context->stream_count; i++) {
   2975		if (context->streams[i]->timing.timing_3d_format
   2976				== TIMING_3D_FORMAT_HW_FRAME_PACKING) {
   2977			/*
   2978			 * Disable stutter
   2979			 */
   2980			hubbub1_allow_self_refresh_control(dc->res_pool->hubbub, false);
   2981			break;
   2982		}
   2983	}
   2984}
   2985
   2986void dcn10_prepare_bandwidth(
   2987		struct dc *dc,
   2988		struct dc_state *context)
   2989{
   2990	struct dce_hwseq *hws = dc->hwseq;
   2991	struct hubbub *hubbub = dc->res_pool->hubbub;
   2992
   2993	if (dc->debug.sanity_checks)
   2994		hws->funcs.verify_allow_pstate_change_high(dc);
   2995
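        	/* update_clocks() is called with its lowering flag set to false
        	 * here: clocks may only be raised before the new state is
        	 * programmed. Lowering happens in dcn10_optimize_bandwidth(),
        	 * which passes true.
        	 */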
   2996	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
   2997		if (context->stream_count == 0)
   2998			context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
   2999
   3000		dc->clk_mgr->funcs->update_clocks(
   3001				dc->clk_mgr,
   3002				context,
   3003				false);
   3004	}
   3005
   3006	dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
   3007			&context->bw_ctx.bw.dcn.watermarks,
   3008			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
   3009			true);
   3010	dcn10_stereo_hw_frame_pack_wa(dc, context);
   3011
   3012	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
   3013		DC_FP_START();
   3014		dcn_bw_notify_pplib_of_wm_ranges(dc);
   3015		DC_FP_END();
   3016	}
   3017
   3018	if (dc->debug.sanity_checks)
   3019		hws->funcs.verify_allow_pstate_change_high(dc);
   3020}
   3021
   3022void dcn10_optimize_bandwidth(
   3023		struct dc *dc,
   3024		struct dc_state *context)
   3025{
   3026	struct dce_hwseq *hws = dc->hwseq;
   3027	struct hubbub *hubbub = dc->res_pool->hubbub;
   3028
   3029	if (dc->debug.sanity_checks)
   3030		hws->funcs.verify_allow_pstate_change_high(dc);
   3031
   3032	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
   3033		if (context->stream_count == 0)
   3034			context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
   3035
   3036		dc->clk_mgr->funcs->update_clocks(
   3037				dc->clk_mgr,
   3038				context,
   3039				true);
   3040	}
   3041
   3042	hubbub->funcs->program_watermarks(hubbub,
   3043			&context->bw_ctx.bw.dcn.watermarks,
   3044			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
   3045			true);
   3046
   3047	dcn10_stereo_hw_frame_pack_wa(dc, context);
   3048
   3049	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
   3050		DC_FP_START();
   3051		dcn_bw_notify_pplib_of_wm_ranges(dc);
   3052		DC_FP_END();
   3053	}
   3054
   3055	if (dc->debug.sanity_checks)
   3056		hws->funcs.verify_allow_pstate_change_high(dc);
   3057}
   3058
   3059void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
   3060		int num_pipes, struct dc_crtc_timing_adjust adjust)
   3061{
   3062	int i = 0;
   3063	struct drr_params params = {0};
   3064	// DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
   3065	unsigned int event_triggers = 0x800;
    3066	// Note: DRR trigger events are generated regardless of whether the num_frames threshold is met.
   3067	unsigned int num_frames = 2;
   3068
   3069	params.vertical_total_max = adjust.v_total_max;
   3070	params.vertical_total_min = adjust.v_total_min;
   3071	params.vertical_total_mid = adjust.v_total_mid;
   3072	params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;
   3073	/* TODO: If multiple pipes are to be supported, you need
   3074	 * some GSL stuff. Static screen triggers may be programmed differently
   3075	 * as well.
   3076	 */
   3077	for (i = 0; i < num_pipes; i++) {
   3078		if ((pipe_ctx[i]->stream_res.tg != NULL) && pipe_ctx[i]->stream_res.tg->funcs) {
   3079			if (pipe_ctx[i]->stream_res.tg->funcs->set_drr)
   3080				pipe_ctx[i]->stream_res.tg->funcs->set_drr(
   3081					pipe_ctx[i]->stream_res.tg, &params);
   3082			if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
   3083				if (pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control)
   3084					pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
   3085						pipe_ctx[i]->stream_res.tg,
   3086						event_triggers, num_frames);
   3087		}
   3088	}
   3089}
   3090
   3091void dcn10_get_position(struct pipe_ctx **pipe_ctx,
   3092		int num_pipes,
   3093		struct crtc_position *position)
   3094{
   3095	int i = 0;
   3096
   3097	/* TODO: handle pipes > 1
   3098	 */
   3099	for (i = 0; i < num_pipes; i++)
   3100		pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
   3101}
   3102
   3103void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
   3104		int num_pipes, const struct dc_static_screen_params *params)
   3105{
   3106	unsigned int i;
   3107	unsigned int triggers = 0;
   3108
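        	/* Translate the generic static screen trigger flags into the event
        	 * mask used by the timing generator: 0x80 = surface update,
        	 * 0x2 = cursor update, 0x1 = force trigger.
        	 */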
   3109	if (params->triggers.surface_update)
   3110		triggers |= 0x80;
   3111	if (params->triggers.cursor_update)
   3112		triggers |= 0x2;
   3113	if (params->triggers.force_trigger)
   3114		triggers |= 0x1;
   3115
   3116	for (i = 0; i < num_pipes; i++)
   3117		pipe_ctx[i]->stream_res.tg->funcs->
   3118			set_static_screen_control(pipe_ctx[i]->stream_res.tg,
   3119					triggers, params->num_frames);
   3120}
   3121
   3122static void dcn10_config_stereo_parameters(
   3123		struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
   3124{
   3125	enum view_3d_format view_format = stream->view_format;
   3126	enum dc_timing_3d_format timing_3d_format =\
   3127			stream->timing.timing_3d_format;
   3128	bool non_stereo_timing = false;
   3129
   3130	if (timing_3d_format == TIMING_3D_FORMAT_NONE ||
   3131		timing_3d_format == TIMING_3D_FORMAT_SIDE_BY_SIDE ||
   3132		timing_3d_format == TIMING_3D_FORMAT_TOP_AND_BOTTOM)
   3133		non_stereo_timing = true;
   3134
   3135	if (non_stereo_timing == false &&
   3136		view_format == VIEW_3D_FORMAT_FRAME_SEQUENTIAL) {
   3137
   3138		flags->PROGRAM_STEREO         = 1;
   3139		flags->PROGRAM_POLARITY       = 1;
   3140		if (timing_3d_format == TIMING_3D_FORMAT_FRAME_ALTERNATE ||
   3141			timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||
   3142			timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
   3143			timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
   3144			enum display_dongle_type dongle = \
   3145					stream->link->ddc->dongle_type;
   3146			if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
   3147				dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
   3148				dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
   3149				flags->DISABLE_STEREO_DP_SYNC = 1;
   3150		}
   3151		flags->RIGHT_EYE_POLARITY =\
   3152				stream->timing.flags.RIGHT_EYE_3D_POLARITY;
   3153		if (timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
   3154			flags->FRAME_PACKED = 1;
   3155	}
   3156
   3157	return;
   3158}
   3159
   3160void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
   3161{
   3162	struct crtc_stereo_flags flags = { 0 };
   3163	struct dc_stream_state *stream = pipe_ctx->stream;
   3164
   3165	dcn10_config_stereo_parameters(stream, &flags);
   3166
   3167	if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
   3168		if (!dc_set_generic_gpio_for_stereo(true, dc->ctx->gpio_service))
   3169			dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
   3170	} else {
   3171		dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
   3172	}
   3173
   3174	pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
   3175		pipe_ctx->stream_res.opp,
   3176		flags.PROGRAM_STEREO == 1,
   3177		&stream->timing);
   3178
   3179	pipe_ctx->stream_res.tg->funcs->program_stereo(
   3180		pipe_ctx->stream_res.tg,
   3181		&stream->timing,
   3182		&flags);
   3183
   3184	return;
   3185}
   3186
   3187static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
   3188{
   3189	int i;
   3190
   3191	for (i = 0; i < res_pool->pipe_count; i++) {
   3192		if (res_pool->hubps[i]->inst == mpcc_inst)
   3193			return res_pool->hubps[i];
   3194	}
   3195	ASSERT(false);
   3196	return NULL;
   3197}
   3198
   3199void dcn10_wait_for_mpcc_disconnect(
   3200		struct dc *dc,
   3201		struct resource_pool *res_pool,
   3202		struct pipe_ctx *pipe_ctx)
   3203{
   3204	struct dce_hwseq *hws = dc->hwseq;
   3205	int mpcc_inst;
   3206
   3207	if (dc->debug.sanity_checks) {
   3208		hws->funcs.verify_allow_pstate_change_high(dc);
   3209	}
   3210
   3211	if (!pipe_ctx->stream_res.opp)
   3212		return;
   3213
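        	/* For every MPCC with a pending disconnect on this OPP: wait for
        	 * the MPC to go idle (only while the TG is running), clear the
        	 * pending flag and blank the corresponding HUBP.
        	 */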
   3214	for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
   3215		if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
   3216			struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
   3217
   3218			if (pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
   3219				res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
   3220			pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
   3221			hubp->funcs->set_blank(hubp, true);
   3222		}
   3223	}
   3224
   3225	if (dc->debug.sanity_checks) {
   3226		hws->funcs.verify_allow_pstate_change_high(dc);
   3227	}
   3228
   3229}
   3230
   3231bool dcn10_dummy_display_power_gating(
   3232	struct dc *dc,
   3233	uint8_t controller_id,
   3234	struct dc_bios *dcb,
   3235	enum pipe_gating_control power_gating)
   3236{
   3237	return true;
   3238}
   3239
   3240void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
   3241{
   3242	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
   3243	struct timing_generator *tg = pipe_ctx->stream_res.tg;
   3244	bool flip_pending;
    3245	struct dc *dc;
    3246
    3247	if (plane_state == NULL)
    3248		return;
    3249
        	dc = plane_state->ctx->dc;

   3250	flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
   3251					pipe_ctx->plane_res.hubp);
   3252
   3253	plane_state->status.is_flip_pending = plane_state->status.is_flip_pending || flip_pending;
   3254
   3255	if (!flip_pending)
   3256		plane_state->status.current_address = plane_state->status.requested_address;
   3257
   3258	if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
   3259			tg->funcs->is_stereo_left_eye) {
   3260		plane_state->status.is_right_eye =
   3261				!tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
   3262	}
   3263
   3264	if (dc->hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied) {
   3265		struct dce_hwseq *hwseq = dc->hwseq;
   3266		struct timing_generator *tg = dc->res_pool->timing_generators[0];
   3267		unsigned int cur_frame = tg->funcs->get_frame_count(tg);
   3268
   3269		if (cur_frame != hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame) {
   3270			struct hubbub *hubbub = dc->res_pool->hubbub;
   3271
   3272			hubbub->funcs->allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
   3273			hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = false;
   3274		}
   3275	}
   3276}
   3277
   3278void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
   3279{
   3280	struct hubbub *hubbub = hws->ctx->dc->res_pool->hubbub;
   3281
   3282	/* In DCN, this programming sequence is owned by the hubbub */
   3283	hubbub->funcs->update_dchub(hubbub, dh_data);
   3284}
   3285
   3286static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
   3287{
   3288	struct pipe_ctx *test_pipe, *split_pipe;
   3289	const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data;
   3290	struct rect r1 = scl_data->recout, r2, r2_half;
   3291	int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b;
   3292	int cur_layer = pipe_ctx->plane_state->layer_index;
   3293
   3294	/**
   3295	 * Disable the cursor if there's another pipe above this with a
   3296	 * plane that contains this pipe's viewport to prevent double cursor
   3297	 * and incorrect scaling artifacts.
   3298	 */
   3299	for (test_pipe = pipe_ctx->top_pipe; test_pipe;
   3300	     test_pipe = test_pipe->top_pipe) {
    3301		// Skip pipes without a plane state, invisible layers and pipe-split planes on the same layer
    3302		if (!test_pipe->plane_state || !test_pipe->plane_state->visible || test_pipe->plane_state->layer_index == cur_layer)
   3303			continue;
   3304
   3305		r2 = test_pipe->plane_res.scl_data.recout;
   3306		r2_r = r2.x + r2.width;
   3307		r2_b = r2.y + r2.height;
   3308		split_pipe = test_pipe;
   3309
   3310		/**
    3311		 * If there is another half plane on the same layer because of
    3312		 * a pipe split, merge the two recouts (they share the same height).
   3313		 */
   3314		for (split_pipe = pipe_ctx->top_pipe; split_pipe;
   3315		     split_pipe = split_pipe->top_pipe)
   3316			if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) {
   3317				r2_half = split_pipe->plane_res.scl_data.recout;
   3318				r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x;
   3319				r2.width = r2.width + r2_half.width;
   3320				r2_r = r2.x + r2.width;
   3321				break;
   3322			}
   3323
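        		/* This pipe's recout (r1) is fully contained in the merged
        		 * recout (r2) of a visible plane above it, so the cursor here
        		 * would be drawn twice and can be disabled.
        		 */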
   3324		if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b)
   3325			return true;
   3326	}
   3327
   3328	return false;
   3329}
   3330
   3331void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
   3332{
   3333	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
   3334	struct hubp *hubp = pipe_ctx->plane_res.hubp;
   3335	struct dpp *dpp = pipe_ctx->plane_res.dpp;
   3336	struct dc_cursor_mi_param param = {
   3337		.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
   3338		.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
   3339		.viewport = pipe_ctx->plane_res.scl_data.viewport,
   3340		.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
   3341		.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
   3342		.rotation = pipe_ctx->plane_state->rotation,
   3343		.mirror = pipe_ctx->plane_state->horizontal_mirror
   3344	};
   3345	bool pipe_split_on = (pipe_ctx->top_pipe != NULL) ||
   3346		(pipe_ctx->bottom_pipe != NULL);
   3347	bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
   3348		(pipe_ctx->prev_odm_pipe != NULL);
   3349
   3350	int x_plane = pipe_ctx->plane_state->dst_rect.x;
   3351	int y_plane = pipe_ctx->plane_state->dst_rect.y;
   3352	int x_pos = pos_cpy.x;
   3353	int y_pos = pos_cpy.y;
   3354
   3355	/**
   3356	 * DC cursor is stream space, HW cursor is plane space and drawn
   3357	 * as part of the framebuffer.
   3358	 *
   3359	 * Cursor position can't be negative, but hotspot can be used to
   3360	 * shift cursor out of the plane bounds. Hotspot must be smaller
   3361	 * than the cursor size.
   3362	 */
   3363
   3364	/**
   3365	 * Translate cursor from stream space to plane space.
   3366	 *
   3367	 * If the cursor is scaled then we need to scale the position
   3368	 * to be in the approximately correct place. We can't do anything
   3369	 * about the actual size being incorrect, that's a limitation of
   3370	 * the hardware.
   3371	 */
   3372	if (param.rotation == ROTATION_ANGLE_90 || param.rotation == ROTATION_ANGLE_270) {
   3373		x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.height /
   3374				pipe_ctx->plane_state->dst_rect.width;
   3375		y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.width /
   3376				pipe_ctx->plane_state->dst_rect.height;
   3377	} else {
   3378		x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
   3379				pipe_ctx->plane_state->dst_rect.width;
   3380		y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
   3381				pipe_ctx->plane_state->dst_rect.height;
   3382	}
   3383
   3384	/**
   3385	 * If the cursor's source viewport is clipped then we need to
   3386	 * translate the cursor to appear in the correct position on
   3387	 * the screen.
   3388	 *
   3389	 * This translation isn't affected by scaling so it needs to be
   3390	 * done *after* we adjust the position for the scale factor.
   3391	 *
   3392	 * This is only done by opt-in for now since there are still
   3393	 * some usecases like tiled display that might enable the
   3394	 * cursor on both streams while expecting dc to clip it.
   3395	 */
   3396	if (pos_cpy.translate_by_source) {
   3397		x_pos += pipe_ctx->plane_state->src_rect.x;
   3398		y_pos += pipe_ctx->plane_state->src_rect.y;
   3399	}
   3400
   3401	/**
   3402	 * If the position is negative then we need to add to the hotspot
   3403	 * to shift the cursor outside the plane.
   3404	 */
   3405
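        	/* e.g. a requested x of -10 becomes x = 0 with the hotspot grown
        	 * by 10, which draws the cursor 10 pixels off the left edge of
        	 * the plane.
        	 */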
   3406	if (x_pos < 0) {
   3407		pos_cpy.x_hotspot -= x_pos;
   3408		x_pos = 0;
   3409	}
   3410
   3411	if (y_pos < 0) {
   3412		pos_cpy.y_hotspot -= y_pos;
   3413		y_pos = 0;
   3414	}
   3415
   3416	pos_cpy.x = (uint32_t)x_pos;
   3417	pos_cpy.y = (uint32_t)y_pos;
   3418
   3419	if (pipe_ctx->plane_state->address.type
   3420			== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
   3421		pos_cpy.enable = false;
   3422
   3423	if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx))
   3424		pos_cpy.enable = false;
   3425
   3426	// Swap axis and mirror horizontally
   3427	if (param.rotation == ROTATION_ANGLE_90) {
   3428		uint32_t temp_x = pos_cpy.x;
   3429
   3430		pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
   3431				(pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x;
   3432		pos_cpy.y = temp_x;
   3433	}
   3434	// Swap axis and mirror vertically
   3435	else if (param.rotation == ROTATION_ANGLE_270) {
   3436		uint32_t temp_y = pos_cpy.y;
   3437		int viewport_height =
   3438			pipe_ctx->plane_res.scl_data.viewport.height;
   3439		int viewport_y =
   3440			pipe_ctx->plane_res.scl_data.viewport.y;
   3441
   3442		/**
    3443		 * Display groups that are 1xnY have pos_cpy.x > 2 * viewport.height.
   3444		 * For pipe split cases:
   3445		 * - apply offset of viewport.y to normalize pos_cpy.x
   3446		 * - calculate the pos_cpy.y as before
   3447		 * - shift pos_cpy.y back by same offset to get final value
   3448		 * - since we iterate through both pipes, use the lower
   3449		 *   viewport.y for offset
   3450		 * For non pipe split cases, use the same calculation for
   3451		 *  pos_cpy.y as the 180 degree rotation case below,
   3452		 *  but use pos_cpy.x as our input because we are rotating
   3453		 *  270 degrees
   3454		 */
   3455		if (pipe_split_on || odm_combine_on) {
   3456			int pos_cpy_x_offset;
   3457			int other_pipe_viewport_y;
   3458
   3459			if (pipe_split_on) {
   3460				if (pipe_ctx->bottom_pipe) {
   3461					other_pipe_viewport_y =
   3462						pipe_ctx->bottom_pipe->plane_res.scl_data.viewport.y;
   3463				} else {
   3464					other_pipe_viewport_y =
   3465						pipe_ctx->top_pipe->plane_res.scl_data.viewport.y;
   3466				}
   3467			} else {
   3468				if (pipe_ctx->next_odm_pipe) {
   3469					other_pipe_viewport_y =
   3470						pipe_ctx->next_odm_pipe->plane_res.scl_data.viewport.y;
   3471				} else {
   3472					other_pipe_viewport_y =
   3473						pipe_ctx->prev_odm_pipe->plane_res.scl_data.viewport.y;
   3474				}
   3475			}
   3476			pos_cpy_x_offset = (viewport_y > other_pipe_viewport_y) ?
   3477				other_pipe_viewport_y : viewport_y;
   3478			pos_cpy.x -= pos_cpy_x_offset;
   3479			if (pos_cpy.x > viewport_height) {
   3480				pos_cpy.x = pos_cpy.x - viewport_height;
   3481				pos_cpy.y = viewport_height - pos_cpy.x;
   3482			} else {
   3483				pos_cpy.y = 2 * viewport_height - pos_cpy.x;
   3484			}
   3485			pos_cpy.y += pos_cpy_x_offset;
   3486		} else {
   3487			pos_cpy.y = (2 * viewport_y) + viewport_height - pos_cpy.x;
   3488		}
   3489		pos_cpy.x = temp_y;
   3490	}
   3491	// Mirror horizontally and vertically
   3492	else if (param.rotation == ROTATION_ANGLE_180) {
   3493		int viewport_width =
   3494			pipe_ctx->plane_res.scl_data.viewport.width;
   3495		int viewport_x =
   3496			pipe_ctx->plane_res.scl_data.viewport.x;
   3497
   3498		if (pipe_split_on || odm_combine_on) {
   3499			if (pos_cpy.x >= viewport_width + viewport_x) {
   3500				pos_cpy.x = 2 * viewport_width
   3501						- pos_cpy.x + 2 * viewport_x;
   3502			} else {
   3503				uint32_t temp_x = pos_cpy.x;
   3504
   3505				pos_cpy.x = 2 * viewport_x - pos_cpy.x;
   3506				if (temp_x >= viewport_x +
   3507					(int)hubp->curs_attr.width || pos_cpy.x
   3508					<= (int)hubp->curs_attr.width +
   3509					pipe_ctx->plane_state->src_rect.x) {
   3510					pos_cpy.x = temp_x + viewport_width;
   3511				}
   3512			}
   3513		} else {
   3514			pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
   3515		}
   3516
   3517		/**
    3518		 * Display groups that are 1xnY have pos_cpy.y > viewport.height.
   3519		 * Calculation:
   3520		 *   delta_from_bottom = viewport.y + viewport.height - pos_cpy.y
   3521		 *   pos_cpy.y_new = viewport.y + delta_from_bottom
   3522		 * Simplify it as:
   3523		 *   pos_cpy.y = viewport.y * 2 + viewport.height - pos_cpy.y
   3524		 */
   3525		pos_cpy.y = (2 * pipe_ctx->plane_res.scl_data.viewport.y) +
   3526			pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
   3527	}
   3528
   3529	hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
   3530	dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
   3531}
   3532
   3533void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
   3534{
   3535	struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
   3536
   3537	pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
   3538			pipe_ctx->plane_res.hubp, attributes);
   3539	pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
   3540		pipe_ctx->plane_res.dpp, attributes);
   3541}
   3542
   3543void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
   3544{
   3545	uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
   3546	struct fixed31_32 multiplier;
   3547	struct dpp_cursor_attributes opt_attr = { 0 };
   3548	uint32_t hw_scale = 0x3c00; // 1.0 default multiplier
   3549	struct custom_float_format fmt;
   3550
   3551	if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)
   3552		return;
   3553
   3554	fmt.exponenta_bits = 5;
   3555	fmt.mantissa_bits = 10;
   3556	fmt.sign = true;
   3557
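        	/* fmt describes an fp16-style custom float (1 sign, 5 exponent,
        	 * 10 mantissa bits); 0x3c00 encodes 1.0. White levels above 80
        	 * scale the cursor by sdr_white_level / 80, otherwise the default
        	 * 1.0 multiplier is kept.
        	 */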
   3558	if (sdr_white_level > 80) {
   3559		multiplier = dc_fixpt_from_fraction(sdr_white_level, 80);
   3560		convert_to_custom_float_format(multiplier, &fmt, &hw_scale);
   3561	}
   3562
   3563	opt_attr.scale = hw_scale;
   3564	opt_attr.bias = 0;
   3565
   3566	pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
   3567			pipe_ctx->plane_res.dpp, &opt_attr);
   3568}
   3569
   3570/*
    3571 * apply_front_porch_workaround  TODO: is this still needed on FPGA?
    3572 *
    3573 * This is a workaround for a bug that has existed since R5xx and has not been
    3574 * fixed: keep the front porch at a minimum of 2 for interlaced mode or 1 for progressive.
   3575 */
   3576static void apply_front_porch_workaround(
   3577	struct dc_crtc_timing *timing)
   3578{
   3579	if (timing->flags.INTERLACE == 1) {
   3580		if (timing->v_front_porch < 2)
   3581			timing->v_front_porch = 2;
   3582	} else {
   3583		if (timing->v_front_porch < 1)
   3584			timing->v_front_porch = 1;
   3585	}
   3586}
   3587
   3588int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
   3589{
   3590	const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
   3591	struct dc_crtc_timing patched_crtc_timing;
   3592	int vesa_sync_start;
   3593	int asic_blank_end;
   3594	int interlace_factor;
   3595	int vertical_line_start;
   3596
   3597	patched_crtc_timing = *dc_crtc_timing;
   3598	apply_front_porch_workaround(&patched_crtc_timing);
   3599
   3600	interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
   3601
   3602	vesa_sync_start = patched_crtc_timing.v_addressable +
   3603			patched_crtc_timing.v_border_bottom +
   3604			patched_crtc_timing.v_front_porch;
   3605
   3606	asic_blank_end = (patched_crtc_timing.v_total -
   3607			vesa_sync_start -
   3608			patched_crtc_timing.v_border_top)
   3609			* interlace_factor;
   3610
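        	/* The returned vupdate offset is measured in lines from vsync:
        	 * the interlace-adjusted blank end minus the vstartup margin,
        	 * plus one.
        	 */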
   3611	vertical_line_start = asic_blank_end -
   3612			pipe_ctx->pipe_dlg_param.vstartup_start + 1;
   3613
   3614	return vertical_line_start;
   3615}
   3616
   3617void dcn10_calc_vupdate_position(
   3618		struct dc *dc,
   3619		struct pipe_ctx *pipe_ctx,
   3620		uint32_t *start_line,
   3621		uint32_t *end_line)
   3622{
   3623	const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
   3624	int vline_int_offset_from_vupdate =
   3625			pipe_ctx->stream->periodic_interrupt0.lines_offset;
   3626	int vupdate_offset_from_vsync = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
   3627	int start_position;
   3628
   3629	if (vline_int_offset_from_vupdate > 0)
   3630		vline_int_offset_from_vupdate--;
   3631	else if (vline_int_offset_from_vupdate < 0)
   3632		vline_int_offset_from_vupdate++;
   3633
   3634	start_position = vline_int_offset_from_vupdate + vupdate_offset_from_vsync;
   3635
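        	/* A negative start position wraps into the previous frame; the
        	 * window is kept two lines wide and wraps back to the top of the
        	 * frame if it crosses v_total.
        	 */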
   3636	if (start_position >= 0)
   3637		*start_line = start_position;
   3638	else
   3639		*start_line = dc_crtc_timing->v_total + start_position - 1;
   3640
   3641	*end_line = *start_line + 2;
   3642
   3643	if (*end_line >= dc_crtc_timing->v_total)
   3644		*end_line = 2;
   3645}
   3646
   3647static void dcn10_cal_vline_position(
   3648		struct dc *dc,
   3649		struct pipe_ctx *pipe_ctx,
   3650		enum vline_select vline,
   3651		uint32_t *start_line,
   3652		uint32_t *end_line)
   3653{
   3654	enum vertical_interrupt_ref_point ref_point = INVALID_POINT;
   3655
   3656	if (vline == VLINE0)
   3657		ref_point = pipe_ctx->stream->periodic_interrupt0.ref_point;
   3658	else if (vline == VLINE1)
   3659		ref_point = pipe_ctx->stream->periodic_interrupt1.ref_point;
   3660
   3661	switch (ref_point) {
   3662	case START_V_UPDATE:
   3663		dcn10_calc_vupdate_position(
   3664				dc,
   3665				pipe_ctx,
   3666				start_line,
   3667				end_line);
   3668		break;
   3669	case START_V_SYNC:
    3670		// Supposed to do nothing because vsync is 0
   3671		break;
   3672	default:
   3673		ASSERT(0);
   3674		break;
   3675	}
   3676}
   3677
   3678void dcn10_setup_periodic_interrupt(
   3679		struct dc *dc,
   3680		struct pipe_ctx *pipe_ctx,
   3681		enum vline_select vline)
   3682{
   3683	struct timing_generator *tg = pipe_ctx->stream_res.tg;
   3684
   3685	if (vline == VLINE0) {
   3686		uint32_t start_line = 0;
   3687		uint32_t end_line = 0;
   3688
   3689		dcn10_cal_vline_position(dc, pipe_ctx, vline, &start_line, &end_line);
   3690
   3691		tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
   3692
   3693	} else if (vline == VLINE1) {
   3694		pipe_ctx->stream_res.tg->funcs->setup_vertical_interrupt1(
   3695				tg,
   3696				pipe_ctx->stream->periodic_interrupt1.lines_offset);
   3697	}
   3698}
   3699
   3700void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
   3701{
   3702	struct timing_generator *tg = pipe_ctx->stream_res.tg;
   3703	int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
   3704
   3705	if (start_line < 0) {
   3706		ASSERT(0);
   3707		start_line = 0;
   3708	}
   3709
   3710	if (tg->funcs->setup_vertical_interrupt2)
   3711		tg->funcs->setup_vertical_interrupt2(tg, start_line);
   3712}
   3713
   3714void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
   3715		struct dc_link_settings *link_settings)
   3716{
   3717	struct encoder_unblank_param params = {0};
   3718	struct dc_stream_state *stream = pipe_ctx->stream;
   3719	struct dc_link *link = stream->link;
   3720	struct dce_hwseq *hws = link->dc->hwseq;
   3721
   3722	/* only 3 items below are used by unblank */
   3723	params.timing = pipe_ctx->stream->timing;
   3724
   3725	params.link_settings.link_rate = link_settings->link_rate;
   3726
   3727	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
   3728		if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
   3729			params.timing.pix_clk_100hz /= 2;
   3730		pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, &params);
   3731	}
   3732
   3733	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
   3734		hws->funcs.edp_backlight_control(link, true);
   3735	}
   3736}
   3737
   3738void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
   3739				const uint8_t *custom_sdp_message,
   3740				unsigned int sdp_message_size)
   3741{
   3742	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
   3743		pipe_ctx->stream_res.stream_enc->funcs->send_immediate_sdp_message(
   3744				pipe_ctx->stream_res.stream_enc,
   3745				custom_sdp_message,
   3746				sdp_message_size);
   3747	}
   3748}

    3749enum dc_status dcn10_set_clock(struct dc *dc,
   3750			enum dc_clock_type clock_type,
   3751			uint32_t clk_khz,
   3752			uint32_t stepping)
   3753{
   3754	struct dc_state *context = dc->current_state;
   3755	struct dc_clock_config clock_cfg = {0};
   3756	struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;
   3757
   3758	if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_clock)
   3759		return DC_FAIL_UNSUPPORTED_1;
   3760
   3761	dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
   3762		context, clock_type, &clock_cfg);
   3763
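        	/* Validate the request against the range and the bandwidth-required
        	 * minimum reported by the clock manager before committing it.
        	 */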
   3764	if (clk_khz > clock_cfg.max_clock_khz)
   3765		return DC_FAIL_CLK_EXCEED_MAX;
   3766
   3767	if (clk_khz < clock_cfg.min_clock_khz)
   3768		return DC_FAIL_CLK_BELOW_MIN;
   3769
   3770	if (clk_khz < clock_cfg.bw_requirequired_clock_khz)
   3771		return DC_FAIL_CLK_BELOW_CFG_REQUIRED;
   3772
   3773	/*update internal request clock for update clock use*/
   3774	if (clock_type == DC_CLOCK_TYPE_DISPCLK)
   3775		current_clocks->dispclk_khz = clk_khz;
   3776	else if (clock_type == DC_CLOCK_TYPE_DPPCLK)
   3777		current_clocks->dppclk_khz = clk_khz;
   3778	else
   3779		return DC_ERROR_UNEXPECTED;
   3780
   3781	if (dc->clk_mgr->funcs->update_clocks)
    3782		dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
    3783				context, true);
   3784	return DC_OK;
   3785
   3786}
   3787
   3788void dcn10_get_clock(struct dc *dc,
   3789			enum dc_clock_type clock_type,
   3790			struct dc_clock_config *clock_cfg)
   3791{
   3792	struct dc_state *context = dc->current_state;
   3793
   3794	if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
    3795		dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);
   3796
   3797}
   3798
   3799void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits)
   3800{
   3801	struct resource_pool *pool = dc->res_pool;
   3802	int i;
   3803
   3804	for (i = 0; i < pool->pipe_count; i++) {
   3805		struct hubp *hubp = pool->hubps[i];
   3806		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
   3807
   3808		hubp->funcs->hubp_read_state(hubp);
   3809
   3810		if (!s->blank_en)
   3811			dcc_en_bits[i] = s->dcc_en ? 1 : 0;
   3812	}
   3813}