dcn10_dpp_cm.c (25384B)
1/* 2 * Copyright 2016 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: AMD 23 * 24 */ 25 26#include "dm_services.h" 27 28#include "core_types.h" 29 30#include "reg_helper.h" 31#include "dcn10_dpp.h" 32#include "basics/conversion.h" 33#include "dcn10_cm_common.h" 34 35#define NUM_PHASES 64 36#define HORZ_MAX_TAPS 8 37#define VERT_MAX_TAPS 8 38 39#define BLACK_OFFSET_RGB_Y 0x0 40#define BLACK_OFFSET_CBCR 0x8000 41 42#define REG(reg)\ 43 dpp->tf_regs->reg 44 45#define CTX \ 46 dpp->base.ctx 47 48#undef FN 49#define FN(reg_name, field_name) \ 50 dpp->tf_shift->field_name, dpp->tf_mask->field_name 51 52#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0])) 53 54 55enum dcn10_coef_filter_type_sel { 56 SCL_COEF_LUMA_VERT_FILTER = 0, 57 SCL_COEF_LUMA_HORZ_FILTER = 1, 58 SCL_COEF_CHROMA_VERT_FILTER = 2, 59 SCL_COEF_CHROMA_HORZ_FILTER = 3, 60 SCL_COEF_ALPHA_VERT_FILTER = 4, 61 SCL_COEF_ALPHA_HORZ_FILTER = 5 62}; 63 64enum dscl_autocal_mode { 65 AUTOCAL_MODE_OFF = 0, 66 67 /* Autocal calculate the scaling ratio and initial phase and the 68 * DSCL_MODE_SEL must be set to 1 69 */ 70 AUTOCAL_MODE_AUTOSCALE = 1, 71 /* Autocal perform auto centering without replication and the 72 * DSCL_MODE_SEL must be set to 0 73 */ 74 AUTOCAL_MODE_AUTOCENTER = 2, 75 /* Autocal perform auto centering and auto replication and the 76 * DSCL_MODE_SEL must be set to 0 77 */ 78 AUTOCAL_MODE_AUTOREPLICATE = 3 79}; 80 81enum dscl_mode_sel { 82 DSCL_MODE_SCALING_444_BYPASS = 0, 83 DSCL_MODE_SCALING_444_RGB_ENABLE = 1, 84 DSCL_MODE_SCALING_444_YCBCR_ENABLE = 2, 85 DSCL_MODE_SCALING_420_YCBCR_ENABLE = 3, 86 DSCL_MODE_SCALING_420_LUMA_BYPASS = 4, 87 DSCL_MODE_SCALING_420_CHROMA_BYPASS = 5, 88 DSCL_MODE_DSCL_BYPASS = 6 89}; 90 91static void program_gamut_remap( 92 struct dcn10_dpp *dpp, 93 const uint16_t *regval, 94 enum gamut_remap_select select) 95{ 96 uint16_t selection = 0; 97 struct color_matrices_reg gam_regs; 98 99 if (regval == NULL || select == GAMUT_REMAP_BYPASS) { 100 REG_SET(CM_GAMUT_REMAP_CONTROL, 0, 101 CM_GAMUT_REMAP_MODE, 0); 102 return; 103 } 104 switch (select) { 105 case GAMUT_REMAP_COEFF: 106 selection = 1; 107 break; 108 case GAMUT_REMAP_COMA_COEFF: 109 selection = 2; 110 break; 111 case GAMUT_REMAP_COMB_COEFF: 112 selection = 3; 113 break; 114 default: 115 break; 116 } 117 118 gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11; 119 gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11; 120 gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12; 121 gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12; 122 123 124 if (select == GAMUT_REMAP_COEFF) { 125 gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12); 126 gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34); 127 128 cm_helper_program_color_matrices( 129 dpp->base.ctx, 130 regval, 131 &gam_regs); 132 133 } else if (select == GAMUT_REMAP_COMA_COEFF) { 134 135 gam_regs.csc_c11_c12 = REG(CM_COMA_C11_C12); 136 gam_regs.csc_c33_c34 = REG(CM_COMA_C33_C34); 137 138 cm_helper_program_color_matrices( 139 dpp->base.ctx, 140 regval, 141 &gam_regs); 142 143 } else { 144 145 gam_regs.csc_c11_c12 = REG(CM_COMB_C11_C12); 146 gam_regs.csc_c33_c34 = REG(CM_COMB_C33_C34); 147 148 cm_helper_program_color_matrices( 149 dpp->base.ctx, 150 regval, 151 &gam_regs); 152 } 153 154 REG_SET( 155 CM_GAMUT_REMAP_CONTROL, 0, 156 CM_GAMUT_REMAP_MODE, selection); 157 158} 159 160void dpp1_cm_set_gamut_remap( 161 struct dpp *dpp_base, 162 const struct dpp_grph_csc_adjustment *adjust) 163{ 164 struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); 165 int i = 0; 166 167 if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW) 168 /* Bypass if type is bypass or hw */ 169 program_gamut_remap(dpp, NULL, GAMUT_REMAP_BYPASS); 170 else { 171 struct fixed31_32 arr_matrix[12]; 172 uint16_t arr_reg_val[12]; 173 174 for (i = 0; i < 12; i++) 175 arr_matrix[i] = adjust->temperature_matrix[i]; 176 177 convert_float_matrix( 178 arr_reg_val, arr_matrix, 12); 179 180 program_gamut_remap(dpp, arr_reg_val, GAMUT_REMAP_COEFF); 181 } 182} 183 184static void dpp1_cm_program_color_matrix( 185 struct dcn10_dpp *dpp, 186 const uint16_t *regval) 187{ 188 uint32_t ocsc_mode; 189 uint32_t cur_mode; 190 struct color_matrices_reg gam_regs; 191 192 if (regval == NULL) { 193 BREAK_TO_DEBUGGER(); 194 return; 195 } 196 197 /* determine which CSC matrix (ocsc or comb) we are using 198 * currently. select the alternate set to double buffer 199 * the CSC update so CSC is updated on frame boundary 200 */ 201 REG_SET(CM_TEST_DEBUG_INDEX, 0, 202 CM_TEST_DEBUG_INDEX, 9); 203 204 REG_GET(CM_TEST_DEBUG_DATA, 205 CM_TEST_DEBUG_DATA_ID9_OCSC_MODE, &cur_mode); 206 207 if (cur_mode != 4) 208 ocsc_mode = 4; 209 else 210 ocsc_mode = 5; 211 212 213 gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_OCSC_C11; 214 gam_regs.masks.csc_c11 = dpp->tf_mask->CM_OCSC_C11; 215 gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_OCSC_C12; 216 gam_regs.masks.csc_c12 = dpp->tf_mask->CM_OCSC_C12; 217 218 if (ocsc_mode == 4) { 219 220 gam_regs.csc_c11_c12 = REG(CM_OCSC_C11_C12); 221 gam_regs.csc_c33_c34 = REG(CM_OCSC_C33_C34); 222 223 } else { 224 225 gam_regs.csc_c11_c12 = REG(CM_COMB_C11_C12); 226 gam_regs.csc_c33_c34 = REG(CM_COMB_C33_C34); 227 228 } 229 230 cm_helper_program_color_matrices( 231 dpp->base.ctx, 232 regval, 233 &gam_regs); 234 235 REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode); 236 237} 238 239void dpp1_cm_set_output_csc_default( 240 struct dpp *dpp_base, 241 enum dc_color_space colorspace) 242{ 243 struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); 244 const uint16_t *regval = NULL; 245 int arr_size; 246 247 regval = find_color_matrix(colorspace, &arr_size); 248 if (regval == NULL) { 249 BREAK_TO_DEBUGGER(); 250 return; 251 } 252 253 dpp1_cm_program_color_matrix(dpp, regval); 254} 255 256static void dpp1_cm_get_reg_field( 257 struct dcn10_dpp *dpp, 258 struct xfer_func_reg *reg) 259{ 260 reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION0_LUT_OFFSET; 261 reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION0_LUT_OFFSET; 262 reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; 263 reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; 264 reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION1_LUT_OFFSET; 265 reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION1_LUT_OFFSET; 266 reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; 267 reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; 268 269 reg->shifts.field_region_end = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_END_B; 270 reg->masks.field_region_end = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_END_B; 271 reg->shifts.field_region_end_slope = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_END_SLOPE_B; 272 reg->masks.field_region_end_slope = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_END_SLOPE_B; 273 reg->shifts.field_region_end_base = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_END_BASE_B; 274 reg->masks.field_region_end_base = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_END_BASE_B; 275 reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B; 276 reg->masks.field_region_linear_slope = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B; 277 reg->shifts.exp_region_start = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_START_B; 278 reg->masks.exp_region_start = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_START_B; 279 reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_B; 280 reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_B; 281} 282 283static void dpp1_cm_get_degamma_reg_field( 284 struct dcn10_dpp *dpp, 285 struct xfer_func_reg *reg) 286{ 287 reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET; 288 reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET; 289 reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; 290 reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; 291 reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET; 292 reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET; 293 reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; 294 reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; 295 296 reg->shifts.field_region_end = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_END_B; 297 reg->masks.field_region_end = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_END_B; 298 reg->shifts.field_region_end_slope = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B; 299 reg->masks.field_region_end_slope = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B; 300 reg->shifts.field_region_end_base = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_END_BASE_B; 301 reg->masks.field_region_end_base = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_END_BASE_B; 302 reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B; 303 reg->masks.field_region_linear_slope = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B; 304 reg->shifts.exp_region_start = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_START_B; 305 reg->masks.exp_region_start = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_START_B; 306 reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B; 307 reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B; 308} 309void dpp1_cm_set_output_csc_adjustment( 310 struct dpp *dpp_base, 311 const uint16_t *regval) 312{ 313 struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); 314 315 dpp1_cm_program_color_matrix(dpp, regval); 316} 317 318void dpp1_cm_power_on_regamma_lut(struct dpp *dpp_base, 319 bool power_on) 320{ 321 struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); 322 323 REG_SET(CM_MEM_PWR_CTRL, 0, 324 RGAM_MEM_PWR_FORCE, power_on == true ? 0:1); 325 326} 327 328void dpp1_cm_program_regamma_lut(struct dpp *dpp_base, 329 const struct pwl_result_data *rgb, 330 uint32_t num) 331{ 332 uint32_t i; 333 struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); 334 335 REG_SEQ_START(); 336 337 for (i = 0 ; i < num; i++) { 338 REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].red_reg); 339 REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].green_reg); 340 REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].blue_reg); 341 342 REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].delta_red_reg); 343 REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].delta_green_reg); 344 REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].delta_blue_reg); 345 346 } 347 348} 349 350void dpp1_cm_configure_regamma_lut( 351 struct dpp *dpp_base, 352 bool is_ram_a) 353{ 354 struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); 355 356 REG_UPDATE(CM_RGAM_LUT_WRITE_EN_MASK, 357 CM_RGAM_LUT_WRITE_EN_MASK, 7); 358 REG_UPDATE(CM_RGAM_LUT_WRITE_EN_MASK, 359 CM_RGAM_LUT_WRITE_SEL, is_ram_a == true ? 0:1); 360 REG_SET(CM_RGAM_LUT_INDEX, 0, CM_RGAM_LUT_INDEX, 0); 361} 362 363/*program re gamma RAM A*/ 364void dpp1_cm_program_regamma_luta_settings( 365 struct dpp *dpp_base, 366 const struct pwl_params *params) 367{ 368 struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); 369 struct xfer_func_reg gam_regs; 370 371 dpp1_cm_get_reg_field(dpp, &gam_regs); 372 373 gam_regs.start_cntl_b = REG(CM_RGAM_RAMA_START_CNTL_B); 374 gam_regs.start_cntl_g = REG(CM_RGAM_RAMA_START_CNTL_G); 375 gam_regs.start_cntl_r = REG(CM_RGAM_RAMA_START_CNTL_R); 376 gam_regs.start_slope_cntl_b = REG(CM_RGAM_RAMA_SLOPE_CNTL_B); 377 gam_regs.start_slope_cntl_g = REG(CM_RGAM_RAMA_SLOPE_CNTL_G); 378 gam_regs.start_slope_cntl_r = REG(CM_RGAM_RAMA_SLOPE_CNTL_R); 379 gam_regs.start_end_cntl1_b = REG(CM_RGAM_RAMA_END_CNTL1_B); 380 gam_regs.start_end_cntl2_b = REG(CM_RGAM_RAMA_END_CNTL2_B); 381 gam_regs.start_end_cntl1_g = REG(CM_RGAM_RAMA_END_CNTL1_G); 382 gam_regs.start_end_cntl2_g = REG(CM_RGAM_RAMA_END_CNTL2_G); 383 gam_regs.start_end_cntl1_r = REG(CM_RGAM_RAMA_END_CNTL1_R); 384 gam_regs.start_end_cntl2_r = REG(CM_RGAM_RAMA_END_CNTL2_R); 385 gam_regs.region_start = REG(CM_RGAM_RAMA_REGION_0_1); 386 gam_regs.region_end = REG(CM_RGAM_RAMA_REGION_32_33); 387 388 cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs); 389 390} 391 392/*program re gamma RAM B*/ 393void dpp1_cm_program_regamma_lutb_settings( 394 struct dpp *dpp_base, 395 const struct pwl_params *params) 396{ 397 struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); 398 struct xfer_func_reg gam_regs; 399 400 dpp1_cm_get_reg_field(dpp, &gam_regs); 401 402 gam_regs.start_cntl_b = REG(CM_RGAM_RAMB_START_CNTL_B); 403 gam_regs.start_cntl_g = REG(CM_RGAM_RAMB_START_CNTL_G); 404 gam_regs.start_cntl_r = REG(CM_RGAM_RAMB_START_CNTL_R); 405 gam_regs.start_slope_cntl_b = REG(CM_RGAM_RAMB_SLOPE_CNTL_B); 406 gam_regs.start_slope_cntl_g = REG(CM_RGAM_RAMB_SLOPE_CNTL_G); 407 gam_regs.start_slope_cntl_r = REG(CM_RGAM_RAMB_SLOPE_CNTL_R); 408 gam_regs.start_end_cntl1_b = REG(CM_RGAM_RAMB_END_CNTL1_B); 409 gam_regs.start_end_cntl2_b = REG(CM_RGAM_RAMB_END_CNTL2_B); 410 gam_regs.start_end_cntl1_g = REG(CM_RGAM_RAMB_END_CNTL1_G); 411 gam_regs.start_end_cntl2_g = REG(CM_RGAM_RAMB_END_CNTL2_G); 412 gam_regs.start_end_cntl1_r = REG(CM_RGAM_RAMB_END_CNTL1_R); 413 gam_regs.start_end_cntl2_r = REG(CM_RGAM_RAMB_END_CNTL2_R); 414 gam_regs.region_start = REG(CM_RGAM_RAMB_REGION_0_1); 415 gam_regs.region_end = REG(CM_RGAM_RAMB_REGION_32_33); 416 417 cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs); 418} 419 420void dpp1_program_input_csc( 421 struct dpp *dpp_base, 422 enum dc_color_space color_space, 423 enum dcn10_input_csc_select input_select, 424 const struct out_csc_color_matrix *tbl_entry) 425{ 426 struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); 427 int i; 428 int arr_size = sizeof(dpp_input_csc_matrix)/sizeof(struct dpp_input_csc_matrix); 429 const uint16_t *regval = NULL; 430 uint32_t cur_select = 0; 431 enum dcn10_input_csc_select select; 432 struct color_matrices_reg gam_regs; 433 434 if (input_select == INPUT_CSC_SELECT_BYPASS) { 435 REG_SET(CM_ICSC_CONTROL, 0, CM_ICSC_MODE, 0); 436 return; 437 } 438 439 if (tbl_entry == NULL) { 440 for (i = 0; i < arr_size; i++) 441 if (dpp_input_csc_matrix[i].color_space == color_space) { 442 regval = dpp_input_csc_matrix[i].regval; 443 break; 444 } 445 446 if (regval == NULL) { 447 BREAK_TO_DEBUGGER(); 448 return; 449 } 450 } else { 451 regval = tbl_entry->regval; 452 } 453 454 /* determine which CSC matrix (icsc or coma) we are using 455 * currently. select the alternate set to double buffer 456 * the CSC update so CSC is updated on frame boundary 457 */ 458 REG_SET(CM_TEST_DEBUG_INDEX, 0, 459 CM_TEST_DEBUG_INDEX, 9); 460 461 REG_GET(CM_TEST_DEBUG_DATA, 462 CM_TEST_DEBUG_DATA_ID9_ICSC_MODE, &cur_select); 463 464 if (cur_select != INPUT_CSC_SELECT_ICSC) 465 select = INPUT_CSC_SELECT_ICSC; 466 else 467 select = INPUT_CSC_SELECT_COMA; 468 469 gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_ICSC_C11; 470 gam_regs.masks.csc_c11 = dpp->tf_mask->CM_ICSC_C11; 471 gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_ICSC_C12; 472 gam_regs.masks.csc_c12 = dpp->tf_mask->CM_ICSC_C12; 473 474 if (select == INPUT_CSC_SELECT_ICSC) { 475 476 gam_regs.csc_c11_c12 = REG(CM_ICSC_C11_C12); 477 gam_regs.csc_c33_c34 = REG(CM_ICSC_C33_C34); 478 479 } else { 480 481 gam_regs.csc_c11_c12 = REG(CM_COMA_C11_C12); 482 gam_regs.csc_c33_c34 = REG(CM_COMA_C33_C34); 483 484 } 485 486 cm_helper_program_color_matrices( 487 dpp->base.ctx, 488 regval, 489 &gam_regs); 490 491 REG_SET(CM_ICSC_CONTROL, 0, 492 CM_ICSC_MODE, select); 493} 494 495//keep here for now, decide multi dce support later 496void dpp1_program_bias_and_scale( 497 struct dpp *dpp_base, 498 struct dc_bias_and_scale *params) 499{ 500 struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); 501 502 REG_SET_2(CM_BNS_VALUES_R, 0, 503 CM_BNS_SCALE_R, params->scale_red, 504 CM_BNS_BIAS_R, params->bias_red); 505 506 REG_SET_2(CM_BNS_VALUES_G, 0, 507 CM_BNS_SCALE_G, params->scale_green, 508 CM_BNS_BIAS_G, params->bias_green); 509 510 REG_SET_2(CM_BNS_VALUES_B, 0, 511 CM_BNS_SCALE_B, params->scale_blue, 512 CM_BNS_BIAS_B, params->bias_blue); 513 514} 515 516/*program de gamma RAM B*/ 517void dpp1_program_degamma_lutb_settings( 518 struct dpp *dpp_base, 519 const struct pwl_params *params) 520{ 521 struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); 522 struct xfer_func_reg gam_regs; 523 524 dpp1_cm_get_degamma_reg_field(dpp, &gam_regs); 525 526 gam_regs.start_cntl_b = REG(CM_DGAM_RAMB_START_CNTL_B); 527 gam_regs.start_cntl_g = REG(CM_DGAM_RAMB_START_CNTL_G); 528 gam_regs.start_cntl_r = REG(CM_DGAM_RAMB_START_CNTL_R); 529 gam_regs.start_slope_cntl_b = REG(CM_DGAM_RAMB_SLOPE_CNTL_B); 530 gam_regs.start_slope_cntl_g = REG(CM_DGAM_RAMB_SLOPE_CNTL_G); 531 gam_regs.start_slope_cntl_r = REG(CM_DGAM_RAMB_SLOPE_CNTL_R); 532 gam_regs.start_end_cntl1_b = REG(CM_DGAM_RAMB_END_CNTL1_B); 533 gam_regs.start_end_cntl2_b = REG(CM_DGAM_RAMB_END_CNTL2_B); 534 gam_regs.start_end_cntl1_g = REG(CM_DGAM_RAMB_END_CNTL1_G); 535 gam_regs.start_end_cntl2_g = REG(CM_DGAM_RAMB_END_CNTL2_G); 536 gam_regs.start_end_cntl1_r = REG(CM_DGAM_RAMB_END_CNTL1_R); 537 gam_regs.start_end_cntl2_r = REG(CM_DGAM_RAMB_END_CNTL2_R); 538 gam_regs.region_start = REG(CM_DGAM_RAMB_REGION_0_1); 539 gam_regs.region_end = REG(CM_DGAM_RAMB_REGION_14_15); 540 541 542 cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs); 543} 544 545/*program de gamma RAM A*/ 546void dpp1_program_degamma_luta_settings( 547 struct dpp *dpp_base, 548 const struct pwl_params *params) 549{ 550 struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); 551 struct xfer_func_reg gam_regs; 552 553 dpp1_cm_get_degamma_reg_field(dpp, &gam_regs); 554 555 gam_regs.start_cntl_b = REG(CM_DGAM_RAMA_START_CNTL_B); 556 gam_regs.start_cntl_g = REG(CM_DGAM_RAMA_START_CNTL_G); 557 gam_regs.start_cntl_r = REG(CM_DGAM_RAMA_START_CNTL_R); 558 gam_regs.start_slope_cntl_b = REG(CM_DGAM_RAMA_SLOPE_CNTL_B); 559 gam_regs.start_slope_cntl_g = REG(CM_DGAM_RAMA_SLOPE_CNTL_G); 560 gam_regs.start_slope_cntl_r = REG(CM_DGAM_RAMA_SLOPE_CNTL_R); 561 gam_regs.start_end_cntl1_b = REG(CM_DGAM_RAMA_END_CNTL1_B); 562 gam_regs.start_end_cntl2_b = REG(CM_DGAM_RAMA_END_CNTL2_B); 563 gam_regs.start_end_cntl1_g = REG(CM_DGAM_RAMA_END_CNTL1_G); 564 gam_regs.start_end_cntl2_g = REG(CM_DGAM_RAMA_END_CNTL2_G); 565 gam_regs.start_end_cntl1_r = REG(CM_DGAM_RAMA_END_CNTL1_R); 566 gam_regs.start_end_cntl2_r = REG(CM_DGAM_RAMA_END_CNTL2_R); 567 gam_regs.region_start = REG(CM_DGAM_RAMA_REGION_0_1); 568 gam_regs.region_end = REG(CM_DGAM_RAMA_REGION_14_15); 569 570 cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs); 571} 572 573void dpp1_power_on_degamma_lut( 574 struct dpp *dpp_base, 575 bool power_on) 576{ 577 struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); 578 579 REG_SET(CM_MEM_PWR_CTRL, 0, 580 SHARED_MEM_PWR_DIS, power_on ? 0:1); 581 582} 583 584static void dpp1_enable_cm_block( 585 struct dpp *dpp_base) 586{ 587 struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); 588 589 REG_UPDATE(CM_CMOUT_CONTROL, CM_CMOUT_ROUND_TRUNC_MODE, 8); 590 REG_UPDATE(CM_CONTROL, CM_BYPASS_EN, 0); 591} 592 593void dpp1_set_degamma( 594 struct dpp *dpp_base, 595 enum ipp_degamma_mode mode) 596{ 597 struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); 598 dpp1_enable_cm_block(dpp_base); 599 600 switch (mode) { 601 case IPP_DEGAMMA_MODE_BYPASS: 602 /* Setting de gamma bypass for now */ 603 REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 0); 604 break; 605 case IPP_DEGAMMA_MODE_HW_sRGB: 606 REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 1); 607 break; 608 case IPP_DEGAMMA_MODE_HW_xvYCC: 609 REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 2); 610 break; 611 case IPP_DEGAMMA_MODE_USER_PWL: 612 REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 3); 613 break; 614 default: 615 BREAK_TO_DEBUGGER(); 616 break; 617 } 618 619 REG_SEQ_SUBMIT(); 620 REG_SEQ_WAIT_DONE(); 621} 622 623void dpp1_degamma_ram_select( 624 struct dpp *dpp_base, 625 bool use_ram_a) 626{ 627 struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); 628 629 if (use_ram_a) 630 REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 3); 631 else 632 REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 4); 633 634} 635 636static bool dpp1_degamma_ram_inuse( 637 struct dpp *dpp_base, 638 bool *ram_a_inuse) 639{ 640 bool ret = false; 641 uint32_t status_reg = 0; 642 struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); 643 644 REG_GET(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_DGAM_CONFIG_STATUS, 645 &status_reg); 646 647 if (status_reg == 9) { 648 *ram_a_inuse = true; 649 ret = true; 650 } else if (status_reg == 10) { 651 *ram_a_inuse = false; 652 ret = true; 653 } 654 return ret; 655} 656 657void dpp1_program_degamma_lut( 658 struct dpp *dpp_base, 659 const struct pwl_result_data *rgb, 660 uint32_t num, 661 bool is_ram_a) 662{ 663 uint32_t i; 664 665 struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); 666 REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_HOST_EN, 0); 667 REG_UPDATE(CM_DGAM_LUT_WRITE_EN_MASK, 668 CM_DGAM_LUT_WRITE_EN_MASK, 7); 669 REG_UPDATE(CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_LUT_WRITE_SEL, 670 is_ram_a == true ? 0:1); 671 672 REG_SET(CM_DGAM_LUT_INDEX, 0, CM_DGAM_LUT_INDEX, 0); 673 for (i = 0 ; i < num; i++) { 674 REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].red_reg); 675 REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].green_reg); 676 REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].blue_reg); 677 678 REG_SET(CM_DGAM_LUT_DATA, 0, 679 CM_DGAM_LUT_DATA, rgb[i].delta_red_reg); 680 REG_SET(CM_DGAM_LUT_DATA, 0, 681 CM_DGAM_LUT_DATA, rgb[i].delta_green_reg); 682 REG_SET(CM_DGAM_LUT_DATA, 0, 683 CM_DGAM_LUT_DATA, rgb[i].delta_blue_reg); 684 } 685} 686 687void dpp1_set_degamma_pwl(struct dpp *dpp_base, 688 const struct pwl_params *params) 689{ 690 bool is_ram_a = true; 691 692 dpp1_power_on_degamma_lut(dpp_base, true); 693 dpp1_enable_cm_block(dpp_base); 694 dpp1_degamma_ram_inuse(dpp_base, &is_ram_a); 695 if (is_ram_a == true) 696 dpp1_program_degamma_lutb_settings(dpp_base, params); 697 else 698 dpp1_program_degamma_luta_settings(dpp_base, params); 699 700 dpp1_program_degamma_lut(dpp_base, params->rgb_resulted, 701 params->hw_points_num, !is_ram_a); 702 dpp1_degamma_ram_select(dpp_base, !is_ram_a); 703} 704 705void dpp1_full_bypass(struct dpp *dpp_base) 706{ 707 struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); 708 709 /* Input pixel format: ARGB8888 */ 710 REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0, 711 CNVC_SURFACE_PIXEL_FORMAT, 0x8); 712 713 /* Zero expansion */ 714 REG_SET_3(FORMAT_CONTROL, 0, 715 CNVC_BYPASS, 0, 716 FORMAT_CONTROL__ALPHA_EN, 0, 717 FORMAT_EXPANSION_MODE, 0); 718 719 /* COLOR_KEYER_CONTROL.COLOR_KEYER_EN = 0 this should be default */ 720 if (dpp->tf_mask->CM_BYPASS_EN) 721 REG_SET(CM_CONTROL, 0, CM_BYPASS_EN, 1); 722 else 723 REG_SET(CM_CONTROL, 0, CM_BYPASS, 1); 724 725 /* Setting degamma bypass for now */ 726 REG_SET(CM_DGAM_CONTROL, 0, CM_DGAM_LUT_MODE, 0); 727} 728 729static bool dpp1_ingamma_ram_inuse(struct dpp *dpp_base, 730 bool *ram_a_inuse) 731{ 732 bool in_use = false; 733 uint32_t status_reg = 0; 734 struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); 735 736 REG_GET(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_DGAM_CONFIG_STATUS, 737 &status_reg); 738 739 // 1 => IGAM_RAMA, 3 => IGAM_RAMA & DGAM_ROMA, 4 => IGAM_RAMA & DGAM_ROMB 740 if (status_reg == 1 || status_reg == 3 || status_reg == 4) { 741 *ram_a_inuse = true; 742 in_use = true; 743 // 2 => IGAM_RAMB, 5 => IGAM_RAMB & DGAM_ROMA, 6 => IGAM_RAMB & DGAM_ROMB 744 } else if (status_reg == 2 || status_reg == 5 || status_reg == 6) { 745 *ram_a_inuse = false; 746 in_use = true; 747 } 748 return in_use; 749} 750 751/* 752 * Input gamma LUT currently supports 256 values only. This means input color 753 * can have a maximum of 8 bits per channel (= 256 possible values) in order to 754 * have a one-to-one mapping with the LUT. Truncation will occur with color 755 * values greater than 8 bits. 756 * 757 * In the future, this function should support additional input gamma methods, 758 * such as piecewise linear mapping, and input gamma bypass. 759 */ 760void dpp1_program_input_lut( 761 struct dpp *dpp_base, 762 const struct dc_gamma *gamma) 763{ 764 int i; 765 struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); 766 bool rama_occupied = false; 767 uint32_t ram_num; 768 // Power on LUT memory. 769 REG_SET(CM_MEM_PWR_CTRL, 0, SHARED_MEM_PWR_DIS, 1); 770 dpp1_enable_cm_block(dpp_base); 771 // Determine whether to use RAM A or RAM B 772 dpp1_ingamma_ram_inuse(dpp_base, &rama_occupied); 773 if (!rama_occupied) 774 REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_SEL, 0); 775 else 776 REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_SEL, 1); 777 // RW mode is 256-entry LUT 778 REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_RW_MODE, 0); 779 // IGAM Input format should be 8 bits per channel. 780 REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_INPUT_FORMAT, 0); 781 // Do not mask any R,G,B values 782 REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_WRITE_EN_MASK, 7); 783 // LUT-256, unsigned, integer, new u0.12 format 784 REG_UPDATE_3( 785 CM_IGAM_CONTROL, 786 CM_IGAM_LUT_FORMAT_R, 3, 787 CM_IGAM_LUT_FORMAT_G, 3, 788 CM_IGAM_LUT_FORMAT_B, 3); 789 // Start at index 0 of IGAM LUT 790 REG_UPDATE(CM_IGAM_LUT_RW_INDEX, CM_IGAM_LUT_RW_INDEX, 0); 791 for (i = 0; i < gamma->num_entries; i++) { 792 REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR, 793 dc_fixpt_round( 794 gamma->entries.red[i])); 795 REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR, 796 dc_fixpt_round( 797 gamma->entries.green[i])); 798 REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR, 799 dc_fixpt_round( 800 gamma->entries.blue[i])); 801 } 802 // Power off LUT memory 803 REG_SET(CM_MEM_PWR_CTRL, 0, SHARED_MEM_PWR_DIS, 0); 804 // Enable IGAM LUT on ram we just wrote to. 2 => RAMA, 3 => RAMB 805 REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, rama_occupied ? 3 : 2); 806 REG_GET(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, &ram_num); 807} 808 809void dpp1_set_hdr_multiplier( 810 struct dpp *dpp_base, 811 uint32_t multiplier) 812{ 813 struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); 814 815 REG_UPDATE(CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, multiplier); 816}