dc_helper.c (21584B)
/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
/*
 * dc_helper.c
 *
 *  Created on: Aug 30, 2016
 *      Author: agrodzov
 */

#include <linux/delay.h>
#include <linux/stdarg.h>

#include "dm_services.h"

#include "dc.h"
#include "dc_dmub_srv.h"
#include "reg_helper.h"

static inline void submit_dmub_read_modify_write(
	struct dc_reg_helper_state *offload,
	const struct dc_context *ctx)
{
	struct dmub_rb_cmd_read_modify_write *cmd_buf = &offload->cmd_data.read_modify_write;
	bool gather = false;

	offload->should_burst_write =
			(offload->same_addr_count == (DMUB_READ_MODIFY_WRITE_SEQ__MAX - 1));
	cmd_buf->header.payload_bytes =
			sizeof(struct dmub_cmd_read_modify_write_sequence) * offload->reg_seq_count;

	gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
	ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;

	dc_dmub_srv_cmd_queue(ctx->dmub_srv, &offload->cmd_data);

	ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather;

	memset(cmd_buf, 0, sizeof(*cmd_buf));

	offload->reg_seq_count = 0;
	offload->same_addr_count = 0;
}

static inline void submit_dmub_burst_write(
	struct dc_reg_helper_state *offload,
	const struct dc_context *ctx)
{
	struct dmub_rb_cmd_burst_write *cmd_buf = &offload->cmd_data.burst_write;
	bool gather = false;

	cmd_buf->header.payload_bytes =
			sizeof(uint32_t) * offload->reg_seq_count;

	gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
	ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;

	dc_dmub_srv_cmd_queue(ctx->dmub_srv, &offload->cmd_data);

	ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather;

	memset(cmd_buf, 0, sizeof(*cmd_buf));

	offload->reg_seq_count = 0;
}

static inline void submit_dmub_reg_wait(
	struct dc_reg_helper_state *offload,
	const struct dc_context *ctx)
{
	struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait;
	bool gather = false;

	gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
	ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;

	dc_dmub_srv_cmd_queue(ctx->dmub_srv, &offload->cmd_data);

	memset(cmd_buf, 0, sizeof(*cmd_buf));
	offload->reg_seq_count = 0;

	ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather;
}
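/*
 * Note (editorial): all three submit helpers above save, clear and then
 * restore gather_in_progress around dc_dmub_srv_cmd_queue(). Queueing the
 * command may itself touch registers through these same helpers; clearing
 * the flag keeps those accesses from being re-gathered into the very
 * buffer that is being flushed.
 */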
struct dc_reg_value_masks {
	uint32_t value;
	uint32_t mask;
};

struct dc_reg_sequence {
	uint32_t addr;
	struct dc_reg_value_masks value_masks;
};

static inline void set_reg_field_value_masks(
	struct dc_reg_value_masks *field_value_mask,
	uint32_t value,
	uint32_t mask,
	uint8_t shift)
{
	ASSERT(mask != 0);

	field_value_mask->value = (field_value_mask->value & ~mask) | (mask & (value << shift));
	field_value_mask->mask = field_value_mask->mask | mask;
}

static void set_reg_field_values(struct dc_reg_value_masks *field_value_mask,
		uint32_t addr, int n,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
		va_list ap)
{
	uint32_t shift, mask, field_value;
	int i = 1;

	/* gather all bits value/mask getting updated in this register */
	set_reg_field_value_masks(field_value_mask,
			field_value1, mask1, shift1);

	while (i < n) {
		shift = va_arg(ap, uint32_t);
		mask = va_arg(ap, uint32_t);
		field_value = va_arg(ap, uint32_t);

		set_reg_field_value_masks(field_value_mask,
				field_value, mask, shift);
		i++;
	}
}

static void dmub_flush_buffer_execute(
		struct dc_reg_helper_state *offload,
		const struct dc_context *ctx)
{
	submit_dmub_read_modify_write(offload, ctx);
	dc_dmub_srv_cmd_execute(ctx->dmub_srv);
}

static void dmub_flush_burst_write_buffer_execute(
		struct dc_reg_helper_state *offload,
		const struct dc_context *ctx)
{
	submit_dmub_burst_write(offload, ctx);
	dc_dmub_srv_cmd_execute(ctx->dmub_srv);
}

static bool dmub_reg_value_burst_set_pack(const struct dc_context *ctx, uint32_t addr,
		uint32_t reg_val)
{
	struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload;
	struct dmub_rb_cmd_burst_write *cmd_buf = &offload->cmd_data.burst_write;

	/* flush command if buffer is full */
	if (offload->reg_seq_count == DMUB_BURST_WRITE_VALUES__MAX)
		dmub_flush_burst_write_buffer_execute(offload, ctx);

	if (offload->cmd_data.cmd_common.header.type == DMUB_CMD__REG_SEQ_BURST_WRITE &&
			addr != cmd_buf->addr) {
		dmub_flush_burst_write_buffer_execute(offload, ctx);
		return false;
	}

	cmd_buf->header.type = DMUB_CMD__REG_SEQ_BURST_WRITE;
	cmd_buf->header.sub_type = 0;
	cmd_buf->addr = addr;
	cmd_buf->write_values[offload->reg_seq_count] = reg_val;
	offload->reg_seq_count++;

	return true;
}
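/*
 * Note (editorial): dmub_reg_value_burst_set_pack() returns false when the
 * incoming write targets a different address than the burst in flight; the
 * burst is flushed and the caller (dmub_reg_value_pack() below) falls back
 * to packing a read-modify-write sequence instead.
 */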
static uint32_t dmub_reg_value_pack(const struct dc_context *ctx, uint32_t addr,
		struct dc_reg_value_masks *field_value_mask)
{
	struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload;
	struct dmub_rb_cmd_read_modify_write *cmd_buf = &offload->cmd_data.read_modify_write;
	struct dmub_cmd_read_modify_write_sequence *seq;

	/* flush command if buffer is full */
	if (offload->cmd_data.cmd_common.header.type != DMUB_CMD__REG_SEQ_BURST_WRITE &&
			offload->reg_seq_count == DMUB_READ_MODIFY_WRITE_SEQ__MAX)
		dmub_flush_buffer_execute(offload, ctx);

	if (offload->should_burst_write) {
		if (dmub_reg_value_burst_set_pack(ctx, addr, field_value_mask->value))
			return field_value_mask->value;
		else
			offload->should_burst_write = false;
	}

	/* pack commands */
	cmd_buf->header.type = DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE;
	cmd_buf->header.sub_type = 0;
	seq = &cmd_buf->seq[offload->reg_seq_count];

	if (offload->reg_seq_count) {
		if (cmd_buf->seq[offload->reg_seq_count - 1].addr == addr)
			offload->same_addr_count++;
		else
			offload->same_addr_count = 0;
	}

	seq->addr = addr;
	seq->modify_mask = field_value_mask->mask;
	seq->modify_value = field_value_mask->value;
	offload->reg_seq_count++;

	return field_value_mask->value;
}

static void dmub_reg_wait_done_pack(const struct dc_context *ctx, uint32_t addr,
		uint32_t mask, uint32_t shift, uint32_t condition_value, uint32_t time_out_us)
{
	struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload;
	struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait;

	cmd_buf->header.type = DMUB_CMD__REG_REG_WAIT;
	cmd_buf->header.sub_type = 0;
	cmd_buf->reg_wait.addr = addr;
	cmd_buf->reg_wait.condition_field_value = mask & (condition_value << shift);
	cmd_buf->reg_wait.mask = mask;
	cmd_buf->reg_wait.time_out_us = time_out_us;
}

uint32_t generic_reg_update_ex(const struct dc_context *ctx,
		uint32_t addr, int n,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
		...)
{
	struct dc_reg_value_masks field_value_mask = {0};
	uint32_t reg_val;
	va_list ap;

	va_start(ap, field_value1);

	set_reg_field_values(&field_value_mask, addr, n, shift1, mask1,
			field_value1, ap);

	va_end(ap);

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress)
		return dmub_reg_value_pack(ctx, addr, &field_value_mask);
		/* todo: return void so we can decouple code running in driver from register states */

	/* mmio write directly */
	reg_val = dm_read_reg(ctx, addr);
	reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value;
	dm_write_reg(ctx, addr, reg_val);
	return reg_val;
}
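/*
 * Illustrative sketch (not part of the driver): generic_reg_update_ex()
 * consumes (shift, mask, value) triples, one per field. Callers normally
 * go through the REG_UPDATE macros in reg_helper.h rather than calling it
 * directly; the register and field names below are hypothetical.
 */
#if 0
	generic_reg_update_ex(ctx, mmEXAMPLE_CNTL, 2,
			EXAMPLE_CNTL__FIELD_A__SHIFT, EXAMPLE_CNTL__FIELD_A_MASK, 1,
			EXAMPLE_CNTL__FIELD_B__SHIFT, EXAMPLE_CNTL__FIELD_B_MASK, 0);
#endif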
uint32_t generic_reg_set_ex(const struct dc_context *ctx,
		uint32_t addr, uint32_t reg_val, int n,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
		...)
{
	struct dc_reg_value_masks field_value_mask = {0};
	va_list ap;

	va_start(ap, field_value1);

	set_reg_field_values(&field_value_mask, addr, n, shift1, mask1,
			field_value1, ap);

	va_end(ap);

	/* mmio write directly */
	reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value;

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress) {
		return dmub_reg_value_burst_set_pack(ctx, addr, reg_val);
		/* todo: return void so we can decouple code running in driver from register states */
	}

	dm_write_reg(ctx, addr, reg_val);
	return reg_val;
}

uint32_t generic_reg_get(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift, uint32_t mask, uint32_t *field_value)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value = get_reg_field_value_ex(reg_val, mask, shift);
	return reg_val;
}

uint32_t generic_reg_get2(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	return reg_val;
}

uint32_t generic_reg_get3(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	return reg_val;
}

uint32_t generic_reg_get4(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
	return reg_val;
}

uint32_t generic_reg_get5(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
		uint8_t shift5, uint32_t mask5, uint32_t *field_value5)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
	*field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
	return reg_val;
}
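/*
 * Illustrative sketch (not part of the driver): the generic_reg_getN()
 * variants read the register once and decode N fields from the cached
 * value, avoiding repeated MMIO reads. Names below are hypothetical; real
 * callers use the REG_GET/REG_GET_2 macros from reg_helper.h.
 */
#if 0
	uint32_t enabled, state;

	generic_reg_get2(ctx, mmEXAMPLE_STATUS,
			EXAMPLE_STATUS__ENABLED__SHIFT, EXAMPLE_STATUS__ENABLED_MASK, &enabled,
			EXAMPLE_STATUS__STATE__SHIFT, EXAMPLE_STATUS__STATE_MASK, &state);
#endif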
uint32_t generic_reg_get6(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
		uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
		uint8_t shift6, uint32_t mask6, uint32_t *field_value6)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
	*field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
	*field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6);
	return reg_val;
}

uint32_t generic_reg_get7(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
		uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
		uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
		uint8_t shift7, uint32_t mask7, uint32_t *field_value7)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
	*field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
	*field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6);
	*field_value7 = get_reg_field_value_ex(reg_val, mask7, shift7);
	return reg_val;
}

uint32_t generic_reg_get8(const struct dc_context *ctx, uint32_t addr,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
		uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
		uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
		uint8_t shift7, uint32_t mask7, uint32_t *field_value7,
		uint8_t shift8, uint32_t mask8, uint32_t *field_value8)
{
	uint32_t reg_val = dm_read_reg(ctx, addr);
	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
	*field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
	*field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6);
	*field_value7 = get_reg_field_value_ex(reg_val, mask7, shift7);
	*field_value8 = get_reg_field_value_ex(reg_val, mask8, shift8);
	return reg_val;
}
/* note: a va_arg version of this is a pretty bad idea: since the output
 * parameters are passed by pointer, the compiler cannot check that their
 * types and sizes match, which makes it prone to stack-corruption bugs

uint32_t generic_reg_get(const struct dc_context *ctx,
		uint32_t addr, int n, ...)
{
	uint32_t shift, mask;
	uint32_t *field_value;
	uint32_t reg_val;
	int i = 0;

	reg_val = dm_read_reg(ctx, addr);

	va_list ap;
	va_start(ap, n);

	while (i < n) {
		shift = va_arg(ap, uint32_t);
		mask = va_arg(ap, uint32_t);
		field_value = va_arg(ap, uint32_t *);

		*field_value = get_reg_field_value_ex(reg_val, mask, shift);
		i++;
	}

	va_end(ap);

	return reg_val;
}
*/
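/*
 * Editorial note on the disabled variant above: va_arg(ap, uint32_t *) is
 * not type-checked, so a caller passing, say, the address of a uint8_t
 * would compile cleanly while the helper writes four bytes through the
 * pointer. The fixed-arity generic_reg_getN() functions keep full
 * compile-time checking of every output argument.
 */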
void generic_reg_wait(const struct dc_context *ctx,
	uint32_t addr, uint32_t shift, uint32_t mask, uint32_t condition_value,
	unsigned int delay_between_poll_us, unsigned int time_out_num_tries,
	const char *func_name, int line)
{
	uint32_t field_value;
	uint32_t reg_val;
	int i;

	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress) {
		dmub_reg_wait_done_pack(ctx, addr, mask, shift, condition_value,
				delay_between_poll_us * time_out_num_tries);
		return;
	}

	/*
	 * Something is terribly wrong if the time out is > 3000ms.
	 * 3000ms is the maximum time needed for SMU to pass values back.
	 * This value comes from experiments.
	 */
	ASSERT(delay_between_poll_us * time_out_num_tries <= 3000000);

	for (i = 0; i <= time_out_num_tries; i++) {
		if (i) {
			if (delay_between_poll_us >= 1000)
				msleep(delay_between_poll_us/1000);
			else if (delay_between_poll_us > 0)
				udelay(delay_between_poll_us);
		}

		reg_val = dm_read_reg(ctx, addr);

		field_value = get_reg_field_value_ex(reg_val, mask, shift);

		if (field_value == condition_value) {
			if (i * delay_between_poll_us > 1000 &&
			    !IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
				DC_LOG_DC("REG_WAIT taking a while: %dms in %s line:%d\n",
						delay_between_poll_us * i / 1000,
						func_name, line);
			return;
		}
	}

	DC_LOG_WARNING("REG_WAIT timeout %dus * %d tries - %s line:%d\n",
			delay_between_poll_us, time_out_num_tries,
			func_name, line);

	if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
		BREAK_TO_DEBUGGER();
}

void generic_write_indirect_reg(const struct dc_context *ctx,
		uint32_t addr_index, uint32_t addr_data,
		uint32_t index, uint32_t data)
{
	dm_write_reg(ctx, addr_index, index);
	dm_write_reg(ctx, addr_data, data);
}

uint32_t generic_read_indirect_reg(const struct dc_context *ctx,
		uint32_t addr_index, uint32_t addr_data,
		uint32_t index)
{
	uint32_t value = 0;

	// there should not be any offload in progress when reading a register
	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress) {
		ASSERT(false);
	}

	dm_write_reg(ctx, addr_index, index);
	value = dm_read_reg(ctx, addr_data);

	return value;
}
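/*
 * Illustrative sketch (not part of the driver): indirect access goes
 * through a paired index/data register. The index is written first, then
 * the data port is read or written. Register names and the index value
 * below are hypothetical.
 */
#if 0
	uint32_t v;

	v = generic_read_indirect_reg(ctx, mmEXAMPLE_INDEX, mmEXAMPLE_DATA, 0x10);
	generic_write_indirect_reg(ctx, mmEXAMPLE_INDEX, mmEXAMPLE_DATA, 0x10, v | 0x1);
#endif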
uint32_t generic_indirect_reg_get(const struct dc_context *ctx,
		uint32_t addr_index, uint32_t addr_data,
		uint32_t index, int n,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		...)
{
	uint32_t shift, mask, *field_value;
	uint32_t value = 0;
	int i = 1;

	va_list ap;

	va_start(ap, field_value1);

	value = generic_read_indirect_reg(ctx, addr_index, addr_data, index);
	*field_value1 = get_reg_field_value_ex(value, mask1, shift1);

	while (i < n) {
		shift = va_arg(ap, uint32_t);
		mask = va_arg(ap, uint32_t);
		field_value = va_arg(ap, uint32_t *);

		*field_value = get_reg_field_value_ex(value, mask, shift);
		i++;
	}

	va_end(ap);

	return value;
}

uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,
		uint32_t addr_index, uint32_t addr_data,
		uint32_t index, uint32_t reg_val, int n,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
		...)
{
	uint32_t shift, mask, field_value;
	int i = 1;

	va_list ap;

	va_start(ap, field_value1);

	reg_val = set_reg_field_value_ex(reg_val, field_value1, mask1, shift1);

	while (i < n) {
		shift = va_arg(ap, uint32_t);
		mask = va_arg(ap, uint32_t);
		field_value = va_arg(ap, uint32_t);

		reg_val = set_reg_field_value_ex(reg_val, field_value, mask, shift);
		i++;
	}

	generic_write_indirect_reg(ctx, addr_index, addr_data, index, reg_val);
	va_end(ap);

	return reg_val;
}

uint32_t generic_indirect_reg_update_ex_sync(const struct dc_context *ctx,
		uint32_t index, uint32_t reg_val, int n,
		uint8_t shift1, uint32_t mask1, uint32_t field_value1,
		...)
{
	uint32_t shift, mask, field_value;
	int i = 1;

	va_list ap;

	va_start(ap, field_value1);

	reg_val = set_reg_field_value_ex(reg_val, field_value1, mask1, shift1);

	while (i < n) {
		shift = va_arg(ap, uint32_t);
		mask = va_arg(ap, uint32_t);
		field_value = va_arg(ap, uint32_t);

		reg_val = set_reg_field_value_ex(reg_val, field_value, mask, shift);
		i++;
	}

	dm_write_index_reg(ctx, CGS_IND_REG__PCIE, index, reg_val);
	va_end(ap);

	return reg_val;
}

uint32_t generic_indirect_reg_get_sync(const struct dc_context *ctx,
		uint32_t index, int n,
		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
		...)
{
	uint32_t shift, mask, *field_value;
	uint32_t value = 0;
	int i = 1;

	va_list ap;

	va_start(ap, field_value1);

	value = dm_read_index_reg(ctx, CGS_IND_REG__PCIE, index);
	*field_value1 = get_reg_field_value_ex(value, mask1, shift1);

	while (i < n) {
		shift = va_arg(ap, uint32_t);
		mask = va_arg(ap, uint32_t);
		field_value = va_arg(ap, uint32_t *);

		*field_value = get_reg_field_value_ex(value, mask, shift);
		i++;
	}

	va_end(ap);

	return value;
}

void reg_sequence_start_gather(const struct dc_context *ctx)
{
	/* if reg sequence is supported and enabled, set flag to
	 * indicate we want to have REG_SET, REG_UPDATE macro build
	 * reg sequence command buffer rather than MMIO directly.
	 */

	if (ctx->dmub_srv && ctx->dc->debug.dmub_offload_enabled) {
		struct dc_reg_helper_state *offload =
			&ctx->dmub_srv->reg_helper_offload;

		/* caller sequence mismatch. need to debug caller. offload will not work!!! */
		ASSERT(!offload->gather_in_progress);

		offload->gather_in_progress = true;
	}
}
void reg_sequence_start_execute(const struct dc_context *ctx)
{
	struct dc_reg_helper_state *offload;

	if (!ctx->dmub_srv)
		return;

	offload = &ctx->dmub_srv->reg_helper_offload;

	if (offload && offload->gather_in_progress) {
		offload->gather_in_progress = false;
		offload->should_burst_write = false;
		switch (offload->cmd_data.cmd_common.header.type) {
		case DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE:
			submit_dmub_read_modify_write(offload, ctx);
			break;
		case DMUB_CMD__REG_REG_WAIT:
			submit_dmub_reg_wait(offload, ctx);
			break;
		case DMUB_CMD__REG_SEQ_BURST_WRITE:
			submit_dmub_burst_write(offload, ctx);
			break;
		default:
			return;
		}

		dc_dmub_srv_cmd_execute(ctx->dmub_srv);
	}
}

void reg_sequence_wait_done(const struct dc_context *ctx)
{
	/* callback to DM to poll for last submission done */
	struct dc_reg_helper_state *offload;

	if (!ctx->dmub_srv)
		return;

	offload = &ctx->dmub_srv->reg_helper_offload;

	if (offload &&
	    ctx->dc->debug.dmub_offload_enabled &&
	    !ctx->dc->debug.dmcub_emulation) {
		dc_dmub_srv_wait_idle(ctx->dmub_srv);
	}
}
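/*
 * Illustrative sketch (not part of the driver): the offload flow these
 * helpers imply. Between start_gather and start_execute, the REG_SET /
 * REG_UPDATE / REG_WAIT paths pack DMUB commands instead of doing MMIO;
 * start_execute flushes whatever was gathered, and wait_done polls until
 * DMUB has consumed the last submission.
 */
#if 0
	reg_sequence_start_gather(ctx);
	/* ... REG_SET / REG_UPDATE / REG_WAIT calls on ctx ... */
	reg_sequence_start_execute(ctx);
	reg_sequence_wait_done(ctx);
#endif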