dmub_srv.c
/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "../dmub_srv.h"
#include "dmub_dcn20.h"
#include "dmub_dcn21.h"
#include "dmub_cmd.h"
#include "dmub_dcn30.h"
#include "dmub_dcn301.h"
#include "dmub_dcn302.h"
#include "dmub_dcn303.h"
#include "dmub_dcn31.h"
#include "dmub_dcn315.h"
#include "dmub_dcn316.h"
#include "os_types.h"
/*
 * Note: the DMUB service is standalone. No additional headers should be
 * added below or above this line unless they reside within the DMUB
 * folder.
 */

/* Alignment for framebuffer memory. */
#define DMUB_FB_ALIGNMENT (1024 * 1024)

/* Stack size. */
#define DMUB_STACK_SIZE (128 * 1024)

/* Context size. */
#define DMUB_CONTEXT_SIZE (512 * 1024)

/* Mailbox size : Ring buffers are required for both inbox and outbox */
#define DMUB_MAILBOX_SIZE ((2 * DMUB_RB_SIZE))

/* Default state size if meta is absent. */
#define DMUB_FW_STATE_SIZE (64 * 1024)

/* Default tracebuffer size if meta is absent. */
#define DMUB_TRACE_BUFFER_SIZE (64 * 1024)

/* Default scratch mem size. */
#define DMUB_SCRATCH_MEM_SIZE (256)

/* Number of windows in use. */
#define DMUB_NUM_WINDOWS (DMUB_WINDOW_TOTAL)

/* Base addresses. */
#define DMUB_CW0_BASE (0x60000000)
#define DMUB_CW1_BASE (0x61000000)
#define DMUB_CW3_BASE (0x63000000)
#define DMUB_CW4_BASE (0x64000000)
#define DMUB_CW5_BASE (0x65000000)
#define DMUB_CW6_BASE (0x66000000)

#define DMUB_REGION5_BASE (0xA0000000)

static inline uint32_t dmub_align(uint32_t val, uint32_t factor)
{
	return (val + factor - 1) / factor * factor;
}
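
/*
 * Illustrative examples (sketch only): dmub_align() rounds val up to the
 * next multiple of factor, so dmub_align(100, 64) == 128,
 * dmub_align(128, 64) == 128 and dmub_align(0x3001, 4096) == 0x4000. It is
 * used below to pad region bases and sizes to 64-, 256- and 4096-byte
 * boundaries.
 */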

void dmub_flush_buffer_mem(const struct dmub_fb *fb)
{
	const uint8_t *base = (const uint8_t *)fb->cpu_addr;
	uint8_t buf[64];
	uint32_t pos, end;

	/**
	 * Read 64-byte chunks since we don't want to store a
	 * large temporary buffer for this purpose.
	 */
	end = fb->size / sizeof(buf) * sizeof(buf);

	for (pos = 0; pos < end; pos += sizeof(buf))
		dmub_memcpy(buf, base + pos, sizeof(buf));

	/* Read anything leftover into the buffer. */
	if (end < fb->size)
		dmub_memcpy(buf, base + pos, fb->size - end);
}

static const struct dmub_fw_meta_info *
dmub_get_fw_meta_info_from_blob(const uint8_t *blob, uint32_t blob_size, uint32_t meta_offset)
{
	const union dmub_fw_meta *meta;

	if (!blob || !blob_size)
		return NULL;

	if (blob_size < sizeof(union dmub_fw_meta) + meta_offset)
		return NULL;

	meta = (const union dmub_fw_meta *)(blob + blob_size - meta_offset -
					    sizeof(union dmub_fw_meta));

	if (meta->info.magic_value != DMUB_FW_META_MAGIC)
		return NULL;

	return &meta->info;
}

static const struct dmub_fw_meta_info *
dmub_get_fw_meta_info(const struct dmub_srv_region_params *params)
{
	const struct dmub_fw_meta_info *info = NULL;

	if (params->fw_bss_data && params->bss_data_size) {
		/* Legacy metadata region. */
		info = dmub_get_fw_meta_info_from_blob(params->fw_bss_data,
						       params->bss_data_size,
						       DMUB_FW_META_OFFSET);
	} else if (params->fw_inst_const && params->inst_const_size) {
		/* Combined metadata region - can be aligned to 16-bytes. */
		uint32_t i;

		for (i = 0; i < 16; ++i) {
			info = dmub_get_fw_meta_info_from_blob(
				params->fw_inst_const, params->inst_const_size, i);

			if (info)
				break;
		}
	}

	return info;
}
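
/*
 * Example of the lookup above (illustrative only): with meta_offset == 0 the
 * union dmub_fw_meta is expected in the very last sizeof(union dmub_fw_meta)
 * bytes of the blob; for the combined region the loop retries offsets 0..15,
 * which covers metadata padded to a 16-byte boundary. A blob without the
 * DMUB_FW_META_MAGIC value simply yields NULL, in which case the default
 * window sizes defined at the top of this file are used instead.
 */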

static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
{
	struct dmub_srv_hw_funcs *funcs = &dmub->hw_funcs;

	switch (asic) {
	case DMUB_ASIC_DCN20:
	case DMUB_ASIC_DCN21:
	case DMUB_ASIC_DCN30:
	case DMUB_ASIC_DCN301:
	case DMUB_ASIC_DCN302:
	case DMUB_ASIC_DCN303:
		dmub->regs = &dmub_srv_dcn20_regs;

		funcs->reset = dmub_dcn20_reset;
		funcs->reset_release = dmub_dcn20_reset_release;
		funcs->backdoor_load = dmub_dcn20_backdoor_load;
		funcs->setup_windows = dmub_dcn20_setup_windows;
		funcs->setup_mailbox = dmub_dcn20_setup_mailbox;
		funcs->get_inbox1_rptr = dmub_dcn20_get_inbox1_rptr;
		funcs->set_inbox1_wptr = dmub_dcn20_set_inbox1_wptr;
		funcs->is_supported = dmub_dcn20_is_supported;
		funcs->is_hw_init = dmub_dcn20_is_hw_init;
		funcs->set_gpint = dmub_dcn20_set_gpint;
		funcs->is_gpint_acked = dmub_dcn20_is_gpint_acked;
		funcs->get_gpint_response = dmub_dcn20_get_gpint_response;
		funcs->get_fw_status = dmub_dcn20_get_fw_boot_status;
		funcs->enable_dmub_boot_options = dmub_dcn20_enable_dmub_boot_options;
		funcs->skip_dmub_panel_power_sequence = dmub_dcn20_skip_dmub_panel_power_sequence;
		funcs->get_current_time = dmub_dcn20_get_current_time;

		// Out mailbox register access functions for RN and above
		funcs->setup_out_mailbox = dmub_dcn20_setup_out_mailbox;
		funcs->get_outbox1_wptr = dmub_dcn20_get_outbox1_wptr;
		funcs->set_outbox1_rptr = dmub_dcn20_set_outbox1_rptr;

		// outbox0 call stacks
		funcs->setup_outbox0 = dmub_dcn20_setup_outbox0;
		funcs->get_outbox0_wptr = dmub_dcn20_get_outbox0_wptr;
		funcs->set_outbox0_rptr = dmub_dcn20_set_outbox0_rptr;

		funcs->get_diagnostic_data = dmub_dcn20_get_diagnostic_data;

		if (asic == DMUB_ASIC_DCN21) {
			dmub->regs = &dmub_srv_dcn21_regs;

			funcs->is_phy_init = dmub_dcn21_is_phy_init;
		}
		if (asic == DMUB_ASIC_DCN30) {
			dmub->regs = &dmub_srv_dcn30_regs;

			funcs->backdoor_load = dmub_dcn30_backdoor_load;
			funcs->setup_windows = dmub_dcn30_setup_windows;
		}
		if (asic == DMUB_ASIC_DCN301) {
			dmub->regs = &dmub_srv_dcn301_regs;

			funcs->backdoor_load = dmub_dcn30_backdoor_load;
			funcs->setup_windows = dmub_dcn30_setup_windows;
		}
		if (asic == DMUB_ASIC_DCN302) {
			dmub->regs = &dmub_srv_dcn302_regs;

			funcs->backdoor_load = dmub_dcn30_backdoor_load;
			funcs->setup_windows = dmub_dcn30_setup_windows;
		}
		if (asic == DMUB_ASIC_DCN303) {
			dmub->regs = &dmub_srv_dcn303_regs;

			funcs->backdoor_load = dmub_dcn30_backdoor_load;
			funcs->setup_windows = dmub_dcn30_setup_windows;
		}
		break;

	case DMUB_ASIC_DCN31:
	case DMUB_ASIC_DCN31B:
	case DMUB_ASIC_DCN315:
	case DMUB_ASIC_DCN316:
		if (asic == DMUB_ASIC_DCN315)
			dmub->regs_dcn31 = &dmub_srv_dcn315_regs;
		else if (asic == DMUB_ASIC_DCN316)
			dmub->regs_dcn31 = &dmub_srv_dcn316_regs;
		else
			dmub->regs_dcn31 = &dmub_srv_dcn31_regs;
		funcs->reset = dmub_dcn31_reset;
		funcs->reset_release = dmub_dcn31_reset_release;
		funcs->backdoor_load = dmub_dcn31_backdoor_load;
		funcs->setup_windows = dmub_dcn31_setup_windows;
		funcs->setup_mailbox = dmub_dcn31_setup_mailbox;
		funcs->get_inbox1_rptr = dmub_dcn31_get_inbox1_rptr;
		funcs->set_inbox1_wptr = dmub_dcn31_set_inbox1_wptr;
		funcs->setup_out_mailbox = dmub_dcn31_setup_out_mailbox;
		funcs->get_outbox1_wptr = dmub_dcn31_get_outbox1_wptr;
		funcs->set_outbox1_rptr = dmub_dcn31_set_outbox1_rptr;
		funcs->is_supported = dmub_dcn31_is_supported;
		funcs->is_hw_init = dmub_dcn31_is_hw_init;
		funcs->set_gpint = dmub_dcn31_set_gpint;
		funcs->is_gpint_acked = dmub_dcn31_is_gpint_acked;
		funcs->get_gpint_response = dmub_dcn31_get_gpint_response;
		funcs->get_gpint_dataout = dmub_dcn31_get_gpint_dataout;
		funcs->get_fw_status = dmub_dcn31_get_fw_boot_status;
		funcs->enable_dmub_boot_options = dmub_dcn31_enable_dmub_boot_options;
		funcs->skip_dmub_panel_power_sequence = dmub_dcn31_skip_dmub_panel_power_sequence;
		// outbox0 call stacks
		funcs->setup_outbox0 = dmub_dcn31_setup_outbox0;
		funcs->get_outbox0_wptr = dmub_dcn31_get_outbox0_wptr;
		funcs->set_outbox0_rptr = dmub_dcn31_set_outbox0_rptr;

		funcs->get_diagnostic_data = dmub_dcn31_get_diagnostic_data;
		funcs->should_detect = dmub_dcn31_should_detect;
		funcs->get_current_time = dmub_dcn31_get_current_time;

		break;

	default:
		return false;
	}

	return true;
}

enum dmub_status dmub_srv_create(struct dmub_srv *dmub,
				 const struct dmub_srv_create_params *params)
{
	enum dmub_status status = DMUB_STATUS_OK;

	dmub_memset(dmub, 0, sizeof(*dmub));

	dmub->funcs = params->funcs;
	dmub->user_ctx = params->user_ctx;
	dmub->asic = params->asic;
	dmub->fw_version = params->fw_version;
	dmub->is_virtual = params->is_virtual;

	/* Setup asic dependent hardware funcs. */
	if (!dmub_srv_hw_setup(dmub, params->asic)) {
		status = DMUB_STATUS_INVALID;
		goto cleanup;
	}

	/* Override (some) hardware funcs based on user params. */
	if (params->hw_funcs) {
		if (params->hw_funcs->emul_get_inbox1_rptr)
			dmub->hw_funcs.emul_get_inbox1_rptr =
				params->hw_funcs->emul_get_inbox1_rptr;

		if (params->hw_funcs->emul_set_inbox1_wptr)
			dmub->hw_funcs.emul_set_inbox1_wptr =
				params->hw_funcs->emul_set_inbox1_wptr;

		if (params->hw_funcs->is_supported)
			dmub->hw_funcs.is_supported =
				params->hw_funcs->is_supported;
	}

	/* Sanity checks for required hw func pointers. */
	if (!dmub->hw_funcs.get_inbox1_rptr ||
	    !dmub->hw_funcs.set_inbox1_wptr) {
		status = DMUB_STATUS_INVALID;
		goto cleanup;
	}

cleanup:
	if (status == DMUB_STATUS_OK)
		dmub->sw_init = true;
	else
		dmub_srv_destroy(dmub);

	return status;
}

void dmub_srv_destroy(struct dmub_srv *dmub)
{
	dmub_memset(dmub, 0, sizeof(*dmub));
}
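
/*
 * Usage sketch (illustrative only): a DM-side caller typically fills
 * dmub_srv_create_params once at software init time and then creates the
 * service. The funcs/user_ctx/fw_version values below are placeholders.
 *
 *	struct dmub_srv_create_params create_params = {
 *		.asic = DMUB_ASIC_DCN21,
 *		.fw_version = fw_version,
 *		.funcs = &base_funcs,
 *		.user_ctx = dc_ctx,
 *	};
 *
 *	if (dmub_srv_create(dmub, &create_params) != DMUB_STATUS_OK)
 *		return false;
 */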

enum dmub_status
dmub_srv_calc_region_info(struct dmub_srv *dmub,
			  const struct dmub_srv_region_params *params,
			  struct dmub_srv_region_info *out)
{
	struct dmub_region *inst = &out->regions[DMUB_WINDOW_0_INST_CONST];
	struct dmub_region *stack = &out->regions[DMUB_WINDOW_1_STACK];
	struct dmub_region *data = &out->regions[DMUB_WINDOW_2_BSS_DATA];
	struct dmub_region *bios = &out->regions[DMUB_WINDOW_3_VBIOS];
	struct dmub_region *mail = &out->regions[DMUB_WINDOW_4_MAILBOX];
	struct dmub_region *trace_buff = &out->regions[DMUB_WINDOW_5_TRACEBUFF];
	struct dmub_region *fw_state = &out->regions[DMUB_WINDOW_6_FW_STATE];
	struct dmub_region *scratch_mem = &out->regions[DMUB_WINDOW_7_SCRATCH_MEM];
	const struct dmub_fw_meta_info *fw_info;
	uint32_t fw_state_size = DMUB_FW_STATE_SIZE;
	uint32_t trace_buffer_size = DMUB_TRACE_BUFFER_SIZE;
	uint32_t scratch_mem_size = DMUB_SCRATCH_MEM_SIZE;

	if (!dmub->sw_init)
		return DMUB_STATUS_INVALID;

	memset(out, 0, sizeof(*out));

	out->num_regions = DMUB_NUM_WINDOWS;

	inst->base = 0x0;
	inst->top = inst->base + params->inst_const_size;

	data->base = dmub_align(inst->top, 256);
	data->top = data->base + params->bss_data_size;

	/*
	 * All cache windows below should be aligned to the size
	 * of the DMCUB cache line, 64 bytes.
	 */

	stack->base = dmub_align(data->top, 256);
	stack->top = stack->base + DMUB_STACK_SIZE + DMUB_CONTEXT_SIZE;

	bios->base = dmub_align(stack->top, 256);
	bios->top = bios->base + params->vbios_size;

	mail->base = dmub_align(bios->top, 256);
	mail->top = mail->base + DMUB_MAILBOX_SIZE;

	fw_info = dmub_get_fw_meta_info(params);

	if (fw_info) {
		fw_state_size = fw_info->fw_region_size;
		trace_buffer_size = fw_info->trace_buffer_size;

		/**
		 * If DM didn't fill in a version, then fill it in based on
		 * the firmware meta now that we have it.
		 *
		 * TODO: Make it easier for driver to extract this out to
		 * pass during creation.
		 */
		if (dmub->fw_version == 0)
			dmub->fw_version = fw_info->fw_version;
	}

	trace_buff->base = dmub_align(mail->top, 256);
	trace_buff->top = trace_buff->base + dmub_align(trace_buffer_size, 64);

	fw_state->base = dmub_align(trace_buff->top, 256);
	fw_state->top = fw_state->base + dmub_align(fw_state_size, 64);

	scratch_mem->base = dmub_align(fw_state->top, 256);
	scratch_mem->top = scratch_mem->base + dmub_align(scratch_mem_size, 64);

	out->fb_size = dmub_align(scratch_mem->top, 4096);

	return DMUB_STATUS_OK;
}
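
/*
 * Usage sketch (illustrative only): the region and framebuffer helpers are
 * meant to be used back to back. The allocation step is a placeholder for
 * whatever the DM uses to reserve out->fb_size bytes of memory.
 *
 *	struct dmub_srv_region_params region_params = { ... };
 *	struct dmub_srv_region_info region_info;
 *	struct dmub_srv_fb_params fb_params;
 *	struct dmub_srv_fb_info fb_info;
 *
 *	dmub_srv_calc_region_info(dmub, &region_params, &region_info);
 *	// allocate region_info.fb_size bytes, DMUB_FB_ALIGNMENT aligned
 *	fb_params.cpu_addr = cpu_addr;
 *	fb_params.gpu_addr = gpu_addr;
 *	fb_params.region_info = &region_info;
 *	dmub_srv_calc_fb_info(dmub, &fb_params, &fb_info);
 */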

enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
				       const struct dmub_srv_fb_params *params,
				       struct dmub_srv_fb_info *out)
{
	uint8_t *cpu_base;
	uint64_t gpu_base;
	uint32_t i;

	if (!dmub->sw_init)
		return DMUB_STATUS_INVALID;

	memset(out, 0, sizeof(*out));

	if (params->region_info->num_regions != DMUB_NUM_WINDOWS)
		return DMUB_STATUS_INVALID;

	cpu_base = (uint8_t *)params->cpu_addr;
	gpu_base = params->gpu_addr;

	for (i = 0; i < DMUB_NUM_WINDOWS; ++i) {
		const struct dmub_region *reg =
			&params->region_info->regions[i];

		out->fb[i].cpu_addr = cpu_base + reg->base;
		out->fb[i].gpu_addr = gpu_base + reg->base;
		out->fb[i].size = reg->top - reg->base;
	}

	out->num_fb = DMUB_NUM_WINDOWS;

	return DMUB_STATUS_OK;
}

enum dmub_status dmub_srv_has_hw_support(struct dmub_srv *dmub,
					 bool *is_supported)
{
	*is_supported = false;

	if (!dmub->sw_init)
		return DMUB_STATUS_INVALID;

	if (dmub->hw_funcs.is_supported)
		*is_supported = dmub->hw_funcs.is_supported(dmub);

	return DMUB_STATUS_OK;
}

enum dmub_status dmub_srv_is_hw_init(struct dmub_srv *dmub, bool *is_hw_init)
{
	*is_hw_init = false;

	if (!dmub->sw_init)
		return DMUB_STATUS_INVALID;

	if (!dmub->hw_init)
		return DMUB_STATUS_OK;

	if (dmub->hw_funcs.is_hw_init)
		*is_hw_init = dmub->hw_funcs.is_hw_init(dmub);

	return DMUB_STATUS_OK;
}
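
/*
 * Bring-up ordering sketch (illustrative only; timeouts are placeholders
 * and the exact sequence is owned by the DM): after the framebuffer info has
 * been computed, a caller checks support and then programs the hardware with
 * the functions that follow, roughly:
 *
 *	bool is_supported = false;
 *
 *	dmub_srv_has_hw_support(dmub, &is_supported);
 *	if (is_supported) {
 *		dmub_srv_hw_init(dmub, &hw_params);
 *		dmub_srv_wait_for_auto_load(dmub, 100000);
 *		dmub_srv_wait_for_phy_init(dmub, 100000);
 *	}
 */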

enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
				  const struct dmub_srv_hw_params *params)
{
	struct dmub_fb *inst_fb = params->fb[DMUB_WINDOW_0_INST_CONST];
	struct dmub_fb *stack_fb = params->fb[DMUB_WINDOW_1_STACK];
	struct dmub_fb *data_fb = params->fb[DMUB_WINDOW_2_BSS_DATA];
	struct dmub_fb *bios_fb = params->fb[DMUB_WINDOW_3_VBIOS];
	struct dmub_fb *mail_fb = params->fb[DMUB_WINDOW_4_MAILBOX];
	struct dmub_fb *tracebuff_fb = params->fb[DMUB_WINDOW_5_TRACEBUFF];
	struct dmub_fb *fw_state_fb = params->fb[DMUB_WINDOW_6_FW_STATE];
	struct dmub_fb *scratch_mem_fb = params->fb[DMUB_WINDOW_7_SCRATCH_MEM];

	struct dmub_rb_init_params rb_params, outbox0_rb_params;
	struct dmub_window cw0, cw1, cw2, cw3, cw4, cw5, cw6;
	struct dmub_region inbox1, outbox1, outbox0;

	if (!dmub->sw_init)
		return DMUB_STATUS_INVALID;

	if (!inst_fb || !stack_fb || !data_fb || !bios_fb || !mail_fb ||
	    !tracebuff_fb || !fw_state_fb || !scratch_mem_fb) {
		ASSERT(0);
		return DMUB_STATUS_INVALID;
	}

	dmub->fb_base = params->fb_base;
	dmub->fb_offset = params->fb_offset;
	dmub->psp_version = params->psp_version;

	if (dmub->hw_funcs.reset)
		dmub->hw_funcs.reset(dmub);

	cw0.offset.quad_part = inst_fb->gpu_addr;
	cw0.region.base = DMUB_CW0_BASE;
	cw0.region.top = cw0.region.base + inst_fb->size - 1;

	cw1.offset.quad_part = stack_fb->gpu_addr;
	cw1.region.base = DMUB_CW1_BASE;
	cw1.region.top = cw1.region.base + stack_fb->size - 1;

	if (params->load_inst_const && dmub->hw_funcs.backdoor_load) {
		/**
		 * Read back all the instruction memory so we don't hang the
		 * DMCUB when backdoor loading if the write from x86 hasn't been
		 * flushed yet. This only occurs in backdoor loading.
		 */
		dmub_flush_buffer_mem(inst_fb);
		dmub->hw_funcs.backdoor_load(dmub, &cw0, &cw1);
	}

	cw2.offset.quad_part = data_fb->gpu_addr;
	cw2.region.base = DMUB_CW0_BASE + inst_fb->size;
	cw2.region.top = cw2.region.base + data_fb->size;

	cw3.offset.quad_part = bios_fb->gpu_addr;
	cw3.region.base = DMUB_CW3_BASE;
	cw3.region.top = cw3.region.base + bios_fb->size;

	cw4.offset.quad_part = mail_fb->gpu_addr;
	cw4.region.base = DMUB_CW4_BASE;
	cw4.region.top = cw4.region.base + mail_fb->size;

	/**
	 * The mailbox region is doubled to accommodate the inbox and outbox.
	 * Note: currently the total mailbox size is 16KB, split equally into
	 * 8KB for the inbox and 8KB for the outbox. If this configuration is
	 * changed, then the uncached base address configuration of outbox1
	 * has to be updated in funcs->setup_out_mailbox.
	 */
	inbox1.base = cw4.region.base;
	inbox1.top = cw4.region.base + DMUB_RB_SIZE;
	outbox1.base = inbox1.top;
	outbox1.top = cw4.region.top;

	cw5.offset.quad_part = tracebuff_fb->gpu_addr;
	cw5.region.base = DMUB_CW5_BASE;
	cw5.region.top = cw5.region.base + tracebuff_fb->size;

	outbox0.base = DMUB_REGION5_BASE + TRACE_BUFFER_ENTRY_OFFSET;
	outbox0.top = outbox0.base + tracebuff_fb->size - TRACE_BUFFER_ENTRY_OFFSET;

	cw6.offset.quad_part = fw_state_fb->gpu_addr;
	cw6.region.base = DMUB_CW6_BASE;
	cw6.region.top = cw6.region.base + fw_state_fb->size;

	dmub->fw_state = fw_state_fb->cpu_addr;

	dmub->scratch_mem_fb = *scratch_mem_fb;

	if (dmub->hw_funcs.setup_windows)
		dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4, &cw5, &cw6);

	if (dmub->hw_funcs.setup_outbox0)
		dmub->hw_funcs.setup_outbox0(dmub, &outbox0);

	if (dmub->hw_funcs.setup_mailbox)
		dmub->hw_funcs.setup_mailbox(dmub, &inbox1);
	if (dmub->hw_funcs.setup_out_mailbox)
		dmub->hw_funcs.setup_out_mailbox(dmub, &outbox1);

	dmub_memset(&rb_params, 0, sizeof(rb_params));
	rb_params.ctx = dmub;
	rb_params.base_address = mail_fb->cpu_addr;
	rb_params.capacity = DMUB_RB_SIZE;
	dmub_rb_init(&dmub->inbox1_rb, &rb_params);

	// Initialize outbox1 ring buffer
	rb_params.ctx = dmub;
	rb_params.base_address = (void *)((uint8_t *)(mail_fb->cpu_addr) + DMUB_RB_SIZE);
	rb_params.capacity = DMUB_RB_SIZE;
	dmub_rb_init(&dmub->outbox1_rb, &rb_params);

	dmub_memset(&outbox0_rb_params, 0, sizeof(outbox0_rb_params));
	outbox0_rb_params.ctx = dmub;
	outbox0_rb_params.base_address = (void *)((uintptr_t)(tracebuff_fb->cpu_addr) + TRACE_BUFFER_ENTRY_OFFSET);
	outbox0_rb_params.capacity = tracebuff_fb->size - dmub_align(TRACE_BUFFER_ENTRY_OFFSET, 64);
	dmub_rb_init(&dmub->outbox0_rb, &outbox0_rb_params);

	/* Report to DMUB what features are supported by current driver */
	if (dmub->hw_funcs.enable_dmub_boot_options)
		dmub->hw_funcs.enable_dmub_boot_options(dmub, params);

	if (dmub->hw_funcs.reset_release)
		dmub->hw_funcs.reset_release(dmub);

	dmub->hw_init = true;

	return DMUB_STATUS_OK;
}

enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
{
	if (!dmub->sw_init)
		return DMUB_STATUS_INVALID;

	if (dmub->hw_funcs.reset)
		dmub->hw_funcs.reset(dmub);

	dmub->hw_init = false;

	return DMUB_STATUS_OK;
}

enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
				    const union dmub_rb_cmd *cmd)
{
	if (!dmub->hw_init)
		return DMUB_STATUS_INVALID;

	if (dmub_rb_push_front(&dmub->inbox1_rb, cmd))
		return DMUB_STATUS_OK;

	return DMUB_STATUS_QUEUE_FULL;
}

enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub)
{
	struct dmub_rb flush_rb;

	if (!dmub->hw_init)
		return DMUB_STATUS_INVALID;

	/**
	 * Read back all the queued commands to ensure that they've
	 * been flushed to framebuffer memory. Otherwise DMCUB might
	 * read back stale, fully invalid or partially invalid data.
	 */
	flush_rb = dmub->inbox1_rb;
	flush_rb.rptr = dmub->inbox1_last_wptr;
	dmub_rb_flush_pending(&flush_rb);

	dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1_rb.wrpt);

	dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt;

	return DMUB_STATUS_OK;
}
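
/*
 * Command submission sketch (illustrative only): inbox1 commands follow a
 * queue / execute / wait pattern; dmub_srv_cmd_with_reply_data() further
 * below is the in-file example of it.
 *
 *	dmub_srv_cmd_queue(dmub, &cmd);		// copy command into inbox1
 *	dmub_srv_cmd_execute(dmub);		// publish the new write pointer
 *	dmub_srv_wait_for_idle(dmub, 100000);	// optionally wait for completion
 */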

enum dmub_status dmub_srv_wait_for_auto_load(struct dmub_srv *dmub,
					     uint32_t timeout_us)
{
	uint32_t i;

	if (!dmub->hw_init)
		return DMUB_STATUS_INVALID;

	for (i = 0; i <= timeout_us; i += 100) {
		union dmub_fw_boot_status status = dmub->hw_funcs.get_fw_status(dmub);

		if (status.bits.dal_fw && status.bits.mailbox_rdy)
			return DMUB_STATUS_OK;

		udelay(100);
	}

	return DMUB_STATUS_TIMEOUT;
}

enum dmub_status dmub_srv_wait_for_phy_init(struct dmub_srv *dmub,
					    uint32_t timeout_us)
{
	uint32_t i = 0;

	if (!dmub->hw_init)
		return DMUB_STATUS_INVALID;

	if (!dmub->hw_funcs.is_phy_init)
		return DMUB_STATUS_OK;

	for (i = 0; i <= timeout_us; i += 10) {
		if (dmub->hw_funcs.is_phy_init(dmub))
			return DMUB_STATUS_OK;

		udelay(10);
	}

	return DMUB_STATUS_TIMEOUT;
}

enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub,
					uint32_t timeout_us)
{
	uint32_t i, rptr;

	if (!dmub->hw_init)
		return DMUB_STATUS_INVALID;

	for (i = 0; i <= timeout_us; ++i) {
		rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);

		if (rptr > dmub->inbox1_rb.capacity)
			return DMUB_STATUS_HW_FAILURE;

		dmub->inbox1_rb.rptr = rptr;

		if (dmub_rb_empty(&dmub->inbox1_rb))
			return DMUB_STATUS_OK;

		udelay(1);
	}

	return DMUB_STATUS_TIMEOUT;
}

enum dmub_status
dmub_srv_send_gpint_command(struct dmub_srv *dmub,
			    enum dmub_gpint_command command_code,
			    uint16_t param, uint32_t timeout_us)
{
	union dmub_gpint_data_register reg;
	uint32_t i;

	if (!dmub->sw_init)
		return DMUB_STATUS_INVALID;

	if (!dmub->hw_funcs.set_gpint)
		return DMUB_STATUS_INVALID;

	if (!dmub->hw_funcs.is_gpint_acked)
		return DMUB_STATUS_INVALID;

	reg.bits.status = 1;
	reg.bits.command_code = command_code;
	reg.bits.param = param;

	dmub->hw_funcs.set_gpint(dmub, reg);

	for (i = 0; i < timeout_us; ++i) {
		udelay(1);

		if (dmub->hw_funcs.is_gpint_acked(dmub, reg))
			return DMUB_STATUS_OK;
	}

	return DMUB_STATUS_TIMEOUT;
}
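
/*
 * GPINT usage sketch (illustrative only; the command code and timeout are
 * placeholders chosen by the caller): the command is typically considered
 * acknowledged once the firmware clears the status bit set above, after
 * which the 32-bit response can be read back.
 *
 *	uint32_t response;
 *
 *	if (dmub_srv_send_gpint_command(dmub, DMUB_GPINT__GET_FW_VERSION,
 *					0, 30) == DMUB_STATUS_OK)
 *		dmub_srv_get_gpint_response(dmub, &response);
 */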

enum dmub_status dmub_srv_get_gpint_response(struct dmub_srv *dmub,
					     uint32_t *response)
{
	*response = 0;

	if (!dmub->sw_init)
		return DMUB_STATUS_INVALID;

	if (!dmub->hw_funcs.get_gpint_response)
		return DMUB_STATUS_INVALID;

	*response = dmub->hw_funcs.get_gpint_response(dmub);

	return DMUB_STATUS_OK;
}

enum dmub_status dmub_srv_get_gpint_dataout(struct dmub_srv *dmub,
					    uint32_t *dataout)
{
	*dataout = 0;

	if (!dmub->sw_init)
		return DMUB_STATUS_INVALID;

	if (!dmub->hw_funcs.get_gpint_dataout)
		return DMUB_STATUS_INVALID;

	*dataout = dmub->hw_funcs.get_gpint_dataout(dmub);

	return DMUB_STATUS_OK;
}

enum dmub_status dmub_srv_get_fw_boot_status(struct dmub_srv *dmub,
					     union dmub_fw_boot_status *status)
{
	status->all = 0;

	if (!dmub->sw_init)
		return DMUB_STATUS_INVALID;

	if (dmub->hw_funcs.get_fw_status)
		*status = dmub->hw_funcs.get_fw_status(dmub);

	return DMUB_STATUS_OK;
}

enum dmub_status dmub_srv_cmd_with_reply_data(struct dmub_srv *dmub,
					      union dmub_rb_cmd *cmd)
{
	enum dmub_status status = DMUB_STATUS_OK;

	// Queue command
	status = dmub_srv_cmd_queue(dmub, cmd);

	if (status != DMUB_STATUS_OK)
		return status;

	// Execute command
	status = dmub_srv_cmd_execute(dmub);

	if (status != DMUB_STATUS_OK)
		return status;

	// Wait for DMUB to process command
	status = dmub_srv_wait_for_idle(dmub, 100000);

	if (status != DMUB_STATUS_OK)
		return status;

	// Copy data back from ring buffer into command
	dmub_rb_get_return_data(&dmub->inbox1_rb, cmd);

	return status;
}

static inline bool dmub_rb_out_trace_buffer_front(struct dmub_rb *rb,
						  void *entry)
{
	const uint64_t *src = (const uint64_t *)(rb->base_address) + rb->rptr / sizeof(uint64_t);
	uint64_t *dst = (uint64_t *)entry;
	uint8_t i;
	uint8_t loop_count;

	if (rb->rptr == rb->wrpt)
		return false;

	loop_count = sizeof(struct dmcub_trace_buf_entry) / sizeof(uint64_t);
	// copying data
	for (i = 0; i < loop_count; i++)
		*dst++ = *src++;

	rb->rptr += sizeof(struct dmcub_trace_buf_entry);

	rb->rptr %= rb->capacity;

	return true;
}

bool dmub_srv_get_outbox0_msg(struct dmub_srv *dmub, struct dmcub_trace_buf_entry *entry)
{
	dmub->outbox0_rb.wrpt = dmub->hw_funcs.get_outbox0_wptr(dmub);

	return dmub_rb_out_trace_buffer_front(&dmub->outbox0_rb, (void *)entry);
}
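
/*
 * Trace readout sketch (illustrative only): outbox0 carries fixed-size
 * dmcub_trace_buf_entry records, so a caller typically drains it in a loop
 * until no entry is left.
 *
 *	struct dmcub_trace_buf_entry entry;
 *
 *	while (dmub_srv_get_outbox0_msg(dmub, &entry))
 *		; // hand entry off to tracing/logging (placeholder)
 */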

bool dmub_srv_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data)
{
	if (!dmub || !dmub->hw_funcs.get_diagnostic_data || !diag_data)
		return false;
	dmub->hw_funcs.get_diagnostic_data(dmub, diag_data);
	return true;
}

bool dmub_srv_should_detect(struct dmub_srv *dmub)
{
	if (!dmub->hw_init || !dmub->hw_funcs.should_detect)
		return false;

	return dmub->hw_funcs.should_detect(dmub);
}

enum dmub_status dmub_srv_clear_inbox0_ack(struct dmub_srv *dmub)
{
	if (!dmub->hw_init || !dmub->hw_funcs.clear_inbox0_ack_register)
		return DMUB_STATUS_INVALID;

	dmub->hw_funcs.clear_inbox0_ack_register(dmub);
	return DMUB_STATUS_OK;
}

enum dmub_status dmub_srv_wait_for_inbox0_ack(struct dmub_srv *dmub, uint32_t timeout_us)
{
	uint32_t i = 0;
	uint32_t ack = 0;

	if (!dmub->hw_init || !dmub->hw_funcs.read_inbox0_ack_register)
		return DMUB_STATUS_INVALID;

	for (i = 0; i <= timeout_us; i++) {
		ack = dmub->hw_funcs.read_inbox0_ack_register(dmub);
		if (ack)
			return DMUB_STATUS_OK;
	}
	return DMUB_STATUS_TIMEOUT;
}

enum dmub_status dmub_srv_send_inbox0_cmd(struct dmub_srv *dmub,
					  union dmub_inbox0_data_register data)
{
	if (!dmub->hw_init || !dmub->hw_funcs.send_inbox0_cmd)
		return DMUB_STATUS_INVALID;

	dmub->hw_funcs.send_inbox0_cmd(dmub, data);
	return DMUB_STATUS_OK;
}
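
/*
 * Inbox0 handshake sketch (illustrative only; the timeout is a placeholder):
 * register-based inbox0 commands are paired with an explicit acknowledge,
 * roughly:
 *
 *	dmub_srv_clear_inbox0_ack(dmub);
 *	dmub_srv_send_inbox0_cmd(dmub, data);
 *	dmub_srv_wait_for_inbox0_ack(dmub, 100000);
 */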