skl-sst.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * skl-sst.c - HDA DSP library functions for SKL platform
 *
 * Copyright (C) 2014-15, Intel Corporation.
 * Author: Rafal Redzimski <rafal.f.redzimski@intel.com>
 *	   Jeeja KP <jeeja.kp@intel.com>
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/uuid.h>
#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"
#include "../common/sst-ipc.h"
#include "skl.h"

#define SKL_BASEFW_TIMEOUT	300
#define SKL_INIT_TIMEOUT	1000

/* Intel HD Audio SRAM Window 0 */
#define SKL_ADSP_SRAM0_BASE	0x8000

/* Firmware status window */
#define SKL_ADSP_FW_STATUS	SKL_ADSP_SRAM0_BASE
#define SKL_ADSP_ERROR_CODE	(SKL_ADSP_FW_STATUS + 0x4)

#define SKL_NUM_MODULES		1

static bool skl_check_fw_status(struct sst_dsp *ctx, u32 status)
{
	u32 cur_sts;

	cur_sts = sst_dsp_shim_read(ctx, SKL_ADSP_FW_STATUS) & SKL_FW_STS_MASK;

	return (cur_sts == status);
}

static int skl_transfer_firmware(struct sst_dsp *ctx,
		const void *basefw, u32 base_fw_size)
{
	int ret = 0;

	ret = ctx->cl_dev.ops.cl_copy_to_dmabuf(ctx, basefw, base_fw_size,
						true);
	if (ret < 0)
		return ret;

	ret = sst_dsp_register_poll(ctx,
			SKL_ADSP_FW_STATUS,
			SKL_FW_STS_MASK,
			SKL_FW_RFW_START,
			SKL_BASEFW_TIMEOUT,
			"Firmware boot");

	ctx->cl_dev.ops.cl_stop_dma(ctx);

	return ret;
}

#define SKL_ADSP_FW_BIN_HDR_OFFSET 0x284

static int skl_load_base_firmware(struct sst_dsp *ctx)
{
	int ret = 0, i;
	struct skl_dev *skl = ctx->thread_context;
	struct firmware stripped_fw;
	u32 reg;

	skl->boot_complete = false;
	init_waitqueue_head(&skl->boot_wait);

	if (ctx->fw == NULL) {
		ret = request_firmware(&ctx->fw, ctx->fw_name, ctx->dev);
		if (ret < 0) {
			dev_err(ctx->dev, "Request firmware failed %d\n", ret);
			return -EIO;
		}
	}

	/* parse uuids on first boot */
	if (skl->is_first_boot) {
		ret = snd_skl_parse_uuids(ctx, ctx->fw, SKL_ADSP_FW_BIN_HDR_OFFSET, 0);
		if (ret < 0) {
			dev_err(ctx->dev, "UUID parsing err: %d\n", ret);
			release_firmware(ctx->fw);
			skl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
			return ret;
		}
	}

	/* check for extended manifest */
	stripped_fw.data = ctx->fw->data;
	stripped_fw.size = ctx->fw->size;

	skl_dsp_strip_extended_manifest(&stripped_fw);

	ret = skl_dsp_boot(ctx);
	if (ret < 0) {
		dev_err(ctx->dev, "Boot dsp core failed ret: %d\n", ret);
		goto skl_load_base_firmware_failed;
	}

	ret = skl_cldma_prepare(ctx);
	if (ret < 0) {
		dev_err(ctx->dev, "CL dma prepare failed: %d\n", ret);
		goto skl_load_base_firmware_failed;
	}

	/* enable Interrupt */
	skl_ipc_int_enable(ctx);
	skl_ipc_op_int_enable(ctx);

	/* check ROM Status */
	for (i = SKL_INIT_TIMEOUT; i > 0; --i) {
		if (skl_check_fw_status(ctx, SKL_FW_INIT)) {
			dev_dbg(ctx->dev,
				"ROM loaded, we can continue with FW loading\n");
			break;
		}
		mdelay(1);
	}
	if (!i) {
		reg = sst_dsp_shim_read(ctx, SKL_ADSP_FW_STATUS);
		dev_err(ctx->dev,
			"Timeout waiting for ROM init done, reg:0x%x\n", reg);
		ret = -EIO;
		goto transfer_firmware_failed;
	}

	ret = skl_transfer_firmware(ctx, stripped_fw.data, stripped_fw.size);
	if (ret < 0) {
		dev_err(ctx->dev, "Transfer firmware failed: %d\n", ret);
		goto transfer_firmware_failed;
	} else {
		ret = wait_event_timeout(skl->boot_wait, skl->boot_complete,
					msecs_to_jiffies(SKL_IPC_BOOT_MSECS));
		if (ret == 0) {
			dev_err(ctx->dev, "DSP boot failed, FW Ready timed-out\n");
			ret = -EIO;
			goto transfer_firmware_failed;
		}

		dev_dbg(ctx->dev, "Download firmware successful: %d\n", ret);
		skl->fw_loaded = true;
	}
	return 0;
transfer_firmware_failed:
	ctx->cl_dev.ops.cl_cleanup_controller(ctx);
skl_load_base_firmware_failed:
	skl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
	release_firmware(ctx->fw);
	ctx->fw = NULL;
	return ret;
}
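
/*
 * Power-state callbacks for the SKL DSP cores. Core 0 runs the base
 * firmware, so bringing it back to D0 implies reloading the firmware and
 * any libraries that were lost while the core was in D3.
 */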
static int skl_set_dsp_D0(struct sst_dsp *ctx, unsigned int core_id)
{
	int ret;
	struct skl_ipc_dxstate_info dx;
	struct skl_dev *skl = ctx->thread_context;
	unsigned int core_mask = SKL_DSP_CORE_MASK(core_id);

	/* If core0 is being turned on, we need to load the FW */
	if (core_id == SKL_DSP_CORE0_ID) {
		ret = skl_load_base_firmware(ctx);
		if (ret < 0) {
			dev_err(ctx->dev, "unable to load firmware\n");
			return ret;
		}

		/* load libs as they are also lost on D3 */
		if (skl->lib_count > 1) {
			ret = ctx->fw_ops.load_library(ctx, skl->lib_info,
						skl->lib_count);
			if (ret < 0) {
				dev_err(ctx->dev, "reload libs failed: %d\n",
						ret);
				return ret;
			}
		}
	}

	/*
	 * If any core other than core 0 is being moved to D0, enable the
	 * core and send the set dx IPC for the core.
	 */
	if (core_id != SKL_DSP_CORE0_ID) {
		ret = skl_dsp_enable_core(ctx, core_mask);
		if (ret < 0)
			return ret;

		dx.core_mask = core_mask;
		dx.dx_mask = core_mask;

		ret = skl_ipc_set_dx(&skl->ipc, SKL_INSTANCE_ID,
					SKL_BASE_FW_MODULE_ID, &dx);
		if (ret < 0) {
			dev_err(ctx->dev, "Failed to set dsp to D0: core id = %d\n",
					core_id);
			skl_dsp_disable_core(ctx, core_mask);
		}
	}

	skl->cores.state[core_id] = SKL_DSP_RUNNING;

	return 0;
}

static int skl_set_dsp_D3(struct sst_dsp *ctx, unsigned int core_id)
{
	int ret;
	struct skl_ipc_dxstate_info dx;
	struct skl_dev *skl = ctx->thread_context;
	unsigned int core_mask = SKL_DSP_CORE_MASK(core_id);

	dx.core_mask = core_mask;
	dx.dx_mask = SKL_IPC_D3_MASK;

	ret = skl_ipc_set_dx(&skl->ipc, SKL_INSTANCE_ID, SKL_BASE_FW_MODULE_ID, &dx);
	if (ret < 0)
		dev_err(ctx->dev, "set Dx core %d fail: %d\n", core_id, ret);

	if (core_id == SKL_DSP_CORE0_ID) {
		/* disable Interrupt */
		ctx->cl_dev.ops.cl_cleanup_controller(ctx);
		skl_cldma_int_disable(ctx);
		skl_ipc_op_int_disable(ctx);
		skl_ipc_int_disable(ctx);
	}

	ret = skl_dsp_disable_core(ctx, core_mask);
	if (ret < 0)
		return ret;

	skl->cores.state[core_id] = SKL_DSP_RESET;
	return ret;
}

static unsigned int skl_get_errorcode(struct sst_dsp *ctx)
{
	return sst_dsp_shim_read(ctx, SKL_ADSP_ERROR_CODE);
}

/*
 * since get/set_module are called from DAPM context,
 * we don't need a lock for the usage count
 */
static int skl_get_module(struct sst_dsp *ctx, u16 mod_id)
{
	struct skl_module_table *module;

	list_for_each_entry(module, &ctx->module_list, list) {
		if (module->mod_info->mod_id == mod_id)
			return ++module->usage_cnt;
	}

	return -EINVAL;
}

static int skl_put_module(struct sst_dsp *ctx, u16 mod_id)
{
	struct skl_module_table *module;

	list_for_each_entry(module, &ctx->module_list, list) {
		if (module->mod_info->mod_id == mod_id)
			return --module->usage_cnt;
	}

	return -EINVAL;
}
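
/*
 * Request the loadable module binary from userspace and cache it in the
 * per-DSP module list so later loads of the same module can reuse the
 * firmware image instead of requesting it again.
 */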
static struct skl_module_table *skl_fill_module_table(struct sst_dsp *ctx,
						char *mod_name, int mod_id)
{
	const struct firmware *fw;
	struct skl_module_table *skl_module;
	unsigned int size;
	int ret;

	ret = request_firmware(&fw, mod_name, ctx->dev);
	if (ret < 0) {
		dev_err(ctx->dev, "Request Module %s failed: %d\n",
						mod_name, ret);
		return NULL;
	}

	skl_module = devm_kzalloc(ctx->dev, sizeof(*skl_module), GFP_KERNEL);
	if (skl_module == NULL) {
		release_firmware(fw);
		return NULL;
	}

	size = sizeof(*skl_module->mod_info);
	skl_module->mod_info = devm_kzalloc(ctx->dev, size, GFP_KERNEL);
	if (skl_module->mod_info == NULL) {
		release_firmware(fw);
		return NULL;
	}

	skl_module->mod_info->mod_id = mod_id;
	skl_module->mod_info->fw = fw;
	list_add(&skl_module->list, &ctx->module_list);

	return skl_module;
}

/* get a module from its unique ID */
static struct skl_module_table *skl_module_get_from_id(
			struct sst_dsp *ctx, u16 mod_id)
{
	struct skl_module_table *module;

	if (list_empty(&ctx->module_list)) {
		dev_err(ctx->dev, "Module list is empty\n");
		return NULL;
	}

	list_for_each_entry(module, &ctx->module_list, list) {
		if (module->mod_info->mod_id == mod_id)
			return module;
	}

	return NULL;
}

static int skl_transfer_module(struct sst_dsp *ctx, const void *data,
			u32 size, u16 mod_id, u8 table_id, bool is_module)
{
	int ret, bytes_left, curr_pos;
	struct skl_dev *skl = ctx->thread_context;

	skl->mod_load_complete = false;

	bytes_left = ctx->cl_dev.ops.cl_copy_to_dmabuf(ctx, data, size, false);
	if (bytes_left < 0)
		return bytes_left;

	/* check is_module flag to load module or library */
	if (is_module)
		ret = skl_ipc_load_modules(&skl->ipc, SKL_NUM_MODULES, &mod_id);
	else
		ret = skl_sst_ipc_load_library(&skl->ipc, 0, table_id, false);

	if (ret < 0) {
		dev_err(ctx->dev, "Failed to Load %s with err %d\n",
				is_module ? "module" : "lib", ret);
		goto out;
	}

	/*
	 * if bytes_left > 0 then wait for BDL complete interrupt and
	 * copy the next chunk till bytes_left is 0. if bytes_left is
	 * zero, then wait for load module IPC reply
	 */
	while (bytes_left > 0) {
		curr_pos = size - bytes_left;

		ret = skl_cldma_wait_interruptible(ctx);
		if (ret < 0)
			goto out;

		bytes_left = ctx->cl_dev.ops.cl_copy_to_dmabuf(ctx,
							data + curr_pos,
							bytes_left, false);
	}

	ret = wait_event_timeout(skl->mod_load_wait, skl->mod_load_complete,
				msecs_to_jiffies(SKL_IPC_BOOT_MSECS));
	if (ret == 0 || !skl->mod_load_status) {
		dev_err(ctx->dev, "Module Load failed\n");
		ret = -EIO;
	}

out:
	ctx->cl_dev.ops.cl_stop_dma(ctx);

	return ret;
}
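
/*
 * Load all additional firmware libraries. Index 0 is the base firmware,
 * so transfers start at index 1; each library is streamed over the code
 * loader DMA and announced with a load-library IPC (is_module == false).
 */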
static int
skl_load_library(struct sst_dsp *ctx, struct skl_lib_info *linfo, int lib_count)
{
	struct skl_dev *skl = ctx->thread_context;
	struct firmware stripped_fw;
	int ret, i;

	/* library indices start from 1 to N. 0 represents base FW */
	for (i = 1; i < lib_count; i++) {
		ret = skl_prepare_lib_load(skl, &skl->lib_info[i], &stripped_fw,
					SKL_ADSP_FW_BIN_HDR_OFFSET, i);
		if (ret < 0)
			goto load_library_failed;
		ret = skl_transfer_module(ctx, stripped_fw.data,
				stripped_fw.size, 0, i, false);
		if (ret < 0)
			goto load_library_failed;
	}

	return 0;

load_library_failed:
	skl_release_library(linfo, lib_count);
	return ret;
}

static int skl_load_module(struct sst_dsp *ctx, u16 mod_id, u8 *guid)
{
	struct skl_module_table *module_entry = NULL;
	int ret = 0;
	char mod_name[64]; /* guid str = 32 chars + 4 hyphens */

	snprintf(mod_name, sizeof(mod_name), "intel/dsp_fw_%pUL.bin", guid);

	module_entry = skl_module_get_from_id(ctx, mod_id);
	if (module_entry == NULL) {
		module_entry = skl_fill_module_table(ctx, mod_name, mod_id);
		if (module_entry == NULL) {
			dev_err(ctx->dev, "Failed to Load module\n");
			return -EINVAL;
		}
	}

	if (!module_entry->usage_cnt) {
		ret = skl_transfer_module(ctx, module_entry->mod_info->fw->data,
				module_entry->mod_info->fw->size,
				mod_id, 0, true);
		if (ret < 0) {
			dev_err(ctx->dev, "Failed to Load module\n");
			return ret;
		}
	}

	ret = skl_get_module(ctx, mod_id);

	return ret;
}

static int skl_unload_module(struct sst_dsp *ctx, u16 mod_id)
{
	int usage_cnt;
	struct skl_dev *skl = ctx->thread_context;
	int ret = 0;

	usage_cnt = skl_put_module(ctx, mod_id);
	if (usage_cnt < 0) {
		dev_err(ctx->dev, "Module bad usage cnt!:%d\n", usage_cnt);
		return -EIO;
	}

	/* if module is used by others return, no need to unload */
	if (usage_cnt > 0)
		return 0;

	ret = skl_ipc_unload_modules(&skl->ipc,
			SKL_NUM_MODULES, &mod_id);
	if (ret < 0) {
		dev_err(ctx->dev, "Failed to UnLoad module\n");
		skl_get_module(ctx, mod_id);
		return ret;
	}

	return ret;
}

void skl_clear_module_cnt(struct sst_dsp *ctx)
{
	struct skl_module_table *module;

	if (list_empty(&ctx->module_list))
		return;

	list_for_each_entry(module, &ctx->module_list, list) {
		module->usage_cnt = 0;
	}
}
EXPORT_SYMBOL_GPL(skl_clear_module_cnt);

static void skl_clear_module_table(struct sst_dsp *ctx)
{
	struct skl_module_table *module, *tmp;

	if (list_empty(&ctx->module_list))
		return;

	list_for_each_entry_safe(module, tmp, &ctx->module_list, list) {
		list_del(&module->list);
		release_firmware(module->mod_info->fw);
	}
}

static const struct skl_dsp_fw_ops skl_fw_ops = {
	.set_state_D0 = skl_set_dsp_D0,
	.set_state_D3 = skl_set_dsp_D3,
	.load_fw = skl_load_base_firmware,
	.get_fw_errcode = skl_get_errorcode,
	.load_library = skl_load_library,
	.load_mod = skl_load_module,
	.unload_mod = skl_unload_module,
};

static struct sst_ops skl_ops = {
	.irq_handler = skl_dsp_sst_interrupt,
	.write = sst_shim32_write,
	.read = sst_shim32_read,
	.free = skl_dsp_free,
};

static struct sst_dsp_device skl_dev = {
	.thread = skl_dsp_irq_thread_handler,
	.ops = &skl_ops,
};
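
/*
 * Create the SST context for the Skylake DSP: map the shim/SRAM windows,
 * set up the IPC mailboxes and register the firmware ops. The firmware
 * itself is loaded later via skl_sst_init_fw().
 */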
int skl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
		const char *fw_name, struct skl_dsp_loader_ops dsp_ops,
		struct skl_dev **dsp)
{
	struct skl_dev *skl;
	struct sst_dsp *sst;
	int ret;

	ret = skl_sst_ctx_init(dev, irq, fw_name, dsp_ops, dsp, &skl_dev);
	if (ret < 0) {
		dev_err(dev, "%s: no device\n", __func__);
		return ret;
	}

	skl = *dsp;
	sst = skl->dsp;
	sst->addr.lpe = mmio_base;
	sst->addr.shim = mmio_base;
	sst->addr.sram0_base = SKL_ADSP_SRAM0_BASE;
	sst->addr.sram1_base = SKL_ADSP_SRAM1_BASE;
	sst->addr.w0_stat_sz = SKL_ADSP_W0_STAT_SZ;
	sst->addr.w0_up_sz = SKL_ADSP_W0_UP_SZ;

	sst_dsp_mailbox_init(sst, (SKL_ADSP_SRAM0_BASE + SKL_ADSP_W0_STAT_SZ),
			SKL_ADSP_W0_UP_SZ, SKL_ADSP_SRAM1_BASE, SKL_ADSP_W1_SZ);

	ret = skl_ipc_init(dev, skl);
	if (ret) {
		skl_dsp_free(sst);
		return ret;
	}

	sst->fw_ops = skl_fw_ops;

	return skl_dsp_acquire_irq(sst);
}
EXPORT_SYMBOL_GPL(skl_sst_dsp_init);

int skl_sst_init_fw(struct device *dev, struct skl_dev *skl)
{
	int ret;
	struct sst_dsp *sst = skl->dsp;

	ret = sst->fw_ops.load_fw(sst);
	if (ret < 0) {
		dev_err(dev, "Load base fw failed: %d\n", ret);
		return ret;
	}

	skl_dsp_init_core_state(sst);

	if (skl->lib_count > 1) {
		ret = sst->fw_ops.load_library(sst, skl->lib_info,
						skl->lib_count);
		if (ret < 0) {
			dev_err(dev, "Load Library failed: %x\n", ret);
			return ret;
		}
	}
	skl->is_first_boot = false;

	return 0;
}
EXPORT_SYMBOL_GPL(skl_sst_init_fw);

void skl_sst_dsp_cleanup(struct device *dev, struct skl_dev *skl)
{
	if (skl->dsp->fw)
		release_firmware(skl->dsp->fw);
	skl_clear_module_table(skl->dsp);
	skl_freeup_uuid_list(skl);
	skl_ipc_free(&skl->ipc);
	skl->dsp->ops->free(skl->dsp);
	if (skl->boot_complete) {
		skl->dsp->cl_dev.ops.cl_cleanup_controller(skl->dsp);
		skl_cldma_int_disable(skl->dsp);
	}
}
EXPORT_SYMBOL_GPL(skl_sst_dsp_cleanup);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel Skylake IPC driver");