ipc.c

// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
//
// Authors: Cezary Rojewski <cezary.rojewski@intel.com>
//          Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
//

#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/slab.h>
#include <sound/hdaudio_ext.h>
#include "avs.h"
#include "messages.h"
#include "registers.h"
#include "trace.h"

#define AVS_IPC_TIMEOUT_MS	300
#define AVS_D0IX_DELAY_MS	300

static int
avs_dsp_set_d0ix(struct avs_dev *adev, bool enable)
{
	struct avs_ipc *ipc = adev->ipc;
	int ret;

	/* Is transition required? */
	if (ipc->in_d0ix == enable)
		return 0;

	ret = avs_dsp_op(adev, set_d0ix, enable);
	if (ret) {
		/* Prevent further d0ix attempts on conscious IPC failure. */
		if (ret == -AVS_EIPC)
			atomic_inc(&ipc->d0ix_disable_depth);

		ipc->in_d0ix = false;
		return ret;
	}

	ipc->in_d0ix = enable;
	return 0;
}

static void avs_dsp_schedule_d0ix(struct avs_dev *adev, struct avs_ipc_msg *tx)
{
	if (atomic_read(&adev->ipc->d0ix_disable_depth))
		return;

	mod_delayed_work(system_power_efficient_wq, &adev->ipc->d0ix_work,
			 msecs_to_jiffies(AVS_D0IX_DELAY_MS));
}

static void avs_dsp_d0ix_work(struct work_struct *work)
{
	struct avs_ipc *ipc = container_of(work, struct avs_ipc, d0ix_work.work);

	avs_dsp_set_d0ix(to_avs_dev(ipc->dev), true);
}

static int avs_dsp_wake_d0i0(struct avs_dev *adev, struct avs_ipc_msg *tx)
{
	struct avs_ipc *ipc = adev->ipc;

	if (!atomic_read(&ipc->d0ix_disable_depth)) {
		cancel_delayed_work_sync(&ipc->d0ix_work);
		return avs_dsp_set_d0ix(adev, false);
	}

	return 0;
}
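
/*
 * D0ix disabling is reference-counted through ->d0ix_disable_depth. Only
 * the first disable cancels the delayed transition and forces the DSP back
 * to D0i0; only the last matching enable schedules a return to D0ix.
 */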
int avs_dsp_disable_d0ix(struct avs_dev *adev)
{
	struct avs_ipc *ipc = adev->ipc;

	/* Prevent PG only on the first disable. */
	if (atomic_add_return(1, &ipc->d0ix_disable_depth) == 1) {
		cancel_delayed_work_sync(&ipc->d0ix_work);
		return avs_dsp_set_d0ix(adev, false);
	}

	return 0;
}

int avs_dsp_enable_d0ix(struct avs_dev *adev)
{
	struct avs_ipc *ipc = adev->ipc;

	if (atomic_dec_and_test(&ipc->d0ix_disable_depth))
		queue_delayed_work(system_power_efficient_wq, &ipc->d0ix_work,
				   msecs_to_jiffies(AVS_D0IX_DELAY_MS));
	return 0;
}

static void avs_dsp_recovery(struct avs_dev *adev)
{
	struct avs_soc_component *acomp;
	unsigned int core_mask;
	int ret;

	mutex_lock(&adev->comp_list_mutex);
	/* disconnect all running streams */
	list_for_each_entry(acomp, &adev->comp_list, node) {
		struct snd_soc_pcm_runtime *rtd;
		struct snd_soc_card *card;

		card = acomp->base.card;
		if (!card)
			continue;

		for_each_card_rtds(card, rtd) {
			struct snd_pcm *pcm;
			int dir;

			pcm = rtd->pcm;
			if (!pcm || rtd->dai_link->no_pcm)
				continue;

			for_each_pcm_streams(dir) {
				struct snd_pcm_substream *substream;

				substream = pcm->streams[dir].substream;
				if (!substream || !substream->runtime)
					continue;

				/* snd_pcm_stop() requires the stream lock to be held. */
				snd_pcm_stream_lock_irq(substream);
				snd_pcm_stop(substream, SNDRV_PCM_STATE_DISCONNECTED);
				snd_pcm_stream_unlock_irq(substream);
			}
		}
	}
	mutex_unlock(&adev->comp_list_mutex);

	/* forcibly shutdown all cores */
	core_mask = GENMASK(adev->hw_cfg.dsp_cores - 1, 0);
	avs_dsp_core_disable(adev, core_mask);

	/* attempt dsp reboot */
	ret = avs_dsp_boot_firmware(adev, true);
	if (ret < 0)
		dev_err(adev->dev, "dsp reboot failed: %d\n", ret);

	pm_runtime_mark_last_busy(adev->dev);
	pm_runtime_enable(adev->dev);
	pm_request_autosuspend(adev->dev);

	atomic_set(&adev->ipc->recovering, 0);
}

static void avs_dsp_recovery_work(struct work_struct *work)
{
	struct avs_ipc *ipc = container_of(work, struct avs_ipc, recovery_work);

	avs_dsp_recovery(to_avs_dev(ipc->dev));
}

static void avs_dsp_exception_caught(struct avs_dev *adev, union avs_notify_msg *msg)
{
	struct avs_ipc *ipc = adev->ipc;

	/* Account for the double-exception case. */
	ipc->ready = false;

	if (!atomic_add_unless(&ipc->recovering, 1, 1)) {
		dev_err(adev->dev, "dsp recovery is already in progress\n");
		return;
	}

	dev_crit(adev->dev, "communication severed, rebooting dsp..\n");

	cancel_delayed_work_sync(&ipc->d0ix_work);
	ipc->in_d0ix = false;
	/* Re-enabled on recovery completion. */
	pm_runtime_disable(adev->dev);

	/* Process received notification. */
	avs_dsp_op(adev, coredump, msg);

	schedule_work(&ipc->recovery_work);
}
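
/*
 * RX path. Replies and notifications are read back from the uplink
 * (DSP -> host) mailbox window; a reply's payload is copied out only when
 * the firmware reported success for the corresponding request.
 */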
static void avs_dsp_receive_rx(struct avs_dev *adev, u64 header)
{
	struct avs_ipc *ipc = adev->ipc;
	union avs_reply_msg msg = AVS_MSG(header);
	u64 reg;

	reg = readq(avs_sram_addr(adev, AVS_FW_REGS_WINDOW));
	trace_avs_ipc_reply_msg(header, reg);

	ipc->rx.header = header;
	/* Abort copying payload if request processing was unsuccessful. */
	if (!msg.status) {
		/* update size in case of LARGE_CONFIG_GET */
		if (msg.msg_target == AVS_MOD_MSG &&
		    msg.global_msg_type == AVS_MOD_LARGE_CONFIG_GET)
			/* Clamp to mailbox size; rx.data holds AVS_MAILBOX_SIZE bytes. */
			ipc->rx.size = min_t(u32, AVS_MAILBOX_SIZE,
					     msg.ext.large_config.data_off_size);

		memcpy_fromio(ipc->rx.data, avs_uplink_addr(adev), ipc->rx.size);
		trace_avs_msg_payload(ipc->rx.data, ipc->rx.size);
	}
}

static void avs_dsp_process_notification(struct avs_dev *adev, u64 header)
{
	struct avs_notify_mod_data mod_data;
	union avs_notify_msg msg = AVS_MSG(header);
	size_t data_size = 0;
	void *data = NULL;
	u64 reg;

	reg = readq(avs_sram_addr(adev, AVS_FW_REGS_WINDOW));
	trace_avs_ipc_notify_msg(header, reg);

	/* Ignore spurious notifications until handshake is established. */
	if (!adev->ipc->ready && msg.notify_msg_type != AVS_NOTIFY_FW_READY) {
		dev_dbg(adev->dev, "FW not ready, skip notification: 0x%08x\n", msg.primary);
		return;
	}

	/* Calculate notification payload size. */
	switch (msg.notify_msg_type) {
	case AVS_NOTIFY_FW_READY:
		break;

	case AVS_NOTIFY_PHRASE_DETECTED:
		data_size = sizeof(struct avs_notify_voice_data);
		break;

	case AVS_NOTIFY_RESOURCE_EVENT:
		data_size = sizeof(struct avs_notify_res_data);
		break;

	case AVS_NOTIFY_LOG_BUFFER_STATUS:
	case AVS_NOTIFY_EXCEPTION_CAUGHT:
		break;

	case AVS_NOTIFY_MODULE_EVENT:
		/* To know the total payload size, header needs to be read first. */
		memcpy_fromio(&mod_data, avs_uplink_addr(adev), sizeof(mod_data));
		data_size = sizeof(mod_data) + mod_data.data_size;
		break;

	default:
		dev_info(adev->dev, "unknown notification: 0x%08x\n", msg.primary);
		break;
	}

	if (data_size) {
		data = kmalloc(data_size, GFP_KERNEL);
		if (!data)
			return;

		memcpy_fromio(data, avs_uplink_addr(adev), data_size);
		trace_avs_msg_payload(data, data_size);
	}

	/* Perform notification-specific operations. */
	switch (msg.notify_msg_type) {
	case AVS_NOTIFY_FW_READY:
		dev_dbg(adev->dev, "FW READY 0x%08x\n", msg.primary);
		adev->ipc->ready = true;
		complete(&adev->fw_ready);
		break;

	case AVS_NOTIFY_LOG_BUFFER_STATUS:
		avs_dsp_op(adev, log_buffer_status, &msg);
		break;

	case AVS_NOTIFY_EXCEPTION_CAUGHT:
		avs_dsp_exception_caught(adev, &msg);
		break;

	default:
		break;
	}

	kfree(data);
}

void avs_dsp_process_response(struct avs_dev *adev, u64 header)
{
	struct avs_ipc *ipc = adev->ipc;

	/*
	 * Response may either be solicited - a reply for a request that has
	 * been sent beforehand - or unsolicited (notification).
	 */
	if (avs_msg_is_reply(header)) {
		/* Response processing is invoked from IRQ thread. */
		spin_lock_irq(&ipc->rx_lock);
		avs_dsp_receive_rx(adev, header);
		ipc->rx_completed = true;
		spin_unlock_irq(&ipc->rx_lock);
	} else {
		avs_dsp_process_notification(adev, header);
	}

	complete(&ipc->busy_completion);
}
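
/*
 * IPC interrupt handling is split in two stages: the hard handler acks the
 * DONE interrupt (the DSP accepted host's request) and defers BUSY (a new
 * message from the DSP is pending) to the threaded handler below it.
 */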
irqreturn_t avs_dsp_irq_handler(int irq, void *dev_id)
{
	struct avs_dev *adev = dev_id;
	struct avs_ipc *ipc = adev->ipc;
	u32 adspis, hipc_rsp, hipc_ack;
	irqreturn_t ret = IRQ_NONE;

	adspis = snd_hdac_adsp_readl(adev, AVS_ADSP_REG_ADSPIS);
	if (adspis == UINT_MAX || !(adspis & AVS_ADSP_ADSPIS_IPC))
		return ret;

	hipc_ack = snd_hdac_adsp_readl(adev, SKL_ADSP_REG_HIPCIE);
	hipc_rsp = snd_hdac_adsp_readl(adev, SKL_ADSP_REG_HIPCT);

	/* DSP acked host's request */
	if (hipc_ack & SKL_ADSP_HIPCIE_DONE) {
		/*
		 * As an extra precaution, mask done interrupt. Code executed
		 * due to complete() found below does not assume any masking.
		 */
		snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCCTL,
				      AVS_ADSP_HIPCCTL_DONE, 0);

		complete(&ipc->done_completion);

		/* tell DSP it has our attention */
		snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCIE,
				      SKL_ADSP_HIPCIE_DONE,
				      SKL_ADSP_HIPCIE_DONE);
		/* unmask done interrupt */
		snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCCTL,
				      AVS_ADSP_HIPCCTL_DONE,
				      AVS_ADSP_HIPCCTL_DONE);
		ret = IRQ_HANDLED;
	}

	/* DSP sent new response to process */
	if (hipc_rsp & SKL_ADSP_HIPCT_BUSY) {
		/* mask busy interrupt */
		snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCCTL,
				      AVS_ADSP_HIPCCTL_BUSY, 0);

		ret = IRQ_WAKE_THREAD;
	}

	return ret;
}

irqreturn_t avs_dsp_irq_thread(int irq, void *dev_id)
{
	struct avs_dev *adev = dev_id;
	union avs_reply_msg msg;
	u32 hipct, hipcte;

	hipct = snd_hdac_adsp_readl(adev, SKL_ADSP_REG_HIPCT);
	hipcte = snd_hdac_adsp_readl(adev, SKL_ADSP_REG_HIPCTE);

	/* ensure DSP sent new response to process */
	if (!(hipct & SKL_ADSP_HIPCT_BUSY))
		return IRQ_NONE;

	msg.primary = hipct;
	msg.ext.val = hipcte;
	avs_dsp_process_response(adev, msg.val);

	/* tell DSP we accepted its message */
	snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCT,
			      SKL_ADSP_HIPCT_BUSY, SKL_ADSP_HIPCT_BUSY);
	/* unmask busy interrupt */
	snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCCTL,
			      AVS_ADSP_HIPCCTL_BUSY, AVS_ADSP_HIPCCTL_BUSY);

	return IRQ_HANDLED;
}

static bool avs_ipc_is_busy(struct avs_ipc *ipc)
{
	struct avs_dev *adev = to_avs_dev(ipc->dev);
	u32 hipc_rsp;

	hipc_rsp = snd_hdac_adsp_readl(adev, SKL_ADSP_REG_HIPCT);
	return hipc_rsp & SKL_ADSP_HIPCT_BUSY;
}
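
/*
 * A notification also completes ->busy_completion, so a wakeup does not
 * guarantee the awaited reply has arrived. The wait below re-arms itself a
 * bounded number of times until rx_completed confirms the reply is in.
 */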
static int avs_ipc_wait_busy_completion(struct avs_ipc *ipc, int timeout)
{
	u32 repeats_left = 128; /* to avoid infinite looping */
	int ret;

again:
	ret = wait_for_completion_timeout(&ipc->busy_completion, msecs_to_jiffies(timeout));

	/* DSP could be unresponsive at this point. */
	if (!ipc->ready)
		return -EPERM;

	if (!ret) {
		if (!avs_ipc_is_busy(ipc))
			return -ETIMEDOUT;
		/*
		 * Firmware did its job, either notification or reply
		 * has been received - now wait until it's processed.
		 */
		wait_for_completion_killable(&ipc->busy_completion);
	}

	/* Ongoing notification's bottom-half may cause early wakeup */
	spin_lock(&ipc->rx_lock);
	if (!ipc->rx_completed) {
		if (repeats_left) {
			/* Reply delayed due to notification. */
			repeats_left--;
			reinit_completion(&ipc->busy_completion);
			spin_unlock(&ipc->rx_lock);
			goto again;
		}

		spin_unlock(&ipc->rx_lock);
		return -ETIMEDOUT;
	}

	spin_unlock(&ipc->rx_lock);
	return 0;
}

static void avs_ipc_msg_init(struct avs_ipc *ipc, struct avs_ipc_msg *reply)
{
	lockdep_assert_held(&ipc->rx_lock);

	ipc->rx.header = 0;
	ipc->rx.size = reply ? reply->size : 0;
	ipc->rx_completed = false;

	reinit_completion(&ipc->done_completion);
	reinit_completion(&ipc->busy_completion);
}

static void avs_dsp_send_tx(struct avs_dev *adev, struct avs_ipc_msg *tx, bool read_fwregs)
{
	u64 reg = ULONG_MAX;

	tx->header |= SKL_ADSP_HIPCI_BUSY;
	if (read_fwregs)
		reg = readq(avs_sram_addr(adev, AVS_FW_REGS_WINDOW));

	trace_avs_request(tx, reg);

	if (tx->size)
		memcpy_toio(avs_downlink_addr(adev), tx->data, tx->size);
	snd_hdac_adsp_writel(adev, SKL_ADSP_REG_HIPCIE, tx->header >> 32);
	snd_hdac_adsp_writel(adev, SKL_ADSP_REG_HIPCI, tx->header & UINT_MAX);
}

static int avs_dsp_do_send_msg(struct avs_dev *adev, struct avs_ipc_msg *request,
			       struct avs_ipc_msg *reply, int timeout)
{
	struct avs_ipc *ipc = adev->ipc;
	int ret;

	if (!ipc->ready)
		return -EPERM;

	mutex_lock(&ipc->msg_mutex);

	spin_lock(&ipc->rx_lock);
	avs_ipc_msg_init(ipc, reply);
	avs_dsp_send_tx(adev, request, true);
	spin_unlock(&ipc->rx_lock);

	ret = avs_ipc_wait_busy_completion(ipc, timeout);
	if (ret) {
		if (ret == -ETIMEDOUT) {
			union avs_notify_msg msg = AVS_NOTIFICATION(EXCEPTION_CAUGHT);

			/* Same treatment as on exception, just stack_dump=0. */
			avs_dsp_exception_caught(adev, &msg);
		}
		goto exit;
	}

	ret = ipc->rx.rsp.status;
	if (reply) {
		reply->header = ipc->rx.header;
		if (reply->data && ipc->rx.size)
			memcpy(reply->data, ipc->rx.data, reply->size);
	}

exit:
	mutex_unlock(&ipc->msg_mutex);
	return ret;
}

static int avs_dsp_send_msg_sequence(struct avs_dev *adev, struct avs_ipc_msg *request,
				     struct avs_ipc_msg *reply, int timeout, bool wake_d0i0,
				     bool schedule_d0ix)
{
	int ret;

	trace_avs_d0ix("wake", wake_d0i0, request->header);
	if (wake_d0i0) {
		ret = avs_dsp_wake_d0i0(adev, request);
		if (ret)
			return ret;
	}

	ret = avs_dsp_do_send_msg(adev, request, reply, timeout);
	if (ret)
		return ret;

	trace_avs_d0ix("schedule", schedule_d0ix, request->header);
	if (schedule_d0ix)
		avs_dsp_schedule_d0ix(adev, request);

	return 0;
}
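
/*
 * Public senders differ only in D0ix handling: regular messages consult the
 * platform's d0ix_toggle op to decide both transitions, while PM messages
 * take an explicit wake_d0i0 flag and never schedule D0ix afterwards.
 */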
int avs_dsp_send_msg_timeout(struct avs_dev *adev, struct avs_ipc_msg *request,
			     struct avs_ipc_msg *reply, int timeout)
{
	bool wake_d0i0 = avs_dsp_op(adev, d0ix_toggle, request, true);
	bool schedule_d0ix = avs_dsp_op(adev, d0ix_toggle, request, false);

	return avs_dsp_send_msg_sequence(adev, request, reply, timeout, wake_d0i0, schedule_d0ix);
}

int avs_dsp_send_msg(struct avs_dev *adev, struct avs_ipc_msg *request,
		     struct avs_ipc_msg *reply)
{
	return avs_dsp_send_msg_timeout(adev, request, reply, adev->ipc->default_timeout_ms);
}

int avs_dsp_send_pm_msg_timeout(struct avs_dev *adev, struct avs_ipc_msg *request,
				struct avs_ipc_msg *reply, int timeout, bool wake_d0i0)
{
	return avs_dsp_send_msg_sequence(adev, request, reply, timeout, wake_d0i0, false);
}

int avs_dsp_send_pm_msg(struct avs_dev *adev, struct avs_ipc_msg *request,
			struct avs_ipc_msg *reply, bool wake_d0i0)
{
	return avs_dsp_send_pm_msg_timeout(adev, request, reply, adev->ipc->default_timeout_ms,
					   wake_d0i0);
}

static int avs_dsp_do_send_rom_msg(struct avs_dev *adev, struct avs_ipc_msg *request, int timeout)
{
	struct avs_ipc *ipc = adev->ipc;
	int ret;

	mutex_lock(&ipc->msg_mutex);

	spin_lock(&ipc->rx_lock);
	avs_ipc_msg_init(ipc, NULL);
	/*
	 * with hw still stalled, memory windows may not be
	 * configured properly so avoid accessing SRAM
	 */
	avs_dsp_send_tx(adev, request, false);
	spin_unlock(&ipc->rx_lock);

	/* ROM messages must be sent before main core is unstalled */
	ret = avs_dsp_op(adev, stall, AVS_MAIN_CORE_MASK, false);
	if (!ret) {
		ret = wait_for_completion_timeout(&ipc->done_completion, msecs_to_jiffies(timeout));
		ret = ret ? 0 : -ETIMEDOUT;
	}

	mutex_unlock(&ipc->msg_mutex);

	return ret;
}

int avs_dsp_send_rom_msg_timeout(struct avs_dev *adev, struct avs_ipc_msg *request, int timeout)
{
	return avs_dsp_do_send_rom_msg(adev, request, timeout);
}

int avs_dsp_send_rom_msg(struct avs_dev *adev, struct avs_ipc_msg *request)
{
	return avs_dsp_send_rom_msg_timeout(adev, request, adev->ipc->default_timeout_ms);
}

void avs_dsp_interrupt_control(struct avs_dev *adev, bool enable)
{
	u32 value, mask;

	/*
	 * No particular bit setting order. All of these are required
	 * to have a functional SW <-> FW communication.
	 */
	value = enable ? AVS_ADSP_ADSPIC_IPC : 0;
	snd_hdac_adsp_updatel(adev, AVS_ADSP_REG_ADSPIC, AVS_ADSP_ADSPIC_IPC, value);

	mask = AVS_ADSP_HIPCCTL_DONE | AVS_ADSP_HIPCCTL_BUSY;
	value = enable ? mask : 0;
	snd_hdac_adsp_updatel(adev, SKL_ADSP_REG_HIPCCTL, mask, value);
}

int avs_ipc_init(struct avs_ipc *ipc, struct device *dev)
{
	ipc->rx.data = devm_kzalloc(dev, AVS_MAILBOX_SIZE, GFP_KERNEL);
	if (!ipc->rx.data)
		return -ENOMEM;

	ipc->dev = dev;
	ipc->ready = false;
	ipc->default_timeout_ms = AVS_IPC_TIMEOUT_MS;
	INIT_WORK(&ipc->recovery_work, avs_dsp_recovery_work);
	INIT_DELAYED_WORK(&ipc->d0ix_work, avs_dsp_d0ix_work);
	init_completion(&ipc->done_completion);
	init_completion(&ipc->busy_completion);
	spin_lock_init(&ipc->rx_lock);
	mutex_init(&ipc->msg_mutex);

	return 0;
}

void avs_ipc_block(struct avs_ipc *ipc)
{
	ipc->ready = false;
	cancel_work_sync(&ipc->recovery_work);
	cancel_delayed_work_sync(&ipc->d0ix_work);
	ipc->in_d0ix = false;
}