qcom_wcnss.c (16316B)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Qualcomm Wireless Connectivity Subsystem Peripheral Image Loader
 *
 * Copyright (C) 2016 Linaro Ltd
 * Copyright (C) 2014 Sony Mobile Communications AB
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/qcom_scm.h>
#include <linux/regulator/consumer.h>
#include <linux/remoteproc.h>
#include <linux/soc/qcom/mdt_loader.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>

#include "qcom_common.h"
#include "remoteproc_internal.h"
#include "qcom_pil_info.h"
#include "qcom_wcnss.h"

#define WCNSS_CRASH_REASON_SMEM		422
#define WCNSS_FIRMWARE_NAME		"wcnss.mdt"
#define WCNSS_PAS_ID			6
#define WCNSS_SSCTL_ID			0x13

#define WCNSS_SPARE_NVBIN_DLND		BIT(25)

#define WCNSS_PMU_IRIS_XO_CFG		BIT(3)
#define WCNSS_PMU_IRIS_XO_EN		BIT(4)
#define WCNSS_PMU_GC_BUS_MUX_SEL_TOP	BIT(5)
#define WCNSS_PMU_IRIS_XO_CFG_STS	BIT(6) /* 1: in progress, 0: done */

#define WCNSS_PMU_IRIS_RESET		BIT(7)
#define WCNSS_PMU_IRIS_RESET_STS	BIT(8) /* 1: in progress, 0: done */
#define WCNSS_PMU_IRIS_XO_READ		BIT(9)
#define WCNSS_PMU_IRIS_XO_READ_STS	BIT(10)

#define WCNSS_PMU_XO_MODE_MASK		GENMASK(2, 1)
#define WCNSS_PMU_XO_MODE_19p2		0
#define WCNSS_PMU_XO_MODE_48		3

#define WCNSS_MAX_PDS			2

struct wcnss_data {
	size_t pmu_offset;
	size_t spare_offset;

	const char *pd_names[WCNSS_MAX_PDS];
	const struct wcnss_vreg_info *vregs;
	size_t num_vregs, num_pd_vregs;
};

struct qcom_wcnss {
	struct device *dev;
	struct rproc *rproc;

	void __iomem *pmu_cfg;
	void __iomem *spare_out;

	bool use_48mhz_xo;

	int wdog_irq;
	int fatal_irq;
	int ready_irq;
	int handover_irq;
	int stop_ack_irq;

	struct qcom_smem_state *state;
	unsigned stop_bit;

	struct mutex iris_lock;
	struct qcom_iris *iris;

	struct device *pds[WCNSS_MAX_PDS];
	size_t num_pds;
	struct regulator_bulk_data *vregs;
	size_t num_vregs;

	struct completion start_done;
	struct completion stop_done;

	phys_addr_t mem_phys;
	phys_addr_t mem_reloc;
	void *mem_region;
	size_t mem_size;

	struct qcom_rproc_subdev smd_subdev;
	struct qcom_sysmon *sysmon;
};

static const struct wcnss_data riva_data = {
	.pmu_offset = 0x28,
	.spare_offset = 0xb4,

	.vregs = (struct wcnss_vreg_info[]) {
		{ "vddmx", 1050000, 1150000, 0 },
		{ "vddcx", 1050000, 1150000, 0 },
		{ "vddpx", 1800000, 1800000, 0 },
	},
	.num_vregs = 3,
};

static const struct wcnss_data pronto_v1_data = {
	.pmu_offset = 0x1004,
	.spare_offset = 0x1088,

	.pd_names = { "mx", "cx" },
	.vregs = (struct wcnss_vreg_info[]) {
		{ "vddmx", 950000, 1150000, 0 },
		{ "vddcx", .super_turbo = true },
		{ "vddpx", 1800000, 1800000, 0 },
	},
	.num_pd_vregs = 2,
	.num_vregs = 1,
};

static const struct wcnss_data pronto_v2_data = {
	.pmu_offset = 0x1004,
	.spare_offset = 0x1088,

	.pd_names = { "mx", "cx" },
	.vregs = (struct wcnss_vreg_info[]) {
		{ "vddmx", 1287500, 1287500, 0 },
		{ "vddcx", .super_turbo = true },
		{ "vddpx", 1800000, 1800000, 0 },
	},
	.num_pd_vregs = 2,
	.num_vregs = 1,
};

static int wcnss_load(struct rproc *rproc, const struct firmware *fw)
{
	struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv;
	int ret;

	ret = qcom_mdt_load(wcnss->dev, fw, rproc->firmware, WCNSS_PAS_ID,
			    wcnss->mem_region, wcnss->mem_phys,
			    wcnss->mem_size, &wcnss->mem_reloc);
	if (ret)
		return ret;

	qcom_pil_info_store("wcnss", wcnss->mem_phys, wcnss->mem_size);

	return 0;
}

static void wcnss_indicate_nv_download(struct qcom_wcnss *wcnss)
{
	u32 val;

	/* Indicate NV download capability */
	val = readl(wcnss->spare_out);
	val |= WCNSS_SPARE_NVBIN_DLND;
	writel(val, wcnss->spare_out);
}

static void wcnss_configure_iris(struct qcom_wcnss *wcnss)
{
	u32 val;

	/* Clear PMU cfg register */
	writel(0, wcnss->pmu_cfg);

	val = WCNSS_PMU_GC_BUS_MUX_SEL_TOP | WCNSS_PMU_IRIS_XO_EN;
	writel(val, wcnss->pmu_cfg);

	/* Clear XO_MODE */
	val &= ~WCNSS_PMU_XO_MODE_MASK;
	if (wcnss->use_48mhz_xo)
		val |= WCNSS_PMU_XO_MODE_48 << 1;
	else
		val |= WCNSS_PMU_XO_MODE_19p2 << 1;
	writel(val, wcnss->pmu_cfg);

	/* Reset IRIS */
	val |= WCNSS_PMU_IRIS_RESET;
	writel(val, wcnss->pmu_cfg);

	/* Wait for PMU.iris_reg_reset_sts */
	while (readl(wcnss->pmu_cfg) & WCNSS_PMU_IRIS_RESET_STS)
		cpu_relax();

	/* Clear IRIS reset */
	val &= ~WCNSS_PMU_IRIS_RESET;
	writel(val, wcnss->pmu_cfg);

	/* Start IRIS XO configuration */
	val |= WCNSS_PMU_IRIS_XO_CFG;
	writel(val, wcnss->pmu_cfg);

	/* Wait for XO configuration to finish */
	while (readl(wcnss->pmu_cfg) & WCNSS_PMU_IRIS_XO_CFG_STS)
		cpu_relax();

	/* Stop IRIS XO configuration */
	val &= ~WCNSS_PMU_GC_BUS_MUX_SEL_TOP;
	val &= ~WCNSS_PMU_IRIS_XO_CFG;
	writel(val, wcnss->pmu_cfg);

	/* Add some delay for XO to settle */
	msleep(20);
}

static int wcnss_start(struct rproc *rproc)
{
	struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv;
	int ret, i;

	mutex_lock(&wcnss->iris_lock);
	if (!wcnss->iris) {
		dev_err(wcnss->dev, "no iris registered\n");
		ret = -EINVAL;
		goto release_iris_lock;
	}

	for (i = 0; i < wcnss->num_pds; i++) {
		dev_pm_genpd_set_performance_state(wcnss->pds[i], INT_MAX);
		ret = pm_runtime_get_sync(wcnss->pds[i]);
		if (ret < 0) {
			pm_runtime_put_noidle(wcnss->pds[i]);
			goto disable_pds;
		}
	}

	ret = regulator_bulk_enable(wcnss->num_vregs, wcnss->vregs);
	if (ret)
		goto disable_pds;

	ret = qcom_iris_enable(wcnss->iris);
	if (ret)
		goto disable_regulators;

	wcnss_indicate_nv_download(wcnss);
	wcnss_configure_iris(wcnss);

	ret = qcom_scm_pas_auth_and_reset(WCNSS_PAS_ID);
	if (ret) {
		dev_err(wcnss->dev,
			"failed to authenticate image and release reset\n");
		goto disable_iris;
	}

	ret = wait_for_completion_timeout(&wcnss->start_done,
					  msecs_to_jiffies(5000));
	if (wcnss->ready_irq > 0 && ret == 0) {
		/* We have a ready_irq, but it didn't fire in time. */
		dev_err(wcnss->dev, "start timed out\n");
		qcom_scm_pas_shutdown(WCNSS_PAS_ID);
		ret = -ETIMEDOUT;
		goto disable_iris;
	}

	ret = 0;

disable_iris:
	qcom_iris_disable(wcnss->iris);
disable_regulators:
	regulator_bulk_disable(wcnss->num_vregs, wcnss->vregs);
disable_pds:
	for (i--; i >= 0; i--) {
		pm_runtime_put(wcnss->pds[i]);
		dev_pm_genpd_set_performance_state(wcnss->pds[i], 0);
	}
release_iris_lock:
	mutex_unlock(&wcnss->iris_lock);

	return ret;
}

static int wcnss_stop(struct rproc *rproc)
{
	struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv;
	int ret;

	if (wcnss->state) {
		qcom_smem_state_update_bits(wcnss->state,
					    BIT(wcnss->stop_bit),
					    BIT(wcnss->stop_bit));

		ret = wait_for_completion_timeout(&wcnss->stop_done,
						  msecs_to_jiffies(5000));
		if (ret == 0)
			dev_err(wcnss->dev, "timed out on wait\n");

		qcom_smem_state_update_bits(wcnss->state,
					    BIT(wcnss->stop_bit),
					    0);
	}

	ret = qcom_scm_pas_shutdown(WCNSS_PAS_ID);
	if (ret)
		dev_err(wcnss->dev, "failed to shutdown: %d\n", ret);

	return ret;
}

static void *wcnss_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
	struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv;
	int offset;

	offset = da - wcnss->mem_reloc;
	if (offset < 0 || offset + len > wcnss->mem_size)
		return NULL;

	return wcnss->mem_region + offset;
}

static const struct rproc_ops wcnss_ops = {
	.start = wcnss_start,
	.stop = wcnss_stop,
	.da_to_va = wcnss_da_to_va,
	.parse_fw = qcom_register_dump_segments,
	.load = wcnss_load,
};

static irqreturn_t wcnss_wdog_interrupt(int irq, void *dev)
{
	struct qcom_wcnss *wcnss = dev;

	rproc_report_crash(wcnss->rproc, RPROC_WATCHDOG);

	return IRQ_HANDLED;
}

static irqreturn_t wcnss_fatal_interrupt(int irq, void *dev)
{
	struct qcom_wcnss *wcnss = dev;
	size_t len;
	char *msg;

	msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, WCNSS_CRASH_REASON_SMEM, &len);
	if (!IS_ERR(msg) && len > 0 && msg[0])
		dev_err(wcnss->dev, "fatal error received: %s\n", msg);

	rproc_report_crash(wcnss->rproc, RPROC_FATAL_ERROR);

	return IRQ_HANDLED;
}

static irqreturn_t wcnss_ready_interrupt(int irq, void *dev)
{
	struct qcom_wcnss *wcnss = dev;

	complete(&wcnss->start_done);

	return IRQ_HANDLED;
}

static irqreturn_t wcnss_handover_interrupt(int irq, void *dev)
{
	/*
	 * XXX: At this point we're supposed to release the resources that we
	 * have been holding on behalf of the WCNSS. Unfortunately this
	 * interrupt comes way before the other side seems to be done.
	 *
	 * So we're currently relying on the ready interrupt firing later than
	 * this and we just disable the resources at the end of wcnss_start().
	 */

	return IRQ_HANDLED;
}

static irqreturn_t wcnss_stop_ack_interrupt(int irq, void *dev)
{
	struct qcom_wcnss *wcnss = dev;

	complete(&wcnss->stop_done);

	return IRQ_HANDLED;
}

static int wcnss_init_pds(struct qcom_wcnss *wcnss,
			  const char * const pd_names[WCNSS_MAX_PDS])
{
	int i, ret;

	for (i = 0; i < WCNSS_MAX_PDS; i++) {
		if (!pd_names[i])
			break;

		wcnss->pds[i] = dev_pm_domain_attach_by_name(wcnss->dev, pd_names[i]);
		if (IS_ERR_OR_NULL(wcnss->pds[i])) {
			ret = PTR_ERR(wcnss->pds[i]) ? : -ENODATA;
			for (i--; i >= 0; i--)
				dev_pm_domain_detach(wcnss->pds[i], false);
			return ret;
		}
	}
	wcnss->num_pds = i;

	return 0;
}

static void wcnss_release_pds(struct qcom_wcnss *wcnss)
{
	int i;

	for (i = 0; i < wcnss->num_pds; i++)
		dev_pm_domain_detach(wcnss->pds[i], false);
}

static int wcnss_init_regulators(struct qcom_wcnss *wcnss,
				 const struct wcnss_vreg_info *info,
				 int num_vregs, int num_pd_vregs)
{
	struct regulator_bulk_data *bulk;
	int ret;
	int i;

	/*
	 * If attaching the power domains succeeded we can skip requesting
	 * the regulators for the power domains. For old device trees we need
	 * to reserve extra space to manage them through the regulator
	 * interface.
	 */
	if (wcnss->num_pds)
		info += num_pd_vregs;
	else
		num_vregs += num_pd_vregs;

	bulk = devm_kcalloc(wcnss->dev,
			    num_vregs, sizeof(struct regulator_bulk_data),
			    GFP_KERNEL);
	if (!bulk)
		return -ENOMEM;

	for (i = 0; i < num_vregs; i++)
		bulk[i].supply = info[i].name;

	ret = devm_regulator_bulk_get(wcnss->dev, num_vregs, bulk);
	if (ret)
		return ret;

	for (i = 0; i < num_vregs; i++) {
		if (info[i].max_voltage)
			regulator_set_voltage(bulk[i].consumer,
					      info[i].min_voltage,
					      info[i].max_voltage);

		if (info[i].load_uA)
			regulator_set_load(bulk[i].consumer, info[i].load_uA);
	}

	wcnss->vregs = bulk;
	wcnss->num_vregs = num_vregs;

	return 0;
}

static int wcnss_request_irq(struct qcom_wcnss *wcnss,
			     struct platform_device *pdev,
			     const char *name,
			     bool optional,
			     irq_handler_t thread_fn)
{
	int ret;

	ret = platform_get_irq_byname(pdev, name);
	if (ret < 0 && optional) {
		dev_dbg(&pdev->dev, "no %s IRQ defined, ignoring\n", name);
		return 0;
	} else if (ret < 0) {
		dev_err(&pdev->dev, "no %s IRQ defined\n", name);
		return ret;
	}

	ret = devm_request_threaded_irq(&pdev->dev, ret,
					NULL, thread_fn,
					IRQF_TRIGGER_RISING | IRQF_ONESHOT,
					"wcnss", wcnss);
	if (ret)
		dev_err(&pdev->dev, "request %s IRQ failed\n", name);

	return ret;
}

static int wcnss_alloc_memory_region(struct qcom_wcnss *wcnss)
{
	struct device_node *node;
	struct resource r;
	int ret;

	node = of_parse_phandle(wcnss->dev->of_node, "memory-region", 0);
	if (!node) {
		dev_err(wcnss->dev, "no memory-region specified\n");
		return -EINVAL;
	}

	ret = of_address_to_resource(node, 0, &r);
	of_node_put(node);
	if (ret)
		return ret;

	wcnss->mem_phys = wcnss->mem_reloc = r.start;
	wcnss->mem_size = resource_size(&r);
	wcnss->mem_region = devm_ioremap_wc(wcnss->dev, wcnss->mem_phys, wcnss->mem_size);
	if (!wcnss->mem_region) {
		dev_err(wcnss->dev, "unable to map memory region: %pa+%zx\n",
			&r.start, wcnss->mem_size);
		return -EBUSY;
	}

	return 0;
}

static int wcnss_probe(struct platform_device *pdev)
{
	const char *fw_name = WCNSS_FIRMWARE_NAME;
	const struct wcnss_data *data;
	struct qcom_wcnss *wcnss;
	struct resource *res;
	struct rproc *rproc;
	void __iomem *mmio;
	int ret;

	data = of_device_get_match_data(&pdev->dev);

	if (!qcom_scm_is_available())
		return -EPROBE_DEFER;

	if (!qcom_scm_pas_supported(WCNSS_PAS_ID)) {
		dev_err(&pdev->dev, "PAS is not available for WCNSS\n");
		return -ENXIO;
	}

	ret = of_property_read_string(pdev->dev.of_node, "firmware-name",
				      &fw_name);
	if (ret < 0 && ret != -EINVAL)
		return ret;

	rproc = rproc_alloc(&pdev->dev, pdev->name, &wcnss_ops,
			    fw_name, sizeof(*wcnss));
	if (!rproc) {
		dev_err(&pdev->dev, "unable to allocate remoteproc\n");
		return -ENOMEM;
	}
	rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);

	wcnss = (struct qcom_wcnss *)rproc->priv;
	wcnss->dev = &pdev->dev;
	wcnss->rproc = rproc;
	platform_set_drvdata(pdev, wcnss);

	init_completion(&wcnss->start_done);
	init_completion(&wcnss->stop_done);

	mutex_init(&wcnss->iris_lock);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pmu");
	mmio = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mmio)) {
		ret = PTR_ERR(mmio);
		goto free_rproc;
	}

	ret = wcnss_alloc_memory_region(wcnss);
	if (ret)
		goto free_rproc;

	wcnss->pmu_cfg = mmio + data->pmu_offset;
	wcnss->spare_out = mmio + data->spare_offset;

	/*
	 * We might need to fall back to regulators instead of power domains
	 * for old device trees. Don't report an error in that case.
	 */
	ret = wcnss_init_pds(wcnss, data->pd_names);
	if (ret && (ret != -ENODATA || !data->num_pd_vregs))
		goto free_rproc;

	ret = wcnss_init_regulators(wcnss, data->vregs, data->num_vregs,
				    data->num_pd_vregs);
	if (ret)
		goto detach_pds;

	ret = wcnss_request_irq(wcnss, pdev, "wdog", false, wcnss_wdog_interrupt);
	if (ret < 0)
		goto detach_pds;
	wcnss->wdog_irq = ret;

	ret = wcnss_request_irq(wcnss, pdev, "fatal", false, wcnss_fatal_interrupt);
	if (ret < 0)
		goto detach_pds;
	wcnss->fatal_irq = ret;

	ret = wcnss_request_irq(wcnss, pdev, "ready", true, wcnss_ready_interrupt);
	if (ret < 0)
		goto detach_pds;
	wcnss->ready_irq = ret;

	ret = wcnss_request_irq(wcnss, pdev, "handover", true, wcnss_handover_interrupt);
	if (ret < 0)
		goto detach_pds;
	wcnss->handover_irq = ret;

	ret = wcnss_request_irq(wcnss, pdev, "stop-ack", true, wcnss_stop_ack_interrupt);
	if (ret < 0)
		goto detach_pds;
	wcnss->stop_ack_irq = ret;

	if (wcnss->stop_ack_irq) {
		wcnss->state = devm_qcom_smem_state_get(&pdev->dev, "stop",
							&wcnss->stop_bit);
		if (IS_ERR(wcnss->state)) {
			ret = PTR_ERR(wcnss->state);
			goto detach_pds;
		}
	}

	qcom_add_smd_subdev(rproc, &wcnss->smd_subdev);
	wcnss->sysmon = qcom_add_sysmon_subdev(rproc, "wcnss", WCNSS_SSCTL_ID);
	if (IS_ERR(wcnss->sysmon)) {
		ret = PTR_ERR(wcnss->sysmon);
		goto detach_pds;
	}

	wcnss->iris = qcom_iris_probe(&pdev->dev, &wcnss->use_48mhz_xo);
	if (IS_ERR(wcnss->iris)) {
		ret = PTR_ERR(wcnss->iris);
		goto detach_pds;
	}

	ret = rproc_add(rproc);
	if (ret)
		goto remove_iris;

	return 0;

remove_iris:
	qcom_iris_remove(wcnss->iris);
detach_pds:
	wcnss_release_pds(wcnss);
free_rproc:
	rproc_free(rproc);

	return ret;
}

static int wcnss_remove(struct platform_device *pdev)
{
	struct qcom_wcnss *wcnss = platform_get_drvdata(pdev);

	qcom_iris_remove(wcnss->iris);

	rproc_del(wcnss->rproc);

	qcom_remove_sysmon_subdev(wcnss->sysmon);
	qcom_remove_smd_subdev(wcnss->rproc, &wcnss->smd_subdev);
	wcnss_release_pds(wcnss);
	rproc_free(wcnss->rproc);

	return 0;
}

static const struct of_device_id wcnss_of_match[] = {
	{ .compatible = "qcom,riva-pil", &riva_data },
	{ .compatible = "qcom,pronto-v1-pil", &pronto_v1_data },
	{ .compatible = "qcom,pronto-v2-pil", &pronto_v2_data },
	{ },
};
MODULE_DEVICE_TABLE(of, wcnss_of_match);

static struct platform_driver wcnss_driver = {
	.probe = wcnss_probe,
	.remove = wcnss_remove,
	.driver = {
		.name = "qcom-wcnss-pil",
		.of_match_table = wcnss_of_match,
	},
};

module_platform_driver(wcnss_driver);

MODULE_DESCRIPTION("Qualcomm Peripheral Image Loader for Wireless Subsystem");
MODULE_LICENSE("GPL v2");