cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

qcom_q6v5_mss.c (57816B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * Qualcomm self-authenticating modem subsystem remoteproc driver
      4 *
      5 * Copyright (C) 2016 Linaro Ltd.
      6 * Copyright (C) 2014 Sony Mobile Communications AB
      7 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
      8 */
      9
     10#include <linux/clk.h>
     11#include <linux/delay.h>
     12#include <linux/devcoredump.h>
     13#include <linux/dma-mapping.h>
     14#include <linux/interrupt.h>
     15#include <linux/kernel.h>
     16#include <linux/mfd/syscon.h>
     17#include <linux/module.h>
     18#include <linux/of_address.h>
     19#include <linux/of_device.h>
     20#include <linux/platform_device.h>
     21#include <linux/pm_domain.h>
     22#include <linux/pm_runtime.h>
     23#include <linux/regmap.h>
     24#include <linux/regulator/consumer.h>
     25#include <linux/remoteproc.h>
     26#include <linux/reset.h>
     27#include <linux/soc/qcom/mdt_loader.h>
     28#include <linux/iopoll.h>
     29#include <linux/slab.h>
     30
     31#include "remoteproc_internal.h"
     32#include "qcom_common.h"
     33#include "qcom_pil_info.h"
     34#include "qcom_q6v5.h"
     35
     36#include <linux/qcom_scm.h>
     37
     38#define MPSS_CRASH_REASON_SMEM		421
     39
     40#define MBA_LOG_SIZE			SZ_4K
     41
     42/* RMB Status Register Values */
     43#define RMB_PBL_SUCCESS			0x1
     44
     45#define RMB_MBA_XPU_UNLOCKED		0x1
     46#define RMB_MBA_XPU_UNLOCKED_SCRIBBLED	0x2
     47#define RMB_MBA_META_DATA_AUTH_SUCCESS	0x3
     48#define RMB_MBA_AUTH_COMPLETE		0x4
     49
     50/* PBL/MBA interface registers */
     51#define RMB_MBA_IMAGE_REG		0x00
     52#define RMB_PBL_STATUS_REG		0x04
     53#define RMB_MBA_COMMAND_REG		0x08
     54#define RMB_MBA_STATUS_REG		0x0C
     55#define RMB_PMI_META_DATA_REG		0x10
     56#define RMB_PMI_CODE_START_REG		0x14
     57#define RMB_PMI_CODE_LENGTH_REG		0x18
     58#define RMB_MBA_MSS_STATUS		0x40
     59#define RMB_MBA_ALT_RESET		0x44
     60
     61#define RMB_CMD_META_DATA_READY		0x1
     62#define RMB_CMD_LOAD_READY		0x2
     63
     64/* QDSP6SS Register Offsets */
     65#define QDSP6SS_RESET_REG		0x014
     66#define QDSP6SS_GFMUX_CTL_REG		0x020
     67#define QDSP6SS_PWR_CTL_REG		0x030
     68#define QDSP6SS_MEM_PWR_CTL		0x0B0
     69#define QDSP6V6SS_MEM_PWR_CTL		0x034
     70#define QDSP6SS_STRAP_ACC		0x110
     71
     72/* AXI Halt Register Offsets */
     73#define AXI_HALTREQ_REG			0x0
     74#define AXI_HALTACK_REG			0x4
     75#define AXI_IDLE_REG			0x8
     76#define AXI_GATING_VALID_OVERRIDE	BIT(0)
     77
     78#define HALT_ACK_TIMEOUT_US		100000
     79
     80/* QACCEPT Register Offsets */
     81#define QACCEPT_ACCEPT_REG		0x0
     82#define QACCEPT_ACTIVE_REG		0x4
     83#define QACCEPT_DENY_REG		0x8
     84#define QACCEPT_REQ_REG			0xC
     85
     86#define QACCEPT_TIMEOUT_US		50
     87
     88/* QDSP6SS_RESET */
     89#define Q6SS_STOP_CORE			BIT(0)
     90#define Q6SS_CORE_ARES			BIT(1)
     91#define Q6SS_BUS_ARES_ENABLE		BIT(2)
     92
     93/* QDSP6SS CBCR */
     94#define Q6SS_CBCR_CLKEN			BIT(0)
     95#define Q6SS_CBCR_CLKOFF		BIT(31)
     96#define Q6SS_CBCR_TIMEOUT_US		200
     97
     98/* QDSP6SS_GFMUX_CTL */
     99#define Q6SS_CLK_ENABLE			BIT(1)
    100
    101/* QDSP6SS_PWR_CTL */
    102#define Q6SS_L2DATA_SLP_NRET_N_0	BIT(0)
    103#define Q6SS_L2DATA_SLP_NRET_N_1	BIT(1)
    104#define Q6SS_L2DATA_SLP_NRET_N_2	BIT(2)
    105#define Q6SS_L2TAG_SLP_NRET_N		BIT(16)
    106#define Q6SS_ETB_SLP_NRET_N		BIT(17)
    107#define Q6SS_L2DATA_STBY_N		BIT(18)
    108#define Q6SS_SLP_RET_N			BIT(19)
    109#define Q6SS_CLAMP_IO			BIT(20)
    110#define QDSS_BHS_ON			BIT(21)
    111#define QDSS_LDO_BYP			BIT(22)
    112
    113/* QDSP6v56 parameters */
    114#define QDSP6v56_LDO_BYP		BIT(25)
    115#define QDSP6v56_BHS_ON		BIT(24)
    116#define QDSP6v56_CLAMP_WL		BIT(21)
    117#define QDSP6v56_CLAMP_QMC_MEM		BIT(22)
    118#define QDSP6SS_XO_CBCR		0x0038
    119#define QDSP6SS_ACC_OVERRIDE_VAL		0x20
    120
    121/* QDSP6v65 parameters */
    122#define QDSP6SS_CORE_CBCR		0x20
    123#define QDSP6SS_SLEEP                   0x3C
    124#define QDSP6SS_BOOT_CORE_START         0x400
    125#define QDSP6SS_BOOT_CMD                0x404
    126#define BOOT_FSM_TIMEOUT                10000
    127
    128struct reg_info {
    129	struct regulator *reg;
    130	int uV;
    131	int uA;
    132};
    133
    134struct qcom_mss_reg_res {
    135	const char *supply;
    136	int uV;
    137	int uA;
    138};
    139
    140struct rproc_hexagon_res {
    141	const char *hexagon_mba_image;
    142	struct qcom_mss_reg_res *proxy_supply;
    143	struct qcom_mss_reg_res *fallback_proxy_supply;
    144	struct qcom_mss_reg_res *active_supply;
    145	char **proxy_clk_names;
    146	char **reset_clk_names;
    147	char **active_clk_names;
    148	char **proxy_pd_names;
    149	int version;
    150	bool need_mem_protection;
    151	bool has_alt_reset;
    152	bool has_mba_logs;
    153	bool has_spare_reg;
    154	bool has_qaccept_regs;
    155	bool has_ext_cntl_regs;
    156	bool has_vq6;
    157};
    158
    159struct q6v5 {
    160	struct device *dev;
    161	struct rproc *rproc;
    162
    163	void __iomem *reg_base;
    164	void __iomem *rmb_base;
    165
    166	struct regmap *halt_map;
    167	struct regmap *conn_map;
    168
    169	u32 halt_q6;
    170	u32 halt_modem;
    171	u32 halt_nc;
    172	u32 halt_vq6;
    173	u32 conn_box;
    174
    175	u32 qaccept_mdm;
    176	u32 qaccept_cx;
    177	u32 qaccept_axi;
    178
    179	u32 axim1_clk_off;
    180	u32 crypto_clk_off;
    181	u32 force_clk_on;
    182	u32 rscc_disable;
    183
    184	struct reset_control *mss_restart;
    185	struct reset_control *pdc_reset;
    186
    187	struct qcom_q6v5 q6v5;
    188
    189	struct clk *active_clks[8];
    190	struct clk *reset_clks[4];
    191	struct clk *proxy_clks[4];
    192	struct device *proxy_pds[3];
    193	int active_clk_count;
    194	int reset_clk_count;
    195	int proxy_clk_count;
    196	int proxy_pd_count;
    197
    198	struct reg_info active_regs[1];
    199	struct reg_info proxy_regs[1];
    200	struct reg_info fallback_proxy_regs[2];
    201	int active_reg_count;
    202	int proxy_reg_count;
    203	int fallback_proxy_reg_count;
    204
    205	bool dump_mba_loaded;
    206	size_t current_dump_size;
    207	size_t total_dump_size;
    208
    209	phys_addr_t mba_phys;
    210	size_t mba_size;
    211	size_t dp_size;
    212
    213	phys_addr_t mpss_phys;
    214	phys_addr_t mpss_reloc;
    215	size_t mpss_size;
    216
    217	struct qcom_rproc_glink glink_subdev;
    218	struct qcom_rproc_subdev smd_subdev;
    219	struct qcom_rproc_ssr ssr_subdev;
    220	struct qcom_sysmon *sysmon;
    221	struct platform_device *bam_dmux;
    222	bool need_mem_protection;
    223	bool has_alt_reset;
    224	bool has_mba_logs;
    225	bool has_spare_reg;
    226	bool has_qaccept_regs;
    227	bool has_ext_cntl_regs;
    228	bool has_vq6;
    229	int mpss_perm;
    230	int mba_perm;
    231	const char *hexagon_mdt_image;
    232	int version;
    233};
    234
    235enum {
    236	MSS_MSM8916,
    237	MSS_MSM8974,
    238	MSS_MSM8996,
    239	MSS_MSM8998,
    240	MSS_SC7180,
    241	MSS_SC7280,
    242	MSS_SDM845,
    243};
    244
    245static int q6v5_regulator_init(struct device *dev, struct reg_info *regs,
    246			       const struct qcom_mss_reg_res *reg_res)
    247{
    248	int rc;
    249	int i;
    250
    251	if (!reg_res)
    252		return 0;
    253
    254	for (i = 0; reg_res[i].supply; i++) {
    255		regs[i].reg = devm_regulator_get(dev, reg_res[i].supply);
    256		if (IS_ERR(regs[i].reg)) {
    257			rc = PTR_ERR(regs[i].reg);
    258			if (rc != -EPROBE_DEFER)
    259				dev_err(dev, "Failed to get %s\n regulator",
    260					reg_res[i].supply);
    261			return rc;
    262		}
    263
    264		regs[i].uV = reg_res[i].uV;
    265		regs[i].uA = reg_res[i].uA;
    266	}
    267
    268	return i;
    269}
    270
    271static int q6v5_regulator_enable(struct q6v5 *qproc,
    272				 struct reg_info *regs, int count)
    273{
    274	int ret;
    275	int i;
    276
    277	for (i = 0; i < count; i++) {
    278		if (regs[i].uV > 0) {
    279			ret = regulator_set_voltage(regs[i].reg,
    280					regs[i].uV, INT_MAX);
    281			if (ret) {
    282				dev_err(qproc->dev,
    283					"Failed to request voltage for %d.\n",
    284						i);
    285				goto err;
    286			}
    287		}
    288
    289		if (regs[i].uA > 0) {
    290			ret = regulator_set_load(regs[i].reg,
    291						 regs[i].uA);
    292			if (ret < 0) {
    293				dev_err(qproc->dev,
    294					"Failed to set regulator mode\n");
    295				goto err;
    296			}
    297		}
    298
    299		ret = regulator_enable(regs[i].reg);
    300		if (ret) {
    301			dev_err(qproc->dev, "Regulator enable failed\n");
    302			goto err;
    303		}
    304	}
    305
    306	return 0;
    307err:
    308	for (; i >= 0; i--) {
    309		if (regs[i].uV > 0)
    310			regulator_set_voltage(regs[i].reg, 0, INT_MAX);
    311
    312		if (regs[i].uA > 0)
    313			regulator_set_load(regs[i].reg, 0);
    314
    315		regulator_disable(regs[i].reg);
    316	}
    317
    318	return ret;
    319}
    320
    321static void q6v5_regulator_disable(struct q6v5 *qproc,
    322				   struct reg_info *regs, int count)
    323{
    324	int i;
    325
    326	for (i = 0; i < count; i++) {
    327		if (regs[i].uV > 0)
    328			regulator_set_voltage(regs[i].reg, 0, INT_MAX);
    329
    330		if (regs[i].uA > 0)
    331			regulator_set_load(regs[i].reg, 0);
    332
    333		regulator_disable(regs[i].reg);
    334	}
    335}
    336
    337static int q6v5_clk_enable(struct device *dev,
    338			   struct clk **clks, int count)
    339{
    340	int rc;
    341	int i;
    342
    343	for (i = 0; i < count; i++) {
    344		rc = clk_prepare_enable(clks[i]);
    345		if (rc) {
    346			dev_err(dev, "Clock enable failed\n");
    347			goto err;
    348		}
    349	}
    350
    351	return 0;
    352err:
    353	for (i--; i >= 0; i--)
    354		clk_disable_unprepare(clks[i]);
    355
    356	return rc;
    357}
    358
    359static void q6v5_clk_disable(struct device *dev,
    360			     struct clk **clks, int count)
    361{
    362	int i;
    363
    364	for (i = 0; i < count; i++)
    365		clk_disable_unprepare(clks[i]);
    366}
    367
    368static int q6v5_pds_enable(struct q6v5 *qproc, struct device **pds,
    369			   size_t pd_count)
    370{
    371	int ret;
    372	int i;
    373
    374	for (i = 0; i < pd_count; i++) {
    375		dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
    376		ret = pm_runtime_get_sync(pds[i]);
    377		if (ret < 0) {
    378			pm_runtime_put_noidle(pds[i]);
    379			dev_pm_genpd_set_performance_state(pds[i], 0);
    380			goto unroll_pd_votes;
    381		}
    382	}
    383
    384	return 0;
    385
    386unroll_pd_votes:
    387	for (i--; i >= 0; i--) {
    388		dev_pm_genpd_set_performance_state(pds[i], 0);
    389		pm_runtime_put(pds[i]);
    390	}
    391
    392	return ret;
    393}
    394
    395static void q6v5_pds_disable(struct q6v5 *qproc, struct device **pds,
    396			     size_t pd_count)
    397{
    398	int i;
    399
    400	for (i = 0; i < pd_count; i++) {
    401		dev_pm_genpd_set_performance_state(pds[i], 0);
    402		pm_runtime_put(pds[i]);
    403	}
    404}
    405
    406static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm,
    407				   bool local, bool remote, phys_addr_t addr,
    408				   size_t size)
    409{
    410	struct qcom_scm_vmperm next[2];
    411	int perms = 0;
    412
    413	if (!qproc->need_mem_protection)
    414		return 0;
    415
    416	if (local == !!(*current_perm & BIT(QCOM_SCM_VMID_HLOS)) &&
    417	    remote == !!(*current_perm & BIT(QCOM_SCM_VMID_MSS_MSA)))
    418		return 0;
    419
    420	if (local) {
    421		next[perms].vmid = QCOM_SCM_VMID_HLOS;
    422		next[perms].perm = QCOM_SCM_PERM_RWX;
    423		perms++;
    424	}
    425
    426	if (remote) {
    427		next[perms].vmid = QCOM_SCM_VMID_MSS_MSA;
    428		next[perms].perm = QCOM_SCM_PERM_RW;
    429		perms++;
    430	}
    431
    432	return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K),
    433				   current_perm, next, perms);
    434}
    435
    436static void q6v5_debug_policy_load(struct q6v5 *qproc, void *mba_region)
    437{
    438	const struct firmware *dp_fw;
    439
    440	if (request_firmware_direct(&dp_fw, "msadp", qproc->dev))
    441		return;
    442
    443	if (SZ_1M + dp_fw->size <= qproc->mba_size) {
    444		memcpy(mba_region + SZ_1M, dp_fw->data, dp_fw->size);
    445		qproc->dp_size = dp_fw->size;
    446	}
    447
    448	release_firmware(dp_fw);
    449}
    450
    451static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
    452{
    453	struct q6v5 *qproc = rproc->priv;
    454	void *mba_region;
    455
    456	/* MBA is restricted to a maximum size of 1M */
    457	if (fw->size > qproc->mba_size || fw->size > SZ_1M) {
    458		dev_err(qproc->dev, "MBA firmware load failed\n");
    459		return -EINVAL;
    460	}
    461
    462	mba_region = memremap(qproc->mba_phys, qproc->mba_size, MEMREMAP_WC);
    463	if (!mba_region) {
    464		dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
    465			&qproc->mba_phys, qproc->mba_size);
    466		return -EBUSY;
    467	}
    468
    469	memcpy(mba_region, fw->data, fw->size);
    470	q6v5_debug_policy_load(qproc, mba_region);
    471	memunmap(mba_region);
    472
    473	return 0;
    474}
    475
    476static int q6v5_reset_assert(struct q6v5 *qproc)
    477{
    478	int ret;
    479
    480	if (qproc->has_alt_reset) {
    481		reset_control_assert(qproc->pdc_reset);
    482		ret = reset_control_reset(qproc->mss_restart);
    483		reset_control_deassert(qproc->pdc_reset);
    484	} else if (qproc->has_spare_reg) {
    485		/*
    486		 * When the AXI pipeline is being reset with the Q6 modem partly
    487		 * operational there is possibility of AXI valid signal to
    488		 * glitch, leading to spurious transactions and Q6 hangs. A work
    489		 * around is employed by asserting the AXI_GATING_VALID_OVERRIDE
    490		 * BIT before triggering Q6 MSS reset. AXI_GATING_VALID_OVERRIDE
    491		 * is withdrawn post MSS assert followed by a MSS deassert,
    492		 * while holding the PDC reset.
    493		 */
    494		reset_control_assert(qproc->pdc_reset);
    495		regmap_update_bits(qproc->conn_map, qproc->conn_box,
    496				   AXI_GATING_VALID_OVERRIDE, 1);
    497		reset_control_assert(qproc->mss_restart);
    498		reset_control_deassert(qproc->pdc_reset);
    499		regmap_update_bits(qproc->conn_map, qproc->conn_box,
    500				   AXI_GATING_VALID_OVERRIDE, 0);
    501		ret = reset_control_deassert(qproc->mss_restart);
    502	} else if (qproc->has_ext_cntl_regs) {
    503		regmap_write(qproc->conn_map, qproc->rscc_disable, 0);
    504		reset_control_assert(qproc->pdc_reset);
    505		reset_control_assert(qproc->mss_restart);
    506		reset_control_deassert(qproc->pdc_reset);
    507		ret = reset_control_deassert(qproc->mss_restart);
    508	} else {
    509		ret = reset_control_assert(qproc->mss_restart);
    510	}
    511
    512	return ret;
    513}
    514
    515static int q6v5_reset_deassert(struct q6v5 *qproc)
    516{
    517	int ret;
    518
    519	if (qproc->has_alt_reset) {
    520		reset_control_assert(qproc->pdc_reset);
    521		writel(1, qproc->rmb_base + RMB_MBA_ALT_RESET);
    522		ret = reset_control_reset(qproc->mss_restart);
    523		writel(0, qproc->rmb_base + RMB_MBA_ALT_RESET);
    524		reset_control_deassert(qproc->pdc_reset);
    525	} else if (qproc->has_spare_reg || qproc->has_ext_cntl_regs) {
    526		ret = reset_control_reset(qproc->mss_restart);
    527	} else {
    528		ret = reset_control_deassert(qproc->mss_restart);
    529	}
    530
    531	return ret;
    532}
    533
    534static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms)
    535{
    536	unsigned long timeout;
    537	s32 val;
    538
    539	timeout = jiffies + msecs_to_jiffies(ms);
    540	for (;;) {
    541		val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG);
    542		if (val)
    543			break;
    544
    545		if (time_after(jiffies, timeout))
    546			return -ETIMEDOUT;
    547
    548		msleep(1);
    549	}
    550
    551	return val;
    552}
    553
    554static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms)
    555{
    556
    557	unsigned long timeout;
    558	s32 val;
    559
    560	timeout = jiffies + msecs_to_jiffies(ms);
    561	for (;;) {
    562		val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
    563		if (val < 0)
    564			break;
    565
    566		if (!status && val)
    567			break;
    568		else if (status && val == status)
    569			break;
    570
    571		if (time_after(jiffies, timeout))
    572			return -ETIMEDOUT;
    573
    574		msleep(1);
    575	}
    576
    577	return val;
    578}
    579
    580static void q6v5_dump_mba_logs(struct q6v5 *qproc)
    581{
    582	struct rproc *rproc = qproc->rproc;
    583	void *data;
    584	void *mba_region;
    585
    586	if (!qproc->has_mba_logs)
    587		return;
    588
    589	if (q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false, qproc->mba_phys,
    590				    qproc->mba_size))
    591		return;
    592
    593	mba_region = memremap(qproc->mba_phys, qproc->mba_size, MEMREMAP_WC);
    594	if (!mba_region)
    595		return;
    596
    597	data = vmalloc(MBA_LOG_SIZE);
    598	if (data) {
    599		memcpy(data, mba_region, MBA_LOG_SIZE);
    600		dev_coredumpv(&rproc->dev, data, MBA_LOG_SIZE, GFP_KERNEL);
    601	}
    602	memunmap(mba_region);
    603}
    604
    605static int q6v5proc_reset(struct q6v5 *qproc)
    606{
    607	u32 val;
    608	int ret;
    609	int i;
    610
    611	if (qproc->version == MSS_SDM845) {
    612		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
    613		val |= Q6SS_CBCR_CLKEN;
    614		writel(val, qproc->reg_base + QDSP6SS_SLEEP);
    615
    616		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
    617					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
    618					 Q6SS_CBCR_TIMEOUT_US);
    619		if (ret) {
    620			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
    621			return -ETIMEDOUT;
    622		}
    623
    624		/* De-assert QDSP6 stop core */
    625		writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);
    626		/* Trigger boot FSM */
    627		writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);
    628
    629		ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
    630				val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
    631		if (ret) {
    632			dev_err(qproc->dev, "Boot FSM failed to complete.\n");
    633			/* Reset the modem so that boot FSM is in reset state */
    634			q6v5_reset_deassert(qproc);
    635			return ret;
    636		}
    637
    638		goto pbl_wait;
    639	} else if (qproc->version == MSS_SC7180 || qproc->version == MSS_SC7280) {
    640		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
    641		val |= Q6SS_CBCR_CLKEN;
    642		writel(val, qproc->reg_base + QDSP6SS_SLEEP);
    643
    644		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
    645					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
    646					 Q6SS_CBCR_TIMEOUT_US);
    647		if (ret) {
    648			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
    649			return -ETIMEDOUT;
    650		}
    651
    652		/* Turn on the XO clock needed for PLL setup */
    653		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
    654		val |= Q6SS_CBCR_CLKEN;
    655		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);
    656
    657		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
    658					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
    659					 Q6SS_CBCR_TIMEOUT_US);
    660		if (ret) {
    661			dev_err(qproc->dev, "QDSP6SS XO clock timed out\n");
    662			return -ETIMEDOUT;
    663		}
    664
    665		/* Configure Q6 core CBCR to auto-enable after reset sequence */
    666		val = readl(qproc->reg_base + QDSP6SS_CORE_CBCR);
    667		val |= Q6SS_CBCR_CLKEN;
    668		writel(val, qproc->reg_base + QDSP6SS_CORE_CBCR);
    669
    670		/* De-assert the Q6 stop core signal */
    671		writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);
    672
    673		/* Wait for 10 us for any staggering logic to settle */
    674		usleep_range(10, 20);
    675
    676		/* Trigger the boot FSM to start the Q6 out-of-reset sequence */
    677		writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);
    678
    679		/* Poll the MSS_STATUS for FSM completion */
    680		ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
    681					 val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
    682		if (ret) {
    683			dev_err(qproc->dev, "Boot FSM failed to complete.\n");
    684			/* Reset the modem so that boot FSM is in reset state */
    685			q6v5_reset_deassert(qproc);
    686			return ret;
    687		}
    688		goto pbl_wait;
    689	} else if (qproc->version == MSS_MSM8996 ||
    690		   qproc->version == MSS_MSM8998) {
    691		int mem_pwr_ctl;
    692
    693		/* Override the ACC value if required */
    694		writel(QDSP6SS_ACC_OVERRIDE_VAL,
    695		       qproc->reg_base + QDSP6SS_STRAP_ACC);
    696
    697		/* Assert resets, stop core */
    698		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
    699		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
    700		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
    701
    702		/* BHS require xo cbcr to be enabled */
    703		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
    704		val |= Q6SS_CBCR_CLKEN;
    705		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);
    706
    707		/* Read CLKOFF bit to go low indicating CLK is enabled */
    708		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
    709					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
    710					 Q6SS_CBCR_TIMEOUT_US);
    711		if (ret) {
    712			dev_err(qproc->dev,
    713				"xo cbcr enabling timed out (rc:%d)\n", ret);
    714			return ret;
    715		}
    716		/* Enable power block headswitch and wait for it to stabilize */
    717		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
    718		val |= QDSP6v56_BHS_ON;
    719		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
    720		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
    721		udelay(1);
    722
    723		/* Put LDO in bypass mode */
    724		val |= QDSP6v56_LDO_BYP;
    725		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
    726
    727		/* Deassert QDSP6 compiler memory clamp */
    728		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
    729		val &= ~QDSP6v56_CLAMP_QMC_MEM;
    730		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
    731
    732		/* Deassert memory peripheral sleep and L2 memory standby */
    733		val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N;
    734		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
    735
    736		/* Turn on L1, L2, ETB and JU memories 1 at a time */
    737		if (qproc->version == MSS_MSM8996) {
    738			mem_pwr_ctl = QDSP6SS_MEM_PWR_CTL;
    739			i = 19;
    740		} else {
    741			/* MSS_MSM8998 */
    742			mem_pwr_ctl = QDSP6V6SS_MEM_PWR_CTL;
    743			i = 28;
    744		}
    745		val = readl(qproc->reg_base + mem_pwr_ctl);
    746		for (; i >= 0; i--) {
    747			val |= BIT(i);
    748			writel(val, qproc->reg_base + mem_pwr_ctl);
    749			/*
    750			 * Read back value to ensure the write is done then
    751			 * wait for 1us for both memory peripheral and data
    752			 * array to turn on.
    753			 */
    754			val |= readl(qproc->reg_base + mem_pwr_ctl);
    755			udelay(1);
    756		}
    757		/* Remove word line clamp */
    758		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
    759		val &= ~QDSP6v56_CLAMP_WL;
    760		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
    761	} else {
    762		/* Assert resets, stop core */
    763		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
    764		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
    765		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
    766
    767		/* Enable power block headswitch and wait for it to stabilize */
    768		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
    769		val |= QDSS_BHS_ON | QDSS_LDO_BYP;
    770		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
    771		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
    772		udelay(1);
    773		/*
    774		 * Turn on memories. L2 banks should be done individually
    775		 * to minimize inrush current.
    776		 */
    777		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
    778		val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
    779			Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
    780		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
    781		val |= Q6SS_L2DATA_SLP_NRET_N_2;
    782		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
    783		val |= Q6SS_L2DATA_SLP_NRET_N_1;
    784		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
    785		val |= Q6SS_L2DATA_SLP_NRET_N_0;
    786		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
    787	}
    788	/* Remove IO clamp */
    789	val &= ~Q6SS_CLAMP_IO;
    790	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
    791
    792	/* Bring core out of reset */
    793	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
    794	val &= ~Q6SS_CORE_ARES;
    795	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
    796
    797	/* Turn on core clock */
    798	val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
    799	val |= Q6SS_CLK_ENABLE;
    800	writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
    801
    802	/* Start core execution */
    803	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
    804	val &= ~Q6SS_STOP_CORE;
    805	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
    806
    807pbl_wait:
    808	/* Wait for PBL status */
    809	ret = q6v5_rmb_pbl_wait(qproc, 1000);
    810	if (ret == -ETIMEDOUT) {
    811		dev_err(qproc->dev, "PBL boot timed out\n");
    812	} else if (ret != RMB_PBL_SUCCESS) {
    813		dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret);
    814		ret = -EINVAL;
    815	} else {
    816		ret = 0;
    817	}
    818
    819	return ret;
    820}
    821
    822static int q6v5proc_enable_qchannel(struct q6v5 *qproc, struct regmap *map, u32 offset)
    823{
    824	unsigned int val;
    825	int ret;
    826
    827	if (!qproc->has_qaccept_regs)
    828		return 0;
    829
    830	if (qproc->has_ext_cntl_regs) {
    831		regmap_write(qproc->conn_map, qproc->rscc_disable, 0);
    832		regmap_write(qproc->conn_map, qproc->force_clk_on, 1);
    833
    834		ret = regmap_read_poll_timeout(qproc->halt_map, qproc->axim1_clk_off, val,
    835					       !val, 1, Q6SS_CBCR_TIMEOUT_US);
    836		if (ret) {
    837			dev_err(qproc->dev, "failed to enable axim1 clock\n");
    838			return -ETIMEDOUT;
    839		}
    840	}
    841
    842	regmap_write(map, offset + QACCEPT_REQ_REG, 1);
    843
    844	/* Wait for accept */
    845	ret = regmap_read_poll_timeout(map, offset + QACCEPT_ACCEPT_REG, val, val, 5,
    846				       QACCEPT_TIMEOUT_US);
    847	if (ret) {
    848		dev_err(qproc->dev, "qchannel enable failed\n");
    849		return -ETIMEDOUT;
    850	}
    851
    852	return 0;
    853}
    854
    855static void q6v5proc_disable_qchannel(struct q6v5 *qproc, struct regmap *map, u32 offset)
    856{
    857	int ret;
    858	unsigned int val, retry;
    859	unsigned int nretry = 10;
    860	bool takedown_complete = false;
    861
    862	if (!qproc->has_qaccept_regs)
    863		return;
    864
    865	while (!takedown_complete && nretry) {
    866		nretry--;
    867
    868		/* Wait for active transactions to complete */
    869		regmap_read_poll_timeout(map, offset + QACCEPT_ACTIVE_REG, val, !val, 5,
    870					 QACCEPT_TIMEOUT_US);
    871
    872		/* Request Q-channel transaction takedown */
    873		regmap_write(map, offset + QACCEPT_REQ_REG, 0);
    874
    875		/*
    876		 * If the request is denied, reset the Q-channel takedown request,
    877		 * wait for active transactions to complete and retry takedown.
    878		 */
    879		retry = 10;
    880		while (retry) {
    881			usleep_range(5, 10);
    882			retry--;
    883			ret = regmap_read(map, offset + QACCEPT_DENY_REG, &val);
    884			if (!ret && val) {
    885				regmap_write(map, offset + QACCEPT_REQ_REG, 1);
    886				break;
    887			}
    888
    889			ret = regmap_read(map, offset + QACCEPT_ACCEPT_REG, &val);
    890			if (!ret && !val) {
    891				takedown_complete = true;
    892				break;
    893			}
    894		}
    895
    896		if (!retry)
    897			break;
    898	}
    899
    900	/* Rely on mss_restart to clear out pending transactions on takedown failure */
    901	if (!takedown_complete)
    902		dev_err(qproc->dev, "qchannel takedown failed\n");
    903}
    904
    905static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
    906				   struct regmap *halt_map,
    907				   u32 offset)
    908{
    909	unsigned int val;
    910	int ret;
    911
    912	/* Check if we're already idle */
    913	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
    914	if (!ret && val)
    915		return;
    916
    917	/* Assert halt request */
    918	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);
    919
    920	/* Wait for halt */
    921	regmap_read_poll_timeout(halt_map, offset + AXI_HALTACK_REG, val,
    922				 val, 1000, HALT_ACK_TIMEOUT_US);
    923
    924	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
    925	if (ret || !val)
    926		dev_err(qproc->dev, "port failed halt\n");
    927
    928	/* Clear halt request (port will remain halted until reset) */
    929	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
    930}
    931
    932static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw,
    933				const char *fw_name)
    934{
    935	unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
    936	dma_addr_t phys;
    937	void *metadata;
    938	int mdata_perm;
    939	int xferop_ret;
    940	size_t size;
    941	void *ptr;
    942	int ret;
    943
    944	metadata = qcom_mdt_read_metadata(fw, &size, fw_name, qproc->dev);
    945	if (IS_ERR(metadata))
    946		return PTR_ERR(metadata);
    947
    948	ptr = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs);
    949	if (!ptr) {
    950		kfree(metadata);
    951		dev_err(qproc->dev, "failed to allocate mdt buffer\n");
    952		return -ENOMEM;
    953	}
    954
    955	memcpy(ptr, metadata, size);
    956
    957	/* Hypervisor mapping to access metadata by modem */
    958	mdata_perm = BIT(QCOM_SCM_VMID_HLOS);
    959	ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, false, true,
    960				      phys, size);
    961	if (ret) {
    962		dev_err(qproc->dev,
    963			"assigning Q6 access to metadata failed: %d\n", ret);
    964		ret = -EAGAIN;
    965		goto free_dma_attrs;
    966	}
    967
    968	writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG);
    969	writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
    970
    971	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000);
    972	if (ret == -ETIMEDOUT)
    973		dev_err(qproc->dev, "MPSS header authentication timed out\n");
    974	else if (ret < 0)
    975		dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);
    976
    977	/* Metadata authentication done, remove modem access */
    978	xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, true, false,
    979					     phys, size);
    980	if (xferop_ret)
    981		dev_warn(qproc->dev,
    982			 "mdt buffer not reclaimed system may become unstable\n");
    983
    984free_dma_attrs:
    985	dma_free_attrs(qproc->dev, size, ptr, phys, dma_attrs);
    986	kfree(metadata);
    987
    988	return ret < 0 ? ret : 0;
    989}
    990
    991static bool q6v5_phdr_valid(const struct elf32_phdr *phdr)
    992{
    993	if (phdr->p_type != PT_LOAD)
    994		return false;
    995
    996	if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
    997		return false;
    998
    999	if (!phdr->p_memsz)
   1000		return false;
   1001
   1002	return true;
   1003}
   1004
   1005static int q6v5_mba_load(struct q6v5 *qproc)
   1006{
   1007	int ret;
   1008	int xfermemop_ret;
   1009	bool mba_load_err = false;
   1010
   1011	ret = qcom_q6v5_prepare(&qproc->q6v5);
   1012	if (ret)
   1013		return ret;
   1014
   1015	ret = q6v5_pds_enable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
   1016	if (ret < 0) {
   1017		dev_err(qproc->dev, "failed to enable proxy power domains\n");
   1018		goto disable_irqs;
   1019	}
   1020
   1021	ret = q6v5_regulator_enable(qproc, qproc->fallback_proxy_regs,
   1022				    qproc->fallback_proxy_reg_count);
   1023	if (ret) {
   1024		dev_err(qproc->dev, "failed to enable fallback proxy supplies\n");
   1025		goto disable_proxy_pds;
   1026	}
   1027
   1028	ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
   1029				    qproc->proxy_reg_count);
   1030	if (ret) {
   1031		dev_err(qproc->dev, "failed to enable proxy supplies\n");
   1032		goto disable_fallback_proxy_reg;
   1033	}
   1034
   1035	ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
   1036			      qproc->proxy_clk_count);
   1037	if (ret) {
   1038		dev_err(qproc->dev, "failed to enable proxy clocks\n");
   1039		goto disable_proxy_reg;
   1040	}
   1041
   1042	ret = q6v5_regulator_enable(qproc, qproc->active_regs,
   1043				    qproc->active_reg_count);
   1044	if (ret) {
   1045		dev_err(qproc->dev, "failed to enable supplies\n");
   1046		goto disable_proxy_clk;
   1047	}
   1048
   1049	ret = q6v5_clk_enable(qproc->dev, qproc->reset_clks,
   1050			      qproc->reset_clk_count);
   1051	if (ret) {
   1052		dev_err(qproc->dev, "failed to enable reset clocks\n");
   1053		goto disable_vdd;
   1054	}
   1055
   1056	ret = q6v5_reset_deassert(qproc);
   1057	if (ret) {
   1058		dev_err(qproc->dev, "failed to deassert mss restart\n");
   1059		goto disable_reset_clks;
   1060	}
   1061
   1062	ret = q6v5_clk_enable(qproc->dev, qproc->active_clks,
   1063			      qproc->active_clk_count);
   1064	if (ret) {
   1065		dev_err(qproc->dev, "failed to enable clocks\n");
   1066		goto assert_reset;
   1067	}
   1068
   1069	ret = q6v5proc_enable_qchannel(qproc, qproc->halt_map, qproc->qaccept_axi);
   1070	if (ret) {
   1071		dev_err(qproc->dev, "failed to enable axi bridge\n");
   1072		goto disable_active_clks;
   1073	}
   1074
   1075	/*
   1076	 * Some versions of the MBA firmware will upon boot wipe the MPSS region as well, so provide
   1077	 * the Q6 access to this region.
   1078	 */
   1079	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, true,
   1080				      qproc->mpss_phys, qproc->mpss_size);
   1081	if (ret) {
   1082		dev_err(qproc->dev, "assigning Q6 access to mpss memory failed: %d\n", ret);
   1083		goto disable_active_clks;
   1084	}
   1085
   1086	/* Assign MBA image access in DDR to q6 */
   1087	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false, true,
   1088				      qproc->mba_phys, qproc->mba_size);
   1089	if (ret) {
   1090		dev_err(qproc->dev,
   1091			"assigning Q6 access to mba memory failed: %d\n", ret);
   1092		goto disable_active_clks;
   1093	}
   1094
   1095	writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);
   1096	if (qproc->dp_size) {
   1097		writel(qproc->mba_phys + SZ_1M, qproc->rmb_base + RMB_PMI_CODE_START_REG);
   1098		writel(qproc->dp_size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
   1099	}
   1100
   1101	ret = q6v5proc_reset(qproc);
   1102	if (ret)
   1103		goto reclaim_mba;
   1104
   1105	ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
   1106	if (ret == -ETIMEDOUT) {
   1107		dev_err(qproc->dev, "MBA boot timed out\n");
   1108		goto halt_axi_ports;
   1109	} else if (ret != RMB_MBA_XPU_UNLOCKED &&
   1110		   ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
   1111		dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
   1112		ret = -EINVAL;
   1113		goto halt_axi_ports;
   1114	}
   1115
   1116	qproc->dump_mba_loaded = true;
   1117	return 0;
   1118
   1119halt_axi_ports:
   1120	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
   1121	if (qproc->has_vq6)
   1122		q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_vq6);
   1123	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
   1124	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
   1125	q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_mdm);
   1126	q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_cx);
   1127	q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_axi);
   1128	mba_load_err = true;
   1129reclaim_mba:
   1130	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
   1131						false, qproc->mba_phys,
   1132						qproc->mba_size);
   1133	if (xfermemop_ret) {
   1134		dev_err(qproc->dev,
   1135			"Failed to reclaim mba buffer, system may become unstable\n");
   1136	} else if (mba_load_err) {
   1137		q6v5_dump_mba_logs(qproc);
   1138	}
   1139
   1140disable_active_clks:
   1141	q6v5_clk_disable(qproc->dev, qproc->active_clks,
   1142			 qproc->active_clk_count);
   1143assert_reset:
   1144	q6v5_reset_assert(qproc);
   1145disable_reset_clks:
   1146	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
   1147			 qproc->reset_clk_count);
   1148disable_vdd:
   1149	q6v5_regulator_disable(qproc, qproc->active_regs,
   1150			       qproc->active_reg_count);
   1151disable_proxy_clk:
   1152	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
   1153			 qproc->proxy_clk_count);
   1154disable_proxy_reg:
   1155	q6v5_regulator_disable(qproc, qproc->proxy_regs,
   1156			       qproc->proxy_reg_count);
   1157disable_fallback_proxy_reg:
   1158	q6v5_regulator_disable(qproc, qproc->fallback_proxy_regs,
   1159			       qproc->fallback_proxy_reg_count);
   1160disable_proxy_pds:
   1161	q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
   1162disable_irqs:
   1163	qcom_q6v5_unprepare(&qproc->q6v5);
   1164
   1165	return ret;
   1166}
   1167
   1168static void q6v5_mba_reclaim(struct q6v5 *qproc)
   1169{
   1170	int ret;
   1171	u32 val;
   1172
   1173	qproc->dump_mba_loaded = false;
   1174	qproc->dp_size = 0;
   1175
   1176	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
   1177	if (qproc->has_vq6)
   1178		q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_vq6);
   1179	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
   1180	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
   1181	if (qproc->version == MSS_MSM8996) {
   1182		/*
   1183		 * To avoid high MX current during LPASS/MSS restart.
   1184		 */
   1185		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
   1186		val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL |
   1187			QDSP6v56_CLAMP_QMC_MEM;
   1188		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
   1189	}
   1190
   1191	if (qproc->has_ext_cntl_regs) {
   1192		regmap_write(qproc->conn_map, qproc->rscc_disable, 1);
   1193
   1194		ret = regmap_read_poll_timeout(qproc->halt_map, qproc->axim1_clk_off, val,
   1195					       !val, 1, Q6SS_CBCR_TIMEOUT_US);
   1196		if (ret)
   1197			dev_err(qproc->dev, "failed to enable axim1 clock\n");
   1198
   1199		ret = regmap_read_poll_timeout(qproc->halt_map, qproc->crypto_clk_off, val,
   1200					       !val, 1, Q6SS_CBCR_TIMEOUT_US);
   1201		if (ret)
   1202			dev_err(qproc->dev, "failed to enable crypto clock\n");
   1203	}
   1204
   1205	q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_mdm);
   1206	q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_cx);
   1207	q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_axi);
   1208
   1209	q6v5_reset_assert(qproc);
   1210
   1211	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
   1212			 qproc->reset_clk_count);
   1213	q6v5_clk_disable(qproc->dev, qproc->active_clks,
   1214			 qproc->active_clk_count);
   1215	q6v5_regulator_disable(qproc, qproc->active_regs,
   1216			       qproc->active_reg_count);
   1217
   1218	/* In case of failure or coredump scenario where reclaiming MBA memory
   1219	 * could not happen reclaim it here.
   1220	 */
   1221	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false,
   1222				      qproc->mba_phys,
   1223				      qproc->mba_size);
   1224	WARN_ON(ret);
   1225
   1226	ret = qcom_q6v5_unprepare(&qproc->q6v5);
   1227	if (ret) {
   1228		q6v5_pds_disable(qproc, qproc->proxy_pds,
   1229				 qproc->proxy_pd_count);
   1230		q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
   1231				 qproc->proxy_clk_count);
   1232		q6v5_regulator_disable(qproc, qproc->fallback_proxy_regs,
   1233				       qproc->fallback_proxy_reg_count);
   1234		q6v5_regulator_disable(qproc, qproc->proxy_regs,
   1235				       qproc->proxy_reg_count);
   1236	}
   1237}
   1238
   1239static int q6v5_reload_mba(struct rproc *rproc)
   1240{
   1241	struct q6v5 *qproc = rproc->priv;
   1242	const struct firmware *fw;
   1243	int ret;
   1244
   1245	ret = request_firmware(&fw, rproc->firmware, qproc->dev);
   1246	if (ret < 0)
   1247		return ret;
   1248
   1249	q6v5_load(rproc, fw);
   1250	ret = q6v5_mba_load(qproc);
   1251	release_firmware(fw);
   1252
   1253	return ret;
   1254}
   1255
   1256static int q6v5_mpss_load(struct q6v5 *qproc)
   1257{
   1258	const struct elf32_phdr *phdrs;
   1259	const struct elf32_phdr *phdr;
   1260	const struct firmware *seg_fw;
   1261	const struct firmware *fw;
   1262	struct elf32_hdr *ehdr;
   1263	phys_addr_t mpss_reloc;
   1264	phys_addr_t boot_addr;
   1265	phys_addr_t min_addr = PHYS_ADDR_MAX;
   1266	phys_addr_t max_addr = 0;
   1267	u32 code_length;
   1268	bool relocate = false;
   1269	char *fw_name;
   1270	size_t fw_name_len;
   1271	ssize_t offset;
   1272	size_t size = 0;
   1273	void *ptr;
   1274	int ret;
   1275	int i;
   1276
   1277	fw_name_len = strlen(qproc->hexagon_mdt_image);
   1278	if (fw_name_len <= 4)
   1279		return -EINVAL;
   1280
   1281	fw_name = kstrdup(qproc->hexagon_mdt_image, GFP_KERNEL);
   1282	if (!fw_name)
   1283		return -ENOMEM;
   1284
   1285	ret = request_firmware(&fw, fw_name, qproc->dev);
   1286	if (ret < 0) {
   1287		dev_err(qproc->dev, "unable to load %s\n", fw_name);
   1288		goto out;
   1289	}
   1290
   1291	/* Initialize the RMB validator */
   1292	writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
   1293
   1294	ret = q6v5_mpss_init_image(qproc, fw, qproc->hexagon_mdt_image);
   1295	if (ret)
   1296		goto release_firmware;
   1297
   1298	ehdr = (struct elf32_hdr *)fw->data;
   1299	phdrs = (struct elf32_phdr *)(ehdr + 1);
   1300
   1301	for (i = 0; i < ehdr->e_phnum; i++) {
   1302		phdr = &phdrs[i];
   1303
   1304		if (!q6v5_phdr_valid(phdr))
   1305			continue;
   1306
   1307		if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
   1308			relocate = true;
   1309
   1310		if (phdr->p_paddr < min_addr)
   1311			min_addr = phdr->p_paddr;
   1312
   1313		if (phdr->p_paddr + phdr->p_memsz > max_addr)
   1314			max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
   1315	}
   1316
   1317	/*
   1318	 * In case of a modem subsystem restart on secure devices, the modem
   1319	 * memory can be reclaimed only after MBA is loaded.
   1320	 */
   1321	q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, false,
   1322				qproc->mpss_phys, qproc->mpss_size);
   1323
   1324	/* Share ownership between Linux and MSS, during segment loading */
   1325	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, true,
   1326				      qproc->mpss_phys, qproc->mpss_size);
   1327	if (ret) {
   1328		dev_err(qproc->dev,
   1329			"assigning Q6 access to mpss memory failed: %d\n", ret);
   1330		ret = -EAGAIN;
   1331		goto release_firmware;
   1332	}
   1333
   1334	mpss_reloc = relocate ? min_addr : qproc->mpss_phys;
   1335	qproc->mpss_reloc = mpss_reloc;
   1336	/* Load firmware segments */
   1337	for (i = 0; i < ehdr->e_phnum; i++) {
   1338		phdr = &phdrs[i];
   1339
   1340		if (!q6v5_phdr_valid(phdr))
   1341			continue;
   1342
   1343		offset = phdr->p_paddr - mpss_reloc;
   1344		if (offset < 0 || offset + phdr->p_memsz > qproc->mpss_size) {
   1345			dev_err(qproc->dev, "segment outside memory range\n");
   1346			ret = -EINVAL;
   1347			goto release_firmware;
   1348		}
   1349
   1350		if (phdr->p_filesz > phdr->p_memsz) {
   1351			dev_err(qproc->dev,
   1352				"refusing to load segment %d with p_filesz > p_memsz\n",
   1353				i);
   1354			ret = -EINVAL;
   1355			goto release_firmware;
   1356		}
   1357
   1358		ptr = memremap(qproc->mpss_phys + offset, phdr->p_memsz, MEMREMAP_WC);
   1359		if (!ptr) {
   1360			dev_err(qproc->dev,
   1361				"unable to map memory region: %pa+%zx-%x\n",
   1362				&qproc->mpss_phys, offset, phdr->p_memsz);
   1363			goto release_firmware;
   1364		}
   1365
   1366		if (phdr->p_filesz && phdr->p_offset < fw->size) {
   1367			/* Firmware is large enough to be non-split */
   1368			if (phdr->p_offset + phdr->p_filesz > fw->size) {
   1369				dev_err(qproc->dev,
   1370					"failed to load segment %d from truncated file %s\n",
   1371					i, fw_name);
   1372				ret = -EINVAL;
   1373				memunmap(ptr);
   1374				goto release_firmware;
   1375			}
   1376
   1377			memcpy(ptr, fw->data + phdr->p_offset, phdr->p_filesz);
   1378		} else if (phdr->p_filesz) {
   1379			/* Replace "xxx.xxx" with "xxx.bxx" */
   1380			sprintf(fw_name + fw_name_len - 3, "b%02d", i);
   1381			ret = request_firmware_into_buf(&seg_fw, fw_name, qproc->dev,
   1382							ptr, phdr->p_filesz);
   1383			if (ret) {
   1384				dev_err(qproc->dev, "failed to load %s\n", fw_name);
   1385				memunmap(ptr);
   1386				goto release_firmware;
   1387			}
   1388
   1389			if (seg_fw->size != phdr->p_filesz) {
   1390				dev_err(qproc->dev,
   1391					"failed to load segment %d from truncated file %s\n",
   1392					i, fw_name);
   1393				ret = -EINVAL;
   1394				release_firmware(seg_fw);
   1395				memunmap(ptr);
   1396				goto release_firmware;
   1397			}
   1398
   1399			release_firmware(seg_fw);
   1400		}
   1401
   1402		if (phdr->p_memsz > phdr->p_filesz) {
   1403			memset(ptr + phdr->p_filesz, 0,
   1404			       phdr->p_memsz - phdr->p_filesz);
   1405		}
   1406		memunmap(ptr);
   1407		size += phdr->p_memsz;
   1408
   1409		code_length = readl(qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
   1410		if (!code_length) {
   1411			boot_addr = relocate ? qproc->mpss_phys : min_addr;
   1412			writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
   1413			writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
   1414		}
   1415		writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
   1416
   1417		ret = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
   1418		if (ret < 0) {
   1419			dev_err(qproc->dev, "MPSS authentication failed: %d\n",
   1420				ret);
   1421			goto release_firmware;
   1422		}
   1423	}
   1424
   1425	/* Transfer ownership of modem ddr region to q6 */
   1426	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, true,
   1427				      qproc->mpss_phys, qproc->mpss_size);
   1428	if (ret) {
   1429		dev_err(qproc->dev,
   1430			"assigning Q6 access to mpss memory failed: %d\n", ret);
   1431		ret = -EAGAIN;
   1432		goto release_firmware;
   1433	}
   1434
   1435	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
   1436	if (ret == -ETIMEDOUT)
   1437		dev_err(qproc->dev, "MPSS authentication timed out\n");
   1438	else if (ret < 0)
   1439		dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret);
   1440
   1441	qcom_pil_info_store("modem", qproc->mpss_phys, qproc->mpss_size);
   1442
   1443release_firmware:
   1444	release_firmware(fw);
   1445out:
   1446	kfree(fw_name);
   1447
   1448	return ret < 0 ? ret : 0;
   1449}
   1450
   1451static void qcom_q6v5_dump_segment(struct rproc *rproc,
   1452				   struct rproc_dump_segment *segment,
   1453				   void *dest, size_t cp_offset, size_t size)
   1454{
   1455	int ret = 0;
   1456	struct q6v5 *qproc = rproc->priv;
   1457	int offset = segment->da - qproc->mpss_reloc;
   1458	void *ptr = NULL;
   1459
   1460	/* Unlock mba before copying segments */
   1461	if (!qproc->dump_mba_loaded) {
   1462		ret = q6v5_reload_mba(rproc);
   1463		if (!ret) {
   1464			/* Reset ownership back to Linux to copy segments */
   1465			ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
   1466						      true, false,
   1467						      qproc->mpss_phys,
   1468						      qproc->mpss_size);
   1469		}
   1470	}
   1471
   1472	if (!ret)
   1473		ptr = memremap(qproc->mpss_phys + offset + cp_offset, size, MEMREMAP_WC);
   1474
   1475	if (ptr) {
   1476		memcpy(dest, ptr, size);
   1477		memunmap(ptr);
   1478	} else {
   1479		memset(dest, 0xff, size);
   1480	}
   1481
   1482	qproc->current_dump_size += size;
   1483
   1484	/* Reclaim mba after copying segments */
   1485	if (qproc->current_dump_size == qproc->total_dump_size) {
   1486		if (qproc->dump_mba_loaded) {
   1487			/* Try to reset ownership back to Q6 */
   1488			q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
   1489						false, true,
   1490						qproc->mpss_phys,
   1491						qproc->mpss_size);
   1492			q6v5_mba_reclaim(qproc);
   1493		}
   1494	}
   1495}
   1496
   1497static int q6v5_start(struct rproc *rproc)
   1498{
   1499	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
   1500	int xfermemop_ret;
   1501	int ret;
   1502
   1503	ret = q6v5_mba_load(qproc);
   1504	if (ret)
   1505		return ret;
   1506
   1507	dev_info(qproc->dev, "MBA booted with%s debug policy, loading mpss\n",
   1508		 qproc->dp_size ? "" : "out");
   1509
   1510	ret = q6v5_mpss_load(qproc);
   1511	if (ret)
   1512		goto reclaim_mpss;
   1513
   1514	ret = qcom_q6v5_wait_for_start(&qproc->q6v5, msecs_to_jiffies(5000));
   1515	if (ret == -ETIMEDOUT) {
   1516		dev_err(qproc->dev, "start timed out\n");
   1517		goto reclaim_mpss;
   1518	}
   1519
   1520	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
   1521						false, qproc->mba_phys,
   1522						qproc->mba_size);
   1523	if (xfermemop_ret)
   1524		dev_err(qproc->dev,
   1525			"Failed to reclaim mba buffer system may become unstable\n");
   1526
   1527	/* Reset Dump Segment Mask */
   1528	qproc->current_dump_size = 0;
   1529
   1530	return 0;
   1531
   1532reclaim_mpss:
   1533	q6v5_mba_reclaim(qproc);
   1534	q6v5_dump_mba_logs(qproc);
   1535
   1536	return ret;
   1537}
   1538
   1539static int q6v5_stop(struct rproc *rproc)
   1540{
   1541	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
   1542	int ret;
   1543
   1544	ret = qcom_q6v5_request_stop(&qproc->q6v5, qproc->sysmon);
   1545	if (ret == -ETIMEDOUT)
   1546		dev_err(qproc->dev, "timed out on wait\n");
   1547
   1548	q6v5_mba_reclaim(qproc);
   1549
   1550	return 0;
   1551}
   1552
   1553static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
   1554					    const struct firmware *mba_fw)
   1555{
   1556	const struct firmware *fw;
   1557	const struct elf32_phdr *phdrs;
   1558	const struct elf32_phdr *phdr;
   1559	const struct elf32_hdr *ehdr;
   1560	struct q6v5 *qproc = rproc->priv;
   1561	unsigned long i;
   1562	int ret;
   1563
   1564	ret = request_firmware(&fw, qproc->hexagon_mdt_image, qproc->dev);
   1565	if (ret < 0) {
   1566		dev_err(qproc->dev, "unable to load %s\n",
   1567			qproc->hexagon_mdt_image);
   1568		return ret;
   1569	}
   1570
   1571	rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
   1572
   1573	ehdr = (struct elf32_hdr *)fw->data;
   1574	phdrs = (struct elf32_phdr *)(ehdr + 1);
   1575	qproc->total_dump_size = 0;
   1576
   1577	for (i = 0; i < ehdr->e_phnum; i++) {
   1578		phdr = &phdrs[i];
   1579
   1580		if (!q6v5_phdr_valid(phdr))
   1581			continue;
   1582
   1583		ret = rproc_coredump_add_custom_segment(rproc, phdr->p_paddr,
   1584							phdr->p_memsz,
   1585							qcom_q6v5_dump_segment,
   1586							NULL);
   1587		if (ret)
   1588			break;
   1589
   1590		qproc->total_dump_size += phdr->p_memsz;
   1591	}
   1592
   1593	release_firmware(fw);
   1594	return ret;
   1595}
   1596
   1597static const struct rproc_ops q6v5_ops = {
   1598	.start = q6v5_start,
   1599	.stop = q6v5_stop,
   1600	.parse_fw = qcom_q6v5_register_dump_segments,
   1601	.load = q6v5_load,
   1602};
   1603
   1604static void qcom_msa_handover(struct qcom_q6v5 *q6v5)
   1605{
   1606	struct q6v5 *qproc = container_of(q6v5, struct q6v5, q6v5);
   1607
   1608	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
   1609			 qproc->proxy_clk_count);
   1610	q6v5_regulator_disable(qproc, qproc->proxy_regs,
   1611			       qproc->proxy_reg_count);
   1612	q6v5_regulator_disable(qproc, qproc->fallback_proxy_regs,
   1613			       qproc->fallback_proxy_reg_count);
   1614	q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
   1615}
   1616
   1617static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
   1618{
   1619	struct of_phandle_args args;
   1620	int halt_cell_cnt = 3;
   1621	int ret;
   1622
   1623	qproc->reg_base = devm_platform_ioremap_resource_byname(pdev, "qdsp6");
   1624	if (IS_ERR(qproc->reg_base))
   1625		return PTR_ERR(qproc->reg_base);
   1626
   1627	qproc->rmb_base = devm_platform_ioremap_resource_byname(pdev, "rmb");
   1628	if (IS_ERR(qproc->rmb_base))
   1629		return PTR_ERR(qproc->rmb_base);
   1630
   1631	if (qproc->has_vq6)
   1632		halt_cell_cnt++;
   1633
   1634	ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
   1635					       "qcom,halt-regs", halt_cell_cnt, 0, &args);
   1636	if (ret < 0) {
   1637		dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
   1638		return -EINVAL;
   1639	}
   1640
   1641	qproc->halt_map = syscon_node_to_regmap(args.np);
   1642	of_node_put(args.np);
   1643	if (IS_ERR(qproc->halt_map))
   1644		return PTR_ERR(qproc->halt_map);
   1645
   1646	qproc->halt_q6 = args.args[0];
   1647	qproc->halt_modem = args.args[1];
   1648	qproc->halt_nc = args.args[2];
   1649
   1650	if (qproc->has_vq6)
   1651		qproc->halt_vq6 = args.args[3];
   1652
   1653	if (qproc->has_qaccept_regs) {
   1654		ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
   1655						       "qcom,qaccept-regs",
   1656						       3, 0, &args);
   1657		if (ret < 0) {
   1658			dev_err(&pdev->dev, "failed to parse qaccept-regs\n");
   1659			return -EINVAL;
   1660		}
   1661
   1662		qproc->qaccept_mdm = args.args[0];
   1663		qproc->qaccept_cx = args.args[1];
   1664		qproc->qaccept_axi = args.args[2];
   1665	}
   1666
   1667	if (qproc->has_ext_cntl_regs) {
   1668		ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
   1669						       "qcom,ext-regs",
   1670						       2, 0, &args);
   1671		if (ret < 0) {
   1672			dev_err(&pdev->dev, "failed to parse ext-regs index 0\n");
   1673			return -EINVAL;
   1674		}
   1675
   1676		qproc->conn_map = syscon_node_to_regmap(args.np);
   1677		of_node_put(args.np);
   1678		if (IS_ERR(qproc->conn_map))
   1679			return PTR_ERR(qproc->conn_map);
   1680
   1681		qproc->force_clk_on = args.args[0];
   1682		qproc->rscc_disable = args.args[1];
   1683
   1684		ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
   1685						       "qcom,ext-regs",
   1686						       2, 1, &args);
   1687		if (ret < 0) {
   1688			dev_err(&pdev->dev, "failed to parse ext-regs index 1\n");
   1689			return -EINVAL;
   1690		}
   1691
   1692		qproc->axim1_clk_off = args.args[0];
   1693		qproc->crypto_clk_off = args.args[1];
   1694	}
   1695
   1696	if (qproc->has_spare_reg) {
   1697		ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
   1698						       "qcom,spare-regs",
   1699						       1, 0, &args);
   1700		if (ret < 0) {
   1701			dev_err(&pdev->dev, "failed to parse spare-regs\n");
   1702			return -EINVAL;
   1703		}
   1704
   1705		qproc->conn_map = syscon_node_to_regmap(args.np);
   1706		of_node_put(args.np);
   1707		if (IS_ERR(qproc->conn_map))
   1708			return PTR_ERR(qproc->conn_map);
   1709
   1710		qproc->conn_box = args.args[0];
   1711	}
   1712
   1713	return 0;
   1714}
   1715
   1716static int q6v5_init_clocks(struct device *dev, struct clk **clks,
   1717		char **clk_names)
   1718{
   1719	int i;
   1720
   1721	if (!clk_names)
   1722		return 0;
   1723
   1724	for (i = 0; clk_names[i]; i++) {
   1725		clks[i] = devm_clk_get(dev, clk_names[i]);
   1726		if (IS_ERR(clks[i])) {
   1727			int rc = PTR_ERR(clks[i]);
   1728
   1729			if (rc != -EPROBE_DEFER)
   1730				dev_err(dev, "Failed to get %s clock\n",
   1731					clk_names[i]);
   1732			return rc;
   1733		}
   1734	}
   1735
   1736	return i;
   1737}
   1738
   1739static int q6v5_pds_attach(struct device *dev, struct device **devs,
   1740			   char **pd_names)
   1741{
   1742	size_t num_pds = 0;
   1743	int ret;
   1744	int i;
   1745
   1746	if (!pd_names)
   1747		return 0;
   1748
   1749	while (pd_names[num_pds])
   1750		num_pds++;
   1751
   1752	for (i = 0; i < num_pds; i++) {
   1753		devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
   1754		if (IS_ERR_OR_NULL(devs[i])) {
   1755			ret = PTR_ERR(devs[i]) ? : -ENODATA;
   1756			goto unroll_attach;
   1757		}
   1758	}
   1759
   1760	return num_pds;
   1761
   1762unroll_attach:
   1763	for (i--; i >= 0; i--)
   1764		dev_pm_domain_detach(devs[i], false);
   1765
   1766	return ret;
   1767}
   1768
   1769static void q6v5_pds_detach(struct q6v5 *qproc, struct device **pds,
   1770			    size_t pd_count)
   1771{
   1772	int i;
   1773
   1774	for (i = 0; i < pd_count; i++)
   1775		dev_pm_domain_detach(pds[i], false);
   1776}
   1777
   1778static int q6v5_init_reset(struct q6v5 *qproc)
   1779{
   1780	qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev,
   1781							      "mss_restart");
   1782	if (IS_ERR(qproc->mss_restart)) {
   1783		dev_err(qproc->dev, "failed to acquire mss restart\n");
   1784		return PTR_ERR(qproc->mss_restart);
   1785	}
   1786
   1787	if (qproc->has_alt_reset || qproc->has_spare_reg || qproc->has_ext_cntl_regs) {
   1788		qproc->pdc_reset = devm_reset_control_get_exclusive(qproc->dev,
   1789								    "pdc_reset");
   1790		if (IS_ERR(qproc->pdc_reset)) {
   1791			dev_err(qproc->dev, "failed to acquire pdc reset\n");
   1792			return PTR_ERR(qproc->pdc_reset);
   1793		}
   1794	}
   1795
   1796	return 0;
   1797}
   1798
   1799static int q6v5_alloc_memory_region(struct q6v5 *qproc)
   1800{
   1801	struct device_node *child;
   1802	struct device_node *node;
   1803	struct resource r;
   1804	int ret;
   1805
   1806	/*
   1807	 * In the absence of mba/mpss sub-child, extract the mba and mpss
   1808	 * reserved memory regions from device's memory-region property.
   1809	 */
   1810	child = of_get_child_by_name(qproc->dev->of_node, "mba");
   1811	if (!child) {
   1812		node = of_parse_phandle(qproc->dev->of_node,
   1813					"memory-region", 0);
   1814	} else {
   1815		node = of_parse_phandle(child, "memory-region", 0);
   1816		of_node_put(child);
   1817	}
   1818
   1819	ret = of_address_to_resource(node, 0, &r);
   1820	of_node_put(node);
   1821	if (ret) {
   1822		dev_err(qproc->dev, "unable to resolve mba region\n");
   1823		return ret;
   1824	}
   1825
   1826	qproc->mba_phys = r.start;
   1827	qproc->mba_size = resource_size(&r);
   1828
   1829	if (!child) {
   1830		node = of_parse_phandle(qproc->dev->of_node,
   1831					"memory-region", 1);
   1832	} else {
   1833		child = of_get_child_by_name(qproc->dev->of_node, "mpss");
   1834		node = of_parse_phandle(child, "memory-region", 0);
   1835		of_node_put(child);
   1836	}
   1837
   1838	ret = of_address_to_resource(node, 0, &r);
   1839	of_node_put(node);
   1840	if (ret) {
   1841		dev_err(qproc->dev, "unable to resolve mpss region\n");
   1842		return ret;
   1843	}
   1844
   1845	qproc->mpss_phys = qproc->mpss_reloc = r.start;
   1846	qproc->mpss_size = resource_size(&r);
   1847
   1848	return 0;
   1849}
   1850
   1851static int q6v5_probe(struct platform_device *pdev)
   1852{
   1853	const struct rproc_hexagon_res *desc;
   1854	struct device_node *node;
   1855	struct q6v5 *qproc;
   1856	struct rproc *rproc;
   1857	const char *mba_image;
   1858	int ret;
   1859
   1860	desc = of_device_get_match_data(&pdev->dev);
   1861	if (!desc)
   1862		return -EINVAL;
   1863
   1864	if (desc->need_mem_protection && !qcom_scm_is_available())
   1865		return -EPROBE_DEFER;
   1866
   1867	mba_image = desc->hexagon_mba_image;
   1868	ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
   1869					    0, &mba_image);
   1870	if (ret < 0 && ret != -EINVAL) {
   1871		dev_err(&pdev->dev, "unable to read mba firmware-name\n");
   1872		return ret;
   1873	}
   1874
   1875	rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
   1876			    mba_image, sizeof(*qproc));
   1877	if (!rproc) {
   1878		dev_err(&pdev->dev, "failed to allocate rproc\n");
   1879		return -ENOMEM;
   1880	}
   1881
   1882	rproc->auto_boot = false;
   1883	rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
   1884
   1885	qproc = (struct q6v5 *)rproc->priv;
   1886	qproc->dev = &pdev->dev;
   1887	qproc->rproc = rproc;
   1888	qproc->hexagon_mdt_image = "modem.mdt";
   1889	ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
   1890					    1, &qproc->hexagon_mdt_image);
   1891	if (ret < 0 && ret != -EINVAL) {
   1892		dev_err(&pdev->dev, "unable to read mpss firmware-name\n");
   1893		goto free_rproc;
   1894	}
   1895
   1896	platform_set_drvdata(pdev, qproc);
   1897
   1898	qproc->has_qaccept_regs = desc->has_qaccept_regs;
   1899	qproc->has_ext_cntl_regs = desc->has_ext_cntl_regs;
   1900	qproc->has_vq6 = desc->has_vq6;
   1901	qproc->has_spare_reg = desc->has_spare_reg;
   1902	ret = q6v5_init_mem(qproc, pdev);
   1903	if (ret)
   1904		goto free_rproc;
   1905
   1906	ret = q6v5_alloc_memory_region(qproc);
   1907	if (ret)
   1908		goto free_rproc;
   1909
   1910	ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks,
   1911			       desc->proxy_clk_names);
   1912	if (ret < 0) {
   1913		dev_err(&pdev->dev, "Failed to get proxy clocks.\n");
   1914		goto free_rproc;
   1915	}
   1916	qproc->proxy_clk_count = ret;
   1917
   1918	ret = q6v5_init_clocks(&pdev->dev, qproc->reset_clks,
   1919			       desc->reset_clk_names);
   1920	if (ret < 0) {
   1921		dev_err(&pdev->dev, "Failed to get reset clocks.\n");
   1922		goto free_rproc;
   1923	}
   1924	qproc->reset_clk_count = ret;
   1925
   1926	ret = q6v5_init_clocks(&pdev->dev, qproc->active_clks,
   1927			       desc->active_clk_names);
   1928	if (ret < 0) {
   1929		dev_err(&pdev->dev, "Failed to get active clocks.\n");
   1930		goto free_rproc;
   1931	}
   1932	qproc->active_clk_count = ret;
   1933
   1934	ret = q6v5_regulator_init(&pdev->dev, qproc->proxy_regs,
   1935				  desc->proxy_supply);
   1936	if (ret < 0) {
   1937		dev_err(&pdev->dev, "Failed to get proxy regulators.\n");
   1938		goto free_rproc;
   1939	}
   1940	qproc->proxy_reg_count = ret;
   1941
   1942	ret = q6v5_regulator_init(&pdev->dev,  qproc->active_regs,
   1943				  desc->active_supply);
   1944	if (ret < 0) {
   1945		dev_err(&pdev->dev, "Failed to get active regulators.\n");
   1946		goto free_rproc;
   1947	}
   1948	qproc->active_reg_count = ret;
   1949
   1950	ret = q6v5_pds_attach(&pdev->dev, qproc->proxy_pds,
   1951			      desc->proxy_pd_names);
   1952	/* Fallback to regulators for old device trees */
   1953	if (ret == -ENODATA && desc->fallback_proxy_supply) {
   1954		ret = q6v5_regulator_init(&pdev->dev,
   1955					  qproc->fallback_proxy_regs,
   1956					  desc->fallback_proxy_supply);
   1957		if (ret < 0) {
   1958			dev_err(&pdev->dev, "Failed to get fallback proxy regulators.\n");
   1959			goto free_rproc;
   1960		}
   1961		qproc->fallback_proxy_reg_count = ret;
   1962	} else if (ret < 0) {
   1963		dev_err(&pdev->dev, "Failed to init power domains\n");
   1964		goto free_rproc;
   1965	} else {
   1966		qproc->proxy_pd_count = ret;
   1967	}
   1968
   1969	qproc->has_alt_reset = desc->has_alt_reset;
   1970	ret = q6v5_init_reset(qproc);
   1971	if (ret)
   1972		goto detach_proxy_pds;
   1973
   1974	qproc->version = desc->version;
   1975	qproc->need_mem_protection = desc->need_mem_protection;
   1976	qproc->has_mba_logs = desc->has_mba_logs;
   1977
   1978	ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM, "modem",
   1979			     qcom_msa_handover);
   1980	if (ret)
   1981		goto detach_proxy_pds;
   1982
   1983	qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS);
   1984	qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS);
   1985	qcom_add_glink_subdev(rproc, &qproc->glink_subdev, "mpss");
   1986	qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
   1987	qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");
   1988	qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12);
   1989	if (IS_ERR(qproc->sysmon)) {
   1990		ret = PTR_ERR(qproc->sysmon);
   1991		goto remove_subdevs;
   1992	}
   1993
   1994	ret = rproc_add(rproc);
   1995	if (ret)
   1996		goto remove_sysmon_subdev;
   1997
   1998	node = of_get_compatible_child(pdev->dev.of_node, "qcom,bam-dmux");
   1999	qproc->bam_dmux = of_platform_device_create(node, NULL, &pdev->dev);
   2000	of_node_put(node);
   2001
   2002	return 0;
   2003
   2004remove_sysmon_subdev:
   2005	qcom_remove_sysmon_subdev(qproc->sysmon);
   2006remove_subdevs:
   2007	qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
   2008	qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
   2009	qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);
   2010detach_proxy_pds:
   2011	q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
   2012free_rproc:
   2013	rproc_free(rproc);
   2014
   2015	return ret;
   2016}
   2017
   2018static int q6v5_remove(struct platform_device *pdev)
   2019{
   2020	struct q6v5 *qproc = platform_get_drvdata(pdev);
   2021	struct rproc *rproc = qproc->rproc;
   2022
   2023	if (qproc->bam_dmux)
   2024		of_platform_device_destroy(&qproc->bam_dmux->dev, NULL);
   2025	rproc_del(rproc);
   2026
   2027	qcom_q6v5_deinit(&qproc->q6v5);
   2028	qcom_remove_sysmon_subdev(qproc->sysmon);
   2029	qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
   2030	qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
   2031	qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);
   2032
   2033	q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
   2034
   2035	rproc_free(rproc);
   2036
   2037	return 0;
   2038}
   2039
   2040static const struct rproc_hexagon_res sc7180_mss = {
   2041	.hexagon_mba_image = "mba.mbn",
   2042	.proxy_clk_names = (char*[]){
   2043		"xo",
   2044		NULL
   2045	},
   2046	.reset_clk_names = (char*[]){
   2047		"iface",
   2048		"bus",
   2049		"snoc_axi",
   2050		NULL
   2051	},
   2052	.active_clk_names = (char*[]){
   2053		"mnoc_axi",
   2054		"nav",
   2055		NULL
   2056	},
   2057	.proxy_pd_names = (char*[]){
   2058		"cx",
   2059		"mx",
   2060		"mss",
   2061		NULL
   2062	},
   2063	.need_mem_protection = true,
   2064	.has_alt_reset = false,
   2065	.has_mba_logs = true,
   2066	.has_spare_reg = true,
   2067	.has_qaccept_regs = false,
   2068	.has_ext_cntl_regs = false,
   2069	.has_vq6 = false,
   2070	.version = MSS_SC7180,
   2071};
   2072
   2073static const struct rproc_hexagon_res sc7280_mss = {
   2074	.hexagon_mba_image = "mba.mbn",
   2075	.proxy_clk_names = (char*[]){
   2076		"xo",
   2077		"pka",
   2078		NULL
   2079	},
   2080	.active_clk_names = (char*[]){
   2081		"iface",
   2082		"offline",
   2083		"snoc_axi",
   2084		NULL
   2085	},
   2086	.proxy_pd_names = (char*[]){
   2087		"cx",
   2088		"mss",
   2089		NULL
   2090	},
   2091	.need_mem_protection = true,
   2092	.has_alt_reset = false,
   2093	.has_mba_logs = true,
   2094	.has_spare_reg = false,
   2095	.has_qaccept_regs = true,
   2096	.has_ext_cntl_regs = true,
   2097	.has_vq6 = true,
   2098	.version = MSS_SC7280,
   2099};
   2100
   2101static const struct rproc_hexagon_res sdm845_mss = {
   2102	.hexagon_mba_image = "mba.mbn",
   2103	.proxy_clk_names = (char*[]){
   2104			"xo",
   2105			"prng",
   2106			NULL
   2107	},
   2108	.reset_clk_names = (char*[]){
   2109			"iface",
   2110			"snoc_axi",
   2111			NULL
   2112	},
   2113	.active_clk_names = (char*[]){
   2114			"bus",
   2115			"mem",
   2116			"gpll0_mss",
   2117			"mnoc_axi",
   2118			NULL
   2119	},
   2120	.proxy_pd_names = (char*[]){
   2121			"cx",
   2122			"mx",
   2123			"mss",
   2124			NULL
   2125	},
   2126	.need_mem_protection = true,
   2127	.has_alt_reset = true,
   2128	.has_mba_logs = false,
   2129	.has_spare_reg = false,
   2130	.has_qaccept_regs = false,
   2131	.has_ext_cntl_regs = false,
   2132	.has_vq6 = false,
   2133	.version = MSS_SDM845,
   2134};
   2135
   2136static const struct rproc_hexagon_res msm8998_mss = {
   2137	.hexagon_mba_image = "mba.mbn",
   2138	.proxy_clk_names = (char*[]){
   2139			"xo",
   2140			"qdss",
   2141			"mem",
   2142			NULL
   2143	},
   2144	.active_clk_names = (char*[]){
   2145			"iface",
   2146			"bus",
   2147			"gpll0_mss",
   2148			"mnoc_axi",
   2149			"snoc_axi",
   2150			NULL
   2151	},
   2152	.proxy_pd_names = (char*[]){
   2153			"cx",
   2154			"mx",
   2155			NULL
   2156	},
   2157	.need_mem_protection = true,
   2158	.has_alt_reset = false,
   2159	.has_mba_logs = false,
   2160	.has_spare_reg = false,
   2161	.has_qaccept_regs = false,
   2162	.has_ext_cntl_regs = false,
   2163	.has_vq6 = false,
   2164	.version = MSS_MSM8998,
   2165};
   2166
   2167static const struct rproc_hexagon_res msm8996_mss = {
   2168	.hexagon_mba_image = "mba.mbn",
   2169	.proxy_supply = (struct qcom_mss_reg_res[]) {
   2170		{
   2171			.supply = "pll",
   2172			.uA = 100000,
   2173		},
   2174		{}
   2175	},
   2176	.proxy_clk_names = (char*[]){
   2177			"xo",
   2178			"pnoc",
   2179			"qdss",
   2180			NULL
   2181	},
   2182	.active_clk_names = (char*[]){
   2183			"iface",
   2184			"bus",
   2185			"mem",
   2186			"gpll0_mss",
   2187			"snoc_axi",
   2188			"mnoc_axi",
   2189			NULL
   2190	},
   2191	.need_mem_protection = true,
   2192	.has_alt_reset = false,
   2193	.has_mba_logs = false,
   2194	.has_spare_reg = false,
   2195	.has_qaccept_regs = false,
   2196	.has_ext_cntl_regs = false,
   2197	.has_vq6 = false,
   2198	.version = MSS_MSM8996,
   2199};
   2200
   2201static const struct rproc_hexagon_res msm8916_mss = {
   2202	.hexagon_mba_image = "mba.mbn",
   2203	.proxy_supply = (struct qcom_mss_reg_res[]) {
   2204		{
   2205			.supply = "pll",
   2206			.uA = 100000,
   2207		},
   2208		{}
   2209	},
   2210	.fallback_proxy_supply = (struct qcom_mss_reg_res[]) {
   2211		{
   2212			.supply = "mx",
   2213			.uV = 1050000,
   2214		},
   2215		{
   2216			.supply = "cx",
   2217			.uA = 100000,
   2218		},
   2219		{}
   2220	},
   2221	.proxy_clk_names = (char*[]){
   2222		"xo",
   2223		NULL
   2224	},
   2225	.active_clk_names = (char*[]){
   2226		"iface",
   2227		"bus",
   2228		"mem",
   2229		NULL
   2230	},
   2231	.proxy_pd_names = (char*[]){
   2232		"mx",
   2233		"cx",
   2234		NULL
   2235	},
   2236	.need_mem_protection = false,
   2237	.has_alt_reset = false,
   2238	.has_mba_logs = false,
   2239	.has_spare_reg = false,
   2240	.has_qaccept_regs = false,
   2241	.has_ext_cntl_regs = false,
   2242	.has_vq6 = false,
   2243	.version = MSS_MSM8916,
   2244};
   2245
   2246static const struct rproc_hexagon_res msm8974_mss = {
   2247	.hexagon_mba_image = "mba.b00",
   2248	.proxy_supply = (struct qcom_mss_reg_res[]) {
   2249		{
   2250			.supply = "pll",
   2251			.uA = 100000,
   2252		},
   2253		{}
   2254	},
   2255	.fallback_proxy_supply = (struct qcom_mss_reg_res[]) {
   2256		{
   2257			.supply = "mx",
   2258			.uV = 1050000,
   2259		},
   2260		{
   2261			.supply = "cx",
   2262			.uA = 100000,
   2263		},
   2264		{}
   2265	},
   2266	.active_supply = (struct qcom_mss_reg_res[]) {
   2267		{
   2268			.supply = "mss",
   2269			.uV = 1050000,
   2270			.uA = 100000,
   2271		},
   2272		{}
   2273	},
   2274	.proxy_clk_names = (char*[]){
   2275		"xo",
   2276		NULL
   2277	},
   2278	.active_clk_names = (char*[]){
   2279		"iface",
   2280		"bus",
   2281		"mem",
   2282		NULL
   2283	},
   2284	.proxy_pd_names = (char*[]){
   2285		"mx",
   2286		"cx",
   2287		NULL
   2288	},
   2289	.need_mem_protection = false,
   2290	.has_alt_reset = false,
   2291	.has_mba_logs = false,
   2292	.has_spare_reg = false,
   2293	.has_qaccept_regs = false,
   2294	.has_ext_cntl_regs = false,
   2295	.has_vq6 = false,
   2296	.version = MSS_MSM8974,
   2297};
   2298
   2299static const struct of_device_id q6v5_of_match[] = {
   2300	{ .compatible = "qcom,q6v5-pil", .data = &msm8916_mss},
   2301	{ .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss},
   2302	{ .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
   2303	{ .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
   2304	{ .compatible = "qcom,msm8998-mss-pil", .data = &msm8998_mss},
   2305	{ .compatible = "qcom,sc7180-mss-pil", .data = &sc7180_mss},
   2306	{ .compatible = "qcom,sc7280-mss-pil", .data = &sc7280_mss},
   2307	{ .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss},
   2308	{ },
   2309};
   2310MODULE_DEVICE_TABLE(of, q6v5_of_match);
   2311
   2312static struct platform_driver q6v5_driver = {
   2313	.probe = q6v5_probe,
   2314	.remove = q6v5_remove,
   2315	.driver = {
   2316		.name = "qcom-q6v5-mss",
   2317		.of_match_table = q6v5_of_match,
   2318	},
   2319};
   2320module_platform_driver(q6v5_driver);
   2321
   2322MODULE_DESCRIPTION("Qualcomm Self-authenticating modem remoteproc driver");
   2323MODULE_LICENSE("GPL v2");