cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

init.c (37520B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include "internal.h"

static DEFINE_IDA(mhi_controller_ida);

const char * const mhi_ee_str[MHI_EE_MAX] = {
	[MHI_EE_PBL] = "PRIMARY BOOTLOADER",
	[MHI_EE_SBL] = "SECONDARY BOOTLOADER",
	[MHI_EE_AMSS] = "MISSION MODE",
	[MHI_EE_RDDM] = "RAMDUMP DOWNLOAD MODE",
	[MHI_EE_WFW] = "WLAN FIRMWARE",
	[MHI_EE_PTHRU] = "PASS THROUGH",
	[MHI_EE_EDL] = "EMERGENCY DOWNLOAD",
	[MHI_EE_FP] = "FLASH PROGRAMMER",
	[MHI_EE_DISABLE_TRANSITION] = "DISABLE",
	[MHI_EE_NOT_SUPPORTED] = "NOT SUPPORTED",
};

const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX] = {
	[DEV_ST_TRANSITION_PBL] = "PBL",
	[DEV_ST_TRANSITION_READY] = "READY",
	[DEV_ST_TRANSITION_SBL] = "SBL",
	[DEV_ST_TRANSITION_MISSION_MODE] = "MISSION MODE",
	[DEV_ST_TRANSITION_FP] = "FLASH PROGRAMMER",
	[DEV_ST_TRANSITION_SYS_ERR] = "SYS ERROR",
	[DEV_ST_TRANSITION_DISABLE] = "DISABLE",
};

const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX] = {
	[MHI_CH_STATE_TYPE_RESET] = "RESET",
	[MHI_CH_STATE_TYPE_STOP] = "STOP",
	[MHI_CH_STATE_TYPE_START] = "START",
};

static const char * const mhi_pm_state_str[] = {
	[MHI_PM_STATE_DISABLE] = "DISABLE",
	[MHI_PM_STATE_POR] = "POWER ON RESET",
	[MHI_PM_STATE_M0] = "M0",
	[MHI_PM_STATE_M2] = "M2",
	[MHI_PM_STATE_M3_ENTER] = "M?->M3",
	[MHI_PM_STATE_M3] = "M3",
	[MHI_PM_STATE_M3_EXIT] = "M3->M0",
	[MHI_PM_STATE_FW_DL_ERR] = "Firmware Download Error",
	[MHI_PM_STATE_SYS_ERR_DETECT] = "SYS ERROR Detect",
	[MHI_PM_STATE_SYS_ERR_PROCESS] = "SYS ERROR Process",
	[MHI_PM_STATE_SHUTDOWN_PROCESS] = "SHUTDOWN Process",
	[MHI_PM_STATE_LD_ERR_FATAL_DETECT] = "Linkdown or Error Fatal Detect",
};

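/*
 * The PM state passed in here is a bitmask of MHI_PM_* flags, so __fls()
 * maps the highest set bit to its position, which is the index of the
 * matching entry in mhi_pm_state_str above.
 */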
const char *to_mhi_pm_state_str(u32 state)
{
	int index;

	if (state)
		index = __fls(state);

	if (!state || index >= ARRAY_SIZE(mhi_pm_state_str))
		return "Invalid State";

	return mhi_pm_state_str[index];
}

static ssize_t serial_number_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	return sysfs_emit(buf, "Serial Number: %u\n",
			mhi_cntrl->serial_number);
}
static DEVICE_ATTR_RO(serial_number);

static ssize_t oem_pk_hash_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	int i, cnt = 0;

	for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++)
		cnt += sysfs_emit_at(buf, cnt, "OEMPKHASH[%d]: 0x%x\n",
				i, mhi_cntrl->oem_pk_hash[i]);

	return cnt;
}
static DEVICE_ATTR_RO(oem_pk_hash);

static ssize_t soc_reset_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf,
			       size_t count)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	mhi_soc_reset(mhi_cntrl);
	return count;
}
static DEVICE_ATTR_WO(soc_reset);

static struct attribute *mhi_dev_attrs[] = {
	&dev_attr_serial_number.attr,
	&dev_attr_oem_pk_hash.attr,
	&dev_attr_soc_reset.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mhi_dev);

/* MHI protocol requires the transfer ring to be aligned with ring length */
static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl,
				  struct mhi_ring *ring,
				  u64 len)
{
	ring->alloc_size = len + (len - 1);
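	/*
	 * Over-allocating by (len - 1) bytes guarantees that a len-aligned
	 * block of len bytes fits somewhere inside the buffer; the rounding
	 * below assumes len is a power of two.
	 */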
	ring->pre_aligned = dma_alloc_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
					       &ring->dma_handle, GFP_KERNEL);
	if (!ring->pre_aligned)
		return -ENOMEM;

	ring->iommu_base = (ring->dma_handle + (len - 1)) & ~(len - 1);
	ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle);

	return 0;
}

void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
{
	int i;
	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;

	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
	}

	free_irq(mhi_cntrl->irq[0], mhi_cntrl);
}

int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	unsigned long irq_flags = IRQF_SHARED | IRQF_NO_SUSPEND;
	int i, ret;

	/* if controller driver has set irq_flags, use it */
	if (mhi_cntrl->irq_flags)
		irq_flags = mhi_cntrl->irq_flags;

	/* Setup BHI_INTVEC IRQ */
	ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handler,
				   mhi_intvec_threaded_handler,
				   irq_flags,
				   "bhi", mhi_cntrl);
	if (ret)
		return ret;

	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		if (mhi_event->irq >= mhi_cntrl->nr_irqs) {
			dev_err(dev, "irq %d not available for event ring\n",
				mhi_event->irq);
			ret = -EINVAL;
			goto error_request;
		}

		ret = request_irq(mhi_cntrl->irq[mhi_event->irq],
				  mhi_irq_handler,
				  irq_flags,
				  "mhi", mhi_event);
		if (ret) {
			dev_err(dev, "Error requesting irq:%d for ev:%d\n",
				mhi_cntrl->irq[mhi_event->irq], i);
			goto error_request;
		}
	}

	return 0;

error_request:
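	/* Unwind in reverse order, freeing only the IRQs requested so far */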
	for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
		if (mhi_event->offload_ev)
			continue;

		free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
	}
	free_irq(mhi_cntrl->irq[0], mhi_cntrl);

	return ret;
}

void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
	int i;
	struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt;
	struct mhi_cmd *mhi_cmd;
	struct mhi_event *mhi_event;
	struct mhi_ring *ring;

	mhi_cmd = mhi_cntrl->mhi_cmd;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) {
		ring = &mhi_cmd->ring;
		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
		ring->base = NULL;
		ring->iommu_base = 0;
	}

	dma_free_coherent(mhi_cntrl->cntrl_dev,
			  sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
			  mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);

	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		ring = &mhi_event->ring;
		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
		ring->base = NULL;
		ring->iommu_base = 0;
	}

	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) *
			  mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
			  mhi_ctxt->er_ctxt_addr);

	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) *
			  mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
			  mhi_ctxt->chan_ctxt_addr);

	kfree(mhi_ctxt);
	mhi_cntrl->mhi_ctxt = NULL;
}

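/*
 * The context arrays set up below live in DMA-coherent memory shared with
 * the device, which is why every field is stored little-endian via the
 * cpu_to_le32()/cpu_to_le64() conversions.
 */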
int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
	struct mhi_ctxt *mhi_ctxt;
	struct mhi_chan_ctxt *chan_ctxt;
	struct mhi_event_ctxt *er_ctxt;
	struct mhi_cmd_ctxt *cmd_ctxt;
	struct mhi_chan *mhi_chan;
	struct mhi_event *mhi_event;
	struct mhi_cmd *mhi_cmd;
	u32 tmp;
	int ret = -ENOMEM, i;

	atomic_set(&mhi_cntrl->dev_wake, 0);
	atomic_set(&mhi_cntrl->pending_pkts, 0);

	mhi_ctxt = kzalloc(sizeof(*mhi_ctxt), GFP_KERNEL);
	if (!mhi_ctxt)
		return -ENOMEM;

	/* Setup channel ctxt */
	mhi_ctxt->chan_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
						 sizeof(*mhi_ctxt->chan_ctxt) *
						 mhi_cntrl->max_chan,
						 &mhi_ctxt->chan_ctxt_addr,
						 GFP_KERNEL);
	if (!mhi_ctxt->chan_ctxt)
		goto error_alloc_chan_ctxt;

	mhi_chan = mhi_cntrl->mhi_chan;
	chan_ctxt = mhi_ctxt->chan_ctxt;
	for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) {
		/* Skip if it is an offload channel */
		if (mhi_chan->offload_ch)
			continue;

		tmp = le32_to_cpu(chan_ctxt->chcfg);
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED);
		tmp &= ~CHAN_CTX_BRSTMODE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_BRSTMODE_MASK, mhi_chan->db_cfg.brstmode);
		tmp &= ~CHAN_CTX_POLLCFG_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_POLLCFG_MASK, mhi_chan->db_cfg.pollcfg);
		chan_ctxt->chcfg = cpu_to_le32(tmp);

		chan_ctxt->chtype = cpu_to_le32(mhi_chan->type);
		chan_ctxt->erindex = cpu_to_le32(mhi_chan->er_index);

		mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
		mhi_chan->tre_ring.db_addr = (void __iomem *)&chan_ctxt->wp;
	}

	/* Setup event context */
	mhi_ctxt->er_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
					       sizeof(*mhi_ctxt->er_ctxt) *
					       mhi_cntrl->total_ev_rings,
					       &mhi_ctxt->er_ctxt_addr,
					       GFP_KERNEL);
	if (!mhi_ctxt->er_ctxt)
		goto error_alloc_er_ctxt;

	er_ctxt = mhi_ctxt->er_ctxt;
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
		     mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip if it is an offload event */
		if (mhi_event->offload_ev)
			continue;

		tmp = le32_to_cpu(er_ctxt->intmod);
		tmp &= ~EV_CTX_INTMODC_MASK;
		tmp &= ~EV_CTX_INTMODT_MASK;
		tmp |= FIELD_PREP(EV_CTX_INTMODT_MASK, mhi_event->intmod);
		er_ctxt->intmod = cpu_to_le32(tmp);

		er_ctxt->ertype = cpu_to_le32(MHI_ER_TYPE_VALID);
		er_ctxt->msivec = cpu_to_le32(mhi_event->irq);
		mhi_event->db_cfg.db_mode = true;

		ring->el_size = sizeof(struct mhi_ring_element);
		ring->len = ring->el_size * ring->elements;
		ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
		if (ret)
			goto error_alloc_er;

		/*
		 * If the read pointer equals the write pointer, then the
		 * ring is empty
		 */
		ring->rp = ring->wp = ring->base;
		er_ctxt->rbase = cpu_to_le64(ring->iommu_base);
		er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase;
		er_ctxt->rlen = cpu_to_le64(ring->len);
		ring->ctxt_wp = &er_ctxt->wp;
	}

	/* Setup cmd context */
	ret = -ENOMEM;
	mhi_ctxt->cmd_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
						sizeof(*mhi_ctxt->cmd_ctxt) *
						NR_OF_CMD_RINGS,
						&mhi_ctxt->cmd_ctxt_addr,
						GFP_KERNEL);
	if (!mhi_ctxt->cmd_ctxt)
		goto error_alloc_er;

	mhi_cmd = mhi_cntrl->mhi_cmd;
	cmd_ctxt = mhi_ctxt->cmd_ctxt;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		ring->el_size = sizeof(struct mhi_ring_element);
		ring->elements = CMD_EL_PER_RING;
		ring->len = ring->el_size * ring->elements;
		ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
		if (ret)
			goto error_alloc_cmd;

		ring->rp = ring->wp = ring->base;
		cmd_ctxt->rbase = cpu_to_le64(ring->iommu_base);
		cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase;
		cmd_ctxt->rlen = cpu_to_le64(ring->len);
		ring->ctxt_wp = &cmd_ctxt->wp;
	}

	mhi_cntrl->mhi_ctxt = mhi_ctxt;

	return 0;

error_alloc_cmd:
	for (--i, --mhi_cmd; i >= 0; i--, mhi_cmd--) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
	}
	dma_free_coherent(mhi_cntrl->cntrl_dev,
			  sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
			  mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);
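	/*
	 * Re-prime the loop variables so the fall-through into
	 * error_alloc_er below unwinds every event ring allocated above.
	 */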
	i = mhi_cntrl->total_ev_rings;
	mhi_event = mhi_cntrl->mhi_event + i;

error_alloc_er:
	for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
		struct mhi_ring *ring = &mhi_event->ring;

		if (mhi_event->offload_ev)
			continue;

		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
	}
	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) *
			  mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
			  mhi_ctxt->er_ctxt_addr);

error_alloc_er_ctxt:
	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) *
			  mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
			  mhi_ctxt->chan_ctxt_addr);

error_alloc_chan_ctxt:
	kfree(mhi_ctxt);

	return ret;
}

int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
{
	u32 val;
	int i, ret;
	struct mhi_chan *mhi_chan;
	struct mhi_event *mhi_event;
	void __iomem *base = mhi_cntrl->regs;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct {
		u32 offset;
		u32 val;
	} reg_info[] = {
		{
			CCABAP_HIGHER,
			upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
		},
		{
			CCABAP_LOWER,
			lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
		},
		{
			ECABAP_HIGHER,
			upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
		},
		{
			ECABAP_LOWER,
			lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
		},
		{
			CRCBAP_HIGHER,
			upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
		},
		{
			CRCBAP_LOWER,
			lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
		},
		{
			MHICTRLBASE_HIGHER,
			upper_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHICTRLBASE_LOWER,
			lower_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHIDATABASE_HIGHER,
			upper_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHIDATABASE_LOWER,
			lower_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHICTRLLIMIT_HIGHER,
			upper_32_bits(mhi_cntrl->iova_stop),
		},
		{
			MHICTRLLIMIT_LOWER,
			lower_32_bits(mhi_cntrl->iova_stop),
		},
		{
			MHIDATALIMIT_HIGHER,
			upper_32_bits(mhi_cntrl->iova_stop),
		},
		{
			MHIDATALIMIT_LOWER,
			lower_32_bits(mhi_cntrl->iova_stop),
		},
		{0, 0}
	};

	dev_dbg(dev, "Initializing MHI registers\n");

	/* Read channel db offset */
	ret = mhi_read_reg(mhi_cntrl, base, CHDBOFF, &val);
	if (ret) {
		dev_err(dev, "Unable to read CHDBOFF register\n");
		return -EIO;
	}

	/* Setup wake db */
	mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB);
	mhi_cntrl->wake_set = false;

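	/* Doorbell registers are 8 bytes wide, hence the val += 8 stride below */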
	/* Setup channel db address for each channel in tre_ring */
	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++)
		mhi_chan->tre_ring.db_addr = base + val;

	/* Read event ring db offset */
	ret = mhi_read_reg(mhi_cntrl, base, ERDBOFF, &val);
	if (ret) {
		dev_err(dev, "Unable to read ERDBOFF register\n");
		return -EIO;
	}

	/* Setup event db address for each ev_ring */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		mhi_event->ring.db_addr = base + val;
	}

	/* Setup DB register for primary CMD rings */
	mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING].ring.db_addr = base + CRDB_LOWER;

	/* Write to MMIO registers */
	for (i = 0; reg_info[i].offset; i++)
		mhi_write_reg(mhi_cntrl, base, reg_info[i].offset,
			      reg_info[i].val);

	ret = mhi_write_reg_field(mhi_cntrl, base, MHICFG, MHICFG_NER_MASK,
				  mhi_cntrl->total_ev_rings);
	if (ret) {
		dev_err(dev, "Unable to write MHICFG register\n");
		return ret;
	}

	ret = mhi_write_reg_field(mhi_cntrl, base, MHICFG, MHICFG_NHWER_MASK,
				  mhi_cntrl->hw_ev_rings);
	if (ret) {
		dev_err(dev, "Unable to write MHICFG register\n");
		return ret;
	}

	return 0;
}

void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
			  struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring;
	struct mhi_ring *tre_ring;
	struct mhi_chan_ctxt *chan_ctxt;
	u32 tmp;

	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;
	chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];

	if (!chan_ctxt->rbase) /* Already uninitialized */
		return;

	dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size,
			  tre_ring->pre_aligned, tre_ring->dma_handle);
	vfree(buf_ring->base);

	buf_ring->base = tre_ring->base = NULL;
	tre_ring->ctxt_wp = NULL;
	chan_ctxt->rbase = 0;
	chan_ctxt->rlen = 0;
	chan_ctxt->rp = 0;
	chan_ctxt->wp = 0;

	tmp = le32_to_cpu(chan_ctxt->chcfg);
	tmp &= ~CHAN_CTX_CHSTATE_MASK;
	tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED);
	chan_ctxt->chcfg = cpu_to_le32(tmp);

	/* Update to all cores */
	smp_wmb();
}

int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
		       struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring;
	struct mhi_ring *tre_ring;
	struct mhi_chan_ctxt *chan_ctxt;
	u32 tmp;
	int ret;

	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;
	tre_ring->el_size = sizeof(struct mhi_ring_element);
	tre_ring->len = tre_ring->el_size * tre_ring->elements;
	chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
	ret = mhi_alloc_aligned_ring(mhi_cntrl, tre_ring, tre_ring->len);
	if (ret)
		return -ENOMEM;

	buf_ring->el_size = sizeof(struct mhi_buf_info);
	buf_ring->len = buf_ring->el_size * buf_ring->elements;
	buf_ring->base = vzalloc(buf_ring->len);

	if (!buf_ring->base) {
		dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size,
				  tre_ring->pre_aligned, tre_ring->dma_handle);
		return -ENOMEM;
	}

	tmp = le32_to_cpu(chan_ctxt->chcfg);
	tmp &= ~CHAN_CTX_CHSTATE_MASK;
	tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_ENABLED);
	chan_ctxt->chcfg = cpu_to_le32(tmp);

	chan_ctxt->rbase = cpu_to_le64(tre_ring->iommu_base);
	chan_ctxt->rp = chan_ctxt->wp = chan_ctxt->rbase;
	chan_ctxt->rlen = cpu_to_le64(tre_ring->len);
	tre_ring->ctxt_wp = &chan_ctxt->wp;

	tre_ring->rp = tre_ring->wp = tre_ring->base;
	buf_ring->rp = buf_ring->wp = buf_ring->base;
	mhi_chan->db_cfg.db_mode = 1;

	/* Update to all cores */
	smp_wmb();

	return 0;
}

static int parse_ev_cfg(struct mhi_controller *mhi_cntrl,
			const struct mhi_controller_config *config)
{
	struct mhi_event *mhi_event;
	const struct mhi_event_config *event_cfg;
	struct device *dev = mhi_cntrl->cntrl_dev;
	int i, num;

	num = config->num_events;
	mhi_cntrl->total_ev_rings = num;
	mhi_cntrl->mhi_event = kcalloc(num, sizeof(*mhi_cntrl->mhi_event),
				       GFP_KERNEL);
	if (!mhi_cntrl->mhi_event)
		return -ENOMEM;

	/* Populate event ring */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < num; i++) {
		event_cfg = &config->event_cfg[i];

		mhi_event->er_index = i;
		mhi_event->ring.elements = event_cfg->num_elements;
		mhi_event->intmod = event_cfg->irq_moderation_ms;
		mhi_event->irq = event_cfg->irq;

		if (event_cfg->channel != U32_MAX) {
			/* This event ring has a dedicated channel */
			mhi_event->chan = event_cfg->channel;
			if (mhi_event->chan >= mhi_cntrl->max_chan) {
				dev_err(dev,
					"Event Ring channel not available\n");
				goto error_ev_cfg;
			}

			mhi_event->mhi_chan =
				&mhi_cntrl->mhi_chan[mhi_event->chan];
		}

		/* Priority is fixed to 1 for now */
		mhi_event->priority = 1;

		mhi_event->db_cfg.brstmode = event_cfg->mode;
		if (MHI_INVALID_BRSTMODE(mhi_event->db_cfg.brstmode))
			goto error_ev_cfg;

		if (mhi_event->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
			mhi_event->db_cfg.process_db = mhi_db_brstmode;
		else
			mhi_event->db_cfg.process_db = mhi_db_brstmode_disable;

		mhi_event->data_type = event_cfg->data_type;

		switch (mhi_event->data_type) {
		case MHI_ER_DATA:
			mhi_event->process_event = mhi_process_data_event_ring;
			break;
		case MHI_ER_CTRL:
			mhi_event->process_event = mhi_process_ctrl_ev_ring;
			break;
		default:
			dev_err(dev, "Event Ring type not supported\n");
			goto error_ev_cfg;
		}

		mhi_event->hw_ring = event_cfg->hardware_event;
		if (mhi_event->hw_ring)
			mhi_cntrl->hw_ev_rings++;
		else
			mhi_cntrl->sw_ev_rings++;

		mhi_event->cl_manage = event_cfg->client_managed;
		mhi_event->offload_ev = event_cfg->offload_channel;
		mhi_event++;
	}

	return 0;

error_ev_cfg:

	kfree(mhi_cntrl->mhi_event);
	return -EINVAL;
}

static int parse_ch_cfg(struct mhi_controller *mhi_cntrl,
			const struct mhi_controller_config *config)
{
	const struct mhi_channel_config *ch_cfg;
	struct device *dev = mhi_cntrl->cntrl_dev;
	int i;
	u32 chan;

	mhi_cntrl->max_chan = config->max_channels;

	/*
	 * The allocation of MHI channels can exceed 32KB in some scenarios,
	 * so to avoid any possible memory allocation failures, vzalloc is
	 * used here
	 */
	mhi_cntrl->mhi_chan = vzalloc(mhi_cntrl->max_chan *
				      sizeof(*mhi_cntrl->mhi_chan));
	if (!mhi_cntrl->mhi_chan)
		return -ENOMEM;

	INIT_LIST_HEAD(&mhi_cntrl->lpm_chans);

	/* Populate channel configurations */
	for (i = 0; i < config->num_channels; i++) {
		struct mhi_chan *mhi_chan;

		ch_cfg = &config->ch_cfg[i];

		chan = ch_cfg->num;
		if (chan >= mhi_cntrl->max_chan) {
			dev_err(dev, "Channel %d not available\n", chan);
			goto error_chan_cfg;
		}

		mhi_chan = &mhi_cntrl->mhi_chan[chan];
		mhi_chan->name = ch_cfg->name;
		mhi_chan->chan = chan;

		mhi_chan->tre_ring.elements = ch_cfg->num_elements;
		if (!mhi_chan->tre_ring.elements)
			goto error_chan_cfg;

		/*
		 * For some channels, the local ring length should be bigger
		 * than the transfer ring length because of internal logical
		 * channels in the device, so that the host can queue more
		 * buffers than the transfer ring length allows. For example,
		 * RSC channels should have a larger local channel length
		 * than the transfer ring length.
		 */
		mhi_chan->buf_ring.elements = ch_cfg->local_elements;
		if (!mhi_chan->buf_ring.elements)
			mhi_chan->buf_ring.elements = mhi_chan->tre_ring.elements;
		mhi_chan->er_index = ch_cfg->event_ring;
		mhi_chan->dir = ch_cfg->dir;

		/*
		 * For most channels, chtype is identical to the channel
		 * direction. So, if it is not defined, assign the channel
		 * direction to chtype
		 */
		mhi_chan->type = ch_cfg->type;
		if (!mhi_chan->type)
			mhi_chan->type = (enum mhi_ch_type)mhi_chan->dir;

		mhi_chan->ee_mask = ch_cfg->ee_mask;
		mhi_chan->db_cfg.pollcfg = ch_cfg->pollcfg;
		mhi_chan->lpm_notify = ch_cfg->lpm_notify;
		mhi_chan->offload_ch = ch_cfg->offload_channel;
		mhi_chan->db_cfg.reset_req = ch_cfg->doorbell_mode_switch;
		mhi_chan->pre_alloc = ch_cfg->auto_queue;
		mhi_chan->wake_capable = ch_cfg->wake_capable;

		/*
		 * If the MHI host allocates buffers, then the channel
		 * direction should be DMA_FROM_DEVICE
		 */
		if (mhi_chan->pre_alloc && mhi_chan->dir != DMA_FROM_DEVICE) {
			dev_err(dev, "Invalid channel configuration\n");
			goto error_chan_cfg;
		}

		/*
		 * Bi-directional and directionless channels must be
		 * offload channels
		 */
		if ((mhi_chan->dir == DMA_BIDIRECTIONAL ||
		     mhi_chan->dir == DMA_NONE) && !mhi_chan->offload_ch) {
			dev_err(dev, "Invalid channel configuration\n");
			goto error_chan_cfg;
		}

		if (!mhi_chan->offload_ch) {
			mhi_chan->db_cfg.brstmode = ch_cfg->doorbell;
			if (MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode)) {
				dev_err(dev, "Invalid doorbell mode\n");
				goto error_chan_cfg;
			}
		}

		if (mhi_chan->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
			mhi_chan->db_cfg.process_db = mhi_db_brstmode;
		else
			mhi_chan->db_cfg.process_db = mhi_db_brstmode_disable;

		mhi_chan->configured = true;

		if (mhi_chan->lpm_notify)
			list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans);
	}

	return 0;

error_chan_cfg:
	vfree(mhi_cntrl->mhi_chan);

	return -EINVAL;
}

static int parse_config(struct mhi_controller *mhi_cntrl,
			const struct mhi_controller_config *config)
{
	int ret;

	/* Parse MHI channel configuration */
	ret = parse_ch_cfg(mhi_cntrl, config);
	if (ret)
		return ret;

	/* Parse MHI event configuration */
	ret = parse_ev_cfg(mhi_cntrl, config);
	if (ret)
		goto error_ev_cfg;

	mhi_cntrl->timeout_ms = config->timeout_ms;
	if (!mhi_cntrl->timeout_ms)
		mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS;

	mhi_cntrl->bounce_buf = config->use_bounce_buf;
	mhi_cntrl->buffer_len = config->buf_len;
	if (!mhi_cntrl->buffer_len)
		mhi_cntrl->buffer_len = MHI_MAX_MTU;

	/* By default, host is allowed to ring DB in both M0 and M2 states */
	mhi_cntrl->db_access = MHI_PM_M0 | MHI_PM_M2;
	if (config->m2_no_db)
		mhi_cntrl->db_access &= ~MHI_PM_M2;

	return 0;

error_ev_cfg:
	vfree(mhi_cntrl->mhi_chan);

	return ret;
}

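/*
 * Typical controller-driver bring-up sequence (a sketch; error handling
 * omitted, and the power-up helpers live outside this file):
 *
 *	mhi_cntrl = mhi_alloc_controller();
 *	// fill in cntrl_dev, regs, irq, reg read/write ops, callbacks ...
 *	ret = mhi_register_controller(mhi_cntrl, config);
 *	ret = mhi_prepare_for_power_up(mhi_cntrl);
 *	ret = mhi_sync_power_up(mhi_cntrl);
 */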
int mhi_register_controller(struct mhi_controller *mhi_cntrl,
			    const struct mhi_controller_config *config)
{
	struct mhi_event *mhi_event;
	struct mhi_chan *mhi_chan;
	struct mhi_cmd *mhi_cmd;
	struct mhi_device *mhi_dev;
	u32 soc_info;
	int ret, i;

	if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->regs ||
	    !mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
	    !mhi_cntrl->status_cb || !mhi_cntrl->read_reg ||
	    !mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs ||
	    !mhi_cntrl->irq || !mhi_cntrl->reg_len)
		return -EINVAL;

	ret = parse_config(mhi_cntrl, config);
	if (ret)
		return -EINVAL;

	mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS,
				     sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
	if (!mhi_cntrl->mhi_cmd) {
		ret = -ENOMEM;
		goto err_free_event;
	}

	INIT_LIST_HEAD(&mhi_cntrl->transition_list);
	mutex_init(&mhi_cntrl->pm_mutex);
	rwlock_init(&mhi_cntrl->pm_lock);
	spin_lock_init(&mhi_cntrl->transition_lock);
	spin_lock_init(&mhi_cntrl->wlock);
	INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
	init_waitqueue_head(&mhi_cntrl->state_event);

	mhi_cntrl->hiprio_wq = alloc_ordered_workqueue("mhi_hiprio_wq", WQ_HIGHPRI);
	if (!mhi_cntrl->hiprio_wq) {
		dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate workqueue\n");
		ret = -ENOMEM;
		goto err_free_cmd;
	}

	mhi_cmd = mhi_cntrl->mhi_cmd;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++)
		spin_lock_init(&mhi_cmd->lock);

	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		/* Skip for offload events */
		if (mhi_event->offload_ev)
			continue;

		mhi_event->mhi_cntrl = mhi_cntrl;
		spin_lock_init(&mhi_event->lock);
		if (mhi_event->data_type == MHI_ER_CTRL)
			tasklet_init(&mhi_event->task, mhi_ctrl_ev_task,
				     (ulong)mhi_event);
		else
			tasklet_init(&mhi_event->task, mhi_ev_task,
				     (ulong)mhi_event);
	}

	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		mutex_init(&mhi_chan->mutex);
		init_completion(&mhi_chan->completion);
		rwlock_init(&mhi_chan->lock);

		/* used in setting bei field of TRE */
		mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
		mhi_chan->intmod = mhi_event->intmod;
	}

	if (mhi_cntrl->bounce_buf) {
		mhi_cntrl->map_single = mhi_map_single_use_bb;
		mhi_cntrl->unmap_single = mhi_unmap_single_use_bb;
	} else {
		mhi_cntrl->map_single = mhi_map_single_no_bb;
		mhi_cntrl->unmap_single = mhi_unmap_single_no_bb;
	}

	/* Read the MHI device info */
	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs,
			   SOC_HW_VERSION_OFFS, &soc_info);
	if (ret)
		goto err_destroy_wq;

	mhi_cntrl->family_number = FIELD_GET(SOC_HW_VERSION_FAM_NUM_BMSK, soc_info);
	mhi_cntrl->device_number = FIELD_GET(SOC_HW_VERSION_DEV_NUM_BMSK, soc_info);
	mhi_cntrl->major_version = FIELD_GET(SOC_HW_VERSION_MAJOR_VER_BMSK, soc_info);
	mhi_cntrl->minor_version = FIELD_GET(SOC_HW_VERSION_MINOR_VER_BMSK, soc_info);

	mhi_cntrl->index = ida_alloc(&mhi_controller_ida, GFP_KERNEL);
	if (mhi_cntrl->index < 0) {
		ret = mhi_cntrl->index;
		goto err_destroy_wq;
	}

	/* Register controller with MHI bus */
	mhi_dev = mhi_alloc_device(mhi_cntrl);
	if (IS_ERR(mhi_dev)) {
		dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate MHI device\n");
		ret = PTR_ERR(mhi_dev);
		goto err_ida_free;
	}

	mhi_dev->dev_type = MHI_DEVICE_CONTROLLER;
	mhi_dev->mhi_cntrl = mhi_cntrl;
	dev_set_name(&mhi_dev->dev, "mhi%d", mhi_cntrl->index);
	mhi_dev->name = dev_name(&mhi_dev->dev);

	/* Init wakeup source */
	device_init_wakeup(&mhi_dev->dev, true);

	ret = device_add(&mhi_dev->dev);
	if (ret)
		goto err_release_dev;

	mhi_cntrl->mhi_dev = mhi_dev;

	mhi_create_debugfs(mhi_cntrl);

	return 0;

err_release_dev:
	put_device(&mhi_dev->dev);
err_ida_free:
	ida_free(&mhi_controller_ida, mhi_cntrl->index);
err_destroy_wq:
	destroy_workqueue(mhi_cntrl->hiprio_wq);
err_free_cmd:
	kfree(mhi_cntrl->mhi_cmd);
err_free_event:
	kfree(mhi_cntrl->mhi_event);
	vfree(mhi_cntrl->mhi_chan);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_register_controller);

void mhi_unregister_controller(struct mhi_controller *mhi_cntrl)
{
	struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
	struct mhi_chan *mhi_chan = mhi_cntrl->mhi_chan;
	unsigned int i;

	mhi_destroy_debugfs(mhi_cntrl);

	destroy_workqueue(mhi_cntrl->hiprio_wq);
	kfree(mhi_cntrl->mhi_cmd);
	kfree(mhi_cntrl->mhi_event);

	/* Drop the references to MHI devices created for channels */
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		if (!mhi_chan->mhi_dev)
			continue;

		put_device(&mhi_chan->mhi_dev->dev);
	}
	vfree(mhi_cntrl->mhi_chan);

	device_del(&mhi_dev->dev);
	put_device(&mhi_dev->dev);

	ida_free(&mhi_controller_ida, mhi_cntrl->index);
}
EXPORT_SYMBOL_GPL(mhi_unregister_controller);

struct mhi_controller *mhi_alloc_controller(void)
{
	struct mhi_controller *mhi_cntrl;

	mhi_cntrl = kzalloc(sizeof(*mhi_cntrl), GFP_KERNEL);

	return mhi_cntrl;
}
EXPORT_SYMBOL_GPL(mhi_alloc_controller);

void mhi_free_controller(struct mhi_controller *mhi_cntrl)
{
	kfree(mhi_cntrl);
}
EXPORT_SYMBOL_GPL(mhi_free_controller);

int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 bhi_off, bhie_off;
	int ret;

	mutex_lock(&mhi_cntrl->pm_mutex);

	ret = mhi_init_dev_ctxt(mhi_cntrl);
	if (ret)
		goto error_dev_ctxt;

	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &bhi_off);
	if (ret) {
		dev_err(dev, "Error getting BHI offset\n");
		goto error_reg_offset;
	}

	if (bhi_off >= mhi_cntrl->reg_len) {
		dev_err(dev, "BHI offset: 0x%x is out of range: 0x%zx\n",
			bhi_off, mhi_cntrl->reg_len);
		ret = -EINVAL;
		goto error_reg_offset;
	}
	mhi_cntrl->bhi = mhi_cntrl->regs + bhi_off;

	if (mhi_cntrl->fbc_download || mhi_cntrl->rddm_size) {
		ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF,
				   &bhie_off);
		if (ret) {
			dev_err(dev, "Error getting BHIE offset\n");
			goto error_reg_offset;
		}

		if (bhie_off >= mhi_cntrl->reg_len) {
			dev_err(dev,
				"BHIe offset: 0x%x is out of range: 0x%zx\n",
				bhie_off, mhi_cntrl->reg_len);
			ret = -EINVAL;
			goto error_reg_offset;
		}
		mhi_cntrl->bhie = mhi_cntrl->regs + bhie_off;
	}

	if (mhi_cntrl->rddm_size) {
		/*
		 * This controller supports RDDM, so we need to manually clear
		 * BHIE RX registers since POR values are undefined.
		 */
		memset_io(mhi_cntrl->bhie + BHIE_RXVECADDR_LOW_OFFS,
			  0, BHIE_RXVECSTATUS_OFFS - BHIE_RXVECADDR_LOW_OFFS +
			  4);
		/*
		 * Allocate RDDM table for debugging purposes if specified
		 */
		mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image,
				     mhi_cntrl->rddm_size);
		if (mhi_cntrl->rddm_image) {
			ret = mhi_rddm_prepare(mhi_cntrl,
					       mhi_cntrl->rddm_image);
			if (ret) {
				mhi_free_bhie_table(mhi_cntrl,
						    mhi_cntrl->rddm_image);
				goto error_reg_offset;
			}
		}
	}

	mutex_unlock(&mhi_cntrl->pm_mutex);

	return 0;

error_reg_offset:
	mhi_deinit_dev_ctxt(mhi_cntrl);

error_dev_ctxt:
	mutex_unlock(&mhi_cntrl->pm_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_prepare_for_power_up);

void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl)
{
	if (mhi_cntrl->fbc_image) {
		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
		mhi_cntrl->fbc_image = NULL;
	}

	if (mhi_cntrl->rddm_image) {
		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
		mhi_cntrl->rddm_image = NULL;
	}

	mhi_cntrl->bhi = NULL;
	mhi_cntrl->bhie = NULL;

	mhi_deinit_dev_ctxt(mhi_cntrl);
}
EXPORT_SYMBOL_GPL(mhi_unprepare_after_power_down);

static void mhi_release_device(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);

	/*
	 * We need to set the mhi_chan->mhi_dev to NULL here since the MHI
	 * devices for the channels will only get created if the mhi_dev
	 * associated with it is NULL. This scenario will happen during the
	 * controller suspend and resume.
	 */
	if (mhi_dev->ul_chan)
		mhi_dev->ul_chan->mhi_dev = NULL;

	if (mhi_dev->dl_chan)
		mhi_dev->dl_chan->mhi_dev = NULL;

	kfree(mhi_dev);
}

struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl)
{
	struct mhi_device *mhi_dev;
	struct device *dev;

	mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL);
	if (!mhi_dev)
		return ERR_PTR(-ENOMEM);

	dev = &mhi_dev->dev;
	device_initialize(dev);
	dev->bus = &mhi_bus_type;
	dev->release = mhi_release_device;

	if (mhi_cntrl->mhi_dev) {
		/* for MHI client devices, parent is the MHI controller device */
		dev->parent = &mhi_cntrl->mhi_dev->dev;
	} else {
		/* for MHI controller device, parent is the bus device (e.g. pci device) */
		dev->parent = mhi_cntrl->cntrl_dev;
	}

	mhi_dev->mhi_cntrl = mhi_cntrl;
	mhi_dev->dev_wake = 0;

	return mhi_dev;
}

static int mhi_driver_probe(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct device_driver *drv = dev->driver;
	struct mhi_driver *mhi_drv = to_mhi_driver(drv);
	struct mhi_event *mhi_event;
	struct mhi_chan *ul_chan = mhi_dev->ul_chan;
	struct mhi_chan *dl_chan = mhi_dev->dl_chan;
	int ret;

	/* Bring device out of LPM */
	ret = mhi_device_get_sync(mhi_dev);
	if (ret)
		return ret;

	ret = -EINVAL;

	if (ul_chan) {
		/*
		 * If the channel supports LPM notifications, then status_cb
		 * should be provided
		 */
		if (ul_chan->lpm_notify && !mhi_drv->status_cb)
			goto exit_probe;

		/* For non-offload channels, ul_xfer_cb should be provided */
		if (!ul_chan->offload_ch && !mhi_drv->ul_xfer_cb)
			goto exit_probe;

		ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
	}

	ret = -EINVAL;
	if (dl_chan) {
		/*
		 * If the channel supports LPM notifications, then status_cb
		 * should be provided
		 */
		if (dl_chan->lpm_notify && !mhi_drv->status_cb)
			goto exit_probe;

		/* For non-offload channels, dl_xfer_cb should be provided */
		if (!dl_chan->offload_ch && !mhi_drv->dl_xfer_cb)
			goto exit_probe;

		mhi_event = &mhi_cntrl->mhi_event[dl_chan->er_index];

		/*
		 * If the channel event ring is managed by the client, then
		 * status_cb must be provided so that the framework can
		 * notify pending data
		 */
		if (mhi_event->cl_manage && !mhi_drv->status_cb)
			goto exit_probe;

		dl_chan->xfer_cb = mhi_drv->dl_xfer_cb;
	}

	/* Call the user provided probe function */
	ret = mhi_drv->probe(mhi_dev, mhi_dev->id);
	if (ret)
		goto exit_probe;

	mhi_device_put(mhi_dev);

	return ret;

exit_probe:
	mhi_unprepare_from_transfer(mhi_dev);

	mhi_device_put(mhi_dev);

	return ret;
}

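/*
 * Removal tears the channels down in two passes: first wake any waiters
 * and reset both channels, then, after the client driver's remove
 * callback, free the channel context of each channel that was enabled.
 */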
static int mhi_driver_remove(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_driver *mhi_drv = to_mhi_driver(dev->driver);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan;
	enum mhi_ch_state ch_state[] = {
		MHI_CH_STATE_DISABLED,
		MHI_CH_STATE_DISABLED
	};
	int dir;

	/* Skip if it is a controller device */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	/* Reset both channels */
	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;

		if (!mhi_chan)
			continue;

		/* Wake all threads waiting for completion */
		write_lock_irq(&mhi_chan->lock);
		mhi_chan->ccs = MHI_EV_CC_INVALID;
		complete_all(&mhi_chan->completion);
		write_unlock_irq(&mhi_chan->lock);

		/* Set the channel state to disabled */
		mutex_lock(&mhi_chan->mutex);
		write_lock_irq(&mhi_chan->lock);
		ch_state[dir] = mhi_chan->ch_state;
		mhi_chan->ch_state = MHI_CH_STATE_SUSPENDED;
		write_unlock_irq(&mhi_chan->lock);

		/* Reset the non-offload channel */
		if (!mhi_chan->offload_ch)
			mhi_reset_chan(mhi_cntrl, mhi_chan);

		mutex_unlock(&mhi_chan->mutex);
	}

	mhi_drv->remove(mhi_dev);

	/* De-init channel if it was enabled */
	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;

		if (!mhi_chan)
			continue;

		mutex_lock(&mhi_chan->mutex);

		if ((ch_state[dir] == MHI_CH_STATE_ENABLED ||
		     ch_state[dir] == MHI_CH_STATE_STOP) &&
		    !mhi_chan->offload_ch)
			mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);

		mhi_chan->ch_state = MHI_CH_STATE_DISABLED;

		mutex_unlock(&mhi_chan->mutex);
	}

	while (mhi_dev->dev_wake)
		mhi_device_put(mhi_dev);

	return 0;
}

int __mhi_driver_register(struct mhi_driver *mhi_drv, struct module *owner)
{
	struct device_driver *driver = &mhi_drv->driver;

	if (!mhi_drv->probe || !mhi_drv->remove)
		return -EINVAL;

	driver->bus = &mhi_bus_type;
	driver->owner = owner;
	driver->probe = mhi_driver_probe;
	driver->remove = mhi_driver_remove;

	return driver_register(driver);
}
EXPORT_SYMBOL_GPL(__mhi_driver_register);

void mhi_driver_unregister(struct mhi_driver *mhi_drv)
{
	driver_unregister(&mhi_drv->driver);
}
EXPORT_SYMBOL_GPL(mhi_driver_unregister);

static int mhi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);

	return add_uevent_var(env, "MODALIAS=" MHI_DEVICE_MODALIAS_FMT,
					mhi_dev->name);
}

static int mhi_match(struct device *dev, struct device_driver *drv)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_driver *mhi_drv = to_mhi_driver(drv);
	const struct mhi_device_id *id;

	/*
	 * If the device is a controller type then there is no client driver
	 * associated with it
	 */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

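	/* The id_table is terminated by an entry with an empty channel name */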
	for (id = mhi_drv->id_table; id->chan[0]; id++)
		if (!strcmp(mhi_dev->name, id->chan)) {
			mhi_dev->id = id;
			return 1;
		}

	return 0;
}

struct bus_type mhi_bus_type = {
	.name = "mhi",
	.dev_name = "mhi",
	.match = mhi_match,
	.uevent = mhi_uevent,
	.dev_groups = mhi_dev_groups,
};

static int __init mhi_init(void)
{
	mhi_debugfs_init();
	return bus_register(&mhi_bus_type);
}

static void __exit mhi_exit(void)
{
	mhi_debugfs_exit();
	bus_unregister(&mhi_bus_type);
}

postcore_initcall(mhi_init);
module_exit(mhi_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MHI Host Interface");