cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

main.c (43956B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
      4 *
      5 */
      6
      7#include <linux/delay.h>
      8#include <linux/device.h>
      9#include <linux/dma-direction.h>
     10#include <linux/dma-mapping.h>
     11#include <linux/interrupt.h>
     12#include <linux/list.h>
     13#include <linux/mhi.h>
     14#include <linux/module.h>
     15#include <linux/skbuff.h>
     16#include <linux/slab.h>
     17#include "internal.h"
     18
     19int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
     20			      void __iomem *base, u32 offset, u32 *out)
     21{
     22	return mhi_cntrl->read_reg(mhi_cntrl, base + offset, out);
     23}
     24
     25int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
     26				    void __iomem *base, u32 offset,
     27				    u32 mask, u32 *out)
     28{
     29	u32 tmp;
     30	int ret;
     31
     32	ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
     33	if (ret)
     34		return ret;
     35
     36	*out = (tmp & mask) >> __ffs(mask);
     37
     38	return 0;
     39}
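
/*
 * Illustrative example (not from the driver itself): mhi_read_reg_field()
 * extracts a bit field by masking and shifting right by the mask's lowest
 * set bit. With mask 0x0000ff00 and a register value of 0x00001234, __ffs()
 * returns 8, so the caller receives (0x00001234 & 0x0000ff00) >> 8 == 0x12.
 * A typical call mirrors mhi_get_mhi_state() further below:
 *
 *	u32 state;
 *	int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
 *				     MHISTATUS_MHISTATE_MASK, &state);
 */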
     40
     41int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl,
     42				    void __iomem *base, u32 offset,
     43				    u32 mask, u32 val, u32 delayus)
     44{
     45	int ret;
     46	u32 out, retry = (mhi_cntrl->timeout_ms * 1000) / delayus;
     47
     48	while (retry--) {
     49		ret = mhi_read_reg_field(mhi_cntrl, base, offset, mask, &out);
     50		if (ret)
     51			return ret;
     52
     53		if (out == val)
     54			return 0;
     55
     56		fsleep(delayus);
     57	}
     58
     59	return -ETIMEDOUT;
     60}
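
/*
 * Illustrative example (not from the driver itself): the poll budget above is
 * retry = (timeout_ms * 1000) / delayus. Assuming a timeout_ms of 1000 and a
 * delayus of 25000, the field is sampled 40 times roughly 25 ms apart before
 * -ETIMEDOUT is returned. A caller waiting for the device to reach M0 might
 * look like:
 *
 *	ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
 *				 MHISTATUS_MHISTATE_MASK, MHI_STATE_M0, 25000);
 */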
     61
     62void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
     63		   u32 offset, u32 val)
     64{
     65	mhi_cntrl->write_reg(mhi_cntrl, base + offset, val);
     66}
     67
     68int __must_check mhi_write_reg_field(struct mhi_controller *mhi_cntrl,
     69				     void __iomem *base, u32 offset, u32 mask,
     70				     u32 val)
     71{
     72	int ret;
     73	u32 tmp;
     74
     75	ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
     76	if (ret)
     77		return ret;
     78
     79	tmp &= ~mask;
     80	tmp |= (val << __ffs(mask));
     81	mhi_write_reg(mhi_cntrl, base, offset, tmp);
     82
     83	return 0;
     84}
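
/*
 * Illustrative example (not from the driver itself): mhi_write_reg_field() is
 * a read-modify-write. With mask 0x0000ff00 and val 0x12, a current register
 * value of 0xabcd00ff becomes (0xabcd00ff & ~0x0000ff00) | (0x12 << 8), i.e.
 * 0xabcd12ff: only the masked field changes, all other bits are preserved.
 */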
     85
     86void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
     87		  dma_addr_t db_val)
     88{
     89	mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(db_val));
     90	mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(db_val));
     91}
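
/*
 * Illustrative note (not from the driver itself): a doorbell value is the
 * 64-bit device-visible ring address, written as two 32-bit MMIO accesses
 * with the upper half (offset 4) first and the lower half (offset 0) last.
 * The ring helpers below all derive the value the same way before ringing:
 *
 *	dma_addr_t db = ring->iommu_base + (ring->wp - ring->base);
 *	mhi_write_db(mhi_cntrl, ring->db_addr, db);
 */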
     92
     93void mhi_db_brstmode(struct mhi_controller *mhi_cntrl,
     94		     struct db_cfg *db_cfg,
     95		     void __iomem *db_addr,
     96		     dma_addr_t db_val)
     97{
     98	if (db_cfg->db_mode) {
     99		db_cfg->db_val = db_val;
    100		mhi_write_db(mhi_cntrl, db_addr, db_val);
    101		db_cfg->db_mode = 0;
    102	}
    103}
    104
    105void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
    106			     struct db_cfg *db_cfg,
    107			     void __iomem *db_addr,
    108			     dma_addr_t db_val)
    109{
    110	db_cfg->db_val = db_val;
    111	mhi_write_db(mhi_cntrl, db_addr, db_val);
    112}
    113
    114void mhi_ring_er_db(struct mhi_event *mhi_event)
    115{
    116	struct mhi_ring *ring = &mhi_event->ring;
    117
    118	mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg,
    119				     ring->db_addr, le64_to_cpu(*ring->ctxt_wp));
    120}
    121
    122void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd)
    123{
    124	dma_addr_t db;
    125	struct mhi_ring *ring = &mhi_cmd->ring;
    126
    127	db = ring->iommu_base + (ring->wp - ring->base);
    128	*ring->ctxt_wp = cpu_to_le64(db);
    129	mhi_write_db(mhi_cntrl, ring->db_addr, db);
    130}
    131
    132void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
    133		      struct mhi_chan *mhi_chan)
    134{
    135	struct mhi_ring *ring = &mhi_chan->tre_ring;
    136	dma_addr_t db;
    137
    138	db = ring->iommu_base + (ring->wp - ring->base);
    139
    140	/*
    141	 * Writes to the new ring element must be visible to the hardware
    142	 * before letting the h/w know there is a new element to fetch.
    143	 */
    144	dma_wmb();
    145	*ring->ctxt_wp = cpu_to_le64(db);
    146
    147	mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg,
    148				    ring->db_addr, db);
    149}
    150
    151enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl)
    152{
    153	u32 exec;
    154	int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec);
    155
    156	return (ret) ? MHI_EE_MAX : exec;
    157}
    158EXPORT_SYMBOL_GPL(mhi_get_exec_env);
    159
    160enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl)
    161{
    162	u32 state;
    163	int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
    164				     MHISTATUS_MHISTATE_MASK, &state);
    165	return ret ? MHI_STATE_MAX : state;
    166}
    167EXPORT_SYMBOL_GPL(mhi_get_mhi_state);
    168
    169void mhi_soc_reset(struct mhi_controller *mhi_cntrl)
    170{
    171	if (mhi_cntrl->reset) {
    172		mhi_cntrl->reset(mhi_cntrl);
    173		return;
    174	}
    175
    176	/* Generic MHI SoC reset */
    177	mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, MHI_SOC_RESET_REQ_OFFSET,
    178		      MHI_SOC_RESET_REQ);
    179}
    180EXPORT_SYMBOL_GPL(mhi_soc_reset);
    181
    182int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
    183			 struct mhi_buf_info *buf_info)
    184{
    185	buf_info->p_addr = dma_map_single(mhi_cntrl->cntrl_dev,
    186					  buf_info->v_addr, buf_info->len,
    187					  buf_info->dir);
    188	if (dma_mapping_error(mhi_cntrl->cntrl_dev, buf_info->p_addr))
    189		return -ENOMEM;
    190
    191	return 0;
    192}
    193
    194int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
    195			  struct mhi_buf_info *buf_info)
    196{
    197	void *buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, buf_info->len,
    198				       &buf_info->p_addr, GFP_ATOMIC);
    199
    200	if (!buf)
    201		return -ENOMEM;
    202
    203	if (buf_info->dir == DMA_TO_DEVICE)
    204		memcpy(buf, buf_info->v_addr, buf_info->len);
    205
    206	buf_info->bb_addr = buf;
    207
    208	return 0;
    209}
    210
    211void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
    212			    struct mhi_buf_info *buf_info)
    213{
    214	dma_unmap_single(mhi_cntrl->cntrl_dev, buf_info->p_addr, buf_info->len,
    215			 buf_info->dir);
    216}
    217
    218void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
    219			     struct mhi_buf_info *buf_info)
    220{
    221	if (buf_info->dir == DMA_FROM_DEVICE)
    222		memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len);
    223
    224	dma_free_coherent(mhi_cntrl->cntrl_dev, buf_info->len,
    225			  buf_info->bb_addr, buf_info->p_addr);
    226}
    227
    228static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl,
    229				      struct mhi_ring *ring)
    230{
    231	int nr_el;
    232
    233	if (ring->wp < ring->rp) {
    234		nr_el = ((ring->rp - ring->wp) / ring->el_size) - 1;
    235	} else {
    236		nr_el = (ring->rp - ring->base) / ring->el_size;
    237		nr_el += ((ring->base + ring->len - ring->wp) /
    238			  ring->el_size) - 1;
    239	}
    240
    241	return nr_el;
    242}
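
/*
 * Illustrative example (not from the driver itself): one slot is always kept
 * unused so that a full ring can be distinguished from an empty one. For an
 * empty four-element ring (rp == wp == base, len == 4 * el_size) the else
 * branch yields 0 + 4 - 1 = 3 available elements, i.e. at most one fewer
 * transfer than the ring's element count can be outstanding before
 * mhi_is_ring_full() reports the ring as full.
 */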
    243
    244static void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr)
    245{
    246	return (addr - ring->iommu_base) + ring->base;
    247}
    248
    249static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl,
    250				 struct mhi_ring *ring)
    251{
    252	ring->wp += ring->el_size;
    253	if (ring->wp >= (ring->base + ring->len))
    254		ring->wp = ring->base;
    255	/* smp update */
    256	smp_wmb();
    257}
    258
    259static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
    260				 struct mhi_ring *ring)
    261{
    262	ring->rp += ring->el_size;
    263	if (ring->rp >= (ring->base + ring->len))
    264		ring->rp = ring->base;
    265	/* smp update */
    266	smp_wmb();
    267}
    268
    269static bool is_valid_ring_ptr(struct mhi_ring *ring, dma_addr_t addr)
    270{
    271	return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len;
    272}
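
/*
 * Illustrative note (not from the driver itself): ring pointers shared with
 * the device are IOMMU/DMA addresses, so every pointer read back from a
 * context structure is range-checked with is_valid_ring_ptr() and only then
 * translated to a host virtual address with mhi_to_virtual(). For example, a
 * device rp of ring->iommu_base + 0x40 maps to ring->base + 0x40; anything
 * outside [iommu_base, iommu_base + len) is rejected by the callers below.
 */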
    273
    274int mhi_destroy_device(struct device *dev, void *data)
    275{
    276	struct mhi_chan *ul_chan, *dl_chan;
    277	struct mhi_device *mhi_dev;
    278	struct mhi_controller *mhi_cntrl;
    279	enum mhi_ee_type ee = MHI_EE_MAX;
    280
    281	if (dev->bus != &mhi_bus_type)
    282		return 0;
    283
    284	mhi_dev = to_mhi_device(dev);
    285	mhi_cntrl = mhi_dev->mhi_cntrl;
    286
    287	/* Only destroy virtual devices that are attached to the bus */
    288	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
    289		return 0;
    290
    291	ul_chan = mhi_dev->ul_chan;
    292	dl_chan = mhi_dev->dl_chan;
    293
    294	/*
    295	 * If an execution environment is specified, remove only those devices
    296	 * that were started in it (based on the channels' ee_mask), since we
    297	 * are moving on to a different execution environment.
    298	 */
    299	if (data)
    300		ee = *(enum mhi_ee_type *)data;
    301
    302	/*
    303	 * For the suspend and resume case, this function will get called
    304	 * without mhi_unregister_controller(). Hence, we need to drop the
    305	 * references to mhi_dev created for ul and dl channels. We can
    306	 * be sure that there will be no instances of mhi_dev left after
    307	 * this.
    308	 */
    309	if (ul_chan) {
    310		if (ee != MHI_EE_MAX && !(ul_chan->ee_mask & BIT(ee)))
    311			return 0;
    312
    313		put_device(&ul_chan->mhi_dev->dev);
    314	}
    315
    316	if (dl_chan) {
    317		if (ee != MHI_EE_MAX && !(dl_chan->ee_mask & BIT(ee)))
    318			return 0;
    319
    320		put_device(&dl_chan->mhi_dev->dev);
    321	}
    322
    323	dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n",
    324		 mhi_dev->name);
    325
    326	/* Notify the client and remove the device from MHI bus */
    327	device_del(dev);
    328	put_device(dev);
    329
    330	return 0;
    331}
    332
    333int mhi_get_free_desc_count(struct mhi_device *mhi_dev,
    334				enum dma_data_direction dir)
    335{
    336	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
    337	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ?
    338		mhi_dev->ul_chan : mhi_dev->dl_chan;
    339	struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
    340
    341	return get_nr_avail_ring_elements(mhi_cntrl, tre_ring);
    342}
    343EXPORT_SYMBOL_GPL(mhi_get_free_desc_count);
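
/*
 * Illustrative example (not from the driver itself): a hypothetical client
 * could use the free descriptor count to apply backpressure before queueing
 * uplink data, e.g. pausing the TX queue of a hypothetical net_device (ndev):
 *
 *	if (mhi_get_free_desc_count(mhi_dev, DMA_TO_DEVICE) > 0)
 *		ret = mhi_queue_skb(mhi_dev, DMA_TO_DEVICE, skb, skb->len,
 *				    MHI_EOT);
 *	else
 *		netif_stop_queue(ndev);
 */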
    344
    345void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
    346{
    347	struct mhi_driver *mhi_drv;
    348
    349	if (!mhi_dev->dev.driver)
    350		return;
    351
    352	mhi_drv = to_mhi_driver(mhi_dev->dev.driver);
    353
    354	if (mhi_drv->status_cb)
    355		mhi_drv->status_cb(mhi_dev, cb_reason);
    356}
    357EXPORT_SYMBOL_GPL(mhi_notify);
    358
    359/* Bind MHI channels to MHI devices */
    360void mhi_create_devices(struct mhi_controller *mhi_cntrl)
    361{
    362	struct mhi_chan *mhi_chan;
    363	struct mhi_device *mhi_dev;
    364	struct device *dev = &mhi_cntrl->mhi_dev->dev;
    365	int i, ret;
    366
    367	mhi_chan = mhi_cntrl->mhi_chan;
    368	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
    369		if (!mhi_chan->configured || mhi_chan->mhi_dev ||
    370		    !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee)))
    371			continue;
    372		mhi_dev = mhi_alloc_device(mhi_cntrl);
    373		if (IS_ERR(mhi_dev))
    374			return;
    375
    376		mhi_dev->dev_type = MHI_DEVICE_XFER;
    377		switch (mhi_chan->dir) {
    378		case DMA_TO_DEVICE:
    379			mhi_dev->ul_chan = mhi_chan;
    380			mhi_dev->ul_chan_id = mhi_chan->chan;
    381			break;
    382		case DMA_FROM_DEVICE:
    383			/* We use dl_chan as offload channels */
    384			mhi_dev->dl_chan = mhi_chan;
    385			mhi_dev->dl_chan_id = mhi_chan->chan;
    386			break;
    387		default:
    388			dev_err(dev, "Direction not supported\n");
    389			put_device(&mhi_dev->dev);
    390			return;
    391		}
    392
    393		get_device(&mhi_dev->dev);
    394		mhi_chan->mhi_dev = mhi_dev;
    395
    396		/* Check if the next channel has the same name (its UL/DL counterpart) */
    397		if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) {
    398			if (!strcmp(mhi_chan[1].name, mhi_chan->name)) {
    399				i++;
    400				mhi_chan++;
    401				if (mhi_chan->dir == DMA_TO_DEVICE) {
    402					mhi_dev->ul_chan = mhi_chan;
    403					mhi_dev->ul_chan_id = mhi_chan->chan;
    404				} else {
    405					mhi_dev->dl_chan = mhi_chan;
    406					mhi_dev->dl_chan_id = mhi_chan->chan;
    407				}
    408				get_device(&mhi_dev->dev);
    409				mhi_chan->mhi_dev = mhi_dev;
    410			}
    411		}
    412
    413		/* Channel name is the same for both UL and DL */
    414		mhi_dev->name = mhi_chan->name;
    415		dev_set_name(&mhi_dev->dev, "%s_%s",
    416			     dev_name(&mhi_cntrl->mhi_dev->dev),
    417			     mhi_dev->name);
    418
    419		/* Init wakeup source if available */
    420		if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable)
    421			device_init_wakeup(&mhi_dev->dev, true);
    422
    423		ret = device_add(&mhi_dev->dev);
    424		if (ret)
    425			put_device(&mhi_dev->dev);
    426	}
    427}
    428
    429irqreturn_t mhi_irq_handler(int irq_number, void *dev)
    430{
    431	struct mhi_event *mhi_event = dev;
    432	struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
    433	struct mhi_event_ctxt *er_ctxt =
    434		&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
    435	struct mhi_ring *ev_ring = &mhi_event->ring;
    436	dma_addr_t ptr = le64_to_cpu(er_ctxt->rp);
    437	void *dev_rp;
    438
    439	if (!is_valid_ring_ptr(ev_ring, ptr)) {
    440		dev_err(&mhi_cntrl->mhi_dev->dev,
    441			"Event ring rp points outside of the event ring\n");
    442		return IRQ_HANDLED;
    443	}
    444
    445	dev_rp = mhi_to_virtual(ev_ring, ptr);
    446
    447	/* Only proceed if event ring has pending events */
    448	if (ev_ring->rp == dev_rp)
    449		return IRQ_HANDLED;
    450
    451	/* For a client-managed event ring, notify the client of pending data */
    452	if (mhi_event->cl_manage) {
    453		struct mhi_chan *mhi_chan = mhi_event->mhi_chan;
    454		struct mhi_device *mhi_dev = mhi_chan->mhi_dev;
    455
    456		if (mhi_dev)
    457			mhi_notify(mhi_dev, MHI_CB_PENDING_DATA);
    458	} else {
    459		tasklet_schedule(&mhi_event->task);
    460	}
    461
    462	return IRQ_HANDLED;
    463}
    464
    465irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
    466{
    467	struct mhi_controller *mhi_cntrl = priv;
    468	struct device *dev = &mhi_cntrl->mhi_dev->dev;
    469	enum mhi_state state;
    470	enum mhi_pm_state pm_state = 0;
    471	enum mhi_ee_type ee;
    472
    473	write_lock_irq(&mhi_cntrl->pm_lock);
    474	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
    475		write_unlock_irq(&mhi_cntrl->pm_lock);
    476		goto exit_intvec;
    477	}
    478
    479	state = mhi_get_mhi_state(mhi_cntrl);
    480	ee = mhi_get_exec_env(mhi_cntrl);
    481	dev_dbg(dev, "local ee: %s state: %s device ee: %s state: %s\n",
    482		TO_MHI_EXEC_STR(mhi_cntrl->ee),
    483		mhi_state_str(mhi_cntrl->dev_state),
    484		TO_MHI_EXEC_STR(ee), mhi_state_str(state));
    485
    486	if (state == MHI_STATE_SYS_ERR) {
    487		dev_dbg(dev, "System error detected\n");
    488		pm_state = mhi_tryset_pm_state(mhi_cntrl,
    489					       MHI_PM_SYS_ERR_DETECT);
    490	}
    491	write_unlock_irq(&mhi_cntrl->pm_lock);
    492
    493	if (pm_state != MHI_PM_SYS_ERR_DETECT || ee == mhi_cntrl->ee)
    494		goto exit_intvec;
    495
    496	switch (ee) {
    497	case MHI_EE_RDDM:
    498		/* proceed if power down is not already in progress */
    499		if (mhi_cntrl->rddm_image && mhi_is_active(mhi_cntrl)) {
    500			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
    501			mhi_cntrl->ee = ee;
    502			wake_up_all(&mhi_cntrl->state_event);
    503		}
    504		break;
    505	case MHI_EE_PBL:
    506	case MHI_EE_EDL:
    507	case MHI_EE_PTHRU:
    508		mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
    509		mhi_cntrl->ee = ee;
    510		wake_up_all(&mhi_cntrl->state_event);
    511		mhi_pm_sys_err_handler(mhi_cntrl);
    512		break;
    513	default:
    514		wake_up_all(&mhi_cntrl->state_event);
    515		mhi_pm_sys_err_handler(mhi_cntrl);
    516		break;
    517	}
    518
    519exit_intvec:
    520
    521	return IRQ_HANDLED;
    522}
    523
    524irqreturn_t mhi_intvec_handler(int irq_number, void *dev)
    525{
    526	struct mhi_controller *mhi_cntrl = dev;
    527
    528	/* Wake up events waiting for state change */
    529	wake_up_all(&mhi_cntrl->state_event);
    530
    531	return IRQ_WAKE_THREAD;
    532}
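
/*
 * Illustrative note (not from the driver itself): interrupt handling is split
 * in two stages. The hard handler above only wakes state waiters and returns
 * IRQ_WAKE_THREAD; mhi_intvec_threaded_handler() then does the register reads
 * and the SYS_ERR/RDDM handling in thread context. The controller init code
 * (outside this file) is assumed to register the pair along the lines of:
 *
 *	ret = request_threaded_irq(irq, mhi_intvec_handler,
 *				   mhi_intvec_threaded_handler,
 *				   IRQF_SHARED, "mhi", mhi_cntrl);
 */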
    533
    534static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl,
    535					struct mhi_ring *ring)
    536{
    537	/* Update the WP */
    538	ring->wp += ring->el_size;
    539
    540	if (ring->wp >= (ring->base + ring->len))
    541		ring->wp = ring->base;
    542
    543	*ring->ctxt_wp = cpu_to_le64(ring->iommu_base + (ring->wp - ring->base));
    544
    545	/* Update the RP */
    546	ring->rp += ring->el_size;
    547	if (ring->rp >= (ring->base + ring->len))
    548		ring->rp = ring->base;
    549
    550	/* Update to all cores */
    551	smp_wmb();
    552}
    553
    554static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
    555			    struct mhi_ring_element *event,
    556			    struct mhi_chan *mhi_chan)
    557{
    558	struct mhi_ring *buf_ring, *tre_ring;
    559	struct device *dev = &mhi_cntrl->mhi_dev->dev;
    560	struct mhi_result result;
    561	unsigned long flags = 0;
    562	u32 ev_code;
    563
    564	ev_code = MHI_TRE_GET_EV_CODE(event);
    565	buf_ring = &mhi_chan->buf_ring;
    566	tre_ring = &mhi_chan->tre_ring;
    567
    568	result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
    569		-EOVERFLOW : 0;
    570
    571	/*
    572	 * If it's a DB event, we need to grab the lock as a writer
    573	 * and with preemption disabled, because we have to update
    574	 * the DB register and another thread could be trying to do
    575	 * the same.
    576	 */
    577	if (ev_code >= MHI_EV_CC_OOB)
    578		write_lock_irqsave(&mhi_chan->lock, flags);
    579	else
    580		read_lock_bh(&mhi_chan->lock);
    581
    582	if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
    583		goto end_process_tx_event;
    584
    585	switch (ev_code) {
    586	case MHI_EV_CC_OVERFLOW:
    587	case MHI_EV_CC_EOB:
    588	case MHI_EV_CC_EOT:
    589	{
    590		dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event);
    591		struct mhi_ring_element *local_rp, *ev_tre;
    592		void *dev_rp;
    593		struct mhi_buf_info *buf_info;
    594		u16 xfer_len;
    595
    596		if (!is_valid_ring_ptr(tre_ring, ptr)) {
    597			dev_err(&mhi_cntrl->mhi_dev->dev,
    598				"Event element points outside of the tre ring\n");
    599			break;
    600		}
    601		/* Get the TRB this event points to */
    602		ev_tre = mhi_to_virtual(tre_ring, ptr);
    603
    604		dev_rp = ev_tre + 1;
    605		if (dev_rp >= (tre_ring->base + tre_ring->len))
    606			dev_rp = tre_ring->base;
    607
    608		result.dir = mhi_chan->dir;
    609
    610		local_rp = tre_ring->rp;
    611		while (local_rp != dev_rp) {
    612			buf_info = buf_ring->rp;
    613			/* If it's the last TRE, get length from the event */
    614			if (local_rp == ev_tre)
    615				xfer_len = MHI_TRE_GET_EV_LEN(event);
    616			else
    617				xfer_len = buf_info->len;
    618
    619			/* Unmap if it's not pre-mapped by client */
    620			if (likely(!buf_info->pre_mapped))
    621				mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
    622
    623			result.buf_addr = buf_info->cb_buf;
    624
    625			/* truncate to buf len if xfer_len is larger */
    626			result.bytes_xferd =
    627				min_t(u16, xfer_len, buf_info->len);
    628			mhi_del_ring_element(mhi_cntrl, buf_ring);
    629			mhi_del_ring_element(mhi_cntrl, tre_ring);
    630			local_rp = tre_ring->rp;
    631
    632			/* notify client */
    633			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
    634
    635			if (mhi_chan->dir == DMA_TO_DEVICE) {
    636				atomic_dec(&mhi_cntrl->pending_pkts);
    637				/* Release the reference got from mhi_queue() */
    638				mhi_cntrl->runtime_put(mhi_cntrl);
    639			}
    640
    641			/*
    642			 * Recycle the buffer if it is pre-allocated. If
    643			 * re-queueing fails, there is not much we can do
    644			 * apart from dropping the packet.
    645			 */
    646			if (mhi_chan->pre_alloc) {
    647				if (mhi_queue_buf(mhi_chan->mhi_dev,
    648						  mhi_chan->dir,
    649						  buf_info->cb_buf,
    650						  buf_info->len, MHI_EOT)) {
    651					dev_err(dev,
    652						"Error recycling buffer for chan:%d\n",
    653						mhi_chan->chan);
    654					kfree(buf_info->cb_buf);
    655				}
    656			}
    657		}
    658		break;
    659	} /* CC_EOT */
    660	case MHI_EV_CC_OOB:
    661	case MHI_EV_CC_DB_MODE:
    662	{
    663		unsigned long pm_lock_flags;
    664
    665		mhi_chan->db_cfg.db_mode = 1;
    666		read_lock_irqsave(&mhi_cntrl->pm_lock, pm_lock_flags);
    667		if (tre_ring->wp != tre_ring->rp &&
    668		    MHI_DB_ACCESS_VALID(mhi_cntrl)) {
    669			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
    670		}
    671		read_unlock_irqrestore(&mhi_cntrl->pm_lock, pm_lock_flags);
    672		break;
    673	}
    674	case MHI_EV_CC_BAD_TRE:
    675	default:
    676		dev_err(dev, "Unknown event 0x%x\n", ev_code);
    677		break;
    678	} /* switch(MHI_EV_READ_CODE(EV_TRB_CODE,event)) */
    679
    680end_process_tx_event:
    681	if (ev_code >= MHI_EV_CC_OOB)
    682		write_unlock_irqrestore(&mhi_chan->lock, flags);
    683	else
    684		read_unlock_bh(&mhi_chan->lock);
    685
    686	return 0;
    687}
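
/*
 * Illustrative example (not from the driver itself): for each completed TRE
 * the client's xfer_cb is invoked with a struct mhi_result describing the
 * buffer. A minimal downlink callback in a hypothetical client (with a
 * hypothetical consume() helper) could look like:
 *
 *	static void example_dl_xfer_cb(struct mhi_device *mhi_dev,
 *				       struct mhi_result *result)
 *	{
 *		if (result->transaction_status)
 *			return;
 *		consume(result->buf_addr, result->bytes_xferd);
 *	}
 */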
    688
    689static int parse_rsc_event(struct mhi_controller *mhi_cntrl,
    690			   struct mhi_ring_element *event,
    691			   struct mhi_chan *mhi_chan)
    692{
    693	struct mhi_ring *buf_ring, *tre_ring;
    694	struct mhi_buf_info *buf_info;
    695	struct mhi_result result;
    696	int ev_code;
    697	u32 cookie; /* offset to local descriptor */
    698	u16 xfer_len;
    699
    700	buf_ring = &mhi_chan->buf_ring;
    701	tre_ring = &mhi_chan->tre_ring;
    702
    703	ev_code = MHI_TRE_GET_EV_CODE(event);
    704	cookie = MHI_TRE_GET_EV_COOKIE(event);
    705	xfer_len = MHI_TRE_GET_EV_LEN(event);
    706
    707	/* Received an out-of-bounds cookie */
    708	WARN_ON(cookie >= buf_ring->len);
    709
    710	buf_info = buf_ring->base + cookie;
    711
    712	result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
    713		-EOVERFLOW : 0;
    714
    715	/* truncate to buf len if xfer_len is larger */
    716	result.bytes_xferd = min_t(u16, xfer_len, buf_info->len);
    717	result.buf_addr = buf_info->cb_buf;
    718	result.dir = mhi_chan->dir;
    719
    720	read_lock_bh(&mhi_chan->lock);
    721
    722	if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
    723		goto end_process_rsc_event;
    724
    725	WARN_ON(!buf_info->used);
    726
    727	/* notify the client */
    728	mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
    729
    730	/*
    731	 * Note: We're arbitrarily incrementing RP even though the completion
    732	 * packet we processed might not be the same one. The reason we can do
    733	 * this is that the device is guaranteed to cache descriptors in the
    734	 * order it receives them, so even though the completion event is for a
    735	 * different descriptor, we can re-use all descriptors in between.
    736	 * Example:
    737	 * The transfer ring has descriptors: A, B, C, D.
    738	 * The last descriptor the host queued is D (WP) and the first
    739	 * descriptor the host queued is A (RP).
    740	 * The completion event we just serviced is for descriptor C.
    741	 * Then we can safely queue descriptors to replace A, B, and C,
    742	 * even though the host has not received completions for A and B.
    743	 */
    744	mhi_del_ring_element(mhi_cntrl, tre_ring);
    745	buf_info->used = false;
    746
    747end_process_rsc_event:
    748	read_unlock_bh(&mhi_chan->lock);
    749
    750	return 0;
    751}
    752
    753static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
    754				       struct mhi_ring_element *tre)
    755{
    756	dma_addr_t ptr = MHI_TRE_GET_EV_PTR(tre);
    757	struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
    758	struct mhi_ring *mhi_ring = &cmd_ring->ring;
    759	struct mhi_ring_element *cmd_pkt;
    760	struct mhi_chan *mhi_chan;
    761	u32 chan;
    762
    763	if (!is_valid_ring_ptr(mhi_ring, ptr)) {
    764		dev_err(&mhi_cntrl->mhi_dev->dev,
    765			"Event element points outside of the cmd ring\n");
    766		return;
    767	}
    768
    769	cmd_pkt = mhi_to_virtual(mhi_ring, ptr);
    770
    771	chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);
    772
    773	if (chan < mhi_cntrl->max_chan &&
    774	    mhi_cntrl->mhi_chan[chan].configured) {
    775		mhi_chan = &mhi_cntrl->mhi_chan[chan];
    776		write_lock_bh(&mhi_chan->lock);
    777		mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre);
    778		complete(&mhi_chan->completion);
    779		write_unlock_bh(&mhi_chan->lock);
    780	} else {
    781		dev_err(&mhi_cntrl->mhi_dev->dev,
    782			"Completion packet for invalid channel ID: %d\n", chan);
    783	}
    784
    785	mhi_del_ring_element(mhi_cntrl, mhi_ring);
    786}
    787
    788int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
    789			     struct mhi_event *mhi_event,
    790			     u32 event_quota)
    791{
    792	struct mhi_ring_element *dev_rp, *local_rp;
    793	struct mhi_ring *ev_ring = &mhi_event->ring;
    794	struct mhi_event_ctxt *er_ctxt =
    795		&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
    796	struct mhi_chan *mhi_chan;
    797	struct device *dev = &mhi_cntrl->mhi_dev->dev;
    798	u32 chan;
    799	int count = 0;
    800	dma_addr_t ptr = le64_to_cpu(er_ctxt->rp);
    801
    802	/*
    803	 * This is a quick check to avoid unnecessary event processing
    804	 * in case MHI is already in error state, but it's still possible
    805	 * to transition to error state while processing events
    806	 */
    807	if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
    808		return -EIO;
    809
    810	if (!is_valid_ring_ptr(ev_ring, ptr)) {
    811		dev_err(&mhi_cntrl->mhi_dev->dev,
    812			"Event ring rp points outside of the event ring\n");
    813		return -EIO;
    814	}
    815
    816	dev_rp = mhi_to_virtual(ev_ring, ptr);
    817	local_rp = ev_ring->rp;
    818
    819	while (dev_rp != local_rp) {
    820		enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
    821
    822		switch (type) {
    823		case MHI_PKT_TYPE_BW_REQ_EVENT:
    824		{
    825			struct mhi_link_info *link_info;
    826
    827			link_info = &mhi_cntrl->mhi_link_info;
    828			write_lock_irq(&mhi_cntrl->pm_lock);
    829			link_info->target_link_speed =
    830				MHI_TRE_GET_EV_LINKSPEED(local_rp);
    831			link_info->target_link_width =
    832				MHI_TRE_GET_EV_LINKWIDTH(local_rp);
    833			write_unlock_irq(&mhi_cntrl->pm_lock);
    834			dev_dbg(dev, "Received BW_REQ event\n");
    835			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_BW_REQ);
    836			break;
    837		}
    838		case MHI_PKT_TYPE_STATE_CHANGE_EVENT:
    839		{
    840			enum mhi_state new_state;
    841
    842			new_state = MHI_TRE_GET_EV_STATE(local_rp);
    843
    844			dev_dbg(dev, "State change event to state: %s\n",
    845				mhi_state_str(new_state));
    846
    847			switch (new_state) {
    848			case MHI_STATE_M0:
    849				mhi_pm_m0_transition(mhi_cntrl);
    850				break;
    851			case MHI_STATE_M1:
    852				mhi_pm_m1_transition(mhi_cntrl);
    853				break;
    854			case MHI_STATE_M3:
    855				mhi_pm_m3_transition(mhi_cntrl);
    856				break;
    857			case MHI_STATE_SYS_ERR:
    858			{
    859				enum mhi_pm_state pm_state;
    860
    861				dev_dbg(dev, "System error detected\n");
    862				write_lock_irq(&mhi_cntrl->pm_lock);
    863				pm_state = mhi_tryset_pm_state(mhi_cntrl,
    864							MHI_PM_SYS_ERR_DETECT);
    865				write_unlock_irq(&mhi_cntrl->pm_lock);
    866				if (pm_state == MHI_PM_SYS_ERR_DETECT)
    867					mhi_pm_sys_err_handler(mhi_cntrl);
    868				break;
    869			}
    870			default:
    871				dev_err(dev, "Invalid state: %s\n",
    872					mhi_state_str(new_state));
    873			}
    874
    875			break;
    876		}
    877		case MHI_PKT_TYPE_CMD_COMPLETION_EVENT:
    878			mhi_process_cmd_completion(mhi_cntrl, local_rp);
    879			break;
    880		case MHI_PKT_TYPE_EE_EVENT:
    881		{
    882			enum dev_st_transition st = DEV_ST_TRANSITION_MAX;
    883			enum mhi_ee_type event = MHI_TRE_GET_EV_EXECENV(local_rp);
    884
    885			dev_dbg(dev, "Received EE event: %s\n",
    886				TO_MHI_EXEC_STR(event));
    887			switch (event) {
    888			case MHI_EE_SBL:
    889				st = DEV_ST_TRANSITION_SBL;
    890				break;
    891			case MHI_EE_WFW:
    892			case MHI_EE_AMSS:
    893				st = DEV_ST_TRANSITION_MISSION_MODE;
    894				break;
    895			case MHI_EE_FP:
    896				st = DEV_ST_TRANSITION_FP;
    897				break;
    898			case MHI_EE_RDDM:
    899				mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
    900				write_lock_irq(&mhi_cntrl->pm_lock);
    901				mhi_cntrl->ee = event;
    902				write_unlock_irq(&mhi_cntrl->pm_lock);
    903				wake_up_all(&mhi_cntrl->state_event);
    904				break;
    905			default:
    906				dev_err(dev,
    907					"Unhandled EE event: 0x%x\n", type);
    908			}
    909			if (st != DEV_ST_TRANSITION_MAX)
    910				mhi_queue_state_transition(mhi_cntrl, st);
    911
    912			break;
    913		}
    914		case MHI_PKT_TYPE_TX_EVENT:
    915			chan = MHI_TRE_GET_EV_CHID(local_rp);
    916
    917			WARN_ON(chan >= mhi_cntrl->max_chan);
    918
    919			/*
    920			 * Only process the event ring elements whose channel
    921			 * ID is within the maximum supported range.
    922			 */
    923			if (chan < mhi_cntrl->max_chan) {
    924				mhi_chan = &mhi_cntrl->mhi_chan[chan];
    925				if (!mhi_chan->configured)
    926					break;
    927				parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
    928				event_quota--;
    929			}
    930			break;
    931		default:
    932			dev_err(dev, "Unhandled event type: %d\n", type);
    933			break;
    934		}
    935
    936		mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
    937		local_rp = ev_ring->rp;
    938
    939		ptr = le64_to_cpu(er_ctxt->rp);
    940		if (!is_valid_ring_ptr(ev_ring, ptr)) {
    941			dev_err(&mhi_cntrl->mhi_dev->dev,
    942				"Event ring rp points outside of the event ring\n");
    943			return -EIO;
    944		}
    945
    946		dev_rp = mhi_to_virtual(ev_ring, ptr);
    947		count++;
    948	}
    949
    950	read_lock_bh(&mhi_cntrl->pm_lock);
    951	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
    952		mhi_ring_er_db(mhi_event);
    953	read_unlock_bh(&mhi_cntrl->pm_lock);
    954
    955	return count;
    956}
    957
    958int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
    959				struct mhi_event *mhi_event,
    960				u32 event_quota)
    961{
    962	struct mhi_ring_element *dev_rp, *local_rp;
    963	struct mhi_ring *ev_ring = &mhi_event->ring;
    964	struct mhi_event_ctxt *er_ctxt =
    965		&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
    966	int count = 0;
    967	u32 chan;
    968	struct mhi_chan *mhi_chan;
    969	dma_addr_t ptr = le64_to_cpu(er_ctxt->rp);
    970
    971	if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
    972		return -EIO;
    973
    974	if (!is_valid_ring_ptr(ev_ring, ptr)) {
    975		dev_err(&mhi_cntrl->mhi_dev->dev,
    976			"Event ring rp points outside of the event ring\n");
    977		return -EIO;
    978	}
    979
    980	dev_rp = mhi_to_virtual(ev_ring, ptr);
    981	local_rp = ev_ring->rp;
    982
    983	while (dev_rp != local_rp && event_quota > 0) {
    984		enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
    985
    986		chan = MHI_TRE_GET_EV_CHID(local_rp);
    987
    988		WARN_ON(chan >= mhi_cntrl->max_chan);
    989
    990		/*
    991		 * Only process the event ring elements whose channel
    992		 * ID is within the maximum supported range.
    993		 */
    994		if (chan < mhi_cntrl->max_chan &&
    995		    mhi_cntrl->mhi_chan[chan].configured) {
    996			mhi_chan = &mhi_cntrl->mhi_chan[chan];
    997
    998			if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
    999				parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
   1000				event_quota--;
   1001			} else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) {
   1002				parse_rsc_event(mhi_cntrl, local_rp, mhi_chan);
   1003				event_quota--;
   1004			}
   1005		}
   1006
   1007		mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
   1008		local_rp = ev_ring->rp;
   1009
   1010		ptr = le64_to_cpu(er_ctxt->rp);
   1011		if (!is_valid_ring_ptr(ev_ring, ptr)) {
   1012			dev_err(&mhi_cntrl->mhi_dev->dev,
   1013				"Event ring rp points outside of the event ring\n");
   1014			return -EIO;
   1015		}
   1016
   1017		dev_rp = mhi_to_virtual(ev_ring, ptr);
   1018		count++;
   1019	}
   1020	read_lock_bh(&mhi_cntrl->pm_lock);
   1021	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
   1022		mhi_ring_er_db(mhi_event);
   1023	read_unlock_bh(&mhi_cntrl->pm_lock);
   1024
   1025	return count;
   1026}
   1027
   1028void mhi_ev_task(unsigned long data)
   1029{
   1030	struct mhi_event *mhi_event = (struct mhi_event *)data;
   1031	struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
   1032
   1033	/* process all pending events */
   1034	spin_lock_bh(&mhi_event->lock);
   1035	mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
   1036	spin_unlock_bh(&mhi_event->lock);
   1037}
   1038
   1039void mhi_ctrl_ev_task(unsigned long data)
   1040{
   1041	struct mhi_event *mhi_event = (struct mhi_event *)data;
   1042	struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
   1043	struct device *dev = &mhi_cntrl->mhi_dev->dev;
   1044	enum mhi_state state;
   1045	enum mhi_pm_state pm_state = 0;
   1046	int ret;
   1047
   1048	/*
   1049	 * We can check the PM state without a lock here because there is no
   1050	 * way the PM state can change from reg access valid to no access
   1051	 * while this thread is executing.
   1052	 */
   1053	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
   1054		/*
   1055		 * We may have a pending event, but we are not allowed to
   1056		 * process it since we are probably in a suspended state,
   1057		 * so trigger a resume.
   1058		 */
   1059		mhi_trigger_resume(mhi_cntrl);
   1060
   1061		return;
   1062	}
   1063
   1064	/* Process ctrl events */
   1065	ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
   1066
   1067	/*
   1068	 * We received an IRQ but there were no events to process; maybe the
   1069	 * device went to SYS_ERR state? Check the state to confirm.
   1070	 */
   1071	if (!ret) {
   1072		write_lock_irq(&mhi_cntrl->pm_lock);
   1073		state = mhi_get_mhi_state(mhi_cntrl);
   1074		if (state == MHI_STATE_SYS_ERR) {
   1075			dev_dbg(dev, "System error detected\n");
   1076			pm_state = mhi_tryset_pm_state(mhi_cntrl,
   1077						       MHI_PM_SYS_ERR_DETECT);
   1078		}
   1079		write_unlock_irq(&mhi_cntrl->pm_lock);
   1080		if (pm_state == MHI_PM_SYS_ERR_DETECT)
   1081			mhi_pm_sys_err_handler(mhi_cntrl);
   1082	}
   1083}
   1084
   1085static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
   1086			     struct mhi_ring *ring)
   1087{
   1088	void *tmp = ring->wp + ring->el_size;
   1089
   1090	if (tmp >= (ring->base + ring->len))
   1091		tmp = ring->base;
   1092
   1093	return (tmp == ring->rp);
   1094}
   1095
   1096static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
   1097		     enum dma_data_direction dir, enum mhi_flags mflags)
   1098{
   1099	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
   1100	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
   1101							     mhi_dev->dl_chan;
   1102	struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
   1103	unsigned long flags;
   1104	int ret;
   1105
   1106	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
   1107		return -EIO;
   1108
   1109	read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
   1110
   1111	ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
   1112	if (unlikely(ret)) {
   1113		ret = -EAGAIN;
   1114		goto exit_unlock;
   1115	}
   1116
   1117	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
   1118	if (unlikely(ret))
   1119		goto exit_unlock;
   1120
   1121	/* Packet is queued; take a usage ref to exit M3 if necessary.
   1122	 * For a host->device buffer the balancing put is done on completion;
   1123	 * for a device->host buffer it is done right after ringing the DB.
   1124	 */
   1125	mhi_cntrl->runtime_get(mhi_cntrl);
   1126
   1127	/* Assert dev_wake (to exit/prevent M1/M2) */
   1128	mhi_cntrl->wake_toggle(mhi_cntrl);
   1129
   1130	if (mhi_chan->dir == DMA_TO_DEVICE)
   1131		atomic_inc(&mhi_cntrl->pending_pkts);
   1132
   1133	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
   1134		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
   1135
   1136	if (dir == DMA_FROM_DEVICE)
   1137		mhi_cntrl->runtime_put(mhi_cntrl);
   1138
   1139exit_unlock:
   1140	read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
   1141
   1142	return ret;
   1143}
   1144
   1145int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
   1146		  struct sk_buff *skb, size_t len, enum mhi_flags mflags)
   1147{
   1148	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
   1149							     mhi_dev->dl_chan;
   1150	struct mhi_buf_info buf_info = { };
   1151
   1152	buf_info.v_addr = skb->data;
   1153	buf_info.cb_buf = skb;
   1154	buf_info.len = len;
   1155
   1156	if (unlikely(mhi_chan->pre_alloc))
   1157		return -EINVAL;
   1158
   1159	return mhi_queue(mhi_dev, &buf_info, dir, mflags);
   1160}
   1161EXPORT_SYMBOL_GPL(mhi_queue_skb);
   1162
   1163int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
   1164		  struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
   1165{
   1166	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
   1167							     mhi_dev->dl_chan;
   1168	struct mhi_buf_info buf_info = { };
   1169
   1170	buf_info.p_addr = mhi_buf->dma_addr;
   1171	buf_info.cb_buf = mhi_buf;
   1172	buf_info.pre_mapped = true;
   1173	buf_info.len = len;
   1174
   1175	if (unlikely(mhi_chan->pre_alloc))
   1176		return -EINVAL;
   1177
   1178	return mhi_queue(mhi_dev, &buf_info, dir, mflags);
   1179}
   1180EXPORT_SYMBOL_GPL(mhi_queue_dma);
   1181
   1182int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
   1183			struct mhi_buf_info *info, enum mhi_flags flags)
   1184{
   1185	struct mhi_ring *buf_ring, *tre_ring;
   1186	struct mhi_ring_element *mhi_tre;
   1187	struct mhi_buf_info *buf_info;
   1188	int eot, eob, chain, bei;
   1189	int ret;
   1190
   1191	buf_ring = &mhi_chan->buf_ring;
   1192	tre_ring = &mhi_chan->tre_ring;
   1193
   1194	buf_info = buf_ring->wp;
   1195	WARN_ON(buf_info->used);
   1196	buf_info->pre_mapped = info->pre_mapped;
   1197	if (info->pre_mapped)
   1198		buf_info->p_addr = info->p_addr;
   1199	else
   1200		buf_info->v_addr = info->v_addr;
   1201	buf_info->cb_buf = info->cb_buf;
   1202	buf_info->wp = tre_ring->wp;
   1203	buf_info->dir = mhi_chan->dir;
   1204	buf_info->len = info->len;
   1205
   1206	if (!info->pre_mapped) {
   1207		ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
   1208		if (ret)
   1209			return ret;
   1210	}
   1211
   1212	eob = !!(flags & MHI_EOB);
   1213	eot = !!(flags & MHI_EOT);
   1214	chain = !!(flags & MHI_CHAIN);
   1215	bei = !!(mhi_chan->intmod);
   1216
   1217	mhi_tre = tre_ring->wp;
   1218	mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
   1219	mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(info->len);
   1220	mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain);
   1221
   1222	/* increment WP */
   1223	mhi_add_ring_element(mhi_cntrl, tre_ring);
   1224	mhi_add_ring_element(mhi_cntrl, buf_ring);
   1225
   1226	return 0;
   1227}
   1228
   1229int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
   1230		  void *buf, size_t len, enum mhi_flags mflags)
   1231{
   1232	struct mhi_buf_info buf_info = { };
   1233
   1234	buf_info.v_addr = buf;
   1235	buf_info.cb_buf = buf;
   1236	buf_info.len = len;
   1237
   1238	return mhi_queue(mhi_dev, &buf_info, dir, mflags);
   1239}
   1240EXPORT_SYMBOL_GPL(mhi_queue_buf);
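
/*
 * Illustrative example (not from the driver itself): mhi_queue() distinguishes
 * a controller in an error PM state (-EIO) from a transfer ring that is merely
 * full (-EAGAIN), so a hypothetical caller (with a hypothetical
 * requeue_later() helper) can retry only the latter:
 *
 *	ret = mhi_queue_buf(mhi_dev, DMA_TO_DEVICE, buf, len, MHI_EOT);
 *	if (ret == -EAGAIN)
 *		requeue_later(buf);
 *	else if (ret)
 *		kfree(buf);
 */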
   1241
   1242bool mhi_queue_is_full(struct mhi_device *mhi_dev, enum dma_data_direction dir)
   1243{
   1244	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
   1245	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ?
   1246					mhi_dev->ul_chan : mhi_dev->dl_chan;
   1247	struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
   1248
   1249	return mhi_is_ring_full(mhi_cntrl, tre_ring);
   1250}
   1251EXPORT_SYMBOL_GPL(mhi_queue_is_full);
   1252
   1253int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
   1254		 struct mhi_chan *mhi_chan,
   1255		 enum mhi_cmd_type cmd)
   1256{
   1257	struct mhi_ring_element *cmd_tre = NULL;
   1258	struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
   1259	struct mhi_ring *ring = &mhi_cmd->ring;
   1260	struct device *dev = &mhi_cntrl->mhi_dev->dev;
   1261	int chan = 0;
   1262
   1263	if (mhi_chan)
   1264		chan = mhi_chan->chan;
   1265
   1266	spin_lock_bh(&mhi_cmd->lock);
   1267	if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) {
   1268		spin_unlock_bh(&mhi_cmd->lock);
   1269		return -ENOMEM;
   1270	}
   1271
   1272	/* prepare the cmd tre */
   1273	cmd_tre = ring->wp;
   1274	switch (cmd) {
   1275	case MHI_CMD_RESET_CHAN:
   1276		cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR;
   1277		cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0;
   1278		cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan);
   1279		break;
   1280	case MHI_CMD_STOP_CHAN:
   1281		cmd_tre->ptr = MHI_TRE_CMD_STOP_PTR;
   1282		cmd_tre->dword[0] = MHI_TRE_CMD_STOP_DWORD0;
   1283		cmd_tre->dword[1] = MHI_TRE_CMD_STOP_DWORD1(chan);
   1284		break;
   1285	case MHI_CMD_START_CHAN:
   1286		cmd_tre->ptr = MHI_TRE_CMD_START_PTR;
   1287		cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0;
   1288		cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan);
   1289		break;
   1290	default:
   1291		dev_err(dev, "Command not supported\n");
   1292		break;
   1293	}
   1294
   1295	/* queue to hardware */
   1296	mhi_add_ring_element(mhi_cntrl, ring);
   1297	read_lock_bh(&mhi_cntrl->pm_lock);
   1298	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
   1299		mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
   1300	read_unlock_bh(&mhi_cntrl->pm_lock);
   1301	spin_unlock_bh(&mhi_cmd->lock);
   1302
   1303	return 0;
   1304}
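
/*
 * Illustrative note (not from the driver itself): command completion is
 * asynchronous. mhi_send_cmd() only queues the command TRE and rings the
 * command doorbell; the result arrives later as a command completion event,
 * which mhi_process_cmd_completion() records in mhi_chan->ccs before calling
 * complete(). mhi_update_channel_state() below pairs the two:
 *
 *	reinit_completion(&mhi_chan->completion);
 *	ret = mhi_send_cmd(mhi_cntrl, mhi_chan, cmd);
 *	...
 *	ret = wait_for_completion_timeout(&mhi_chan->completion,
 *				msecs_to_jiffies(mhi_cntrl->timeout_ms));
 */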
   1305
   1306static int mhi_update_channel_state(struct mhi_controller *mhi_cntrl,
   1307				    struct mhi_chan *mhi_chan,
   1308				    enum mhi_ch_state_type to_state)
   1309{
   1310	struct device *dev = &mhi_chan->mhi_dev->dev;
   1311	enum mhi_cmd_type cmd = MHI_CMD_NOP;
   1312	int ret;
   1313
   1314	dev_dbg(dev, "%d: Updating channel state to: %s\n", mhi_chan->chan,
   1315		TO_CH_STATE_TYPE_STR(to_state));
   1316
   1317	switch (to_state) {
   1318	case MHI_CH_STATE_TYPE_RESET:
   1319		write_lock_irq(&mhi_chan->lock);
   1320		if (mhi_chan->ch_state != MHI_CH_STATE_STOP &&
   1321		    mhi_chan->ch_state != MHI_CH_STATE_ENABLED &&
   1322		    mhi_chan->ch_state != MHI_CH_STATE_SUSPENDED) {
   1323			write_unlock_irq(&mhi_chan->lock);
   1324			return -EINVAL;
   1325		}
   1326		mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
   1327		write_unlock_irq(&mhi_chan->lock);
   1328
   1329		cmd = MHI_CMD_RESET_CHAN;
   1330		break;
   1331	case MHI_CH_STATE_TYPE_STOP:
   1332		if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
   1333			return -EINVAL;
   1334
   1335		cmd = MHI_CMD_STOP_CHAN;
   1336		break;
   1337	case MHI_CH_STATE_TYPE_START:
   1338		if (mhi_chan->ch_state != MHI_CH_STATE_STOP &&
   1339		    mhi_chan->ch_state != MHI_CH_STATE_DISABLED)
   1340			return -EINVAL;
   1341
   1342		cmd = MHI_CMD_START_CHAN;
   1343		break;
   1344	default:
   1345		dev_err(dev, "%d: Channel state update to %s not allowed\n",
   1346			mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
   1347		return -EINVAL;
   1348	}
   1349
   1350	/* bring host and device out of suspended states */
   1351	ret = mhi_device_get_sync(mhi_cntrl->mhi_dev);
   1352	if (ret)
   1353		return ret;
   1354	mhi_cntrl->runtime_get(mhi_cntrl);
   1355
   1356	reinit_completion(&mhi_chan->completion);
   1357	ret = mhi_send_cmd(mhi_cntrl, mhi_chan, cmd);
   1358	if (ret) {
   1359		dev_err(dev, "%d: Failed to send %s channel command\n",
   1360			mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
   1361		goto exit_channel_update;
   1362	}
   1363
   1364	ret = wait_for_completion_timeout(&mhi_chan->completion,
   1365				       msecs_to_jiffies(mhi_cntrl->timeout_ms));
   1366	if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) {
   1367		dev_err(dev,
   1368			"%d: Failed to receive %s channel command completion\n",
   1369			mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
   1370		ret = -EIO;
   1371		goto exit_channel_update;
   1372	}
   1373
   1374	ret = 0;
   1375
   1376	if (to_state != MHI_CH_STATE_TYPE_RESET) {
   1377		write_lock_irq(&mhi_chan->lock);
   1378		mhi_chan->ch_state = (to_state == MHI_CH_STATE_TYPE_START) ?
   1379				      MHI_CH_STATE_ENABLED : MHI_CH_STATE_STOP;
   1380		write_unlock_irq(&mhi_chan->lock);
   1381	}
   1382
   1383	dev_dbg(dev, "%d: Channel state change to %s successful\n",
   1384		mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
   1385
   1386exit_channel_update:
   1387	mhi_cntrl->runtime_put(mhi_cntrl);
   1388	mhi_device_put(mhi_cntrl->mhi_dev);
   1389
   1390	return ret;
   1391}
   1392
   1393static void mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
   1394				  struct mhi_chan *mhi_chan)
   1395{
   1396	int ret;
   1397	struct device *dev = &mhi_chan->mhi_dev->dev;
   1398
   1399	mutex_lock(&mhi_chan->mutex);
   1400
   1401	if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
   1402		dev_dbg(dev, "Current EE: %s Required EE Mask: 0x%x\n",
   1403			TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask);
   1404		goto exit_unprepare_channel;
   1405	}
   1406
   1407	/* stop processing events for this channel */
   1408	ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
   1409				       MHI_CH_STATE_TYPE_RESET);
   1410	if (ret)
   1411		dev_err(dev, "%d: Failed to reset channel, still resetting\n",
   1412			mhi_chan->chan);
   1413
   1414exit_unprepare_channel:
   1415	write_lock_irq(&mhi_chan->lock);
   1416	mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
   1417	write_unlock_irq(&mhi_chan->lock);
   1418
   1419	if (!mhi_chan->offload_ch) {
   1420		mhi_reset_chan(mhi_cntrl, mhi_chan);
   1421		mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
   1422	}
   1423	dev_dbg(dev, "%d: successfully reset\n", mhi_chan->chan);
   1424
   1425	mutex_unlock(&mhi_chan->mutex);
   1426}
   1427
   1428int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
   1429			struct mhi_chan *mhi_chan, unsigned int flags)
   1430{
   1431	int ret = 0;
   1432	struct device *dev = &mhi_chan->mhi_dev->dev;
   1433
   1434	if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
   1435		dev_err(dev, "Current EE: %s Required EE Mask: 0x%x\n",
   1436			TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask);
   1437		return -ENOTCONN;
   1438	}
   1439
   1440	mutex_lock(&mhi_chan->mutex);
   1441
   1442	/* Check if the client manages the channel context (offload channels) */
   1443	if (!mhi_chan->offload_ch) {
   1444		ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan);
   1445		if (ret)
   1446			goto error_init_chan;
   1447	}
   1448
   1449	ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
   1450				       MHI_CH_STATE_TYPE_START);
   1451	if (ret)
   1452		goto error_pm_state;
   1453
   1454	if (mhi_chan->dir == DMA_FROM_DEVICE)
   1455		mhi_chan->pre_alloc = !!(flags & MHI_CH_INBOUND_ALLOC_BUFS);
   1456
   1457	/* Pre-allocate buffers for the xfer ring */
   1458	if (mhi_chan->pre_alloc) {
   1459		int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
   1460						       &mhi_chan->tre_ring);
   1461		size_t len = mhi_cntrl->buffer_len;
   1462
   1463		while (nr_el--) {
   1464			void *buf;
   1465			struct mhi_buf_info info = { };
   1466
   1467			buf = kmalloc(len, GFP_KERNEL);
   1468			if (!buf) {
   1469				ret = -ENOMEM;
   1470				goto error_pre_alloc;
   1471			}
   1472
   1473			/* Prepare transfer descriptors */
   1474			info.v_addr = buf;
   1475			info.cb_buf = buf;
   1476			info.len = len;
   1477			ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &info, MHI_EOT);
   1478			if (ret) {
   1479				kfree(buf);
   1480				goto error_pre_alloc;
   1481			}
   1482		}
   1483
   1484		read_lock_bh(&mhi_cntrl->pm_lock);
   1485		if (MHI_DB_ACCESS_VALID(mhi_cntrl)) {
   1486			read_lock_irq(&mhi_chan->lock);
   1487			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
   1488			read_unlock_irq(&mhi_chan->lock);
   1489		}
   1490		read_unlock_bh(&mhi_cntrl->pm_lock);
   1491	}
   1492
   1493	mutex_unlock(&mhi_chan->mutex);
   1494
   1495	return 0;
   1496
   1497error_pm_state:
   1498	if (!mhi_chan->offload_ch)
   1499		mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
   1500
   1501error_init_chan:
   1502	mutex_unlock(&mhi_chan->mutex);
   1503
   1504	return ret;
   1505
   1506error_pre_alloc:
   1507	mutex_unlock(&mhi_chan->mutex);
   1508	mhi_unprepare_channel(mhi_cntrl, mhi_chan);
   1509
   1510	return ret;
   1511}
   1512
   1513static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
   1514				  struct mhi_event *mhi_event,
   1515				  struct mhi_event_ctxt *er_ctxt,
   1516				  int chan)
   1517
   1518{
   1519	struct mhi_ring_element *dev_rp, *local_rp;
   1520	struct mhi_ring *ev_ring;
   1521	struct device *dev = &mhi_cntrl->mhi_dev->dev;
   1522	unsigned long flags;
   1523	dma_addr_t ptr;
   1524
   1525	dev_dbg(dev, "Marking all events for chan: %d as stale\n", chan);
   1526
   1527	ev_ring = &mhi_event->ring;
   1528
   1529	/* mark all pending events related to this channel as STALE events */
   1530	spin_lock_irqsave(&mhi_event->lock, flags);
   1531
   1532	ptr = le64_to_cpu(er_ctxt->rp);
   1533	if (!is_valid_ring_ptr(ev_ring, ptr)) {
   1534		dev_err(&mhi_cntrl->mhi_dev->dev,
   1535			"Event ring rp points outside of the event ring\n");
   1536		dev_rp = ev_ring->rp;
   1537	} else {
   1538		dev_rp = mhi_to_virtual(ev_ring, ptr);
   1539	}
   1540
   1541	local_rp = ev_ring->rp;
   1542	while (dev_rp != local_rp) {
   1543		if (MHI_TRE_GET_EV_TYPE(local_rp) == MHI_PKT_TYPE_TX_EVENT &&
   1544		    chan == MHI_TRE_GET_EV_CHID(local_rp))
   1545			local_rp->dword[1] = MHI_TRE_EV_DWORD1(chan,
   1546					MHI_PKT_TYPE_STALE_EVENT);
   1547		local_rp++;
   1548		if (local_rp == (ev_ring->base + ev_ring->len))
   1549			local_rp = ev_ring->base;
   1550	}
   1551
   1552	dev_dbg(dev, "Finished marking events as stale events\n");
   1553	spin_unlock_irqrestore(&mhi_event->lock, flags);
   1554}
   1555
   1556static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl,
   1557				struct mhi_chan *mhi_chan)
   1558{
   1559	struct mhi_ring *buf_ring, *tre_ring;
   1560	struct mhi_result result;
   1561
   1562	/* Reset any pending buffers */
   1563	buf_ring = &mhi_chan->buf_ring;
   1564	tre_ring = &mhi_chan->tre_ring;
   1565	result.transaction_status = -ENOTCONN;
   1566	result.bytes_xferd = 0;
   1567	while (tre_ring->rp != tre_ring->wp) {
   1568		struct mhi_buf_info *buf_info = buf_ring->rp;
   1569
   1570		if (mhi_chan->dir == DMA_TO_DEVICE) {
   1571			atomic_dec(&mhi_cntrl->pending_pkts);
   1572			/* Release the reference got from mhi_queue() */
   1573			mhi_cntrl->runtime_put(mhi_cntrl);
   1574		}
   1575
   1576		if (!buf_info->pre_mapped)
   1577			mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
   1578
   1579		mhi_del_ring_element(mhi_cntrl, buf_ring);
   1580		mhi_del_ring_element(mhi_cntrl, tre_ring);
   1581
   1582		if (mhi_chan->pre_alloc) {
   1583			kfree(buf_info->cb_buf);
   1584		} else {
   1585			result.buf_addr = buf_info->cb_buf;
   1586			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
   1587		}
   1588	}
   1589}
   1590
   1591void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
   1592{
   1593	struct mhi_event *mhi_event;
   1594	struct mhi_event_ctxt *er_ctxt;
   1595	int chan = mhi_chan->chan;
   1596
   1597	/* Nothing to reset, client doesn't queue buffers */
   1598	if (mhi_chan->offload_ch)
   1599		return;
   1600
   1601	read_lock_bh(&mhi_cntrl->pm_lock);
   1602	mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
   1603	er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index];
   1604
   1605	mhi_mark_stale_events(mhi_cntrl, mhi_event, er_ctxt, chan);
   1606
   1607	mhi_reset_data_chan(mhi_cntrl, mhi_chan);
   1608
   1609	read_unlock_bh(&mhi_cntrl->pm_lock);
   1610}
   1611
   1612static int __mhi_prepare_for_transfer(struct mhi_device *mhi_dev, unsigned int flags)
   1613{
   1614	int ret, dir;
   1615	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
   1616	struct mhi_chan *mhi_chan;
   1617
   1618	for (dir = 0; dir < 2; dir++) {
   1619		mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
   1620		if (!mhi_chan)
   1621			continue;
   1622
   1623		ret = mhi_prepare_channel(mhi_cntrl, mhi_chan, flags);
   1624		if (ret)
   1625			goto error_open_chan;
   1626	}
   1627
   1628	return 0;
   1629
   1630error_open_chan:
   1631	for (--dir; dir >= 0; dir--) {
   1632		mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
   1633		if (!mhi_chan)
   1634			continue;
   1635
   1636		mhi_unprepare_channel(mhi_cntrl, mhi_chan);
   1637	}
   1638
   1639	return ret;
   1640}
   1641
   1642int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
   1643{
   1644	return __mhi_prepare_for_transfer(mhi_dev, 0);
   1645}
   1646EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer);
   1647
   1648int mhi_prepare_for_transfer_autoqueue(struct mhi_device *mhi_dev)
   1649{
   1650	return __mhi_prepare_for_transfer(mhi_dev, MHI_CH_INBOUND_ALLOC_BUFS);
   1651}
   1652EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer_autoqueue);
   1653
   1654void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev)
   1655{
   1656	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
   1657	struct mhi_chan *mhi_chan;
   1658	int dir;
   1659
   1660	for (dir = 0; dir < 2; dir++) {
   1661		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
   1662		if (!mhi_chan)
   1663			continue;
   1664
   1665		mhi_unprepare_channel(mhi_cntrl, mhi_chan);
   1666	}
   1667}
   1668EXPORT_SYMBOL_GPL(mhi_unprepare_from_transfer);
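
/*
 * Illustrative example (not from the driver itself): the usual client
 * lifecycle built on these helpers is prepare -> queue/poll -> unprepare,
 * e.g. in a hypothetical client driver's probe() and remove() callbacks:
 *
 *	ret = mhi_prepare_for_transfer_autoqueue(mhi_dev);
 *	if (ret)
 *		return ret;
 *	...
 *	mhi_unprepare_from_transfer(mhi_dev);
 */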
   1669
   1670int mhi_poll(struct mhi_device *mhi_dev, u32 budget)
   1671{
   1672	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
   1673	struct mhi_chan *mhi_chan = mhi_dev->dl_chan;
   1674	struct mhi_event *mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
   1675	int ret;
   1676
   1677	spin_lock_bh(&mhi_event->lock);
   1678	ret = mhi_event->process_event(mhi_cntrl, mhi_event, budget);
   1679	spin_unlock_bh(&mhi_event->lock);
   1680
   1681	return ret;
   1682}
   1683EXPORT_SYMBOL_GPL(mhi_poll);