cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

pm.c (36022B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
      4 *
      5 */
      6
      7#include <linux/delay.h>
      8#include <linux/device.h>
      9#include <linux/dma-direction.h>
     10#include <linux/dma-mapping.h>
     11#include <linux/interrupt.h>
     12#include <linux/list.h>
     13#include <linux/mhi.h>
     14#include <linux/module.h>
     15#include <linux/slab.h>
     16#include <linux/wait.h>
     17#include "internal.h"
     18
     19/*
     20 * Not all MHI state transitions are synchronous. Transitions like Linkdown,
     21 * SYS_ERR, and shutdown can happen anytime asynchronously. This function will
     22 * transition to a new state only if we're allowed to.
     23 *
     24 * Priority increases as we go down. For instance, from any state in L0, the
     25 * transition can be made to states in L1, L2 and L3. A notable exception to
     26 * this rule is state DISABLE.  From DISABLE state we can only transition to
     27 * POR state. Also, while in L2 state, user cannot jump back to previous
     28 * L1 or L0 states.
     29 *
     30 * Valid transitions:
     31 * L0: DISABLE <--> POR
     32 *     POR <--> POR
     33 *     POR -> M0 -> M2 --> M0
     34 *     POR -> FW_DL_ERR
     35 *     FW_DL_ERR <--> FW_DL_ERR
     36 *     M0 <--> M0
     37 *     M0 -> FW_DL_ERR
     38 *     M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0
     39 * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR
     40 * L2: SHUTDOWN_PROCESS -> LD_ERR_FATAL_DETECT
     41 *     SHUTDOWN_PROCESS -> DISABLE
     42 * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
     43 *     LD_ERR_FATAL_DETECT -> DISABLE
     44 */
     45static const struct mhi_pm_transitions dev_state_transitions[] = {
     46	/* L0 States */
     47	{
     48		MHI_PM_DISABLE,
     49		MHI_PM_POR
     50	},
     51	{
     52		MHI_PM_POR,
     53		MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 |
     54		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
     55		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
     56	},
     57	{
     58		MHI_PM_M0,
     59		MHI_PM_M0 | MHI_PM_M2 | MHI_PM_M3_ENTER |
     60		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
     61		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
     62	},
     63	{
     64		MHI_PM_M2,
     65		MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
     66		MHI_PM_LD_ERR_FATAL_DETECT
     67	},
     68	{
     69		MHI_PM_M3_ENTER,
     70		MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
     71		MHI_PM_LD_ERR_FATAL_DETECT
     72	},
     73	{
     74		MHI_PM_M3,
     75		MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT |
     76		MHI_PM_LD_ERR_FATAL_DETECT
     77	},
     78	{
     79		MHI_PM_M3_EXIT,
     80		MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
     81		MHI_PM_LD_ERR_FATAL_DETECT
     82	},
     83	{
     84		MHI_PM_FW_DL_ERR,
     85		MHI_PM_FW_DL_ERR | MHI_PM_SYS_ERR_DETECT |
     86		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
     87	},
     88	/* L1 States */
     89	{
     90		MHI_PM_SYS_ERR_DETECT,
     91		MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS |
     92		MHI_PM_LD_ERR_FATAL_DETECT
     93	},
     94	{
     95		MHI_PM_SYS_ERR_PROCESS,
     96		MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS |
     97		MHI_PM_LD_ERR_FATAL_DETECT
     98	},
     99	/* L2 States */
    100	{
    101		MHI_PM_SHUTDOWN_PROCESS,
    102		MHI_PM_DISABLE | MHI_PM_LD_ERR_FATAL_DETECT
    103	},
    104	/* L3 States */
    105	{
    106		MHI_PM_LD_ERR_FATAL_DETECT,
    107		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_DISABLE
    108	},
    109};
    110
    111enum mhi_pm_state __must_check mhi_tryset_pm_state(struct mhi_controller *mhi_cntrl,
    112						   enum mhi_pm_state state)
    113{
    114	unsigned long cur_state = mhi_cntrl->pm_state;
    115	int index = find_last_bit(&cur_state, 32);
    116
    117	if (unlikely(index >= ARRAY_SIZE(dev_state_transitions)))
    118		return cur_state;
    119
    120	if (unlikely(dev_state_transitions[index].from_state != cur_state))
    121		return cur_state;
    122
    123	if (unlikely(!(dev_state_transitions[index].to_states & state)))
    124		return cur_state;
    125
    126	mhi_cntrl->pm_state = state;
    127	return mhi_cntrl->pm_state;
    128}
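
The transition table above works because every MHI_PM_* state is a distinct bit: find_last_bit() on the current state yields its row in dev_state_transitions[], and a single mask test against to_states decides whether the requested move is legal. Below is a minimal user-space sketch of the same idea (not part of pm.c), with a reduced three-state table and __builtin_clz() standing in for find_last_bit():

#include <stdio.h>

enum pm_state {                 /* one bit per state, mirroring MHI_PM_* */
	PM_DISABLE = 1 << 0,
	PM_POR     = 1 << 1,
	PM_M0      = 1 << 2,
};

struct pm_transitions {
	enum pm_state from_state;
	unsigned int to_states;  /* OR of every state reachable from from_state */
};

static const struct pm_transitions transitions[] = {
	{ PM_DISABLE, PM_POR },
	{ PM_POR,     PM_POR | PM_DISABLE | PM_M0 },
	{ PM_M0,      PM_M0 },
};

/* Return the new state if the move is allowed, else the current one. */
static enum pm_state tryset(enum pm_state cur, enum pm_state next)
{
	unsigned int index = 31 - __builtin_clz(cur);   /* find_last_bit() stand-in */

	if (index >= sizeof(transitions) / sizeof(transitions[0]) ||
	    transitions[index].from_state != cur ||
	    !(transitions[index].to_states & next))
		return cur;
	return next;
}

int main(void)
{
	printf("POR -> M0 allowed: %d\n", tryset(PM_POR, PM_M0) == PM_M0);
	printf("DISABLE -> M0 allowed: %d\n", tryset(PM_DISABLE, PM_M0) == PM_M0);
	return 0;
}
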
    129
    130void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state)
    131{
    132	struct device *dev = &mhi_cntrl->mhi_dev->dev;
    133	int ret;
    134
    135	if (state == MHI_STATE_RESET) {
    136		ret = mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
    137					  MHICTRL_RESET_MASK, 1);
    138	} else {
    139		ret = mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
    140					  MHICTRL_MHISTATE_MASK, state);
    141	}
    142
    143	if (ret)
    144		dev_err(dev, "Failed to set MHI state to: %s\n",
    145			mhi_state_str(state));
    146}
    147
    148/* NOP for backward compatibility, host allowed to ring DB in M2 state */
    149static void mhi_toggle_dev_wake_nop(struct mhi_controller *mhi_cntrl)
    150{
    151}
    152
    153static void mhi_toggle_dev_wake(struct mhi_controller *mhi_cntrl)
    154{
    155	mhi_cntrl->wake_get(mhi_cntrl, false);
    156	mhi_cntrl->wake_put(mhi_cntrl, true);
    157}
    158
    159/* Handle device ready state transition */
    160int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
    161{
    162	struct mhi_event *mhi_event;
    163	enum mhi_pm_state cur_state;
    164	struct device *dev = &mhi_cntrl->mhi_dev->dev;
    165	u32 interval_us = 25000; /* poll register field every 25 milliseconds */
    166	int ret, i;
    167
    168	/* Check if device entered error state */
    169	if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
    170		dev_err(dev, "Device link is not accessible\n");
    171		return -EIO;
    172	}
    173
    174	/* Wait for RESET to be cleared and READY bit to be set by the device */
    175	ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
    176				 MHICTRL_RESET_MASK, 0, interval_us);
    177	if (ret) {
    178		dev_err(dev, "Device failed to clear MHI Reset\n");
    179		return ret;
    180	}
    181
    182	ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
    183				 MHISTATUS_READY_MASK, 1, interval_us);
    184	if (ret) {
    185		dev_err(dev, "Device failed to enter MHI Ready\n");
    186		return ret;
    187	}
    188
    189	dev_dbg(dev, "Device in READY State\n");
    190	write_lock_irq(&mhi_cntrl->pm_lock);
    191	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
    192	mhi_cntrl->dev_state = MHI_STATE_READY;
    193	write_unlock_irq(&mhi_cntrl->pm_lock);
    194
    195	if (cur_state != MHI_PM_POR) {
    196		dev_err(dev, "Error moving to state %s from %s\n",
    197			to_mhi_pm_state_str(MHI_PM_POR),
    198			to_mhi_pm_state_str(cur_state));
    199		return -EIO;
    200	}
    201
    202	read_lock_bh(&mhi_cntrl->pm_lock);
    203	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
    204		dev_err(dev, "Device registers not accessible\n");
    205		goto error_mmio;
    206	}
    207
    208	/* Configure MMIO registers */
    209	ret = mhi_init_mmio(mhi_cntrl);
    210	if (ret) {
    211		dev_err(dev, "Error configuring MMIO registers\n");
    212		goto error_mmio;
    213	}
    214
    215	/* Add elements to all SW event rings */
    216	mhi_event = mhi_cntrl->mhi_event;
    217	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
    218		struct mhi_ring *ring = &mhi_event->ring;
    219
    220		/* Skip if this is an offload or HW event */
    221		if (mhi_event->offload_ev || mhi_event->hw_ring)
    222			continue;
    223
    224		ring->wp = ring->base + ring->len - ring->el_size;
    225		*ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size);
    226		/* Update all cores */
    227		smp_wmb();
    228
    229		/* Ring the event ring db */
    230		spin_lock_irq(&mhi_event->lock);
    231		mhi_ring_er_db(mhi_event);
    232		spin_unlock_irq(&mhi_event->lock);
    233	}
    234
    235	/* Set MHI to M0 state */
    236	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
    237	read_unlock_bh(&mhi_cntrl->pm_lock);
    238
    239	return 0;
    240
    241error_mmio:
    242	read_unlock_bh(&mhi_cntrl->pm_lock);
    243
    244	return -EIO;
    245}
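
The READY handshake above is masked register polling: wait for MHICTRL.RESET to drop, then for MHISTATUS.READY to rise, sampling every interval_us. A small user-space sketch of that polling pattern follows (not part of pm.c; the register, bit position and timeout are made up for the demo):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static volatile uint32_t fake_reg;      /* stands in for a memory-mapped register */

/* Extract a field: the mask selects the bits, its lowest set bit is the shift. */
static uint32_t read_field(volatile uint32_t *reg, uint32_t mask)
{
	return (*reg & mask) >> __builtin_ctz(mask);
}

/* Poll every interval_us until the field equals val or timeout_us expires. */
static int poll_field(volatile uint32_t *reg, uint32_t mask, uint32_t val,
		      unsigned int interval_us, unsigned int timeout_us)
{
	unsigned int waited = 0;
	struct timespec ts = { 0, (long)interval_us * 1000 };

	while (read_field(reg, mask) != val) {
		if (waited >= timeout_us)
			return -1;
		nanosleep(&ts, NULL);
		waited += interval_us;
	}
	return 0;
}

int main(void)
{
	fake_reg = 1 << 4;      /* pretend the READY-like bit is already set */
	printf("ready: %d\n", poll_field(&fake_reg, 1 << 4, 1, 25000, 100000) == 0);
	return 0;
}
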
    246
    247int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl)
    248{
    249	enum mhi_pm_state cur_state;
    250	struct mhi_chan *mhi_chan;
    251	struct device *dev = &mhi_cntrl->mhi_dev->dev;
    252	int i;
    253
    254	write_lock_irq(&mhi_cntrl->pm_lock);
    255	mhi_cntrl->dev_state = MHI_STATE_M0;
    256	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0);
    257	write_unlock_irq(&mhi_cntrl->pm_lock);
    258	if (unlikely(cur_state != MHI_PM_M0)) {
    259		dev_err(dev, "Unable to transition to M0 state\n");
    260		return -EIO;
    261	}
    262	mhi_cntrl->M0++;
    263
    264	/* Wake up the device */
    265	read_lock_bh(&mhi_cntrl->pm_lock);
    266	mhi_cntrl->wake_get(mhi_cntrl, true);
    267
    268	/* Ring all event rings and CMD ring only if we're in mission mode */
    269	if (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) {
    270		struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
    271		struct mhi_cmd *mhi_cmd =
    272			&mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
    273
    274		for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
    275			if (mhi_event->offload_ev)
    276				continue;
    277
    278			spin_lock_irq(&mhi_event->lock);
    279			mhi_ring_er_db(mhi_event);
    280			spin_unlock_irq(&mhi_event->lock);
    281		}
    282
    283		/* Only ring primary cmd ring if ring is not empty */
    284		spin_lock_irq(&mhi_cmd->lock);
    285		if (mhi_cmd->ring.rp != mhi_cmd->ring.wp)
    286			mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
    287		spin_unlock_irq(&mhi_cmd->lock);
    288	}
    289
    290	/* Ring channel DB registers */
    291	mhi_chan = mhi_cntrl->mhi_chan;
    292	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
    293		struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
    294
    295		if (mhi_chan->db_cfg.reset_req) {
    296			write_lock_irq(&mhi_chan->lock);
    297			mhi_chan->db_cfg.db_mode = true;
    298			write_unlock_irq(&mhi_chan->lock);
    299		}
    300
    301		read_lock_irq(&mhi_chan->lock);
    302
    303		/* Only ring DB if ring is not empty */
    304		if (tre_ring->base && tre_ring->wp  != tre_ring->rp)
    305			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
    306		read_unlock_irq(&mhi_chan->lock);
    307	}
    308
    309	mhi_cntrl->wake_put(mhi_cntrl, false);
    310	read_unlock_bh(&mhi_cntrl->pm_lock);
    311	wake_up_all(&mhi_cntrl->state_event);
    312
    313	return 0;
    314}
    315
    316/*
    317 * After receiving the MHI state change event from the device indicating the
    318 * transition to M1 state, the host can transition the device to M2 state
    319 * for keeping it in low power state.
    320 */
    321void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl)
    322{
    323	enum mhi_pm_state state;
    324	struct device *dev = &mhi_cntrl->mhi_dev->dev;
    325
    326	write_lock_irq(&mhi_cntrl->pm_lock);
    327	state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M2);
    328	if (state == MHI_PM_M2) {
    329		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M2);
    330		mhi_cntrl->dev_state = MHI_STATE_M2;
    331
    332		write_unlock_irq(&mhi_cntrl->pm_lock);
    333
    334		mhi_cntrl->M2++;
    335		wake_up_all(&mhi_cntrl->state_event);
    336
    337		/* If there are any pending resources, exit M2 immediately */
    338		if (unlikely(atomic_read(&mhi_cntrl->pending_pkts) ||
    339			     atomic_read(&mhi_cntrl->dev_wake))) {
    340			dev_dbg(dev,
    341				"Exiting M2, pending_pkts: %d dev_wake: %d\n",
    342				atomic_read(&mhi_cntrl->pending_pkts),
    343				atomic_read(&mhi_cntrl->dev_wake));
    344			read_lock_bh(&mhi_cntrl->pm_lock);
    345			mhi_cntrl->wake_get(mhi_cntrl, true);
    346			mhi_cntrl->wake_put(mhi_cntrl, true);
    347			read_unlock_bh(&mhi_cntrl->pm_lock);
    348		} else {
    349			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_IDLE);
    350		}
    351	} else {
    352		write_unlock_irq(&mhi_cntrl->pm_lock);
    353	}
    354}
    355
    356/* MHI M3 completion handler */
    357int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl)
    358{
    359	enum mhi_pm_state state;
    360	struct device *dev = &mhi_cntrl->mhi_dev->dev;
    361
    362	write_lock_irq(&mhi_cntrl->pm_lock);
    363	mhi_cntrl->dev_state = MHI_STATE_M3;
    364	state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3);
    365	write_unlock_irq(&mhi_cntrl->pm_lock);
    366	if (state != MHI_PM_M3) {
    367		dev_err(dev, "Unable to transition to M3 state\n");
    368		return -EIO;
    369	}
    370
    371	mhi_cntrl->M3++;
    372	wake_up_all(&mhi_cntrl->state_event);
    373
    374	return 0;
    375}
    376
    377/* Handle device Mission Mode transition */
    378static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
    379{
    380	struct mhi_event *mhi_event;
    381	struct device *dev = &mhi_cntrl->mhi_dev->dev;
    382	enum mhi_ee_type ee = MHI_EE_MAX, current_ee = mhi_cntrl->ee;
    383	int i, ret;
    384
    385	dev_dbg(dev, "Processing Mission Mode transition\n");
    386
    387	write_lock_irq(&mhi_cntrl->pm_lock);
    388	if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
    389		ee = mhi_get_exec_env(mhi_cntrl);
    390
    391	if (!MHI_IN_MISSION_MODE(ee)) {
    392		mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
    393		write_unlock_irq(&mhi_cntrl->pm_lock);
    394		wake_up_all(&mhi_cntrl->state_event);
    395		return -EIO;
    396	}
    397	mhi_cntrl->ee = ee;
    398	write_unlock_irq(&mhi_cntrl->pm_lock);
    399
    400	wake_up_all(&mhi_cntrl->state_event);
    401
    402	device_for_each_child(&mhi_cntrl->mhi_dev->dev, &current_ee,
    403			      mhi_destroy_device);
    404	mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE);
    405
    406	/* Force MHI to be in M0 state before continuing */
    407	ret = __mhi_device_get_sync(mhi_cntrl);
    408	if (ret)
    409		return ret;
    410
    411	read_lock_bh(&mhi_cntrl->pm_lock);
    412
    413	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
    414		ret = -EIO;
    415		goto error_mission_mode;
    416	}
    417
    418	/* Add elements to all HW event rings */
    419	mhi_event = mhi_cntrl->mhi_event;
    420	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
    421		struct mhi_ring *ring = &mhi_event->ring;
    422
    423		if (mhi_event->offload_ev || !mhi_event->hw_ring)
    424			continue;
    425
    426		ring->wp = ring->base + ring->len - ring->el_size;
    427		*ring->ctxt_wp = cpu_to_le64(ring->iommu_base + ring->len - ring->el_size);
    428		/* Update to all cores */
    429		smp_wmb();
    430
    431		spin_lock_irq(&mhi_event->lock);
    432		if (MHI_DB_ACCESS_VALID(mhi_cntrl))
    433			mhi_ring_er_db(mhi_event);
    434		spin_unlock_irq(&mhi_event->lock);
    435	}
    436
    437	read_unlock_bh(&mhi_cntrl->pm_lock);
    438
    439	/*
    440	 * The MHI devices are only created when the client device switches its
    441	 * Execution Environment (EE) to either SBL or AMSS states
    442	 */
    443	mhi_create_devices(mhi_cntrl);
    444
    445	read_lock_bh(&mhi_cntrl->pm_lock);
    446
    447error_mission_mode:
    448	mhi_cntrl->wake_put(mhi_cntrl, false);
    449	read_unlock_bh(&mhi_cntrl->pm_lock);
    450
    451	return ret;
    452}
    453
    454/* Handle shutdown transitions */
    455static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
    456{
    457	enum mhi_pm_state cur_state;
    458	struct mhi_event *mhi_event;
    459	struct mhi_cmd_ctxt *cmd_ctxt;
    460	struct mhi_cmd *mhi_cmd;
    461	struct mhi_event_ctxt *er_ctxt;
    462	struct device *dev = &mhi_cntrl->mhi_dev->dev;
    463	int ret, i;
    464
    465	dev_dbg(dev, "Processing disable transition with PM state: %s\n",
    466		to_mhi_pm_state_str(mhi_cntrl->pm_state));
    467
    468	mutex_lock(&mhi_cntrl->pm_mutex);
    469
    470	/* Trigger MHI RESET so that the device will not access host memory */
    471	if (!MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
    472		dev_dbg(dev, "Triggering MHI Reset in device\n");
    473		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
    474
    475		/* Wait for the reset bit to be cleared by the device */
    476		ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
    477				 MHICTRL_RESET_MASK, 0, 25000);
    478		if (ret)
    479			dev_err(dev, "Device failed to clear MHI Reset\n");
    480
    481		/*
    482		 * Device will clear BHI_INTVEC as a part of RESET processing,
    483		 * hence re-program it
    484		 */
    485		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
    486
    487		if (!MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) {
    488			/* wait for ready to be set */
    489			ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs,
    490						 MHISTATUS,
    491						 MHISTATUS_READY_MASK, 1, 25000);
    492			if (ret)
    493				dev_err(dev, "Device failed to enter READY state\n");
    494		}
    495	}
    496
    497	dev_dbg(dev,
    498		 "Waiting for all pending event ring processing to complete\n");
    499	mhi_event = mhi_cntrl->mhi_event;
    500	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
    501		if (mhi_event->offload_ev)
    502			continue;
    503		free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
    504		tasklet_kill(&mhi_event->task);
    505	}
    506
    507	/* Release lock and wait for all pending threads to complete */
    508	mutex_unlock(&mhi_cntrl->pm_mutex);
    509	dev_dbg(dev, "Waiting for all pending threads to complete\n");
    510	wake_up_all(&mhi_cntrl->state_event);
    511
    512	dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
    513	device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device);
    514
    515	mutex_lock(&mhi_cntrl->pm_mutex);
    516
    517	WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
    518	WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));
    519
    520	/* Reset the ev rings and cmd rings */
    521	dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
    522	mhi_cmd = mhi_cntrl->mhi_cmd;
    523	cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
    524	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
    525		struct mhi_ring *ring = &mhi_cmd->ring;
    526
    527		ring->rp = ring->base;
    528		ring->wp = ring->base;
    529		cmd_ctxt->rp = cmd_ctxt->rbase;
    530		cmd_ctxt->wp = cmd_ctxt->rbase;
    531	}
    532
    533	mhi_event = mhi_cntrl->mhi_event;
    534	er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
    535	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
    536		     mhi_event++) {
    537		struct mhi_ring *ring = &mhi_event->ring;
    538
    539		/* Skip offload events */
    540		if (mhi_event->offload_ev)
    541			continue;
    542
    543		ring->rp = ring->base;
    544		ring->wp = ring->base;
    545		er_ctxt->rp = er_ctxt->rbase;
    546		er_ctxt->wp = er_ctxt->rbase;
    547	}
    548
    549	/* Move to disable state */
    550	write_lock_irq(&mhi_cntrl->pm_lock);
    551	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE);
    552	write_unlock_irq(&mhi_cntrl->pm_lock);
    553	if (unlikely(cur_state != MHI_PM_DISABLE))
    554		dev_err(dev, "Error moving from PM state: %s to: %s\n",
    555			to_mhi_pm_state_str(cur_state),
    556			to_mhi_pm_state_str(MHI_PM_DISABLE));
    557
    558	dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
    559		to_mhi_pm_state_str(mhi_cntrl->pm_state),
    560		mhi_state_str(mhi_cntrl->dev_state));
    561
    562	mutex_unlock(&mhi_cntrl->pm_mutex);
    563}
    564
    565/* Handle system error transitions */
    566static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
    567{
    568	enum mhi_pm_state cur_state, prev_state;
    569	enum dev_st_transition next_state;
    570	struct mhi_event *mhi_event;
    571	struct mhi_cmd_ctxt *cmd_ctxt;
    572	struct mhi_cmd *mhi_cmd;
    573	struct mhi_event_ctxt *er_ctxt;
    574	struct device *dev = &mhi_cntrl->mhi_dev->dev;
    575	int ret, i;
    576
    577	dev_dbg(dev, "Transitioning from PM state: %s to: %s\n",
    578		to_mhi_pm_state_str(mhi_cntrl->pm_state),
    579		to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS));
    580
    581	/* We must notify MHI control driver so it can clean up first */
    582	mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR);
    583
    584	mutex_lock(&mhi_cntrl->pm_mutex);
    585	write_lock_irq(&mhi_cntrl->pm_lock);
    586	prev_state = mhi_cntrl->pm_state;
    587	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
    588	write_unlock_irq(&mhi_cntrl->pm_lock);
    589
    590	if (cur_state != MHI_PM_SYS_ERR_PROCESS) {
    591		dev_err(dev, "Failed to transition from PM state: %s to: %s\n",
    592			to_mhi_pm_state_str(cur_state),
    593			to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS));
    594		goto exit_sys_error_transition;
    595	}
    596
    597	mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
    598	mhi_cntrl->dev_state = MHI_STATE_RESET;
    599
    600	/* Wake up threads waiting for state transition */
    601	wake_up_all(&mhi_cntrl->state_event);
    602
    603	/* Trigger MHI RESET so that the device will not access host memory */
    604	if (MHI_REG_ACCESS_VALID(prev_state)) {
    605		u32 in_reset = -1;
    606		unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);
    607
    608		dev_dbg(dev, "Triggering MHI Reset in device\n");
    609		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
    610
    611		/* Wait for the reset bit to be cleared by the device */
    612		ret = wait_event_timeout(mhi_cntrl->state_event,
    613					 mhi_read_reg_field(mhi_cntrl,
    614							    mhi_cntrl->regs,
    615							    MHICTRL,
    616							    MHICTRL_RESET_MASK,
    617							    &in_reset) ||
    618					!in_reset, timeout);
    619		if (!ret || in_reset) {
    620			dev_err(dev, "Device failed to exit MHI Reset state\n");
    621			goto exit_sys_error_transition;
    622		}
    623
    624		/*
    625		 * Device will clear BHI_INTVEC as a part of RESET processing,
    626		 * hence re-program it
    627		 */
    628		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
    629	}
    630
    631	dev_dbg(dev,
    632		"Waiting for all pending event ring processing to complete\n");
    633	mhi_event = mhi_cntrl->mhi_event;
    634	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
    635		if (mhi_event->offload_ev)
    636			continue;
    637		tasklet_kill(&mhi_event->task);
    638	}
    639
    640	/* Release lock and wait for all pending threads to complete */
    641	mutex_unlock(&mhi_cntrl->pm_mutex);
    642	dev_dbg(dev, "Waiting for all pending threads to complete\n");
    643	wake_up_all(&mhi_cntrl->state_event);
    644
    645	dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
    646	device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device);
    647
    648	mutex_lock(&mhi_cntrl->pm_mutex);
    649
    650	WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
    651	WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));
    652
    653	/* Reset the ev rings and cmd rings */
    654	dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
    655	mhi_cmd = mhi_cntrl->mhi_cmd;
    656	cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
    657	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
    658		struct mhi_ring *ring = &mhi_cmd->ring;
    659
    660		ring->rp = ring->base;
    661		ring->wp = ring->base;
    662		cmd_ctxt->rp = cmd_ctxt->rbase;
    663		cmd_ctxt->wp = cmd_ctxt->rbase;
    664	}
    665
    666	mhi_event = mhi_cntrl->mhi_event;
    667	er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
    668	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
    669	     mhi_event++) {
    670		struct mhi_ring *ring = &mhi_event->ring;
    671
    672		/* Skip offload events */
    673		if (mhi_event->offload_ev)
    674			continue;
    675
    676		ring->rp = ring->base;
    677		ring->wp = ring->base;
    678		er_ctxt->rp = er_ctxt->rbase;
    679		er_ctxt->wp = er_ctxt->rbase;
    680	}
    681
    682	/* Transition to next state */
    683	if (MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) {
    684		write_lock_irq(&mhi_cntrl->pm_lock);
    685		cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
    686		write_unlock_irq(&mhi_cntrl->pm_lock);
    687		if (cur_state != MHI_PM_POR) {
    688			dev_err(dev, "Error moving to state %s from %s\n",
    689				to_mhi_pm_state_str(MHI_PM_POR),
    690				to_mhi_pm_state_str(cur_state));
    691			goto exit_sys_error_transition;
    692		}
    693		next_state = DEV_ST_TRANSITION_PBL;
    694	} else {
    695		next_state = DEV_ST_TRANSITION_READY;
    696	}
    697
    698	mhi_queue_state_transition(mhi_cntrl, next_state);
    699
    700exit_sys_error_transition:
    701	dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
    702		to_mhi_pm_state_str(mhi_cntrl->pm_state),
    703		mhi_state_str(mhi_cntrl->dev_state));
    704
    705	mutex_unlock(&mhi_cntrl->pm_mutex);
    706}
    707
    708/* Queue a new work item and schedule work */
    709int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
    710			       enum dev_st_transition state)
    711{
    712	struct state_transition *item = kmalloc(sizeof(*item), GFP_ATOMIC);
    713	unsigned long flags;
    714
    715	if (!item)
    716		return -ENOMEM;
    717
    718	item->state = state;
    719	spin_lock_irqsave(&mhi_cntrl->transition_lock, flags);
    720	list_add_tail(&item->node, &mhi_cntrl->transition_list);
    721	spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags);
    722
    723	queue_work(mhi_cntrl->hiprio_wq, &mhi_cntrl->st_worker);
    724
    725	return 0;
    726}
    727
    728/* SYS_ERR worker */
    729void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl)
    730{
    731	struct device *dev = &mhi_cntrl->mhi_dev->dev;
    732
    733	/* skip if controller supports RDDM */
    734	if (mhi_cntrl->rddm_image) {
    735		dev_dbg(dev, "Controller supports RDDM, skip SYS_ERROR\n");
    736		return;
    737	}
    738
    739	mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_SYS_ERR);
    740}
    741
    742/* Device State Transition worker */
    743void mhi_pm_st_worker(struct work_struct *work)
    744{
    745	struct state_transition *itr, *tmp;
    746	LIST_HEAD(head);
    747	struct mhi_controller *mhi_cntrl = container_of(work,
    748							struct mhi_controller,
    749							st_worker);
    750	struct device *dev = &mhi_cntrl->mhi_dev->dev;
    751
    752	spin_lock_irq(&mhi_cntrl->transition_lock);
    753	list_splice_tail_init(&mhi_cntrl->transition_list, &head);
    754	spin_unlock_irq(&mhi_cntrl->transition_lock);
    755
    756	list_for_each_entry_safe(itr, tmp, &head, node) {
    757		list_del(&itr->node);
    758		dev_dbg(dev, "Handling state transition: %s\n",
    759			TO_DEV_STATE_TRANS_STR(itr->state));
    760
    761		switch (itr->state) {
    762		case DEV_ST_TRANSITION_PBL:
    763			write_lock_irq(&mhi_cntrl->pm_lock);
    764			if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
    765				mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
    766			write_unlock_irq(&mhi_cntrl->pm_lock);
    767			mhi_fw_load_handler(mhi_cntrl);
    768			break;
    769		case DEV_ST_TRANSITION_SBL:
    770			write_lock_irq(&mhi_cntrl->pm_lock);
    771			mhi_cntrl->ee = MHI_EE_SBL;
    772			write_unlock_irq(&mhi_cntrl->pm_lock);
    773			/*
    774			 * The MHI devices are only created when the client
    775			 * device switches its Execution Environment (EE) to
    776			 * either SBL or AMSS states
    777			 */
    778			mhi_create_devices(mhi_cntrl);
    779			if (mhi_cntrl->fbc_download)
    780				mhi_download_amss_image(mhi_cntrl);
    781			break;
    782		case DEV_ST_TRANSITION_MISSION_MODE:
    783			mhi_pm_mission_mode_transition(mhi_cntrl);
    784			break;
    785		case DEV_ST_TRANSITION_FP:
    786			write_lock_irq(&mhi_cntrl->pm_lock);
    787			mhi_cntrl->ee = MHI_EE_FP;
    788			write_unlock_irq(&mhi_cntrl->pm_lock);
    789			mhi_create_devices(mhi_cntrl);
    790			break;
    791		case DEV_ST_TRANSITION_READY:
    792			mhi_ready_state_transition(mhi_cntrl);
    793			break;
    794		case DEV_ST_TRANSITION_SYS_ERR:
    795			mhi_pm_sys_error_transition(mhi_cntrl);
    796			break;
    797		case DEV_ST_TRANSITION_DISABLE:
    798			mhi_pm_disable_transition(mhi_cntrl);
    799			break;
    800		default:
    801			break;
    802		}
    803		kfree(itr);
    804	}
    805}
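
mhi_queue_state_transition() and mhi_pm_st_worker() together form a splice-and-drain work queue: producers push small heap nodes onto a locked list, and the worker moves the entire list onto a private head in one locked step so each transition is handled without the lock held. A user-space sketch of the same pattern (not part of pm.c), with a pthread mutex in place of transition_lock:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct item {
	int state;
	struct item *next;
};

static struct item *pending, *pending_tail;     /* transition_list analogue */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Producer side, like mhi_queue_state_transition(): append under the lock. */
static int queue_state(int state)
{
	struct item *it = malloc(sizeof(*it));

	if (!it)
		return -1;
	it->state = state;
	it->next = NULL;

	pthread_mutex_lock(&lock);
	if (pending_tail)
		pending_tail->next = it;
	else
		pending = it;
	pending_tail = it;
	pthread_mutex_unlock(&lock);
	return 0;
}

/* Worker side, like mhi_pm_st_worker(): splice the list, then drain unlocked. */
static void st_worker(void)
{
	struct item *head, *it;

	pthread_mutex_lock(&lock);
	head = pending;                         /* list_splice_tail_init() analogue */
	pending = pending_tail = NULL;
	pthread_mutex_unlock(&lock);

	while ((it = head)) {
		head = it->next;
		printf("handling transition %d\n", it->state);
		free(it);
	}
}

int main(void)
{
	queue_state(1);
	queue_state(2);
	st_worker();
	return 0;
}
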
    806
    807int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
    808{
    809	struct mhi_chan *itr, *tmp;
    810	struct device *dev = &mhi_cntrl->mhi_dev->dev;
    811	enum mhi_pm_state new_state;
    812	int ret;
    813
    814	if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
    815		return -EINVAL;
    816
    817	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
    818		return -EIO;
    819
    820	/* Return busy if there are any pending resources */
    821	if (atomic_read(&mhi_cntrl->dev_wake) ||
    822	    atomic_read(&mhi_cntrl->pending_pkts))
    823		return -EBUSY;
    824
    825	/* Take MHI out of M2 state */
    826	read_lock_bh(&mhi_cntrl->pm_lock);
    827	mhi_cntrl->wake_get(mhi_cntrl, false);
    828	read_unlock_bh(&mhi_cntrl->pm_lock);
    829
    830	ret = wait_event_timeout(mhi_cntrl->state_event,
    831				 mhi_cntrl->dev_state == MHI_STATE_M0 ||
    832				 mhi_cntrl->dev_state == MHI_STATE_M1 ||
    833				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
    834				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
    835
    836	read_lock_bh(&mhi_cntrl->pm_lock);
    837	mhi_cntrl->wake_put(mhi_cntrl, false);
    838	read_unlock_bh(&mhi_cntrl->pm_lock);
    839
    840	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
    841		dev_err(dev,
    842			"Could not enter M0/M1 state");
    843		return -EIO;
    844	}
    845
    846	write_lock_irq(&mhi_cntrl->pm_lock);
    847
    848	if (atomic_read(&mhi_cntrl->dev_wake) ||
    849	    atomic_read(&mhi_cntrl->pending_pkts)) {
    850		write_unlock_irq(&mhi_cntrl->pm_lock);
    851		return -EBUSY;
    852	}
    853
    854	dev_dbg(dev, "Allowing M3 transition\n");
    855	new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER);
    856	if (new_state != MHI_PM_M3_ENTER) {
    857		write_unlock_irq(&mhi_cntrl->pm_lock);
    858		dev_err(dev,
    859			"Error setting to PM state: %s from: %s\n",
    860			to_mhi_pm_state_str(MHI_PM_M3_ENTER),
    861			to_mhi_pm_state_str(mhi_cntrl->pm_state));
    862		return -EIO;
    863	}
    864
    865	/* Set MHI to M3 and wait for completion */
    866	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
    867	write_unlock_irq(&mhi_cntrl->pm_lock);
    868	dev_dbg(dev, "Waiting for M3 completion\n");
    869
    870	ret = wait_event_timeout(mhi_cntrl->state_event,
    871				 mhi_cntrl->dev_state == MHI_STATE_M3 ||
    872				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
    873				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
    874
    875	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
    876		dev_err(dev,
    877			"Did not enter M3 state, MHI state: %s, PM state: %s\n",
    878			mhi_state_str(mhi_cntrl->dev_state),
    879			to_mhi_pm_state_str(mhi_cntrl->pm_state));
    880		return -EIO;
    881	}
    882
    883	/* Notify clients about entering LPM */
    884	list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
    885		mutex_lock(&itr->mutex);
    886		if (itr->mhi_dev)
    887			mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER);
    888		mutex_unlock(&itr->mutex);
    889	}
    890
    891	return 0;
    892}
    893EXPORT_SYMBOL_GPL(mhi_pm_suspend);
    894
    895static int __mhi_pm_resume(struct mhi_controller *mhi_cntrl, bool force)
    896{
    897	struct mhi_chan *itr, *tmp;
    898	struct device *dev = &mhi_cntrl->mhi_dev->dev;
    899	enum mhi_pm_state cur_state;
    900	int ret;
    901
    902	dev_dbg(dev, "Entered with PM state: %s, MHI state: %s\n",
    903		to_mhi_pm_state_str(mhi_cntrl->pm_state),
    904		mhi_state_str(mhi_cntrl->dev_state));
    905
    906	if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
    907		return 0;
    908
    909	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
    910		return -EIO;
    911
    912	if (mhi_get_mhi_state(mhi_cntrl) != MHI_STATE_M3) {
    913		dev_warn(dev, "Resuming from non M3 state (%s)\n",
    914			 mhi_state_str(mhi_get_mhi_state(mhi_cntrl)));
    915		if (!force)
    916			return -EINVAL;
    917	}
    918
    919	/* Notify clients about exiting LPM */
    920	list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
    921		mutex_lock(&itr->mutex);
    922		if (itr->mhi_dev)
    923			mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT);
    924		mutex_unlock(&itr->mutex);
    925	}
    926
    927	write_lock_irq(&mhi_cntrl->pm_lock);
    928	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_EXIT);
    929	if (cur_state != MHI_PM_M3_EXIT) {
    930		write_unlock_irq(&mhi_cntrl->pm_lock);
    931		dev_info(dev,
    932			 "Error setting to PM state: %s from: %s\n",
    933			 to_mhi_pm_state_str(MHI_PM_M3_EXIT),
    934			 to_mhi_pm_state_str(mhi_cntrl->pm_state));
    935		return -EIO;
    936	}
    937
    938	/* Set MHI to M0 and wait for completion */
    939	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
    940	write_unlock_irq(&mhi_cntrl->pm_lock);
    941
    942	ret = wait_event_timeout(mhi_cntrl->state_event,
    943				 mhi_cntrl->dev_state == MHI_STATE_M0 ||
    944				 mhi_cntrl->dev_state == MHI_STATE_M2 ||
    945				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
    946				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
    947
    948	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
    949		dev_err(dev,
    950			"Did not enter M0 state, MHI state: %s, PM state: %s\n",
    951			mhi_state_str(mhi_cntrl->dev_state),
    952			to_mhi_pm_state_str(mhi_cntrl->pm_state));
    953		return -EIO;
    954	}
    955
    956	return 0;
    957}
    958
    959int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
    960{
    961	return __mhi_pm_resume(mhi_cntrl, false);
    962}
    963EXPORT_SYMBOL_GPL(mhi_pm_resume);
    964
    965int mhi_pm_resume_force(struct mhi_controller *mhi_cntrl)
    966{
    967	return __mhi_pm_resume(mhi_cntrl, true);
    968}
    969EXPORT_SYMBOL_GPL(mhi_pm_resume_force);
    970
    971int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
    972{
    973	int ret;
    974
    975	/* Wake up the device */
    976	read_lock_bh(&mhi_cntrl->pm_lock);
    977	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
    978		read_unlock_bh(&mhi_cntrl->pm_lock);
    979		return -EIO;
    980	}
    981	mhi_cntrl->wake_get(mhi_cntrl, true);
    982	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
    983		mhi_trigger_resume(mhi_cntrl);
    984	read_unlock_bh(&mhi_cntrl->pm_lock);
    985
    986	ret = wait_event_timeout(mhi_cntrl->state_event,
    987				 mhi_cntrl->pm_state == MHI_PM_M0 ||
    988				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
    989				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
    990
    991	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
    992		read_lock_bh(&mhi_cntrl->pm_lock);
    993		mhi_cntrl->wake_put(mhi_cntrl, false);
    994		read_unlock_bh(&mhi_cntrl->pm_lock);
    995		return -EIO;
    996	}
    997
    998	return 0;
    999}
   1000
   1001/* Assert device wake db */
   1002static void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force)
   1003{
   1004	unsigned long flags;
   1005
   1006	/*
   1007	 * If force flag is set, then increment the wake count value and
   1008	 * ring wake db
   1009	 */
   1010	if (unlikely(force)) {
   1011		spin_lock_irqsave(&mhi_cntrl->wlock, flags);
   1012		atomic_inc(&mhi_cntrl->dev_wake);
   1013		if (MHI_WAKE_DB_FORCE_SET_VALID(mhi_cntrl->pm_state) &&
   1014		    !mhi_cntrl->wake_set) {
   1015			mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
   1016			mhi_cntrl->wake_set = true;
   1017		}
   1018		spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
   1019	} else {
   1020		/*
   1021		 * If resources are already requested, then just increment
   1022		 * the wake count value and return
   1023		 */
   1024		if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0)))
   1025			return;
   1026
   1027		spin_lock_irqsave(&mhi_cntrl->wlock, flags);
   1028		if ((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) &&
   1029		    MHI_WAKE_DB_SET_VALID(mhi_cntrl->pm_state) &&
   1030		    !mhi_cntrl->wake_set) {
   1031			mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
   1032			mhi_cntrl->wake_set = true;
   1033		}
   1034		spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
   1035	}
   1036}
   1037
   1038/* De-assert device wake db */
   1039static void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl,
   1040				  bool override)
   1041{
   1042	unsigned long flags;
   1043
   1044	/*
   1045	 * Only continue if there is a single resource, else just decrement
   1046	 * and return
   1047	 */
   1048	if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1)))
   1049		return;
   1050
   1051	spin_lock_irqsave(&mhi_cntrl->wlock, flags);
   1052	if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) &&
   1053	    MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && !override &&
   1054	    mhi_cntrl->wake_set) {
   1055		mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0);
   1056		mhi_cntrl->wake_set = false;
   1057	}
   1058	spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
   1059}
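
The two wake helpers above keep a vote count in dev_wake and only touch the wake doorbell on the 0 -> 1 and 1 -> 0 edges; atomic_add_unless() lets every other caller stay on a lock-free fast path. A user-space sketch of that counting scheme using C11 atomics (not part of pm.c; the real code additionally takes wlock and checks pm_state before ringing the doorbell):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int dev_wake;     /* outstanding wake votes */
static bool wake_set;           /* whether the (pretend) wake doorbell is rung */

/* atomic_add_unless() analogue: add 'a' to *v unless *v equals 'u'. */
static bool add_unless(atomic_int *v, int a, int u)
{
	int c = atomic_load(v);

	while (c != u)
		if (atomic_compare_exchange_weak(v, &c, c + a))
			return true;
	return false;
}

static void wake_get(void)
{
	/* Fast path: a vote already exists, just add ours. */
	if (add_unless(&dev_wake, 1, 0))
		return;
	/* Slow path: first voter rings the doorbell (0 -> 1 edge). */
	if (atomic_fetch_add(&dev_wake, 1) + 1 == 1 && !wake_set)
		wake_set = true;
}

static void wake_put(void)
{
	/* Fast path: more than one vote remains, just drop ours. */
	if (add_unless(&dev_wake, -1, 1))
		return;
	/* Slow path: last voter clears the doorbell (1 -> 0 edge). */
	if (atomic_fetch_sub(&dev_wake, 1) - 1 == 0 && wake_set)
		wake_set = false;
}

int main(void)
{
	wake_get();
	wake_get();
	wake_put();
	wake_put();
	printf("votes: %d, wake set: %d\n", atomic_load(&dev_wake), (int)wake_set);
	return 0;
}
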
   1060
   1061int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
   1062{
   1063	enum mhi_state state;
   1064	enum mhi_ee_type current_ee;
   1065	enum dev_st_transition next_state;
   1066	struct device *dev = &mhi_cntrl->mhi_dev->dev;
   1067	u32 interval_us = 25000; /* poll register field every 25 milliseconds */
   1068	int ret;
   1069
   1070	dev_info(dev, "Requested to power ON\n");
   1071
   1072	/* Supply default wake routines if not provided by controller driver */
   1073	if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put ||
   1074	    !mhi_cntrl->wake_toggle) {
   1075		mhi_cntrl->wake_get = mhi_assert_dev_wake;
   1076		mhi_cntrl->wake_put = mhi_deassert_dev_wake;
   1077		mhi_cntrl->wake_toggle = (mhi_cntrl->db_access & MHI_PM_M2) ?
   1078			mhi_toggle_dev_wake_nop : mhi_toggle_dev_wake;
   1079	}
   1080
   1081	mutex_lock(&mhi_cntrl->pm_mutex);
   1082	mhi_cntrl->pm_state = MHI_PM_DISABLE;
   1083
   1084	/* Setup BHI INTVEC */
   1085	write_lock_irq(&mhi_cntrl->pm_lock);
   1086	mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
   1087	mhi_cntrl->pm_state = MHI_PM_POR;
   1088	mhi_cntrl->ee = MHI_EE_MAX;
   1089	current_ee = mhi_get_exec_env(mhi_cntrl);
   1090	write_unlock_irq(&mhi_cntrl->pm_lock);
   1091
   1092	/* Confirm that the device is in valid exec env */
   1093	if (!MHI_POWER_UP_CAPABLE(current_ee)) {
   1094		dev_err(dev, "%s is not a valid EE for power on\n",
   1095			TO_MHI_EXEC_STR(current_ee));
   1096		ret = -EIO;
   1097		goto error_exit;
   1098	}
   1099
   1100	state = mhi_get_mhi_state(mhi_cntrl);
   1101	dev_dbg(dev, "Attempting power on with EE: %s, state: %s\n",
   1102		TO_MHI_EXEC_STR(current_ee), mhi_state_str(state));
   1103
   1104	if (state == MHI_STATE_SYS_ERR) {
   1105		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
   1106		ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
   1107				 MHICTRL_RESET_MASK, 0, interval_us);
   1108		if (ret) {
   1109			dev_info(dev, "Failed to reset MHI due to syserr state\n");
   1110			goto error_exit;
   1111		}
   1112
   1113		/*
    1114		 * device clears INTVEC as part of RESET processing,
   1115		 * re-program it
   1116		 */
   1117		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
   1118	}
   1119
   1120	ret = mhi_init_irq_setup(mhi_cntrl);
   1121	if (ret)
   1122		goto error_exit;
   1123
   1124	/* Transition to next state */
   1125	next_state = MHI_IN_PBL(current_ee) ?
   1126		DEV_ST_TRANSITION_PBL : DEV_ST_TRANSITION_READY;
   1127
   1128	mhi_queue_state_transition(mhi_cntrl, next_state);
   1129
   1130	mutex_unlock(&mhi_cntrl->pm_mutex);
   1131
   1132	dev_info(dev, "Power on setup success\n");
   1133
   1134	return 0;
   1135
   1136error_exit:
   1137	mhi_cntrl->pm_state = MHI_PM_DISABLE;
   1138	mutex_unlock(&mhi_cntrl->pm_mutex);
   1139
   1140	return ret;
   1141}
   1142EXPORT_SYMBOL_GPL(mhi_async_power_up);
   1143
   1144void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
   1145{
   1146	enum mhi_pm_state cur_state, transition_state;
   1147	struct device *dev = &mhi_cntrl->mhi_dev->dev;
   1148
   1149	mutex_lock(&mhi_cntrl->pm_mutex);
   1150	write_lock_irq(&mhi_cntrl->pm_lock);
   1151	cur_state = mhi_cntrl->pm_state;
   1152	if (cur_state == MHI_PM_DISABLE) {
   1153		write_unlock_irq(&mhi_cntrl->pm_lock);
   1154		mutex_unlock(&mhi_cntrl->pm_mutex);
   1155		return; /* Already powered down */
   1156	}
   1157
   1158	/* If it's not a graceful shutdown, force MHI to linkdown state */
   1159	transition_state = (graceful) ? MHI_PM_SHUTDOWN_PROCESS :
   1160			   MHI_PM_LD_ERR_FATAL_DETECT;
   1161
   1162	cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state);
   1163	if (cur_state != transition_state) {
   1164		dev_err(dev, "Failed to move to state: %s from: %s\n",
   1165			to_mhi_pm_state_str(transition_state),
   1166			to_mhi_pm_state_str(mhi_cntrl->pm_state));
   1167		/* Force link down or error fatal detected state */
   1168		mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
   1169	}
   1170
   1171	/* mark device inactive to avoid any further host processing */
   1172	mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
   1173	mhi_cntrl->dev_state = MHI_STATE_RESET;
   1174
   1175	wake_up_all(&mhi_cntrl->state_event);
   1176
   1177	write_unlock_irq(&mhi_cntrl->pm_lock);
   1178	mutex_unlock(&mhi_cntrl->pm_mutex);
   1179
   1180	mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_DISABLE);
   1181
   1182	/* Wait for shutdown to complete */
   1183	flush_work(&mhi_cntrl->st_worker);
   1184
   1185	free_irq(mhi_cntrl->irq[0], mhi_cntrl);
   1186}
   1187EXPORT_SYMBOL_GPL(mhi_power_down);
   1188
   1189int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
   1190{
   1191	int ret = mhi_async_power_up(mhi_cntrl);
   1192
   1193	if (ret)
   1194		return ret;
   1195
   1196	wait_event_timeout(mhi_cntrl->state_event,
   1197			   MHI_IN_MISSION_MODE(mhi_cntrl->ee) ||
   1198			   MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
   1199			   msecs_to_jiffies(mhi_cntrl->timeout_ms));
   1200
   1201	ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -ETIMEDOUT;
   1202	if (ret)
   1203		mhi_power_down(mhi_cntrl, false);
   1204
   1205	return ret;
   1206}
   1207EXPORT_SYMBOL(mhi_sync_power_up);
   1208
   1209int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl)
   1210{
   1211	struct device *dev = &mhi_cntrl->mhi_dev->dev;
   1212	int ret;
   1213
   1214	/* Check if device is already in RDDM */
   1215	if (mhi_cntrl->ee == MHI_EE_RDDM)
   1216		return 0;
   1217
   1218	dev_dbg(dev, "Triggering SYS_ERR to force RDDM state\n");
   1219	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
   1220
   1221	/* Wait for RDDM event */
   1222	ret = wait_event_timeout(mhi_cntrl->state_event,
   1223				 mhi_cntrl->ee == MHI_EE_RDDM,
   1224				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
   1225	ret = ret ? 0 : -EIO;
   1226
   1227	return ret;
   1228}
   1229EXPORT_SYMBOL_GPL(mhi_force_rddm_mode);
   1230
   1231void mhi_device_get(struct mhi_device *mhi_dev)
   1232{
   1233	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
   1234
   1235	mhi_dev->dev_wake++;
   1236	read_lock_bh(&mhi_cntrl->pm_lock);
   1237	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
   1238		mhi_trigger_resume(mhi_cntrl);
   1239
   1240	mhi_cntrl->wake_get(mhi_cntrl, true);
   1241	read_unlock_bh(&mhi_cntrl->pm_lock);
   1242}
   1243EXPORT_SYMBOL_GPL(mhi_device_get);
   1244
   1245int mhi_device_get_sync(struct mhi_device *mhi_dev)
   1246{
   1247	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
   1248	int ret;
   1249
   1250	ret = __mhi_device_get_sync(mhi_cntrl);
   1251	if (!ret)
   1252		mhi_dev->dev_wake++;
   1253
   1254	return ret;
   1255}
   1256EXPORT_SYMBOL_GPL(mhi_device_get_sync);
   1257
   1258void mhi_device_put(struct mhi_device *mhi_dev)
   1259{
   1260	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
   1261
   1262	mhi_dev->dev_wake--;
   1263	read_lock_bh(&mhi_cntrl->pm_lock);
   1264	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
   1265		mhi_trigger_resume(mhi_cntrl);
   1266
   1267	mhi_cntrl->wake_put(mhi_cntrl, false);
   1268	read_unlock_bh(&mhi_cntrl->pm_lock);
   1269}
   1270EXPORT_SYMBOL_GPL(mhi_device_put);