cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

iosm_ipc_imem.c (41092B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-21 Intel Corporation.
 */

#include <linux/delay.h>

#include "iosm_ipc_chnl_cfg.h"
#include "iosm_ipc_devlink.h"
#include "iosm_ipc_flash.h"
#include "iosm_ipc_imem.h"
#include "iosm_ipc_port.h"
#include "iosm_ipc_trace.h"
#include "iosm_ipc_debugfs.h"
/* Check whether the given channel is the WWAN IP channel. */
static int ipc_imem_check_wwan_ips(struct ipc_mem_channel *chnl)
{
	if (chnl)
		return chnl->ctype == IPC_CTYPE_WWAN &&
		       chnl->if_id == IPC_MEM_MUX_IP_CH_IF_ID;
	return false;
}

static int ipc_imem_msg_send_device_sleep(struct iosm_imem *ipc_imem, u32 state)
{
	union ipc_msg_prep_args prep_args = {
		.sleep.target = 1,
		.sleep.state = state,
	};

	ipc_imem->device_sleep = state;

	return ipc_protocol_tq_msg_send(ipc_imem->ipc_protocol,
					IPC_MSG_PREP_SLEEP, &prep_args, NULL);
}

static bool ipc_imem_dl_skb_alloc(struct iosm_imem *ipc_imem,
				  struct ipc_pipe *pipe)
{
	/* limit max. nr of entries */
	if (pipe->nr_of_queued_entries >= pipe->max_nr_of_queued_entries)
		return false;

	return ipc_protocol_dl_td_prepare(ipc_imem->ipc_protocol, pipe);
}
/* Timer handler: retry DL buffer allocation for any pipe that has no free
 * buffers and ring the doorbell if new TDs became available.
 */
static int ipc_imem_tq_td_alloc_timer(struct iosm_imem *ipc_imem, int arg,
				      void *msg, size_t size)
{
	bool new_buffers_available = false;
	bool retry_allocation = false;
	int i;

	for (i = 0; i < IPC_MEM_MAX_CHANNELS; i++) {
		struct ipc_pipe *pipe = &ipc_imem->channels[i].dl_pipe;

		if (!pipe->is_open || pipe->nr_of_queued_entries > 0)
			continue;

		while (ipc_imem_dl_skb_alloc(ipc_imem, pipe))
			new_buffers_available = true;

		if (pipe->nr_of_queued_entries == 0)
			retry_allocation = true;
	}

	if (new_buffers_available)
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
					      IPC_HP_DL_PROCESS);

	if (retry_allocation) {
		ipc_imem->hrtimer_period =
		ktime_set(0, IPC_TD_ALLOC_TIMER_PERIOD_MS * 1000 * 1000ULL);
		if (!hrtimer_active(&ipc_imem->td_alloc_timer))
			hrtimer_start(&ipc_imem->td_alloc_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
	}
	return 0;
}

static enum hrtimer_restart ipc_imem_td_alloc_timer_cb(struct hrtimer *hr_timer)
{
	struct iosm_imem *ipc_imem =
		container_of(hr_timer, struct iosm_imem, td_alloc_timer);
	/* Post an async tasklet event to trigger HP update Doorbell */
	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_td_alloc_timer, 0, NULL,
				 0, false);
	return HRTIMER_NORESTART;
}
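
/* Note (illustrative, not part of the driver): hrtimer callbacks run in
 * hard interrupt context, so every timer in this file only posts a task to
 * the driver's tasklet queue and defers the real work (doorbell, buffer
 * allocation) to it. A minimal sketch of the split, with hypothetical names
 * my_tq_work/my_timer_cb/do_the_actual_work that do not exist in this file:
 *
 *	static int my_tq_work(struct iosm_imem *ipc_imem, int arg,
 *			      void *msg, size_t size)
 *	{
 *		do_the_actual_work(ipc_imem);
 *		return 0;
 *	}
 *
 *	static enum hrtimer_restart my_timer_cb(struct hrtimer *t)
 *	{
 *		struct iosm_imem *ipc_imem =
 *			container_of(t, struct iosm_imem, td_alloc_timer);
 *
 *		ipc_task_queue_send_task(ipc_imem, my_tq_work, 0, NULL,
 *					 0, false);
 *		return HRTIMER_NORESTART;
 *	}
 */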

/* Fast update timer tasklet handler to trigger HP update */
static int ipc_imem_tq_fast_update_timer_cb(struct iosm_imem *ipc_imem, int arg,
					    void *msg, size_t size)
{
	ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
				      IPC_HP_FAST_TD_UPD_TMR);

	return 0;
}

static enum hrtimer_restart
ipc_imem_fast_update_timer_cb(struct hrtimer *hr_timer)
{
	struct iosm_imem *ipc_imem =
		container_of(hr_timer, struct iosm_imem, fast_update_timer);
	/* Post an async tasklet event to trigger HP update Doorbell */
	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_fast_update_timer_cb, 0,
				 NULL, 0, false);
	return HRTIMER_NORESTART;
}

static int ipc_imem_tq_adb_timer_cb(struct iosm_imem *ipc_imem, int arg,
				    void *msg, size_t size)
{
	ipc_mux_ul_adb_finish(ipc_imem->mux);
	return 0;
}

static enum hrtimer_restart
ipc_imem_adb_timer_cb(struct hrtimer *hr_timer)
{
	struct iosm_imem *ipc_imem =
		container_of(hr_timer, struct iosm_imem, adb_timer);

	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_adb_timer_cb, 0,
				 NULL, 0, false);
	return HRTIMER_NORESTART;
}

static int ipc_imem_setup_cp_mux_cap_init(struct iosm_imem *ipc_imem,
					  struct ipc_mux_config *cfg)
{
	ipc_mmio_update_cp_capability(ipc_imem->mmio);

	if (ipc_imem->mmio->mux_protocol == MUX_UNKNOWN) {
		dev_err(ipc_imem->dev, "Failed to get Mux capability.");
		return -EINVAL;
	}

	cfg->protocol = ipc_imem->mmio->mux_protocol;

	cfg->ul_flow = (ipc_imem->mmio->has_ul_flow_credit == 1) ?
			       MUX_UL_ON_CREDITS :
			       MUX_UL;

	/* The instance ID is the same as the channel ID because it is
	 * reused by the channel allocation function.
	 */
	cfg->instance_id = IPC_MEM_MUX_IP_CH_IF_ID;

	return 0;
}

void ipc_imem_msg_send_feature_set(struct iosm_imem *ipc_imem,
				   unsigned int reset_enable, bool atomic_ctx)
{
	union ipc_msg_prep_args prep_args = { .feature_set.reset_enable =
						      reset_enable };

	if (atomic_ctx)
		ipc_protocol_tq_msg_send(ipc_imem->ipc_protocol,
					 IPC_MSG_PREP_FEATURE_SET, &prep_args,
					 NULL);
	else
		ipc_protocol_msg_send(ipc_imem->ipc_protocol,
				      IPC_MSG_PREP_FEATURE_SET, &prep_args);
}

/**
 * ipc_imem_td_update_timer_start - Starts the TD Update Timer if not started.
 * @ipc_imem:                       Pointer to imem data-struct
 */
void ipc_imem_td_update_timer_start(struct iosm_imem *ipc_imem)
{
	/* Use the TD update timer only in the runtime phase */
	if (!ipc_imem->enter_runtime || ipc_imem->td_update_timer_suspended) {
		/* trigger the doorbell irq on CP directly. */
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
					      IPC_HP_TD_UPD_TMR_START);
		return;
	}

	if (!hrtimer_active(&ipc_imem->tdupdate_timer)) {
		ipc_imem->hrtimer_period =
		ktime_set(0, TD_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
		if (!hrtimer_active(&ipc_imem->tdupdate_timer))
			hrtimer_start(&ipc_imem->tdupdate_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
	}
}
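
/* Unit note (illustrative): ktime_set(secs, nsecs) takes nanoseconds as its
 * second argument, which is why the microsecond constant is scaled by
 * 1000ULL above. If TD_UPDATE_DEFAULT_TIMEOUT_USEC were 1900 (a value
 * assumed here for the example; see iosm_ipc_imem.h for the real one):
 *
 *	ktime_set(0, 1900 * 1000ULL)	-> 1900000 ns = 1.9 ms
 *
 * The same usec-to-nsec scaling pattern recurs for every hrtimer period in
 * this file.
 */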

void ipc_imem_hrtimer_stop(struct hrtimer *hr_timer)
{
	if (hrtimer_active(hr_timer))
		hrtimer_cancel(hr_timer);
}

/**
 * ipc_imem_adb_timer_start -	Starts the ADB timer if not already running.
 * @ipc_imem:			Pointer to imem data-struct
 */
void ipc_imem_adb_timer_start(struct iosm_imem *ipc_imem)
{
	if (!hrtimer_active(&ipc_imem->adb_timer)) {
		ipc_imem->hrtimer_period =
			ktime_set(0, IOSM_AGGR_MUX_ADB_FINISH_TIMEOUT_NSEC);
		hrtimer_start(&ipc_imem->adb_timer,
			      ipc_imem->hrtimer_period,
			      HRTIMER_MODE_REL);
	}
}

bool ipc_imem_ul_write_td(struct iosm_imem *ipc_imem)
{
	struct ipc_mem_channel *channel;
	bool hpda_ctrl_pending = false;
	struct sk_buff_head *ul_list;
	bool hpda_pending = false;
	struct ipc_pipe *pipe;
	int i;

	/* Analyze the uplink pipe of all active channels. */
	for (i = 0; i < ipc_imem->nr_of_channels; i++) {
		channel = &ipc_imem->channels[i];

		if (channel->state != IMEM_CHANNEL_ACTIVE)
			continue;

		pipe = &channel->ul_pipe;

		/* Get the reference to the skbuf accumulator list. */
		ul_list = &channel->ul_list;

		/* Fill the transfer descriptor with the uplink buffer info. */
		if (!ipc_imem_check_wwan_ips(channel)) {
			hpda_ctrl_pending |=
				ipc_protocol_ul_td_send(ipc_imem->ipc_protocol,
							pipe, ul_list);
		} else {
			hpda_pending |=
				ipc_protocol_ul_td_send(ipc_imem->ipc_protocol,
							pipe, ul_list);
		}
	}

	/* forced HP update needed for non data channels */
	if (hpda_ctrl_pending) {
		hpda_pending = false;
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
					      IPC_HP_UL_WRITE_TD);
	}

	return hpda_pending;
}

void ipc_imem_ipc_init_check(struct iosm_imem *ipc_imem)
{
	int timeout = IPC_MODEM_BOOT_TIMEOUT;

	ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_INIT;

	/* Trigger the CP interrupt to enter the init state. */
	ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
			  IPC_MEM_DEVICE_IPC_INIT);
	/* Wait for the CP update. */
	do {
		if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
		    ipc_imem->ipc_requested_state) {
			/* Prepare the MMIO space */
			ipc_mmio_config(ipc_imem->mmio);

			/* Trigger the CP irq to enter the running state. */
			ipc_imem->ipc_requested_state =
				IPC_MEM_DEVICE_IPC_RUNNING;
			ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
					  IPC_MEM_DEVICE_IPC_RUNNING);

			return;
		}
		msleep(20);
	} while (--timeout);

	/* timeout */
	dev_err(ipc_imem->dev, "%s: ipc_status(%d) ne. IPC_MEM_DEVICE_IPC_INIT",
		ipc_imem_phase_get_string(ipc_imem->phase),
		ipc_mmio_get_ipc_state(ipc_imem->mmio));

	ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_TIMEOUT);
}
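
/* Timing note (illustrative): the polling loop above checks the MMIO IPC
 * state every 20 ms for at most IPC_MODEM_BOOT_TIMEOUT iterations, so the
 * worst-case wait before UEVENT_MDM_TIMEOUT is sent is roughly
 *
 *	IPC_MODEM_BOOT_TIMEOUT * 20 ms
 *
 * e.g. a hypothetical timeout constant of 75 would give about 1.5 s; see
 * iosm_ipc_imem.h for the actual value.
 */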

/* Analyze the packet type and distribute it. */
static void ipc_imem_dl_skb_process(struct iosm_imem *ipc_imem,
				    struct ipc_pipe *pipe, struct sk_buff *skb)
{
	u16 port_id;

	if (!skb)
		return;

	/* An AT/control or IP packet is expected. */
	switch (pipe->channel->ctype) {
	case IPC_CTYPE_CTRL:
		port_id = pipe->channel->channel_id;
		ipc_pcie_addr_unmap(ipc_imem->pcie, IPC_CB(skb)->len,
				    IPC_CB(skb)->mapping,
				    IPC_CB(skb)->direction);
		if (port_id == IPC_MEM_CTRL_CHL_ID_7)
			ipc_imem_sys_devlink_notify_rx(ipc_imem->ipc_devlink,
						       skb);
		else if (ipc_is_trace_channel(ipc_imem, port_id))
			ipc_trace_port_rx(ipc_imem, skb);
		else
			wwan_port_rx(ipc_imem->ipc_port[port_id]->iosm_port,
				     skb);
		break;

	case IPC_CTYPE_WWAN:
		if (pipe->channel->if_id == IPC_MEM_MUX_IP_CH_IF_ID)
			ipc_mux_dl_decode(ipc_imem->mux, skb);
		break;
	default:
		dev_err(ipc_imem->dev, "Invalid channel type");
		break;
	}
}

/* Process the downlink data and pass them to the char or net layer. */
static void ipc_imem_dl_pipe_process(struct iosm_imem *ipc_imem,
				     struct ipc_pipe *pipe)
{
	s32 cnt = 0, processed_td_cnt = 0;
	struct ipc_mem_channel *channel;
	u32 head = 0, tail = 0;
	bool processed = false;
	struct sk_buff *skb;

	channel = pipe->channel;

	ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol, pipe, &head,
					 &tail);
	if (pipe->old_tail != tail) {
		if (pipe->old_tail < tail)
			cnt = tail - pipe->old_tail;
		else
			cnt = pipe->nr_of_entries - pipe->old_tail + tail;
	}

	processed_td_cnt = cnt;

	/* Seek for pipes with pending DL data. */
	while (cnt--) {
		skb = ipc_protocol_dl_td_process(ipc_imem->ipc_protocol, pipe);

		/* Analyze the packet type and distribute it. */
		ipc_imem_dl_skb_process(ipc_imem, pipe, skb);
	}

	/* Try to allocate new empty DL SKBs from head..tail - 1. */
	while (ipc_imem_dl_skb_alloc(ipc_imem, pipe))
		processed = true;

	if (processed && !ipc_imem_check_wwan_ips(channel)) {
		/* Force HP update for non IP channels */
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
					      IPC_HP_DL_PROCESS);
		processed = false;

		/* If Fast Update timer is already running then stop */
		ipc_imem_hrtimer_stop(&ipc_imem->fast_update_timer);
	}

	/* Any control channel process will get immediate HP update.
	 * Start Fast update timer only for IP channel if all the TDs were
	 * used in last process.
	 */
	if (processed && (processed_td_cnt == pipe->nr_of_entries - 1)) {
		ipc_imem->hrtimer_period =
		ktime_set(0, FORCE_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
		hrtimer_start(&ipc_imem->fast_update_timer,
			      ipc_imem->hrtimer_period, HRTIMER_MODE_REL);
	}

	if (ipc_imem->app_notify_dl_pend)
		complete(&ipc_imem->dl_pend_sem);
}
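
/* Ring arithmetic note (illustrative): old_tail and tail index a circular
 * TD ring of nr_of_entries slots, so the two-branch count above is just the
 * wrap-around distance and could equivalently be written as
 *
 *	cnt = (tail - pipe->old_tail + pipe->nr_of_entries) %
 *	      pipe->nr_of_entries;
 *
 * For example, nr_of_entries = 8, old_tail = 6 and tail = 2 give
 * cnt = (2 - 6 + 8) % 8 = 4 descriptors to process. The same computation
 * appears again in ipc_imem_ul_pipe_process() below.
 */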

/* process open uplink pipe */
static void ipc_imem_ul_pipe_process(struct iosm_imem *ipc_imem,
				     struct ipc_pipe *pipe)
{
	struct ipc_mem_channel *channel;
	u32 tail = 0, head = 0;
	struct sk_buff *skb;
	s32 cnt = 0;

	channel = pipe->channel;

	/* Get the internal phase. */
	ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol, pipe, &head,
					 &tail);

	if (pipe->old_tail != tail) {
		if (pipe->old_tail < tail)
			cnt = tail - pipe->old_tail;
		else
			cnt = pipe->nr_of_entries - pipe->old_tail + tail;
	}

	/* Free UL buffers. */
	while (cnt--) {
		skb = ipc_protocol_ul_td_process(ipc_imem->ipc_protocol, pipe);

		if (!skb)
			continue;

		/* If the user app was suspended in uplink direction - blocking
		 * write, resume it.
		 */
		if (IPC_CB(skb)->op_type == UL_USR_OP_BLOCKED)
			complete(&channel->ul_sem);

		/* Free the skbuf element. */
		if (IPC_CB(skb)->op_type == UL_MUX_OP_ADB) {
			if (channel->if_id == IPC_MEM_MUX_IP_CH_IF_ID)
				ipc_mux_ul_encoded_process(ipc_imem->mux, skb);
			else
				dev_err(ipc_imem->dev,
					"OP Type is UL_MUX, unknown if_id %d",
					channel->if_id);
		} else {
			ipc_pcie_kfree_skb(ipc_imem->pcie, skb);
		}
	}

	/* Trace channel stats for IP UL pipe. */
	if (ipc_imem_check_wwan_ips(pipe->channel))
		ipc_mux_check_n_restart_tx(ipc_imem->mux);

	if (ipc_imem->app_notify_ul_pend)
		complete(&ipc_imem->ul_pend_sem);
}

/* Execute the ROM irq: read the ROM exit code and wake up the flash app. */
static void ipc_imem_rom_irq_exec(struct iosm_imem *ipc_imem)
{
	struct ipc_mem_channel *channel;

	channel = ipc_imem->ipc_devlink->devlink_sio.channel;
	ipc_imem->rom_exit_code = ipc_mmio_get_rom_exit_code(ipc_imem->mmio);
	complete(&channel->ul_sem);
}

/* Execute the UL bundle timer actions, generating the doorbell irq. */
static int ipc_imem_tq_td_update_timer_cb(struct iosm_imem *ipc_imem, int arg,
					  void *msg, size_t size)
{
	ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
				      IPC_HP_TD_UPD_TMR);
	return 0;
}

/* Consider link power management in the runtime phase. */
static void ipc_imem_slp_control_exec(struct iosm_imem *ipc_imem)
{
	/* The link will go down; test for pending UL packets. */
	if (ipc_protocol_pm_dev_sleep_handle(ipc_imem->ipc_protocol) &&
	    hrtimer_active(&ipc_imem->tdupdate_timer)) {
		/* Generate the doorbell irq. */
		ipc_imem_tq_td_update_timer_cb(ipc_imem, 0, NULL, 0);
		/* Stop the TD update timer. */
		ipc_imem_hrtimer_stop(&ipc_imem->tdupdate_timer);
		/* Stop the fast update timer. */
		ipc_imem_hrtimer_stop(&ipc_imem->fast_update_timer);
	}
}

/* Execute startup timer and wait for delayed start (e.g. NAND) */
static int ipc_imem_tq_startup_timer_cb(struct iosm_imem *ipc_imem, int arg,
					void *msg, size_t size)
{
	/* Update & check the current operation phase. */
	if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN)
		return -EIO;

	if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
	    IPC_MEM_DEVICE_IPC_UNINIT) {
		ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_INIT;

		ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
				  IPC_MEM_DEVICE_IPC_INIT);

		ipc_imem->hrtimer_period = ktime_set(0, 100 * 1000UL * 1000ULL);
		/* reduce period to 100 ms to check for mmio init state */
		if (!hrtimer_active(&ipc_imem->startup_timer))
			hrtimer_start(&ipc_imem->startup_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
	} else if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
		   IPC_MEM_DEVICE_IPC_INIT) {
		/* Startup complete - disable timer */
		ipc_imem_hrtimer_stop(&ipc_imem->startup_timer);

		/* Prepare the MMIO space */
		ipc_mmio_config(ipc_imem->mmio);
		ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_RUNNING;
		ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
				  IPC_MEM_DEVICE_IPC_RUNNING);
	}

	return 0;
}

static enum hrtimer_restart ipc_imem_startup_timer_cb(struct hrtimer *hr_timer)
{
	enum hrtimer_restart result = HRTIMER_NORESTART;
	struct iosm_imem *ipc_imem =
		container_of(hr_timer, struct iosm_imem, startup_timer);

	if (ktime_to_ns(ipc_imem->hrtimer_period)) {
		hrtimer_forward_now(&ipc_imem->startup_timer,
				    ipc_imem->hrtimer_period);
		result = HRTIMER_RESTART;
	}

	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_startup_timer_cb, 0,
				 NULL, 0, false);
	return result;
}

/* Get the CP execution stage */
static enum ipc_mem_exec_stage
ipc_imem_get_exec_stage_buffered(struct iosm_imem *ipc_imem)
{
	return (ipc_imem->phase == IPC_P_RUN &&
		ipc_imem->ipc_status == IPC_MEM_DEVICE_IPC_RUNNING) ?
		       ipc_protocol_get_ap_exec_stage(ipc_imem->ipc_protocol) :
		       ipc_mmio_get_exec_stage(ipc_imem->mmio);
}

/* Callback to send the modem ready uevent */
static int ipc_imem_send_mdm_rdy_cb(struct iosm_imem *ipc_imem, int arg,
				    void *msg, size_t size)
{
	enum ipc_mem_exec_stage exec_stage =
		ipc_imem_get_exec_stage_buffered(ipc_imem);

	if (exec_stage == IPC_MEM_EXEC_STAGE_RUN)
		ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_READY);

	return 0;
}

/* This function is executed in a task context via an ipc_worker object,
 * as the creation or removal of device can't be done from tasklet.
 */
static void ipc_imem_run_state_worker(struct work_struct *instance)
{
	struct ipc_chnl_cfg chnl_cfg_port = { 0 };
	struct ipc_mux_config mux_cfg;
	struct iosm_imem *ipc_imem;
	u8 ctrl_chl_idx = 0;

	ipc_imem = container_of(instance, struct iosm_imem, run_state_worker);

	if (ipc_imem->phase != IPC_P_RUN) {
		dev_err(ipc_imem->dev,
			"Modem link down. Exit run state worker.");
		return;
	}

	if (test_and_clear_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag))
		ipc_devlink_deinit(ipc_imem->ipc_devlink);

	if (!ipc_imem_setup_cp_mux_cap_init(ipc_imem, &mux_cfg))
		ipc_imem->mux = ipc_mux_init(&mux_cfg, ipc_imem);

	ipc_imem_wwan_channel_init(ipc_imem, mux_cfg.protocol);
	if (ipc_imem->mux)
		ipc_imem->mux->wwan = ipc_imem->wwan;

	while (ctrl_chl_idx < IPC_MEM_MAX_CHANNELS) {
		if (!ipc_chnl_cfg_get(&chnl_cfg_port, ctrl_chl_idx)) {
			ipc_imem->ipc_port[ctrl_chl_idx] = NULL;
			if (ipc_imem->pcie->pci->device == INTEL_CP_DEVICE_7360_ID &&
			    chnl_cfg_port.wwan_port_type == WWAN_PORT_MBIM) {
				ctrl_chl_idx++;
				continue;
			}
			if (chnl_cfg_port.wwan_port_type != WWAN_PORT_UNKNOWN) {
				ipc_imem_channel_init(ipc_imem, IPC_CTYPE_CTRL,
						      chnl_cfg_port,
						      IRQ_MOD_OFF);
				ipc_imem->ipc_port[ctrl_chl_idx] =
					ipc_port_init(ipc_imem, chnl_cfg_port);
			}
		}
		ctrl_chl_idx++;
	}

	ipc_debugfs_init(ipc_imem);

	ipc_task_queue_send_task(ipc_imem, ipc_imem_send_mdm_rdy_cb, 0, NULL, 0,
				 false);

	/* Complete all memory stores before setting bit */
	smp_mb__before_atomic();

	set_bit(FULLY_FUNCTIONAL, &ipc_imem->flag);

	/* Complete all memory stores after setting bit */
	smp_mb__after_atomic();
}

static void ipc_imem_handle_irq(struct iosm_imem *ipc_imem, int irq)
{
	enum ipc_mem_device_ipc_state curr_ipc_status;
	enum ipc_phase old_phase, phase;
	bool retry_allocation = false;
	bool ul_pending = false;
	int i;

	if (irq != IMEM_IRQ_DONT_CARE)
		ipc_imem->ev_irq_pending[irq] = false;

	/* Get the internal phase. */
	old_phase = ipc_imem->phase;

	if (old_phase == IPC_P_OFF_REQ) {
		dev_dbg(ipc_imem->dev,
			"[%s]: Ignoring MSI. Deinit sequence in progress!",
			ipc_imem_phase_get_string(old_phase));
		return;
	}

	/* Update the phase controlled by CP. */
	phase = ipc_imem_phase_update(ipc_imem);

	switch (phase) {
	case IPC_P_RUN:
		if (!ipc_imem->enter_runtime) {
			/* Execute the transition from flash/boot to runtime. */
			ipc_imem->enter_runtime = 1;

			/* allow device to sleep, default value is
			 * IPC_HOST_SLEEP_ENTER_SLEEP
			 */
			ipc_imem_msg_send_device_sleep(ipc_imem,
						       ipc_imem->device_sleep);

			ipc_imem_msg_send_feature_set(ipc_imem,
						      IPC_MEM_INBAND_CRASH_SIG,
						      true);
		}

		curr_ipc_status =
			ipc_protocol_get_ipc_status(ipc_imem->ipc_protocol);

		/* check ipc_status change */
		if (ipc_imem->ipc_status != curr_ipc_status) {
			ipc_imem->ipc_status = curr_ipc_status;

			if (ipc_imem->ipc_status ==
			    IPC_MEM_DEVICE_IPC_RUNNING) {
				schedule_work(&ipc_imem->run_state_worker);
			}
		}

		/* Consider power management in the runtime phase. */
		ipc_imem_slp_control_exec(ipc_imem);
		break; /* Continue with skbuf processing. */

		/* Unexpected phases. */
	case IPC_P_OFF:
	case IPC_P_OFF_REQ:
		dev_err(ipc_imem->dev, "confused phase %s",
			ipc_imem_phase_get_string(phase));
		return;

	case IPC_P_PSI:
		if (old_phase != IPC_P_ROM)
			break;

		fallthrough;
		/* On CP the PSI phase is already active. */

	case IPC_P_ROM:
		/* Before CP ROM driver starts the PSI image, it sets
		 * the exit_code field on the doorbell scratchpad and
		 * triggers the irq.
		 */
		ipc_imem_rom_irq_exec(ipc_imem);
		return;

	default:
		break;
	}

	/* process message ring */
	ipc_protocol_msg_process(ipc_imem, irq);

	/* process all open pipes */
	for (i = 0; i < IPC_MEM_MAX_CHANNELS; i++) {
		struct ipc_pipe *ul_pipe = &ipc_imem->channels[i].ul_pipe;
		struct ipc_pipe *dl_pipe = &ipc_imem->channels[i].dl_pipe;

		if (dl_pipe->is_open &&
		    (irq == IMEM_IRQ_DONT_CARE || irq == dl_pipe->irq)) {
			ipc_imem_dl_pipe_process(ipc_imem, dl_pipe);

			if (dl_pipe->nr_of_queued_entries == 0)
				retry_allocation = true;
		}

		if (ul_pipe->is_open)
			ipc_imem_ul_pipe_process(ipc_imem, ul_pipe);
	}

	/* Try to generate new ADB or ADGH. */
	if (ipc_mux_ul_data_encode(ipc_imem->mux)) {
		ipc_imem_td_update_timer_start(ipc_imem);
		if (ipc_imem->mux->protocol == MUX_AGGREGATION)
			ipc_imem_adb_timer_start(ipc_imem);
	}

	/* Continue the send procedure with accumulated SIO or NETIF packets.
	 * Reset the debounce flags.
	 */
	ul_pending |= ipc_imem_ul_write_td(ipc_imem);

	/* if UL data is pending restart TD update timer */
	if (ul_pending) {
		ipc_imem->hrtimer_period =
		ktime_set(0, TD_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
		if (!hrtimer_active(&ipc_imem->tdupdate_timer))
			hrtimer_start(&ipc_imem->tdupdate_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
	}

	/* If CP has executed the transition
	 * from IPC_INIT to IPC_RUNNING in the PSI
	 * phase, wake up the flash app to open the pipes.
	 */
	if ((phase == IPC_P_PSI || phase == IPC_P_EBL) &&
	    ipc_imem->ipc_requested_state == IPC_MEM_DEVICE_IPC_RUNNING &&
	    ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
						IPC_MEM_DEVICE_IPC_RUNNING) {
		complete(&ipc_imem->ipc_devlink->devlink_sio.channel->ul_sem);
	}

	/* Reset the expected CP state. */
	ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_DONT_CARE;

	if (retry_allocation) {
		ipc_imem->hrtimer_period =
		ktime_set(0, IPC_TD_ALLOC_TIMER_PERIOD_MS * 1000 * 1000ULL);
		if (!hrtimer_active(&ipc_imem->td_alloc_timer))
			hrtimer_start(&ipc_imem->td_alloc_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
	}
}

/* Callback by tasklet for handling interrupt events. */
static int ipc_imem_tq_irq_cb(struct iosm_imem *ipc_imem, int arg, void *msg,
			      size_t size)
{
	ipc_imem_handle_irq(ipc_imem, arg);

	return 0;
}

void ipc_imem_ul_send(struct iosm_imem *ipc_imem)
{
	/* start doorbell irq delay timer if UL is pending */
	if (ipc_imem_ul_write_td(ipc_imem))
		ipc_imem_td_update_timer_start(ipc_imem);
}

/* Check the execution stage and update the AP phase */
static enum ipc_phase ipc_imem_phase_update_check(struct iosm_imem *ipc_imem,
						  enum ipc_mem_exec_stage stage)
{
	switch (stage) {
	case IPC_MEM_EXEC_STAGE_BOOT:
		if (ipc_imem->phase != IPC_P_ROM) {
			/* Send this event only once */
			ipc_uevent_send(ipc_imem->dev, UEVENT_ROM_READY);
		}

		ipc_imem->phase = IPC_P_ROM;
		break;

	case IPC_MEM_EXEC_STAGE_PSI:
		ipc_imem->phase = IPC_P_PSI;
		break;

	case IPC_MEM_EXEC_STAGE_EBL:
		ipc_imem->phase = IPC_P_EBL;
		break;

	case IPC_MEM_EXEC_STAGE_RUN:
		if (ipc_imem->phase != IPC_P_RUN &&
		    ipc_imem->ipc_status == IPC_MEM_DEVICE_IPC_RUNNING) {
			ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_READY);
		}
		ipc_imem->phase = IPC_P_RUN;
		break;

	case IPC_MEM_EXEC_STAGE_CRASH:
		if (ipc_imem->phase != IPC_P_CRASH)
			ipc_uevent_send(ipc_imem->dev, UEVENT_CRASH);

		ipc_imem->phase = IPC_P_CRASH;
		break;

	case IPC_MEM_EXEC_STAGE_CD_READY:
		if (ipc_imem->phase != IPC_P_CD_READY)
			ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY);
		ipc_imem->phase = IPC_P_CD_READY;
		break;

	default:
		/* unknown exec stage:
		 * assume that link is down and send info to listeners
		 */
		ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY_LINK_DOWN);
		break;
	}

	return ipc_imem->phase;
}

/* Send msg to device to open pipe */
static bool ipc_imem_pipe_open(struct iosm_imem *ipc_imem,
			       struct ipc_pipe *pipe)
{
	union ipc_msg_prep_args prep_args = {
		.pipe_open.pipe = pipe,
	};

	if (ipc_protocol_msg_send(ipc_imem->ipc_protocol,
				  IPC_MSG_PREP_PIPE_OPEN, &prep_args) == 0)
		pipe->is_open = true;

	return pipe->is_open;
}

/* Allocates the TDs for the given pipe along with firing HP update DB. */
static int ipc_imem_tq_pipe_td_alloc(struct iosm_imem *ipc_imem, int arg,
				     void *msg, size_t size)
{
	struct ipc_pipe *dl_pipe = msg;
	bool processed = false;
	int i;

	for (i = 0; i < dl_pipe->nr_of_entries - 1; i++)
		processed |= ipc_imem_dl_skb_alloc(ipc_imem, dl_pipe);

	/* Trigger the doorbell irq to inform CP that new downlink buffers are
	 * available.
	 */
	if (processed)
		ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol, arg);

	return 0;
}
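
/* Note (illustrative): only nr_of_entries - 1 TDs are prepared above. This
 * is the usual circular-buffer convention of keeping one slot unused so
 * that head == tail unambiguously means "ring empty" instead of "ring
 * full". The processed_td_cnt == pipe->nr_of_entries - 1 test in
 * ipc_imem_dl_pipe_process() relies on the same convention to detect that
 * the whole ring was consumed in one pass.
 */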

static enum hrtimer_restart
ipc_imem_td_update_timer_cb(struct hrtimer *hr_timer)
{
	struct iosm_imem *ipc_imem =
		container_of(hr_timer, struct iosm_imem, tdupdate_timer);

	ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_td_update_timer_cb, 0,
				 NULL, 0, false);
	return HRTIMER_NORESTART;
}

/* Get the CP execution state and map it to the AP phase. */
enum ipc_phase ipc_imem_phase_update(struct iosm_imem *ipc_imem)
{
	enum ipc_mem_exec_stage exec_stage =
				ipc_imem_get_exec_stage_buffered(ipc_imem);
	/* If the CP stage is undef, return the internal precalculated phase. */
	return ipc_imem->phase == IPC_P_OFF_REQ ?
		       ipc_imem->phase :
		       ipc_imem_phase_update_check(ipc_imem, exec_stage);
}

const char *ipc_imem_phase_get_string(enum ipc_phase phase)
{
	switch (phase) {
	case IPC_P_RUN:
		return "A-RUN";

	case IPC_P_OFF:
		return "A-OFF";

	case IPC_P_ROM:
		return "A-ROM";

	case IPC_P_PSI:
		return "A-PSI";

	case IPC_P_EBL:
		return "A-EBL";

	case IPC_P_CRASH:
		return "A-CRASH";

	case IPC_P_CD_READY:
		return "A-CD_READY";

	case IPC_P_OFF_REQ:
		return "A-OFF_REQ";

	default:
		return "A-???";
	}
}

void ipc_imem_pipe_close(struct iosm_imem *ipc_imem, struct ipc_pipe *pipe)
{
	union ipc_msg_prep_args prep_args = { .pipe_close.pipe = pipe };

	pipe->is_open = false;
	ipc_protocol_msg_send(ipc_imem->ipc_protocol, IPC_MSG_PREP_PIPE_CLOSE,
			      &prep_args);

	ipc_imem_pipe_cleanup(ipc_imem, pipe);
}

void ipc_imem_channel_close(struct iosm_imem *ipc_imem, int channel_id)
{
	struct ipc_mem_channel *channel;

	if (channel_id < 0 || channel_id >= ipc_imem->nr_of_channels) {
		dev_err(ipc_imem->dev, "invalid channel id %d", channel_id);
		return;
	}

	channel = &ipc_imem->channels[channel_id];

	if (channel->state == IMEM_CHANNEL_FREE) {
		dev_err(ipc_imem->dev, "ch[%d]: invalid channel state %d",
			channel_id, channel->state);
		return;
	}

	/* Free only the channel id in the CP power off mode. */
	if (channel->state == IMEM_CHANNEL_RESERVED)
		/* Release only the channel id. */
		goto channel_free;

	if (ipc_imem->phase == IPC_P_RUN) {
		ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
		ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);
	}

	ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);
	ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);

channel_free:
	ipc_imem_channel_free(channel);
}

struct ipc_mem_channel *ipc_imem_channel_open(struct iosm_imem *ipc_imem,
					      int channel_id, u32 db_id)
{
	struct ipc_mem_channel *channel;

	if (channel_id < 0 || channel_id >= IPC_MEM_MAX_CHANNELS) {
		dev_err(ipc_imem->dev, "invalid channel ID: %d", channel_id);
		return NULL;
	}

	channel = &ipc_imem->channels[channel_id];

	channel->state = IMEM_CHANNEL_ACTIVE;

	if (!ipc_imem_pipe_open(ipc_imem, &channel->ul_pipe))
		goto ul_pipe_err;

	if (!ipc_imem_pipe_open(ipc_imem, &channel->dl_pipe))
		goto dl_pipe_err;

	/* Allocate the downlink buffers in tasklet context. */
	if (ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_pipe_td_alloc, db_id,
				     &channel->dl_pipe, 0, false)) {
		dev_err(ipc_imem->dev, "td allocation failed : %d", channel_id);
		goto task_failed;
	}

	/* Active channel. */
	return channel;
task_failed:
	ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);
dl_pipe_err:
	ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
ul_pipe_err:
	ipc_imem_channel_free(channel);
	return NULL;
}

void ipc_imem_pm_suspend(struct iosm_imem *ipc_imem)
{
	ipc_protocol_suspend(ipc_imem->ipc_protocol);
}

void ipc_imem_pm_s2idle_sleep(struct iosm_imem *ipc_imem, bool sleep)
{
	ipc_protocol_s2idle_sleep(ipc_imem->ipc_protocol, sleep);
}

void ipc_imem_pm_resume(struct iosm_imem *ipc_imem)
{
	enum ipc_mem_exec_stage stage;

	if (ipc_protocol_resume(ipc_imem->ipc_protocol)) {
		stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
		ipc_imem_phase_update_check(ipc_imem, stage);
	}
}

void ipc_imem_channel_free(struct ipc_mem_channel *channel)
{
	/* Reset dynamic channel elements. */
	channel->state = IMEM_CHANNEL_FREE;
}

int ipc_imem_channel_alloc(struct iosm_imem *ipc_imem, int index,
			   enum ipc_ctype ctype)
{
	struct ipc_mem_channel *channel;
	int i;

	/* Find channel of given type/index */
	for (i = 0; i < ipc_imem->nr_of_channels; i++) {
		channel = &ipc_imem->channels[i];
		if (channel->ctype == ctype && channel->index == index)
			break;
	}

	if (i >= ipc_imem->nr_of_channels) {
		dev_dbg(ipc_imem->dev,
			"no channel definition for index=%d ctype=%d", index,
			ctype);
		return -ECHRNG;
	}

	if (ipc_imem->channels[i].state != IMEM_CHANNEL_FREE) {
		dev_dbg(ipc_imem->dev, "channel is in use");
		return -EBUSY;
	}

	if (channel->ctype == IPC_CTYPE_WWAN &&
	    index == IPC_MEM_MUX_IP_CH_IF_ID)
		channel->if_id = index;

	channel->channel_id = index;
	channel->state = IMEM_CHANNEL_RESERVED;

	return i;
}

void ipc_imem_channel_init(struct iosm_imem *ipc_imem, enum ipc_ctype ctype,
			   struct ipc_chnl_cfg chnl_cfg, u32 irq_moderation)
{
	struct ipc_mem_channel *channel;

	if (chnl_cfg.ul_pipe >= IPC_MEM_MAX_PIPES ||
	    chnl_cfg.dl_pipe >= IPC_MEM_MAX_PIPES) {
		dev_err(ipc_imem->dev, "invalid pipe: ul_pipe=%d, dl_pipe=%d",
			chnl_cfg.ul_pipe, chnl_cfg.dl_pipe);
		return;
	}

	if (ipc_imem->nr_of_channels >= IPC_MEM_MAX_CHANNELS) {
		dev_err(ipc_imem->dev, "too many channels");
		return;
	}

	channel = &ipc_imem->channels[ipc_imem->nr_of_channels];
	channel->channel_id = ipc_imem->nr_of_channels;
	channel->ctype = ctype;
	channel->index = chnl_cfg.id;
	channel->net_err_count = 0;
	channel->state = IMEM_CHANNEL_FREE;
	ipc_imem->nr_of_channels++;

	ipc_imem_channel_update(ipc_imem, channel->channel_id, chnl_cfg,
				IRQ_MOD_OFF);

	skb_queue_head_init(&channel->ul_list);

	init_completion(&channel->ul_sem);
}

void ipc_imem_channel_update(struct iosm_imem *ipc_imem, int id,
			     struct ipc_chnl_cfg chnl_cfg, u32 irq_moderation)
{
	struct ipc_mem_channel *channel;

	if (id < 0 || id >= ipc_imem->nr_of_channels) {
		dev_err(ipc_imem->dev, "invalid channel id %d", id);
		return;
	}

	channel = &ipc_imem->channels[id];

	if (channel->state != IMEM_CHANNEL_FREE &&
	    channel->state != IMEM_CHANNEL_RESERVED) {
		dev_err(ipc_imem->dev, "invalid channel state %d",
			channel->state);
		return;
	}

	channel->ul_pipe.nr_of_entries = chnl_cfg.ul_nr_of_entries;
	channel->ul_pipe.pipe_nr = chnl_cfg.ul_pipe;
	channel->ul_pipe.is_open = false;
	channel->ul_pipe.irq = IPC_UL_PIPE_IRQ_VECTOR;
	channel->ul_pipe.channel = channel;
	channel->ul_pipe.dir = IPC_MEM_DIR_UL;
	channel->ul_pipe.accumulation_backoff = chnl_cfg.accumulation_backoff;
	channel->ul_pipe.irq_moderation = irq_moderation;
	channel->ul_pipe.buf_size = 0;

	channel->dl_pipe.nr_of_entries = chnl_cfg.dl_nr_of_entries;
	channel->dl_pipe.pipe_nr = chnl_cfg.dl_pipe;
	channel->dl_pipe.is_open = false;
	channel->dl_pipe.irq = IPC_DL_PIPE_IRQ_VECTOR;
	channel->dl_pipe.channel = channel;
	channel->dl_pipe.dir = IPC_MEM_DIR_DL;
	channel->dl_pipe.accumulation_backoff = chnl_cfg.accumulation_backoff;
	channel->dl_pipe.irq_moderation = irq_moderation;
	channel->dl_pipe.buf_size = chnl_cfg.dl_buf_size;
}
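
/* Usage sketch (illustrative, identifiers partly assumed): a consumer such
 * as the port layer reserves a channel slot first and then opens its pipes,
 * roughly:
 *
 *	int id = ipc_imem_channel_alloc(ipc_imem, chnl_cfg.id,
 *					IPC_CTYPE_CTRL);
 *	struct ipc_mem_channel *ch = NULL;
 *
 *	if (id >= 0)
 *		ch = ipc_imem_channel_open(ipc_imem, id, IPC_HP_CDEV_OPEN);
 *
 * i.e. the channel state machine is FREE -> RESERVED (alloc) -> ACTIVE
 * (open), and ipc_imem_channel_close() returns the slot to FREE. The
 * doorbell id IPC_HP_CDEV_OPEN is an assumption taken from the wider IOSM
 * driver, not from this file.
 */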

static void ipc_imem_channel_reset(struct iosm_imem *ipc_imem)
{
	int i;

	for (i = 0; i < ipc_imem->nr_of_channels; i++) {
		struct ipc_mem_channel *channel;

		channel = &ipc_imem->channels[i];

		ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);
		ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);

		ipc_imem_channel_free(channel);
	}
}

void ipc_imem_pipe_cleanup(struct iosm_imem *ipc_imem, struct ipc_pipe *pipe)
{
	struct sk_buff *skb;

	/* Force pipe to closed state also when not explicitly closed through
	 * ipc_imem_pipe_close()
	 */
	pipe->is_open = false;

	/* Empty the uplink skb accumulator. */
	while ((skb = skb_dequeue(&pipe->channel->ul_list)))
		ipc_pcie_kfree_skb(ipc_imem->pcie, skb);

	ipc_protocol_pipe_cleanup(ipc_imem->ipc_protocol, pipe);
}

/* Send IPC protocol uninit to the modem when Link is active. */
static void ipc_imem_device_ipc_uninit(struct iosm_imem *ipc_imem)
{
	int timeout = IPC_MODEM_UNINIT_TIMEOUT_MS;
	enum ipc_mem_device_ipc_state ipc_state;

	/* When the PCIe link is up, set the modem to IPC_UNINIT; if the PCIe
	 * link is down, skip this step.
	 */
	if (ipc_pcie_check_data_link_active(ipc_imem->pcie)) {
		/* set modem to UNINIT
		 * (in case we want to reload the AP driver without resetting
		 * the modem)
		 */
		ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
				  IPC_MEM_DEVICE_IPC_UNINIT);
		ipc_state = ipc_mmio_get_ipc_state(ipc_imem->mmio);

		/* Wait for maximum 30ms to allow the Modem to uninitialize the
		 * protocol.
		 */
		while ((ipc_state <= IPC_MEM_DEVICE_IPC_DONT_CARE) &&
		       (ipc_state != IPC_MEM_DEVICE_IPC_UNINIT) &&
		       (timeout > 0)) {
			usleep_range(1000, 1250);
			timeout--;
			ipc_state = ipc_mmio_get_ipc_state(ipc_imem->mmio);
		}
	}
}

void ipc_imem_cleanup(struct iosm_imem *ipc_imem)
{
	ipc_imem->phase = IPC_P_OFF_REQ;

	/* forward MDM_NOT_READY to listeners */
	ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_NOT_READY);

	hrtimer_cancel(&ipc_imem->td_alloc_timer);
	hrtimer_cancel(&ipc_imem->tdupdate_timer);
	hrtimer_cancel(&ipc_imem->fast_update_timer);
	hrtimer_cancel(&ipc_imem->startup_timer);

	/* cancel the workqueue */
	cancel_work_sync(&ipc_imem->run_state_worker);

	if (test_and_clear_bit(FULLY_FUNCTIONAL, &ipc_imem->flag)) {
		ipc_mux_deinit(ipc_imem->mux);
		ipc_debugfs_deinit(ipc_imem);
		ipc_wwan_deinit(ipc_imem->wwan);
		ipc_port_deinit(ipc_imem->ipc_port);
	}

	if (test_and_clear_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag))
		ipc_devlink_deinit(ipc_imem->ipc_devlink);

	ipc_imem_device_ipc_uninit(ipc_imem);
	ipc_imem_channel_reset(ipc_imem);

	ipc_protocol_deinit(ipc_imem->ipc_protocol);
	ipc_task_deinit(ipc_imem->ipc_task);

	kfree(ipc_imem->ipc_task);
	kfree(ipc_imem->mmio);

	ipc_imem->phase = IPC_P_OFF;
}

/* After CP has unblocked the PCIe link, save the start address of the doorbell
 * scratchpad and prepare the shared memory region. If the flashing to RAM
 * procedure shall be executed, copy the chip information from the doorbell
 * scratchpad to the application buffer and wake up the flash app.
 */
static int ipc_imem_config(struct iosm_imem *ipc_imem)
{
	enum ipc_phase phase;

	/* Initialize the semaphore for the blocking read UL/DL transfer. */
	init_completion(&ipc_imem->ul_pend_sem);

	init_completion(&ipc_imem->dl_pend_sem);

	/* clear internal flags */
	ipc_imem->ipc_status = IPC_MEM_DEVICE_IPC_UNINIT;
	ipc_imem->enter_runtime = 0;

	phase = ipc_imem_phase_update(ipc_imem);

	/* Either CP shall be in the power off or power on phase. */
	switch (phase) {
	case IPC_P_ROM:
		ipc_imem->hrtimer_period = ktime_set(0, 1000 * 1000 * 1000ULL);
		/* poll execution stage (for delayed start, e.g. NAND) */
		if (!hrtimer_active(&ipc_imem->startup_timer))
			hrtimer_start(&ipc_imem->startup_timer,
				      ipc_imem->hrtimer_period,
				      HRTIMER_MODE_REL);
		return 0;

	case IPC_P_PSI:
	case IPC_P_EBL:
	case IPC_P_RUN:
		/* The initial IPC state is IPC_MEM_DEVICE_IPC_UNINIT. */
		ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_UNINIT;

		/* Verify the expected initial state. */
		if (ipc_imem->ipc_requested_state ==
		    ipc_mmio_get_ipc_state(ipc_imem->mmio)) {
			ipc_imem_ipc_init_check(ipc_imem);

			return 0;
		}
		dev_err(ipc_imem->dev,
			"ipc_status(%d) != IPC_MEM_DEVICE_IPC_UNINIT",
			ipc_mmio_get_ipc_state(ipc_imem->mmio));
		break;
	case IPC_P_CRASH:
	case IPC_P_CD_READY:
		dev_dbg(ipc_imem->dev,
			"Modem is in phase %d, reset Modem to collect CD",
			phase);
		return 0;
	default:
		dev_err(ipc_imem->dev, "unexpected operation phase %d", phase);
		break;
	}

	complete(&ipc_imem->dl_pend_sem);
	complete(&ipc_imem->ul_pend_sem);
	ipc_imem->phase = IPC_P_OFF;
	return -EIO;
}

/* Pass the dev ptr to the shared memory driver and request the entry points */
struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
				void __iomem *mmio, struct device *dev)
{
	struct iosm_imem *ipc_imem = kzalloc(sizeof(*pcie->imem), GFP_KERNEL);
	enum ipc_mem_exec_stage stage;

	if (!ipc_imem)
		return NULL;

	/* Save the device address. */
	ipc_imem->pcie = pcie;
	ipc_imem->dev = dev;

	ipc_imem->pci_device_id = device_id;

	ipc_imem->cp_version = 0;
	ipc_imem->device_sleep = IPC_HOST_SLEEP_ENTER_SLEEP;

	/* Reset the max number of configured channels */
	ipc_imem->nr_of_channels = 0;

	/* allocate IPC MMIO */
	ipc_imem->mmio = ipc_mmio_init(mmio, ipc_imem->dev);
	if (!ipc_imem->mmio) {
		dev_err(ipc_imem->dev, "failed to initialize mmio region");
		goto mmio_init_fail;
	}

	ipc_imem->ipc_task = kzalloc(sizeof(*ipc_imem->ipc_task),
				     GFP_KERNEL);

	/* Create tasklet for event handling */
	if (!ipc_imem->ipc_task)
		goto ipc_task_fail;

	if (ipc_task_init(ipc_imem->ipc_task))
		goto ipc_task_init_fail;

	ipc_imem->ipc_task->dev = ipc_imem->dev;

	INIT_WORK(&ipc_imem->run_state_worker, ipc_imem_run_state_worker);

	ipc_imem->ipc_protocol = ipc_protocol_init(ipc_imem);

	if (!ipc_imem->ipc_protocol)
		goto protocol_init_fail;

	/* The phase is set to power off. */
	ipc_imem->phase = IPC_P_OFF;

	hrtimer_init(&ipc_imem->startup_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	ipc_imem->startup_timer.function = ipc_imem_startup_timer_cb;

	hrtimer_init(&ipc_imem->tdupdate_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	ipc_imem->tdupdate_timer.function = ipc_imem_td_update_timer_cb;

	hrtimer_init(&ipc_imem->fast_update_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	ipc_imem->fast_update_timer.function = ipc_imem_fast_update_timer_cb;

	hrtimer_init(&ipc_imem->td_alloc_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	ipc_imem->td_alloc_timer.function = ipc_imem_td_alloc_timer_cb;

	hrtimer_init(&ipc_imem->adb_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	ipc_imem->adb_timer.function = ipc_imem_adb_timer_cb;

	if (ipc_imem_config(ipc_imem)) {
		dev_err(ipc_imem->dev, "failed to initialize the imem");
		goto imem_config_fail;
	}

	stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
	if (stage == IPC_MEM_EXEC_STAGE_BOOT) {
		/* Alloc and Register devlink */
		ipc_imem->ipc_devlink = ipc_devlink_init(ipc_imem);
		if (!ipc_imem->ipc_devlink) {
			dev_err(ipc_imem->dev, "Devlink register failed");
			goto imem_config_fail;
		}

		if (ipc_flash_link_establish(ipc_imem))
			goto devlink_channel_fail;

		set_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag);
	}
	return ipc_imem;
devlink_channel_fail:
	ipc_devlink_deinit(ipc_imem->ipc_devlink);
imem_config_fail:
	hrtimer_cancel(&ipc_imem->td_alloc_timer);
	hrtimer_cancel(&ipc_imem->fast_update_timer);
	hrtimer_cancel(&ipc_imem->tdupdate_timer);
	hrtimer_cancel(&ipc_imem->startup_timer);
protocol_init_fail:
	cancel_work_sync(&ipc_imem->run_state_worker);
	ipc_task_deinit(ipc_imem->ipc_task);
ipc_task_init_fail:
	kfree(ipc_imem->ipc_task);
ipc_task_fail:
	kfree(ipc_imem->mmio);
mmio_init_fail:
	kfree(ipc_imem);
	return NULL;
}
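
/* Lifecycle sketch (illustrative, locals hypothetical): ipc_imem_init() is
 * called once per device by the bus layer (the PCIe probe path in the
 * upstream IOSM driver) and is paired with ipc_imem_cleanup() on removal:
 *
 *	pcie->imem = ipc_imem_init(pcie, pci->device, pcie->scratchpad,
 *				   &pci->dev);
 *	if (!pcie->imem)
 *		return -EIO;
 *	...
 *	ipc_imem_cleanup(pcie->imem);
 *
 * Note the error unwinding above: each goto label undoes exactly the steps
 * that completed before the failure, in reverse order of setup.
 */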

void ipc_imem_irq_process(struct iosm_imem *ipc_imem, int irq)
{
	/* Debounce IPC_EV_IRQ. */
	if (ipc_imem && !ipc_imem->ev_irq_pending[irq]) {
		ipc_imem->ev_irq_pending[irq] = true;
		ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_irq_cb, irq,
					 NULL, 0, false);
	}
}

void ipc_imem_td_update_timer_suspend(struct iosm_imem *ipc_imem, bool suspend)
{
	ipc_imem->td_update_timer_suspended = suspend;
}

/* Verify the CP execution state, copy the chip info,
 * change the execution phase to ROM
 */
static int ipc_imem_devlink_trigger_chip_info_cb(struct iosm_imem *ipc_imem,
						 int arg, void *msg,
						 size_t msgsize)
{
	enum ipc_mem_exec_stage stage;
	struct sk_buff *skb;
	int rc = -EINVAL;
	size_t size;

	/* Test the CP execution state. */
	stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
	if (stage != IPC_MEM_EXEC_STAGE_BOOT) {
		dev_err(ipc_imem->dev,
			"Execution_stage: expected BOOT, received = %X", stage);
		goto trigger_chip_info_fail;
	}
	/* Allocate a new sk buf for the chip info. */
	size = ipc_imem->mmio->chip_info_size;
	if (size > IOSM_CHIP_INFO_SIZE_MAX)
		goto trigger_chip_info_fail;

	skb = ipc_pcie_alloc_local_skb(ipc_imem->pcie, GFP_ATOMIC, size);
	if (!skb) {
		dev_err(ipc_imem->dev, "exhausted skbuf kernel DL memory");
		rc = -ENOMEM;
		goto trigger_chip_info_fail;
	}
	/* Copy the chip info characters into the ipc_skb. */
	ipc_mmio_copy_chip_info(ipc_imem->mmio, skb_put(skb, size), size);
	/* First change to the ROM boot phase. */
	dev_dbg(ipc_imem->dev, "execution_stage[%X] eq. BOOT", stage);
	ipc_imem->phase = ipc_imem_phase_update(ipc_imem);
	ipc_imem_sys_devlink_notify_rx(ipc_imem->ipc_devlink, skb);
	rc = 0;
trigger_chip_info_fail:
	return rc;
}

int ipc_imem_devlink_trigger_chip_info(struct iosm_imem *ipc_imem)
{
	return ipc_task_queue_send_task(ipc_imem,
					ipc_imem_devlink_trigger_chip_info_cb,
					0, NULL, 0, true);
}