cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

t7xx_modem_ops.c (19673B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Haijun Liu <haijun.liu@mediatek.com>
 *  Eliot Lee <eliot.lee@intel.com>
 *  Moises Veleta <moises.veleta@intel.com>
 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
 *
 * Contributors:
 *  Amir Hanania <amir.hanania@intel.com>
 *  Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
 */

#include <linux/acpi.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/irqreturn.h>
#include <linux/kthread.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "t7xx_cldma.h"
#include "t7xx_hif_cldma.h"
#include "t7xx_mhccif.h"
#include "t7xx_modem_ops.h"
#include "t7xx_netdev.h"
#include "t7xx_pci.h"
#include "t7xx_pcie_mac.h"
#include "t7xx_port.h"
#include "t7xx_port_proxy.h"
#include "t7xx_reg.h"
#include "t7xx_state_monitor.h"

#define RT_ID_MD_PORT_ENUM	0
/* Modem feature query identification code - "ICCC" */
#define MD_FEATURE_QUERY_ID	0x49434343

#define FEATURE_VER		GENMASK(7, 4)
#define FEATURE_MSK		GENMASK(3, 0)

#define RGU_RESET_DELAY_MS	10
#define PORT_RESET_DELAY_MS	2000
#define EX_HS_TIMEOUT_MS	5000
#define EX_HS_POLL_DELAY_MS	10

enum mtk_feature_support_type {
	MTK_FEATURE_DOES_NOT_EXIST,
	MTK_FEATURE_NOT_SUPPORTED,
	MTK_FEATURE_MUST_BE_SUPPORTED,
};

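/* Return the D2H software interrupt status reported by MHCCIF, masked to the
 * bits this driver handles.
 */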
static unsigned int t7xx_get_interrupt_status(struct t7xx_pci_dev *t7xx_dev)
{
	return t7xx_mhccif_read_sw_int_sts(t7xx_dev) & D2H_SW_INT_MASK;
}

/**
 * t7xx_pci_mhccif_isr() - Process MHCCIF interrupts.
 * @t7xx_dev: MTK device.
 *
 * Check the interrupt status and queue commands accordingly.
 *
 * Returns:
 ** 0		- Success.
 ** -EINVAL	- Failure to get FSM control.
 */
int t7xx_pci_mhccif_isr(struct t7xx_pci_dev *t7xx_dev)
{
	struct t7xx_modem *md = t7xx_dev->md;
	struct t7xx_fsm_ctl *ctl;
	unsigned int int_sta;
	int ret = 0;
	u32 mask;

	ctl = md->fsm_ctl;
	if (!ctl) {
		dev_err_ratelimited(&t7xx_dev->pdev->dev,
				    "MHCCIF interrupt received before initializing MD monitor\n");
		return -EINVAL;
	}

	spin_lock_bh(&md->exp_lock);
	int_sta = t7xx_get_interrupt_status(t7xx_dev);
	md->exp_id |= int_sta;
	if (md->exp_id & D2H_INT_EXCEPTION_INIT) {
		if (ctl->md_state == MD_STATE_INVALID ||
		    ctl->md_state == MD_STATE_WAITING_FOR_HS1 ||
		    ctl->md_state == MD_STATE_WAITING_FOR_HS2 ||
		    ctl->md_state == MD_STATE_READY) {
			md->exp_id &= ~D2H_INT_EXCEPTION_INIT;
			ret = t7xx_fsm_recv_md_intr(ctl, MD_IRQ_CCIF_EX);
		}
	} else if (md->exp_id & D2H_INT_PORT_ENUM) {
		md->exp_id &= ~D2H_INT_PORT_ENUM;

		if (ctl->curr_state == FSM_STATE_INIT || ctl->curr_state == FSM_STATE_PRE_START ||
		    ctl->curr_state == FSM_STATE_STOPPED)
			ret = t7xx_fsm_recv_md_intr(ctl, MD_IRQ_PORT_ENUM);
	} else if (ctl->md_state == MD_STATE_WAITING_FOR_HS1) {
		mask = t7xx_mhccif_mask_get(t7xx_dev);
		if ((md->exp_id & D2H_INT_ASYNC_MD_HK) && !(mask & D2H_INT_ASYNC_MD_HK)) {
			md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
			queue_work(md->handshake_wq, &md->handshake_work);
		}
	}
	spin_unlock_bh(&md->exp_lock);

	return ret;
}

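/* Acknowledge the device-side reset interrupt by writing the TOPRGU PCIe IRQ
 * status value back to the same register.
 */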
static void t7xx_clr_device_irq_via_pcie(struct t7xx_pci_dev *t7xx_dev)
{
	struct t7xx_addr_base *pbase_addr = &t7xx_dev->base_addr;
	void __iomem *reset_pcie_reg;
	u32 val;

	reset_pcie_reg = pbase_addr->pcie_ext_reg_base + TOPRGU_CH_PCIE_IRQ_STA -
			  pbase_addr->pcie_dev_reg_trsl_addr;
	val = ioread32(reset_pcie_reg);
	iowrite32(val, reset_pcie_reg);
}

void t7xx_clear_rgu_irq(struct t7xx_pci_dev *t7xx_dev)
{
	/* Clear L2 */
	t7xx_clr_device_irq_via_pcie(t7xx_dev);
	/* Clear L1 */
	t7xx_pcie_mac_clear_int_status(t7xx_dev, SAP_RGU_INT);
}

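/* Evaluate the named ACPI method on the device to reset the modem. Compiles
 * to a no-op returning success when CONFIG_ACPI is not set.
 */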
static int t7xx_acpi_reset(struct t7xx_pci_dev *t7xx_dev, char *fn_name)
{
#ifdef CONFIG_ACPI
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct device *dev = &t7xx_dev->pdev->dev;
	acpi_status acpi_ret;
	acpi_handle handle;

	handle = ACPI_HANDLE(dev);
	if (!handle) {
		dev_err(dev, "ACPI handle not found\n");
		return -EFAULT;
	}

	if (!acpi_has_method(handle, fn_name)) {
		dev_err(dev, "%s method not found\n", fn_name);
		return -EFAULT;
	}

	acpi_ret = acpi_evaluate_object(handle, fn_name, NULL, &buffer);
	if (ACPI_FAILURE(acpi_ret)) {
		dev_err(dev, "%s method fail: %s\n", fn_name, acpi_format_exception(acpi_ret));
		return -EFAULT;
	}

#endif
	return 0;
}

int t7xx_acpi_fldr_func(struct t7xx_pci_dev *t7xx_dev)
{
	return t7xx_acpi_reset(t7xx_dev, "_RST");
}

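/* Trigger a PLDR or FLDR reset through ACPI, depending on the reset type
 * advertised in the T7XX_PCIE_MISC_DEV_STATUS register.
 */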
static void t7xx_reset_device_via_pmic(struct t7xx_pci_dev *t7xx_dev)
{
	u32 val;

	val = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
	if (val & MISC_RESET_TYPE_PLDR)
		t7xx_acpi_reset(t7xx_dev, "MRST._RST");
	else if (val & MISC_RESET_TYPE_FLDR)
		t7xx_acpi_fldr_func(t7xx_dev);
}

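/* Threaded half of the RGU ISR: give the device a short settling delay and
 * then reset it via the PMIC/ACPI path.
 */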
static irqreturn_t t7xx_rgu_isr_thread(int irq, void *data)
{
	struct t7xx_pci_dev *t7xx_dev = data;

	msleep(RGU_RESET_DELAY_MS);
	t7xx_reset_device_via_pmic(t7xx_dev);
	return IRQ_HANDLED;
}

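/* Hard-IRQ half of the RGU ISR: acknowledge the interrupt and, when RGU
 * handling is enabled, flag the assertion and defer the reset to the
 * threaded handler.
 */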
static irqreturn_t t7xx_rgu_isr_handler(int irq, void *data)
{
	struct t7xx_pci_dev *t7xx_dev = data;
	struct t7xx_modem *modem;

	t7xx_clear_rgu_irq(t7xx_dev);
	if (!t7xx_dev->rgu_pci_irq_en)
		return IRQ_HANDLED;

	modem = t7xx_dev->md;
	modem->rgu_irq_asserted = true;
	t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
	return IRQ_WAKE_THREAD;
}

static void t7xx_pcie_register_rgu_isr(struct t7xx_pci_dev *t7xx_dev)
{
	/* Registers RGU callback ISR with PCIe driver */
	t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);
	t7xx_pcie_mac_clear_int_status(t7xx_dev, SAP_RGU_INT);

	t7xx_dev->intr_handler[SAP_RGU_INT] = t7xx_rgu_isr_handler;
	t7xx_dev->intr_thread[SAP_RGU_INT] = t7xx_rgu_isr_thread;
	t7xx_dev->callback_param[SAP_RGU_INT] = t7xx_dev;
	t7xx_pcie_mac_set_int(t7xx_dev, SAP_RGU_INT);
}

/**
 * t7xx_cldma_exception() - CLDMA exception handler.
 * @md_ctrl: modem control struct.
 * @stage: exception stage.
 *
 * Part of the modem exception recovery.
 * Stages are one after the other as described below:
 * HIF_EX_INIT:		Disable and clear TXQ.
 * HIF_EX_CLEARQ_DONE:	Disable RX, flush TX/RX workqueues and clear RX.
 * HIF_EX_ALLQ_RESET:	HW is back in safe mode for re-initialization and restart.
 */

/* Modem Exception Handshake Flow
 *
 * Modem HW Exception interrupt received
 *           (MD_IRQ_CCIF_EX)
 *                   |
 *         +---------v--------+
 *         |   HIF_EX_INIT    | : Disable and clear TXQ
 *         +------------------+
 *                   |
 *         +---------v--------+
 *         | HIF_EX_INIT_DONE | : Wait for the init to be done
 *         +------------------+
 *                   |
 *         +---------v--------+
 *         |HIF_EX_CLEARQ_DONE| : Disable and clear RXQ
 *         +------------------+ : Flush TX/RX workqueues
 *                   |
 *         +---------v--------+
 *         |HIF_EX_ALLQ_RESET | : Restart HW and CLDMA
 *         +------------------+
 */
static void t7xx_cldma_exception(struct cldma_ctrl *md_ctrl, enum hif_ex_stage stage)
{
	switch (stage) {
	case HIF_EX_INIT:
		t7xx_cldma_stop_all_qs(md_ctrl, MTK_TX);
		t7xx_cldma_clear_all_qs(md_ctrl, MTK_TX);
		break;

	case HIF_EX_CLEARQ_DONE:
		/* We do not want to get CLDMA IRQ when MD is
		 * resetting CLDMA after it got clearq_ack.
		 */
		t7xx_cldma_stop_all_qs(md_ctrl, MTK_RX);
		t7xx_cldma_stop(md_ctrl);

		if (md_ctrl->hif_id == CLDMA_ID_MD)
			t7xx_cldma_hw_reset(md_ctrl->t7xx_dev->base_addr.infracfg_ao_base);

		t7xx_cldma_clear_all_qs(md_ctrl, MTK_RX);
		break;

	case HIF_EX_ALLQ_RESET:
		t7xx_cldma_hw_init(&md_ctrl->hw_info);
		t7xx_cldma_start(md_ctrl);
		break;

	default:
		break;
	}
}

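/* Drive one stage of the exception flow on the MD CLDMA instance and send
 * the corresponding acknowledgement back to the device over MHCCIF.
 */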
static void t7xx_md_exception(struct t7xx_modem *md, enum hif_ex_stage stage)
{
	struct t7xx_pci_dev *t7xx_dev = md->t7xx_dev;

	if (stage == HIF_EX_CLEARQ_DONE) {
		/* Give DHL time to flush data */
		msleep(PORT_RESET_DELAY_MS);
		t7xx_port_proxy_reset(md->port_prox);
	}

	t7xx_cldma_exception(md->md_ctrl[CLDMA_ID_MD], stage);

	if (stage == HIF_EX_INIT)
		t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_EXCEPTION_ACK);
	else if (stage == HIF_EX_CLEARQ_DONE)
		t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_EXCEPTION_CLEARQ_ACK);
}

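/* Poll md->exp_id for the requested exception handshake event, giving up
 * after EX_HS_TIMEOUT_MS.
 */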
static int t7xx_wait_hif_ex_hk_event(struct t7xx_modem *md, int event_id)
{
	unsigned int waited_time_ms = 0;

	do {
		if (md->exp_id & event_id)
			return 0;

		waited_time_ms += EX_HS_POLL_DELAY_MS;
		msleep(EX_HS_POLL_DELAY_MS);
	} while (waited_time_ms < EX_HS_TIMEOUT_MS);

	return -EFAULT;
}

static void t7xx_md_sys_sw_init(struct t7xx_pci_dev *t7xx_dev)
{
	/* Register the MHCCIF ISR for MD exception, port enum and
	 * async handshake notifications.
	 */
	t7xx_mhccif_mask_set(t7xx_dev, D2H_SW_INT_MASK);
	t7xx_mhccif_mask_clr(t7xx_dev, D2H_INT_PORT_ENUM);

	/* Register RGU IRQ handler for sAP exception notification */
	t7xx_dev->rgu_pci_irq_en = true;
	t7xx_pcie_register_rgu_isr(t7xx_dev);
}

struct feature_query {
	__le32 head_pattern;
	u8 feature_set[FEATURE_COUNT];
	__le32 tail_pattern;
};

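/* Build the HS1 feature query (head/tail pattern framing the host feature
 * set) and send it to the device on the control port.
 */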
static void t7xx_prepare_host_rt_data_query(struct t7xx_sys_info *core)
{
	struct feature_query *ft_query;
	struct sk_buff *skb;

	skb = t7xx_ctrl_alloc_skb(sizeof(*ft_query));
	if (!skb)
		return;

	ft_query = skb_put(skb, sizeof(*ft_query));
	ft_query->head_pattern = cpu_to_le32(MD_FEATURE_QUERY_ID);
	memcpy(ft_query->feature_set, core->feature_set, FEATURE_COUNT);
	ft_query->tail_pattern = cpu_to_le32(MD_FEATURE_QUERY_ID);

	/* Send HS1 message to device */
	t7xx_port_send_ctl_skb(core->ctl_port, skb, CTL_ID_HS1_MSG, 0);
}

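/* Validate the feature query received from the device and answer with the
 * HS3 runtime-feature message, one entry per feature that is not marked
 * MTK_FEATURE_MUST_BE_SUPPORTED.
 */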
static int t7xx_prepare_device_rt_data(struct t7xx_sys_info *core, struct device *dev,
				       void *data)
{
	struct feature_query *md_feature = data;
	struct mtk_runtime_feature *rt_feature;
	unsigned int i, rt_data_len = 0;
	struct sk_buff *skb;

	/* Parse MD runtime data query */
	if (le32_to_cpu(md_feature->head_pattern) != MD_FEATURE_QUERY_ID ||
	    le32_to_cpu(md_feature->tail_pattern) != MD_FEATURE_QUERY_ID) {
		dev_err(dev, "Invalid feature pattern: head 0x%x, tail 0x%x\n",
			le32_to_cpu(md_feature->head_pattern),
			le32_to_cpu(md_feature->tail_pattern));
		return -EINVAL;
	}

	for (i = 0; i < FEATURE_COUNT; i++) {
		if (FIELD_GET(FEATURE_MSK, md_feature->feature_set[i]) !=
		    MTK_FEATURE_MUST_BE_SUPPORTED)
			rt_data_len += sizeof(*rt_feature);
	}

	skb = t7xx_ctrl_alloc_skb(rt_data_len);
	if (!skb)
		return -ENOMEM;

	rt_feature = skb_put(skb, rt_data_len);
	memset(rt_feature, 0, rt_data_len);

	/* Fill runtime feature */
	for (i = 0; i < FEATURE_COUNT; i++) {
		u8 md_feature_mask = FIELD_GET(FEATURE_MSK, md_feature->feature_set[i]);

		if (md_feature_mask == MTK_FEATURE_MUST_BE_SUPPORTED)
			continue;

		rt_feature->feature_id = i;
		if (md_feature_mask == MTK_FEATURE_DOES_NOT_EXIST)
			rt_feature->support_info = md_feature->feature_set[i];

		rt_feature++;
	}

	/* Send HS3 message to device */
	t7xx_port_send_ctl_skb(core->ctl_port, skb, CTL_ID_HS3_MSG, 0);
	return 0;
}

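/* Walk the runtime features returned by the device: every feature the host
 * marks MUST_BE_SUPPORTED has to be confirmed, and the port enumeration
 * payload is dispatched when it is seen.
 */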
static int t7xx_parse_host_rt_data(struct t7xx_fsm_ctl *ctl, struct t7xx_sys_info *core,
				   struct device *dev, void *data, int data_length)
{
	enum mtk_feature_support_type ft_spt_st, ft_spt_cfg;
	struct mtk_runtime_feature *rt_feature;
	int i, offset;

	offset = sizeof(struct feature_query);
	for (i = 0; i < FEATURE_COUNT && offset < data_length; i++) {
		rt_feature = data + offset;
		offset += sizeof(*rt_feature) + le32_to_cpu(rt_feature->data_len);

		ft_spt_cfg = FIELD_GET(FEATURE_MSK, core->feature_set[i]);
		if (ft_spt_cfg != MTK_FEATURE_MUST_BE_SUPPORTED)
			continue;

		ft_spt_st = FIELD_GET(FEATURE_MSK, rt_feature->support_info);
		if (ft_spt_st != MTK_FEATURE_MUST_BE_SUPPORTED)
			return -EINVAL;

		if (i == RT_ID_MD_PORT_ENUM)
			t7xx_port_enum_msg_handler(ctl->md, rt_feature->data);
	}

	return 0;
}

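/* Mark the MD core as not ready and, if a handshake is still in flight,
 * queue an HS2 exit event so the handshake handler bails out.
 */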
static int t7xx_core_reset(struct t7xx_modem *md)
{
	struct device *dev = &md->t7xx_dev->pdev->dev;
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;

	md->core_md.ready = false;

	if (!ctl) {
		dev_err(dev, "FSM is not initialized\n");
		return -EINVAL;
	}

	if (md->core_md.handshake_ongoing) {
		int ret = t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2_EXIT, NULL, 0);

		if (ret)
			return ret;
	}

	md->core_md.handshake_ongoing = false;
	return 0;
}

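/* Core handshake: send the HS1 query, wait for the HS2 response (or an exit
 * or exception event), parse it, reply with HS3 and mark the core ready.
 */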
static void t7xx_core_hk_handler(struct t7xx_modem *md, struct t7xx_fsm_ctl *ctl,
				 enum t7xx_fsm_event_state event_id,
				 enum t7xx_fsm_event_state err_detect)
{
	struct t7xx_fsm_event *event = NULL, *event_next;
	struct t7xx_sys_info *core_info = &md->core_md;
	struct device *dev = &md->t7xx_dev->pdev->dev;
	unsigned long flags;
	int ret;

	t7xx_prepare_host_rt_data_query(core_info);

	while (!kthread_should_stop()) {
		bool event_received = false;

		spin_lock_irqsave(&ctl->event_lock, flags);
		list_for_each_entry_safe(event, event_next, &ctl->event_queue, entry) {
			if (event->event_id == err_detect) {
				list_del(&event->entry);
				spin_unlock_irqrestore(&ctl->event_lock, flags);
				dev_err(dev, "Core handshake error event received\n");
				goto err_free_event;
			} else if (event->event_id == event_id) {
				list_del(&event->entry);
				event_received = true;
				break;
			}
		}
		spin_unlock_irqrestore(&ctl->event_lock, flags);

		if (event_received)
			break;

		wait_event_interruptible(ctl->event_wq, !list_empty(&ctl->event_queue) ||
					 kthread_should_stop());
		if (kthread_should_stop())
			goto err_free_event;
	}

	if (!event || ctl->exp_flg)
		goto err_free_event;

	ret = t7xx_parse_host_rt_data(ctl, core_info, dev, event->data, event->length);
	if (ret) {
		dev_err(dev, "Host failure parsing runtime data: %d\n", ret);
		goto err_free_event;
	}

	if (ctl->exp_flg)
		goto err_free_event;

	ret = t7xx_prepare_device_rt_data(core_info, dev, event->data);
	if (ret) {
		dev_err(dev, "Device failure parsing runtime data: %d\n", ret);
		goto err_free_event;
	}

	core_info->ready = true;
	core_info->handshake_ongoing = false;
	wake_up(&ctl->async_hk_wq);
err_free_event:
	kfree(event);
}

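/* Handshake work item: reconfigure and start the MD CLDMA queues, announce
 * MD_STATE_WAITING_FOR_HS2 and run the core handshake.
 */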
static void t7xx_md_hk_wq(struct work_struct *work)
{
	struct t7xx_modem *md = container_of(work, struct t7xx_modem, handshake_work);
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;

	/* Clear the HS2 EXIT event appended in core_reset() */
	t7xx_fsm_clr_event(ctl, FSM_EVENT_MD_HS2_EXIT);
	t7xx_cldma_switch_cfg(md->md_ctrl[CLDMA_ID_MD]);
	t7xx_cldma_start(md->md_ctrl[CLDMA_ID_MD]);
	t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS2);
	md->core_md.handshake_ongoing = true;
	t7xx_core_hk_handler(md, ctl, FSM_EVENT_MD_HS2, FSM_EVENT_MD_HS2_EXIT);
}

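/* Adjust MHCCIF interrupt masks as the FSM moves through its states and kick
 * the handshake work if the async handshake interrupt is already pending.
 */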
void t7xx_md_event_notify(struct t7xx_modem *md, enum md_event_id evt_id)
{
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
	void __iomem *mhccif_base;
	unsigned int int_sta;
	unsigned long flags;

	switch (evt_id) {
	case FSM_PRE_START:
		t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_PORT_ENUM);
		break;

	case FSM_START:
		t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_PORT_ENUM);

		spin_lock_irqsave(&md->exp_lock, flags);
		int_sta = t7xx_get_interrupt_status(md->t7xx_dev);
		md->exp_id |= int_sta;
		if (md->exp_id & D2H_INT_EXCEPTION_INIT) {
			ctl->exp_flg = true;
			md->exp_id &= ~D2H_INT_EXCEPTION_INIT;
			md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
		} else if (ctl->exp_flg) {
			md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
		} else if (md->exp_id & D2H_INT_ASYNC_MD_HK) {
			queue_work(md->handshake_wq, &md->handshake_work);
			md->exp_id &= ~D2H_INT_ASYNC_MD_HK;
			mhccif_base = md->t7xx_dev->base_addr.mhccif_rc_base;
			iowrite32(D2H_INT_ASYNC_MD_HK, mhccif_base + REG_EP2RC_SW_INT_ACK);
			t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK);
		} else {
			t7xx_mhccif_mask_clr(md->t7xx_dev, D2H_INT_ASYNC_MD_HK);
		}
		spin_unlock_irqrestore(&md->exp_lock, flags);

		t7xx_mhccif_mask_clr(md->t7xx_dev,
				     D2H_INT_EXCEPTION_INIT |
				     D2H_INT_EXCEPTION_INIT_DONE |
				     D2H_INT_EXCEPTION_CLEARQ_DONE |
				     D2H_INT_EXCEPTION_ALLQ_RESET);
		break;

	case FSM_READY:
		t7xx_mhccif_mask_set(md->t7xx_dev, D2H_INT_ASYNC_MD_HK);
		break;

	default:
		break;
	}
}

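/* Run the full exception handshake with the device, stepping through the
 * HIF_EX_* stages and waiting for each acknowledgement from the modem.
 */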
void t7xx_md_exception_handshake(struct t7xx_modem *md)
{
	struct device *dev = &md->t7xx_dev->pdev->dev;
	int ret;

	t7xx_md_exception(md, HIF_EX_INIT);
	ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_INIT_DONE);
	if (ret)
		dev_err(dev, "EX CCIF HS timeout, RCH 0x%lx\n", D2H_INT_EXCEPTION_INIT_DONE);

	t7xx_md_exception(md, HIF_EX_INIT_DONE);
	ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_CLEARQ_DONE);
	if (ret)
		dev_err(dev, "EX CCIF HS timeout, RCH 0x%lx\n", D2H_INT_EXCEPTION_CLEARQ_DONE);

	t7xx_md_exception(md, HIF_EX_CLEARQ_DONE);
	ret = t7xx_wait_hif_ex_hk_event(md, D2H_INT_EXCEPTION_ALLQ_RESET);
	if (ret)
		dev_err(dev, "EX CCIF HS timeout, RCH 0x%lx\n", D2H_INT_EXCEPTION_ALLQ_RESET);

	t7xx_md_exception(md, HIF_EX_ALLQ_RESET);
}

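/* Allocate the modem control block and its handshake workqueue, and mark the
 * port enumeration feature as mandatory in the host feature set.
 */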
static struct t7xx_modem *t7xx_md_alloc(struct t7xx_pci_dev *t7xx_dev)
{
	struct device *dev = &t7xx_dev->pdev->dev;
	struct t7xx_modem *md;

	md = devm_kzalloc(dev, sizeof(*md), GFP_KERNEL);
	if (!md)
		return NULL;

	md->t7xx_dev = t7xx_dev;
	t7xx_dev->md = md;
	spin_lock_init(&md->exp_lock);
	md->handshake_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI,
					   0, "md_hk_wq");
	if (!md->handshake_wq)
		return NULL;

	INIT_WORK(&md->handshake_work, t7xx_md_hk_wq);
	md->core_md.feature_set[RT_ID_MD_PORT_ENUM] &= ~FEATURE_MSK;
	md->core_md.feature_set[RT_ID_MD_PORT_ENUM] |=
		FIELD_PREP(FEATURE_MSK, MTK_FEATURE_MUST_BE_SUPPORTED);
	return md;
}

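/* Reset the modem software state: reset the FSM, MD CLDMA and port proxy,
 * then reset the core handshake bookkeeping.
 */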
int t7xx_md_reset(struct t7xx_pci_dev *t7xx_dev)
{
	struct t7xx_modem *md = t7xx_dev->md;

	md->md_init_finish = false;
	md->exp_id = 0;
	t7xx_fsm_reset(md);
	t7xx_cldma_reset(md->md_ctrl[CLDMA_ID_MD]);
	t7xx_port_proxy_reset(md->port_prox);
	md->md_init_finish = true;
	return t7xx_core_reset(md);
}

/**
 * t7xx_md_init() - Initialize modem.
 * @t7xx_dev: MTK device.
 *
 * Allocate and initialize MD control block, and initialize data path.
 * Register MHCCIF ISR and RGU ISR, and start the state machine.
 *
 * Return:
 ** 0		- Success.
 ** -ENOMEM	- Allocation failure.
 */
int t7xx_md_init(struct t7xx_pci_dev *t7xx_dev)
{
	struct t7xx_modem *md;
	int ret;

	md = t7xx_md_alloc(t7xx_dev);
	if (!md)
		return -ENOMEM;

	ret = t7xx_cldma_alloc(CLDMA_ID_MD, t7xx_dev);
	if (ret)
		goto err_destroy_hswq;

	ret = t7xx_fsm_init(md);
	if (ret)
		goto err_destroy_hswq;

	ret = t7xx_ccmni_init(t7xx_dev);
	if (ret)
		goto err_uninit_fsm;

	ret = t7xx_cldma_init(md->md_ctrl[CLDMA_ID_MD]);
	if (ret)
		goto err_uninit_ccmni;

	ret = t7xx_port_proxy_init(md);
	if (ret)
		goto err_uninit_md_cldma;

	ret = t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_START, 0);
	if (ret) /* fsm_uninit flushes cmd queue */
		goto err_uninit_proxy;

	t7xx_md_sys_sw_init(t7xx_dev);
	md->md_init_finish = true;
	return 0;

err_uninit_proxy:
	t7xx_port_proxy_uninit(md->port_prox);

err_uninit_md_cldma:
	t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]);

err_uninit_ccmni:
	t7xx_ccmni_exit(t7xx_dev);

err_uninit_fsm:
	t7xx_fsm_uninit(md);

err_destroy_hswq:
	destroy_workqueue(md->handshake_wq);
	dev_err(&t7xx_dev->pdev->dev, "Modem init failed\n");
	return ret;
}

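/* Send the FSM pre-stop command and release the modem resources in the
 * reverse order of t7xx_md_init(); returns early if initialization never
 * completed.
 */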
void t7xx_md_exit(struct t7xx_pci_dev *t7xx_dev)
{
	struct t7xx_modem *md = t7xx_dev->md;

	t7xx_pcie_mac_clear_int(t7xx_dev, SAP_RGU_INT);

	if (!md->md_init_finish)
		return;

	t7xx_fsm_append_cmd(md->fsm_ctl, FSM_CMD_PRE_STOP, FSM_CMD_FLAG_WAIT_FOR_COMPLETION);
	t7xx_port_proxy_uninit(md->port_prox);
	t7xx_cldma_exit(md->md_ctrl[CLDMA_ID_MD]);
	t7xx_ccmni_exit(t7xx_dev);
	t7xx_fsm_uninit(md);
	destroy_workqueue(md->handshake_wq);
}