cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

pci_generic.c (35578B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/*
      3 * MHI PCI driver - MHI over PCI controller driver
      4 *
      5 * This module is a generic driver for registering MHI-over-PCI devices,
      6 * such as PCIe QCOM modems.
      7 *
      8 * Copyright (C) 2020 Linaro Ltd <loic.poulain@linaro.org>
      9 */
     10
     11#include <linux/aer.h>
     12#include <linux/delay.h>
     13#include <linux/device.h>
     14#include <linux/mhi.h>
     15#include <linux/module.h>
     16#include <linux/pci.h>
     17#include <linux/pm_runtime.h>
     18#include <linux/timer.h>
     19#include <linux/workqueue.h>
     20
     21#define MHI_PCI_DEFAULT_BAR_NUM 0
     22
     23#define MHI_POST_RESET_DELAY_MS 2000
     24
     25#define HEALTH_CHECK_PERIOD (HZ * 2)
     26
     27/**
     28 * struct mhi_pci_dev_info - MHI PCI device specific information
     29 * @config: MHI controller configuration
     30 * @name: name of the PCI module
     31 * @fw: firmware path (if any)
     32 * @edl: emergency download mode firmware path (if any)
     33 * @bar_num: PCI base address register to use for MHI MMIO register space
     34 * @dma_data_width: DMA transfer word size (32 or 64 bits)
     35 * @mru_default: default MRU size for MBIM network packets
      36 * @sideband_wake: true for devices that use a dedicated sideband GPIO for
      37 *		   wakeup instead of inband wake support (such as the sdx24)
     38 */
     39struct mhi_pci_dev_info {
     40	const struct mhi_controller_config *config;
     41	const char *name;
     42	const char *fw;
     43	const char *edl;
     44	unsigned int bar_num;
     45	unsigned int dma_data_width;
     46	unsigned int mru_default;
     47	bool sideband_wake;
     48};
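
/*
 * One mhi_pci_dev_info is defined for each supported modem below; the PCI
 * device ID table points at it through the driver_data field of its entries.
 */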
     49
     50#define MHI_CHANNEL_CONFIG_UL(ch_num, ch_name, el_count, ev_ring) \
     51	{						\
     52		.num = ch_num,				\
     53		.name = ch_name,			\
     54		.num_elements = el_count,		\
     55		.event_ring = ev_ring,			\
     56		.dir = DMA_TO_DEVICE,			\
     57		.ee_mask = BIT(MHI_EE_AMSS),		\
     58		.pollcfg = 0,				\
     59		.doorbell = MHI_DB_BRST_DISABLE,	\
     60		.lpm_notify = false,			\
     61		.offload_channel = false,		\
     62		.doorbell_mode_switch = false,		\
     63	}						\
     64
     65#define MHI_CHANNEL_CONFIG_DL(ch_num, ch_name, el_count, ev_ring) \
     66	{						\
     67		.num = ch_num,				\
     68		.name = ch_name,			\
     69		.num_elements = el_count,		\
     70		.event_ring = ev_ring,			\
     71		.dir = DMA_FROM_DEVICE,			\
     72		.ee_mask = BIT(MHI_EE_AMSS),		\
     73		.pollcfg = 0,				\
     74		.doorbell = MHI_DB_BRST_DISABLE,	\
     75		.lpm_notify = false,			\
     76		.offload_channel = false,		\
     77		.doorbell_mode_switch = false,		\
     78	}
     79
     80#define MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(ch_num, ch_name, el_count, ev_ring) \
     81	{						\
     82		.num = ch_num,				\
     83		.name = ch_name,			\
     84		.num_elements = el_count,		\
     85		.event_ring = ev_ring,			\
     86		.dir = DMA_FROM_DEVICE,			\
     87		.ee_mask = BIT(MHI_EE_AMSS),		\
     88		.pollcfg = 0,				\
     89		.doorbell = MHI_DB_BRST_DISABLE,	\
     90		.lpm_notify = false,			\
     91		.offload_channel = false,		\
     92		.doorbell_mode_switch = false,		\
     93		.auto_queue = true,			\
     94	}
     95
     96#define MHI_EVENT_CONFIG_CTRL(ev_ring, el_count) \
     97	{					\
     98		.num_elements = el_count,	\
     99		.irq_moderation_ms = 0,		\
    100		.irq = (ev_ring) + 1,		\
    101		.priority = 1,			\
    102		.mode = MHI_DB_BRST_DISABLE,	\
    103		.data_type = MHI_ER_CTRL,	\
    104		.hardware_event = false,	\
    105		.client_managed = false,	\
    106		.offload_channel = false,	\
    107	}
    108
    109#define MHI_CHANNEL_CONFIG_HW_UL(ch_num, ch_name, el_count, ev_ring) \
    110	{						\
    111		.num = ch_num,				\
    112		.name = ch_name,			\
    113		.num_elements = el_count,		\
    114		.event_ring = ev_ring,			\
    115		.dir = DMA_TO_DEVICE,			\
    116		.ee_mask = BIT(MHI_EE_AMSS),		\
    117		.pollcfg = 0,				\
    118		.doorbell = MHI_DB_BRST_ENABLE,	\
    119		.lpm_notify = false,			\
    120		.offload_channel = false,		\
    121		.doorbell_mode_switch = true,		\
    122	}						\
    123
    124#define MHI_CHANNEL_CONFIG_HW_DL(ch_num, ch_name, el_count, ev_ring) \
    125	{						\
    126		.num = ch_num,				\
    127		.name = ch_name,			\
    128		.num_elements = el_count,		\
    129		.event_ring = ev_ring,			\
    130		.dir = DMA_FROM_DEVICE,			\
    131		.ee_mask = BIT(MHI_EE_AMSS),		\
    132		.pollcfg = 0,				\
    133		.doorbell = MHI_DB_BRST_ENABLE,	\
    134		.lpm_notify = false,			\
    135		.offload_channel = false,		\
    136		.doorbell_mode_switch = true,		\
    137	}
    138
    139#define MHI_CHANNEL_CONFIG_UL_SBL(ch_num, ch_name, el_count, ev_ring) \
    140	{						\
    141		.num = ch_num,				\
    142		.name = ch_name,			\
    143		.num_elements = el_count,		\
    144		.event_ring = ev_ring,			\
    145		.dir = DMA_TO_DEVICE,			\
    146		.ee_mask = BIT(MHI_EE_SBL),		\
    147		.pollcfg = 0,				\
    148		.doorbell = MHI_DB_BRST_DISABLE,	\
    149		.lpm_notify = false,			\
    150		.offload_channel = false,		\
    151		.doorbell_mode_switch = false,		\
    152	}						\
    153
    154#define MHI_CHANNEL_CONFIG_DL_SBL(ch_num, ch_name, el_count, ev_ring) \
    155	{						\
    156		.num = ch_num,				\
    157		.name = ch_name,			\
    158		.num_elements = el_count,		\
    159		.event_ring = ev_ring,			\
    160		.dir = DMA_FROM_DEVICE,			\
    161		.ee_mask = BIT(MHI_EE_SBL),		\
    162		.pollcfg = 0,				\
    163		.doorbell = MHI_DB_BRST_DISABLE,	\
    164		.lpm_notify = false,			\
    165		.offload_channel = false,		\
    166		.doorbell_mode_switch = false,		\
    167	}
    168
    169#define MHI_CHANNEL_CONFIG_UL_FP(ch_num, ch_name, el_count, ev_ring) \
    170	{						\
    171		.num = ch_num,				\
    172		.name = ch_name,			\
    173		.num_elements = el_count,		\
    174		.event_ring = ev_ring,			\
    175		.dir = DMA_TO_DEVICE,			\
    176		.ee_mask = BIT(MHI_EE_FP),		\
    177		.pollcfg = 0,				\
    178		.doorbell = MHI_DB_BRST_DISABLE,	\
    179		.lpm_notify = false,			\
    180		.offload_channel = false,		\
    181		.doorbell_mode_switch = false,		\
    182	}						\
    183
    184#define MHI_CHANNEL_CONFIG_DL_FP(ch_num, ch_name, el_count, ev_ring) \
    185	{						\
    186		.num = ch_num,				\
    187		.name = ch_name,			\
    188		.num_elements = el_count,		\
    189		.event_ring = ev_ring,			\
    190		.dir = DMA_FROM_DEVICE,			\
    191		.ee_mask = BIT(MHI_EE_FP),		\
    192		.pollcfg = 0,				\
    193		.doorbell = MHI_DB_BRST_DISABLE,	\
    194		.lpm_notify = false,			\
    195		.offload_channel = false,		\
    196		.doorbell_mode_switch = false,		\
    197	}
    198
    199#define MHI_EVENT_CONFIG_DATA(ev_ring, el_count) \
    200	{					\
    201		.num_elements = el_count,	\
    202		.irq_moderation_ms = 5,		\
    203		.irq = (ev_ring) + 1,		\
    204		.priority = 1,			\
    205		.mode = MHI_DB_BRST_DISABLE,	\
    206		.data_type = MHI_ER_DATA,	\
    207		.hardware_event = false,	\
    208		.client_managed = false,	\
    209		.offload_channel = false,	\
    210	}
    211
    212#define MHI_EVENT_CONFIG_HW_DATA(ev_ring, el_count, ch_num) \
    213	{					\
    214		.num_elements = el_count,	\
    215		.irq_moderation_ms = 1,		\
    216		.irq = (ev_ring) + 1,		\
    217		.priority = 1,			\
    218		.mode = MHI_DB_BRST_DISABLE,	\
    219		.data_type = MHI_ER_DATA,	\
    220		.hardware_event = true,		\
    221		.client_managed = false,	\
    222		.offload_channel = false,	\
    223		.channel = ch_num,		\
    224	}
    225
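/*
 * Per-modem channel, event ring and controller configurations, built from
 * the macros above and referenced by the mhi_pci_dev_info entries below.
 */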
    226static const struct mhi_channel_config modem_qcom_v1_mhi_channels[] = {
    227	MHI_CHANNEL_CONFIG_UL(4, "DIAG", 16, 1),
    228	MHI_CHANNEL_CONFIG_DL(5, "DIAG", 16, 1),
    229	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 4, 0),
    230	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 4, 0),
    231	MHI_CHANNEL_CONFIG_UL(14, "QMI", 4, 0),
    232	MHI_CHANNEL_CONFIG_DL(15, "QMI", 4, 0),
    233	MHI_CHANNEL_CONFIG_UL(20, "IPCR", 8, 0),
    234	MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 8, 0),
    235	MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
    236	MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
    237	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 2),
    238	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 3),
    239};
    240
    241static struct mhi_event_config modem_qcom_v1_mhi_events[] = {
    242	/* first ring is control+data ring */
    243	MHI_EVENT_CONFIG_CTRL(0, 64),
    244	/* DIAG dedicated event ring */
    245	MHI_EVENT_CONFIG_DATA(1, 128),
    246	/* Hardware channels request dedicated hardware event rings */
    247	MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
    248	MHI_EVENT_CONFIG_HW_DATA(3, 2048, 101)
    249};
    250
    251static const struct mhi_controller_config modem_qcom_v1_mhiv_config = {
    252	.max_channels = 128,
    253	.timeout_ms = 8000,
    254	.num_channels = ARRAY_SIZE(modem_qcom_v1_mhi_channels),
    255	.ch_cfg = modem_qcom_v1_mhi_channels,
    256	.num_events = ARRAY_SIZE(modem_qcom_v1_mhi_events),
    257	.event_cfg = modem_qcom_v1_mhi_events,
    258};
    259
    260static const struct mhi_pci_dev_info mhi_qcom_sdx65_info = {
    261	.name = "qcom-sdx65m",
    262	.fw = "qcom/sdx65m/xbl.elf",
    263	.edl = "qcom/sdx65m/edl.mbn",
    264	.config = &modem_qcom_v1_mhiv_config,
    265	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
    266	.dma_data_width = 32,
    267	.sideband_wake = false,
    268};
    269
    270static const struct mhi_pci_dev_info mhi_qcom_sdx55_info = {
    271	.name = "qcom-sdx55m",
    272	.fw = "qcom/sdx55m/sbl1.mbn",
    273	.edl = "qcom/sdx55m/edl.mbn",
    274	.config = &modem_qcom_v1_mhiv_config,
    275	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
    276	.dma_data_width = 32,
    277	.mru_default = 32768,
    278	.sideband_wake = false,
    279};
    280
    281static const struct mhi_pci_dev_info mhi_qcom_sdx24_info = {
    282	.name = "qcom-sdx24",
    283	.edl = "qcom/prog_firehose_sdx24.mbn",
    284	.config = &modem_qcom_v1_mhiv_config,
    285	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
    286	.dma_data_width = 32,
    287	.sideband_wake = true,
    288};
    289
    290static const struct mhi_channel_config mhi_quectel_em1xx_channels[] = {
    291	MHI_CHANNEL_CONFIG_UL(0, "NMEA", 32, 0),
    292	MHI_CHANNEL_CONFIG_DL(1, "NMEA", 32, 0),
    293	MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
    294	MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 32, 0),
    295	MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 1),
    296	MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1),
    297	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
    298	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
    299	MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
    300	MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
     301	/* The EDL firmware is a flash programmer exposing the Firehose protocol */
    302	MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
    303	MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
    304	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
    305	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
    306};
    307
    308static struct mhi_event_config mhi_quectel_em1xx_events[] = {
    309	MHI_EVENT_CONFIG_CTRL(0, 128),
    310	MHI_EVENT_CONFIG_DATA(1, 128),
    311	MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
    312	MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101)
    313};
    314
    315static const struct mhi_controller_config modem_quectel_em1xx_config = {
    316	.max_channels = 128,
    317	.timeout_ms = 20000,
    318	.num_channels = ARRAY_SIZE(mhi_quectel_em1xx_channels),
    319	.ch_cfg = mhi_quectel_em1xx_channels,
    320	.num_events = ARRAY_SIZE(mhi_quectel_em1xx_events),
    321	.event_cfg = mhi_quectel_em1xx_events,
    322};
    323
    324static const struct mhi_pci_dev_info mhi_quectel_em1xx_info = {
    325	.name = "quectel-em1xx",
    326	.edl = "qcom/prog_firehose_sdx24.mbn",
    327	.config = &modem_quectel_em1xx_config,
    328	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
    329	.dma_data_width = 32,
    330	.mru_default = 32768,
    331	.sideband_wake = true,
    332};
    333
    334static const struct mhi_channel_config mhi_foxconn_sdx55_channels[] = {
    335	MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 32, 0),
    336	MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 32, 0),
    337	MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 1),
    338	MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 1),
    339	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
    340	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
    341	MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
    342	MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
    343	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
    344	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
    345};
    346
    347static struct mhi_event_config mhi_foxconn_sdx55_events[] = {
    348	MHI_EVENT_CONFIG_CTRL(0, 128),
    349	MHI_EVENT_CONFIG_DATA(1, 128),
    350	MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
    351	MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101)
    352};
    353
    354static const struct mhi_controller_config modem_foxconn_sdx55_config = {
    355	.max_channels = 128,
    356	.timeout_ms = 20000,
    357	.num_channels = ARRAY_SIZE(mhi_foxconn_sdx55_channels),
    358	.ch_cfg = mhi_foxconn_sdx55_channels,
    359	.num_events = ARRAY_SIZE(mhi_foxconn_sdx55_events),
    360	.event_cfg = mhi_foxconn_sdx55_events,
    361};
    362
    363static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = {
    364	.name = "foxconn-sdx55",
    365	.fw = "qcom/sdx55m/sbl1.mbn",
    366	.edl = "qcom/sdx55m/edl.mbn",
    367	.config = &modem_foxconn_sdx55_config,
    368	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
    369	.dma_data_width = 32,
    370	.mru_default = 32768,
    371	.sideband_wake = false,
    372};
    373
    374static const struct mhi_pci_dev_info mhi_foxconn_sdx65_info = {
    375	.name = "foxconn-sdx65",
    376	.config = &modem_foxconn_sdx55_config,
    377	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
    378	.dma_data_width = 32,
    379	.mru_default = 32768,
    380	.sideband_wake = false,
    381};
    382
    383static const struct mhi_channel_config mhi_mv3x_channels[] = {
    384	MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 64, 0),
    385	MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 64, 0),
    386	/* MBIM Control Channel */
    387	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 64, 0),
    388	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 64, 0),
    389	/* MBIM Data Channel */
    390	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 512, 2),
    391	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 512, 3),
    392};
    393
    394static struct mhi_event_config mhi_mv3x_events[] = {
    395	MHI_EVENT_CONFIG_CTRL(0, 256),
    396	MHI_EVENT_CONFIG_DATA(1, 256),
    397	MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
    398	MHI_EVENT_CONFIG_HW_DATA(3, 1024, 101),
    399};
    400
    401static const struct mhi_controller_config modem_mv3x_config = {
    402	.max_channels = 128,
    403	.timeout_ms = 20000,
    404	.num_channels = ARRAY_SIZE(mhi_mv3x_channels),
    405	.ch_cfg = mhi_mv3x_channels,
    406	.num_events = ARRAY_SIZE(mhi_mv3x_events),
    407	.event_cfg = mhi_mv3x_events,
    408};
    409
    410static const struct mhi_pci_dev_info mhi_mv31_info = {
    411	.name = "cinterion-mv31",
    412	.config = &modem_mv3x_config,
    413	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
    414	.dma_data_width = 32,
    415	.mru_default = 32768,
    416};
    417
    418static const struct mhi_pci_dev_info mhi_mv32_info = {
    419	.name = "cinterion-mv32",
    420	.config = &modem_mv3x_config,
    421	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
    422	.dma_data_width = 32,
    423	.mru_default = 32768,
    424};
    425
    426static const struct mhi_channel_config mhi_sierra_em919x_channels[] = {
    427	MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
    428	MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 256, 0),
    429	MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 0),
    430	MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 0),
    431	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 128, 0),
    432	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 128, 0),
    433	MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0),
    434	MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0),
    435	MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
    436	MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
    437	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 512, 1),
    438	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 512, 2),
    439};
    440
    441static struct mhi_event_config modem_sierra_em919x_mhi_events[] = {
    442	/* first ring is control+data and DIAG ring */
    443	MHI_EVENT_CONFIG_CTRL(0, 2048),
    444	/* Hardware channels request dedicated hardware event rings */
    445	MHI_EVENT_CONFIG_HW_DATA(1, 2048, 100),
    446	MHI_EVENT_CONFIG_HW_DATA(2, 2048, 101)
    447};
    448
    449static const struct mhi_controller_config modem_sierra_em919x_config = {
    450	.max_channels = 128,
    451	.timeout_ms = 24000,
    452	.num_channels = ARRAY_SIZE(mhi_sierra_em919x_channels),
    453	.ch_cfg = mhi_sierra_em919x_channels,
    454	.num_events = ARRAY_SIZE(modem_sierra_em919x_mhi_events),
    455	.event_cfg = modem_sierra_em919x_mhi_events,
    456};
    457
    458static const struct mhi_pci_dev_info mhi_sierra_em919x_info = {
    459	.name = "sierra-em919x",
    460	.config = &modem_sierra_em919x_config,
    461	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
    462	.dma_data_width = 32,
    463	.sideband_wake = false,
    464};
    465
    466static const struct mhi_channel_config mhi_telit_fn980_hw_v1_channels[] = {
    467	MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0),
    468	MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0),
    469	MHI_CHANNEL_CONFIG_UL(20, "IPCR", 16, 0),
    470	MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 16, 0),
    471	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 1),
    472	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 2),
    473};
    474
    475static struct mhi_event_config mhi_telit_fn980_hw_v1_events[] = {
    476	MHI_EVENT_CONFIG_CTRL(0, 128),
    477	MHI_EVENT_CONFIG_HW_DATA(1, 1024, 100),
    478	MHI_EVENT_CONFIG_HW_DATA(2, 2048, 101)
    479};
    480
    481static struct mhi_controller_config modem_telit_fn980_hw_v1_config = {
    482	.max_channels = 128,
    483	.timeout_ms = 20000,
    484	.num_channels = ARRAY_SIZE(mhi_telit_fn980_hw_v1_channels),
    485	.ch_cfg = mhi_telit_fn980_hw_v1_channels,
    486	.num_events = ARRAY_SIZE(mhi_telit_fn980_hw_v1_events),
    487	.event_cfg = mhi_telit_fn980_hw_v1_events,
    488};
    489
    490static const struct mhi_pci_dev_info mhi_telit_fn980_hw_v1_info = {
    491	.name = "telit-fn980-hwv1",
    492	.fw = "qcom/sdx55m/sbl1.mbn",
    493	.edl = "qcom/sdx55m/edl.mbn",
    494	.config = &modem_telit_fn980_hw_v1_config,
    495	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
    496	.dma_data_width = 32,
    497	.mru_default = 32768,
    498	.sideband_wake = false,
    499};
    500
    501static const struct mhi_channel_config mhi_telit_fn990_channels[] = {
    502	MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
    503	MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 32, 0),
    504	MHI_CHANNEL_CONFIG_UL(4, "DIAG", 64, 1),
    505	MHI_CHANNEL_CONFIG_DL(5, "DIAG", 64, 1),
    506	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0),
    507	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
    508	MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
    509	MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
    510	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
    511	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
    512};
    513
    514static struct mhi_event_config mhi_telit_fn990_events[] = {
    515	MHI_EVENT_CONFIG_CTRL(0, 128),
    516	MHI_EVENT_CONFIG_DATA(1, 128),
    517	MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
    518	MHI_EVENT_CONFIG_HW_DATA(3, 2048, 101)
    519};
    520
    521static const struct mhi_controller_config modem_telit_fn990_config = {
    522	.max_channels = 128,
    523	.timeout_ms = 20000,
    524	.num_channels = ARRAY_SIZE(mhi_telit_fn990_channels),
    525	.ch_cfg = mhi_telit_fn990_channels,
    526	.num_events = ARRAY_SIZE(mhi_telit_fn990_events),
    527	.event_cfg = mhi_telit_fn990_events,
    528};
    529
    530static const struct mhi_pci_dev_info mhi_telit_fn990_info = {
    531	.name = "telit-fn990",
    532	.config = &modem_telit_fn990_config,
    533	.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
    534	.dma_data_width = 32,
    535	.sideband_wake = false,
    536	.mru_default = 32768,
    537};
    538
     539/* Keep the list sorted by PID. A new VID should be added as the last entry */
    540static const struct pci_device_id mhi_pci_id_table[] = {
    541	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304),
    542		.driver_data = (kernel_ulong_t) &mhi_qcom_sdx24_info },
    543	/* EM919x (sdx55), use the same vid:pid as qcom-sdx55m */
    544	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x18d7, 0x0200),
    545		.driver_data = (kernel_ulong_t) &mhi_sierra_em919x_info },
    546	/* Telit FN980 hardware revision v1 */
    547	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x1C5D, 0x2000),
    548		.driver_data = (kernel_ulong_t) &mhi_telit_fn980_hw_v1_info },
    549	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0306),
    550		.driver_data = (kernel_ulong_t) &mhi_qcom_sdx55_info },
    551	/* Telit FN990 */
    552	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2010),
    553		.driver_data = (kernel_ulong_t) &mhi_telit_fn990_info },
    554	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308),
    555		.driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info },
    556	{ PCI_DEVICE(0x1eac, 0x1001), /* EM120R-GL (sdx24) */
    557		.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
    558	{ PCI_DEVICE(0x1eac, 0x1002), /* EM160R-GL (sdx24) */
    559		.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
     560	/* T99W175 (sdx55), both eSIM and non-eSIM variants */
    561	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0ab),
    562		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
     563	/* DW5930e (sdx55), with eSIM; it is also a T99W175 */
    564	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b0),
    565		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
     566	/* DW5930e (sdx55), non-eSIM; it is also a T99W175 */
    567	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b1),
    568		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
     569	/* T99W175 (sdx55), based on the new Qualcomm baseline */
    570	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0bf),
    571		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
    572	/* T99W368 (sdx65) */
    573	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d8),
    574		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
    575	/* T99W373 (sdx62) */
    576	{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0d9),
    577		.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
    578	/* MV31-W (Cinterion) */
    579	{ PCI_DEVICE(0x1269, 0x00b3),
    580		.driver_data = (kernel_ulong_t) &mhi_mv31_info },
    581	/* MV32-WA (Cinterion) */
    582	{ PCI_DEVICE(0x1269, 0x00ba),
    583		.driver_data = (kernel_ulong_t) &mhi_mv32_info },
    584	/* MV32-WB (Cinterion) */
    585	{ PCI_DEVICE(0x1269, 0x00bb),
    586		.driver_data = (kernel_ulong_t) &mhi_mv32_info },
    587	{  }
    588};
    589MODULE_DEVICE_TABLE(pci, mhi_pci_id_table);
    590
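/* Driver state bits, set and tested atomically in mhi_pci_device.status */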
    591enum mhi_pci_device_status {
    592	MHI_PCI_DEV_STARTED,
    593	MHI_PCI_DEV_SUSPENDED,
    594};
    595
    596struct mhi_pci_device {
    597	struct mhi_controller mhi_cntrl;
    598	struct pci_saved_state *pci_state;
    599	struct work_struct recovery_work;
    600	struct timer_list health_check_timer;
    601	unsigned long status;
    602};
    603
    604static int mhi_pci_read_reg(struct mhi_controller *mhi_cntrl,
    605			    void __iomem *addr, u32 *out)
    606{
    607	*out = readl(addr);
    608	return 0;
    609}
    610
    611static void mhi_pci_write_reg(struct mhi_controller *mhi_cntrl,
    612			      void __iomem *addr, u32 val)
    613{
    614	writel(val, addr);
    615}
    616
    617static void mhi_pci_status_cb(struct mhi_controller *mhi_cntrl,
    618			      enum mhi_callback cb)
    619{
    620	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
    621
     622	/* Only a few callbacks need handling for now */
    623	switch (cb) {
    624	case MHI_CB_FATAL_ERROR:
    625	case MHI_CB_SYS_ERROR:
    626		dev_warn(&pdev->dev, "firmware crashed (%u)\n", cb);
    627		pm_runtime_forbid(&pdev->dev);
    628		break;
    629	case MHI_CB_EE_MISSION_MODE:
    630		pm_runtime_allow(&pdev->dev);
    631		break;
    632	default:
    633		break;
    634	}
    635}
    636
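/*
 * No-op wake hooks, installed for devices that signal wakeup through a
 * dedicated sideband GPIO instead of the inband device wake mechanism
 * (see the sideband_wake flag in struct mhi_pci_dev_info).
 */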
    637static void mhi_pci_wake_get_nop(struct mhi_controller *mhi_cntrl, bool force)
    638{
    639	/* no-op */
    640}
    641
    642static void mhi_pci_wake_put_nop(struct mhi_controller *mhi_cntrl, bool override)
    643{
    644	/* no-op */
    645}
    646
    647static void mhi_pci_wake_toggle_nop(struct mhi_controller *mhi_cntrl)
    648{
    649	/* no-op */
    650}
    651
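/*
 * Liveness probe: read the vendor ID from PCI config space. A failed read,
 * or one returning all ones or zero, means the device no longer responds on
 * the bus.
 */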
    652static bool mhi_pci_is_alive(struct mhi_controller *mhi_cntrl)
    653{
    654	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
    655	u16 vendor = 0;
    656
    657	if (pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor))
    658		return false;
    659
    660	if (vendor == (u16) ~0 || vendor == 0)
    661		return false;
    662
    663	return true;
    664}
    665
    666static int mhi_pci_claim(struct mhi_controller *mhi_cntrl,
    667			 unsigned int bar_num, u64 dma_mask)
    668{
    669	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
    670	int err;
    671
    672	err = pci_assign_resource(pdev, bar_num);
    673	if (err)
    674		return err;
    675
    676	err = pcim_enable_device(pdev);
    677	if (err) {
    678		dev_err(&pdev->dev, "failed to enable pci device: %d\n", err);
    679		return err;
    680	}
    681
    682	err = pcim_iomap_regions(pdev, 1 << bar_num, pci_name(pdev));
    683	if (err) {
    684		dev_err(&pdev->dev, "failed to map pci region: %d\n", err);
    685		return err;
    686	}
    687	mhi_cntrl->regs = pcim_iomap_table(pdev)[bar_num];
    688	mhi_cntrl->reg_len = pci_resource_len(pdev, bar_num);
    689
    690	err = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
    691	if (err) {
    692		dev_err(&pdev->dev, "Cannot set proper DMA mask\n");
    693		return err;
    694	}
    695
    696	pci_set_master(pdev);
    697
    698	return 0;
    699}
    700
    701static int mhi_pci_get_irqs(struct mhi_controller *mhi_cntrl,
    702			    const struct mhi_controller_config *mhi_cntrl_config)
    703{
    704	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
    705	int nr_vectors, i;
    706	int *irq;
    707
     708	/*
     709	 * Allocate one MSI vector for BHI plus, ideally, one vector per event ring.
     710	 * No explicit pci_free_irq_vectors() is required; that is handled by pcim_release.
     711	 */
    712	mhi_cntrl->nr_irqs = 1 + mhi_cntrl_config->num_events;
    713
    714	nr_vectors = pci_alloc_irq_vectors(pdev, 1, mhi_cntrl->nr_irqs, PCI_IRQ_MSI);
    715	if (nr_vectors < 0) {
    716		dev_err(&pdev->dev, "Error allocating MSI vectors %d\n",
    717			nr_vectors);
    718		return nr_vectors;
    719	}
    720
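	/*
	 * If the PCI core granted fewer vectors than requested, fall back to
	 * a single MSI vector shared by BHI and all event rings.
	 */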
    721	if (nr_vectors < mhi_cntrl->nr_irqs) {
    722		dev_warn(&pdev->dev, "using shared MSI\n");
    723
    724		/* Patch msi vectors, use only one (shared) */
    725		for (i = 0; i < mhi_cntrl_config->num_events; i++)
    726			mhi_cntrl_config->event_cfg[i].irq = 0;
    727		mhi_cntrl->nr_irqs = 1;
    728	}
    729
    730	irq = devm_kcalloc(&pdev->dev, mhi_cntrl->nr_irqs, sizeof(int), GFP_KERNEL);
    731	if (!irq)
    732		return -ENOMEM;
    733
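	/*
	 * Translate each MHI IRQ index into a Linux IRQ number; any index
	 * beyond the number of allocated vectors reuses the last vector.
	 */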
    734	for (i = 0; i < mhi_cntrl->nr_irqs; i++) {
    735		int vector = i >= nr_vectors ? (nr_vectors - 1) : i;
    736
    737		irq[i] = pci_irq_vector(pdev, vector);
    738	}
    739
    740	mhi_cntrl->irq = irq;
    741
    742	return 0;
    743}
    744
    745static int mhi_pci_runtime_get(struct mhi_controller *mhi_cntrl)
    746{
    747	/* The runtime_get() MHI callback means:
     748	 *    Do whatever is required to leave M3.
    749	 */
    750	return pm_runtime_get(mhi_cntrl->cntrl_dev);
    751}
    752
    753static void mhi_pci_runtime_put(struct mhi_controller *mhi_cntrl)
    754{
    755	/* The runtime_put() MHI callback means:
     756	 *    The device can be moved into the M3 state.
    757	 */
    758	pm_runtime_mark_last_busy(mhi_cntrl->cntrl_dev);
    759	pm_runtime_put(mhi_cntrl->cntrl_dev);
    760}
    761
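/*
 * Recovery worker: tear down the MHI stack, restore the saved PCI state and
 * try to power the device back up; if that fails, fall back to a PCI
 * function-level reset.
 */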
    762static void mhi_pci_recovery_work(struct work_struct *work)
    763{
    764	struct mhi_pci_device *mhi_pdev = container_of(work, struct mhi_pci_device,
    765						       recovery_work);
    766	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
    767	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
    768	int err;
    769
    770	dev_warn(&pdev->dev, "device recovery started\n");
    771
    772	del_timer(&mhi_pdev->health_check_timer);
    773	pm_runtime_forbid(&pdev->dev);
    774
    775	/* Clean up MHI state */
    776	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
    777		mhi_power_down(mhi_cntrl, false);
    778		mhi_unprepare_after_power_down(mhi_cntrl);
    779	}
    780
    781	pci_set_power_state(pdev, PCI_D0);
    782	pci_load_saved_state(pdev, mhi_pdev->pci_state);
    783	pci_restore_state(pdev);
    784
    785	if (!mhi_pci_is_alive(mhi_cntrl))
    786		goto err_try_reset;
    787
    788	err = mhi_prepare_for_power_up(mhi_cntrl);
    789	if (err)
    790		goto err_try_reset;
    791
    792	err = mhi_sync_power_up(mhi_cntrl);
    793	if (err)
    794		goto err_unprepare;
    795
    796	dev_dbg(&pdev->dev, "Recovery completed\n");
    797
    798	set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
    799	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
    800	return;
    801
    802err_unprepare:
    803	mhi_unprepare_after_power_down(mhi_cntrl);
    804err_try_reset:
    805	if (pci_reset_function(pdev))
    806		dev_err(&pdev->dev, "Recovery failed\n");
    807}
    808
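/*
 * Periodic health check: every HEALTH_CHECK_PERIOD, verify that the device
 * still responds on the bus and queue the recovery worker if it does not.
 * The timer is not rearmed while the device is stopped or suspended.
 */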
    809static void health_check(struct timer_list *t)
    810{
    811	struct mhi_pci_device *mhi_pdev = from_timer(mhi_pdev, t, health_check_timer);
    812	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
    813
    814	if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
    815			test_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
    816		return;
    817
    818	if (!mhi_pci_is_alive(mhi_cntrl)) {
    819		dev_err(mhi_cntrl->cntrl_dev, "Device died\n");
    820		queue_work(system_long_wq, &mhi_pdev->recovery_work);
    821		return;
    822	}
    823
    824	/* reschedule in two seconds */
    825	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
    826}
    827
    828static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    829{
    830	const struct mhi_pci_dev_info *info = (struct mhi_pci_dev_info *) id->driver_data;
    831	const struct mhi_controller_config *mhi_cntrl_config;
    832	struct mhi_pci_device *mhi_pdev;
    833	struct mhi_controller *mhi_cntrl;
    834	int err;
    835
    836	dev_dbg(&pdev->dev, "MHI PCI device found: %s\n", info->name);
    837
    838	/* mhi_pdev.mhi_cntrl must be zero-initialized */
    839	mhi_pdev = devm_kzalloc(&pdev->dev, sizeof(*mhi_pdev), GFP_KERNEL);
    840	if (!mhi_pdev)
    841		return -ENOMEM;
    842
    843	INIT_WORK(&mhi_pdev->recovery_work, mhi_pci_recovery_work);
    844	timer_setup(&mhi_pdev->health_check_timer, health_check, 0);
    845
    846	mhi_cntrl_config = info->config;
    847	mhi_cntrl = &mhi_pdev->mhi_cntrl;
    848
    849	mhi_cntrl->cntrl_dev = &pdev->dev;
    850	mhi_cntrl->iova_start = 0;
    851	mhi_cntrl->iova_stop = (dma_addr_t)DMA_BIT_MASK(info->dma_data_width);
    852	mhi_cntrl->fw_image = info->fw;
    853	mhi_cntrl->edl_image = info->edl;
    854
    855	mhi_cntrl->read_reg = mhi_pci_read_reg;
    856	mhi_cntrl->write_reg = mhi_pci_write_reg;
    857	mhi_cntrl->status_cb = mhi_pci_status_cb;
    858	mhi_cntrl->runtime_get = mhi_pci_runtime_get;
    859	mhi_cntrl->runtime_put = mhi_pci_runtime_put;
    860	mhi_cntrl->mru = info->mru_default;
    861
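	/*
	 * Devices with a dedicated sideband wake GPIO do not use the inband
	 * device wake mechanism, so stub out the wake hooks.
	 */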
    862	if (info->sideband_wake) {
    863		mhi_cntrl->wake_get = mhi_pci_wake_get_nop;
    864		mhi_cntrl->wake_put = mhi_pci_wake_put_nop;
    865		mhi_cntrl->wake_toggle = mhi_pci_wake_toggle_nop;
    866	}
    867
    868	err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(info->dma_data_width));
    869	if (err)
    870		return err;
    871
    872	err = mhi_pci_get_irqs(mhi_cntrl, mhi_cntrl_config);
    873	if (err)
    874		return err;
    875
    876	pci_set_drvdata(pdev, mhi_pdev);
    877
     878	/* Keep a stored copy of the PCI config space at hand for restoring after a
     879	 * sudden PCI error: cache the state locally and discard the PCI core's copy.
     880	 */
    881	pci_save_state(pdev);
    882	mhi_pdev->pci_state = pci_store_saved_state(pdev);
    883	pci_load_saved_state(pdev, NULL);
    884
    885	pci_enable_pcie_error_reporting(pdev);
    886
    887	err = mhi_register_controller(mhi_cntrl, mhi_cntrl_config);
    888	if (err)
    889		goto err_disable_reporting;
    890
    891	/* MHI bus does not power up the controller by default */
    892	err = mhi_prepare_for_power_up(mhi_cntrl);
    893	if (err) {
    894		dev_err(&pdev->dev, "failed to prepare MHI controller\n");
    895		goto err_unregister;
    896	}
    897
    898	err = mhi_sync_power_up(mhi_cntrl);
    899	if (err) {
    900		dev_err(&pdev->dev, "failed to power up MHI controller\n");
    901		goto err_unprepare;
    902	}
    903
    904	set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
    905
    906	/* start health check */
    907	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
    908
    909	/* Only allow runtime-suspend if PME capable (for wakeup) */
    910	if (pci_pme_capable(pdev, PCI_D3hot)) {
    911		pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
    912		pm_runtime_use_autosuspend(&pdev->dev);
    913		pm_runtime_mark_last_busy(&pdev->dev);
    914		pm_runtime_put_noidle(&pdev->dev);
    915	}
    916
    917	return 0;
    918
    919err_unprepare:
    920	mhi_unprepare_after_power_down(mhi_cntrl);
    921err_unregister:
    922	mhi_unregister_controller(mhi_cntrl);
    923err_disable_reporting:
    924	pci_disable_pcie_error_reporting(pdev);
    925
    926	return err;
    927}
    928
    929static void mhi_pci_remove(struct pci_dev *pdev)
    930{
    931	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
    932	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
    933
    934	del_timer_sync(&mhi_pdev->health_check_timer);
    935	cancel_work_sync(&mhi_pdev->recovery_work);
    936
    937	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
    938		mhi_power_down(mhi_cntrl, true);
    939		mhi_unprepare_after_power_down(mhi_cntrl);
    940	}
    941
    942	/* balancing probe put_noidle */
    943	if (pci_pme_capable(pdev, PCI_D3hot))
    944		pm_runtime_get_noresume(&pdev->dev);
    945
    946	mhi_unregister_controller(mhi_cntrl);
    947	pci_disable_pcie_error_reporting(pdev);
    948}
    949
    950static void mhi_pci_shutdown(struct pci_dev *pdev)
    951{
    952	mhi_pci_remove(pdev);
    953	pci_set_power_state(pdev, PCI_D3hot);
    954}
    955
    956static void mhi_pci_reset_prepare(struct pci_dev *pdev)
    957{
    958	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
    959	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
    960
    961	dev_info(&pdev->dev, "reset\n");
    962
    963	del_timer(&mhi_pdev->health_check_timer);
    964
    965	/* Clean up MHI state */
    966	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
    967		mhi_power_down(mhi_cntrl, false);
    968		mhi_unprepare_after_power_down(mhi_cntrl);
    969	}
    970
    971	/* cause internal device reset */
    972	mhi_soc_reset(mhi_cntrl);
    973
     974	/* Give the device time to complete its internal reset */
    975	msleep(MHI_POST_RESET_DELAY_MS);
    976}
    977
    978static void mhi_pci_reset_done(struct pci_dev *pdev)
    979{
    980	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
    981	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
    982	int err;
    983
    984	/* Restore initial known working PCI state */
    985	pci_load_saved_state(pdev, mhi_pdev->pci_state);
    986	pci_restore_state(pdev);
    987
     988	/* Is the device status available? */
    989	if (!mhi_pci_is_alive(mhi_cntrl)) {
    990		dev_err(&pdev->dev, "reset failed\n");
    991		return;
    992	}
    993
    994	err = mhi_prepare_for_power_up(mhi_cntrl);
    995	if (err) {
    996		dev_err(&pdev->dev, "failed to prepare MHI controller\n");
    997		return;
    998	}
    999
   1000	err = mhi_sync_power_up(mhi_cntrl);
   1001	if (err) {
   1002		dev_err(&pdev->dev, "failed to power up MHI controller\n");
   1003		mhi_unprepare_after_power_down(mhi_cntrl);
   1004		return;
   1005	}
   1006
   1007	set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
   1008	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
   1009}
   1010
   1011static pci_ers_result_t mhi_pci_error_detected(struct pci_dev *pdev,
   1012					       pci_channel_state_t state)
   1013{
   1014	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
   1015	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
   1016
   1017	dev_err(&pdev->dev, "PCI error detected, state = %u\n", state);
   1018
   1019	if (state == pci_channel_io_perm_failure)
   1020		return PCI_ERS_RESULT_DISCONNECT;
   1021
   1022	/* Clean up MHI state */
   1023	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
   1024		mhi_power_down(mhi_cntrl, false);
   1025		mhi_unprepare_after_power_down(mhi_cntrl);
   1026	} else {
   1027		/* Nothing to do */
   1028		return PCI_ERS_RESULT_RECOVERED;
   1029	}
   1030
   1031	pci_disable_device(pdev);
   1032
   1033	return PCI_ERS_RESULT_NEED_RESET;
   1034}
   1035
   1036static pci_ers_result_t mhi_pci_slot_reset(struct pci_dev *pdev)
   1037{
   1038	if (pci_enable_device(pdev)) {
   1039		dev_err(&pdev->dev, "Cannot re-enable PCI device after reset.\n");
   1040		return PCI_ERS_RESULT_DISCONNECT;
   1041	}
   1042
   1043	return PCI_ERS_RESULT_RECOVERED;
   1044}
   1045
   1046static void mhi_pci_io_resume(struct pci_dev *pdev)
   1047{
   1048	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
   1049
   1050	dev_err(&pdev->dev, "PCI slot reset done\n");
   1051
   1052	queue_work(system_long_wq, &mhi_pdev->recovery_work);
   1053}
   1054
   1055static const struct pci_error_handlers mhi_pci_err_handler = {
   1056	.error_detected = mhi_pci_error_detected,
   1057	.slot_reset = mhi_pci_slot_reset,
   1058	.resume = mhi_pci_io_resume,
   1059	.reset_prepare = mhi_pci_reset_prepare,
   1060	.reset_done = mhi_pci_reset_done,
   1061};
   1062
   1063static int  __maybe_unused mhi_pci_runtime_suspend(struct device *dev)
   1064{
   1065	struct pci_dev *pdev = to_pci_dev(dev);
   1066	struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
   1067	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
   1068	int err;
   1069
   1070	if (test_and_set_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
   1071		return 0;
   1072
   1073	del_timer(&mhi_pdev->health_check_timer);
   1074	cancel_work_sync(&mhi_pdev->recovery_work);
   1075
   1076	if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
   1077			mhi_cntrl->ee != MHI_EE_AMSS)
   1078		goto pci_suspend; /* Nothing to do at MHI level */
   1079
   1080	/* Transition to M3 state */
   1081	err = mhi_pm_suspend(mhi_cntrl);
   1082	if (err) {
   1083		dev_err(&pdev->dev, "failed to suspend device: %d\n", err);
   1084		clear_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status);
   1085		return -EBUSY;
   1086	}
   1087
   1088pci_suspend:
   1089	pci_disable_device(pdev);
   1090	pci_wake_from_d3(pdev, true);
   1091
   1092	return 0;
   1093}
   1094
   1095static int __maybe_unused mhi_pci_runtime_resume(struct device *dev)
   1096{
   1097	struct pci_dev *pdev = to_pci_dev(dev);
   1098	struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
   1099	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
   1100	int err;
   1101
   1102	if (!test_and_clear_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
   1103		return 0;
   1104
   1105	err = pci_enable_device(pdev);
   1106	if (err)
   1107		goto err_recovery;
   1108
   1109	pci_set_master(pdev);
   1110	pci_wake_from_d3(pdev, false);
   1111
   1112	if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
   1113			mhi_cntrl->ee != MHI_EE_AMSS)
   1114		return 0; /* Nothing to do at MHI level */
   1115
   1116	/* Exit M3, transition to M0 state */
   1117	err = mhi_pm_resume(mhi_cntrl);
   1118	if (err) {
   1119		dev_err(&pdev->dev, "failed to resume device: %d\n", err);
   1120		goto err_recovery;
   1121	}
   1122
   1123	/* Resume health check */
   1124	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
   1125
    1126	/* This can be a remote wakeup (no MHI runtime_get); update the last-busy time */
   1127	pm_runtime_mark_last_busy(dev);
   1128
   1129	return 0;
   1130
   1131err_recovery:
    1132	/* Do not return an error here, to avoid messing up the PCI device state;
    1133	 * the device likely lost power (d3cold) and simply needs to be reset from
    1134	 * the recovery procedure. Trigger the recovery asynchronously so it does
    1135	 * not delay system suspend exit.
    1136	 */
   1137	queue_work(system_long_wq, &mhi_pdev->recovery_work);
   1138	pm_runtime_mark_last_busy(dev);
   1139
   1140	return 0;
   1141}
   1142
   1143static int  __maybe_unused mhi_pci_suspend(struct device *dev)
   1144{
   1145	pm_runtime_disable(dev);
   1146	return mhi_pci_runtime_suspend(dev);
   1147}
   1148
   1149static int __maybe_unused mhi_pci_resume(struct device *dev)
   1150{
   1151	int ret;
   1152
    1153	/* Depending on the platform, the device may have lost power (d3cold); we
    1154	 * need to resume it now to check its state and recover when necessary.
    1155	 */
   1156	ret = mhi_pci_runtime_resume(dev);
   1157	pm_runtime_enable(dev);
   1158
   1159	return ret;
   1160}
   1161
   1162static int __maybe_unused mhi_pci_freeze(struct device *dev)
   1163{
   1164	struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
   1165	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
   1166
    1167	/* We want to stop all operations: hibernation does not guarantee that the
    1168	 * device will be in the same state as before freezing, especially if the
    1169	 * intermediate restore kernel reinitializes the MHI device with a new
    1170	 * context.
    1171	 */
   1172	flush_work(&mhi_pdev->recovery_work);
   1173	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
   1174		mhi_power_down(mhi_cntrl, true);
   1175		mhi_unprepare_after_power_down(mhi_cntrl);
   1176	}
   1177
   1178	return 0;
   1179}
   1180
   1181static int __maybe_unused mhi_pci_restore(struct device *dev)
   1182{
   1183	struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
   1184
   1185	/* Reinitialize the device */
   1186	queue_work(system_long_wq, &mhi_pdev->recovery_work);
   1187
   1188	return 0;
   1189}
   1190
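/*
 * Runtime PM drives M3 entry/exit. System suspend/resume reuse the runtime
 * hooks with runtime PM disabled, while hibernation (freeze/poweroff and
 * thaw/restore) powers the device down and re-initializes it through the
 * recovery worker.
 */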
   1191static const struct dev_pm_ops mhi_pci_pm_ops = {
   1192	SET_RUNTIME_PM_OPS(mhi_pci_runtime_suspend, mhi_pci_runtime_resume, NULL)
   1193#ifdef CONFIG_PM_SLEEP
   1194	.suspend = mhi_pci_suspend,
   1195	.resume = mhi_pci_resume,
   1196	.freeze = mhi_pci_freeze,
   1197	.thaw = mhi_pci_restore,
   1198	.poweroff = mhi_pci_freeze,
   1199	.restore = mhi_pci_restore,
   1200#endif
   1201};
   1202
   1203static struct pci_driver mhi_pci_driver = {
   1204	.name		= "mhi-pci-generic",
   1205	.id_table	= mhi_pci_id_table,
   1206	.probe		= mhi_pci_probe,
   1207	.remove		= mhi_pci_remove,
   1208	.shutdown	= mhi_pci_shutdown,
   1209	.err_handler	= &mhi_pci_err_handler,
   1210	.driver.pm	= &mhi_pci_pm_ops
   1211};
   1212module_pci_driver(mhi_pci_driver);
   1213
   1214MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
   1215MODULE_DESCRIPTION("Modem Host Interface (MHI) PCI controller driver");
   1216MODULE_LICENSE("GPL");