cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

topaz_pcie.c (30273B)


// SPDX-License-Identifier: GPL-2.0+
/* Copyright (c) 2018 Quantenna Communications */

#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/crc32.h>
#include <linux/completion.h>
#include <linux/spinlock.h>
#include <linux/circ_buf.h>

#include "pcie_priv.h"
#include "topaz_pcie_regs.h"
#include "topaz_pcie_ipc.h"
#include "qtn_hw_ids.h"
#include "core.h"
#include "bus.h"
#include "shm_ipc.h"
#include "debug.h"

#define TOPAZ_TX_BD_SIZE_DEFAULT	128
#define TOPAZ_RX_BD_SIZE_DEFAULT	256

struct qtnf_topaz_tx_bd {
	__le32 addr;
	__le32 info;
} __packed;

struct qtnf_topaz_rx_bd {
	__le32 addr;
	__le32 info;
} __packed;

struct qtnf_extra_bd_params {
	__le32 param1;
	__le32 param2;
	__le32 param3;
	__le32 param4;
} __packed;

#define QTNF_BD_PARAM_OFFSET(n)	offsetof(struct qtnf_extra_bd_params, param##n)

struct vmac_pkt_info {
	__le32 addr;
	__le32 info;
};

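/* Board data area (BDA): control block shared with the card through the
 * endpoint memory BAR. The host publishes descriptor ring addresses, DMA
 * settings and firmware-download state here; the card reports link state
 * and interrupt status back through the same structure.
 */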
struct qtnf_topaz_bda {
	__le16	bda_len;
	__le16	bda_version;
	__le32	bda_bootstate;
	__le32	bda_dma_mask;
	__le32	bda_dma_offset;
	__le32	bda_flags;
	__le32	bda_img;
	__le32	bda_img_size;
	__le32	bda_ep2h_irqstatus;
	__le32	bda_h2ep_irqstatus;
	__le32	bda_msi_addr;
	u8	reserved1[56];
	__le32	bda_flashsz;
	u8	bda_boardname[PCIE_BDA_NAMELEN];
	__le32	bda_pci_pre_status;
	__le32	bda_pci_endian;
	__le32	bda_pci_post_status;
	__le32	bda_h2ep_txd_budget;
	__le32	bda_ep2h_txd_budget;
	__le32	bda_rc_rx_bd_base;
	__le32	bda_rc_rx_bd_num;
	__le32	bda_rc_tx_bd_base;
	__le32	bda_rc_tx_bd_num;
	u8	bda_ep_link_state;
	u8	bda_rc_link_state;
	u8	bda_rc_msi_enabled;
	u8	reserved2;
	__le32	bda_ep_next_pkt;
	struct vmac_pkt_info request[QTN_PCIE_RC_TX_QUEUE_LEN];
	struct qtnf_shm_ipc_region bda_shm_reg1 __aligned(4096);
	struct qtnf_shm_ipc_region bda_shm_reg2 __aligned(4096);
} __packed;

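/* Per-device driver state: wraps the common qtnfmac PCIe private data and
 * adds the Topaz descriptor rings plus pointers into the extra shared
 * parameters used for TX reclaim, queue wake-up and PM state signaling.
 */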
struct qtnf_pcie_topaz_state {
	struct qtnf_pcie_bus_priv base;
	struct qtnf_topaz_bda __iomem *bda;

	dma_addr_t dma_msi_dummy;
	u32 dma_msi_imwr;

	struct qtnf_topaz_tx_bd *tx_bd_vbase;
	struct qtnf_topaz_rx_bd *rx_bd_vbase;

	__le32 __iomem *ep_next_rx_pkt;
	__le32 __iomem *txqueue_wake;
	__le32 __iomem *ep_pmstate;

	unsigned long rx_pkt_count;
};

static void qtnf_deassert_intx(struct qtnf_pcie_topaz_state *ts)
{
	void __iomem *reg = ts->base.sysctl_bar + TOPAZ_PCIE_CFG0_OFFSET;
	u32 cfg;

	cfg = readl(reg);
	cfg &= ~TOPAZ_ASSERT_INTX;
	qtnf_non_posted_write(cfg, reg);
}

static inline int qtnf_topaz_intx_asserted(struct qtnf_pcie_topaz_state *ts)
{
	void __iomem *reg = ts->base.sysctl_bar + TOPAZ_PCIE_CFG0_OFFSET;
	u32 cfg = readl(reg);

	return !!(cfg & TOPAZ_ASSERT_INTX);
}

static void qtnf_topaz_reset_ep(struct qtnf_pcie_topaz_state *ts)
{
	writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_RST_EP_IRQ),
	       TOPAZ_LH_IPC4_INT(ts->base.sysctl_bar));
	msleep(QTN_EP_RESET_WAIT_MS);
	pci_restore_state(ts->base.pdev);
}

static void setup_rx_irqs(struct qtnf_pcie_topaz_state *ts)
{
	void __iomem *reg = PCIE_DMA_WR_DONE_IMWR_ADDR_LOW(ts->base.dmareg_bar);

	ts->dma_msi_imwr = readl(reg);
}

static void enable_rx_irqs(struct qtnf_pcie_topaz_state *ts)
{
	void __iomem *reg = PCIE_DMA_WR_DONE_IMWR_ADDR_LOW(ts->base.dmareg_bar);

	qtnf_non_posted_write(ts->dma_msi_imwr, reg);
}

static void disable_rx_irqs(struct qtnf_pcie_topaz_state *ts)
{
	void __iomem *reg = PCIE_DMA_WR_DONE_IMWR_ADDR_LOW(ts->base.dmareg_bar);

	qtnf_non_posted_write(QTN_HOST_LO32(ts->dma_msi_dummy), reg);
}

static void qtnf_topaz_ipc_gen_ep_int(void *arg)
{
	struct qtnf_pcie_topaz_state *ts = arg;

	writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_CTRL_IRQ),
	       TOPAZ_CTL_M2L_INT(ts->base.sysctl_bar));
}

static int qtnf_is_state(__le32 __iomem *reg, u32 state)
{
	u32 s = readl(reg);

	return (s == state);
}

static void qtnf_set_state(__le32 __iomem *reg, u32 state)
{
	qtnf_non_posted_write(state, reg);
}

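/* Poll a bootstate register until it reaches @state, sleeping ~1 ms per
 * iteration; gives up and returns -1 after roughly @delay_in_ms.
 */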
static int qtnf_poll_state(__le32 __iomem *reg, u32 state, u32 delay_in_ms)
{
	u32 timeout = 0;

	while (!qtnf_is_state(reg, state)) {
		usleep_range(1000, 1200);
		if (++timeout > delay_in_ms)
			return -1;
	}

	return 0;
}

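/* Allocate one coherent DMA region holding the TX ring, the RX ring and
 * the extra shared parameters, then publish the bus addresses to the card
 * through the BDA.
 */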
static int topaz_alloc_bd_table(struct qtnf_pcie_topaz_state *ts,
				struct qtnf_topaz_bda __iomem *bda)
{
	struct qtnf_extra_bd_params __iomem *extra_params;
	struct qtnf_pcie_bus_priv *priv = &ts->base;
	dma_addr_t paddr;
	void *vaddr;
	int len;
	int i;

	/* bd table */

	len = priv->tx_bd_num * sizeof(struct qtnf_topaz_tx_bd) +
		priv->rx_bd_num * sizeof(struct qtnf_topaz_rx_bd) +
			sizeof(struct qtnf_extra_bd_params);

	vaddr = dmam_alloc_coherent(&priv->pdev->dev, len, &paddr, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	/* tx bd */

	ts->tx_bd_vbase = vaddr;
	qtnf_non_posted_write(paddr, &bda->bda_rc_tx_bd_base);

	for (i = 0; i < priv->tx_bd_num; i++)
		ts->tx_bd_vbase[i].info |= cpu_to_le32(QTN_BD_EMPTY);

	pr_debug("TX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);

	priv->tx_bd_r_index = 0;
	priv->tx_bd_w_index = 0;

	/* rx bd */

	vaddr = ((struct qtnf_topaz_tx_bd *)vaddr) + priv->tx_bd_num;
	paddr += priv->tx_bd_num * sizeof(struct qtnf_topaz_tx_bd);

	ts->rx_bd_vbase = vaddr;
	qtnf_non_posted_write(paddr, &bda->bda_rc_rx_bd_base);

	pr_debug("RX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);

	/* extra shared params */

	vaddr = ((struct qtnf_topaz_rx_bd *)vaddr) + priv->rx_bd_num;
	paddr += priv->rx_bd_num * sizeof(struct qtnf_topaz_rx_bd);

	extra_params = (struct qtnf_extra_bd_params __iomem *)vaddr;

	ts->ep_next_rx_pkt = &extra_params->param1;
	qtnf_non_posted_write(paddr + QTNF_BD_PARAM_OFFSET(1),
			      &bda->bda_ep_next_pkt);
	ts->txqueue_wake = &extra_params->param2;
	ts->ep_pmstate = &extra_params->param3;
	ts->dma_msi_dummy = paddr + QTNF_BD_PARAM_OFFSET(4);

	return 0;
}

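/* Attach a freshly allocated skb to RX descriptor @index and map it for
 * device-to-host DMA; @wrap carries QTN_BD_WRAP for the last descriptor.
 */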
static int
topaz_skb2rbd_attach(struct qtnf_pcie_topaz_state *ts, u16 index, u32 wrap)
{
	struct qtnf_topaz_rx_bd *rxbd = &ts->rx_bd_vbase[index];
	struct sk_buff *skb;
	dma_addr_t paddr;

	skb = netdev_alloc_skb_ip_align(NULL, SKB_BUF_SIZE);
	if (!skb) {
		ts->base.rx_skb[index] = NULL;
		return -ENOMEM;
	}

	ts->base.rx_skb[index] = skb;

	paddr = dma_map_single(&ts->base.pdev->dev, skb->data, SKB_BUF_SIZE,
			       DMA_FROM_DEVICE);
	if (dma_mapping_error(&ts->base.pdev->dev, paddr)) {
		pr_err("skb mapping error: %pad\n", &paddr);
		return -ENOMEM;
	}

	rxbd->addr = cpu_to_le32(QTN_HOST_LO32(paddr));
	rxbd->info = cpu_to_le32(QTN_BD_EMPTY | wrap);

	ts->base.rx_bd_w_index = index;

	return 0;
}

static int topaz_alloc_rx_buffers(struct qtnf_pcie_topaz_state *ts)
{
	u16 i;
	int ret = 0;

	memset(ts->rx_bd_vbase, 0x0,
	       ts->base.rx_bd_num * sizeof(struct qtnf_topaz_rx_bd));

	for (i = 0; i < ts->base.rx_bd_num; i++) {
		ret = topaz_skb2rbd_attach(ts, i, 0);
		if (ret)
			break;
	}

	ts->rx_bd_vbase[ts->base.rx_bd_num - 1].info |=
						cpu_to_le32(QTN_BD_WRAP);

	return ret;
}

/* all rx/tx activity should have ceased before calling this function */
static void qtnf_topaz_free_xfer_buffers(struct qtnf_pcie_topaz_state *ts)
{
	struct qtnf_pcie_bus_priv *priv = &ts->base;
	struct qtnf_topaz_rx_bd *rxbd;
	struct qtnf_topaz_tx_bd *txbd;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int i;

	/* free rx buffers */
	for (i = 0; i < priv->rx_bd_num; i++) {
		if (priv->rx_skb && priv->rx_skb[i]) {
			rxbd = &ts->rx_bd_vbase[i];
			skb = priv->rx_skb[i];
			paddr = QTN_HOST_ADDR(0x0, le32_to_cpu(rxbd->addr));
			dma_unmap_single(&priv->pdev->dev, paddr,
					 SKB_BUF_SIZE, DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			priv->rx_skb[i] = NULL;
			rxbd->addr = 0;
			rxbd->info = 0;
		}
	}

	/* free tx buffers */
	for (i = 0; i < priv->tx_bd_num; i++) {
		if (priv->tx_skb && priv->tx_skb[i]) {
			txbd = &ts->tx_bd_vbase[i];
			skb = priv->tx_skb[i];
			paddr = QTN_HOST_ADDR(0x0, le32_to_cpu(txbd->addr));
			dma_unmap_single(&priv->pdev->dev, paddr,
					 SKB_BUF_SIZE, DMA_TO_DEVICE);
			dev_kfree_skb_any(skb);
			priv->tx_skb[i] = NULL;
			txbd->addr = 0;
			txbd->info = 0;
		}
	}
}

static int qtnf_pcie_topaz_init_xfer(struct qtnf_pcie_topaz_state *ts,
				     unsigned int tx_bd_size,
				     unsigned int rx_bd_size)
{
	struct qtnf_topaz_bda __iomem *bda = ts->bda;
	struct qtnf_pcie_bus_priv *priv = &ts->base;
	int ret;

	if (tx_bd_size == 0)
		tx_bd_size = TOPAZ_TX_BD_SIZE_DEFAULT;

	/* check TX BD queue max length according to struct qtnf_topaz_bda */
	if (tx_bd_size > QTN_PCIE_RC_TX_QUEUE_LEN) {
		pr_warn("TX BD queue cannot exceed %d\n",
			QTN_PCIE_RC_TX_QUEUE_LEN);
		tx_bd_size = QTN_PCIE_RC_TX_QUEUE_LEN;
	}

	priv->tx_bd_num = tx_bd_size;
	qtnf_non_posted_write(priv->tx_bd_num, &bda->bda_rc_tx_bd_num);

	if (rx_bd_size == 0)
		rx_bd_size = TOPAZ_RX_BD_SIZE_DEFAULT;

	if (rx_bd_size > TOPAZ_RX_BD_SIZE_DEFAULT) {
		pr_warn("RX BD queue cannot exceed %d\n",
			TOPAZ_RX_BD_SIZE_DEFAULT);
		rx_bd_size = TOPAZ_RX_BD_SIZE_DEFAULT;
	}

	priv->rx_bd_num = rx_bd_size;
	qtnf_non_posted_write(priv->rx_bd_num, &bda->bda_rc_rx_bd_num);

	priv->rx_bd_w_index = 0;
	priv->rx_bd_r_index = 0;

	ret = qtnf_pcie_alloc_skb_array(priv);
	if (ret) {
		pr_err("failed to allocate skb array\n");
		return ret;
	}

	ret = topaz_alloc_bd_table(ts, bda);
	if (ret) {
		pr_err("failed to allocate bd table\n");
		return ret;
	}

	ret = topaz_alloc_rx_buffers(ts);
	if (ret) {
		pr_err("failed to allocate rx buffers\n");
		return ret;
	}

	return ret;
}

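/* Reclaim completed TX buffers: the card advances ep_next_rx_pkt as it
 * consumes packets; unmap and free every skb up to that index and move
 * the ring read pointer forward.
 */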
static void qtnf_topaz_data_tx_reclaim(struct qtnf_pcie_topaz_state *ts)
{
	struct qtnf_pcie_bus_priv *priv = &ts->base;
	struct qtnf_topaz_tx_bd *txbd;
	struct sk_buff *skb;
	unsigned long flags;
	dma_addr_t paddr;
	u32 tx_done_index;
	int count = 0;
	int i;

	spin_lock_irqsave(&priv->tx_reclaim_lock, flags);

	tx_done_index = readl(ts->ep_next_rx_pkt);
	i = priv->tx_bd_r_index;

	if (CIRC_CNT(priv->tx_bd_w_index, tx_done_index, priv->tx_bd_num))
		writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_TX_DONE_IRQ),
		       TOPAZ_LH_IPC4_INT(priv->sysctl_bar));

	while (CIRC_CNT(tx_done_index, i, priv->tx_bd_num)) {
		skb = priv->tx_skb[i];

		if (likely(skb)) {
			txbd = &ts->tx_bd_vbase[i];
			paddr = QTN_HOST_ADDR(0x0, le32_to_cpu(txbd->addr));
			dma_unmap_single(&priv->pdev->dev, paddr, skb->len,
					 DMA_TO_DEVICE);

			if (skb->dev) {
				dev_sw_netstats_tx_add(skb->dev, 1, skb->len);
				if (unlikely(priv->tx_stopped)) {
					qtnf_wake_all_queues(skb->dev);
					priv->tx_stopped = 0;
				}
			}

			dev_kfree_skb_any(skb);
		}

		priv->tx_skb[i] = NULL;
		count++;

		if (++i >= priv->tx_bd_num)
			i = 0;
	}

	priv->tx_reclaim_done += count;
	priv->tx_reclaim_req++;
	priv->tx_bd_r_index = i;

	spin_unlock_irqrestore(&priv->tx_reclaim_lock, flags);
}

static void qtnf_try_stop_xmit(struct qtnf_bus *bus, struct net_device *ndev)
{
	struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);

	if (ndev) {
		netif_tx_stop_all_queues(ndev);
		ts->base.tx_stopped = 1;
	}

	writel(0x0, ts->txqueue_wake);

	/* sync up tx queue status before generating interrupt */
	dma_wmb();

	/* send irq to card: tx stopped */
	writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_TX_STOP_IRQ),
	       TOPAZ_LH_IPC4_INT(ts->base.sysctl_bar));

	/* schedule reclaim attempt */
	tasklet_hi_schedule(&ts->base.reclaim_tq);
}

static void qtnf_try_wake_xmit(struct qtnf_bus *bus, struct net_device *ndev)
{
	struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
	int ready;

	ready = readl(ts->txqueue_wake);
	if (ready) {
		netif_wake_queue(ndev);
	} else {
		/* re-send irq to card: tx stopped */
		writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_TX_STOP_IRQ),
		       TOPAZ_LH_IPC4_INT(ts->base.sysctl_bar));
	}
}

static int qtnf_tx_queue_ready(struct qtnf_pcie_topaz_state *ts)
{
	struct qtnf_pcie_bus_priv *priv = &ts->base;

	if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index,
			priv->tx_bd_num)) {
		qtnf_topaz_data_tx_reclaim(ts);

		if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index,
				priv->tx_bd_num)) {
			priv->tx_full_count++;
			return 0;
		}
	}

	return 1;
}

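/* Transmit one skb: map it, fill the next TX descriptor, mirror the bus
 * address and length into the BDA request array, then ring the card's
 * IPC doorbell.
 */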
static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb,
			     unsigned int macid, unsigned int vifid)
{
	struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
	struct qtnf_pcie_bus_priv *priv = &ts->base;
	struct qtnf_topaz_bda __iomem *bda = ts->bda;
	struct qtnf_topaz_tx_bd *txbd;
	dma_addr_t skb_paddr;
	unsigned long flags;
	int ret = 0;
	int len;
	int i;

	spin_lock_irqsave(&priv->tx_lock, flags);

	if (!qtnf_tx_queue_ready(ts)) {
		qtnf_try_stop_xmit(bus, skb->dev);
		spin_unlock_irqrestore(&priv->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

	i = priv->tx_bd_w_index;
	priv->tx_skb[i] = skb;
	len = skb->len;

	skb_paddr = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
				   DMA_TO_DEVICE);
	if (dma_mapping_error(&priv->pdev->dev, skb_paddr)) {
		ret = -ENOMEM;
		goto tx_done;
	}

	txbd = &ts->tx_bd_vbase[i];
	txbd->addr = cpu_to_le32(QTN_HOST_LO32(skb_paddr));

	writel(QTN_HOST_LO32(skb_paddr), &bda->request[i].addr);
	writel(len | QTN_PCIE_TX_VALID_PKT, &bda->request[i].info);

	/* sync up descriptor updates before generating interrupt */
	dma_wmb();

	/* generate irq to card: tx done */
	writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_TX_DONE_IRQ),
	       TOPAZ_LH_IPC4_INT(priv->sysctl_bar));

	if (++i >= priv->tx_bd_num)
		i = 0;

	priv->tx_bd_w_index = i;

tx_done:
	if (ret) {
		if (skb->dev)
			skb->dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	priv->tx_done_count++;
	spin_unlock_irqrestore(&priv->tx_lock, flags);

	qtnf_topaz_data_tx_reclaim(ts);

	return NETDEV_TX_OK;
}

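/* Top-half interrupt handler: acknowledge legacy INTx when MSI is off,
 * service the control-path shared-memory IPC, then defer RX processing
 * to NAPI and TX reclaim to the tasklet.
 */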
static irqreturn_t qtnf_pcie_topaz_interrupt(int irq, void *data)
{
	struct qtnf_bus *bus = (struct qtnf_bus *)data;
	struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
	struct qtnf_pcie_bus_priv *priv = &ts->base;

	if (!priv->msi_enabled && !qtnf_topaz_intx_asserted(ts))
		return IRQ_NONE;

	if (!priv->msi_enabled)
		qtnf_deassert_intx(ts);

	priv->pcie_irq_count++;

	qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_in);
	qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_out);

	if (napi_schedule_prep(&bus->mux_napi)) {
		disable_rx_irqs(ts);
		__napi_schedule(&bus->mux_napi);
	}

	tasklet_hi_schedule(&priv->reclaim_tq);

	return IRQ_HANDLED;
}

static int qtnf_rx_data_ready(struct qtnf_pcie_topaz_state *ts)
{
	u16 index = ts->base.rx_bd_r_index;
	struct qtnf_topaz_rx_bd *rxbd;
	u32 descw;

	rxbd = &ts->rx_bd_vbase[index];
	descw = le32_to_cpu(rxbd->info);

	if (descw & QTN_BD_EMPTY)
		return 0;

	return 1;
}

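/* NAPI poll: consume filled RX descriptors up to @budget, hand packets to
 * the protocol classifier, refill the ring with fresh skbs and re-enable
 * RX interrupts once the ring is drained.
 */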
static int qtnf_topaz_rx_poll(struct napi_struct *napi, int budget)
{
	struct qtnf_bus *bus = container_of(napi, struct qtnf_bus, mux_napi);
	struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
	struct qtnf_pcie_bus_priv *priv = &ts->base;
	struct net_device *ndev = NULL;
	struct sk_buff *skb = NULL;
	int processed = 0;
	struct qtnf_topaz_rx_bd *rxbd;
	dma_addr_t skb_paddr;
	int consume;
	u32 descw;
	u32 poffset;
	u32 psize;
	u16 r_idx;
	u16 w_idx;
	int ret;

	while (processed < budget) {
		if (!qtnf_rx_data_ready(ts))
			goto rx_out;

		r_idx = priv->rx_bd_r_index;
		rxbd = &ts->rx_bd_vbase[r_idx];
		descw = le32_to_cpu(rxbd->info);

		skb = priv->rx_skb[r_idx];
		poffset = QTN_GET_OFFSET(descw);
		psize = QTN_GET_LEN(descw);
		consume = 1;

		if (descw & QTN_BD_EMPTY) {
			pr_warn("skip invalid rxbd[%d]\n", r_idx);
			consume = 0;
		}

		if (!skb) {
			pr_warn("skip missing rx_skb[%d]\n", r_idx);
			consume = 0;
		}

		if (skb && (skb_tailroom(skb) < psize)) {
			pr_err("skip packet with invalid length: %u > %u\n",
			       psize, skb_tailroom(skb));
			consume = 0;
		}

		if (skb) {
			skb_paddr = QTN_HOST_ADDR(0x0, le32_to_cpu(rxbd->addr));
			dma_unmap_single(&priv->pdev->dev, skb_paddr,
					 SKB_BUF_SIZE, DMA_FROM_DEVICE);
		}

		if (consume) {
			skb_reserve(skb, poffset);
			skb_put(skb, psize);
			ndev = qtnf_classify_skb(bus, skb);
			if (likely(ndev)) {
				dev_sw_netstats_rx_add(ndev, skb->len);
				skb->protocol = eth_type_trans(skb, ndev);
				netif_receive_skb(skb);
			} else {
				pr_debug("drop untagged skb\n");
				bus->mux_dev.stats.rx_dropped++;
				dev_kfree_skb_any(skb);
			}
		} else {
			if (skb) {
				bus->mux_dev.stats.rx_dropped++;
				dev_kfree_skb_any(skb);
			}
		}

		/* notify the card about received packets once every few packets */
		if (((++ts->rx_pkt_count) & RX_DONE_INTR_MSK) == 0)
			writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_RX_DONE_IRQ),
			       TOPAZ_LH_IPC4_INT(priv->sysctl_bar));

		priv->rx_skb[r_idx] = NULL;
		if (++r_idx >= priv->rx_bd_num)
			r_idx = 0;

		priv->rx_bd_r_index = r_idx;

		/* replace each processed buffer with a new one */
		w_idx = priv->rx_bd_w_index;
		while (CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index,
				  priv->rx_bd_num) > 0) {
			if (++w_idx >= priv->rx_bd_num)
				w_idx = 0;

			ret = topaz_skb2rbd_attach(ts, w_idx,
						   descw & QTN_BD_WRAP);
			if (ret) {
				pr_err("failed to allocate new rx_skb[%d]\n",
				       w_idx);
				break;
			}
		}

		processed++;
	}

rx_out:
	if (processed < budget) {
		napi_complete(napi);
		enable_rx_irqs(ts);
	}

	return processed;
}

static void
qtnf_pcie_data_tx_timeout(struct qtnf_bus *bus, struct net_device *ndev)
{
	struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);

	qtnf_try_wake_xmit(bus, ndev);
	tasklet_hi_schedule(&ts->base.reclaim_tq);
}

static void qtnf_pcie_data_rx_start(struct qtnf_bus *bus)
{
	struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);

	napi_enable(&bus->mux_napi);
	enable_rx_irqs(ts);
}

static void qtnf_pcie_data_rx_stop(struct qtnf_bus *bus)
{
	struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);

	disable_rx_irqs(ts);
	napi_disable(&bus->mux_napi);
}

static struct qtnf_bus_ops qtnf_pcie_topaz_bus_ops = {
	/* control path methods */
	.control_tx	= qtnf_pcie_control_tx,

	/* data path methods */
	.data_tx		= qtnf_pcie_data_tx,
	.data_tx_timeout	= qtnf_pcie_data_tx_timeout,
	.data_rx_start		= qtnf_pcie_data_rx_start,
	.data_rx_stop		= qtnf_pcie_data_rx_stop,
};

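/* debugfs hooks dumping interrupt and descriptor ring statistics */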
static int qtnf_dbg_irq_stats(struct seq_file *s, void *data)
{
	struct qtnf_bus *bus = dev_get_drvdata(s->private);
	struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);

	seq_printf(s, "pcie_irq_count(%u)\n", ts->base.pcie_irq_count);

	return 0;
}

static int qtnf_dbg_pkt_stats(struct seq_file *s, void *data)
{
	struct qtnf_bus *bus = dev_get_drvdata(s->private);
	struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
	struct qtnf_pcie_bus_priv *priv = &ts->base;
	u32 tx_done_index = readl(ts->ep_next_rx_pkt);

	seq_printf(s, "tx_full_count(%u)\n", priv->tx_full_count);
	seq_printf(s, "tx_done_count(%u)\n", priv->tx_done_count);
	seq_printf(s, "tx_reclaim_done(%u)\n", priv->tx_reclaim_done);
	seq_printf(s, "tx_reclaim_req(%u)\n", priv->tx_reclaim_req);

	seq_printf(s, "tx_bd_r_index(%u)\n", priv->tx_bd_r_index);
	seq_printf(s, "tx_done_index(%u)\n", tx_done_index);
	seq_printf(s, "tx_bd_w_index(%u)\n", priv->tx_bd_w_index);

	seq_printf(s, "tx host queue len(%u)\n",
		   CIRC_CNT(priv->tx_bd_w_index, priv->tx_bd_r_index,
			    priv->tx_bd_num));
	seq_printf(s, "tx reclaim queue len(%u)\n",
		   CIRC_CNT(tx_done_index, priv->tx_bd_r_index,
			    priv->tx_bd_num));
	seq_printf(s, "tx card queue len(%u)\n",
		   CIRC_CNT(priv->tx_bd_w_index, tx_done_index,
			    priv->tx_bd_num));

	seq_printf(s, "rx_bd_r_index(%u)\n", priv->rx_bd_r_index);
	seq_printf(s, "rx_bd_w_index(%u)\n", priv->rx_bd_w_index);
	seq_printf(s, "rx alloc queue len(%u)\n",
		   CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index,
			      priv->rx_bd_num));

	return 0;
}

static void qtnf_reset_dma_offset(struct qtnf_pcie_topaz_state *ts)
{
	struct qtnf_topaz_bda __iomem *bda = ts->bda;
	u32 offset = readl(&bda->bda_dma_offset);

	if ((offset & PCIE_DMA_OFFSET_ERROR_MASK) != PCIE_DMA_OFFSET_ERROR)
		return;

	writel(0x0, &bda->bda_dma_offset);
}

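/* Endianness handshake: write a known pattern into the BDA so the card
 * can detect host byte order, then wait for the card to acknowledge.
 */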
static int qtnf_pcie_endian_detect(struct qtnf_pcie_topaz_state *ts)
{
	struct qtnf_topaz_bda __iomem *bda = ts->bda;
	u32 timeout = 0;
	u32 endian;
	int ret = 0;

	writel(QTN_PCI_ENDIAN_DETECT_DATA, &bda->bda_pci_endian);

	/* flush endian modifications before status update */
	dma_wmb();

	writel(QTN_PCI_ENDIAN_VALID_STATUS, &bda->bda_pci_pre_status);

	while (readl(&bda->bda_pci_post_status) !=
	       QTN_PCI_ENDIAN_VALID_STATUS) {
		usleep_range(1000, 1200);
		if (++timeout > QTN_FW_DL_TIMEOUT_MS) {
			pr_err("card endianness detection timed out\n");
			ret = -ETIMEDOUT;
			goto endian_out;
		}
	}

	/* do not read before status is updated */
	dma_rmb();

	endian = readl(&bda->bda_pci_endian);
	WARN(endian != QTN_PCI_LITTLE_ENDIAN,
	     "%s: unexpected card endianness", __func__);

endian_out:
	writel(0, &bda->bda_pci_pre_status);
	writel(0, &bda->bda_pci_post_status);
	writel(0, &bda->bda_pci_endian);

	return ret;
}

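/* Pre-boot handshake: detect endianness, report MSI availability and the
 * boot mode (flash vs. host-loaded firmware), then wait for the card to
 * signal that it is ready to boot.
 */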
static int qtnf_pre_init_ep(struct qtnf_bus *bus)
{
	struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
	struct qtnf_topaz_bda __iomem *bda = ts->bda;
	u32 flags;
	int ret;

	ret = qtnf_pcie_endian_detect(ts);
	if (ret < 0) {
		pr_err("failed to detect card endianness\n");
		return ret;
	}

	writeb(ts->base.msi_enabled, &ts->bda->bda_rc_msi_enabled);
	qtnf_reset_dma_offset(ts);

	/* notify card about driver type and boot mode */
	flags = readl(&bda->bda_flags) | QTN_BDA_HOST_QLINK_DRV;

	if (ts->base.flashboot)
		flags |= QTN_BDA_FLASH_BOOT;
	else
		flags &= ~QTN_BDA_FLASH_BOOT;

	writel(flags, &bda->bda_flags);

	qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_HOST_RDY);
	if (qtnf_poll_state(&ts->bda->bda_bootstate, QTN_BDA_FW_TARGET_RDY,
			    QTN_FW_DL_TIMEOUT_MS)) {
		pr_err("card is not ready to boot...\n");
		return -ETIMEDOUT;
	}

	return ret;
}

static int qtnf_post_init_ep(struct qtnf_pcie_topaz_state *ts)
{
	struct pci_dev *pdev = ts->base.pdev;

	setup_rx_irqs(ts);
	disable_rx_irqs(ts);

	if (qtnf_poll_state(&ts->bda->bda_bootstate, QTN_BDA_FW_QLINK_DONE,
			    QTN_FW_QLINK_TIMEOUT_MS))
		return -ETIMEDOUT;

	enable_irq(pdev->irq);
	return 0;
}

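/* Firmware download: stream the image to the card in DMA-coherent blocks,
 * synchronizing each block through the BDA bootstate handshake; a
 * zero-sized block marks the end of the upload.
 */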
static int
qtnf_ep_fw_load(struct qtnf_pcie_topaz_state *ts, const u8 *fw, u32 fw_size)
{
	struct qtnf_topaz_bda __iomem *bda = ts->bda;
	struct pci_dev *pdev = ts->base.pdev;
	u32 remaining = fw_size;
	u8 *curr = (u8 *)fw;
	u32 blksize;
	u32 nblocks;
	u32 offset;
	u32 count;
	u32 size;
	dma_addr_t paddr;
	void *data;
	int ret = 0;

	pr_debug("FW upload started: fw_addr = 0x%p, size=%u\n", fw, fw_size);

	blksize = ts->base.fw_blksize;

	if (blksize < PAGE_SIZE)
		blksize = PAGE_SIZE;

	while (blksize >= PAGE_SIZE) {
		pr_debug("allocating %u bytes to upload FW\n", blksize);
		data = dma_alloc_coherent(&pdev->dev, blksize,
					  &paddr, GFP_KERNEL);
		if (data)
			break;
		blksize /= 2;
	}

	if (!data) {
		pr_err("failed to allocate DMA buffer for FW upload\n");
		ret = -ENOMEM;
		goto fw_load_out;
	}

	nblocks = NBLOCKS(fw_size, blksize);
	offset = readl(&bda->bda_dma_offset);

	qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_HOST_LOAD);
	if (qtnf_poll_state(&ts->bda->bda_bootstate, QTN_BDA_FW_EP_RDY,
			    QTN_FW_DL_TIMEOUT_MS)) {
		pr_err("card is not ready to download FW\n");
		ret = -ETIMEDOUT;
		goto fw_load_map;
	}

	for (count = 0; count < nblocks; count++) {
		size = (remaining > blksize) ? blksize : remaining;

		memcpy(data, curr, size);
		qtnf_non_posted_write(paddr + offset, &bda->bda_img);
		qtnf_non_posted_write(size, &bda->bda_img_size);

		pr_debug("chunk[%u] VA[0x%p] PA[%pad] sz[%u]\n",
			 count, (void *)curr, &paddr, size);

		qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_BLOCK_RDY);
		if (qtnf_poll_state(&ts->bda->bda_bootstate,
				    QTN_BDA_FW_BLOCK_DONE,
				    QTN_FW_DL_TIMEOUT_MS)) {
			pr_err("confirmation for block #%u timed out\n", count);
			ret = -ETIMEDOUT;
			goto fw_load_map;
		}

		remaining = (remaining < size) ? remaining : (remaining - size);
		curr += size;
	}

	/* upload completion mark: zero-sized block */
	qtnf_non_posted_write(0, &bda->bda_img);
	qtnf_non_posted_write(0, &bda->bda_img_size);

	qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_BLOCK_RDY);
	if (qtnf_poll_state(&ts->bda->bda_bootstate, QTN_BDA_FW_BLOCK_DONE,
			    QTN_FW_DL_TIMEOUT_MS)) {
		pr_err("confirmation for the last block timed out\n");
		ret = -ETIMEDOUT;
		goto fw_load_map;
	}

	/* RC is done */
	qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_BLOCK_END);
	if (qtnf_poll_state(&ts->bda->bda_bootstate, QTN_BDA_FW_LOAD_DONE,
			    QTN_FW_DL_TIMEOUT_MS)) {
		pr_err("confirmation for FW upload completion timed out\n");
		ret = -ETIMEDOUT;
		goto fw_load_map;
	}

	pr_debug("FW upload completed: sent %u blocks in total\n", count);

fw_load_map:
	dma_free_coherent(&pdev->dev, blksize, data, paddr);

fw_load_out:
	return ret;
}

static int qtnf_topaz_fw_upload(struct qtnf_pcie_topaz_state *ts,
				const char *fwname)
{
	const struct firmware *fw;
	struct pci_dev *pdev = ts->base.pdev;
	int ret;

	if (qtnf_poll_state(&ts->bda->bda_bootstate,
			    QTN_BDA_FW_LOAD_RDY,
			    QTN_FW_DL_TIMEOUT_MS)) {
		pr_err("%s: card is not ready\n", fwname);
		return -1;
	}

	pr_info("starting firmware upload: %s\n", fwname);

	ret = request_firmware(&fw, fwname, &pdev->dev);
	if (ret < 0) {
		pr_err("%s: request_firmware error %d\n", fwname, ret);
		return -1;
	}

	ret = qtnf_ep_fw_load(ts, fw->data, fw->size);
	release_firmware(fw);

	if (ret)
		pr_err("%s: FW upload error\n", fwname);

	return ret;
}

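/* Deferred firmware bringup: optionally upload the bootloader first, then
 * either boot from flash or upload the main firmware image, driving the
 * BDA bootstate machine until the card reports it is running.
 */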
static void qtnf_topaz_fw_work_handler(struct work_struct *work)
{
	struct qtnf_bus *bus = container_of(work, struct qtnf_bus, fw_work);
	struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
	int bootloader_needed = readl(&ts->bda->bda_flags) & QTN_BDA_XMIT_UBOOT;
	struct pci_dev *pdev = ts->base.pdev;
	int ret;

	qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_TARGET_BOOT);

	if (bootloader_needed) {
		ret = qtnf_topaz_fw_upload(ts, QTN_PCI_TOPAZ_BOOTLD_NAME);
		if (ret)
			goto fw_load_exit;

		ret = qtnf_pre_init_ep(bus);
		if (ret)
			goto fw_load_exit;

		qtnf_set_state(&ts->bda->bda_bootstate,
			       QTN_BDA_FW_TARGET_BOOT);
	}

	if (ts->base.flashboot) {
		pr_info("booting firmware from flash\n");

		ret = qtnf_poll_state(&ts->bda->bda_bootstate,
				      QTN_BDA_FW_FLASH_BOOT,
				      QTN_FW_DL_TIMEOUT_MS);
		if (ret)
			goto fw_load_exit;
	} else {
		ret = qtnf_topaz_fw_upload(ts, QTN_PCI_TOPAZ_FW_NAME);
		if (ret)
			goto fw_load_exit;

		qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_START);
		ret = qtnf_poll_state(&ts->bda->bda_bootstate,
				      QTN_BDA_FW_CONFIG,
				      QTN_FW_QLINK_TIMEOUT_MS);
		if (ret) {
			pr_err("FW bringup timed out\n");
			goto fw_load_exit;
		}

		qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_RUN);
		ret = qtnf_poll_state(&ts->bda->bda_bootstate,
				      QTN_BDA_FW_RUNNING,
				      QTN_FW_QLINK_TIMEOUT_MS);
		if (ret) {
			pr_err("card bringup timed out\n");
			goto fw_load_exit;
		}
	}

	ret = qtnf_post_init_ep(ts);
	if (ret) {
		pr_err("FW runtime failure\n");
		goto fw_load_exit;
	}

	pr_info("firmware is up and running\n");

	ret = qtnf_pcie_fw_boot_done(bus);
	if (ret)
		goto fw_load_exit;

	qtnf_debugfs_add_entry(bus, "pkt_stats", qtnf_dbg_pkt_stats);
	qtnf_debugfs_add_entry(bus, "irq_stats", qtnf_dbg_irq_stats);

fw_load_exit:
	put_device(&pdev->dev);
}

static void qtnf_reclaim_tasklet_fn(struct tasklet_struct *t)
{
	struct qtnf_pcie_topaz_state *ts = from_tasklet(ts, t, base.reclaim_tq);

	qtnf_topaz_data_tx_reclaim(ts);
}

static u64 qtnf_topaz_dma_mask_get(void)
{
	return DMA_BIT_MASK(32);
}

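/* Bus probe: request the IRQ, run the pre-boot handshake, then set up the
 * descriptor rings, NAPI context, reclaim tasklet and shared-memory IPC.
 */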
static int qtnf_pcie_topaz_probe(struct qtnf_bus *bus,
				 unsigned int tx_bd_num, unsigned int rx_bd_num)
{
	struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
	struct pci_dev *pdev = ts->base.pdev;
	struct qtnf_shm_ipc_int ipc_int;
	unsigned long irqflags;
	int ret;

	bus->bus_ops = &qtnf_pcie_topaz_bus_ops;
	INIT_WORK(&bus->fw_work, qtnf_topaz_fw_work_handler);
	ts->bda = ts->base.epmem_bar;

	/* assign host msi irq before card init */
	if (ts->base.msi_enabled)
		irqflags = IRQF_NOBALANCING;
	else
		irqflags = IRQF_NOBALANCING | IRQF_SHARED;

	ret = devm_request_irq(&pdev->dev, pdev->irq,
			       &qtnf_pcie_topaz_interrupt,
			       irqflags, "qtnf_topaz_irq", (void *)bus);
	if (ret) {
		pr_err("failed to request pcie irq %d\n", pdev->irq);
		return ret;
	}

	disable_irq(pdev->irq);

	ret = qtnf_pre_init_ep(bus);
	if (ret) {
		pr_err("failed to init card\n");
		return ret;
	}

	ret = qtnf_pcie_topaz_init_xfer(ts, tx_bd_num, rx_bd_num);
	if (ret) {
		pr_err("PCIE xfer init failed\n");
		return ret;
	}

	tasklet_setup(&ts->base.reclaim_tq, qtnf_reclaim_tasklet_fn);
	netif_napi_add_weight(&bus->mux_dev, &bus->mux_napi,
			      qtnf_topaz_rx_poll, 10);

	ipc_int.fn = qtnf_topaz_ipc_gen_ep_int;
	ipc_int.arg = ts;
	qtnf_pcie_init_shm_ipc(&ts->base, &ts->bda->bda_shm_reg1,
			       &ts->bda->bda_shm_reg2, &ipc_int);

	return 0;
}

static void qtnf_pcie_topaz_remove(struct qtnf_bus *bus)
{
	struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);

	qtnf_topaz_reset_ep(ts);
	qtnf_topaz_free_xfer_buffers(ts);
}

#ifdef CONFIG_PM_SLEEP
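/* System sleep support: report the pending power state to the card through
 * the extra shared params and an IPC doorbell, then let the PCI core
 * save/restore device state.
 */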
static int qtnf_pcie_topaz_suspend(struct qtnf_bus *bus)
{
	struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
	struct pci_dev *pdev = ts->base.pdev;

	writel((u32 __force)PCI_D3hot, ts->ep_pmstate);
	dma_wmb();
	writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_PM_EP_IRQ),
	       TOPAZ_LH_IPC4_INT(ts->base.sysctl_bar));

	pci_save_state(pdev);
	pci_enable_wake(pdev, PCI_D3hot, 1);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}

static int qtnf_pcie_topaz_resume(struct qtnf_bus *bus)
{
	struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
	struct pci_dev *pdev = ts->base.pdev;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_wake(pdev, PCI_D0, 0);

	writel((u32 __force)PCI_D0, ts->ep_pmstate);
	dma_wmb();
	writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_PM_EP_IRQ),
	       TOPAZ_LH_IPC4_INT(ts->base.sysctl_bar));

	return 0;
}
#endif

struct qtnf_bus *qtnf_pcie_topaz_alloc(struct pci_dev *pdev)
{
	struct qtnf_bus *bus;
	struct qtnf_pcie_topaz_state *ts;

	bus = devm_kzalloc(&pdev->dev, sizeof(*bus) + sizeof(*ts), GFP_KERNEL);
	if (!bus)
		return NULL;

	ts = get_bus_priv(bus);
	ts->base.probe_cb = qtnf_pcie_topaz_probe;
	ts->base.remove_cb = qtnf_pcie_topaz_remove;
	ts->base.dma_mask_get_cb = qtnf_topaz_dma_mask_get;
#ifdef CONFIG_PM_SLEEP
	ts->base.resume_cb = qtnf_pcie_topaz_resume;
	ts->base.suspend_cb = qtnf_pcie_topaz_suspend;
#endif

	return bus;
}