cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

puda.c (46764B)


      1// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
      2/* Copyright (c) 2015 - 2021 Intel Corporation */
      3#include "osdep.h"
      4#include "hmc.h"
      5#include "defs.h"
      6#include "type.h"
      7#include "protos.h"
      8#include "puda.h"
      9#include "ws.h"
     10
     11static void irdma_ieq_receive(struct irdma_sc_vsi *vsi,
     12			      struct irdma_puda_buf *buf);
     13static void irdma_ieq_tx_compl(struct irdma_sc_vsi *vsi, void *sqwrid);
     14static void irdma_ilq_putback_rcvbuf(struct irdma_sc_qp *qp,
     15				     struct irdma_puda_buf *buf, u32 wqe_idx);
     16/**
     17 * irdma_puda_get_listbuf - get buffer from puda list
     18 * @list: list to use for buffers (ILQ or IEQ)
     19 */
     20static struct irdma_puda_buf *irdma_puda_get_listbuf(struct list_head *list)
     21{
     22	struct irdma_puda_buf *buf = NULL;
     23
     24	if (!list_empty(list)) {
     25		buf = (struct irdma_puda_buf *)list->next;
     26		list_del((struct list_head *)&buf->list);
     27	}
     28
     29	return buf;
     30}
     31
     32/**
     33 * irdma_puda_get_bufpool - return buffer from resource
     34 * @rsrc: resource to use for buffer
     35 */
     36struct irdma_puda_buf *irdma_puda_get_bufpool(struct irdma_puda_rsrc *rsrc)
     37{
     38	struct irdma_puda_buf *buf = NULL;
     39	struct list_head *list = &rsrc->bufpool;
     40	unsigned long flags;
     41
     42	spin_lock_irqsave(&rsrc->bufpool_lock, flags);
     43	buf = irdma_puda_get_listbuf(list);
     44	if (buf) {
     45		rsrc->avail_buf_count--;
     46		buf->vsi = rsrc->vsi;
     47	} else {
     48		rsrc->stats_buf_alloc_fail++;
     49	}
     50	spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
     51
     52	return buf;
     53}
     54
     55/**
     56 * irdma_puda_ret_bufpool - return buffer to rsrc list
     57 * @rsrc: resource to use for buffer
     58 * @buf: buffer to return to resource
     59 */
     60void irdma_puda_ret_bufpool(struct irdma_puda_rsrc *rsrc,
     61			    struct irdma_puda_buf *buf)
     62{
     63	unsigned long flags;
     64
     65	buf->do_lpb = false;
     66	spin_lock_irqsave(&rsrc->bufpool_lock, flags);
     67	list_add(&buf->list, &rsrc->bufpool);
     68	spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
     69	rsrc->avail_buf_count++;
     70}
     71
     72/**
     73 * irdma_puda_post_recvbuf - set wqe for rcv buffer
     74 * @rsrc: resource ptr
     75 * @wqe_idx: wqe index to use
     76 * @buf: puda buffer for rcv q
     77 * @initial: flag if during init time
     78 */
     79static void irdma_puda_post_recvbuf(struct irdma_puda_rsrc *rsrc, u32 wqe_idx,
     80				    struct irdma_puda_buf *buf, bool initial)
     81{
     82	__le64 *wqe;
     83	struct irdma_sc_qp *qp = &rsrc->qp;
     84	u64 offset24 = 0;
     85
     86	/* Synch buffer for use by device */
     87	dma_sync_single_for_device(rsrc->dev->hw->device, buf->mem.pa,
     88				   buf->mem.size, DMA_BIDIRECTIONAL);
     89	qp->qp_uk.rq_wrid_array[wqe_idx] = (uintptr_t)buf;
     90	wqe = qp->qp_uk.rq_base[wqe_idx].elem;
     91	if (!initial)
     92		get_64bit_val(wqe, 24, &offset24);
     93
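        	/* toggle the valid bit so the HW sees this WQE as newly posted */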
     94	offset24 = (offset24) ? 0 : FIELD_PREP(IRDMAQPSQ_VALID, 1);
     95
     96	set_64bit_val(wqe, 16, 0);
     97	set_64bit_val(wqe, 0, buf->mem.pa);
     98	if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
     99		set_64bit_val(wqe, 8,
    100			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, buf->mem.size));
    101	} else {
    102		set_64bit_val(wqe, 8,
    103			      FIELD_PREP(IRDMAQPSQ_FRAG_LEN, buf->mem.size) |
    104			      offset24);
    105	}
    106	dma_wmb(); /* make sure WQE is written before valid bit is set */
    107
    108	set_64bit_val(wqe, 24, offset24);
    109}
    110
    111/**
    112 * irdma_puda_replenish_rq - post rcv buffers
    113 * @rsrc: resource to use for buffer
    114 * @initial: flag if during init time
    115 */
    116static int irdma_puda_replenish_rq(struct irdma_puda_rsrc *rsrc, bool initial)
    117{
    118	u32 i;
    119	u32 invalid_cnt = rsrc->rxq_invalid_cnt;
    120	struct irdma_puda_buf *buf = NULL;
    121
    122	for (i = 0; i < invalid_cnt; i++) {
    123		buf = irdma_puda_get_bufpool(rsrc);
    124		if (!buf)
    125			return -ENOBUFS;
    126		irdma_puda_post_recvbuf(rsrc, rsrc->rx_wqe_idx, buf, initial);
    127		rsrc->rx_wqe_idx = ((rsrc->rx_wqe_idx + 1) % rsrc->rq_size);
    128		rsrc->rxq_invalid_cnt--;
    129	}
    130
    131	return 0;
    132}
    133
    134/**
    135 * irdma_puda_alloc_buf - allocate mem for buffer
    136 * @dev: iwarp device
    137 * @len: length of buffer
    138 */
    139static struct irdma_puda_buf *irdma_puda_alloc_buf(struct irdma_sc_dev *dev,
    140						   u32 len)
    141{
    142	struct irdma_puda_buf *buf;
    143	struct irdma_virt_mem buf_mem;
    144
    145	buf_mem.size = sizeof(struct irdma_puda_buf);
    146	buf_mem.va = kzalloc(buf_mem.size, GFP_KERNEL);
    147	if (!buf_mem.va)
    148		return NULL;
    149
    150	buf = buf_mem.va;
    151	buf->mem.size = len;
    152	buf->mem.va = kzalloc(buf->mem.size, GFP_KERNEL);
    153	if (!buf->mem.va)
    154		goto free_virt;
    155	buf->mem.pa = dma_map_single(dev->hw->device, buf->mem.va,
    156				     buf->mem.size, DMA_BIDIRECTIONAL);
    157	if (dma_mapping_error(dev->hw->device, buf->mem.pa)) {
    158		kfree(buf->mem.va);
    159		goto free_virt;
    160	}
    161
    162	buf->buf_mem.va = buf_mem.va;
    163	buf->buf_mem.size = buf_mem.size;
    164
    165	return buf;
    166
    167free_virt:
    168	kfree(buf_mem.va);
    169	return NULL;
    170}
    171
    172/**
    173 * irdma_puda_dele_buf - delete buffer back to system
    174 * @dev: iwarp device
    175 * @buf: buffer to free
    176 */
    177static void irdma_puda_dele_buf(struct irdma_sc_dev *dev,
    178				struct irdma_puda_buf *buf)
    179{
    180	dma_unmap_single(dev->hw->device, buf->mem.pa, buf->mem.size,
    181			 DMA_BIDIRECTIONAL);
    182	kfree(buf->mem.va);
    183	kfree(buf->buf_mem.va);
    184}
    185
    186/**
    187 * irdma_puda_get_next_send_wqe - return next wqe for processing
    188 * @qp: puda qp for wqe
    189 * @wqe_idx: wqe index for caller
    190 */
    191static __le64 *irdma_puda_get_next_send_wqe(struct irdma_qp_uk *qp,
    192					    u32 *wqe_idx)
    193{
    194	int ret_code = 0;
    195
    196	*wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
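        	/* ring head wrapped to 0: flip SQ polarity for the next pass of WQEs */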
    197	if (!*wqe_idx)
    198		qp->swqe_polarity = !qp->swqe_polarity;
    199	IRDMA_RING_MOVE_HEAD(qp->sq_ring, ret_code);
    200	if (ret_code)
    201		return NULL;
    202
    203	return qp->sq_base[*wqe_idx].elem;
    204}
    205
    206/**
    207 * irdma_puda_poll_info - poll cq for completion
    208 * @cq: cq for poll
    209 * @info: info return for successful completion
    210 */
    211static int irdma_puda_poll_info(struct irdma_sc_cq *cq,
    212				struct irdma_puda_cmpl_info *info)
    213{
    214	struct irdma_cq_uk *cq_uk = &cq->cq_uk;
    215	u64 qword0, qword2, qword3, qword6;
    216	__le64 *cqe;
    217	__le64 *ext_cqe = NULL;
    218	u64 qword7 = 0;
    219	u64 comp_ctx;
    220	bool valid_bit;
    221	bool ext_valid = 0;
    222	u32 major_err, minor_err;
    223	u32 peek_head;
    224	bool error;
    225	u8 polarity;
    226
    227	cqe = IRDMA_GET_CURRENT_CQ_ELEM(&cq->cq_uk);
    228	get_64bit_val(cqe, 24, &qword3);
    229	valid_bit = (bool)FIELD_GET(IRDMA_CQ_VALID, qword3);
    230	if (valid_bit != cq_uk->polarity)
    231		return -ENOENT;
    232
    233	if (cq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
    234		ext_valid = (bool)FIELD_GET(IRDMA_CQ_EXTCQE, qword3);
    235
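        	/* an extended CQE occupies the next CQ ring slot; validate its polarity first */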
    236	if (ext_valid) {
    237		peek_head = (cq_uk->cq_ring.head + 1) % cq_uk->cq_ring.size;
    238		ext_cqe = cq_uk->cq_base[peek_head].buf;
    239		get_64bit_val(ext_cqe, 24, &qword7);
    240		polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
    241		if (!peek_head)
    242			polarity ^= 1;
    243		if (polarity != cq_uk->polarity)
    244			return -ENOENT;
    245
    246		IRDMA_RING_MOVE_HEAD_NOCHECK(cq_uk->cq_ring);
    247		if (!IRDMA_RING_CURRENT_HEAD(cq_uk->cq_ring))
    248			cq_uk->polarity = !cq_uk->polarity;
    249		/* update cq tail in cq shadow memory also */
    250		IRDMA_RING_MOVE_TAIL(cq_uk->cq_ring);
    251	}
    252
    253	print_hex_dump_debug("PUDA: PUDA CQE", DUMP_PREFIX_OFFSET, 16, 8, cqe,
    254			     32, false);
    255	if (ext_valid)
    256		print_hex_dump_debug("PUDA: PUDA EXT-CQE", DUMP_PREFIX_OFFSET,
    257				     16, 8, ext_cqe, 32, false);
    258
    259	error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
    260	if (error) {
    261		ibdev_dbg(to_ibdev(cq->dev), "PUDA: receive error\n");
    262		major_err = (u32)(FIELD_GET(IRDMA_CQ_MAJERR, qword3));
    263		minor_err = (u32)(FIELD_GET(IRDMA_CQ_MINERR, qword3));
    264		info->compl_error = major_err << 16 | minor_err;
    265		return -EIO;
    266	}
    267
    268	get_64bit_val(cqe, 0, &qword0);
    269	get_64bit_val(cqe, 16, &qword2);
    270
    271	info->q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
    272	info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2);
    273	if (cq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
    274		info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
    275
    276	get_64bit_val(cqe, 8, &comp_ctx);
    277	info->qp = (struct irdma_qp_uk *)(unsigned long)comp_ctx;
    278	info->wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);
    279
    280	if (info->q_type == IRDMA_CQE_QTYPE_RQ) {
    281		if (ext_valid) {
    282			info->vlan_valid = (bool)FIELD_GET(IRDMA_CQ_UDVLANVALID, qword7);
    283			if (info->vlan_valid) {
    284				get_64bit_val(ext_cqe, 16, &qword6);
    285				info->vlan = (u16)FIELD_GET(IRDMA_CQ_UDVLAN, qword6);
    286			}
    287			info->smac_valid = (bool)FIELD_GET(IRDMA_CQ_UDSMACVALID, qword7);
    288			if (info->smac_valid) {
    289				get_64bit_val(ext_cqe, 16, &qword6);
    290				info->smac[0] = (u8)((qword6 >> 40) & 0xFF);
    291				info->smac[1] = (u8)((qword6 >> 32) & 0xFF);
    292				info->smac[2] = (u8)((qword6 >> 24) & 0xFF);
    293				info->smac[3] = (u8)((qword6 >> 16) & 0xFF);
    294				info->smac[4] = (u8)((qword6 >> 8) & 0xFF);
    295				info->smac[5] = (u8)(qword6 & 0xFF);
    296			}
    297		}
    298
    299		if (cq->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) {
    300			info->vlan_valid = (bool)FIELD_GET(IRDMA_VLAN_TAG_VALID, qword3);
    301			info->l4proto = (u8)FIELD_GET(IRDMA_UDA_L4PROTO, qword2);
    302			info->l3proto = (u8)FIELD_GET(IRDMA_UDA_L3PROTO, qword2);
    303		}
    304
    305		info->payload_len = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);
    306	}
    307
    308	return 0;
    309}
    310
    311/**
    312 * irdma_puda_poll_cmpl - processes completion for cq
    313 * @dev: iwarp device
    314 * @cq: cq getting interrupt
    315 * @compl_err: return any completion err
    316 */
    317int irdma_puda_poll_cmpl(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq,
    318			 u32 *compl_err)
    319{
    320	struct irdma_qp_uk *qp;
    321	struct irdma_cq_uk *cq_uk = &cq->cq_uk;
    322	struct irdma_puda_cmpl_info info = {};
    323	int ret = 0;
    324	struct irdma_puda_buf *buf;
    325	struct irdma_puda_rsrc *rsrc;
    326	u8 cq_type = cq->cq_type;
    327	unsigned long flags;
    328
    329	if (cq_type == IRDMA_CQ_TYPE_ILQ || cq_type == IRDMA_CQ_TYPE_IEQ) {
    330		rsrc = (cq_type == IRDMA_CQ_TYPE_ILQ) ? cq->vsi->ilq :
    331							cq->vsi->ieq;
    332	} else {
    333		ibdev_dbg(to_ibdev(dev), "PUDA: qp_type error\n");
    334		return -EINVAL;
    335	}
    336
    337	ret = irdma_puda_poll_info(cq, &info);
    338	*compl_err = info.compl_error;
    339	if (ret == -ENOENT)
    340		return ret;
    341	if (ret)
    342		goto done;
    343
    344	qp = info.qp;
    345	if (!qp || !rsrc) {
    346		ret = -EFAULT;
    347		goto done;
    348	}
    349
    350	if (qp->qp_id != rsrc->qp_id) {
    351		ret = -EFAULT;
    352		goto done;
    353	}
    354
    355	if (info.q_type == IRDMA_CQE_QTYPE_RQ) {
    356		buf = (struct irdma_puda_buf *)(uintptr_t)
    357			      qp->rq_wrid_array[info.wqe_idx];
    358
    359		/* reusing so synch the buffer for CPU use */
    360		dma_sync_single_for_cpu(dev->hw->device, buf->mem.pa,
    361					buf->mem.size, DMA_BIDIRECTIONAL);
    362		/* Get all the tcpip information in the buf header */
    363		ret = irdma_puda_get_tcpip_info(&info, buf);
    364		if (ret) {
    365			rsrc->stats_rcvd_pkt_err++;
    366			if (cq_type == IRDMA_CQ_TYPE_ILQ) {
    367				irdma_ilq_putback_rcvbuf(&rsrc->qp, buf,
    368							 info.wqe_idx);
    369			} else {
    370				irdma_puda_ret_bufpool(rsrc, buf);
    371				irdma_puda_replenish_rq(rsrc, false);
    372			}
    373			goto done;
    374		}
    375
    376		rsrc->stats_pkt_rcvd++;
    377		rsrc->compl_rxwqe_idx = info.wqe_idx;
    378		ibdev_dbg(to_ibdev(dev), "PUDA: RQ completion\n");
    379		rsrc->receive(rsrc->vsi, buf);
    380		if (cq_type == IRDMA_CQ_TYPE_ILQ)
    381			irdma_ilq_putback_rcvbuf(&rsrc->qp, buf, info.wqe_idx);
    382		else
    383			irdma_puda_replenish_rq(rsrc, false);
    384
    385	} else {
    386		ibdev_dbg(to_ibdev(dev), "PUDA: SQ completion\n");
    387		buf = (struct irdma_puda_buf *)(uintptr_t)
    388					qp->sq_wrtrk_array[info.wqe_idx].wrid;
    389
    390		/* reusing so synch the buffer for CPU use */
    391		dma_sync_single_for_cpu(dev->hw->device, buf->mem.pa,
    392					buf->mem.size, DMA_BIDIRECTIONAL);
    393		IRDMA_RING_SET_TAIL(qp->sq_ring, info.wqe_idx);
    394		rsrc->xmit_complete(rsrc->vsi, buf);
    395		spin_lock_irqsave(&rsrc->bufpool_lock, flags);
    396		rsrc->tx_wqe_avail_cnt++;
    397		spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
    398		if (!list_empty(&rsrc->txpend))
    399			irdma_puda_send_buf(rsrc, NULL);
    400	}
    401
    402done:
    403	IRDMA_RING_MOVE_HEAD_NOCHECK(cq_uk->cq_ring);
    404	if (!IRDMA_RING_CURRENT_HEAD(cq_uk->cq_ring))
    405		cq_uk->polarity = !cq_uk->polarity;
    406	/* update cq tail in cq shadow memory also */
    407	IRDMA_RING_MOVE_TAIL(cq_uk->cq_ring);
    408	set_64bit_val(cq_uk->shadow_area, 0,
    409		      IRDMA_RING_CURRENT_HEAD(cq_uk->cq_ring));
    410
    411	return ret;
    412}
    413
    414/**
    415 * irdma_puda_send - complete send wqe for transmit
    416 * @qp: puda qp for send
    417 * @info: buffer information for transmit
    418 */
    419int irdma_puda_send(struct irdma_sc_qp *qp, struct irdma_puda_send_info *info)
    420{
    421	__le64 *wqe;
    422	u32 iplen, l4len;
    423	u64 hdr[2];
    424	u32 wqe_idx;
    425	u8 iipt;
    426
     427	/* number of 32-bit words in the TCP header */
    428	l4len = info->tcplen >> 2;
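        	/* iplen is in 32-bit words: 5 for a 20-byte IPv4 header, 10 for a 40-byte IPv6 header */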
    429	if (info->ipv4) {
    430		iipt = 3;
    431		iplen = 5;
    432	} else {
    433		iipt = 1;
    434		iplen = 10;
    435	}
    436
    437	wqe = irdma_puda_get_next_send_wqe(&qp->qp_uk, &wqe_idx);
    438	if (!wqe)
    439		return -ENOMEM;
    440
    441	qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid = (uintptr_t)info->scratch;
    442	/* Third line of WQE descriptor */
    443	/* maclen is in words */
    444
    445	if (qp->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
    446		hdr[0] = 0; /* Dest_QPN and Dest_QKey only for UD */
    447		hdr[1] = FIELD_PREP(IRDMA_UDA_QPSQ_OPCODE, IRDMA_OP_TYPE_SEND) |
    448			 FIELD_PREP(IRDMA_UDA_QPSQ_L4LEN, l4len) |
    449			 FIELD_PREP(IRDMAQPSQ_AHID, info->ah_id) |
    450			 FIELD_PREP(IRDMA_UDA_QPSQ_SIGCOMPL, 1) |
    451			 FIELD_PREP(IRDMA_UDA_QPSQ_VALID,
    452				    qp->qp_uk.swqe_polarity);
    453
     454		/* Fourth line of WQE descriptor */
    455
    456		set_64bit_val(wqe, 0, info->paddr);
    457		set_64bit_val(wqe, 8,
    458			      FIELD_PREP(IRDMAQPSQ_FRAG_LEN, info->len) |
    459			      FIELD_PREP(IRDMA_UDA_QPSQ_VALID, qp->qp_uk.swqe_polarity));
    460	} else {
    461		hdr[0] = FIELD_PREP(IRDMA_UDA_QPSQ_MACLEN, info->maclen >> 1) |
    462			 FIELD_PREP(IRDMA_UDA_QPSQ_IPLEN, iplen) |
    463			 FIELD_PREP(IRDMA_UDA_QPSQ_L4T, 1) |
    464			 FIELD_PREP(IRDMA_UDA_QPSQ_IIPT, iipt) |
    465			 FIELD_PREP(IRDMA_GEN1_UDA_QPSQ_L4LEN, l4len);
    466
    467		hdr[1] = FIELD_PREP(IRDMA_UDA_QPSQ_OPCODE, IRDMA_OP_TYPE_SEND) |
    468			 FIELD_PREP(IRDMA_UDA_QPSQ_SIGCOMPL, 1) |
    469			 FIELD_PREP(IRDMA_UDA_QPSQ_DOLOOPBACK, info->do_lpb) |
    470			 FIELD_PREP(IRDMA_UDA_QPSQ_VALID, qp->qp_uk.swqe_polarity);
    471
     472		/* Fourth line of WQE descriptor */
    473
    474		set_64bit_val(wqe, 0, info->paddr);
    475		set_64bit_val(wqe, 8,
    476			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, info->len));
    477	}
    478
    479	set_64bit_val(wqe, 16, hdr[0]);
    480	dma_wmb(); /* make sure WQE is written before valid bit is set */
    481
    482	set_64bit_val(wqe, 24, hdr[1]);
    483
    484	print_hex_dump_debug("PUDA: PUDA SEND WQE", DUMP_PREFIX_OFFSET, 16, 8,
    485			     wqe, 32, false);
    486	irdma_uk_qp_post_wr(&qp->qp_uk);
    487	return 0;
    488}
    489
    490/**
    491 * irdma_puda_send_buf - transmit puda buffer
    492 * @rsrc: resource to use for buffer
    493 * @buf: puda buffer to transmit
    494 */
    495void irdma_puda_send_buf(struct irdma_puda_rsrc *rsrc,
    496			 struct irdma_puda_buf *buf)
    497{
    498	struct irdma_puda_send_info info;
    499	int ret = 0;
    500	unsigned long flags;
    501
    502	spin_lock_irqsave(&rsrc->bufpool_lock, flags);
    503	/* if no wqe available or not from a completion and we have
    504	 * pending buffers, we must queue new buffer
    505	 */
    506	if (!rsrc->tx_wqe_avail_cnt || (buf && !list_empty(&rsrc->txpend))) {
    507		list_add_tail(&buf->list, &rsrc->txpend);
    508		spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
    509		rsrc->stats_sent_pkt_q++;
    510		if (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ)
    511			ibdev_dbg(to_ibdev(rsrc->dev),
    512				  "PUDA: adding to txpend\n");
    513		return;
    514	}
    515	rsrc->tx_wqe_avail_cnt--;
    516	/* if we are coming from a completion and have pending buffers
    517	 * then Get one from pending list
    518	 */
    519	if (!buf) {
    520		buf = irdma_puda_get_listbuf(&rsrc->txpend);
    521		if (!buf)
    522			goto done;
    523	}
    524
    525	info.scratch = buf;
    526	info.paddr = buf->mem.pa;
    527	info.len = buf->totallen;
    528	info.tcplen = buf->tcphlen;
    529	info.ipv4 = buf->ipv4;
    530
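        	/* GEN_2 sends through an address handle; GEN_1 supplies MAC header length and loopback flag */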
    531	if (rsrc->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
    532		info.ah_id = buf->ah_id;
    533	} else {
    534		info.maclen = buf->maclen;
    535		info.do_lpb = buf->do_lpb;
    536	}
    537
    538	/* Synch buffer for use by device */
    539	dma_sync_single_for_cpu(rsrc->dev->hw->device, buf->mem.pa,
    540				buf->mem.size, DMA_BIDIRECTIONAL);
    541	ret = irdma_puda_send(&rsrc->qp, &info);
    542	if (ret) {
    543		rsrc->tx_wqe_avail_cnt++;
    544		rsrc->stats_sent_pkt_q++;
    545		list_add(&buf->list, &rsrc->txpend);
    546		if (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ)
    547			ibdev_dbg(to_ibdev(rsrc->dev),
    548				  "PUDA: adding to puda_send\n");
    549	} else {
    550		rsrc->stats_pkt_sent++;
    551	}
    552done:
    553	spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
    554}
    555
    556/**
    557 * irdma_puda_qp_setctx - during init, set qp's context
    558 * @rsrc: qp's resource
    559 */
    560static void irdma_puda_qp_setctx(struct irdma_puda_rsrc *rsrc)
    561{
    562	struct irdma_sc_qp *qp = &rsrc->qp;
    563	__le64 *qp_ctx = qp->hw_host_ctx;
    564
    565	set_64bit_val(qp_ctx, 8, qp->sq_pa);
    566	set_64bit_val(qp_ctx, 16, qp->rq_pa);
    567	set_64bit_val(qp_ctx, 24,
    568		      FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) |
    569		      FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size));
    570	set_64bit_val(qp_ctx, 48,
    571		      FIELD_PREP(IRDMAQPC_SNDMSS, rsrc->buf_size));
    572	set_64bit_val(qp_ctx, 56, 0);
    573	if (qp->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
    574		set_64bit_val(qp_ctx, 64, 1);
    575	set_64bit_val(qp_ctx, 136,
    576		      FIELD_PREP(IRDMAQPC_TXCQNUM, rsrc->cq_id) |
    577		      FIELD_PREP(IRDMAQPC_RXCQNUM, rsrc->cq_id));
    578	set_64bit_val(qp_ctx, 144,
    579		      FIELD_PREP(IRDMAQPC_STAT_INDEX, rsrc->stats_idx));
    580	set_64bit_val(qp_ctx, 160,
    581		      FIELD_PREP(IRDMAQPC_PRIVEN, 1) |
    582		      FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, rsrc->stats_idx_valid));
    583	set_64bit_val(qp_ctx, 168,
    584		      FIELD_PREP(IRDMAQPC_QPCOMPCTX, (uintptr_t)qp));
    585	set_64bit_val(qp_ctx, 176,
    586		      FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) |
    587		      FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) |
    588		      FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle));
    589
    590	print_hex_dump_debug("PUDA: PUDA QP CONTEXT", DUMP_PREFIX_OFFSET, 16,
    591			     8, qp_ctx, IRDMA_QP_CTX_SIZE, false);
    592}
    593
    594/**
    595 * irdma_puda_qp_wqe - setup wqe for qp create
    596 * @dev: Device
    597 * @qp: Resource qp
    598 */
    599static int irdma_puda_qp_wqe(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
    600{
    601	struct irdma_sc_cqp *cqp;
    602	__le64 *wqe;
    603	u64 hdr;
    604	struct irdma_ccq_cqe_info compl_info;
    605	int status = 0;
    606
    607	cqp = dev->cqp;
    608	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, 0);
    609	if (!wqe)
    610		return -ENOMEM;
    611
    612	set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
    613	set_64bit_val(wqe, 40, qp->shadow_area_pa);
    614
    615	hdr = qp->qp_uk.qp_id |
    616	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_QP) |
    617	      FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, IRDMA_QP_TYPE_UDA) |
    618	      FIELD_PREP(IRDMA_CQPSQ_QP_CQNUMVALID, 1) |
    619	      FIELD_PREP(IRDMA_CQPSQ_QP_NEXTIWSTATE, 2) |
    620	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
    621	dma_wmb(); /* make sure WQE is written before valid bit is set */
    622
    623	set_64bit_val(wqe, 24, hdr);
    624
    625	print_hex_dump_debug("PUDA: PUDA QP CREATE", DUMP_PREFIX_OFFSET, 16,
    626			     8, wqe, 40, false);
    627	irdma_sc_cqp_post_sq(cqp);
    628	status = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_CREATE_QP,
    629					       &compl_info);
    630
    631	return status;
    632}
    633
    634/**
    635 * irdma_puda_qp_create - create qp for resource
    636 * @rsrc: resource to use for buffer
    637 */
    638static int irdma_puda_qp_create(struct irdma_puda_rsrc *rsrc)
    639{
    640	struct irdma_sc_qp *qp = &rsrc->qp;
    641	struct irdma_qp_uk *ukqp = &qp->qp_uk;
    642	int ret = 0;
    643	u32 sq_size, rq_size;
    644	struct irdma_dma_mem *mem;
    645
    646	sq_size = rsrc->sq_size * IRDMA_QP_WQE_MIN_SIZE;
    647	rq_size = rsrc->rq_size * IRDMA_QP_WQE_MIN_SIZE;
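        	/* one coherent allocation: SQ ring, then RQ ring, then shadow area, then QP host context */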
    648	rsrc->qpmem.size = ALIGN((sq_size + rq_size + (IRDMA_SHADOW_AREA_SIZE << 3) + IRDMA_QP_CTX_SIZE),
    649				 IRDMA_HW_PAGE_SIZE);
    650	rsrc->qpmem.va = dma_alloc_coherent(rsrc->dev->hw->device,
    651					    rsrc->qpmem.size, &rsrc->qpmem.pa,
    652					    GFP_KERNEL);
    653	if (!rsrc->qpmem.va)
    654		return -ENOMEM;
    655
    656	mem = &rsrc->qpmem;
    657	memset(mem->va, 0, rsrc->qpmem.size);
    658	qp->hw_sq_size = irdma_get_encoded_wqe_size(rsrc->sq_size, IRDMA_QUEUE_TYPE_SQ_RQ);
    659	qp->hw_rq_size = irdma_get_encoded_wqe_size(rsrc->rq_size, IRDMA_QUEUE_TYPE_SQ_RQ);
    660	qp->pd = &rsrc->sc_pd;
    661	qp->qp_uk.qp_type = IRDMA_QP_TYPE_UDA;
    662	qp->dev = rsrc->dev;
    663	qp->qp_uk.back_qp = rsrc;
    664	qp->sq_pa = mem->pa;
    665	qp->rq_pa = qp->sq_pa + sq_size;
    666	qp->vsi = rsrc->vsi;
    667	ukqp->sq_base = mem->va;
    668	ukqp->rq_base = &ukqp->sq_base[rsrc->sq_size];
    669	ukqp->shadow_area = ukqp->rq_base[rsrc->rq_size].elem;
    670	ukqp->uk_attrs = &qp->dev->hw_attrs.uk_attrs;
    671	qp->shadow_area_pa = qp->rq_pa + rq_size;
    672	qp->hw_host_ctx = ukqp->shadow_area + IRDMA_SHADOW_AREA_SIZE;
    673	qp->hw_host_ctx_pa = qp->shadow_area_pa + (IRDMA_SHADOW_AREA_SIZE << 3);
    674	qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX;
    675	ukqp->qp_id = rsrc->qp_id;
    676	ukqp->sq_wrtrk_array = rsrc->sq_wrtrk_array;
    677	ukqp->rq_wrid_array = rsrc->rq_wrid_array;
    678	ukqp->sq_size = rsrc->sq_size;
    679	ukqp->rq_size = rsrc->rq_size;
    680
    681	IRDMA_RING_INIT(ukqp->sq_ring, ukqp->sq_size);
    682	IRDMA_RING_INIT(ukqp->initial_ring, ukqp->sq_size);
    683	IRDMA_RING_INIT(ukqp->rq_ring, ukqp->rq_size);
    684	ukqp->wqe_alloc_db = qp->pd->dev->wqe_alloc_db;
    685
    686	ret = rsrc->dev->ws_add(qp->vsi, qp->user_pri);
    687	if (ret) {
    688		dma_free_coherent(rsrc->dev->hw->device, rsrc->qpmem.size,
    689				  rsrc->qpmem.va, rsrc->qpmem.pa);
    690		rsrc->qpmem.va = NULL;
    691		return ret;
    692	}
    693
    694	irdma_qp_add_qos(qp);
    695	irdma_puda_qp_setctx(rsrc);
    696
    697	if (rsrc->dev->ceq_valid)
    698		ret = irdma_cqp_qp_create_cmd(rsrc->dev, qp);
    699	else
    700		ret = irdma_puda_qp_wqe(rsrc->dev, qp);
    701	if (ret) {
    702		irdma_qp_rem_qos(qp);
    703		rsrc->dev->ws_remove(qp->vsi, qp->user_pri);
    704		dma_free_coherent(rsrc->dev->hw->device, rsrc->qpmem.size,
    705				  rsrc->qpmem.va, rsrc->qpmem.pa);
    706		rsrc->qpmem.va = NULL;
    707	}
    708
    709	return ret;
    710}
    711
    712/**
    713 * irdma_puda_cq_wqe - setup wqe for CQ create
    714 * @dev: Device
    715 * @cq: resource for cq
    716 */
    717static int irdma_puda_cq_wqe(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq)
    718{
    719	__le64 *wqe;
    720	struct irdma_sc_cqp *cqp;
    721	u64 hdr;
    722	struct irdma_ccq_cqe_info compl_info;
    723	int status = 0;
    724
    725	cqp = dev->cqp;
    726	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, 0);
    727	if (!wqe)
    728		return -ENOMEM;
    729
    730	set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
    731	set_64bit_val(wqe, 8, (uintptr_t)cq >> 1);
    732	set_64bit_val(wqe, 16,
    733		      FIELD_PREP(IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD, cq->shadow_read_threshold));
    734	set_64bit_val(wqe, 32, cq->cq_pa);
    735	set_64bit_val(wqe, 40, cq->shadow_area_pa);
    736	set_64bit_val(wqe, 56,
    737		      FIELD_PREP(IRDMA_CQPSQ_TPHVAL, cq->tph_val) |
    738		      FIELD_PREP(IRDMA_CQPSQ_VSIIDX, cq->vsi->vsi_idx));
    739
    740	hdr = cq->cq_uk.cq_id |
    741	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_CQ) |
    742	      FIELD_PREP(IRDMA_CQPSQ_CQ_CHKOVERFLOW, 1) |
    743	      FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, 1) |
    744	      FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, 1) |
    745	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
    746	dma_wmb(); /* make sure WQE is written before valid bit is set */
    747
    748	set_64bit_val(wqe, 24, hdr);
    749
    750	print_hex_dump_debug("PUDA: PUDA CREATE CQ", DUMP_PREFIX_OFFSET, 16,
    751			     8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
    752	irdma_sc_cqp_post_sq(dev->cqp);
    753	status = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_CREATE_CQ,
    754					       &compl_info);
    755	if (!status) {
    756		struct irdma_sc_ceq *ceq = dev->ceq[0];
    757
    758		if (ceq && ceq->reg_cq)
    759			status = irdma_sc_add_cq_ctx(ceq, cq);
    760	}
    761
    762	return status;
    763}
    764
    765/**
    766 * irdma_puda_cq_create - create cq for resource
     767 * @rsrc: resource for which the cq is created
    768 */
    769static int irdma_puda_cq_create(struct irdma_puda_rsrc *rsrc)
    770{
    771	struct irdma_sc_dev *dev = rsrc->dev;
    772	struct irdma_sc_cq *cq = &rsrc->cq;
    773	int ret = 0;
    774	u32 cqsize;
    775	struct irdma_dma_mem *mem;
    776	struct irdma_cq_init_info info = {};
    777	struct irdma_cq_uk_init_info *init_info = &info.cq_uk_init_info;
    778
    779	cq->vsi = rsrc->vsi;
    780	cqsize = rsrc->cq_size * (sizeof(struct irdma_cqe));
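        	/* CQ ring followed by its shadow area in a single coherent allocation */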
    781	rsrc->cqmem.size = ALIGN(cqsize + sizeof(struct irdma_cq_shadow_area),
    782				 IRDMA_CQ0_ALIGNMENT);
    783	rsrc->cqmem.va = dma_alloc_coherent(dev->hw->device, rsrc->cqmem.size,
    784					    &rsrc->cqmem.pa, GFP_KERNEL);
    785	if (!rsrc->cqmem.va)
    786		return -ENOMEM;
    787
    788	mem = &rsrc->cqmem;
    789	info.dev = dev;
    790	info.type = (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ) ?
    791		    IRDMA_CQ_TYPE_ILQ : IRDMA_CQ_TYPE_IEQ;
    792	info.shadow_read_threshold = rsrc->cq_size >> 2;
    793	info.cq_base_pa = mem->pa;
    794	info.shadow_area_pa = mem->pa + cqsize;
    795	init_info->cq_base = mem->va;
    796	init_info->shadow_area = (__le64 *)((u8 *)mem->va + cqsize);
    797	init_info->cq_size = rsrc->cq_size;
    798	init_info->cq_id = rsrc->cq_id;
    799	info.ceqe_mask = true;
    800	info.ceq_id_valid = true;
    801	info.vsi = rsrc->vsi;
    802
    803	ret = irdma_sc_cq_init(cq, &info);
    804	if (ret)
    805		goto error;
    806
    807	if (rsrc->dev->ceq_valid)
    808		ret = irdma_cqp_cq_create_cmd(dev, cq);
    809	else
    810		ret = irdma_puda_cq_wqe(dev, cq);
    811error:
    812	if (ret) {
    813		dma_free_coherent(dev->hw->device, rsrc->cqmem.size,
    814				  rsrc->cqmem.va, rsrc->cqmem.pa);
    815		rsrc->cqmem.va = NULL;
    816	}
    817
    818	return ret;
    819}
    820
    821/**
    822 * irdma_puda_free_qp - free qp for resource
     823 * @rsrc: resource whose qp is freed
    824 */
    825static void irdma_puda_free_qp(struct irdma_puda_rsrc *rsrc)
    826{
    827	int ret;
    828	struct irdma_ccq_cqe_info compl_info;
    829	struct irdma_sc_dev *dev = rsrc->dev;
    830
    831	if (rsrc->dev->ceq_valid) {
    832		irdma_cqp_qp_destroy_cmd(dev, &rsrc->qp);
    833		rsrc->dev->ws_remove(rsrc->qp.vsi, rsrc->qp.user_pri);
    834		return;
    835	}
    836
    837	ret = irdma_sc_qp_destroy(&rsrc->qp, 0, false, true, true);
    838	if (ret)
    839		ibdev_dbg(to_ibdev(dev),
    840			  "PUDA: error puda qp destroy wqe, status = %d\n",
    841			  ret);
    842	if (!ret) {
    843		ret = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_DESTROY_QP,
    844						    &compl_info);
    845		if (ret)
    846			ibdev_dbg(to_ibdev(dev),
    847				  "PUDA: error puda qp destroy failed, status = %d\n",
    848				  ret);
    849	}
    850	rsrc->dev->ws_remove(rsrc->qp.vsi, rsrc->qp.user_pri);
    851}
    852
    853/**
    854 * irdma_puda_free_cq - free cq for resource
     855 * @rsrc: resource whose cq is freed
    856 */
    857static void irdma_puda_free_cq(struct irdma_puda_rsrc *rsrc)
    858{
    859	int ret;
    860	struct irdma_ccq_cqe_info compl_info;
    861	struct irdma_sc_dev *dev = rsrc->dev;
    862
    863	if (rsrc->dev->ceq_valid) {
    864		irdma_cqp_cq_destroy_cmd(dev, &rsrc->cq);
    865		return;
    866	}
    867
    868	ret = irdma_sc_cq_destroy(&rsrc->cq, 0, true);
    869	if (ret)
    870		ibdev_dbg(to_ibdev(dev), "PUDA: error ieq cq destroy\n");
    871	if (!ret) {
    872		ret = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_DESTROY_CQ,
    873						    &compl_info);
    874		if (ret)
    875			ibdev_dbg(to_ibdev(dev),
     876				  "PUDA: error ieq cq destroy done\n");
    877	}
    878}
    879
    880/**
    881 * irdma_puda_dele_rsrc - delete all resources during close
    882 * @vsi: VSI structure of device
     883 * @type: type of resource to delete
    884 * @reset: true if reset chip
    885 */
    886void irdma_puda_dele_rsrc(struct irdma_sc_vsi *vsi, enum puda_rsrc_type type,
    887			  bool reset)
    888{
    889	struct irdma_sc_dev *dev = vsi->dev;
    890	struct irdma_puda_rsrc *rsrc;
    891	struct irdma_puda_buf *buf = NULL;
    892	struct irdma_puda_buf *nextbuf = NULL;
    893	struct irdma_virt_mem *vmem;
    894	struct irdma_sc_ceq *ceq;
    895
    896	ceq = vsi->dev->ceq[0];
    897	switch (type) {
    898	case IRDMA_PUDA_RSRC_TYPE_ILQ:
    899		rsrc = vsi->ilq;
    900		vmem = &vsi->ilq_mem;
    901		vsi->ilq = NULL;
    902		if (ceq && ceq->reg_cq)
    903			irdma_sc_remove_cq_ctx(ceq, &rsrc->cq);
    904		break;
    905	case IRDMA_PUDA_RSRC_TYPE_IEQ:
    906		rsrc = vsi->ieq;
    907		vmem = &vsi->ieq_mem;
    908		vsi->ieq = NULL;
    909		if (ceq && ceq->reg_cq)
    910			irdma_sc_remove_cq_ctx(ceq, &rsrc->cq);
    911		break;
    912	default:
    913		ibdev_dbg(to_ibdev(dev), "PUDA: error resource type = 0x%x\n",
    914			  type);
    915		return;
    916	}
    917
    918	switch (rsrc->cmpl) {
    919	case PUDA_HASH_CRC_COMPLETE:
    920		irdma_free_hash_desc(rsrc->hash_desc);
    921		fallthrough;
    922	case PUDA_QP_CREATED:
    923		irdma_qp_rem_qos(&rsrc->qp);
    924
    925		if (!reset)
    926			irdma_puda_free_qp(rsrc);
    927
    928		dma_free_coherent(dev->hw->device, rsrc->qpmem.size,
    929				  rsrc->qpmem.va, rsrc->qpmem.pa);
    930		rsrc->qpmem.va = NULL;
    931		fallthrough;
    932	case PUDA_CQ_CREATED:
    933		if (!reset)
    934			irdma_puda_free_cq(rsrc);
    935
    936		dma_free_coherent(dev->hw->device, rsrc->cqmem.size,
    937				  rsrc->cqmem.va, rsrc->cqmem.pa);
    938		rsrc->cqmem.va = NULL;
    939		break;
    940	default:
    941		ibdev_dbg(to_ibdev(rsrc->dev), "PUDA: error no resources\n");
    942		break;
    943	}
    944	/* Free all allocated puda buffers for both tx and rx */
    945	buf = rsrc->alloclist;
    946	while (buf) {
    947		nextbuf = buf->next;
    948		irdma_puda_dele_buf(dev, buf);
    949		buf = nextbuf;
    950		rsrc->alloc_buf_count--;
    951	}
    952
    953	kfree(vmem->va);
    954}
    955
    956/**
    957 * irdma_puda_allocbufs - allocate buffers for resource
    958 * @rsrc: resource for buffer allocation
    959 * @count: number of buffers to create
    960 */
    961static int irdma_puda_allocbufs(struct irdma_puda_rsrc *rsrc, u32 count)
    962{
    963	u32 i;
    964	struct irdma_puda_buf *buf;
    965	struct irdma_puda_buf *nextbuf;
    966
    967	for (i = 0; i < count; i++) {
    968		buf = irdma_puda_alloc_buf(rsrc->dev, rsrc->buf_size);
    969		if (!buf) {
    970			rsrc->stats_buf_alloc_fail++;
    971			return -ENOMEM;
    972		}
    973		irdma_puda_ret_bufpool(rsrc, buf);
    974		rsrc->alloc_buf_count++;
    975		if (!rsrc->alloclist) {
    976			rsrc->alloclist = buf;
    977		} else {
    978			nextbuf = rsrc->alloclist;
    979			rsrc->alloclist = buf;
    980			buf->next = nextbuf;
    981		}
    982	}
    983
    984	rsrc->avail_buf_count = rsrc->alloc_buf_count;
    985
    986	return 0;
    987}
    988
    989/**
    990 * irdma_puda_create_rsrc - create resource (ilq or ieq)
    991 * @vsi: sc VSI struct
    992 * @info: resource information
    993 */
    994int irdma_puda_create_rsrc(struct irdma_sc_vsi *vsi,
    995			   struct irdma_puda_rsrc_info *info)
    996{
    997	struct irdma_sc_dev *dev = vsi->dev;
    998	int ret = 0;
    999	struct irdma_puda_rsrc *rsrc;
   1000	u32 pudasize;
   1001	u32 sqwridsize, rqwridsize;
   1002	struct irdma_virt_mem *vmem;
   1003
   1004	info->count = 1;
   1005	pudasize = sizeof(struct irdma_puda_rsrc);
   1006	sqwridsize = info->sq_size * sizeof(struct irdma_sq_uk_wr_trk_info);
   1007	rqwridsize = info->rq_size * 8;
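        	/* single allocation: rsrc struct, then SQ wr-track array, then RQ wrid array */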
   1008	switch (info->type) {
   1009	case IRDMA_PUDA_RSRC_TYPE_ILQ:
   1010		vmem = &vsi->ilq_mem;
   1011		break;
   1012	case IRDMA_PUDA_RSRC_TYPE_IEQ:
   1013		vmem = &vsi->ieq_mem;
   1014		break;
   1015	default:
   1016		return -EOPNOTSUPP;
   1017	}
   1018	vmem->size = pudasize + sqwridsize + rqwridsize;
   1019	vmem->va = kzalloc(vmem->size, GFP_KERNEL);
   1020	if (!vmem->va)
   1021		return -ENOMEM;
   1022
   1023	rsrc = vmem->va;
   1024	spin_lock_init(&rsrc->bufpool_lock);
   1025	switch (info->type) {
   1026	case IRDMA_PUDA_RSRC_TYPE_ILQ:
   1027		vsi->ilq = vmem->va;
   1028		vsi->ilq_count = info->count;
   1029		rsrc->receive = info->receive;
   1030		rsrc->xmit_complete = info->xmit_complete;
   1031		break;
   1032	case IRDMA_PUDA_RSRC_TYPE_IEQ:
   1033		vsi->ieq_count = info->count;
   1034		vsi->ieq = vmem->va;
   1035		rsrc->receive = irdma_ieq_receive;
   1036		rsrc->xmit_complete = irdma_ieq_tx_compl;
   1037		break;
   1038	default:
   1039		return -EOPNOTSUPP;
   1040	}
   1041
   1042	rsrc->type = info->type;
   1043	rsrc->sq_wrtrk_array = (struct irdma_sq_uk_wr_trk_info *)
   1044			       ((u8 *)vmem->va + pudasize);
   1045	rsrc->rq_wrid_array = (u64 *)((u8 *)vmem->va + pudasize + sqwridsize);
    1046	/* Initialize the buffer pool and tx-pending lists */
   1047	INIT_LIST_HEAD(&rsrc->bufpool);
   1048	INIT_LIST_HEAD(&rsrc->txpend);
   1049
   1050	rsrc->tx_wqe_avail_cnt = info->sq_size - 1;
   1051	irdma_sc_pd_init(dev, &rsrc->sc_pd, info->pd_id, info->abi_ver);
   1052	rsrc->qp_id = info->qp_id;
   1053	rsrc->cq_id = info->cq_id;
   1054	rsrc->sq_size = info->sq_size;
   1055	rsrc->rq_size = info->rq_size;
   1056	rsrc->cq_size = info->rq_size + info->sq_size;
   1057	if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
   1058		if (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ)
   1059			rsrc->cq_size += info->rq_size;
   1060	}
   1061	rsrc->buf_size = info->buf_size;
   1062	rsrc->dev = dev;
   1063	rsrc->vsi = vsi;
   1064	rsrc->stats_idx = info->stats_idx;
   1065	rsrc->stats_idx_valid = info->stats_idx_valid;
   1066
   1067	ret = irdma_puda_cq_create(rsrc);
   1068	if (!ret) {
   1069		rsrc->cmpl = PUDA_CQ_CREATED;
   1070		ret = irdma_puda_qp_create(rsrc);
   1071	}
   1072	if (ret) {
   1073		ibdev_dbg(to_ibdev(dev),
   1074			  "PUDA: error qp_create type=%d, status=%d\n",
   1075			  rsrc->type, ret);
   1076		goto error;
   1077	}
   1078	rsrc->cmpl = PUDA_QP_CREATED;
   1079
   1080	ret = irdma_puda_allocbufs(rsrc, info->tx_buf_cnt + info->rq_size);
   1081	if (ret) {
   1082		ibdev_dbg(to_ibdev(dev), "PUDA: error alloc_buf\n");
   1083		goto error;
   1084	}
   1085
   1086	rsrc->rxq_invalid_cnt = info->rq_size;
   1087	ret = irdma_puda_replenish_rq(rsrc, true);
   1088	if (ret)
   1089		goto error;
   1090
   1091	if (info->type == IRDMA_PUDA_RSRC_TYPE_IEQ) {
   1092		if (!irdma_init_hash_desc(&rsrc->hash_desc)) {
   1093			rsrc->check_crc = true;
   1094			rsrc->cmpl = PUDA_HASH_CRC_COMPLETE;
   1095			ret = 0;
   1096		}
   1097	}
   1098
   1099	irdma_sc_ccq_arm(&rsrc->cq);
   1100	return ret;
   1101
   1102error:
   1103	irdma_puda_dele_rsrc(vsi, info->type, false);
   1104
   1105	return ret;
   1106}
   1107
   1108/**
    1109 * irdma_ilq_putback_rcvbuf - put an ilq buffer back on the rq
   1110 * @qp: ilq's qp resource
   1111 * @buf: puda buffer for rcv q
   1112 * @wqe_idx:  wqe index of completed rcvbuf
   1113 */
   1114static void irdma_ilq_putback_rcvbuf(struct irdma_sc_qp *qp,
   1115				     struct irdma_puda_buf *buf, u32 wqe_idx)
   1116{
   1117	__le64 *wqe;
   1118	u64 offset8, offset24;
   1119
   1120	/* Synch buffer for use by device */
   1121	dma_sync_single_for_device(qp->dev->hw->device, buf->mem.pa,
   1122				   buf->mem.size, DMA_BIDIRECTIONAL);
   1123	wqe = qp->qp_uk.rq_base[wqe_idx].elem;
   1124	get_64bit_val(wqe, 24, &offset24);
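        	/* GEN_2 also keeps a valid bit in qword1; mirror the toggled value written to qword3 below */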
   1125	if (qp->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
   1126		get_64bit_val(wqe, 8, &offset8);
   1127		if (offset24)
   1128			offset8 &= ~FIELD_PREP(IRDMAQPSQ_VALID, 1);
   1129		else
   1130			offset8 |= FIELD_PREP(IRDMAQPSQ_VALID, 1);
   1131		set_64bit_val(wqe, 8, offset8);
   1132		dma_wmb(); /* make sure WQE is written before valid bit is set */
   1133	}
   1134	if (offset24)
   1135		offset24 = 0;
   1136	else
   1137		offset24 = FIELD_PREP(IRDMAQPSQ_VALID, 1);
   1138
   1139	set_64bit_val(wqe, 24, offset24);
   1140}
   1141
   1142/**
   1143 * irdma_ieq_get_fpdu_len - get length of fpdu with or without marker
   1144 * @pfpdu: pointer to fpdu
   1145 * @datap: pointer to data in the buffer
   1146 * @rcv_seq: seqnum of the data buffer
   1147 */
   1148static u16 irdma_ieq_get_fpdu_len(struct irdma_pfpdu *pfpdu, u8 *datap,
   1149				  u32 rcv_seq)
   1150{
   1151	u32 marker_seq, end_seq, blk_start;
   1152	u8 marker_len = pfpdu->marker_len;
   1153	u16 total_len = 0;
   1154	u16 fpdu_len;
   1155
   1156	blk_start = (pfpdu->rcv_start_seq - rcv_seq) & (IRDMA_MRK_BLK_SZ - 1);
   1157	if (!blk_start) {
   1158		total_len = marker_len;
   1159		marker_seq = rcv_seq + IRDMA_MRK_BLK_SZ;
   1160		if (marker_len && *(u32 *)datap)
   1161			return 0;
   1162	} else {
   1163		marker_seq = rcv_seq + blk_start;
   1164	}
   1165
   1166	datap += total_len;
   1167	fpdu_len = ntohs(*(__be16 *)datap);
   1168	fpdu_len += IRDMA_IEQ_MPA_FRAMING;
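        	/* MPA FPDUs are padded to a 4-byte boundary */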
   1169	fpdu_len = (fpdu_len + 3) & 0xfffc;
   1170
   1171	if (fpdu_len > pfpdu->max_fpdu_data)
   1172		return 0;
   1173
   1174	total_len += fpdu_len;
   1175	end_seq = rcv_seq + total_len;
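        	/* account for a marker in every IRDMA_MRK_BLK_SZ block the FPDU spans */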
   1176	while ((int)(marker_seq - end_seq) < 0) {
   1177		total_len += marker_len;
   1178		end_seq += marker_len;
   1179		marker_seq += IRDMA_MRK_BLK_SZ;
   1180	}
   1181
   1182	return total_len;
   1183}
   1184
   1185/**
    1186 * irdma_ieq_copy_to_txbuf - copy data from rcv buf to tx buf
   1187 * @buf: rcv buffer with partial
   1188 * @txbuf: tx buffer for sending back
   1189 * @buf_offset: rcv buffer offset to copy from
   1190 * @txbuf_offset: at offset in tx buf to copy
   1191 * @len: length of data to copy
   1192 */
   1193static void irdma_ieq_copy_to_txbuf(struct irdma_puda_buf *buf,
   1194				    struct irdma_puda_buf *txbuf,
   1195				    u16 buf_offset, u32 txbuf_offset, u32 len)
   1196{
   1197	void *mem1 = (u8 *)buf->mem.va + buf_offset;
   1198	void *mem2 = (u8 *)txbuf->mem.va + txbuf_offset;
   1199
   1200	memcpy(mem2, mem1, len);
   1201}
   1202
   1203/**
   1204 * irdma_ieq_setup_tx_buf - setup tx buffer for partial handling
    1205 * @buf: receive buffer with partial fpdu
   1206 * @txbuf: buffer to prepare
   1207 */
   1208static void irdma_ieq_setup_tx_buf(struct irdma_puda_buf *buf,
   1209				   struct irdma_puda_buf *txbuf)
   1210{
   1211	txbuf->tcphlen = buf->tcphlen;
   1212	txbuf->ipv4 = buf->ipv4;
   1213
   1214	if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
   1215		txbuf->hdrlen = txbuf->tcphlen;
   1216		irdma_ieq_copy_to_txbuf(buf, txbuf, IRDMA_TCP_OFFSET, 0,
   1217					txbuf->hdrlen);
   1218	} else {
   1219		txbuf->maclen = buf->maclen;
   1220		txbuf->hdrlen = buf->hdrlen;
   1221		irdma_ieq_copy_to_txbuf(buf, txbuf, 0, 0, buf->hdrlen);
   1222	}
   1223}
   1224
   1225/**
   1226 * irdma_ieq_check_first_buf - check if rcv buffer's seq is in range
   1227 * @buf: receive exception buffer
   1228 * @fps: first partial sequence number
   1229 */
   1230static void irdma_ieq_check_first_buf(struct irdma_puda_buf *buf, u32 fps)
   1231{
   1232	u32 offset;
   1233
   1234	if (buf->seqnum < fps) {
   1235		offset = fps - buf->seqnum;
   1236		if (offset > buf->datalen)
   1237			return;
   1238		buf->data += offset;
   1239		buf->datalen -= (u16)offset;
   1240		buf->seqnum = fps;
   1241	}
   1242}
   1243
   1244/**
   1245 * irdma_ieq_compl_pfpdu - write txbuf with full fpdu
   1246 * @ieq: ieq resource
   1247 * @rxlist: ieq's received buffer list
    1248 * @pbufl: temporary list for buffers for fpdu
   1249 * @txbuf: tx buffer for fpdu
   1250 * @fpdu_len: total length of fpdu
   1251 */
   1252static void irdma_ieq_compl_pfpdu(struct irdma_puda_rsrc *ieq,
   1253				  struct list_head *rxlist,
   1254				  struct list_head *pbufl,
   1255				  struct irdma_puda_buf *txbuf, u16 fpdu_len)
   1256{
   1257	struct irdma_puda_buf *buf;
   1258	u32 nextseqnum;
   1259	u16 txoffset, bufoffset;
   1260
   1261	buf = irdma_puda_get_listbuf(pbufl);
   1262	if (!buf)
   1263		return;
   1264
   1265	nextseqnum = buf->seqnum + fpdu_len;
   1266	irdma_ieq_setup_tx_buf(buf, txbuf);
   1267	if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
   1268		txoffset = txbuf->hdrlen;
   1269		txbuf->totallen = txbuf->hdrlen + fpdu_len;
   1270		txbuf->data = (u8 *)txbuf->mem.va + txoffset;
   1271	} else {
   1272		txoffset = buf->hdrlen;
   1273		txbuf->totallen = buf->hdrlen + fpdu_len;
   1274		txbuf->data = (u8 *)txbuf->mem.va + buf->hdrlen;
   1275	}
   1276	bufoffset = (u16)(buf->data - (u8 *)buf->mem.va);
   1277
   1278	do {
   1279		if (buf->datalen >= fpdu_len) {
   1280			/* copied full fpdu */
   1281			irdma_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset,
   1282						fpdu_len);
   1283			buf->datalen -= fpdu_len;
   1284			buf->data += fpdu_len;
   1285			buf->seqnum = nextseqnum;
   1286			break;
   1287		}
   1288		/* copy partial fpdu */
   1289		irdma_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset,
   1290					buf->datalen);
   1291		txoffset += buf->datalen;
   1292		fpdu_len -= buf->datalen;
   1293		irdma_puda_ret_bufpool(ieq, buf);
   1294		buf = irdma_puda_get_listbuf(pbufl);
   1295		if (!buf)
   1296			return;
   1297
   1298		bufoffset = (u16)(buf->data - (u8 *)buf->mem.va);
   1299	} while (1);
   1300
    1301	/* last buffer on the list */
   1302	if (buf->datalen)
   1303		list_add(&buf->list, rxlist);
   1304	else
   1305		irdma_puda_ret_bufpool(ieq, buf);
   1306}
   1307
   1308/**
   1309 * irdma_ieq_create_pbufl - create buffer list for single fpdu
   1310 * @pfpdu: pointer to fpdu
    1311 * @rxlist: resource list for receive ieq buffers
    1312 * @pbufl: temp. list for buffers for fpdu
   1313 * @buf: first receive buffer
   1314 * @fpdu_len: total length of fpdu
   1315 */
   1316static int irdma_ieq_create_pbufl(struct irdma_pfpdu *pfpdu,
   1317				  struct list_head *rxlist,
   1318				  struct list_head *pbufl,
   1319				  struct irdma_puda_buf *buf, u16 fpdu_len)
   1320{
   1321	int status = 0;
   1322	struct irdma_puda_buf *nextbuf;
   1323	u32 nextseqnum;
   1324	u16 plen = fpdu_len - buf->datalen;
   1325	bool done = false;
   1326
   1327	nextseqnum = buf->seqnum + buf->datalen;
   1328	do {
   1329		nextbuf = irdma_puda_get_listbuf(rxlist);
   1330		if (!nextbuf) {
   1331			status = -ENOBUFS;
   1332			break;
   1333		}
   1334		list_add_tail(&nextbuf->list, pbufl);
   1335		if (nextbuf->seqnum != nextseqnum) {
   1336			pfpdu->bad_seq_num++;
   1337			status = -ERANGE;
   1338			break;
   1339		}
   1340		if (nextbuf->datalen >= plen) {
   1341			done = true;
   1342		} else {
   1343			plen -= nextbuf->datalen;
   1344			nextseqnum = nextbuf->seqnum + nextbuf->datalen;
   1345		}
   1346
   1347	} while (!done);
   1348
   1349	return status;
   1350}
   1351
   1352/**
   1353 * irdma_ieq_handle_partial - process partial fpdu buffer
   1354 * @ieq: ieq resource
   1355 * @pfpdu: partial management per user qp
   1356 * @buf: receive buffer
   1357 * @fpdu_len: fpdu len in the buffer
   1358 */
   1359static int irdma_ieq_handle_partial(struct irdma_puda_rsrc *ieq,
   1360				    struct irdma_pfpdu *pfpdu,
   1361				    struct irdma_puda_buf *buf, u16 fpdu_len)
   1362{
   1363	int status = 0;
   1364	u8 *crcptr;
   1365	u32 mpacrc;
   1366	u32 seqnum = buf->seqnum;
   1367	struct list_head pbufl; /* partial buffer list */
   1368	struct irdma_puda_buf *txbuf = NULL;
   1369	struct list_head *rxlist = &pfpdu->rxlist;
   1370
   1371	ieq->partials_handled++;
   1372
   1373	INIT_LIST_HEAD(&pbufl);
   1374	list_add(&buf->list, &pbufl);
   1375
   1376	status = irdma_ieq_create_pbufl(pfpdu, rxlist, &pbufl, buf, fpdu_len);
   1377	if (status)
   1378		goto error;
   1379
   1380	txbuf = irdma_puda_get_bufpool(ieq);
   1381	if (!txbuf) {
   1382		pfpdu->no_tx_bufs++;
   1383		status = -ENOBUFS;
   1384		goto error;
   1385	}
   1386
   1387	irdma_ieq_compl_pfpdu(ieq, rxlist, &pbufl, txbuf, fpdu_len);
   1388	irdma_ieq_update_tcpip_info(txbuf, fpdu_len, seqnum);
   1389
   1390	crcptr = txbuf->data + fpdu_len - 4;
   1391	mpacrc = *(u32 *)crcptr;
   1392	if (ieq->check_crc) {
   1393		status = irdma_ieq_check_mpacrc(ieq->hash_desc, txbuf->data,
   1394						(fpdu_len - 4), mpacrc);
   1395		if (status) {
   1396			ibdev_dbg(to_ibdev(ieq->dev), "IEQ: error bad crc\n");
   1397			goto error;
   1398		}
   1399	}
   1400
   1401	print_hex_dump_debug("IEQ: IEQ TX BUFFER", DUMP_PREFIX_OFFSET, 16, 8,
   1402			     txbuf->mem.va, txbuf->totallen, false);
   1403	if (ieq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
   1404		txbuf->ah_id = pfpdu->ah->ah_info.ah_idx;
   1405	txbuf->do_lpb = true;
   1406	irdma_puda_send_buf(ieq, txbuf);
   1407	pfpdu->rcv_nxt = seqnum + fpdu_len;
   1408	return status;
   1409
   1410error:
   1411	while (!list_empty(&pbufl)) {
   1412		buf = list_last_entry(&pbufl, struct irdma_puda_buf, list);
   1413		list_move(&buf->list, rxlist);
   1414	}
   1415	if (txbuf)
   1416		irdma_puda_ret_bufpool(ieq, txbuf);
   1417
   1418	return status;
   1419}
   1420
   1421/**
   1422 * irdma_ieq_process_buf - process buffer rcvd for ieq
   1423 * @ieq: ieq resource
   1424 * @pfpdu: partial management per user qp
   1425 * @buf: receive buffer
   1426 */
   1427static int irdma_ieq_process_buf(struct irdma_puda_rsrc *ieq,
   1428				 struct irdma_pfpdu *pfpdu,
   1429				 struct irdma_puda_buf *buf)
   1430{
   1431	u16 fpdu_len = 0;
   1432	u16 datalen = buf->datalen;
   1433	u8 *datap = buf->data;
   1434	u8 *crcptr;
   1435	u16 ioffset = 0;
   1436	u32 mpacrc;
   1437	u32 seqnum = buf->seqnum;
   1438	u16 len = 0;
   1439	u16 full = 0;
   1440	bool partial = false;
   1441	struct irdma_puda_buf *txbuf;
   1442	struct list_head *rxlist = &pfpdu->rxlist;
   1443	int ret = 0;
   1444
   1445	ioffset = (u16)(buf->data - (u8 *)buf->mem.va);
   1446	while (datalen) {
   1447		fpdu_len = irdma_ieq_get_fpdu_len(pfpdu, datap, buf->seqnum);
   1448		if (!fpdu_len) {
   1449			ibdev_dbg(to_ibdev(ieq->dev),
   1450				  "IEQ: error bad fpdu len\n");
   1451			list_add(&buf->list, rxlist);
   1452			return -EINVAL;
   1453		}
   1454
   1455		if (datalen < fpdu_len) {
   1456			partial = true;
   1457			break;
   1458		}
   1459		crcptr = datap + fpdu_len - 4;
   1460		mpacrc = *(u32 *)crcptr;
   1461		if (ieq->check_crc)
   1462			ret = irdma_ieq_check_mpacrc(ieq->hash_desc, datap,
   1463						     fpdu_len - 4, mpacrc);
   1464		if (ret) {
   1465			list_add(&buf->list, rxlist);
   1466			ibdev_dbg(to_ibdev(ieq->dev),
   1467				  "ERR: IRDMA_ERR_MPA_CRC\n");
   1468			return -EINVAL;
   1469		}
   1470		full++;
   1471		pfpdu->fpdu_processed++;
   1472		ieq->fpdu_processed++;
   1473		datap += fpdu_len;
   1474		len += fpdu_len;
   1475		datalen -= fpdu_len;
   1476	}
   1477	if (full) {
    1478		/* copy full fpdus into the txbuf and send them out */
   1479		txbuf = irdma_puda_get_bufpool(ieq);
   1480		if (!txbuf) {
   1481			pfpdu->no_tx_bufs++;
   1482			list_add(&buf->list, rxlist);
   1483			return -ENOBUFS;
   1484		}
   1485		/* modify txbuf's buffer header */
   1486		irdma_ieq_setup_tx_buf(buf, txbuf);
   1487		/* copy full fpdu's to new buffer */
   1488		if (ieq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
   1489			irdma_ieq_copy_to_txbuf(buf, txbuf, ioffset,
   1490						txbuf->hdrlen, len);
   1491			txbuf->totallen = txbuf->hdrlen + len;
   1492			txbuf->ah_id = pfpdu->ah->ah_info.ah_idx;
   1493		} else {
   1494			irdma_ieq_copy_to_txbuf(buf, txbuf, ioffset,
   1495						buf->hdrlen, len);
   1496			txbuf->totallen = buf->hdrlen + len;
   1497		}
   1498		irdma_ieq_update_tcpip_info(txbuf, len, buf->seqnum);
   1499		print_hex_dump_debug("IEQ: IEQ TX BUFFER", DUMP_PREFIX_OFFSET,
   1500				     16, 8, txbuf->mem.va, txbuf->totallen,
   1501				     false);
   1502		txbuf->do_lpb = true;
   1503		irdma_puda_send_buf(ieq, txbuf);
   1504
   1505		if (!datalen) {
   1506			pfpdu->rcv_nxt = buf->seqnum + len;
   1507			irdma_puda_ret_bufpool(ieq, buf);
   1508			return 0;
   1509		}
   1510		buf->data = datap;
   1511		buf->seqnum = seqnum + len;
   1512		buf->datalen = datalen;
   1513		pfpdu->rcv_nxt = buf->seqnum;
   1514	}
   1515	if (partial)
   1516		return irdma_ieq_handle_partial(ieq, pfpdu, buf, fpdu_len);
   1517
   1518	return 0;
   1519}
   1520
   1521/**
    1522 * irdma_ieq_process_fpdus - process fpdu buffers on the qp's rxlist
    1523 * @qp: qp for which partial fpdus are pending
   1524 * @ieq: ieq resource
   1525 */
   1526void irdma_ieq_process_fpdus(struct irdma_sc_qp *qp,
   1527			     struct irdma_puda_rsrc *ieq)
   1528{
   1529	struct irdma_pfpdu *pfpdu = &qp->pfpdu;
   1530	struct list_head *rxlist = &pfpdu->rxlist;
   1531	struct irdma_puda_buf *buf;
   1532	int status;
   1533
   1534	do {
   1535		if (list_empty(rxlist))
   1536			break;
   1537		buf = irdma_puda_get_listbuf(rxlist);
   1538		if (!buf) {
   1539			ibdev_dbg(to_ibdev(ieq->dev), "IEQ: error no buf\n");
   1540			break;
   1541		}
   1542		if (buf->seqnum != pfpdu->rcv_nxt) {
   1543			/* This could be out of order or missing packet */
   1544			pfpdu->out_of_order++;
   1545			list_add(&buf->list, rxlist);
   1546			break;
   1547		}
   1548		/* keep processing buffers from the head of the list */
   1549		status = irdma_ieq_process_buf(ieq, pfpdu, buf);
   1550		if (status == -EINVAL) {
   1551			pfpdu->mpa_crc_err = true;
   1552			while (!list_empty(rxlist)) {
   1553				buf = irdma_puda_get_listbuf(rxlist);
   1554				irdma_puda_ret_bufpool(ieq, buf);
   1555				pfpdu->crc_err++;
   1556				ieq->crc_err++;
   1557			}
   1558			/* create CQP for AE */
   1559			irdma_ieq_mpa_crc_ae(ieq->dev, qp);
   1560		}
   1561	} while (!status);
   1562}
   1563
   1564/**
   1565 * irdma_ieq_create_ah - create an address handle for IEQ
   1566 * @qp: qp pointer
   1567 * @buf: buf received on IEQ used to create AH
   1568 */
   1569static int irdma_ieq_create_ah(struct irdma_sc_qp *qp, struct irdma_puda_buf *buf)
   1570{
   1571	struct irdma_ah_info ah_info = {};
   1572
   1573	qp->pfpdu.ah_buf = buf;
   1574	irdma_puda_ieq_get_ah_info(qp, &ah_info);
   1575	return irdma_puda_create_ah(qp->vsi->dev, &ah_info, false,
   1576				    IRDMA_PUDA_RSRC_TYPE_IEQ, qp,
   1577				    &qp->pfpdu.ah);
   1578}
   1579
   1580/**
   1581 * irdma_ieq_handle_exception - handle qp's exception
   1582 * @ieq: ieq resource
    1583 * @qp: qp receiving exception
   1584 * @buf: receive buffer
   1585 */
   1586static void irdma_ieq_handle_exception(struct irdma_puda_rsrc *ieq,
   1587				       struct irdma_sc_qp *qp,
   1588				       struct irdma_puda_buf *buf)
   1589{
   1590	struct irdma_pfpdu *pfpdu = &qp->pfpdu;
   1591	u32 *hw_host_ctx = (u32 *)qp->hw_host_ctx;
   1592	u32 rcv_wnd = hw_host_ctx[23];
   1593	/* first partial seq # in q2 */
   1594	u32 fps = *(u32 *)(qp->q2_buf + Q2_FPSN_OFFSET);
   1595	struct list_head *rxlist = &pfpdu->rxlist;
   1596	unsigned long flags = 0;
   1597	u8 hw_rev = qp->dev->hw_attrs.uk_attrs.hw_rev;
   1598
   1599	print_hex_dump_debug("IEQ: IEQ RX BUFFER", DUMP_PREFIX_OFFSET, 16, 8,
   1600			     buf->mem.va, buf->totallen, false);
   1601
   1602	spin_lock_irqsave(&pfpdu->lock, flags);
   1603	pfpdu->total_ieq_bufs++;
   1604	if (pfpdu->mpa_crc_err) {
   1605		pfpdu->crc_err++;
   1606		goto error;
   1607	}
   1608	if (pfpdu->mode && fps != pfpdu->fps) {
   1609		/* clean up qp as it is new partial sequence */
   1610		irdma_ieq_cleanup_qp(ieq, qp);
   1611		ibdev_dbg(to_ibdev(ieq->dev), "IEQ: restarting new partial\n");
   1612		pfpdu->mode = false;
   1613	}
   1614
   1615	if (!pfpdu->mode) {
   1616		print_hex_dump_debug("IEQ: Q2 BUFFER", DUMP_PREFIX_OFFSET, 16,
   1617				     8, (u64 *)qp->q2_buf, 128, false);
   1618		/* First_Partial_Sequence_Number check */
   1619		pfpdu->rcv_nxt = fps;
   1620		pfpdu->fps = fps;
   1621		pfpdu->mode = true;
   1622		pfpdu->max_fpdu_data = (buf->ipv4) ?
   1623				       (ieq->vsi->mtu - IRDMA_MTU_TO_MSS_IPV4) :
   1624				       (ieq->vsi->mtu - IRDMA_MTU_TO_MSS_IPV6);
   1625		pfpdu->pmode_count++;
   1626		ieq->pmode_count++;
   1627		INIT_LIST_HEAD(rxlist);
   1628		irdma_ieq_check_first_buf(buf, fps);
   1629	}
   1630
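        	/* unsigned wrap-safe check: drop segments outside the receive window */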
   1631	if (!(rcv_wnd >= (buf->seqnum - pfpdu->rcv_nxt))) {
   1632		pfpdu->bad_seq_num++;
   1633		ieq->bad_seq_num++;
   1634		goto error;
   1635	}
   1636
   1637	if (!list_empty(rxlist)) {
   1638		if (buf->seqnum != pfpdu->nextseqnum) {
   1639			irdma_send_ieq_ack(qp);
    1640			/* throw away out-of-order, duplicates */
   1641			goto error;
   1642		}
   1643	}
   1644	/* Insert buf before head */
   1645	list_add_tail(&buf->list, rxlist);
   1646	pfpdu->nextseqnum = buf->seqnum + buf->datalen;
   1647	pfpdu->lastrcv_buf = buf;
   1648	if (hw_rev >= IRDMA_GEN_2 && !pfpdu->ah) {
   1649		irdma_ieq_create_ah(qp, buf);
   1650		if (!pfpdu->ah)
   1651			goto error;
   1652		goto exit;
   1653	}
   1654	if (hw_rev == IRDMA_GEN_1)
   1655		irdma_ieq_process_fpdus(qp, ieq);
   1656	else if (pfpdu->ah && pfpdu->ah->ah_info.ah_valid)
   1657		irdma_ieq_process_fpdus(qp, ieq);
   1658exit:
   1659	spin_unlock_irqrestore(&pfpdu->lock, flags);
   1660
   1661	return;
   1662
   1663error:
   1664	irdma_puda_ret_bufpool(ieq, buf);
   1665	spin_unlock_irqrestore(&pfpdu->lock, flags);
   1666}
   1667
   1668/**
   1669 * irdma_ieq_receive - received exception buffer
   1670 * @vsi: VSI of device
   1671 * @buf: exception buffer received
   1672 */
   1673static void irdma_ieq_receive(struct irdma_sc_vsi *vsi,
   1674			      struct irdma_puda_buf *buf)
   1675{
   1676	struct irdma_puda_rsrc *ieq = vsi->ieq;
   1677	struct irdma_sc_qp *qp = NULL;
   1678	u32 wqe_idx = ieq->compl_rxwqe_idx;
   1679
   1680	qp = irdma_ieq_get_qp(vsi->dev, buf);
   1681	if (!qp) {
   1682		ieq->stats_bad_qp_id++;
   1683		irdma_puda_ret_bufpool(ieq, buf);
   1684	} else {
   1685		irdma_ieq_handle_exception(ieq, qp, buf);
   1686	}
   1687	/*
    1688	 * ieq->rx_wqe_idx tells irdma_puda_replenish_rq()
    1689	 * at which wqe_idx to start replenishing the rq
   1690	 */
   1691	if (!ieq->rxq_invalid_cnt)
   1692		ieq->rx_wqe_idx = wqe_idx;
   1693	ieq->rxq_invalid_cnt++;
   1694}
   1695
   1696/**
   1697 * irdma_ieq_tx_compl - put back after sending completed exception buffer
   1698 * @vsi: sc VSI struct
   1699 * @sqwrid: pointer to puda buffer
   1700 */
   1701static void irdma_ieq_tx_compl(struct irdma_sc_vsi *vsi, void *sqwrid)
   1702{
   1703	struct irdma_puda_rsrc *ieq = vsi->ieq;
   1704	struct irdma_puda_buf *buf = sqwrid;
   1705
   1706	irdma_puda_ret_bufpool(ieq, buf);
   1707}
   1708
   1709/**
   1710 * irdma_ieq_cleanup_qp - qp is being destroyed
   1711 * @ieq: ieq resource
    1712 * @qp: qp being destroyed; free all its pending fpdu buffers
   1713 */
   1714void irdma_ieq_cleanup_qp(struct irdma_puda_rsrc *ieq, struct irdma_sc_qp *qp)
   1715{
   1716	struct irdma_puda_buf *buf;
   1717	struct irdma_pfpdu *pfpdu = &qp->pfpdu;
   1718	struct list_head *rxlist = &pfpdu->rxlist;
   1719
   1720	if (qp->pfpdu.ah) {
   1721		irdma_puda_free_ah(ieq->dev, qp->pfpdu.ah);
   1722		qp->pfpdu.ah = NULL;
   1723		qp->pfpdu.ah_buf = NULL;
   1724	}
   1725
   1726	if (!pfpdu->mode)
   1727		return;
   1728
   1729	while (!list_empty(rxlist)) {
   1730		buf = irdma_puda_get_listbuf(rxlist);
   1731		irdma_puda_ret_bufpool(ieq, buf);
   1732	}
   1733}