cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

cq.c (30396B)


/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/uverbs_ioctl.h>

#include "iw_cxgb4.h"

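/*
 * Tear down a hardware CQ: post a FW_RI_RES_WR with op RESET so the
 * firmware frees the queue, wait for the reply, then release the
 * software queue, the DMA-coherent queue memory, and the CQID.
 */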
static void destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
		       struct c4iw_dev_ucontext *uctx, struct sk_buff *skb,
		       struct c4iw_wr_wait *wr_waitp)
{
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;

	wr_len = sizeof(*res_wr) + sizeof(*res);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = __skb_put_zero(skb, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP_V(FW_RI_RES_WR) |
			FW_RI_RES_WR_NRES_V(1) |
			FW_WR_COMPL_F);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (uintptr_t)wr_waitp;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_RESET;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);

	c4iw_init_wr_wait(wr_waitp);
	c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);

	kfree(cq->sw_queue);
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  cq->memsize, cq->queue,
			  dma_unmap_addr(cq, mapping));
	c4iw_put_cqid(rdev, cq->cqid, uctx);
}

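/*
 * Allocate a CQID and queue memory (plus a software queue for kernel
 * users), then post a FW_RI_RES_WR with op WRITE so the firmware
 * instantiates the hardware CQ. On success the BAR2 doorbell address
 * is resolved; user-mode CQs must fall within the BAR2 range.
 */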
static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
		     struct c4iw_dev_ucontext *uctx,
		     struct c4iw_wr_wait *wr_waitp)
{
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	int user = (uctx != &rdev->uctx);
	int ret;
	struct sk_buff *skb;
	struct c4iw_ucontext *ucontext = NULL;

	if (user)
		ucontext = container_of(uctx, struct c4iw_ucontext, uctx);

	cq->cqid = c4iw_get_cqid(rdev, uctx);
	if (!cq->cqid) {
		ret = -ENOMEM;
		goto err1;
	}

	if (!user) {
		cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
		if (!cq->sw_queue) {
			ret = -ENOMEM;
			goto err2;
		}
	}
	cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,
				       &cq->dma_addr, GFP_KERNEL);
	if (!cq->queue) {
		ret = -ENOMEM;
		goto err3;
	}
	dma_unmap_addr_set(cq, mapping, cq->dma_addr);

	if (user && ucontext->is_32b_cqe) {
		cq->qp_errp = &((struct t4_status_page *)
		((u8 *)cq->queue + (cq->size - 1) *
		 (sizeof(*cq->queue) / 2)))->qp_err;
	} else {
		cq->qp_errp = &((struct t4_status_page *)
		((u8 *)cq->queue + (cq->size - 1) *
		 sizeof(*cq->queue)))->qp_err;
	}

	/* build fw_ri_res_wr */
	wr_len = sizeof(*res_wr) + sizeof(*res);

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto err4;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = __skb_put_zero(skb, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP_V(FW_RI_RES_WR) |
			FW_RI_RES_WR_NRES_V(1) |
			FW_WR_COMPL_F);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (uintptr_t)wr_waitp;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_WRITE;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);
	res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
			FW_RI_RES_WR_IQANUS_V(0) |
			FW_RI_RES_WR_IQANUD_V(1) |
			FW_RI_RES_WR_IQANDST_F |
			FW_RI_RES_WR_IQANDSTINDEX_V(
				rdev->lldi.ciq_ids[cq->vector]));
	res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
			FW_RI_RES_WR_IQDROPRSS_F |
			FW_RI_RES_WR_IQPCIECH_V(2) |
			FW_RI_RES_WR_IQINTCNTTHRESH_V(0) |
			FW_RI_RES_WR_IQO_F |
			((user && ucontext->is_32b_cqe) ?
			 FW_RI_RES_WR_IQESIZE_V(1) :
			 FW_RI_RES_WR_IQESIZE_V(2)));
	res->u.cq.iqsize = cpu_to_be16(cq->size);
	res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);

	c4iw_init_wr_wait(wr_waitp);
	ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);
	if (ret)
		goto err4;

	cq->gen = 1;
	cq->gts = rdev->lldi.gts_reg;
	cq->rdev = rdev;

	cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, CXGB4_BAR2_QTYPE_INGRESS,
				      &cq->bar2_qid,
				      user ? &cq->bar2_pa : NULL);
	if (user && !cq->bar2_pa) {
		pr_warn("%s: cqid %u not in BAR2 range\n",
			pci_name(rdev->lldi.pdev), cq->cqid);
		ret = -EINVAL;
		goto err4;
	}
	return 0;
err4:
	dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
			  dma_unmap_addr(cq, mapping));
err3:
	kfree(cq->sw_queue);
err2:
	c4iw_put_cqid(rdev, cq->cqid, uctx);
err1:
	return ret;
}

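/*
 * Synthesize a flushed (T4_ERR_SWFLUSH) RECV CQE in the software CQ.
 * A non-zero srqidx records which SRQ entry the flush accounts for.
 */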
static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq, u32 srqidx)
{
	struct t4_cqe cqe;

	pr_debug("wq %p cq %p sw_cidx %u sw_pidx %u\n",
		 wq, cq, cq->sw_cidx, cq->sw_pidx);
	memset(&cqe, 0, sizeof(cqe));
	cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
				 CQE_OPCODE_V(FW_RI_SEND) |
				 CQE_TYPE_V(0) |
				 CQE_SWCQE_V(1) |
				 CQE_QPID_V(wq->sq.qid));
	cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
	if (srqidx)
		cqe.u.srcqe.abs_rqe_idx = cpu_to_be32(srqidx);
	cq->sw_queue[cq->sw_pidx] = cqe;
	t4_swcq_produce(cq);
}

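/*
 * Insert flush CQEs for every RQ entry still in use beyond the
 * 'count' already accounted for; returns how many were flushed.
 */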
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
{
	int flushed = 0;
	int in_use = wq->rq.in_use - count;

	pr_debug("wq %p cq %p rq.in_use %u skip count %u\n",
		 wq, cq, wq->rq.in_use, count);
	while (in_use--) {
		insert_recv_cqe(wq, cq, 0);
		flushed++;
	}
	return flushed;
}

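/*
 * Synthesize a flushed CQE for an SQ work request, preserving its
 * opcode and SQ index so the consumer can match it to the WR.
 */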
static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
			  struct t4_swsqe *swcqe)
{
	struct t4_cqe cqe;

	pr_debug("wq %p cq %p sw_cidx %u sw_pidx %u\n",
		 wq, cq, cq->sw_cidx, cq->sw_pidx);
	memset(&cqe, 0, sizeof(cqe));
	cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
				 CQE_OPCODE_V(swcqe->opcode) |
				 CQE_TYPE_V(1) |
				 CQE_SWCQE_V(1) |
				 CQE_QPID_V(wq->sq.qid));
	CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
	cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;
	t4_swcq_produce(cq);
}

static void advance_oldest_read(struct t4_wq *wq);

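/*
 * Flush every SQ entry between flush_cidx and pidx into the software
 * CQ, advancing the oldest-read pointer past any flushed read WRs.
 * Returns the number of entries flushed.
 */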
int c4iw_flush_sq(struct c4iw_qp *qhp)
{
	int flushed = 0;
	struct t4_wq *wq = &qhp->wq;
	struct c4iw_cq *chp = to_c4iw_cq(qhp->ibqp.send_cq);
	struct t4_cq *cq = &chp->cq;
	int idx;
	struct t4_swsqe *swsqe;

	if (wq->sq.flush_cidx == -1)
		wq->sq.flush_cidx = wq->sq.cidx;
	idx = wq->sq.flush_cidx;
	while (idx != wq->sq.pidx) {
		swsqe = &wq->sq.sw_sq[idx];
		swsqe->flushed = 1;
		insert_sq_cqe(wq, cq, swsqe);
		if (wq->sq.oldest_read == swsqe) {
			advance_oldest_read(wq);
		}
		flushed++;
		if (++idx == wq->sq.size)
			idx = 0;
	}
	wq->sq.flush_cidx += flushed;
	if (wq->sq.flush_cidx >= wq->sq.size)
		wq->sq.flush_cidx -= wq->sq.size;
	return flushed;
}

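/*
 * Walk the software SQ from flush_cidx and move signaled WRs that have
 * already completed (possibly out of order) into the software CQ,
 * skipping unsignaled entries. Stops at the first signaled WR that has
 * not completed yet.
 */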
static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
{
	struct t4_swsqe *swsqe;
	int cidx;

	if (wq->sq.flush_cidx == -1)
		wq->sq.flush_cidx = wq->sq.cidx;
	cidx = wq->sq.flush_cidx;

	while (cidx != wq->sq.pidx) {
		swsqe = &wq->sq.sw_sq[cidx];
		if (!swsqe->signaled) {
			if (++cidx == wq->sq.size)
				cidx = 0;
		} else if (swsqe->complete) {

			/*
			 * Insert this completed cqe into the swcq.
			 */
			pr_debug("moving cqe into swcq sq idx %u cq idx %u\n",
				 cidx, cq->sw_pidx);
			swsqe->cqe.header |= htonl(CQE_SWCQE_V(1));
			cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
			t4_swcq_produce(cq);
			swsqe->flushed = 1;
			if (++cidx == wq->sq.size)
				cidx = 0;
			wq->sq.flush_cidx = cidx;
		} else
			break;
	}
}

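/*
 * Build a synthetic SQ-type READ_REQ CQE from a hardware read-response
 * CQE, restoring the SQ index and read length that the hardware CQE
 * does not carry.
 */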
static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
		struct t4_cqe *read_cqe)
{
	read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
	read_cqe->len = htonl(wq->sq.oldest_read->read_len);
	read_cqe->header = htonl(CQE_QPID_V(CQE_QPID(hw_cqe)) |
			CQE_SWCQE_V(SW_CQE(hw_cqe)) |
			CQE_OPCODE_V(FW_RI_READ_REQ) |
			CQE_TYPE_V(1));
	read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
}

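/*
 * Advance oldest_read to the next outstanding read WR in the software
 * SQ, or set it to NULL if no read WR remains.
 */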
static void advance_oldest_read(struct t4_wq *wq)
{
	u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;

	if (rptr == wq->sq.size)
		rptr = 0;
	while (rptr != wq->sq.pidx) {
		wq->sq.oldest_read = &wq->sq.sw_sq[rptr];

		if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
			return;
		if (++rptr == wq->sq.size)
			rptr = 0;
	}
	wq->sq.oldest_read = NULL;
}

/*
 * Move all CQEs from the HWCQ into the SWCQ.
 * Deal with out-of-order completions and with completions that
 * retire prior unsignaled WRs.
 */
void c4iw_flush_hw_cq(struct c4iw_cq *chp, struct c4iw_qp *flush_qhp)
{
	struct t4_cqe *hw_cqe, *swcqe, read_cqe;
	struct c4iw_qp *qhp;
	struct t4_swsqe *swsqe;
	int ret;

	pr_debug("cqid 0x%x\n", chp->cq.cqid);
	ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);

	/*
	 * This logic is similar to poll_cq(), but not quite the same
	 * unfortunately.  Need to move pertinent HW CQEs to the SW CQ but
	 * also do any translation magic that poll_cq() normally does.
	 */
	while (!ret) {
		qhp = get_qhp(chp->rhp, CQE_QPID(hw_cqe));

		/*
		 * drop CQEs with no associated QP
		 */
		if (qhp == NULL)
			goto next_cqe;

		if (flush_qhp != qhp) {
			spin_lock(&qhp->lock);

			if (qhp->wq.flushed == 1)
				goto next_cqe;
		}

		if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE)
			goto next_cqe;

		if (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP) {

			/* If we reached here because of an async event or
			 * other error, and there is an egress error, then
			 * drop the CQE.
			 */
			if (CQE_TYPE(hw_cqe) == 1)
				goto next_cqe;

			/* drop peer2peer RTR reads.
			 */
			if (CQE_WRID_STAG(hw_cqe) == 1)
				goto next_cqe;

			/*
			 * Eat completions for unsignaled read WRs.
			 */
			if (!qhp->wq.sq.oldest_read->signaled) {
				advance_oldest_read(&qhp->wq);
				goto next_cqe;
			}

			/*
			 * Don't write to the HWCQ, create a new read req CQE
			 * in local memory and move it into the swcq.
			 */
			create_read_req_cqe(&qhp->wq, hw_cqe, &read_cqe);
			hw_cqe = &read_cqe;
			advance_oldest_read(&qhp->wq);
		}

		/* if it's an SQ completion, then do the magic to move all the
		 * unsignaled and now in-order completions into the swcq.
		 */
		if (SQ_TYPE(hw_cqe)) {
			swsqe = &qhp->wq.sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
			swsqe->cqe = *hw_cqe;
			swsqe->complete = 1;
			flush_completed_wrs(&qhp->wq, &chp->cq);
		} else {
			swcqe = &chp->cq.sw_queue[chp->cq.sw_pidx];
			*swcqe = *hw_cqe;
			swcqe->header |= cpu_to_be32(CQE_SWCQE_V(1));
			t4_swcq_produce(&chp->cq);
		}
next_cqe:
		t4_hwcq_consume(&chp->cq);
		ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
		if (qhp && flush_qhp != qhp)
			spin_unlock(&qhp->lock);
	}
}

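/*
 * Decide whether a CQE still completes an RQ work request: drain and
 * TERMINATE CQEs, RDMA writes on the RQ, read responses on the SQ, and
 * SENDs racing an already-empty RQ do not.
 */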
static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
{
	if (DRAIN_CQE(cqe)) {
		WARN_ONCE(1, "Unexpected DRAIN CQE qp id %u!\n", wq->sq.qid);
		return 0;
	}

	if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
		return 0;

	if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe))
		return 0;

	if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe))
		return 0;

	if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq))
		return 0;
	return 1;
}

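/*
 * Count the software CQEs for this QP's RQ that still complete an RQE.
 */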
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
{
	struct t4_cqe *cqe;
	u32 ptr;

	*count = 0;
	pr_debug("count zero %d\n", *count);
	ptr = cq->sw_cidx;
	while (ptr != cq->sw_pidx) {
		cqe = &cq->sw_queue[ptr];
		if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
		    (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq))
			(*count)++;
		if (++ptr == cq->size)
			ptr = 0;
	}
	pr_debug("cq %p count %d\n", cq, *count);
}

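/*
 * Move deferred SRQ WRs from the pending ring into the hardware SRQ
 * and ring the doorbell once for the whole batch.
 */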
static void post_pending_srq_wrs(struct t4_srq *srq)
{
	struct t4_srq_pending_wr *pwr;
	u16 idx = 0;

	while (srq->pending_in_use) {
		pwr = &srq->pending_wrs[srq->pending_cidx];
		srq->sw_rq[srq->pidx].wr_id = pwr->wr_id;
		srq->sw_rq[srq->pidx].valid = 1;

		pr_debug("%s posting pending cidx %u pidx %u wq_pidx %u in_use %u rq_size %u wr_id %llx\n",
			 __func__,
			 srq->cidx, srq->pidx, srq->wq_pidx,
			 srq->in_use, srq->size,
			 (unsigned long long)pwr->wr_id);

		c4iw_copy_wr_to_srq(srq, &pwr->wqe, pwr->len16);
		t4_srq_consume_pending_wr(srq);
		t4_srq_produce(srq, pwr->len16);
		idx += DIV_ROUND_UP(pwr->len16 * 16, T4_EQ_ENTRY_SIZE);
	}

	if (idx) {
		t4_ring_srq_db(srq, idx, pwr->len16, &pwr->wqe);
		srq->queue[srq->size].status.host_wq_pidx =
			srq->wq_pidx;
	}
}

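/*
 * Return the wr_id for an SRQ CQE. An in-order CQE consumes its SRQ
 * slot plus any following slots already reaped out of order; an
 * out-of-order CQE is only marked and counted until the gap closes.
 */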
static u64 reap_srq_cqe(struct t4_cqe *hw_cqe, struct t4_srq *srq)
{
	int rel_idx = CQE_ABS_RQE_IDX(hw_cqe) - srq->rqt_abs_idx;
	u64 wr_id;

	srq->sw_rq[rel_idx].valid = 0;
	wr_id = srq->sw_rq[rel_idx].wr_id;

	if (rel_idx == srq->cidx) {
		pr_debug("%s in order cqe rel_idx %u cidx %u pidx %u wq_pidx %u in_use %u rq_size %u wr_id %llx\n",
			 __func__, rel_idx, srq->cidx, srq->pidx,
			 srq->wq_pidx, srq->in_use, srq->size,
			 (unsigned long long)srq->sw_rq[rel_idx].wr_id);
		t4_srq_consume(srq);
		while (srq->ooo_count && !srq->sw_rq[srq->cidx].valid) {
			pr_debug("%s eat ooo cidx %u pidx %u wq_pidx %u in_use %u rq_size %u ooo_count %u wr_id %llx\n",
				 __func__, srq->cidx, srq->pidx,
				 srq->wq_pidx, srq->in_use,
				 srq->size, srq->ooo_count,
				 (unsigned long long)
				 srq->sw_rq[srq->cidx].wr_id);
			t4_srq_consume_ooo(srq);
		}
		if (srq->ooo_count == 0 && srq->pending_in_use)
			post_pending_srq_wrs(srq);
	} else {
		pr_debug("%s ooo cqe rel_idx %u cidx %u pidx %u wq_pidx %u in_use %u rq_size %u ooo_count %u wr_id %llx\n",
			 __func__, rel_idx, srq->cidx,
			 srq->pidx, srq->wq_pidx,
			 srq->in_use, srq->size,
			 srq->ooo_count,
			 (unsigned long long)srq->sw_rq[rel_idx].wr_id);
		t4_srq_produce_ooo(srq);
	}
	return wr_id;
}

/*
 * poll_cq
 *
 * Caller must:
 *     check the validity of the first CQE,
 *     supply the wq associated with the qpid.
 *
 * credit: cq credit to return to sge.
 * cqe_flushed: 1 iff the CQE is flushed.
 * cqe: copy of the polled CQE.
 *
 * return value:
 *    0		    CQE returned ok.
 *    -EAGAIN       CQE skipped, try again.
 *    -EOVERFLOW    CQ overflow detected.
 */
static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
		   u8 *cqe_flushed, u64 *cookie, u32 *credit,
		   struct t4_srq *srq)
{
	int ret = 0;
	struct t4_cqe *hw_cqe, read_cqe;

	*cqe_flushed = 0;
	*credit = 0;
	ret = t4_next_cqe(cq, &hw_cqe);
	if (ret)
		return ret;

	pr_debug("CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
		 CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
		 CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
		 CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
		 CQE_WRID_LOW(hw_cqe));

	/*
	 * skip CQEs not affiliated with a QP.
	 */
	if (wq == NULL) {
		ret = -EAGAIN;
		goto skip_cqe;
	}

	/*
	 * skip HW CQEs if the wq is flushed.
	 */
	if (wq->flushed && !SW_CQE(hw_cqe)) {
		ret = -EAGAIN;
		goto skip_cqe;
	}

	/*
	 * skip TERMINATE cqes...
	 */
	if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE) {
		ret = -EAGAIN;
		goto skip_cqe;
	}

	/*
	 * Special cqe for drain WR completions...
	 */
	if (DRAIN_CQE(hw_cqe)) {
		*cookie = CQE_DRAIN_COOKIE(hw_cqe);
		*cqe = *hw_cqe;
		goto skip_cqe;
	}

	/*
	 * Gotta tweak READ completions:
	 *	1) the cqe doesn't contain the sq_wptr from the wr.
	 *	2) opcode not reflected from the wr.
	 *	3) read_len not reflected from the wr.
	 *	4) cq_type is RQ_TYPE not SQ_TYPE.
	 */
	if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) {

		/* If we reached here because of an async event or other
		 * error, and there is an egress error, then drop the CQE.
		 */
		if (CQE_TYPE(hw_cqe) == 1) {
			if (CQE_STATUS(hw_cqe))
				t4_set_wq_in_error(wq, 0);
			ret = -EAGAIN;
			goto skip_cqe;
		}

		/* If this is an unsolicited read response, then the read
		 * was generated by the kernel driver as part of peer-2-peer
		 * connection setup.  So ignore the completion.
		 */
		if (CQE_WRID_STAG(hw_cqe) == 1) {
			if (CQE_STATUS(hw_cqe))
				t4_set_wq_in_error(wq, 0);
			ret = -EAGAIN;
			goto skip_cqe;
		}

		/*
		 * Eat completions for unsignaled read WRs.
		 */
		if (!wq->sq.oldest_read->signaled) {
			advance_oldest_read(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}

		/*
		 * Don't write to the HWCQ, so create a new read req CQE
		 * in local memory.
		 */
		create_read_req_cqe(wq, hw_cqe, &read_cqe);
		hw_cqe = &read_cqe;
		advance_oldest_read(wq);
	}

	if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
		*cqe_flushed = (CQE_STATUS(hw_cqe) == T4_ERR_SWFLUSH);
		t4_set_wq_in_error(wq, 0);
	}

	/*
	 * RECV completion.
	 */
	if (RQ_TYPE(hw_cqe)) {

		/*
		 * HW only validates 4 bits of MSN.  So we must validate that
		 * the MSN in the SEND is the next expected MSN.  If it's not,
		 * then we complete this with T4_ERR_MSN and mark the wq in
		 * error.
		 */
		if (unlikely(!CQE_STATUS(hw_cqe) &&
			     CQE_WRID_MSN(hw_cqe) != wq->rq.msn)) {
			t4_set_wq_in_error(wq, 0);
			hw_cqe->header |= cpu_to_be32(CQE_STATUS_V(T4_ERR_MSN));
		}
		goto proc_cqe;
	}

	/*
	 * If we get here it's a send completion.
	 *
	 * Handle out of order completion. These get stuffed
	 * in the SW SQ. Then the SW SQ is walked to move any
	 * now in-order completions into the SW CQ.  This handles
	 * 2 cases:
	 *	1) reaping unsignaled WRs when the first subsequent
	 *	   signaled WR is completed.
	 *	2) out of order read completions.
	 */
	if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
		struct t4_swsqe *swsqe;

		pr_debug("out of order completion going in sw_sq at idx %u\n",
			 CQE_WRID_SQ_IDX(hw_cqe));
		swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
		swsqe->cqe = *hw_cqe;
		swsqe->complete = 1;
		ret = -EAGAIN;
		goto flush_wq;
	}

proc_cqe:
	*cqe = *hw_cqe;

	/*
	 * Reap the associated WR(s) that are freed up with this
	 * completion.
	 */
	if (SQ_TYPE(hw_cqe)) {
		int idx = CQE_WRID_SQ_IDX(hw_cqe);

		/*
		 * Account for any unsignaled completions completed by
		 * this signaled completion.  In this case, cidx points
		 * to the first unsignaled one, and idx points to the
		 * signaled one.  So adjust in_use based on this delta.
		 * If this is not completing any unsignaled WRs, then the
		 * delta will be 0.  Handle wrapping also!
		 */
		if (idx < wq->sq.cidx)
			wq->sq.in_use -= wq->sq.size + idx - wq->sq.cidx;
		else
			wq->sq.in_use -= idx - wq->sq.cidx;

		wq->sq.cidx = (uint16_t)idx;
		pr_debug("completing sq idx %u\n", wq->sq.cidx);
		*cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
		if (c4iw_wr_log)
			c4iw_log_wr_stats(wq, hw_cqe);
		t4_sq_consume(wq);
	} else {
		if (!srq) {
			pr_debug("completing rq idx %u\n", wq->rq.cidx);
			*cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
			if (c4iw_wr_log)
				c4iw_log_wr_stats(wq, hw_cqe);
			t4_rq_consume(wq);
		} else {
			*cookie = reap_srq_cqe(hw_cqe, srq);
		}
		wq->rq.msn++;
		goto skip_cqe;
	}

flush_wq:
	/*
	 * Flush any completed cqes that are now in-order.
	 */
	flush_completed_wrs(wq, cq);

skip_cqe:
	if (SW_CQE(hw_cqe)) {
		pr_debug("cq %p cqid 0x%x skip sw cqe cidx %u\n",
			 cq, cq->cqid, cq->sw_cidx);
		t4_swcq_consume(cq);
	} else {
		pr_debug("cq %p cqid 0x%x skip hw cqe cidx %u\n",
			 cq, cq->cqid, cq->cidx);
		t4_hwcq_consume(cq);
	}
	return ret;
}

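/*
 * Poll one CQE from the CQ and translate it into an ib_wc: map the
 * firmware opcode to an ib_wc_opcode and the T4 completion status to
 * an ib_wc_status, invalidating MRs where the CQE demands it.
 */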
static int __c4iw_poll_cq_one(struct c4iw_cq *chp, struct c4iw_qp *qhp,
			      struct ib_wc *wc, struct c4iw_srq *srq)
{
	struct t4_cqe cqe;
	struct t4_wq *wq = qhp ? &qhp->wq : NULL;
	u32 credit = 0;
	u8 cqe_flushed;
	u64 cookie = 0;
	int ret;

	ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit,
		      srq ? &srq->wq : NULL);
	if (ret)
		goto out;

	wc->wr_id = cookie;
	wc->qp = qhp ? &qhp->ibqp : NULL;
	wc->vendor_err = CQE_STATUS(&cqe);
	wc->wc_flags = 0;

	/*
	 * Simulate a SRQ_LIMIT_REACHED HW notification if required.
	 */
	if (srq && !(srq->flags & T4_SRQ_LIMIT_SUPPORT) && srq->armed &&
	    srq->wq.in_use < srq->srq_limit)
		c4iw_dispatch_srq_limit_reached_event(srq);

	pr_debug("qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x lo 0x%x cookie 0x%llx\n",
		 CQE_QPID(&cqe),
		 CQE_TYPE(&cqe), CQE_OPCODE(&cqe),
		 CQE_STATUS(&cqe), CQE_LEN(&cqe),
		 CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe),
		 (unsigned long long)cookie);

	if (CQE_TYPE(&cqe) == 0) {
		if (!CQE_STATUS(&cqe))
			wc->byte_len = CQE_LEN(&cqe);
		else
			wc->byte_len = 0;

		switch (CQE_OPCODE(&cqe)) {
		case FW_RI_SEND:
			wc->opcode = IB_WC_RECV;
			break;
		case FW_RI_SEND_WITH_INV:
		case FW_RI_SEND_WITH_SE_INV:
			wc->opcode = IB_WC_RECV;
			wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
			c4iw_invalidate_mr(qhp->rhp, wc->ex.invalidate_rkey);
			break;
		case FW_RI_WRITE_IMMEDIATE:
			wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
			wc->ex.imm_data = CQE_IMM_DATA(&cqe);
			wc->wc_flags |= IB_WC_WITH_IMM;
			break;
		default:
			pr_err("Unexpected opcode %d in the CQE received for QPID=0x%0x\n",
			       CQE_OPCODE(&cqe), CQE_QPID(&cqe));
			ret = -EINVAL;
			goto out;
		}
	} else {
		switch (CQE_OPCODE(&cqe)) {
		case FW_RI_WRITE_IMMEDIATE:
		case FW_RI_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case FW_RI_READ_REQ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = CQE_LEN(&cqe);
			break;
		case FW_RI_SEND_WITH_INV:
		case FW_RI_SEND_WITH_SE_INV:
			wc->opcode = IB_WC_SEND;
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
			break;
		case FW_RI_SEND:
		case FW_RI_SEND_WITH_SE:
			wc->opcode = IB_WC_SEND;
			break;

		case FW_RI_LOCAL_INV:
			wc->opcode = IB_WC_LOCAL_INV;
			break;
		case FW_RI_FAST_REGISTER:
			wc->opcode = IB_WC_REG_MR;

			/* Invalidate the MR if the fastreg failed */
			if (CQE_STATUS(&cqe) != T4_ERR_SUCCESS)
				c4iw_invalidate_mr(qhp->rhp,
						   CQE_WRID_FR_STAG(&cqe));
			break;
		default:
			pr_err("Unexpected opcode %d in the CQE received for QPID=0x%0x\n",
			       CQE_OPCODE(&cqe), CQE_QPID(&cqe));
			ret = -EINVAL;
			goto out;
		}
	}

	if (cqe_flushed)
		wc->status = IB_WC_WR_FLUSH_ERR;
	else {

		switch (CQE_STATUS(&cqe)) {
		case T4_ERR_SUCCESS:
			wc->status = IB_WC_SUCCESS;
			break;
		case T4_ERR_STAG:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case T4_ERR_PDID:
			wc->status = IB_WC_LOC_PROT_ERR;
			break;
		case T4_ERR_QPID:
		case T4_ERR_ACCESS:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case T4_ERR_WRAP:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		case T4_ERR_BOUND:
			wc->status = IB_WC_LOC_LEN_ERR;
			break;
		case T4_ERR_INVALIDATE_SHARED_MR:
		case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
			wc->status = IB_WC_MW_BIND_ERR;
			break;
		case T4_ERR_CRC:
		case T4_ERR_MARKER:
		case T4_ERR_PDU_LEN_ERR:
		case T4_ERR_OUT_OF_RQE:
		case T4_ERR_DDP_VERSION:
		case T4_ERR_RDMA_VERSION:
		case T4_ERR_DDP_QUEUE_NUM:
		case T4_ERR_MSN:
		case T4_ERR_TBIT:
		case T4_ERR_MO:
		case T4_ERR_MSN_RANGE:
		case T4_ERR_IRD_OVERFLOW:
		case T4_ERR_OPCODE:
		case T4_ERR_INTERNAL_ERR:
			wc->status = IB_WC_FATAL_ERR;
			break;
		case T4_ERR_SWFLUSH:
			wc->status = IB_WC_WR_FLUSH_ERR;
			break;
		default:
			pr_err("Unexpected cqe_status 0x%x for QPID=0x%0x\n",
			       CQE_STATUS(&cqe), CQE_QPID(&cqe));
			wc->status = IB_WC_FATAL_ERR;
		}
	}
out:
	return ret;
}

/*
 * Get one cq entry from c4iw and map it to openib.
 *
 * Returns:
 *	0			cqe returned
 *	-ENODATA		cq is empty
 *	-EAGAIN			caller must try again
 *	any other -errno	fatal error
 */
static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
{
	struct c4iw_srq *srq = NULL;
	struct c4iw_qp *qhp = NULL;
	struct t4_cqe *rd_cqe;
	int ret;

	ret = t4_next_cqe(&chp->cq, &rd_cqe);

	if (ret)
		return ret;

	qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
	if (qhp) {
		spin_lock(&qhp->lock);
		srq = qhp->srq;
		if (srq)
			spin_lock(&srq->lock);
		ret = __c4iw_poll_cq_one(chp, qhp, wc, srq);
		spin_unlock(&qhp->lock);
		if (srq)
			spin_unlock(&srq->lock);
	} else {
		ret = __c4iw_poll_cq_one(chp, NULL, wc, NULL);
	}
	return ret;
}

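/*
 * Poll up to num_entries completions under the CQ lock, retrying
 * CQEs that poll_cq() asks to skip. Returns the number of entries
 * polled, or a negative errno on a fatal error.
 */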
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct c4iw_cq *chp;
	unsigned long flags;
	int npolled;
	int err = 0;

	chp = to_c4iw_cq(ibcq);

	spin_lock_irqsave(&chp->lock, flags);
	for (npolled = 0; npolled < num_entries; ++npolled) {
		do {
			err = c4iw_poll_cq_one(chp, wc + npolled);
		} while (err == -EAGAIN);
		if (err)
			break;
	}
	spin_unlock_irqrestore(&chp->lock, flags);
	return !err || err == -ENODATA ? npolled : err;
}

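/*
 * Drop a CQ reference; the final put completes cq_rel_comp so that
 * c4iw_destroy_cq() can proceed.
 */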
void c4iw_cq_rem_ref(struct c4iw_cq *chp)
{
	if (refcount_dec_and_test(&chp->refcnt))
		complete(&chp->cq_rel_comp);
}

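/*
 * Remove the CQ from the device xarray, wait for all references to
 * drop, then tear down the hardware CQ and its resources.
 */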
int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
{
	struct c4iw_cq *chp;
	struct c4iw_ucontext *ucontext;

	pr_debug("ib_cq %p\n", ib_cq);
	chp = to_c4iw_cq(ib_cq);

	xa_erase_irq(&chp->rhp->cqs, chp->cq.cqid);
	c4iw_cq_rem_ref(chp);
	wait_for_completion(&chp->cq_rel_comp);

	ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
					     ibucontext);
	destroy_cq(&chp->rhp->rdev, &chp->cq,
		   ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx,
		   chp->destroy_skb, chp->wr_waitp);
	c4iw_put_wr_wait(chp->wr_waitp);
	return 0;
}

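/*
 * Create a CQ: size the hardware IQ (status page, one full-vs-empty
 * slack entry, multiple-of-16 and minimum-of-64 rules), create it in
 * firmware, and for user contexts hand back mmap keys for the queue
 * memory and the GTS doorbell page.
 */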
int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		   struct ib_udata *udata)
{
	struct ib_device *ibdev = ibcq->device;
	int entries = attr->cqe;
	int vector = attr->comp_vector;
	struct c4iw_dev *rhp = to_c4iw_dev(ibcq->device);
	struct c4iw_cq *chp = to_c4iw_cq(ibcq);
	struct c4iw_create_cq ucmd;
	struct c4iw_create_cq_resp uresp;
	int ret, wr_len;
	size_t memsize, hwentries;
	struct c4iw_mm_entry *mm, *mm2;
	struct c4iw_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct c4iw_ucontext, ibucontext);

	pr_debug("ib_dev %p entries %d\n", ibdev, entries);
	if (attr->flags)
		return -EOPNOTSUPP;

	if (entries < 1 || entries > ibdev->attrs.max_cqe)
		return -EINVAL;

	if (vector >= rhp->rdev.lldi.nciq)
		return -EINVAL;

	if (udata) {
		if (udata->inlen < sizeof(ucmd))
			ucontext->is_32b_cqe = 1;
	}

	chp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
	if (!chp->wr_waitp) {
		ret = -ENOMEM;
		goto err_free_chp;
	}
	c4iw_init_wr_wait(chp->wr_waitp);

	wr_len = sizeof(struct fw_ri_res_wr) + sizeof(struct fw_ri_res);
	chp->destroy_skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!chp->destroy_skb) {
		ret = -ENOMEM;
		goto err_free_wr_wait;
	}

	/* account for the status page. */
	entries++;

	/* IQ needs one extra entry to differentiate full vs empty. */
	entries++;

	/*
	 * entries must be multiple of 16 for HW.
	 */
	entries = roundup(entries, 16);

	/*
	 * Make actual HW queue 2x to avoid cidx_inc overflows.
	 */
	hwentries = min(entries * 2, rhp->rdev.hw_queue.t4_max_iq_size);

	/*
	 * Make HW queue at least 64 entries so GTS updates aren't too
	 * frequent.
	 */
	if (hwentries < 64)
		hwentries = 64;

	memsize = hwentries * ((ucontext && ucontext->is_32b_cqe) ?
			(sizeof(*chp->cq.queue) / 2) : sizeof(*chp->cq.queue));

	/*
	 * memsize must be a multiple of the page size if it's a user cq.
	 */
	if (udata)
		memsize = roundup(memsize, PAGE_SIZE);

	chp->cq.size = hwentries;
	chp->cq.memsize = memsize;
	chp->cq.vector = vector;

	ret = create_cq(&rhp->rdev, &chp->cq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
			chp->wr_waitp);
	if (ret)
		goto err_free_skb;

	chp->rhp = rhp;
	chp->cq.size--;				/* status page */
	chp->ibcq.cqe = entries - 2;
	spin_lock_init(&chp->lock);
	spin_lock_init(&chp->comp_handler_lock);
	refcount_set(&chp->refcnt, 1);
	init_completion(&chp->cq_rel_comp);
	ret = xa_insert_irq(&rhp->cqs, chp->cq.cqid, chp, GFP_KERNEL);
	if (ret)
		goto err_destroy_cq;

	if (ucontext) {
		ret = -ENOMEM;
		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
		if (!mm)
			goto err_remove_handle;
		mm2 = kmalloc(sizeof(*mm2), GFP_KERNEL);
		if (!mm2)
			goto err_free_mm;

		memset(&uresp, 0, sizeof(uresp));
		uresp.qid_mask = rhp->rdev.cqmask;
		uresp.cqid = chp->cq.cqid;
		uresp.size = chp->cq.size;
		uresp.memsize = chp->cq.memsize;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		/* communicate to the userspace that
		 * kernel driver supports 64B CQE
		 */
		uresp.flags |= C4IW_64B_CQE;

		spin_unlock(&ucontext->mmap_lock);
		ret = ib_copy_to_udata(udata, &uresp,
				       ucontext->is_32b_cqe ?
				       sizeof(uresp) - sizeof(uresp.flags) :
				       sizeof(uresp));
		if (ret)
			goto err_free_mm2;

		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		mm->len = chp->cq.memsize;
		insert_mmap(ucontext, mm);

		mm2->key = uresp.gts_key;
		mm2->addr = chp->cq.bar2_pa;
		mm2->len = PAGE_SIZE;
		insert_mmap(ucontext, mm2);
	}

	pr_debug("cqid 0x%0x chp %p size %u memsize %zu, dma_addr %pad\n",
		 chp->cq.cqid, chp, chp->cq.size, chp->cq.memsize,
		 &chp->cq.dma_addr);
	return 0;
err_free_mm2:
	kfree(mm2);
err_free_mm:
	kfree(mm);
err_remove_handle:
	xa_erase_irq(&rhp->cqs, chp->cq.cqid);
err_destroy_cq:
	destroy_cq(&chp->rhp->rdev, &chp->cq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
		   chp->destroy_skb, chp->wr_waitp);
err_free_skb:
	kfree_skb(chp->destroy_skb);
err_free_wr_wait:
	c4iw_put_wr_wait(chp->wr_waitp);
err_free_chp:
	return ret;
}

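/*
 * Arm the CQ for the next (optionally solicited-only) completion
 * event; with IB_CQ_REPORT_MISSED_EVENTS, report whether the CQ is
 * non-empty.
 */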
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct c4iw_cq *chp;
	int ret = 0;
	unsigned long flag;

	chp = to_c4iw_cq(ibcq);
	spin_lock_irqsave(&chp->lock, flag);
	t4_arm_cq(&chp->cq,
		  (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
	if (flags & IB_CQ_REPORT_MISSED_EVENTS)
		ret = t4_cq_notempty(&chp->cq);
	spin_unlock_irqrestore(&chp->lock, flag);
	return ret;
}

void c4iw_flush_srqidx(struct c4iw_qp *qhp, u32 srqidx)
{
	struct c4iw_cq *rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
	unsigned long flag;

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&rchp->lock, flag);
	spin_lock(&qhp->lock);

	/* create a SRQ RECV CQE for srqidx */
	insert_recv_cqe(&qhp->wq, &rchp->cq, srqidx);

	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&rchp->lock, flag);
}