cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

hw.c (74606B)


      1// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
      2/* Copyright (c) 2015 - 2021 Intel Corporation */
      3#include "main.h"
      4
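        /* qp limits per resource profile, indexed by rf->limits_sel */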
      5static struct irdma_rsrc_limits rsrc_limits_table[] = {
      6	[0] = {
      7		.qplimit = SZ_128,
      8	},
      9	[1] = {
     10		.qplimit = SZ_1K,
     11	},
     12	[2] = {
     13		.qplimit = SZ_2K,
     14	},
     15	[3] = {
     16		.qplimit = SZ_4K,
     17	},
     18	[4] = {
     19		.qplimit = SZ_16K,
     20	},
     21	[5] = {
     22		.qplimit = SZ_64K,
     23	},
     24	[6] = {
     25		.qplimit = SZ_128K,
     26	},
     27	[7] = {
     28		.qplimit = SZ_256K,
     29	},
     30};
     31
     32/* types of hmc objects */
     33static enum irdma_hmc_rsrc_type iw_hmc_obj_types[] = {
     34	IRDMA_HMC_IW_QP,
     35	IRDMA_HMC_IW_CQ,
     36	IRDMA_HMC_IW_HTE,
     37	IRDMA_HMC_IW_ARP,
     38	IRDMA_HMC_IW_APBVT_ENTRY,
     39	IRDMA_HMC_IW_MR,
     40	IRDMA_HMC_IW_XF,
     41	IRDMA_HMC_IW_XFFL,
     42	IRDMA_HMC_IW_Q1,
     43	IRDMA_HMC_IW_Q1FL,
     44	IRDMA_HMC_IW_TIMER,
     45	IRDMA_HMC_IW_FSIMC,
     46	IRDMA_HMC_IW_FSIAV,
     47	IRDMA_HMC_IW_RRF,
     48	IRDMA_HMC_IW_RRFFL,
     49	IRDMA_HMC_IW_HDR,
     50	IRDMA_HMC_IW_MD,
     51	IRDMA_HMC_IW_OOISC,
     52	IRDMA_HMC_IW_OOISCFFL,
     53};
     54
     55/**
     56 * irdma_iwarp_ce_handler - handle iwarp completions
     57 * @iwcq: iwarp cq receiving event
     58 */
     59static void irdma_iwarp_ce_handler(struct irdma_sc_cq *iwcq)
     60{
     61	struct irdma_cq *cq = iwcq->back_cq;
     62
     63	if (!cq->user_mode)
     64		atomic_set(&cq->armed, 0);
     65	if (cq->ibcq.comp_handler)
     66		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
     67}
     68
     69/**
     70 * irdma_puda_ce_handler - handle puda completion events
     71 * @rf: RDMA PCI function
     72 * @cq: puda completion q for event
     73 */
     74static void irdma_puda_ce_handler(struct irdma_pci_f *rf,
     75				  struct irdma_sc_cq *cq)
     76{
     77	struct irdma_sc_dev *dev = &rf->sc_dev;
     78	u32 compl_error;
     79	int status;
     80
     81	do {
     82		status = irdma_puda_poll_cmpl(dev, cq, &compl_error);
     83		if (status == -ENOENT)
     84			break;
     85		if (status) {
     86			ibdev_dbg(to_ibdev(dev), "ERR: puda status = %d\n", status);
     87			break;
     88		}
     89		if (compl_error) {
      90			ibdev_dbg(to_ibdev(dev), "ERR: puda compl_err = 0x%x\n",
     91				  compl_error);
     92			break;
     93		}
     94	} while (1);
     95
     96	irdma_sc_ccq_arm(cq);
     97}
     98
     99/**
    100 * irdma_process_ceq - handle ceq for completions
    101 * @rf: RDMA PCI function
    102 * @ceq: ceq having cq for completion
    103 */
    104static void irdma_process_ceq(struct irdma_pci_f *rf, struct irdma_ceq *ceq)
    105{
    106	struct irdma_sc_dev *dev = &rf->sc_dev;
    107	struct irdma_sc_ceq *sc_ceq;
    108	struct irdma_sc_cq *cq;
    109	unsigned long flags;
    110
    111	sc_ceq = &ceq->sc_ceq;
    112	do {
    113		spin_lock_irqsave(&ceq->ce_lock, flags);
    114		cq = irdma_sc_process_ceq(dev, sc_ceq);
    115		if (!cq) {
    116			spin_unlock_irqrestore(&ceq->ce_lock, flags);
    117			break;
    118		}
    119
    120		if (cq->cq_type == IRDMA_CQ_TYPE_IWARP)
    121			irdma_iwarp_ce_handler(cq);
    122
    123		spin_unlock_irqrestore(&ceq->ce_lock, flags);
    124
    125		if (cq->cq_type == IRDMA_CQ_TYPE_CQP)
    126			queue_work(rf->cqp_cmpl_wq, &rf->cqp_cmpl_work);
    127		else if (cq->cq_type == IRDMA_CQ_TYPE_ILQ ||
    128			 cq->cq_type == IRDMA_CQ_TYPE_IEQ)
    129			irdma_puda_ce_handler(rf, cq);
    130	} while (1);
    131}
    132
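        /**
         * irdma_set_flush_fields - set qp flush fields from an async event
         * @qp: hardware control qp
         * @info: async event entry info
         *
         * Set the flush code and event type on the qp based on the AE id
         */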
    133static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
    134				   struct irdma_aeqe_info *info)
    135{
    136	qp->sq_flush_code = info->sq;
    137	qp->rq_flush_code = info->rq;
    138	qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
    139
    140	switch (info->ae_id) {
    141	case IRDMA_AE_AMP_UNALLOCATED_STAG:
    142	case IRDMA_AE_AMP_BOUNDS_VIOLATION:
    143	case IRDMA_AE_AMP_INVALID_STAG:
    144		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
    145		fallthrough;
    146	case IRDMA_AE_AMP_BAD_PD:
    147	case IRDMA_AE_UDA_XMIT_BAD_PD:
    148		qp->flush_code = FLUSH_PROT_ERR;
    149		break;
    150	case IRDMA_AE_AMP_BAD_QP:
    151	case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
    152		qp->flush_code = FLUSH_LOC_QP_OP_ERR;
    153		break;
    154	case IRDMA_AE_AMP_BAD_STAG_KEY:
    155	case IRDMA_AE_AMP_BAD_STAG_INDEX:
    156	case IRDMA_AE_AMP_TO_WRAP:
    157	case IRDMA_AE_AMP_RIGHTS_VIOLATION:
    158	case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
    159	case IRDMA_AE_PRIV_OPERATION_DENIED:
    160	case IRDMA_AE_IB_INVALID_REQUEST:
    161	case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
    162		qp->flush_code = FLUSH_REM_ACCESS_ERR;
    163		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
    164		break;
    165	case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
    166	case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
    167	case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
    168	case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT:
    169	case IRDMA_AE_UDA_L4LEN_INVALID:
    170	case IRDMA_AE_ROCE_RSP_LENGTH_ERROR:
    171		qp->flush_code = FLUSH_LOC_LEN_ERR;
    172		break;
    173	case IRDMA_AE_LCE_QP_CATASTROPHIC:
    174		qp->flush_code = FLUSH_FATAL_ERR;
    175		break;
    176	case IRDMA_AE_DDP_UBE_INVALID_MO:
    177	case IRDMA_AE_IB_RREQ_AND_Q1_FULL:
    178	case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
    179		qp->flush_code = FLUSH_GENERAL_ERR;
    180		break;
    181	case IRDMA_AE_LLP_TOO_MANY_RETRIES:
    182		qp->flush_code = FLUSH_RETRY_EXC_ERR;
    183		break;
    184	case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS:
    185	case IRDMA_AE_AMP_MWBIND_BIND_DISABLED:
    186	case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS:
    187		qp->flush_code = FLUSH_MW_BIND_ERR;
    188		break;
    189	case IRDMA_AE_IB_REMOTE_OP_ERROR:
    190		qp->flush_code = FLUSH_REM_OP_ERR;
    191		break;
    192	default:
    193		qp->flush_code = FLUSH_FATAL_ERR;
    194		break;
    195	}
    196}
    197
    198/**
    199 * irdma_process_aeq - handle aeq events
    200 * @rf: RDMA PCI function
    201 */
    202static void irdma_process_aeq(struct irdma_pci_f *rf)
    203{
    204	struct irdma_sc_dev *dev = &rf->sc_dev;
    205	struct irdma_aeq *aeq = &rf->aeq;
    206	struct irdma_sc_aeq *sc_aeq = &aeq->sc_aeq;
    207	struct irdma_aeqe_info aeinfo;
    208	struct irdma_aeqe_info *info = &aeinfo;
    209	int ret;
    210	struct irdma_qp *iwqp = NULL;
    211	struct irdma_sc_cq *cq = NULL;
    212	struct irdma_cq *iwcq = NULL;
    213	struct irdma_sc_qp *qp = NULL;
    214	struct irdma_qp_host_ctx_info *ctx_info = NULL;
    215	struct irdma_device *iwdev = rf->iwdev;
    216	unsigned long flags;
    217
    218	u32 aeqcnt = 0;
    219
    220	if (!sc_aeq->size)
    221		return;
    222
    223	do {
    224		memset(info, 0, sizeof(*info));
    225		ret = irdma_sc_get_next_aeqe(sc_aeq, info);
    226		if (ret)
    227			break;
    228
    229		aeqcnt++;
    230		ibdev_dbg(&iwdev->ibdev,
    231			  "AEQ: ae_id = 0x%x bool qp=%d qp_id = %d tcp_state=%d iwarp_state=%d ae_src=%d\n",
    232			  info->ae_id, info->qp, info->qp_cq_id, info->tcp_state,
    233			  info->iwarp_state, info->ae_src);
    234
    235		if (info->qp) {
    236			spin_lock_irqsave(&rf->qptable_lock, flags);
    237			iwqp = rf->qp_table[info->qp_cq_id];
    238			if (!iwqp) {
    239				spin_unlock_irqrestore(&rf->qptable_lock,
    240						       flags);
    241				if (info->ae_id == IRDMA_AE_QP_SUSPEND_COMPLETE) {
    242					atomic_dec(&iwdev->vsi.qp_suspend_reqs);
    243					wake_up(&iwdev->suspend_wq);
    244					continue;
    245				}
    246				ibdev_dbg(&iwdev->ibdev, "AEQ: qp_id %d is already freed\n",
    247					  info->qp_cq_id);
    248				continue;
    249			}
    250			irdma_qp_add_ref(&iwqp->ibqp);
    251			spin_unlock_irqrestore(&rf->qptable_lock, flags);
    252			qp = &iwqp->sc_qp;
    253			spin_lock_irqsave(&iwqp->lock, flags);
    254			iwqp->hw_tcp_state = info->tcp_state;
    255			iwqp->hw_iwarp_state = info->iwarp_state;
    256			if (info->ae_id != IRDMA_AE_QP_SUSPEND_COMPLETE)
    257				iwqp->last_aeq = info->ae_id;
    258			spin_unlock_irqrestore(&iwqp->lock, flags);
    259			ctx_info = &iwqp->ctx_info;
    260			if (rdma_protocol_roce(&iwqp->iwdev->ibdev, 1))
    261				ctx_info->roce_info->err_rq_idx_valid = true;
    262			else
    263				ctx_info->iwarp_info->err_rq_idx_valid = true;
    264		} else {
    265			if (info->ae_id != IRDMA_AE_CQ_OPERATION_ERROR)
    266				continue;
    267		}
    268
    269		switch (info->ae_id) {
    270			struct irdma_cm_node *cm_node;
    271		case IRDMA_AE_LLP_CONNECTION_ESTABLISHED:
    272			cm_node = iwqp->cm_node;
    273			if (cm_node->accept_pend) {
    274				atomic_dec(&cm_node->listener->pend_accepts_cnt);
    275				cm_node->accept_pend = 0;
    276			}
    277			iwqp->rts_ae_rcvd = 1;
    278			wake_up_interruptible(&iwqp->waitq);
    279			break;
    280		case IRDMA_AE_LLP_FIN_RECEIVED:
    281		case IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE:
    282			if (qp->term_flags)
    283				break;
    284			if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
    285				iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSE_WAIT;
    286				if (iwqp->hw_tcp_state == IRDMA_TCP_STATE_CLOSE_WAIT &&
    287				    iwqp->ibqp_state == IB_QPS_RTS) {
    288					irdma_next_iw_state(iwqp,
    289							    IRDMA_QP_STATE_CLOSING,
    290							    0, 0, 0);
    291					irdma_cm_disconn(iwqp);
    292				}
    293				irdma_schedule_cm_timer(iwqp->cm_node,
    294							(struct irdma_puda_buf *)iwqp,
    295							IRDMA_TIMER_TYPE_CLOSE,
    296							1, 0);
    297			}
    298			break;
    299		case IRDMA_AE_LLP_CLOSE_COMPLETE:
    300			if (qp->term_flags)
    301				irdma_terminate_done(qp, 0);
    302			else
    303				irdma_cm_disconn(iwqp);
    304			break;
    305		case IRDMA_AE_BAD_CLOSE:
    306		case IRDMA_AE_RESET_SENT:
    307			irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 1, 0,
    308					    0);
    309			irdma_cm_disconn(iwqp);
    310			break;
    311		case IRDMA_AE_LLP_CONNECTION_RESET:
    312			if (atomic_read(&iwqp->close_timer_started))
    313				break;
    314			irdma_cm_disconn(iwqp);
    315			break;
    316		case IRDMA_AE_QP_SUSPEND_COMPLETE:
    317			if (iwqp->iwdev->vsi.tc_change_pending) {
    318				atomic_dec(&iwqp->sc_qp.vsi->qp_suspend_reqs);
    319				wake_up(&iwqp->iwdev->suspend_wq);
    320			}
    321			break;
    322		case IRDMA_AE_TERMINATE_SENT:
    323			irdma_terminate_send_fin(qp);
    324			break;
    325		case IRDMA_AE_LLP_TERMINATE_RECEIVED:
    326			irdma_terminate_received(qp, info);
    327			break;
    328		case IRDMA_AE_CQ_OPERATION_ERROR:
    329			ibdev_err(&iwdev->ibdev,
    330				  "Processing an iWARP related AE for CQ misc = 0x%04X\n",
    331				  info->ae_id);
    332			cq = (struct irdma_sc_cq *)(unsigned long)
    333			     info->compl_ctx;
    334
    335			iwcq = cq->back_cq;
    336
    337			if (iwcq->ibcq.event_handler) {
    338				struct ib_event ibevent;
    339
    340				ibevent.device = iwcq->ibcq.device;
    341				ibevent.event = IB_EVENT_CQ_ERR;
    342				ibevent.element.cq = &iwcq->ibcq;
    343				iwcq->ibcq.event_handler(&ibevent,
    344							 iwcq->ibcq.cq_context);
    345			}
    346			break;
    347		case IRDMA_AE_RESET_NOT_SENT:
    348		case IRDMA_AE_LLP_DOUBT_REACHABILITY:
    349		case IRDMA_AE_RESOURCE_EXHAUSTION:
    350			break;
    351		case IRDMA_AE_PRIV_OPERATION_DENIED:
    352		case IRDMA_AE_STAG_ZERO_INVALID:
    353		case IRDMA_AE_IB_RREQ_AND_Q1_FULL:
    354		case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION:
    355		case IRDMA_AE_DDP_UBE_INVALID_MO:
    356		case IRDMA_AE_DDP_UBE_INVALID_QN:
    357		case IRDMA_AE_DDP_NO_L_BIT:
    358		case IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
    359		case IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
    360		case IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST:
    361		case IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
    362		case IRDMA_AE_INVALID_ARP_ENTRY:
    363		case IRDMA_AE_INVALID_TCP_OPTION_RCVD:
    364		case IRDMA_AE_STALE_ARP_ENTRY:
    365		case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
    366		case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
    367		case IRDMA_AE_LLP_SYN_RECEIVED:
    368		case IRDMA_AE_LLP_TOO_MANY_RETRIES:
    369		case IRDMA_AE_LCE_QP_CATASTROPHIC:
    370		case IRDMA_AE_LCE_FUNCTION_CATASTROPHIC:
    371		case IRDMA_AE_LCE_CQ_CATASTROPHIC:
    372		case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
    373			if (rdma_protocol_roce(&iwdev->ibdev, 1))
    374				ctx_info->roce_info->err_rq_idx_valid = false;
    375			else
    376				ctx_info->iwarp_info->err_rq_idx_valid = false;
    377			fallthrough;
    378		default:
    379			ibdev_err(&iwdev->ibdev, "abnormal ae_id = 0x%x bool qp=%d qp_id = %d\n",
    380				  info->ae_id, info->qp, info->qp_cq_id);
    381			if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
    382				if (!info->sq && ctx_info->roce_info->err_rq_idx_valid) {
    383					ctx_info->roce_info->err_rq_idx = info->wqe_idx;
    384					irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va,
    385								ctx_info);
    386				}
    387				irdma_set_flush_fields(qp, info);
    388				irdma_cm_disconn(iwqp);
    389				break;
    390			}
    391			if (!info->sq && ctx_info->iwarp_info->err_rq_idx_valid) {
    392				ctx_info->iwarp_info->err_rq_idx = info->wqe_idx;
    393				ctx_info->tcp_info_valid = false;
    394				ctx_info->iwarp_info_valid = true;
    395				irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va,
    396						   ctx_info);
    397			}
    398			if (iwqp->hw_iwarp_state != IRDMA_QP_STATE_RTS &&
    399			    iwqp->hw_iwarp_state != IRDMA_QP_STATE_TERMINATE) {
    400				irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 1, 0, 0);
    401				irdma_cm_disconn(iwqp);
    402			} else {
    403				irdma_terminate_connection(qp, info);
    404			}
    405			break;
    406		}
    407		if (info->qp)
    408			irdma_qp_rem_ref(&iwqp->ibqp);
    409	} while (1);
    410
    411	if (aeqcnt)
    412		irdma_sc_repost_aeq_entries(dev, aeqcnt);
    413}
    414
    415/**
    416 * irdma_ena_intr - set up device interrupts
    417 * @dev: hardware control device structure
    418 * @msix_id: id of the interrupt to be enabled
    419 */
    420static void irdma_ena_intr(struct irdma_sc_dev *dev, u32 msix_id)
    421{
    422	dev->irq_ops->irdma_en_irq(dev, msix_id);
    423}
    424
    425/**
    426 * irdma_dpc - tasklet for aeq and ceq 0
    427 * @t: tasklet_struct ptr
    428 */
    429static void irdma_dpc(struct tasklet_struct *t)
    430{
    431	struct irdma_pci_f *rf = from_tasklet(rf, t, dpc_tasklet);
    432
    433	if (rf->msix_shared)
    434		irdma_process_ceq(rf, rf->ceqlist);
    435	irdma_process_aeq(rf);
    436	irdma_ena_intr(&rf->sc_dev, rf->iw_msixtbl[0].idx);
    437}
    438
    439/**
    440 * irdma_ceq_dpc - dpc handler for CEQ
    441 * @t: tasklet_struct ptr
    442 */
    443static void irdma_ceq_dpc(struct tasklet_struct *t)
    444{
    445	struct irdma_ceq *iwceq = from_tasklet(iwceq, t, dpc_tasklet);
    446	struct irdma_pci_f *rf = iwceq->rf;
    447
    448	irdma_process_ceq(rf, iwceq);
    449	irdma_ena_intr(&rf->sc_dev, iwceq->msix_idx);
    450}
    451
    452/**
    453 * irdma_save_msix_info - copy msix vector information to iwarp device
    454 * @rf: RDMA PCI function
    455 *
    456 * Allocate iwdev msix table and copy the msix info to the table
    457 * Return 0 if successful, otherwise return error
    458 */
    459static int irdma_save_msix_info(struct irdma_pci_f *rf)
    460{
    461	struct irdma_qvlist_info *iw_qvlist;
    462	struct irdma_qv_info *iw_qvinfo;
    463	struct msix_entry *pmsix;
    464	u32 ceq_idx;
    465	u32 i;
    466	size_t size;
    467
    468	if (!rf->msix_count)
    469		return -EINVAL;
    470
    471	size = sizeof(struct irdma_msix_vector) * rf->msix_count;
    472	size += struct_size(iw_qvlist, qv_info, rf->msix_count);
    473	rf->iw_msixtbl = kzalloc(size, GFP_KERNEL);
    474	if (!rf->iw_msixtbl)
    475		return -ENOMEM;
    476
    477	rf->iw_qvlist = (struct irdma_qvlist_info *)
    478			(&rf->iw_msixtbl[rf->msix_count]);
    479	iw_qvlist = rf->iw_qvlist;
    480	iw_qvinfo = iw_qvlist->qv_info;
    481	iw_qvlist->num_vectors = rf->msix_count;
    482	if (rf->msix_count <= num_online_cpus())
    483		rf->msix_shared = true;
    484
    485	pmsix = rf->msix_entries;
    486	for (i = 0, ceq_idx = 0; i < rf->msix_count; i++, iw_qvinfo++) {
    487		rf->iw_msixtbl[i].idx = pmsix->entry;
    488		rf->iw_msixtbl[i].irq = pmsix->vector;
    489		rf->iw_msixtbl[i].cpu_affinity = ceq_idx;
    490		if (!i) {
    491			iw_qvinfo->aeq_idx = 0;
    492			if (rf->msix_shared)
    493				iw_qvinfo->ceq_idx = ceq_idx++;
    494			else
    495				iw_qvinfo->ceq_idx = IRDMA_Q_INVALID_IDX;
    496		} else {
    497			iw_qvinfo->aeq_idx = IRDMA_Q_INVALID_IDX;
    498			iw_qvinfo->ceq_idx = ceq_idx++;
    499		}
    500		iw_qvinfo->itr_idx = 3;
    501		iw_qvinfo->v_idx = rf->iw_msixtbl[i].idx;
    502		pmsix++;
    503	}
    504
    505	return 0;
    506}
    507
    508/**
    509 * irdma_irq_handler - interrupt handler for aeq and ceq0
    510 * @irq: Interrupt request number
    511 * @data: RDMA PCI function
    512 */
    513static irqreturn_t irdma_irq_handler(int irq, void *data)
    514{
    515	struct irdma_pci_f *rf = data;
    516
    517	tasklet_schedule(&rf->dpc_tasklet);
    518
    519	return IRQ_HANDLED;
    520}
    521
    522/**
    523 * irdma_ceq_handler - interrupt handler for ceq
    524 * @irq: interrupt request number
    525 * @data: ceq pointer
    526 */
    527static irqreturn_t irdma_ceq_handler(int irq, void *data)
    528{
    529	struct irdma_ceq *iwceq = data;
    530
    531	if (iwceq->irq != irq)
    532		ibdev_err(to_ibdev(&iwceq->rf->sc_dev), "expected irq = %d received irq = %d\n",
    533			  iwceq->irq, irq);
    534	tasklet_schedule(&iwceq->dpc_tasklet);
    535
    536	return IRQ_HANDLED;
    537}
    538
    539/**
    540 * irdma_destroy_irq - destroy device interrupts
    541 * @rf: RDMA PCI function
    542 * @msix_vec: msix vector to disable irq
    543 * @dev_id: parameter to pass to free_irq (used during irq setup)
    544 *
    545 * The function is called when destroying aeq/ceq
    546 */
    547static void irdma_destroy_irq(struct irdma_pci_f *rf,
    548			      struct irdma_msix_vector *msix_vec, void *dev_id)
    549{
    550	struct irdma_sc_dev *dev = &rf->sc_dev;
    551
    552	dev->irq_ops->irdma_dis_irq(dev, msix_vec->idx);
    553	irq_update_affinity_hint(msix_vec->irq, NULL);
    554	free_irq(msix_vec->irq, dev_id);
    555}
    556
    557/**
    558 * irdma_destroy_cqp  - destroy control qp
    559 * @rf: RDMA PCI function
     560 * @free_hwcqp: true if hw cqp should be freed
    561 *
    562 * Issue destroy cqp request and
    563 * free the resources associated with the cqp
    564 */
    565static void irdma_destroy_cqp(struct irdma_pci_f *rf, bool free_hwcqp)
    566{
    567	struct irdma_sc_dev *dev = &rf->sc_dev;
    568	struct irdma_cqp *cqp = &rf->cqp;
    569	int status = 0;
    570
    571	if (rf->cqp_cmpl_wq)
    572		destroy_workqueue(rf->cqp_cmpl_wq);
    573	if (free_hwcqp)
    574		status = irdma_sc_cqp_destroy(dev->cqp);
    575	if (status)
    576		ibdev_dbg(to_ibdev(dev), "ERR: Destroy CQP failed %d\n", status);
    577
    578	irdma_cleanup_pending_cqp_op(rf);
    579	dma_free_coherent(dev->hw->device, cqp->sq.size, cqp->sq.va,
    580			  cqp->sq.pa);
    581	cqp->sq.va = NULL;
    582	kfree(cqp->scratch_array);
    583	cqp->scratch_array = NULL;
    584	kfree(cqp->cqp_requests);
    585	cqp->cqp_requests = NULL;
    586}
    587
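        /**
         * irdma_destroy_virt_aeq - destroy the virtually mapped aeq
         * @rf: RDMA PCI function
         *
         * Unmap the aeq page list, free the backing pbles and free the
         * vmalloc'ed aeq memory
         */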
    588static void irdma_destroy_virt_aeq(struct irdma_pci_f *rf)
    589{
    590	struct irdma_aeq *aeq = &rf->aeq;
    591	u32 pg_cnt = DIV_ROUND_UP(aeq->mem.size, PAGE_SIZE);
    592	dma_addr_t *pg_arr = (dma_addr_t *)aeq->palloc.level1.addr;
    593
    594	irdma_unmap_vm_page_list(&rf->hw, pg_arr, pg_cnt);
    595	irdma_free_pble(rf->pble_rsrc, &aeq->palloc);
    596	vfree(aeq->mem.va);
    597}
    598
    599/**
    600 * irdma_destroy_aeq - destroy aeq
    601 * @rf: RDMA PCI function
    602 *
    603 * Issue a destroy aeq request and
    604 * free the resources associated with the aeq
    605 * The function is called during driver unload
    606 */
    607static void irdma_destroy_aeq(struct irdma_pci_f *rf)
    608{
    609	struct irdma_sc_dev *dev = &rf->sc_dev;
    610	struct irdma_aeq *aeq = &rf->aeq;
    611	int status = -EBUSY;
    612
    613	if (!rf->msix_shared) {
    614		rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, rf->iw_msixtbl->idx, false);
    615		irdma_destroy_irq(rf, rf->iw_msixtbl, rf);
    616	}
    617	if (rf->reset)
    618		goto exit;
    619
    620	aeq->sc_aeq.size = 0;
    621	status = irdma_cqp_aeq_cmd(dev, &aeq->sc_aeq, IRDMA_OP_AEQ_DESTROY);
    622	if (status)
    623		ibdev_dbg(to_ibdev(dev), "ERR: Destroy AEQ failed %d\n", status);
    624
    625exit:
    626	if (aeq->virtual_map) {
    627		irdma_destroy_virt_aeq(rf);
    628	} else {
    629		dma_free_coherent(dev->hw->device, aeq->mem.size, aeq->mem.va,
    630				  aeq->mem.pa);
    631		aeq->mem.va = NULL;
    632	}
    633}
    634
    635/**
    636 * irdma_destroy_ceq - destroy ceq
    637 * @rf: RDMA PCI function
    638 * @iwceq: ceq to be destroyed
    639 *
    640 * Issue a destroy ceq request and
    641 * free the resources associated with the ceq
    642 */
    643static void irdma_destroy_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq)
    644{
    645	struct irdma_sc_dev *dev = &rf->sc_dev;
    646	int status;
    647
    648	if (rf->reset)
    649		goto exit;
    650
    651	status = irdma_sc_ceq_destroy(&iwceq->sc_ceq, 0, 1);
    652	if (status) {
    653		ibdev_dbg(to_ibdev(dev), "ERR: CEQ destroy command failed %d\n", status);
    654		goto exit;
    655	}
    656
    657	status = irdma_sc_cceq_destroy_done(&iwceq->sc_ceq);
    658	if (status)
    659		ibdev_dbg(to_ibdev(dev), "ERR: CEQ destroy completion failed %d\n",
    660			  status);
    661exit:
    662	dma_free_coherent(dev->hw->device, iwceq->mem.size, iwceq->mem.va,
    663			  iwceq->mem.pa);
    664	iwceq->mem.va = NULL;
    665}
    666
    667/**
    668 * irdma_del_ceq_0 - destroy ceq 0
    669 * @rf: RDMA PCI function
    670 *
    671 * Disable the ceq 0 interrupt and destroy the ceq 0
    672 */
    673static void irdma_del_ceq_0(struct irdma_pci_f *rf)
    674{
    675	struct irdma_ceq *iwceq = rf->ceqlist;
    676	struct irdma_msix_vector *msix_vec;
    677
    678	if (rf->msix_shared) {
    679		msix_vec = &rf->iw_msixtbl[0];
    680		rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev,
    681						  msix_vec->ceq_id,
    682						  msix_vec->idx, false);
    683		irdma_destroy_irq(rf, msix_vec, rf);
    684	} else {
    685		msix_vec = &rf->iw_msixtbl[1];
    686		irdma_destroy_irq(rf, msix_vec, iwceq);
    687	}
    688
    689	irdma_destroy_ceq(rf, iwceq);
    690	rf->sc_dev.ceq_valid = false;
    691	rf->ceqs_count = 0;
    692}
    693
    694/**
     695 * irdma_del_ceqs - destroy all ceqs except CEQ 0
    696 * @rf: RDMA PCI function
    697 *
     698 * Go through all of the device ceqs, except 0, and for each
    699 * ceq disable the ceq interrupt and destroy the ceq
    700 */
    701static void irdma_del_ceqs(struct irdma_pci_f *rf)
    702{
    703	struct irdma_ceq *iwceq = &rf->ceqlist[1];
    704	struct irdma_msix_vector *msix_vec;
    705	u32 i = 0;
    706
    707	if (rf->msix_shared)
    708		msix_vec = &rf->iw_msixtbl[1];
    709	else
    710		msix_vec = &rf->iw_msixtbl[2];
    711
    712	for (i = 1; i < rf->ceqs_count; i++, msix_vec++, iwceq++) {
    713		rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, msix_vec->ceq_id,
    714						  msix_vec->idx, false);
    715		irdma_destroy_irq(rf, msix_vec, iwceq);
    716		irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq,
    717				  IRDMA_OP_CEQ_DESTROY);
    718		dma_free_coherent(rf->sc_dev.hw->device, iwceq->mem.size,
    719				  iwceq->mem.va, iwceq->mem.pa);
    720		iwceq->mem.va = NULL;
    721	}
    722	rf->ceqs_count = 1;
    723}
    724
    725/**
    726 * irdma_destroy_ccq - destroy control cq
    727 * @rf: RDMA PCI function
    728 *
    729 * Issue destroy ccq request and
    730 * free the resources associated with the ccq
    731 */
    732static void irdma_destroy_ccq(struct irdma_pci_f *rf)
    733{
    734	struct irdma_sc_dev *dev = &rf->sc_dev;
    735	struct irdma_ccq *ccq = &rf->ccq;
    736	int status = 0;
    737
    738	if (!rf->reset)
    739		status = irdma_sc_ccq_destroy(dev->ccq, 0, true);
    740	if (status)
    741		ibdev_dbg(to_ibdev(dev), "ERR: CCQ destroy failed %d\n", status);
    742	dma_free_coherent(dev->hw->device, ccq->mem_cq.size, ccq->mem_cq.va,
    743			  ccq->mem_cq.pa);
    744	ccq->mem_cq.va = NULL;
    745}
    746
    747/**
    748 * irdma_close_hmc_objects_type - delete hmc objects of a given type
    749 * @dev: iwarp device
    750 * @obj_type: the hmc object type to be deleted
    751 * @hmc_info: host memory info struct
    752 * @privileged: permission to close HMC objects
    753 * @reset: true if called before reset
    754 */
    755static void irdma_close_hmc_objects_type(struct irdma_sc_dev *dev,
    756					 enum irdma_hmc_rsrc_type obj_type,
    757					 struct irdma_hmc_info *hmc_info,
    758					 bool privileged, bool reset)
    759{
    760	struct irdma_hmc_del_obj_info info = {};
    761
    762	info.hmc_info = hmc_info;
    763	info.rsrc_type = obj_type;
    764	info.count = hmc_info->hmc_obj[obj_type].cnt;
    765	info.privileged = privileged;
    766	if (irdma_sc_del_hmc_obj(dev, &info, reset))
    767		ibdev_dbg(to_ibdev(dev), "ERR: del HMC obj of type %d failed\n",
    768			  obj_type);
    769}
    770
    771/**
    772 * irdma_del_hmc_objects - remove all device hmc objects
    773 * @dev: iwarp device
    774 * @hmc_info: hmc_info to free
    775 * @privileged: permission to delete HMC objects
    776 * @reset: true if called before reset
    777 * @vers: hardware version
    778 */
    779static void irdma_del_hmc_objects(struct irdma_sc_dev *dev,
    780				  struct irdma_hmc_info *hmc_info, bool privileged,
    781				  bool reset, enum irdma_vers vers)
    782{
    783	unsigned int i;
    784
    785	for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
    786		if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt)
    787			irdma_close_hmc_objects_type(dev, iw_hmc_obj_types[i],
    788						     hmc_info, privileged, reset);
    789		if (vers == IRDMA_GEN_1 && i == IRDMA_HMC_IW_TIMER)
    790			break;
    791	}
    792}
    793
    794/**
    795 * irdma_create_hmc_obj_type - create hmc object of a given type
    796 * @dev: hardware control device structure
    797 * @info: information for the hmc object to create
    798 */
    799static int irdma_create_hmc_obj_type(struct irdma_sc_dev *dev,
    800				     struct irdma_hmc_create_obj_info *info)
    801{
    802	return irdma_sc_create_hmc_obj(dev, info);
    803}
    804
    805/**
    806 * irdma_create_hmc_objs - create all hmc objects for the device
    807 * @rf: RDMA PCI function
    808 * @privileged: permission to create HMC objects
    809 * @vers: HW version
    810 *
    811 * Create the device hmc objects and allocate hmc pages
    812 * Return 0 if successful, otherwise clean up and return error
    813 */
    814static int irdma_create_hmc_objs(struct irdma_pci_f *rf, bool privileged,
    815				 enum irdma_vers vers)
    816{
    817	struct irdma_sc_dev *dev = &rf->sc_dev;
    818	struct irdma_hmc_create_obj_info info = {};
    819	int i, status = 0;
    820
    821	info.hmc_info = dev->hmc_info;
    822	info.privileged = privileged;
    823	info.entry_type = rf->sd_type;
    824
    825	for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
    826		if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt) {
    827			info.rsrc_type = iw_hmc_obj_types[i];
    828			info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt;
    829			info.add_sd_cnt = 0;
    830			status = irdma_create_hmc_obj_type(dev, &info);
    831			if (status) {
    832				ibdev_dbg(to_ibdev(dev),
    833					  "ERR: create obj type %d status = %d\n",
    834					  iw_hmc_obj_types[i], status);
    835				break;
    836			}
    837		}
    838		if (vers == IRDMA_GEN_1 && i == IRDMA_HMC_IW_TIMER)
    839			break;
    840	}
    841
    842	if (!status)
    843		return irdma_sc_static_hmc_pages_allocated(dev->cqp, 0, dev->hmc_fn_id,
    844							   true, true);
    845
    846	while (i) {
    847		i--;
    848		/* destroy the hmc objects of a given type */
    849		if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt)
    850			irdma_close_hmc_objects_type(dev, iw_hmc_obj_types[i],
    851						     dev->hmc_info, privileged,
    852						     false);
    853	}
    854
    855	return status;
    856}
    857
    858/**
    859 * irdma_obj_aligned_mem - get aligned memory from device allocated memory
    860 * @rf: RDMA PCI function
    861 * @memptr: points to the memory addresses
    862 * @size: size of memory needed
    863 * @mask: mask for the aligned memory
    864 *
    865 * Get aligned memory of the requested size and
    866 * update the memptr to point to the new aligned memory
    867 * Return 0 if successful, otherwise return no memory error
    868 */
    869static int irdma_obj_aligned_mem(struct irdma_pci_f *rf,
    870				 struct irdma_dma_mem *memptr, u32 size,
    871				 u32 mask)
    872{
    873	unsigned long va, newva;
    874	unsigned long extra;
    875
    876	va = (unsigned long)rf->obj_next.va;
    877	newva = va;
    878	if (mask)
    879		newva = ALIGN(va, (unsigned long)mask + 1ULL);
    880	extra = newva - va;
    881	memptr->va = (u8 *)va + extra;
    882	memptr->pa = rf->obj_next.pa + extra;
    883	memptr->size = size;
    884	if (((u8 *)memptr->va + size) > ((u8 *)rf->obj_mem.va + rf->obj_mem.size))
    885		return -ENOMEM;
    886
    887	rf->obj_next.va = (u8 *)memptr->va + size;
    888	rf->obj_next.pa = memptr->pa + size;
    889
    890	return 0;
    891}
    892
    893/**
    894 * irdma_create_cqp - create control qp
    895 * @rf: RDMA PCI function
    896 *
    897 * Return 0, if the cqp and all the resources associated with it
    898 * are successfully created, otherwise return error
    899 */
    900static int irdma_create_cqp(struct irdma_pci_f *rf)
    901{
    902	u32 sqsize = IRDMA_CQP_SW_SQSIZE_2048;
    903	struct irdma_dma_mem mem;
    904	struct irdma_sc_dev *dev = &rf->sc_dev;
    905	struct irdma_cqp_init_info cqp_init_info = {};
    906	struct irdma_cqp *cqp = &rf->cqp;
    907	u16 maj_err, min_err;
    908	int i, status;
    909
    910	cqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests), GFP_KERNEL);
    911	if (!cqp->cqp_requests)
    912		return -ENOMEM;
    913
    914	cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL);
    915	if (!cqp->scratch_array) {
    916		kfree(cqp->cqp_requests);
    917		return -ENOMEM;
    918	}
    919
    920	dev->cqp = &cqp->sc_cqp;
    921	dev->cqp->dev = dev;
    922	cqp->sq.size = ALIGN(sizeof(struct irdma_cqp_sq_wqe) * sqsize,
    923			     IRDMA_CQP_ALIGNMENT);
    924	cqp->sq.va = dma_alloc_coherent(dev->hw->device, cqp->sq.size,
    925					&cqp->sq.pa, GFP_KERNEL);
    926	if (!cqp->sq.va) {
    927		kfree(cqp->scratch_array);
    928		kfree(cqp->cqp_requests);
    929		return -ENOMEM;
    930	}
    931
    932	status = irdma_obj_aligned_mem(rf, &mem, sizeof(struct irdma_cqp_ctx),
    933				       IRDMA_HOST_CTX_ALIGNMENT_M);
    934	if (status)
    935		goto exit;
    936
    937	dev->cqp->host_ctx_pa = mem.pa;
    938	dev->cqp->host_ctx = mem.va;
    939	/* populate the cqp init info */
    940	cqp_init_info.dev = dev;
    941	cqp_init_info.sq_size = sqsize;
    942	cqp_init_info.sq = cqp->sq.va;
    943	cqp_init_info.sq_pa = cqp->sq.pa;
    944	cqp_init_info.host_ctx_pa = mem.pa;
    945	cqp_init_info.host_ctx = mem.va;
    946	cqp_init_info.hmc_profile = rf->rsrc_profile;
    947	cqp_init_info.scratch_array = cqp->scratch_array;
    948	cqp_init_info.protocol_used = rf->protocol_used;
    949
    950	switch (rf->rdma_ver) {
    951	case IRDMA_GEN_1:
    952		cqp_init_info.hw_maj_ver = IRDMA_CQPHC_HW_MAJVER_GEN_1;
    953		break;
    954	case IRDMA_GEN_2:
    955		cqp_init_info.hw_maj_ver = IRDMA_CQPHC_HW_MAJVER_GEN_2;
    956		break;
    957	}
    958	status = irdma_sc_cqp_init(dev->cqp, &cqp_init_info);
    959	if (status) {
    960		ibdev_dbg(to_ibdev(dev), "ERR: cqp init status %d\n", status);
    961		goto exit;
    962	}
    963
    964	spin_lock_init(&cqp->req_lock);
    965	spin_lock_init(&cqp->compl_lock);
    966
    967	status = irdma_sc_cqp_create(dev->cqp, &maj_err, &min_err);
    968	if (status) {
    969		ibdev_dbg(to_ibdev(dev),
    970			  "ERR: cqp create failed - status %d maj_err %d min_err %d\n",
    971			  status, maj_err, min_err);
    972		goto exit;
    973	}
    974
    975	INIT_LIST_HEAD(&cqp->cqp_avail_reqs);
    976	INIT_LIST_HEAD(&cqp->cqp_pending_reqs);
    977
    978	/* init the waitqueue of the cqp_requests and add them to the list */
    979	for (i = 0; i < sqsize; i++) {
    980		init_waitqueue_head(&cqp->cqp_requests[i].waitq);
    981		list_add_tail(&cqp->cqp_requests[i].list, &cqp->cqp_avail_reqs);
    982	}
    983	init_waitqueue_head(&cqp->remove_wq);
    984	return 0;
    985
    986exit:
    987	irdma_destroy_cqp(rf, false);
    988
    989	return status;
    990}
    991
    992/**
    993 * irdma_create_ccq - create control cq
    994 * @rf: RDMA PCI function
    995 *
    996 * Return 0, if the ccq and the resources associated with it
    997 * are successfully created, otherwise return error
    998 */
    999static int irdma_create_ccq(struct irdma_pci_f *rf)
   1000{
   1001	struct irdma_sc_dev *dev = &rf->sc_dev;
   1002	struct irdma_ccq_init_info info = {};
   1003	struct irdma_ccq *ccq = &rf->ccq;
   1004	int status;
   1005
   1006	dev->ccq = &ccq->sc_cq;
   1007	dev->ccq->dev = dev;
   1008	info.dev = dev;
   1009	ccq->shadow_area.size = sizeof(struct irdma_cq_shadow_area);
   1010	ccq->mem_cq.size = ALIGN(sizeof(struct irdma_cqe) * IW_CCQ_SIZE,
   1011				 IRDMA_CQ0_ALIGNMENT);
   1012	ccq->mem_cq.va = dma_alloc_coherent(dev->hw->device, ccq->mem_cq.size,
   1013					    &ccq->mem_cq.pa, GFP_KERNEL);
   1014	if (!ccq->mem_cq.va)
   1015		return -ENOMEM;
   1016
   1017	status = irdma_obj_aligned_mem(rf, &ccq->shadow_area,
   1018				       ccq->shadow_area.size,
   1019				       IRDMA_SHADOWAREA_M);
   1020	if (status)
   1021		goto exit;
   1022
   1023	ccq->sc_cq.back_cq = ccq;
   1024	/* populate the ccq init info */
   1025	info.cq_base = ccq->mem_cq.va;
   1026	info.cq_pa = ccq->mem_cq.pa;
   1027	info.num_elem = IW_CCQ_SIZE;
   1028	info.shadow_area = ccq->shadow_area.va;
   1029	info.shadow_area_pa = ccq->shadow_area.pa;
   1030	info.ceqe_mask = false;
   1031	info.ceq_id_valid = true;
   1032	info.shadow_read_threshold = 16;
   1033	info.vsi = &rf->default_vsi;
   1034	status = irdma_sc_ccq_init(dev->ccq, &info);
   1035	if (!status)
   1036		status = irdma_sc_ccq_create(dev->ccq, 0, true, true);
   1037exit:
   1038	if (status) {
   1039		dma_free_coherent(dev->hw->device, ccq->mem_cq.size,
   1040				  ccq->mem_cq.va, ccq->mem_cq.pa);
   1041		ccq->mem_cq.va = NULL;
   1042	}
   1043
   1044	return status;
   1045}
   1046
   1047/**
   1048 * irdma_alloc_set_mac - set up a mac address table entry
   1049 * @iwdev: irdma device
   1050 *
    1051 * Allocate a mac ip entry and add it to the hw table. Return 0
   1052 * if successful, otherwise return error
   1053 */
   1054static int irdma_alloc_set_mac(struct irdma_device *iwdev)
   1055{
   1056	int status;
   1057
   1058	status = irdma_alloc_local_mac_entry(iwdev->rf,
   1059					     &iwdev->mac_ip_table_idx);
   1060	if (!status) {
   1061		status = irdma_add_local_mac_entry(iwdev->rf,
   1062						   (const u8 *)iwdev->netdev->dev_addr,
   1063						   (u8)iwdev->mac_ip_table_idx);
   1064		if (status)
   1065			irdma_del_local_mac_entry(iwdev->rf,
   1066						  (u8)iwdev->mac_ip_table_idx);
   1067	}
   1068	return status;
   1069}
   1070
   1071/**
   1072 * irdma_cfg_ceq_vector - set up the msix interrupt vector for
   1073 * ceq
   1074 * @rf: RDMA PCI function
   1075 * @iwceq: ceq associated with the vector
   1076 * @ceq_id: the id number of the iwceq
   1077 * @msix_vec: interrupt vector information
   1078 *
   1079 * Allocate interrupt resources and enable irq handling
   1080 * Return 0 if successful, otherwise return error
   1081 */
   1082static int irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
   1083				u32 ceq_id, struct irdma_msix_vector *msix_vec)
   1084{
   1085	int status;
   1086
   1087	if (rf->msix_shared && !ceq_id) {
   1088		tasklet_setup(&rf->dpc_tasklet, irdma_dpc);
   1089		status = request_irq(msix_vec->irq, irdma_irq_handler, 0,
   1090				     "AEQCEQ", rf);
   1091	} else {
   1092		tasklet_setup(&iwceq->dpc_tasklet, irdma_ceq_dpc);
   1093
   1094		status = request_irq(msix_vec->irq, irdma_ceq_handler, 0,
   1095				     "CEQ", iwceq);
   1096	}
   1097	cpumask_clear(&msix_vec->mask);
   1098	cpumask_set_cpu(msix_vec->cpu_affinity, &msix_vec->mask);
   1099	irq_update_affinity_hint(msix_vec->irq, &msix_vec->mask);
   1100	if (status) {
   1101		ibdev_dbg(&rf->iwdev->ibdev, "ERR: ceq irq config fail\n");
   1102		return status;
   1103	}
   1104
   1105	msix_vec->ceq_id = ceq_id;
   1106	rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, ceq_id, msix_vec->idx, true);
   1107
   1108	return 0;
   1109}
   1110
   1111/**
   1112 * irdma_cfg_aeq_vector - set up the msix vector for aeq
   1113 * @rf: RDMA PCI function
   1114 *
   1115 * Allocate interrupt resources and enable irq handling
   1116 * Return 0 if successful, otherwise return error
   1117 */
   1118static int irdma_cfg_aeq_vector(struct irdma_pci_f *rf)
   1119{
   1120	struct irdma_msix_vector *msix_vec = rf->iw_msixtbl;
   1121	u32 ret = 0;
   1122
   1123	if (!rf->msix_shared) {
   1124		tasklet_setup(&rf->dpc_tasklet, irdma_dpc);
   1125		ret = request_irq(msix_vec->irq, irdma_irq_handler, 0,
   1126				  "irdma", rf);
   1127	}
   1128	if (ret) {
   1129		ibdev_dbg(&rf->iwdev->ibdev, "ERR: aeq irq config fail\n");
   1130		return -EINVAL;
   1131	}
   1132
   1133	rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, msix_vec->idx, true);
   1134
   1135	return 0;
   1136}
   1137
   1138/**
   1139 * irdma_create_ceq - create completion event queue
   1140 * @rf: RDMA PCI function
   1141 * @iwceq: pointer to the ceq resources to be created
   1142 * @ceq_id: the id number of the iwceq
   1143 * @vsi: SC vsi struct
   1144 *
   1145 * Return 0, if the ceq and the resources associated with it
   1146 * are successfully created, otherwise return error
   1147 */
   1148static int irdma_create_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
   1149			    u32 ceq_id, struct irdma_sc_vsi *vsi)
   1150{
   1151	int status;
   1152	struct irdma_ceq_init_info info = {};
   1153	struct irdma_sc_dev *dev = &rf->sc_dev;
   1154	u64 scratch;
   1155	u32 ceq_size;
   1156
   1157	info.ceq_id = ceq_id;
   1158	iwceq->rf = rf;
   1159	ceq_size = min(rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt,
   1160		       dev->hw_attrs.max_hw_ceq_size);
   1161	iwceq->mem.size = ALIGN(sizeof(struct irdma_ceqe) * ceq_size,
   1162				IRDMA_CEQ_ALIGNMENT);
   1163	iwceq->mem.va = dma_alloc_coherent(dev->hw->device, iwceq->mem.size,
   1164					   &iwceq->mem.pa, GFP_KERNEL);
   1165	if (!iwceq->mem.va)
   1166		return -ENOMEM;
   1167
   1168	info.ceq_id = ceq_id;
   1169	info.ceqe_base = iwceq->mem.va;
   1170	info.ceqe_pa = iwceq->mem.pa;
   1171	info.elem_cnt = ceq_size;
   1172	iwceq->sc_ceq.ceq_id = ceq_id;
   1173	info.dev = dev;
   1174	info.vsi = vsi;
   1175	scratch = (uintptr_t)&rf->cqp.sc_cqp;
   1176	status = irdma_sc_ceq_init(&iwceq->sc_ceq, &info);
   1177	if (!status) {
   1178		if (dev->ceq_valid)
   1179			status = irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq,
   1180						   IRDMA_OP_CEQ_CREATE);
   1181		else
   1182			status = irdma_sc_cceq_create(&iwceq->sc_ceq, scratch);
   1183	}
   1184
   1185	if (status) {
   1186		dma_free_coherent(dev->hw->device, iwceq->mem.size,
   1187				  iwceq->mem.va, iwceq->mem.pa);
   1188		iwceq->mem.va = NULL;
   1189	}
   1190
   1191	return status;
   1192}
   1193
   1194/**
    1195 * irdma_setup_ceq_0 - create CEQ 0 and its interrupt resource
   1196 * @rf: RDMA PCI function
   1197 *
   1198 * Allocate a list for all device completion event queues
    1199 * Create the ceq 0 and configure its msix interrupt vector
   1200 * Return 0, if successfully set up, otherwise return error
   1201 */
   1202static int irdma_setup_ceq_0(struct irdma_pci_f *rf)
   1203{
   1204	struct irdma_ceq *iwceq;
   1205	struct irdma_msix_vector *msix_vec;
   1206	u32 i;
   1207	int status = 0;
   1208	u32 num_ceqs;
   1209
   1210	num_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs);
   1211	rf->ceqlist = kcalloc(num_ceqs, sizeof(*rf->ceqlist), GFP_KERNEL);
   1212	if (!rf->ceqlist) {
   1213		status = -ENOMEM;
   1214		goto exit;
   1215	}
   1216
   1217	iwceq = &rf->ceqlist[0];
   1218	status = irdma_create_ceq(rf, iwceq, 0, &rf->default_vsi);
   1219	if (status) {
   1220		ibdev_dbg(&rf->iwdev->ibdev, "ERR: create ceq status = %d\n",
   1221			  status);
   1222		goto exit;
   1223	}
   1224
   1225	spin_lock_init(&iwceq->ce_lock);
   1226	i = rf->msix_shared ? 0 : 1;
   1227	msix_vec = &rf->iw_msixtbl[i];
   1228	iwceq->irq = msix_vec->irq;
   1229	iwceq->msix_idx = msix_vec->idx;
   1230	status = irdma_cfg_ceq_vector(rf, iwceq, 0, msix_vec);
   1231	if (status) {
   1232		irdma_destroy_ceq(rf, iwceq);
   1233		goto exit;
   1234	}
   1235
   1236	irdma_ena_intr(&rf->sc_dev, msix_vec->idx);
   1237	rf->ceqs_count++;
   1238
   1239exit:
   1240	if (status && !rf->ceqs_count) {
   1241		kfree(rf->ceqlist);
   1242		rf->ceqlist = NULL;
   1243		return status;
   1244	}
   1245	rf->sc_dev.ceq_valid = true;
   1246
   1247	return 0;
   1248}
   1249
   1250/**
    1251 * irdma_setup_ceqs - manage the device ceqs and their interrupt resources
   1252 * @rf: RDMA PCI function
   1253 * @vsi: VSI structure for this CEQ
   1254 *
   1255 * Allocate a list for all device completion event queues
    1256 * Create the ceqs and configure their msix interrupt vectors
   1257 * Return 0, if ceqs are successfully set up, otherwise return error
   1258 */
   1259static int irdma_setup_ceqs(struct irdma_pci_f *rf, struct irdma_sc_vsi *vsi)
   1260{
   1261	u32 i;
   1262	u32 ceq_id;
   1263	struct irdma_ceq *iwceq;
   1264	struct irdma_msix_vector *msix_vec;
   1265	int status;
   1266	u32 num_ceqs;
   1267
   1268	num_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs);
   1269	i = (rf->msix_shared) ? 1 : 2;
   1270	for (ceq_id = 1; i < num_ceqs; i++, ceq_id++) {
   1271		iwceq = &rf->ceqlist[ceq_id];
   1272		status = irdma_create_ceq(rf, iwceq, ceq_id, vsi);
   1273		if (status) {
   1274			ibdev_dbg(&rf->iwdev->ibdev,
   1275				  "ERR: create ceq status = %d\n", status);
   1276			goto del_ceqs;
   1277		}
   1278		spin_lock_init(&iwceq->ce_lock);
   1279		msix_vec = &rf->iw_msixtbl[i];
   1280		iwceq->irq = msix_vec->irq;
   1281		iwceq->msix_idx = msix_vec->idx;
   1282		status = irdma_cfg_ceq_vector(rf, iwceq, ceq_id, msix_vec);
   1283		if (status) {
   1284			irdma_destroy_ceq(rf, iwceq);
   1285			goto del_ceqs;
   1286		}
   1287		irdma_ena_intr(&rf->sc_dev, msix_vec->idx);
   1288		rf->ceqs_count++;
   1289	}
   1290
   1291	return 0;
   1292
   1293del_ceqs:
   1294	irdma_del_ceqs(rf);
   1295
   1296	return status;
   1297}
   1298
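        /**
         * irdma_create_virt_aeq - create a virtually mapped aeq
         * @rf: RDMA PCI function
         * @size: number of aeq entries
         *
         * Allocate the aeq from vmalloc'ed memory and map it through pbles.
         * Supported only on IRDMA_GEN_2 and later hardware.
         * Return 0 if successful, otherwise return error
         */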
   1299static int irdma_create_virt_aeq(struct irdma_pci_f *rf, u32 size)
   1300{
   1301	struct irdma_aeq *aeq = &rf->aeq;
   1302	dma_addr_t *pg_arr;
   1303	u32 pg_cnt;
   1304	int status;
   1305
   1306	if (rf->rdma_ver < IRDMA_GEN_2)
   1307		return -EOPNOTSUPP;
   1308
   1309	aeq->mem.size = sizeof(struct irdma_sc_aeqe) * size;
   1310	aeq->mem.va = vzalloc(aeq->mem.size);
   1311
   1312	if (!aeq->mem.va)
   1313		return -ENOMEM;
   1314
   1315	pg_cnt = DIV_ROUND_UP(aeq->mem.size, PAGE_SIZE);
   1316	status = irdma_get_pble(rf->pble_rsrc, &aeq->palloc, pg_cnt, true);
   1317	if (status) {
   1318		vfree(aeq->mem.va);
   1319		return status;
   1320	}
   1321
   1322	pg_arr = (dma_addr_t *)aeq->palloc.level1.addr;
   1323	status = irdma_map_vm_page_list(&rf->hw, aeq->mem.va, pg_arr, pg_cnt);
   1324	if (status) {
   1325		irdma_free_pble(rf->pble_rsrc, &aeq->palloc);
   1326		vfree(aeq->mem.va);
   1327		return status;
   1328	}
   1329
   1330	return 0;
   1331}
   1332
   1333/**
   1334 * irdma_create_aeq - create async event queue
   1335 * @rf: RDMA PCI function
   1336 *
   1337 * Return 0, if the aeq and the resources associated with it
   1338 * are successfully created, otherwise return error
   1339 */
   1340static int irdma_create_aeq(struct irdma_pci_f *rf)
   1341{
   1342	struct irdma_aeq_init_info info = {};
   1343	struct irdma_sc_dev *dev = &rf->sc_dev;
   1344	struct irdma_aeq *aeq = &rf->aeq;
   1345	struct irdma_hmc_info *hmc_info = rf->sc_dev.hmc_info;
   1346	u32 aeq_size;
   1347	u8 multiplier = (rf->protocol_used == IRDMA_IWARP_PROTOCOL_ONLY) ? 2 : 1;
   1348	int status;
   1349
   1350	aeq_size = multiplier * hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt +
   1351		   hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt;
   1352	aeq_size = min(aeq_size, dev->hw_attrs.max_hw_aeq_size);
   1353
   1354	aeq->mem.size = ALIGN(sizeof(struct irdma_sc_aeqe) * aeq_size,
   1355			      IRDMA_AEQ_ALIGNMENT);
   1356	aeq->mem.va = dma_alloc_coherent(dev->hw->device, aeq->mem.size,
   1357					 &aeq->mem.pa,
   1358					 GFP_KERNEL | __GFP_NOWARN);
   1359	if (aeq->mem.va)
   1360		goto skip_virt_aeq;
   1361
   1362	/* physically mapped aeq failed. setup virtual aeq */
   1363	status = irdma_create_virt_aeq(rf, aeq_size);
   1364	if (status)
   1365		return status;
   1366
   1367	info.virtual_map = true;
   1368	aeq->virtual_map = info.virtual_map;
   1369	info.pbl_chunk_size = 1;
   1370	info.first_pm_pbl_idx = aeq->palloc.level1.idx;
   1371
   1372skip_virt_aeq:
   1373	info.aeqe_base = aeq->mem.va;
   1374	info.aeq_elem_pa = aeq->mem.pa;
   1375	info.elem_cnt = aeq_size;
   1376	info.dev = dev;
   1377	info.msix_idx = rf->iw_msixtbl->idx;
   1378	status = irdma_sc_aeq_init(&aeq->sc_aeq, &info);
   1379	if (status)
   1380		goto err;
   1381
   1382	status = irdma_cqp_aeq_cmd(dev, &aeq->sc_aeq, IRDMA_OP_AEQ_CREATE);
   1383	if (status)
   1384		goto err;
   1385
   1386	return 0;
   1387
   1388err:
   1389	if (aeq->virtual_map) {
   1390		irdma_destroy_virt_aeq(rf);
   1391	} else {
   1392		dma_free_coherent(dev->hw->device, aeq->mem.size, aeq->mem.va,
   1393				  aeq->mem.pa);
   1394		aeq->mem.va = NULL;
   1395	}
   1396
   1397	return status;
   1398}
   1399
   1400/**
   1401 * irdma_setup_aeq - set up the device aeq
   1402 * @rf: RDMA PCI function
   1403 *
   1404 * Create the aeq and configure its msix interrupt vector
   1405 * Return 0 if successful, otherwise return error
   1406 */
   1407static int irdma_setup_aeq(struct irdma_pci_f *rf)
   1408{
   1409	struct irdma_sc_dev *dev = &rf->sc_dev;
   1410	int status;
   1411
   1412	status = irdma_create_aeq(rf);
   1413	if (status)
   1414		return status;
   1415
   1416	status = irdma_cfg_aeq_vector(rf);
   1417	if (status) {
   1418		irdma_destroy_aeq(rf);
   1419		return status;
   1420	}
   1421
   1422	if (!rf->msix_shared)
   1423		irdma_ena_intr(dev, rf->iw_msixtbl[0].idx);
   1424
   1425	return 0;
   1426}
   1427
   1428/**
   1429 * irdma_initialize_ilq - create iwarp local queue for cm
   1430 * @iwdev: irdma device
   1431 *
   1432 * Return 0 if successful, otherwise return error
   1433 */
   1434static int irdma_initialize_ilq(struct irdma_device *iwdev)
   1435{
   1436	struct irdma_puda_rsrc_info info = {};
   1437	int status;
   1438
   1439	info.type = IRDMA_PUDA_RSRC_TYPE_ILQ;
   1440	info.cq_id = 1;
   1441	info.qp_id = 1;
   1442	info.count = 1;
   1443	info.pd_id = 1;
   1444	info.abi_ver = IRDMA_ABI_VER;
   1445	info.sq_size = min(iwdev->rf->max_qp / 2, (u32)32768);
   1446	info.rq_size = info.sq_size;
   1447	info.buf_size = 1024;
   1448	info.tx_buf_cnt = 2 * info.sq_size;
   1449	info.receive = irdma_receive_ilq;
   1450	info.xmit_complete = irdma_free_sqbuf;
   1451	status = irdma_puda_create_rsrc(&iwdev->vsi, &info);
   1452	if (status)
   1453		ibdev_dbg(&iwdev->ibdev, "ERR: ilq create fail\n");
   1454
   1455	return status;
   1456}
   1457
   1458/**
   1459 * irdma_initialize_ieq - create iwarp exception queue
   1460 * @iwdev: irdma device
   1461 *
   1462 * Return 0 if successful, otherwise return error
   1463 */
   1464static int irdma_initialize_ieq(struct irdma_device *iwdev)
   1465{
   1466	struct irdma_puda_rsrc_info info = {};
   1467	int status;
   1468
   1469	info.type = IRDMA_PUDA_RSRC_TYPE_IEQ;
   1470	info.cq_id = 2;
   1471	info.qp_id = iwdev->vsi.exception_lan_q;
   1472	info.count = 1;
   1473	info.pd_id = 2;
   1474	info.abi_ver = IRDMA_ABI_VER;
   1475	info.sq_size = min(iwdev->rf->max_qp / 2, (u32)32768);
   1476	info.rq_size = info.sq_size;
   1477	info.buf_size = iwdev->vsi.mtu + IRDMA_IPV4_PAD;
   1478	info.tx_buf_cnt = 4096;
   1479	status = irdma_puda_create_rsrc(&iwdev->vsi, &info);
   1480	if (status)
   1481		ibdev_dbg(&iwdev->ibdev, "ERR: ieq create fail\n");
   1482
   1483	return status;
   1484}
   1485
   1486/**
   1487 * irdma_reinitialize_ieq - destroy and re-create ieq
   1488 * @vsi: VSI structure
   1489 */
   1490void irdma_reinitialize_ieq(struct irdma_sc_vsi *vsi)
   1491{
   1492	struct irdma_device *iwdev = vsi->back_vsi;
   1493	struct irdma_pci_f *rf = iwdev->rf;
   1494
   1495	irdma_puda_dele_rsrc(vsi, IRDMA_PUDA_RSRC_TYPE_IEQ, false);
   1496	if (irdma_initialize_ieq(iwdev)) {
   1497		iwdev->rf->reset = true;
   1498		rf->gen_ops.request_reset(rf);
   1499	}
   1500}
   1501
   1502/**
   1503 * irdma_hmc_setup - create hmc objects for the device
   1504 * @rf: RDMA PCI function
   1505 *
   1506 * Set up the device private memory space for the number and size of
   1507 * the hmc objects and create the objects
   1508 * Return 0 if successful, otherwise return error
   1509 */
   1510static int irdma_hmc_setup(struct irdma_pci_f *rf)
   1511{
   1512	int status;
   1513	u32 qpcnt;
   1514
   1515	if (rf->rdma_ver == IRDMA_GEN_1)
   1516		qpcnt = rsrc_limits_table[rf->limits_sel].qplimit * 2;
   1517	else
   1518		qpcnt = rsrc_limits_table[rf->limits_sel].qplimit;
   1519
   1520	rf->sd_type = IRDMA_SD_TYPE_DIRECT;
   1521	status = irdma_cfg_fpm_val(&rf->sc_dev, qpcnt);
   1522	if (status)
   1523		return status;
   1524
   1525	status = irdma_create_hmc_objs(rf, true, rf->rdma_ver);
   1526
   1527	return status;
   1528}
   1529
   1530/**
   1531 * irdma_del_init_mem - deallocate memory resources
   1532 * @rf: RDMA PCI function
   1533 */
   1534static void irdma_del_init_mem(struct irdma_pci_f *rf)
   1535{
   1536	struct irdma_sc_dev *dev = &rf->sc_dev;
   1537
   1538	kfree(dev->hmc_info->sd_table.sd_entry);
   1539	dev->hmc_info->sd_table.sd_entry = NULL;
   1540	kfree(rf->mem_rsrc);
   1541	rf->mem_rsrc = NULL;
   1542	dma_free_coherent(rf->hw.device, rf->obj_mem.size, rf->obj_mem.va,
   1543			  rf->obj_mem.pa);
   1544	rf->obj_mem.va = NULL;
   1545	if (rf->rdma_ver != IRDMA_GEN_1) {
   1546		kfree(rf->allocated_ws_nodes);
   1547		rf->allocated_ws_nodes = NULL;
   1548	}
   1549	kfree(rf->ceqlist);
   1550	rf->ceqlist = NULL;
   1551	kfree(rf->iw_msixtbl);
   1552	rf->iw_msixtbl = NULL;
   1553	kfree(rf->hmc_info_mem);
   1554	rf->hmc_info_mem = NULL;
   1555}
   1556
   1557/**
   1558 * irdma_initialize_dev - initialize device
   1559 * @rf: RDMA PCI function
   1560 *
   1561 * Allocate memory for the hmc objects and initialize iwdev
   1562 * Return 0 if successful, otherwise clean up the resources
   1563 * and return error
   1564 */
   1565static int irdma_initialize_dev(struct irdma_pci_f *rf)
   1566{
   1567	int status;
   1568	struct irdma_sc_dev *dev = &rf->sc_dev;
   1569	struct irdma_device_init_info info = {};
   1570	struct irdma_dma_mem mem;
   1571	u32 size;
   1572
   1573	size = sizeof(struct irdma_hmc_pble_rsrc) +
   1574	       sizeof(struct irdma_hmc_info) +
   1575	       (sizeof(struct irdma_hmc_obj_info) * IRDMA_HMC_IW_MAX);
   1576
   1577	rf->hmc_info_mem = kzalloc(size, GFP_KERNEL);
   1578	if (!rf->hmc_info_mem)
   1579		return -ENOMEM;
   1580
   1581	rf->pble_rsrc = (struct irdma_hmc_pble_rsrc *)rf->hmc_info_mem;
   1582	dev->hmc_info = &rf->hw.hmc;
   1583	dev->hmc_info->hmc_obj = (struct irdma_hmc_obj_info *)
   1584				 (rf->pble_rsrc + 1);
   1585
   1586	status = irdma_obj_aligned_mem(rf, &mem, IRDMA_QUERY_FPM_BUF_SIZE,
   1587				       IRDMA_FPM_QUERY_BUF_ALIGNMENT_M);
   1588	if (status)
   1589		goto error;
   1590
   1591	info.fpm_query_buf_pa = mem.pa;
   1592	info.fpm_query_buf = mem.va;
   1593
   1594	status = irdma_obj_aligned_mem(rf, &mem, IRDMA_COMMIT_FPM_BUF_SIZE,
   1595				       IRDMA_FPM_COMMIT_BUF_ALIGNMENT_M);
   1596	if (status)
   1597		goto error;
   1598
   1599	info.fpm_commit_buf_pa = mem.pa;
   1600	info.fpm_commit_buf = mem.va;
   1601
   1602	info.bar0 = rf->hw.hw_addr;
   1603	info.hmc_fn_id = rf->pf_id;
   1604	info.hw = &rf->hw;
   1605	status = irdma_sc_dev_init(rf->rdma_ver, &rf->sc_dev, &info);
   1606	if (status)
   1607		goto error;
   1608
   1609	return status;
   1610error:
   1611	kfree(rf->hmc_info_mem);
   1612	rf->hmc_info_mem = NULL;
   1613
   1614	return status;
   1615}
   1616
   1617/**
   1618 * irdma_rt_deinit_hw - clean up the irdma device resources
   1619 * @iwdev: irdma device
   1620 *
   1621 * remove the mac ip entry and ipv4/ipv6 addresses, destroy the
   1622 * device queues and free the pble and the hmc objects
   1623 */
   1624void irdma_rt_deinit_hw(struct irdma_device *iwdev)
   1625{
   1626	ibdev_dbg(&iwdev->ibdev, "INIT: state = %d\n", iwdev->init_state);
   1627
   1628	switch (iwdev->init_state) {
   1629	case IP_ADDR_REGISTERED:
   1630		if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
   1631			irdma_del_local_mac_entry(iwdev->rf,
   1632						  (u8)iwdev->mac_ip_table_idx);
   1633		fallthrough;
   1634	case AEQ_CREATED:
   1635	case PBLE_CHUNK_MEM:
   1636	case CEQS_CREATED:
   1637	case IEQ_CREATED:
   1638		if (!iwdev->roce_mode)
   1639			irdma_puda_dele_rsrc(&iwdev->vsi, IRDMA_PUDA_RSRC_TYPE_IEQ,
   1640					     iwdev->rf->reset);
   1641		fallthrough;
   1642	case ILQ_CREATED:
   1643		if (!iwdev->roce_mode)
   1644			irdma_puda_dele_rsrc(&iwdev->vsi,
   1645					     IRDMA_PUDA_RSRC_TYPE_ILQ,
   1646					     iwdev->rf->reset);
   1647		break;
   1648	default:
   1649		ibdev_warn(&iwdev->ibdev, "bad init_state = %d\n", iwdev->init_state);
   1650		break;
   1651	}
   1652
   1653	irdma_cleanup_cm_core(&iwdev->cm_core);
   1654	if (iwdev->vsi.pestat) {
   1655		irdma_vsi_stats_free(&iwdev->vsi);
   1656		kfree(iwdev->vsi.pestat);
   1657	}
   1658	if (iwdev->cleanup_wq)
   1659		destroy_workqueue(iwdev->cleanup_wq);
   1660}
   1661
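        /**
         * irdma_setup_init_state - save msix info and allocate object memory
         * @rf: RDMA PCI function
         *
         * Save the msix vector information, allocate the DMA memory that
         * irdma_obj_aligned_mem() carves aligned buffers from and initialize
         * the device
         * Return 0 if successful, otherwise clean up and return error
         */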
   1662static int irdma_setup_init_state(struct irdma_pci_f *rf)
   1663{
   1664	int status;
   1665
   1666	status = irdma_save_msix_info(rf);
   1667	if (status)
   1668		return status;
   1669
   1670	rf->hw.device = &rf->pcidev->dev;
   1671	rf->obj_mem.size = ALIGN(8192, IRDMA_HW_PAGE_SIZE);
   1672	rf->obj_mem.va = dma_alloc_coherent(rf->hw.device, rf->obj_mem.size,
   1673					    &rf->obj_mem.pa, GFP_KERNEL);
   1674	if (!rf->obj_mem.va) {
   1675		status = -ENOMEM;
   1676		goto clean_msixtbl;
   1677	}
   1678
   1679	rf->obj_next = rf->obj_mem;
   1680	status = irdma_initialize_dev(rf);
   1681	if (status)
   1682		goto clean_obj_mem;
   1683
   1684	return 0;
   1685
   1686clean_obj_mem:
   1687	dma_free_coherent(rf->hw.device, rf->obj_mem.size, rf->obj_mem.va,
   1688			  rf->obj_mem.pa);
   1689	rf->obj_mem.va = NULL;
   1690clean_msixtbl:
   1691	kfree(rf->iw_msixtbl);
   1692	rf->iw_msixtbl = NULL;
   1693	return status;
   1694}
   1695
   1696/**
   1697 * irdma_get_used_rsrc - determine resources used internally
   1698 * @iwdev: irdma device
   1699 *
   1700 * Called at the end of open to get all internal allocations
   1701 */
   1702static void irdma_get_used_rsrc(struct irdma_device *iwdev)
   1703{
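	/* Internal resources are allocated contiguously from index 0 during
	 * init, so the first clear bit in each bitmap also gives the count of
	 * entries used so far (a reading of the code, not a documented
	 * contract).
	 */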
   1704	iwdev->rf->used_pds = find_first_zero_bit(iwdev->rf->allocated_pds,
   1705						 iwdev->rf->max_pd);
   1706	iwdev->rf->used_qps = find_first_zero_bit(iwdev->rf->allocated_qps,
   1707						 iwdev->rf->max_qp);
   1708	iwdev->rf->used_cqs = find_first_zero_bit(iwdev->rf->allocated_cqs,
   1709						 iwdev->rf->max_cq);
   1710	iwdev->rf->used_mrs = find_first_zero_bit(iwdev->rf->allocated_mrs,
   1711						 iwdev->rf->max_mr);
   1712}
   1713
   1714void irdma_ctrl_deinit_hw(struct irdma_pci_f *rf)
   1715{
   1716	enum init_completion_state state = rf->init_state;
   1717
   1718	rf->init_state = INVALID_STATE;
   1719	if (rf->rsrc_created) {
   1720		irdma_destroy_aeq(rf);
   1721		irdma_destroy_pble_prm(rf->pble_rsrc);
   1722		irdma_del_ceqs(rf);
   1723		rf->rsrc_created = false;
   1724	}
   1725	switch (state) {
   1726	case CEQ0_CREATED:
   1727		irdma_del_ceq_0(rf);
   1728		fallthrough;
   1729	case CCQ_CREATED:
   1730		irdma_destroy_ccq(rf);
   1731		fallthrough;
   1732	case HW_RSRC_INITIALIZED:
   1733	case HMC_OBJS_CREATED:
   1734		irdma_del_hmc_objects(&rf->sc_dev, rf->sc_dev.hmc_info, true,
   1735				      rf->reset, rf->rdma_ver);
   1736		fallthrough;
   1737	case CQP_CREATED:
   1738		irdma_destroy_cqp(rf, true);
   1739		fallthrough;
   1740	case INITIAL_STATE:
   1741		irdma_del_init_mem(rf);
   1742		break;
   1743	case INVALID_STATE:
   1744	default:
    1745		ibdev_warn(&rf->iwdev->ibdev, "bad init_state = %d\n", state);
   1746		break;
   1747	}
   1748}
   1749
   1750/**
   1751 * irdma_rt_init_hw - Initializes runtime portion of HW
   1752 * @iwdev: irdma device
   1753 * @l2params: qos, tc, mtu info from netdev driver
   1754 *
   1755 * Create device queues ILQ, IEQ, CEQs and PBLEs. Setup irdma
   1756 * device resource objects.
   1757 */
   1758int irdma_rt_init_hw(struct irdma_device *iwdev,
   1759		     struct irdma_l2params *l2params)
   1760{
   1761	struct irdma_pci_f *rf = iwdev->rf;
   1762	struct irdma_sc_dev *dev = &rf->sc_dev;
   1763	struct irdma_vsi_init_info vsi_info = {};
   1764	struct irdma_vsi_stats_info stats_info = {};
   1765	int status;
   1766
   1767	vsi_info.dev = dev;
   1768	vsi_info.back_vsi = iwdev;
   1769	vsi_info.params = l2params;
   1770	vsi_info.pf_data_vsi_num = iwdev->vsi_num;
   1771	vsi_info.register_qset = rf->gen_ops.register_qset;
   1772	vsi_info.unregister_qset = rf->gen_ops.unregister_qset;
   1773	vsi_info.exception_lan_q = 2;
   1774	irdma_sc_vsi_init(&iwdev->vsi, &vsi_info);
   1775
   1776	status = irdma_setup_cm_core(iwdev, rf->rdma_ver);
   1777	if (status)
   1778		return status;
   1779
   1780	stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL);
   1781	if (!stats_info.pestat) {
   1782		irdma_cleanup_cm_core(&iwdev->cm_core);
   1783		return -ENOMEM;
   1784	}
   1785	stats_info.fcn_id = dev->hmc_fn_id;
   1786	status = irdma_vsi_stats_init(&iwdev->vsi, &stats_info);
   1787	if (status) {
   1788		irdma_cleanup_cm_core(&iwdev->cm_core);
   1789		kfree(stats_info.pestat);
   1790		return status;
   1791	}
   1792
   1793	do {
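	/* Single-exit setup: any failure below breaks out of the do/while(0),
	 * and irdma_rt_deinit_hw() unwinds based on iwdev->init_state.
	 */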
   1794		if (!iwdev->roce_mode) {
   1795			status = irdma_initialize_ilq(iwdev);
   1796			if (status)
   1797				break;
   1798			iwdev->init_state = ILQ_CREATED;
   1799			status = irdma_initialize_ieq(iwdev);
   1800			if (status)
   1801				break;
   1802			iwdev->init_state = IEQ_CREATED;
   1803		}
   1804		if (!rf->rsrc_created) {
   1805			status = irdma_setup_ceqs(rf, &iwdev->vsi);
   1806			if (status)
   1807				break;
   1808
   1809			iwdev->init_state = CEQS_CREATED;
   1810
   1811			status = irdma_hmc_init_pble(&rf->sc_dev,
   1812						     rf->pble_rsrc);
   1813			if (status) {
   1814				irdma_del_ceqs(rf);
   1815				break;
   1816			}
   1817
   1818			iwdev->init_state = PBLE_CHUNK_MEM;
   1819
   1820			status = irdma_setup_aeq(rf);
   1821			if (status) {
   1822				irdma_destroy_pble_prm(rf->pble_rsrc);
   1823				irdma_del_ceqs(rf);
   1824				break;
   1825			}
   1826			iwdev->init_state = AEQ_CREATED;
   1827			rf->rsrc_created = true;
   1828		}
   1829
   1830		if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
   1831			irdma_alloc_set_mac(iwdev);
   1832		irdma_add_ip(iwdev);
   1833		iwdev->init_state = IP_ADDR_REGISTERED;
   1834
    1835		/* handles async cleanup tasks - disconnect CM, free qp,
   1836		 * free cq bufs
   1837		 */
   1838		iwdev->cleanup_wq = alloc_workqueue("irdma-cleanup-wq",
   1839					WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
   1840		if (!iwdev->cleanup_wq)
   1841			return -ENOMEM;
   1842		irdma_get_used_rsrc(iwdev);
   1843		init_waitqueue_head(&iwdev->suspend_wq);
   1844
   1845		return 0;
   1846	} while (0);
   1847
   1848	dev_err(&rf->pcidev->dev, "HW runtime init FAIL status = %d last cmpl = %d\n",
   1849		status, iwdev->init_state);
   1850	irdma_rt_deinit_hw(iwdev);
   1851
   1852	return status;
   1853}
   1854
   1855/**
   1856 * irdma_ctrl_init_hw - Initializes control portion of HW
   1857 * @rf: RDMA PCI function
   1858 *
    1859 * Create admin queues, HMC objects and RF resource objects
   1860 */
   1861int irdma_ctrl_init_hw(struct irdma_pci_f *rf)
   1862{
   1863	struct irdma_sc_dev *dev = &rf->sc_dev;
   1864	int status;
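
	/* rf->init_state records the last completed stage so that
	 * irdma_ctrl_deinit_hw() can unwind exactly what was set up.
	 */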
   1865	do {
   1866		status = irdma_setup_init_state(rf);
   1867		if (status)
   1868			break;
   1869		rf->init_state = INITIAL_STATE;
   1870
   1871		status = irdma_create_cqp(rf);
   1872		if (status)
   1873			break;
   1874		rf->init_state = CQP_CREATED;
   1875
   1876		status = irdma_hmc_setup(rf);
   1877		if (status)
   1878			break;
   1879		rf->init_state = HMC_OBJS_CREATED;
   1880
   1881		status = irdma_initialize_hw_rsrc(rf);
   1882		if (status)
   1883			break;
   1884		rf->init_state = HW_RSRC_INITIALIZED;
   1885
   1886		status = irdma_create_ccq(rf);
   1887		if (status)
   1888			break;
   1889		rf->init_state = CCQ_CREATED;
   1890
   1891		dev->feature_info[IRDMA_FEATURE_FW_INFO] = IRDMA_FW_VER_DEFAULT;
   1892		if (rf->rdma_ver != IRDMA_GEN_1) {
   1893			status = irdma_get_rdma_features(dev);
   1894			if (status)
   1895				break;
   1896		}
   1897
   1898		status = irdma_setup_ceq_0(rf);
   1899		if (status)
   1900			break;
   1901		rf->init_state = CEQ0_CREATED;
   1902		/* Handles processing of CQP completions */
   1903		rf->cqp_cmpl_wq = alloc_ordered_workqueue("cqp_cmpl_wq",
   1904						WQ_HIGHPRI | WQ_UNBOUND);
   1905		if (!rf->cqp_cmpl_wq) {
   1906			status = -ENOMEM;
   1907			break;
   1908		}
   1909		INIT_WORK(&rf->cqp_cmpl_work, cqp_compl_worker);
   1910		irdma_sc_ccq_arm(dev->ccq);
   1911		return 0;
   1912	} while (0);
   1913
   1914	dev_err(&rf->pcidev->dev, "IRDMA hardware initialization FAILED init_state=%d status=%d\n",
   1915		rf->init_state, status);
   1916	irdma_ctrl_deinit_hw(rf);
   1917	return status;
   1918}
   1919
   1920/**
   1921 * irdma_set_hw_rsrc - set hw memory resources.
   1922 * @rf: RDMA PCI function
   1923 */
   1924static void irdma_set_hw_rsrc(struct irdma_pci_f *rf)
   1925{
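	/* Carve the single mem_rsrc allocation into the ARP table, the
	 * resource bitmaps and the QP pointer table, in the same order and
	 * sizes used by irdma_calc_mem_rsrc_size().
	 */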
   1926	rf->allocated_qps = (void *)(rf->mem_rsrc +
   1927		   (sizeof(struct irdma_arp_entry) * rf->arp_table_size));
   1928	rf->allocated_cqs = &rf->allocated_qps[BITS_TO_LONGS(rf->max_qp)];
   1929	rf->allocated_mrs = &rf->allocated_cqs[BITS_TO_LONGS(rf->max_cq)];
   1930	rf->allocated_pds = &rf->allocated_mrs[BITS_TO_LONGS(rf->max_mr)];
   1931	rf->allocated_ahs = &rf->allocated_pds[BITS_TO_LONGS(rf->max_pd)];
   1932	rf->allocated_mcgs = &rf->allocated_ahs[BITS_TO_LONGS(rf->max_ah)];
   1933	rf->allocated_arps = &rf->allocated_mcgs[BITS_TO_LONGS(rf->max_mcg)];
   1934	rf->qp_table = (struct irdma_qp **)
   1935		(&rf->allocated_arps[BITS_TO_LONGS(rf->arp_table_size)]);
   1936
   1937	spin_lock_init(&rf->rsrc_lock);
   1938	spin_lock_init(&rf->arp_lock);
   1939	spin_lock_init(&rf->qptable_lock);
   1940	spin_lock_init(&rf->qh_list_lock);
   1941}
   1942
   1943/**
   1944 * irdma_calc_mem_rsrc_size - calculate memory resources size.
   1945 * @rf: RDMA PCI function
   1946 */
   1947static u32 irdma_calc_mem_rsrc_size(struct irdma_pci_f *rf)
   1948{
   1949	u32 rsrc_size;
   1950
   1951	rsrc_size = sizeof(struct irdma_arp_entry) * rf->arp_table_size;
   1952	rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_qp);
   1953	rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mr);
   1954	rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_cq);
   1955	rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_pd);
   1956	rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->arp_table_size);
   1957	rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_ah);
   1958	rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mcg);
   1959	rsrc_size += sizeof(struct irdma_qp **) * rf->max_qp;
   1960
   1961	return rsrc_size;
   1962}
   1963
   1964/**
   1965 * irdma_initialize_hw_rsrc - initialize hw resource tracking array
   1966 * @rf: RDMA PCI function
   1967 */
   1968u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf)
   1969{
   1970	u32 rsrc_size;
   1971	u32 mrdrvbits;
   1972	u32 ret;
   1973
   1974	if (rf->rdma_ver != IRDMA_GEN_1) {
   1975		rf->allocated_ws_nodes =
   1976			kcalloc(BITS_TO_LONGS(IRDMA_MAX_WS_NODES),
   1977				sizeof(unsigned long), GFP_KERNEL);
   1978		if (!rf->allocated_ws_nodes)
   1979			return -ENOMEM;
   1980
   1981		set_bit(0, rf->allocated_ws_nodes);
   1982		rf->max_ws_node_id = IRDMA_MAX_WS_NODES;
   1983	}
   1984	rf->max_cqe = rf->sc_dev.hw_attrs.uk_attrs.max_hw_cq_size;
   1985	rf->max_qp = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt;
   1986	rf->max_mr = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt;
   1987	rf->max_cq = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt;
   1988	rf->max_pd = rf->sc_dev.hw_attrs.max_hw_pds;
   1989	rf->arp_table_size = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].cnt;
   1990	rf->max_ah = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt;
   1991	rf->max_mcg = rf->max_qp;
   1992
   1993	rsrc_size = irdma_calc_mem_rsrc_size(rf);
   1994	rf->mem_rsrc = kzalloc(rsrc_size, GFP_KERNEL);
   1995	if (!rf->mem_rsrc) {
   1996		ret = -ENOMEM;
   1997		goto mem_rsrc_kzalloc_fail;
   1998	}
   1999
   2000	rf->arp_table = (struct irdma_arp_entry *)rf->mem_rsrc;
   2001
   2002	irdma_set_hw_rsrc(rf);
   2003
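	/* Index 0 of every resource is reserved; QPs 1 and 2 are reserved for
	 * the ILQ and IEQ, and CQ/PD indices 1 and 2 are reserved alongside
	 * them.
	 */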
   2004	set_bit(0, rf->allocated_mrs);
   2005	set_bit(0, rf->allocated_qps);
   2006	set_bit(0, rf->allocated_cqs);
   2007	set_bit(0, rf->allocated_pds);
   2008	set_bit(0, rf->allocated_arps);
   2009	set_bit(0, rf->allocated_ahs);
   2010	set_bit(0, rf->allocated_mcgs);
   2011	set_bit(2, rf->allocated_qps); /* qp 2 IEQ */
   2012	set_bit(1, rf->allocated_qps); /* qp 1 ILQ */
   2013	set_bit(1, rf->allocated_cqs);
   2014	set_bit(1, rf->allocated_pds);
   2015	set_bit(2, rf->allocated_cqs);
   2016	set_bit(2, rf->allocated_pds);
   2017
   2018	INIT_LIST_HEAD(&rf->mc_qht_list.list);
   2019	/* stag index mask has a minimum of 14 bits */
   2020	mrdrvbits = 24 - max(get_count_order(rf->max_mr), 14);
   2021	rf->mr_stagmask = ~(((1 << mrdrvbits) - 1) << (32 - mrdrvbits));
   2022
   2023	return 0;
   2024
   2025mem_rsrc_kzalloc_fail:
   2026	kfree(rf->allocated_ws_nodes);
   2027	rf->allocated_ws_nodes = NULL;
   2028
   2029	return ret;
   2030}
   2031
   2032/**
   2033 * irdma_cqp_ce_handler - handle cqp completions
   2034 * @rf: RDMA PCI function
   2035 * @cq: cq for cqp completions
   2036 */
   2037void irdma_cqp_ce_handler(struct irdma_pci_f *rf, struct irdma_sc_cq *cq)
   2038{
   2039	struct irdma_cqp_request *cqp_request;
   2040	struct irdma_sc_dev *dev = &rf->sc_dev;
   2041	u32 cqe_count = 0;
   2042	struct irdma_ccq_cqe_info info;
   2043	unsigned long flags;
   2044	int ret;
   2045
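	/* Drain the CCQ: each completion's scratch field carries the
	 * originating cqp_request, which is either woken up (waiting callers)
	 * or completed through its callback.
	 */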
   2046	do {
   2047		memset(&info, 0, sizeof(info));
   2048		spin_lock_irqsave(&rf->cqp.compl_lock, flags);
   2049		ret = irdma_sc_ccq_get_cqe_info(cq, &info);
   2050		spin_unlock_irqrestore(&rf->cqp.compl_lock, flags);
   2051		if (ret)
   2052			break;
   2053
   2054		cqp_request = (struct irdma_cqp_request *)
   2055			      (unsigned long)info.scratch;
   2056		if (info.error && irdma_cqp_crit_err(dev, cqp_request->info.cqp_cmd,
   2057						     info.maj_err_code,
   2058						     info.min_err_code))
   2059			ibdev_err(&rf->iwdev->ibdev, "cqp opcode = 0x%x maj_err_code = 0x%x min_err_code = 0x%x\n",
   2060				  info.op_code, info.maj_err_code, info.min_err_code);
   2061		if (cqp_request) {
   2062			cqp_request->compl_info.maj_err_code = info.maj_err_code;
   2063			cqp_request->compl_info.min_err_code = info.min_err_code;
   2064			cqp_request->compl_info.op_ret_val = info.op_ret_val;
   2065			cqp_request->compl_info.error = info.error;
   2066
   2067			if (cqp_request->waiting) {
   2068				cqp_request->request_done = true;
   2069				wake_up(&cqp_request->waitq);
   2070				irdma_put_cqp_request(&rf->cqp, cqp_request);
   2071			} else {
   2072				if (cqp_request->callback_fcn)
   2073					cqp_request->callback_fcn(cqp_request);
   2074				irdma_put_cqp_request(&rf->cqp, cqp_request);
   2075			}
   2076		}
   2077
   2078		cqe_count++;
   2079	} while (1);
   2080
   2081	if (cqe_count) {
   2082		irdma_process_bh(dev);
   2083		irdma_sc_ccq_arm(cq);
   2084	}
   2085}
   2086
   2087/**
   2088 * cqp_compl_worker - Handle cqp completions
   2089 * @work: Pointer to work structure
   2090 */
   2091void cqp_compl_worker(struct work_struct *work)
   2092{
   2093	struct irdma_pci_f *rf = container_of(work, struct irdma_pci_f,
   2094					      cqp_cmpl_work);
   2095	struct irdma_sc_cq *cq = &rf->ccq.sc_cq;
   2096
   2097	irdma_cqp_ce_handler(rf, cq);
   2098}
   2099
   2100/**
   2101 * irdma_lookup_apbvt_entry - lookup hash table for an existing apbvt entry corresponding to port
   2102 * @cm_core: cm's core
   2103 * @port: port to identify apbvt entry
   2104 */
   2105static struct irdma_apbvt_entry *irdma_lookup_apbvt_entry(struct irdma_cm_core *cm_core,
   2106							  u16 port)
   2107{
   2108	struct irdma_apbvt_entry *entry;
   2109
   2110	hash_for_each_possible(cm_core->apbvt_hash_tbl, entry, hlist, port) {
   2111		if (entry->port == port) {
   2112			entry->use_cnt++;
   2113			return entry;
   2114		}
   2115	}
   2116
   2117	return NULL;
   2118}
   2119
   2120/**
   2121 * irdma_next_iw_state - modify qp state
   2122 * @iwqp: iwarp qp to modify
   2123 * @state: next state for qp
    2124 * @del_hash: flag to remove the hash entry
    2125 * @term: flags selecting whether a TERM and/or FIN is sent
   2126 * @termlen: length of term message
   2127 */
   2128void irdma_next_iw_state(struct irdma_qp *iwqp, u8 state, u8 del_hash, u8 term,
   2129			 u8 termlen)
   2130{
   2131	struct irdma_modify_qp_info info = {};
   2132
   2133	info.next_iwarp_state = state;
   2134	info.remove_hash_idx = del_hash;
   2135	info.cq_num_valid = true;
   2136	info.arp_cache_idx_valid = true;
   2137	info.dont_send_term = true;
   2138	info.dont_send_fin = true;
   2139	info.termlen = termlen;
   2140
   2141	if (term & IRDMAQP_TERM_SEND_TERM_ONLY)
   2142		info.dont_send_term = false;
   2143	if (term & IRDMAQP_TERM_SEND_FIN_ONLY)
   2144		info.dont_send_fin = false;
   2145	if (iwqp->sc_qp.term_flags && state == IRDMA_QP_STATE_ERROR)
   2146		info.reset_tcp_conn = true;
   2147	iwqp->hw_iwarp_state = state;
   2148	irdma_hw_modify_qp(iwqp->iwdev, iwqp, &info, 0);
   2149	iwqp->iwarp_state = info.next_iwarp_state;
   2150}
   2151
   2152/**
   2153 * irdma_del_local_mac_entry - remove a mac entry from the hw
   2154 * table
   2155 * @rf: RDMA PCI function
   2156 * @idx: the index of the mac ip address to delete
   2157 */
   2158void irdma_del_local_mac_entry(struct irdma_pci_f *rf, u16 idx)
   2159{
   2160	struct irdma_cqp *iwcqp = &rf->cqp;
   2161	struct irdma_cqp_request *cqp_request;
   2162	struct cqp_cmds_info *cqp_info;
   2163
   2164	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
   2165	if (!cqp_request)
   2166		return;
   2167
   2168	cqp_info = &cqp_request->info;
   2169	cqp_info->cqp_cmd = IRDMA_OP_DELETE_LOCAL_MAC_ENTRY;
   2170	cqp_info->post_sq = 1;
   2171	cqp_info->in.u.del_local_mac_entry.cqp = &iwcqp->sc_cqp;
   2172	cqp_info->in.u.del_local_mac_entry.scratch = (uintptr_t)cqp_request;
   2173	cqp_info->in.u.del_local_mac_entry.entry_idx = idx;
   2174	cqp_info->in.u.del_local_mac_entry.ignore_ref_count = 0;
   2175
   2176	irdma_handle_cqp_op(rf, cqp_request);
   2177	irdma_put_cqp_request(iwcqp, cqp_request);
   2178}
   2179
   2180/**
   2181 * irdma_add_local_mac_entry - add a mac ip address entry to the
   2182 * hw table
   2183 * @rf: RDMA PCI function
   2184 * @mac_addr: pointer to mac address
   2185 * @idx: the index of the mac ip address to add
   2186 */
   2187int irdma_add_local_mac_entry(struct irdma_pci_f *rf, const u8 *mac_addr, u16 idx)
   2188{
   2189	struct irdma_local_mac_entry_info *info;
   2190	struct irdma_cqp *iwcqp = &rf->cqp;
   2191	struct irdma_cqp_request *cqp_request;
   2192	struct cqp_cmds_info *cqp_info;
   2193	int status;
   2194
   2195	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
   2196	if (!cqp_request)
   2197		return -ENOMEM;
   2198
   2199	cqp_info = &cqp_request->info;
   2200	cqp_info->post_sq = 1;
   2201	info = &cqp_info->in.u.add_local_mac_entry.info;
   2202	ether_addr_copy(info->mac_addr, mac_addr);
   2203	info->entry_idx = idx;
    2204	cqp_info->in.u.add_local_mac_entry.scratch = (uintptr_t)cqp_request;
    2205	cqp_info->cqp_cmd = IRDMA_OP_ADD_LOCAL_MAC_ENTRY;
    2206	cqp_info->in.u.add_local_mac_entry.cqp = &iwcqp->sc_cqp;
   2208
   2209	status = irdma_handle_cqp_op(rf, cqp_request);
   2210	irdma_put_cqp_request(iwcqp, cqp_request);
   2211
   2212	return status;
   2213}
   2214
   2215/**
   2216 * irdma_alloc_local_mac_entry - allocate a mac entry
   2217 * @rf: RDMA PCI function
   2218 * @mac_tbl_idx: the index of the new mac address
   2219 *
   2220 * Allocate a mac address entry and update the mac_tbl_idx
    2221 * to hold the index of the newly created mac address.
    2222 * Return 0 if successful, otherwise return an error.
   2223 */
   2224int irdma_alloc_local_mac_entry(struct irdma_pci_f *rf, u16 *mac_tbl_idx)
   2225{
   2226	struct irdma_cqp *iwcqp = &rf->cqp;
   2227	struct irdma_cqp_request *cqp_request;
   2228	struct cqp_cmds_info *cqp_info;
   2229	int status = 0;
   2230
   2231	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
   2232	if (!cqp_request)
   2233		return -ENOMEM;
   2234
   2235	cqp_info = &cqp_request->info;
   2236	cqp_info->cqp_cmd = IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY;
   2237	cqp_info->post_sq = 1;
   2238	cqp_info->in.u.alloc_local_mac_entry.cqp = &iwcqp->sc_cqp;
   2239	cqp_info->in.u.alloc_local_mac_entry.scratch = (uintptr_t)cqp_request;
   2240	status = irdma_handle_cqp_op(rf, cqp_request);
   2241	if (!status)
   2242		*mac_tbl_idx = (u16)cqp_request->compl_info.op_ret_val;
   2243
   2244	irdma_put_cqp_request(iwcqp, cqp_request);
   2245
   2246	return status;
   2247}
   2248
   2249/**
   2250 * irdma_cqp_manage_apbvt_cmd - send cqp command manage apbvt
   2251 * @iwdev: irdma device
   2252 * @accel_local_port: port for apbvt
    2253 * @add_port: add or delete port
   2254 */
   2255static int irdma_cqp_manage_apbvt_cmd(struct irdma_device *iwdev,
   2256				      u16 accel_local_port, bool add_port)
   2257{
   2258	struct irdma_apbvt_info *info;
   2259	struct irdma_cqp_request *cqp_request;
   2260	struct cqp_cmds_info *cqp_info;
   2261	int status;
   2262
   2263	cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, add_port);
   2264	if (!cqp_request)
   2265		return -ENOMEM;
   2266
   2267	cqp_info = &cqp_request->info;
   2268	info = &cqp_info->in.u.manage_apbvt_entry.info;
   2269	memset(info, 0, sizeof(*info));
   2270	info->add = add_port;
   2271	info->port = accel_local_port;
   2272	cqp_info->cqp_cmd = IRDMA_OP_MANAGE_APBVT_ENTRY;
   2273	cqp_info->post_sq = 1;
   2274	cqp_info->in.u.manage_apbvt_entry.cqp = &iwdev->rf->cqp.sc_cqp;
   2275	cqp_info->in.u.manage_apbvt_entry.scratch = (uintptr_t)cqp_request;
   2276	ibdev_dbg(&iwdev->ibdev, "DEV: %s: port=0x%04x\n",
   2277		  (!add_port) ? "DELETE" : "ADD", accel_local_port);
   2278
   2279	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
   2280	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
   2281
   2282	return status;
   2283}
   2284
   2285/**
   2286 * irdma_add_apbvt - add tcp port to HW apbvt table
   2287 * @iwdev: irdma device
   2288 * @port: port for apbvt
   2289 */
   2290struct irdma_apbvt_entry *irdma_add_apbvt(struct irdma_device *iwdev, u16 port)
   2291{
   2292	struct irdma_cm_core *cm_core = &iwdev->cm_core;
   2293	struct irdma_apbvt_entry *entry;
   2294	unsigned long flags;
   2295
   2296	spin_lock_irqsave(&cm_core->apbvt_lock, flags);
   2297	entry = irdma_lookup_apbvt_entry(cm_core, port);
   2298	if (entry) {
   2299		spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
   2300		return entry;
   2301	}
   2302
   2303	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
   2304	if (!entry) {
   2305		spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
   2306		return NULL;
   2307	}
   2308
   2309	entry->port = port;
   2310	entry->use_cnt = 1;
   2311	hash_add(cm_core->apbvt_hash_tbl, &entry->hlist, entry->port);
   2312	spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
   2313
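	/* The CQP command is issued after dropping apbvt_lock; an add uses a
	 * waiting request and may sleep.
	 */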
   2314	if (irdma_cqp_manage_apbvt_cmd(iwdev, port, true)) {
   2315		kfree(entry);
   2316		return NULL;
   2317	}
   2318
   2319	return entry;
   2320}
   2321
   2322/**
   2323 * irdma_del_apbvt - delete tcp port from HW apbvt table
   2324 * @iwdev: irdma device
   2325 * @entry: apbvt entry object
   2326 */
   2327void irdma_del_apbvt(struct irdma_device *iwdev,
   2328		     struct irdma_apbvt_entry *entry)
   2329{
   2330	struct irdma_cm_core *cm_core = &iwdev->cm_core;
   2331	unsigned long flags;
   2332
   2333	spin_lock_irqsave(&cm_core->apbvt_lock, flags);
   2334	if (--entry->use_cnt) {
   2335		spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
   2336		return;
   2337	}
   2338
   2339	hash_del(&entry->hlist);
   2340	/* apbvt_lock is held across CQP delete APBVT OP (non-waiting) to
    2341	 * prevent an add APBVT CQP command for the same port from racing
    2342	 * ahead of the delete.
   2343	 */
   2344	irdma_cqp_manage_apbvt_cmd(iwdev, entry->port, false);
   2345	kfree(entry);
   2346	spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
   2347}
   2348
   2349/**
   2350 * irdma_manage_arp_cache - manage hw arp cache
   2351 * @rf: RDMA PCI function
   2352 * @mac_addr: mac address ptr
   2353 * @ip_addr: ip addr for arp cache
    2354 * @ipv4: flag indicating IPv4
   2355 * @action: add, delete or modify
   2356 */
   2357void irdma_manage_arp_cache(struct irdma_pci_f *rf,
   2358			    const unsigned char *mac_addr,
   2359			    u32 *ip_addr, bool ipv4, u32 action)
   2360{
   2361	struct irdma_add_arp_cache_entry_info *info;
   2362	struct irdma_cqp_request *cqp_request;
   2363	struct cqp_cmds_info *cqp_info;
   2364	int arp_index;
   2365
   2366	arp_index = irdma_arp_table(rf, ip_addr, ipv4, mac_addr, action);
   2367	if (arp_index == -1)
   2368		return;
   2369
   2370	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false);
   2371	if (!cqp_request)
   2372		return;
   2373
   2374	cqp_info = &cqp_request->info;
   2375	if (action == IRDMA_ARP_ADD) {
   2376		cqp_info->cqp_cmd = IRDMA_OP_ADD_ARP_CACHE_ENTRY;
   2377		info = &cqp_info->in.u.add_arp_cache_entry.info;
   2378		memset(info, 0, sizeof(*info));
   2379		info->arp_index = (u16)arp_index;
   2380		info->permanent = true;
   2381		ether_addr_copy(info->mac_addr, mac_addr);
   2382		cqp_info->in.u.add_arp_cache_entry.scratch =
   2383			(uintptr_t)cqp_request;
   2384		cqp_info->in.u.add_arp_cache_entry.cqp = &rf->cqp.sc_cqp;
   2385	} else {
   2386		cqp_info->cqp_cmd = IRDMA_OP_DELETE_ARP_CACHE_ENTRY;
   2387		cqp_info->in.u.del_arp_cache_entry.scratch =
   2388			(uintptr_t)cqp_request;
   2389		cqp_info->in.u.del_arp_cache_entry.cqp = &rf->cqp.sc_cqp;
   2390		cqp_info->in.u.del_arp_cache_entry.arp_index = arp_index;
   2391	}
   2392
   2393	cqp_info->post_sq = 1;
   2394	irdma_handle_cqp_op(rf, cqp_request);
   2395	irdma_put_cqp_request(&rf->cqp, cqp_request);
   2396}
   2397
   2398/**
   2399 * irdma_send_syn_cqp_callback - do syn/ack after qhash
   2400 * @cqp_request: qhash cqp completion
   2401 */
   2402static void irdma_send_syn_cqp_callback(struct irdma_cqp_request *cqp_request)
   2403{
   2404	struct irdma_cm_node *cm_node = cqp_request->param;
   2405
   2406	irdma_send_syn(cm_node, 1);
   2407	irdma_rem_ref_cm_node(cm_node);
   2408}
   2409
   2410/**
   2411 * irdma_manage_qhash - add or modify qhash
   2412 * @iwdev: irdma device
   2413 * @cminfo: cm info for qhash
   2414 * @etype: type (syn or quad)
   2415 * @mtype: type of qhash
   2416 * @cmnode: cmnode associated with connection
   2417 * @wait: wait for completion
   2418 */
   2419int irdma_manage_qhash(struct irdma_device *iwdev, struct irdma_cm_info *cminfo,
   2420		       enum irdma_quad_entry_type etype,
   2421		       enum irdma_quad_hash_manage_type mtype, void *cmnode,
   2422		       bool wait)
   2423{
   2424	struct irdma_qhash_table_info *info;
   2425	struct irdma_cqp *iwcqp = &iwdev->rf->cqp;
   2426	struct irdma_cqp_request *cqp_request;
   2427	struct cqp_cmds_info *cqp_info;
   2428	struct irdma_cm_node *cm_node = cmnode;
   2429	int status;
   2430
   2431	cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait);
   2432	if (!cqp_request)
   2433		return -ENOMEM;
   2434
   2435	cqp_info = &cqp_request->info;
   2436	info = &cqp_info->in.u.manage_qhash_table_entry.info;
   2437	memset(info, 0, sizeof(*info));
   2438	info->vsi = &iwdev->vsi;
   2439	info->manage = mtype;
   2440	info->entry_type = etype;
   2441	if (cminfo->vlan_id < VLAN_N_VID) {
   2442		info->vlan_valid = true;
   2443		info->vlan_id = cminfo->vlan_id;
   2444	} else {
   2445		info->vlan_valid = false;
   2446	}
   2447	info->ipv4_valid = cminfo->ipv4;
   2448	info->user_pri = cminfo->user_pri;
   2449	ether_addr_copy(info->mac_addr, iwdev->netdev->dev_addr);
   2450	info->qp_num = cminfo->qh_qpid;
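	/* Note the naming: the "dest" fields carry the local address/port and
	 * the "src" fields the remote ones.
	 */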
   2451	info->dest_port = cminfo->loc_port;
   2452	info->dest_ip[0] = cminfo->loc_addr[0];
   2453	info->dest_ip[1] = cminfo->loc_addr[1];
   2454	info->dest_ip[2] = cminfo->loc_addr[2];
   2455	info->dest_ip[3] = cminfo->loc_addr[3];
   2456	if (etype == IRDMA_QHASH_TYPE_TCP_ESTABLISHED ||
   2457	    etype == IRDMA_QHASH_TYPE_UDP_UNICAST ||
   2458	    etype == IRDMA_QHASH_TYPE_UDP_MCAST ||
   2459	    etype == IRDMA_QHASH_TYPE_ROCE_MCAST ||
   2460	    etype == IRDMA_QHASH_TYPE_ROCEV2_HW) {
   2461		info->src_port = cminfo->rem_port;
   2462		info->src_ip[0] = cminfo->rem_addr[0];
   2463		info->src_ip[1] = cminfo->rem_addr[1];
   2464		info->src_ip[2] = cminfo->rem_addr[2];
   2465		info->src_ip[3] = cminfo->rem_addr[3];
   2466	}
   2467	if (cmnode) {
   2468		cqp_request->callback_fcn = irdma_send_syn_cqp_callback;
   2469		cqp_request->param = cmnode;
   2470		if (!wait)
   2471			refcount_inc(&cm_node->refcnt);
   2472	}
   2473	if (info->ipv4_valid)
   2474		ibdev_dbg(&iwdev->ibdev,
   2475			  "CM: %s caller: %pS loc_port=0x%04x rem_port=0x%04x loc_addr=%pI4 rem_addr=%pI4 mac=%pM, vlan_id=%d cm_node=%p\n",
   2476			  (!mtype) ? "DELETE" : "ADD",
   2477			  __builtin_return_address(0), info->dest_port,
   2478			  info->src_port, info->dest_ip, info->src_ip,
   2479			  info->mac_addr, cminfo->vlan_id,
    2480			  cmnode);
   2481	else
   2482		ibdev_dbg(&iwdev->ibdev,
   2483			  "CM: %s caller: %pS loc_port=0x%04x rem_port=0x%04x loc_addr=%pI6 rem_addr=%pI6 mac=%pM, vlan_id=%d cm_node=%p\n",
   2484			  (!mtype) ? "DELETE" : "ADD",
   2485			  __builtin_return_address(0), info->dest_port,
   2486			  info->src_port, info->dest_ip, info->src_ip,
   2487			  info->mac_addr, cminfo->vlan_id,
    2488			  cmnode);
   2489
   2490	cqp_info->in.u.manage_qhash_table_entry.cqp = &iwdev->rf->cqp.sc_cqp;
   2491	cqp_info->in.u.manage_qhash_table_entry.scratch = (uintptr_t)cqp_request;
   2492	cqp_info->cqp_cmd = IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY;
   2493	cqp_info->post_sq = 1;
   2494	status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
   2495	if (status && cm_node && !wait)
   2496		irdma_rem_ref_cm_node(cm_node);
   2497
   2498	irdma_put_cqp_request(iwcqp, cqp_request);
   2499
   2500	return status;
   2501}
   2502
   2503/**
   2504 * irdma_hw_flush_wqes_callback - Check return code after flush
   2505 * @cqp_request: qhash cqp completion
   2506 */
   2507static void irdma_hw_flush_wqes_callback(struct irdma_cqp_request *cqp_request)
   2508{
   2509	struct irdma_qp_flush_info *hw_info;
   2510	struct irdma_sc_qp *qp;
   2511	struct irdma_qp *iwqp;
   2512	struct cqp_cmds_info *cqp_info;
   2513
   2514	cqp_info = &cqp_request->info;
   2515	hw_info = &cqp_info->in.u.qp_flush_wqes.info;
   2516	qp = cqp_info->in.u.qp_flush_wqes.qp;
   2517	iwqp = qp->qp_uk.back_qp;
   2518
   2519	if (cqp_request->compl_info.maj_err_code)
   2520		return;
   2521
   2522	if (hw_info->rq &&
   2523	    (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_SQ_WQE_FLUSHED ||
   2524	     cqp_request->compl_info.min_err_code == 0)) {
   2525		/* RQ WQE flush was requested but did not happen */
   2526		qp->qp_uk.rq_flush_complete = true;
   2527	}
   2528	if (hw_info->sq &&
   2529	    (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_RQ_WQE_FLUSHED ||
   2530	     cqp_request->compl_info.min_err_code == 0)) {
   2531		if (IRDMA_RING_MORE_WORK(qp->qp_uk.sq_ring)) {
   2532			ibdev_err(&iwqp->iwdev->ibdev, "Flush QP[%d] failed, SQ has more work",
   2533				  qp->qp_uk.qp_id);
   2534			irdma_ib_qp_event(iwqp, IRDMA_QP_EVENT_CATASTROPHIC);
   2535		}
   2536		qp->qp_uk.sq_flush_complete = true;
   2537	}
   2538}
   2539
   2540/**
   2541 * irdma_hw_flush_wqes - flush qp's wqe
   2542 * @rf: RDMA PCI function
   2543 * @qp: hardware control qp
   2544 * @info: info for flush
    2545 * @wait: flag to wait for completion
   2546 */
   2547int irdma_hw_flush_wqes(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
   2548			struct irdma_qp_flush_info *info, bool wait)
   2549{
   2550	int status;
   2551	struct irdma_qp_flush_info *hw_info;
   2552	struct irdma_cqp_request *cqp_request;
   2553	struct cqp_cmds_info *cqp_info;
   2554	struct irdma_qp *iwqp = qp->qp_uk.back_qp;
   2555
   2556	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
   2557	if (!cqp_request)
   2558		return -ENOMEM;
   2559
   2560	cqp_info = &cqp_request->info;
   2561	if (!wait)
   2562		cqp_request->callback_fcn = irdma_hw_flush_wqes_callback;
   2563	hw_info = &cqp_request->info.in.u.qp_flush_wqes.info;
   2564	memcpy(hw_info, info, sizeof(*hw_info));
   2565	cqp_info->cqp_cmd = IRDMA_OP_QP_FLUSH_WQES;
   2566	cqp_info->post_sq = 1;
   2567	cqp_info->in.u.qp_flush_wqes.qp = qp;
   2568	cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)cqp_request;
   2569	status = irdma_handle_cqp_op(rf, cqp_request);
   2570	if (status) {
   2571		qp->qp_uk.sq_flush_complete = true;
   2572		qp->qp_uk.rq_flush_complete = true;
   2573		irdma_put_cqp_request(&rf->cqp, cqp_request);
   2574		return status;
   2575	}
   2576
   2577	if (!wait || cqp_request->compl_info.maj_err_code)
   2578		goto put_cqp;
   2579
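	/* For a waited request, min_err_code reports which side was actually
	 * flushed; if the SQ still has posted work, re-issue an SQ-only flush
	 * below.
	 */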
   2580	if (info->rq) {
   2581		if (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_SQ_WQE_FLUSHED ||
   2582		    cqp_request->compl_info.min_err_code == 0) {
   2583			/* RQ WQE flush was requested but did not happen */
   2584			qp->qp_uk.rq_flush_complete = true;
   2585		}
   2586	}
   2587	if (info->sq) {
   2588		if (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_RQ_WQE_FLUSHED ||
   2589		    cqp_request->compl_info.min_err_code == 0) {
   2590			/*
   2591			 * Handling case where WQE is posted to empty SQ when
   2592			 * flush has not completed
   2593			 */
   2594			if (IRDMA_RING_MORE_WORK(qp->qp_uk.sq_ring)) {
   2595				struct irdma_cqp_request *new_req;
   2596
   2597				if (!qp->qp_uk.sq_flush_complete)
   2598					goto put_cqp;
   2599				qp->qp_uk.sq_flush_complete = false;
   2600				qp->flush_sq = false;
   2601
   2602				info->rq = false;
   2603				info->sq = true;
   2604				new_req = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
   2605				if (!new_req) {
   2606					status = -ENOMEM;
   2607					goto put_cqp;
   2608				}
   2609				cqp_info = &new_req->info;
   2610				hw_info = &new_req->info.in.u.qp_flush_wqes.info;
   2611				memcpy(hw_info, info, sizeof(*hw_info));
   2612				cqp_info->cqp_cmd = IRDMA_OP_QP_FLUSH_WQES;
   2613				cqp_info->post_sq = 1;
   2614				cqp_info->in.u.qp_flush_wqes.qp = qp;
   2615				cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)new_req;
   2616
   2617				status = irdma_handle_cqp_op(rf, new_req);
   2618				if (new_req->compl_info.maj_err_code ||
   2619				    new_req->compl_info.min_err_code != IRDMA_CQP_COMPL_SQ_WQE_FLUSHED ||
   2620				    status) {
   2621					ibdev_err(&iwqp->iwdev->ibdev, "fatal QP event: SQ in error but not flushed, qp: %d",
   2622						  iwqp->ibqp.qp_num);
   2623					qp->qp_uk.sq_flush_complete = false;
   2624					irdma_ib_qp_event(iwqp, IRDMA_QP_EVENT_CATASTROPHIC);
   2625				}
   2626				irdma_put_cqp_request(&rf->cqp, new_req);
   2627			} else {
   2628				/* SQ WQE flush was requested but did not happen */
   2629				qp->qp_uk.sq_flush_complete = true;
   2630			}
   2631		} else {
   2632			if (!IRDMA_RING_MORE_WORK(qp->qp_uk.sq_ring))
   2633				qp->qp_uk.sq_flush_complete = true;
   2634		}
   2635	}
   2636
   2637	ibdev_dbg(&rf->iwdev->ibdev,
   2638		  "VERBS: qp_id=%d qp_type=%d qpstate=%d ibqpstate=%d last_aeq=%d hw_iw_state=%d maj_err_code=%d min_err_code=%d\n",
   2639		  iwqp->ibqp.qp_num, rf->protocol_used, iwqp->iwarp_state,
   2640		  iwqp->ibqp_state, iwqp->last_aeq, iwqp->hw_iwarp_state,
   2641		  cqp_request->compl_info.maj_err_code,
   2642		  cqp_request->compl_info.min_err_code);
   2643put_cqp:
   2644	irdma_put_cqp_request(&rf->cqp, cqp_request);
   2645
   2646	return status;
   2647}
   2648
   2649/**
   2650 * irdma_gen_ae - generate AE
   2651 * @rf: RDMA PCI function
   2652 * @qp: qp associated with AE
   2653 * @info: info for ae
   2654 * @wait: wait for completion
   2655 */
   2656void irdma_gen_ae(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
   2657		  struct irdma_gen_ae_info *info, bool wait)
   2658{
   2659	struct irdma_gen_ae_info *ae_info;
   2660	struct irdma_cqp_request *cqp_request;
   2661	struct cqp_cmds_info *cqp_info;
   2662
   2663	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
   2664	if (!cqp_request)
   2665		return;
   2666
   2667	cqp_info = &cqp_request->info;
   2668	ae_info = &cqp_request->info.in.u.gen_ae.info;
   2669	memcpy(ae_info, info, sizeof(*ae_info));
   2670	cqp_info->cqp_cmd = IRDMA_OP_GEN_AE;
   2671	cqp_info->post_sq = 1;
   2672	cqp_info->in.u.gen_ae.qp = qp;
   2673	cqp_info->in.u.gen_ae.scratch = (uintptr_t)cqp_request;
   2674
   2675	irdma_handle_cqp_op(rf, cqp_request);
   2676	irdma_put_cqp_request(&rf->cqp, cqp_request);
   2677}
   2678
   2679void irdma_flush_wqes(struct irdma_qp *iwqp, u32 flush_mask)
   2680{
   2681	struct irdma_qp_flush_info info = {};
   2682	struct irdma_pci_f *rf = iwqp->iwdev->rf;
   2683	u8 flush_code = iwqp->sc_qp.flush_code;
   2684
   2685	if (!(flush_mask & IRDMA_FLUSH_SQ) && !(flush_mask & IRDMA_FLUSH_RQ))
   2686		return;
   2687
    2688	/* Set flush info fields */
   2689	info.sq = flush_mask & IRDMA_FLUSH_SQ;
   2690	info.rq = flush_mask & IRDMA_FLUSH_RQ;
   2691
   2692	/* Generate userflush errors in CQE */
   2693	info.sq_major_code = IRDMA_FLUSH_MAJOR_ERR;
   2694	info.sq_minor_code = FLUSH_GENERAL_ERR;
   2695	info.rq_major_code = IRDMA_FLUSH_MAJOR_ERR;
   2696	info.rq_minor_code = FLUSH_GENERAL_ERR;
   2697	info.userflushcode = true;
   2698
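	/* A REFLUSH clears the flush_sq/flush_rq markers so the flush can be
	 * issued again; otherwise kernel-mode QPs also schedule the delayed
	 * flush work.
	 */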
   2699	if (flush_mask & IRDMA_REFLUSH) {
   2700		if (info.sq)
   2701			iwqp->sc_qp.flush_sq = false;
   2702		if (info.rq)
   2703			iwqp->sc_qp.flush_rq = false;
   2704	} else {
   2705		if (flush_code) {
   2706			if (info.sq && iwqp->sc_qp.sq_flush_code)
   2707				info.sq_minor_code = flush_code;
   2708			if (info.rq && iwqp->sc_qp.rq_flush_code)
   2709				info.rq_minor_code = flush_code;
   2710		}
   2711		if (!iwqp->user_mode)
   2712			queue_delayed_work(iwqp->iwdev->cleanup_wq,
   2713					   &iwqp->dwork_flush,
   2714					   msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
   2715	}
   2716
   2717	/* Issue flush */
   2718	(void)irdma_hw_flush_wqes(rf, &iwqp->sc_qp, &info,
   2719				  flush_mask & IRDMA_FLUSH_WAIT);
   2720	iwqp->flush_issued = true;
   2721}