cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ctrl.c (174878B)


      1// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
      2/* Copyright (c) 2015 - 2021 Intel Corporation */
      3#include <linux/etherdevice.h>
      4
      5#include "osdep.h"
      6#include "hmc.h"
      7#include "defs.h"
      8#include "type.h"
      9#include "ws.h"
     10#include "protos.h"
     11
     12/**
     13 * irdma_get_qp_from_list - get next qp from a list
     14 * @head: Listhead of qp's
     15 * @qp: current qp
     16 */
     17struct irdma_sc_qp *irdma_get_qp_from_list(struct list_head *head,
     18					   struct irdma_sc_qp *qp)
     19{
     20	struct list_head *lastentry;
     21	struct list_head *entry = NULL;
     22
     23	if (list_empty(head))
     24		return NULL;
     25
     26	if (!qp) {
     27		entry = head->next;
     28	} else {
     29		lastentry = &qp->list;
     30		entry = lastentry->next;
     31		if (entry == head)
     32			return NULL;
     33	}
     34
     35	return container_of(entry, struct irdma_sc_qp, list);
     36}
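/*
 * Usage sketch of the helper above, mirroring irdma_sc_suspend_resume_qps()
 * below: call it repeatedly, feeding the previously returned qp back in until
 * it returns NULL, with the matching qos_mutex held by the caller:
 *
 *	qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, NULL);
 *	while (qp) {
 *		// ... operate on qp ...
 *		qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
 *	}
 */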
     37
     38/**
     39 * irdma_sc_suspend_resume_qps - suspend/resume all qp's on VSI
     40 * @vsi: the VSI struct pointer
     41 * @op: Set to IRDMA_OP_RESUME or IRDMA_OP_SUSPEND
     42 */
     43void irdma_sc_suspend_resume_qps(struct irdma_sc_vsi *vsi, u8 op)
     44{
     45	struct irdma_sc_qp *qp = NULL;
     46	u8 i;
     47
     48	for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
     49		mutex_lock(&vsi->qos[i].qos_mutex);
     50		qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
     51		while (qp) {
     52			if (op == IRDMA_OP_RESUME) {
     53				if (!qp->dev->ws_add(vsi, i)) {
     54					qp->qs_handle =
     55						vsi->qos[qp->user_pri].qs_handle;
     56					irdma_cqp_qp_suspend_resume(qp, op);
     57				} else {
     58					irdma_cqp_qp_suspend_resume(qp, op);
     59					irdma_modify_qp_to_err(qp);
     60				}
     61			} else if (op == IRDMA_OP_SUSPEND) {
     62				/* issue cqp suspend command */
     63				if (!irdma_cqp_qp_suspend_resume(qp, op))
     64					atomic_inc(&vsi->qp_suspend_reqs);
     65			}
     66			qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
     67		}
     68		mutex_unlock(&vsi->qos[i].qos_mutex);
     69	}
     70}
     71
     72static void irdma_set_qos_info(struct irdma_sc_vsi  *vsi,
     73			       struct irdma_l2params *l2p)
     74{
     75	u8 i;
     76
     77	vsi->qos_rel_bw = l2p->vsi_rel_bw;
     78	vsi->qos_prio_type = l2p->vsi_prio_type;
     79	vsi->dscp_mode = l2p->dscp_mode;
     80	if (l2p->dscp_mode) {
     81		memcpy(vsi->dscp_map, l2p->dscp_map, sizeof(vsi->dscp_map));
     82		for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++)
     83			l2p->up2tc[i] = i;
     84	}
     85	for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
     86		if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
     87			vsi->qos[i].qs_handle = l2p->qs_handle_list[i];
     88		vsi->qos[i].traffic_class = l2p->up2tc[i];
     89		vsi->qos[i].rel_bw =
     90			l2p->tc_info[vsi->qos[i].traffic_class].rel_bw;
     91		vsi->qos[i].prio_type =
     92			l2p->tc_info[vsi->qos[i].traffic_class].prio_type;
     93		vsi->qos[i].valid = false;
     94	}
     95}
     96
     97/**
      98 * irdma_change_l2params - given the new l2 parameters, change all qps
     99 * @vsi: RDMA VSI pointer
    100 * @l2params: New parameters from l2
    101 */
    102void irdma_change_l2params(struct irdma_sc_vsi *vsi,
    103			   struct irdma_l2params *l2params)
    104{
    105	if (l2params->mtu_changed) {
    106		vsi->mtu = l2params->mtu;
    107		if (vsi->ieq)
    108			irdma_reinitialize_ieq(vsi);
    109	}
    110
    111	if (!l2params->tc_changed)
    112		return;
    113
    114	vsi->tc_change_pending = false;
    115	irdma_set_qos_info(vsi, l2params);
    116	irdma_sc_suspend_resume_qps(vsi, IRDMA_OP_RESUME);
    117}
    118
    119/**
    120 * irdma_qp_rem_qos - remove qp from qos lists during destroy qp
    121 * @qp: qp to be removed from qos
    122 */
    123void irdma_qp_rem_qos(struct irdma_sc_qp *qp)
    124{
    125	struct irdma_sc_vsi *vsi = qp->vsi;
    126
    127	ibdev_dbg(to_ibdev(qp->dev),
     128		  "DCB: Remove qp[%d] UP[%d] qset[%d] on_qoslist[%d]\n",
    129		  qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle,
    130		  qp->on_qoslist);
    131	mutex_lock(&vsi->qos[qp->user_pri].qos_mutex);
    132	if (qp->on_qoslist) {
    133		qp->on_qoslist = false;
    134		list_del(&qp->list);
    135	}
    136	mutex_unlock(&vsi->qos[qp->user_pri].qos_mutex);
    137}
    138
    139/**
    140 * irdma_qp_add_qos - called during setctx for qp to be added to qos
    141 * @qp: qp to be added to qos
    142 */
    143void irdma_qp_add_qos(struct irdma_sc_qp *qp)
    144{
    145	struct irdma_sc_vsi *vsi = qp->vsi;
    146
    147	ibdev_dbg(to_ibdev(qp->dev),
     148		  "DCB: Add qp[%d] UP[%d] qset[%d] on_qoslist[%d]\n",
    149		  qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle,
    150		  qp->on_qoslist);
    151	mutex_lock(&vsi->qos[qp->user_pri].qos_mutex);
    152	if (!qp->on_qoslist) {
    153		list_add(&qp->list, &vsi->qos[qp->user_pri].qplist);
    154		qp->on_qoslist = true;
    155		qp->qs_handle = vsi->qos[qp->user_pri].qs_handle;
    156	}
    157	mutex_unlock(&vsi->qos[qp->user_pri].qos_mutex);
    158}
    159
    160/**
    161 * irdma_sc_pd_init - initialize sc pd struct
    162 * @dev: sc device struct
    163 * @pd: sc pd ptr
    164 * @pd_id: pd_id for allocated pd
    165 * @abi_ver: User/Kernel ABI version
    166 */
    167void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_id,
    168		      int abi_ver)
    169{
    170	pd->pd_id = pd_id;
    171	pd->abi_ver = abi_ver;
    172	pd->dev = dev;
    173}
    174
    175/**
    176 * irdma_sc_add_arp_cache_entry - cqp wqe add arp cache entry
    177 * @cqp: struct for cqp hw
    178 * @info: arp entry information
    179 * @scratch: u64 saved to be used during cqp completion
    180 * @post_sq: flag for cqp db to ring
    181 */
    182static int irdma_sc_add_arp_cache_entry(struct irdma_sc_cqp *cqp,
    183					struct irdma_add_arp_cache_entry_info *info,
    184					u64 scratch, bool post_sq)
    185{
    186	__le64 *wqe;
    187	u64 hdr;
    188
    189	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
    190	if (!wqe)
    191		return -ENOMEM;
    192	set_64bit_val(wqe, 8, info->reach_max);
    193	set_64bit_val(wqe, 16, ether_addr_to_u64(info->mac_addr));
    194
    195	hdr = info->arp_index |
    196	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_ARP) |
    197	      FIELD_PREP(IRDMA_CQPSQ_MAT_PERMANENT, (info->permanent ? 1 : 0)) |
    198	      FIELD_PREP(IRDMA_CQPSQ_MAT_ENTRYVALID, 1) |
    199	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
    200	dma_wmb(); /* make sure WQE is written before valid bit is set */
    201
    202	set_64bit_val(wqe, 24, hdr);
    203
    204	print_hex_dump_debug("WQE: ARP_CACHE_ENTRY WQE", DUMP_PREFIX_OFFSET,
    205			     16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
    206	if (post_sq)
    207		irdma_sc_cqp_post_sq(cqp);
    208
    209	return 0;
    210}
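/*
 * The CQP helpers in this file all follow the same posting recipe as the
 * function above; a condensed sketch of the shared pattern (payload offsets
 * vary per opcode, the header always lands at offset 24):
 *
 *	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);	// reserve an SQ slot
 *	if (!wqe)
 *		return -ENOMEM;
 *	set_64bit_val(wqe, off, payload);			// fill payload quad-words
 *	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, op) |
 *	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
 *	dma_wmb();		// payload must be visible before the valid bit flips
 *	set_64bit_val(wqe, 24, hdr);				// header written last
 *	if (post_sq)
 *		irdma_sc_cqp_post_sq(cqp);			// ring the CQP doorbell
 */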
    211
    212/**
     213 * irdma_sc_del_arp_cache_entry - delete arp cache entry
    214 * @cqp: struct for cqp hw
    215 * @scratch: u64 saved to be used during cqp completion
    216 * @arp_index: arp index to delete arp entry
    217 * @post_sq: flag for cqp db to ring
    218 */
    219static int irdma_sc_del_arp_cache_entry(struct irdma_sc_cqp *cqp, u64 scratch,
    220					u16 arp_index, bool post_sq)
    221{
    222	__le64 *wqe;
    223	u64 hdr;
    224
    225	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
    226	if (!wqe)
    227		return -ENOMEM;
    228
    229	hdr = arp_index |
    230	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_ARP) |
    231	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
    232	dma_wmb(); /* make sure WQE is written before valid bit is set */
    233
    234	set_64bit_val(wqe, 24, hdr);
    235
    236	print_hex_dump_debug("WQE: ARP_CACHE_DEL_ENTRY WQE",
    237			     DUMP_PREFIX_OFFSET, 16, 8, wqe,
    238			     IRDMA_CQP_WQE_SIZE * 8, false);
    239	if (post_sq)
    240		irdma_sc_cqp_post_sq(cqp);
    241
    242	return 0;
    243}
    244
    245/**
    246 * irdma_sc_manage_apbvt_entry - for adding and deleting apbvt entries
    247 * @cqp: struct for cqp hw
    248 * @info: info for apbvt entry to add or delete
    249 * @scratch: u64 saved to be used during cqp completion
    250 * @post_sq: flag for cqp db to ring
    251 */
    252static int irdma_sc_manage_apbvt_entry(struct irdma_sc_cqp *cqp,
    253				       struct irdma_apbvt_info *info,
    254				       u64 scratch, bool post_sq)
    255{
    256	__le64 *wqe;
    257	u64 hdr;
    258
    259	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
    260	if (!wqe)
    261		return -ENOMEM;
    262
    263	set_64bit_val(wqe, 16, info->port);
    264
    265	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_APBVT) |
    266	      FIELD_PREP(IRDMA_CQPSQ_MAPT_ADDPORT, info->add) |
    267	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
    268	dma_wmb(); /* make sure WQE is written before valid bit is set */
    269
    270	set_64bit_val(wqe, 24, hdr);
    271
    272	print_hex_dump_debug("WQE: MANAGE_APBVT WQE", DUMP_PREFIX_OFFSET, 16,
    273			     8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
    274	if (post_sq)
    275		irdma_sc_cqp_post_sq(cqp);
    276
    277	return 0;
    278}
    279
    280/**
    281 * irdma_sc_manage_qhash_table_entry - manage quad hash entries
    282 * @cqp: struct for cqp hw
    283 * @info: info for quad hash to manage
    284 * @scratch: u64 saved to be used during cqp completion
    285 * @post_sq: flag for cqp db to ring
    286 *
    287 * This is called before connection establishment is started.
    288 * For passive connections, when listener is created, it will
     289 * call with entry type of IRDMA_QHASH_TYPE_TCP_SYN with local
    290 * ip address and tcp port. When SYN is received (passive
    291 * connections) or sent (active connections), this routine is
    292 * called with entry type of IRDMA_QHASH_TYPE_TCP_ESTABLISHED
    293 * and quad is passed in info.
    294 *
    295 * When iwarp connection is done and its state moves to RTS, the
    296 * quad hash entry in the hardware will point to iwarp's qp
    297 * number and requires no calls from the driver.
    298 */
    299static int
    300irdma_sc_manage_qhash_table_entry(struct irdma_sc_cqp *cqp,
    301				  struct irdma_qhash_table_info *info,
    302				  u64 scratch, bool post_sq)
    303{
    304	__le64 *wqe;
    305	u64 qw1 = 0;
    306	u64 qw2 = 0;
    307	u64 temp;
    308	struct irdma_sc_vsi *vsi = info->vsi;
    309
    310	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
    311	if (!wqe)
    312		return -ENOMEM;
    313
    314	set_64bit_val(wqe, 0, ether_addr_to_u64(info->mac_addr));
    315
    316	qw1 = FIELD_PREP(IRDMA_CQPSQ_QHASH_QPN, info->qp_num) |
    317	      FIELD_PREP(IRDMA_CQPSQ_QHASH_DEST_PORT, info->dest_port);
    318	if (info->ipv4_valid) {
    319		set_64bit_val(wqe, 48,
    320			      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->dest_ip[0]));
    321	} else {
    322		set_64bit_val(wqe, 56,
    323			      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR0, info->dest_ip[0]) |
    324			      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR1, info->dest_ip[1]));
    325
    326		set_64bit_val(wqe, 48,
    327			      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR2, info->dest_ip[2]) |
    328			      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->dest_ip[3]));
    329	}
    330	qw2 = FIELD_PREP(IRDMA_CQPSQ_QHASH_QS_HANDLE,
    331			 vsi->qos[info->user_pri].qs_handle);
    332	if (info->vlan_valid)
    333		qw2 |= FIELD_PREP(IRDMA_CQPSQ_QHASH_VLANID, info->vlan_id);
    334	set_64bit_val(wqe, 16, qw2);
    335	if (info->entry_type == IRDMA_QHASH_TYPE_TCP_ESTABLISHED) {
    336		qw1 |= FIELD_PREP(IRDMA_CQPSQ_QHASH_SRC_PORT, info->src_port);
    337		if (!info->ipv4_valid) {
    338			set_64bit_val(wqe, 40,
    339				      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR0, info->src_ip[0]) |
    340				      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR1, info->src_ip[1]));
    341			set_64bit_val(wqe, 32,
    342				      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR2, info->src_ip[2]) |
    343				      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->src_ip[3]));
    344		} else {
    345			set_64bit_val(wqe, 32,
    346				      FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->src_ip[0]));
    347		}
    348	}
    349
    350	set_64bit_val(wqe, 8, qw1);
    351	temp = FIELD_PREP(IRDMA_CQPSQ_QHASH_WQEVALID, cqp->polarity) |
    352	       FIELD_PREP(IRDMA_CQPSQ_QHASH_OPCODE,
    353			  IRDMA_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY) |
    354	       FIELD_PREP(IRDMA_CQPSQ_QHASH_MANAGE, info->manage) |
    355	       FIELD_PREP(IRDMA_CQPSQ_QHASH_IPV4VALID, info->ipv4_valid) |
    356	       FIELD_PREP(IRDMA_CQPSQ_QHASH_VLANVALID, info->vlan_valid) |
    357	       FIELD_PREP(IRDMA_CQPSQ_QHASH_ENTRYTYPE, info->entry_type);
    358	dma_wmb(); /* make sure WQE is written before valid bit is set */
    359
    360	set_64bit_val(wqe, 24, temp);
    361
    362	print_hex_dump_debug("WQE: MANAGE_QHASH WQE", DUMP_PREFIX_OFFSET, 16,
    363			     8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
    364	if (post_sq)
    365		irdma_sc_cqp_post_sq(cqp);
    366
    367	return 0;
    368}
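/*
 * Rough sketch of the two call flavours described in the comment above. The
 * connection-manager code that fills this struct lives elsewhere in the
 * driver; local_*/remote_* are placeholders, and the local address is
 * assumed to land in dest_ip/dest_port since the hash matches incoming
 * packets:
 *
 *	struct irdma_qhash_table_info info = {};
 *
 *	info.vsi = vsi;
 *	info.ipv4_valid = true;
 *	info.entry_type = IRDMA_QHASH_TYPE_TCP_SYN;	// listener: local 2-tuple only
 *	info.dest_ip[0] = local_ipv4;
 *	info.dest_port = local_tcp_port;
 *
 *	info.entry_type = IRDMA_QHASH_TYPE_TCP_ESTABLISHED;	// full 4-tuple
 *	info.src_ip[0] = remote_ipv4;
 *	info.src_port = remote_tcp_port;
 */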
    369
    370/**
    371 * irdma_sc_qp_init - initialize qp
    372 * @qp: sc qp
    373 * @info: initialization qp info
    374 */
    375int irdma_sc_qp_init(struct irdma_sc_qp *qp, struct irdma_qp_init_info *info)
    376{
    377	int ret_code;
    378	u32 pble_obj_cnt;
    379	u16 wqe_size;
    380
    381	if (info->qp_uk_init_info.max_sq_frag_cnt >
    382	    info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags ||
    383	    info->qp_uk_init_info.max_rq_frag_cnt >
    384	    info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags)
    385		return -EINVAL;
    386
    387	qp->dev = info->pd->dev;
    388	qp->vsi = info->vsi;
    389	qp->ieq_qp = info->vsi->exception_lan_q;
    390	qp->sq_pa = info->sq_pa;
    391	qp->rq_pa = info->rq_pa;
    392	qp->hw_host_ctx_pa = info->host_ctx_pa;
    393	qp->q2_pa = info->q2_pa;
    394	qp->shadow_area_pa = info->shadow_area_pa;
    395	qp->q2_buf = info->q2;
    396	qp->pd = info->pd;
    397	qp->hw_host_ctx = info->host_ctx;
    398	info->qp_uk_init_info.wqe_alloc_db = qp->pd->dev->wqe_alloc_db;
    399	ret_code = irdma_uk_qp_init(&qp->qp_uk, &info->qp_uk_init_info);
    400	if (ret_code)
    401		return ret_code;
    402
    403	qp->virtual_map = info->virtual_map;
    404	pble_obj_cnt = info->pd->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
    405
    406	if ((info->virtual_map && info->sq_pa >= pble_obj_cnt) ||
    407	    (info->virtual_map && info->rq_pa >= pble_obj_cnt))
    408		return -EINVAL;
    409
    410	qp->llp_stream_handle = (void *)(-1);
    411	qp->hw_sq_size = irdma_get_encoded_wqe_size(qp->qp_uk.sq_ring.size,
    412						    IRDMA_QUEUE_TYPE_SQ_RQ);
    413	ibdev_dbg(to_ibdev(qp->dev),
    414		  "WQE: hw_sq_size[%04d] sq_ring.size[%04d]\n",
    415		  qp->hw_sq_size, qp->qp_uk.sq_ring.size);
    416	if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1 && qp->pd->abi_ver > 4)
    417		wqe_size = IRDMA_WQE_SIZE_128;
    418	else
    419		ret_code = irdma_fragcnt_to_wqesize_rq(qp->qp_uk.max_rq_frag_cnt,
    420						       &wqe_size);
    421	if (ret_code)
    422		return ret_code;
    423
    424	qp->hw_rq_size = irdma_get_encoded_wqe_size(qp->qp_uk.rq_size *
    425				(wqe_size / IRDMA_QP_WQE_MIN_SIZE), IRDMA_QUEUE_TYPE_SQ_RQ);
    426	ibdev_dbg(to_ibdev(qp->dev),
    427		  "WQE: hw_rq_size[%04d] qp_uk.rq_size[%04d] wqe_size[%04d]\n",
    428		  qp->hw_rq_size, qp->qp_uk.rq_size, wqe_size);
    429	qp->sq_tph_val = info->sq_tph_val;
    430	qp->rq_tph_val = info->rq_tph_val;
    431	qp->sq_tph_en = info->sq_tph_en;
    432	qp->rq_tph_en = info->rq_tph_en;
    433	qp->rcv_tph_en = info->rcv_tph_en;
    434	qp->xmit_tph_en = info->xmit_tph_en;
    435	qp->qp_uk.first_sq_wq = info->qp_uk_init_info.first_sq_wq;
    436	qp->qs_handle = qp->vsi->qos[qp->user_pri].qs_handle;
    437
    438	return 0;
    439}
    440
    441/**
    442 * irdma_sc_qp_create - create qp
    443 * @qp: sc qp
    444 * @info: qp create info
    445 * @scratch: u64 saved to be used during cqp completion
    446 * @post_sq: flag for cqp db to ring
    447 */
    448int irdma_sc_qp_create(struct irdma_sc_qp *qp, struct irdma_create_qp_info *info,
    449		       u64 scratch, bool post_sq)
    450{
    451	struct irdma_sc_cqp *cqp;
    452	__le64 *wqe;
    453	u64 hdr;
    454
    455	cqp = qp->dev->cqp;
    456	if (qp->qp_uk.qp_id < cqp->dev->hw_attrs.min_hw_qp_id ||
    457	    qp->qp_uk.qp_id >= cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt)
    458		return -EINVAL;
    459
    460	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
    461	if (!wqe)
    462		return -ENOMEM;
    463
    464	set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
    465	set_64bit_val(wqe, 40, qp->shadow_area_pa);
    466
    467	hdr = qp->qp_uk.qp_id |
    468	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_QP) |
    469	      FIELD_PREP(IRDMA_CQPSQ_QP_ORDVALID, (info->ord_valid ? 1 : 0)) |
    470	      FIELD_PREP(IRDMA_CQPSQ_QP_TOECTXVALID, info->tcp_ctx_valid) |
    471	      FIELD_PREP(IRDMA_CQPSQ_QP_MACVALID, info->mac_valid) |
    472	      FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, qp->qp_uk.qp_type) |
    473	      FIELD_PREP(IRDMA_CQPSQ_QP_VQ, qp->virtual_map) |
    474	      FIELD_PREP(IRDMA_CQPSQ_QP_FORCELOOPBACK, info->force_lpb) |
    475	      FIELD_PREP(IRDMA_CQPSQ_QP_CQNUMVALID, info->cq_num_valid) |
    476	      FIELD_PREP(IRDMA_CQPSQ_QP_ARPTABIDXVALID,
    477			 info->arp_cache_idx_valid) |
    478	      FIELD_PREP(IRDMA_CQPSQ_QP_NEXTIWSTATE, info->next_iwarp_state) |
    479	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
    480	dma_wmb(); /* make sure WQE is written before valid bit is set */
    481
    482	set_64bit_val(wqe, 24, hdr);
    483
    484	print_hex_dump_debug("WQE: QP_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8,
    485			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
    486	if (post_sq)
    487		irdma_sc_cqp_post_sq(cqp);
    488
    489	return 0;
    490}
    491
    492/**
    493 * irdma_sc_qp_modify - modify qp cqp wqe
    494 * @qp: sc qp
    495 * @info: modify qp info
    496 * @scratch: u64 saved to be used during cqp completion
    497 * @post_sq: flag for cqp db to ring
    498 */
    499int irdma_sc_qp_modify(struct irdma_sc_qp *qp, struct irdma_modify_qp_info *info,
    500		       u64 scratch, bool post_sq)
    501{
    502	__le64 *wqe;
    503	struct irdma_sc_cqp *cqp;
    504	u64 hdr;
    505	u8 term_actions = 0;
    506	u8 term_len = 0;
    507
    508	cqp = qp->dev->cqp;
    509	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
    510	if (!wqe)
    511		return -ENOMEM;
    512
    513	if (info->next_iwarp_state == IRDMA_QP_STATE_TERMINATE) {
    514		if (info->dont_send_fin)
    515			term_actions += IRDMAQP_TERM_SEND_TERM_ONLY;
    516		if (info->dont_send_term)
    517			term_actions += IRDMAQP_TERM_SEND_FIN_ONLY;
    518		if (term_actions == IRDMAQP_TERM_SEND_TERM_AND_FIN ||
    519		    term_actions == IRDMAQP_TERM_SEND_TERM_ONLY)
    520			term_len = info->termlen;
    521	}
    522
    523	set_64bit_val(wqe, 8,
    524		      FIELD_PREP(IRDMA_CQPSQ_QP_NEWMSS, info->new_mss) |
    525		      FIELD_PREP(IRDMA_CQPSQ_QP_TERMLEN, term_len));
    526	set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
    527	set_64bit_val(wqe, 40, qp->shadow_area_pa);
    528
    529	hdr = qp->qp_uk.qp_id |
    530	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MODIFY_QP) |
    531	      FIELD_PREP(IRDMA_CQPSQ_QP_ORDVALID, info->ord_valid) |
    532	      FIELD_PREP(IRDMA_CQPSQ_QP_TOECTXVALID, info->tcp_ctx_valid) |
    533	      FIELD_PREP(IRDMA_CQPSQ_QP_CACHEDVARVALID,
    534			 info->cached_var_valid) |
    535	      FIELD_PREP(IRDMA_CQPSQ_QP_VQ, qp->virtual_map) |
    536	      FIELD_PREP(IRDMA_CQPSQ_QP_FORCELOOPBACK, info->force_lpb) |
    537	      FIELD_PREP(IRDMA_CQPSQ_QP_CQNUMVALID, info->cq_num_valid) |
    538	      FIELD_PREP(IRDMA_CQPSQ_QP_MACVALID, info->mac_valid) |
    539	      FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, qp->qp_uk.qp_type) |
    540	      FIELD_PREP(IRDMA_CQPSQ_QP_MSSCHANGE, info->mss_change) |
    541	      FIELD_PREP(IRDMA_CQPSQ_QP_REMOVEHASHENTRY,
    542			 info->remove_hash_idx) |
    543	      FIELD_PREP(IRDMA_CQPSQ_QP_TERMACT, term_actions) |
    544	      FIELD_PREP(IRDMA_CQPSQ_QP_RESETCON, info->reset_tcp_conn) |
    545	      FIELD_PREP(IRDMA_CQPSQ_QP_ARPTABIDXVALID,
    546			 info->arp_cache_idx_valid) |
    547	      FIELD_PREP(IRDMA_CQPSQ_QP_NEXTIWSTATE, info->next_iwarp_state) |
    548	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
    549	dma_wmb(); /* make sure WQE is written before valid bit is set */
    550
    551	set_64bit_val(wqe, 24, hdr);
    552
    553	print_hex_dump_debug("WQE: QP_MODIFY WQE", DUMP_PREFIX_OFFSET, 16, 8,
    554			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
    555	if (post_sq)
    556		irdma_sc_cqp_post_sq(cqp);
    557
    558	return 0;
    559}
    560
    561/**
    562 * irdma_sc_qp_destroy - cqp destroy qp
    563 * @qp: sc qp
    564 * @scratch: u64 saved to be used during cqp completion
     565 * @remove_hash_idx: flag to remove hash idx
    566 * @ignore_mw_bnd: memory window bind flag
    567 * @post_sq: flag for cqp db to ring
    568 */
    569int irdma_sc_qp_destroy(struct irdma_sc_qp *qp, u64 scratch,
    570			bool remove_hash_idx, bool ignore_mw_bnd, bool post_sq)
    571{
    572	__le64 *wqe;
    573	struct irdma_sc_cqp *cqp;
    574	u64 hdr;
    575
    576	cqp = qp->dev->cqp;
    577	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
    578	if (!wqe)
    579		return -ENOMEM;
    580
    581	set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
    582	set_64bit_val(wqe, 40, qp->shadow_area_pa);
    583
    584	hdr = qp->qp_uk.qp_id |
    585	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_QP) |
    586	      FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, qp->qp_uk.qp_type) |
    587	      FIELD_PREP(IRDMA_CQPSQ_QP_IGNOREMWBOUND, ignore_mw_bnd) |
    588	      FIELD_PREP(IRDMA_CQPSQ_QP_REMOVEHASHENTRY, remove_hash_idx) |
    589	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
    590	dma_wmb(); /* make sure WQE is written before valid bit is set */
    591
    592	set_64bit_val(wqe, 24, hdr);
    593
    594	print_hex_dump_debug("WQE: QP_DESTROY WQE", DUMP_PREFIX_OFFSET, 16, 8,
    595			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
    596	if (post_sq)
    597		irdma_sc_cqp_post_sq(cqp);
    598
    599	return 0;
    600}
    601
    602/**
     603 * irdma_sc_get_encoded_ird_size - get encoded IRD size
     604 * @ird_size: IRD size
     605 * The ird from the connection is rounded to a supported HW setting and then encoded
     606 * in the ird_size field of the qp_ctx. Consumers are expected to provide a valid
     607 * ird size based on hardware attributes. IRD size defaults to 4 for invalid input.
    608 */
    609static u8 irdma_sc_get_encoded_ird_size(u16 ird_size)
    610{
    611	switch (ird_size ?
    612		roundup_pow_of_two(2 * ird_size) : 4) {
    613	case 256:
    614		return IRDMA_IRD_HW_SIZE_256;
    615	case 128:
    616		return IRDMA_IRD_HW_SIZE_128;
    617	case 64:
    618	case 32:
    619		return IRDMA_IRD_HW_SIZE_64;
    620	case 16:
    621	case 8:
    622		return IRDMA_IRD_HW_SIZE_16;
    623	case 4:
    624	default:
    625		break;
    626	}
    627
    628	return IRDMA_IRD_HW_SIZE_4;
    629}
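/*
 * Worked examples of the mapping above (the switch is keyed on
 * roundup_pow_of_two(2 * ird_size), so the doubled-and-rounded value is what
 * gets matched):
 *
 *	ird_size 0   -> forced to 4          -> IRDMA_IRD_HW_SIZE_4
 *	ird_size 3   -> roundup(6)   = 8     -> IRDMA_IRD_HW_SIZE_16
 *	ird_size 10  -> roundup(20)  = 32    -> IRDMA_IRD_HW_SIZE_64
 *	ird_size 64  -> roundup(128) = 128   -> IRDMA_IRD_HW_SIZE_128
 *	ird_size 200 -> roundup(400) = 512   -> default, IRDMA_IRD_HW_SIZE_4
 */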
    630
    631/**
    632 * irdma_sc_qp_setctx_roce - set qp's context
    633 * @qp: sc qp
    634 * @qp_ctx: context ptr
    635 * @info: ctx info
    636 */
    637void irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 *qp_ctx,
    638			     struct irdma_qp_host_ctx_info *info)
    639{
    640	struct irdma_roce_offload_info *roce_info;
    641	struct irdma_udp_offload_info *udp;
    642	u8 push_mode_en;
    643	u32 push_idx;
    644
    645	roce_info = info->roce_info;
    646	udp = info->udp_info;
    647	qp->user_pri = info->user_pri;
    648	if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX) {
    649		push_mode_en = 0;
    650		push_idx = 0;
    651	} else {
    652		push_mode_en = 1;
    653		push_idx = qp->push_idx;
    654	}
    655	set_64bit_val(qp_ctx, 0,
    656		      FIELD_PREP(IRDMAQPC_RQWQESIZE, qp->qp_uk.rq_wqe_size) |
    657		      FIELD_PREP(IRDMAQPC_RCVTPHEN, qp->rcv_tph_en) |
    658		      FIELD_PREP(IRDMAQPC_XMITTPHEN, qp->xmit_tph_en) |
    659		      FIELD_PREP(IRDMAQPC_RQTPHEN, qp->rq_tph_en) |
    660		      FIELD_PREP(IRDMAQPC_SQTPHEN, qp->sq_tph_en) |
    661		      FIELD_PREP(IRDMAQPC_PPIDX, push_idx) |
    662		      FIELD_PREP(IRDMAQPC_PMENA, push_mode_en) |
    663		      FIELD_PREP(IRDMAQPC_PDIDXHI, roce_info->pd_id >> 16) |
    664		      FIELD_PREP(IRDMAQPC_DC_TCP_EN, roce_info->dctcp_en) |
    665		      FIELD_PREP(IRDMAQPC_ERR_RQ_IDX_VALID, roce_info->err_rq_idx_valid) |
    666		      FIELD_PREP(IRDMAQPC_ISQP1, roce_info->is_qp1) |
    667		      FIELD_PREP(IRDMAQPC_ROCE_TVER, roce_info->roce_tver) |
    668		      FIELD_PREP(IRDMAQPC_IPV4, udp->ipv4) |
    669		      FIELD_PREP(IRDMAQPC_INSERTVLANTAG, udp->insert_vlan_tag));
    670	set_64bit_val(qp_ctx, 8, qp->sq_pa);
    671	set_64bit_val(qp_ctx, 16, qp->rq_pa);
    672	if ((roce_info->dcqcn_en || roce_info->dctcp_en) &&
    673	    !(udp->tos & 0x03))
    674		udp->tos |= ECN_CODE_PT_VAL;
    675	set_64bit_val(qp_ctx, 24,
    676		      FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) |
    677		      FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size) |
    678		      FIELD_PREP(IRDMAQPC_TTL, udp->ttl) | FIELD_PREP(IRDMAQPC_TOS, udp->tos) |
    679		      FIELD_PREP(IRDMAQPC_SRCPORTNUM, udp->src_port) |
    680		      FIELD_PREP(IRDMAQPC_DESTPORTNUM, udp->dst_port));
    681	set_64bit_val(qp_ctx, 32,
    682		      FIELD_PREP(IRDMAQPC_DESTIPADDR2, udp->dest_ip_addr[2]) |
    683		      FIELD_PREP(IRDMAQPC_DESTIPADDR3, udp->dest_ip_addr[3]));
    684	set_64bit_val(qp_ctx, 40,
    685		      FIELD_PREP(IRDMAQPC_DESTIPADDR0, udp->dest_ip_addr[0]) |
    686		      FIELD_PREP(IRDMAQPC_DESTIPADDR1, udp->dest_ip_addr[1]));
    687	set_64bit_val(qp_ctx, 48,
    688		      FIELD_PREP(IRDMAQPC_SNDMSS, udp->snd_mss) |
    689		      FIELD_PREP(IRDMAQPC_VLANTAG, udp->vlan_tag) |
    690		      FIELD_PREP(IRDMAQPC_ARPIDX, udp->arp_idx));
    691	set_64bit_val(qp_ctx, 56,
    692		      FIELD_PREP(IRDMAQPC_PKEY, roce_info->p_key) |
    693		      FIELD_PREP(IRDMAQPC_PDIDX, roce_info->pd_id) |
    694		      FIELD_PREP(IRDMAQPC_ACKCREDITS, roce_info->ack_credits) |
    695		      FIELD_PREP(IRDMAQPC_FLOWLABEL, udp->flow_label));
    696	set_64bit_val(qp_ctx, 64,
    697		      FIELD_PREP(IRDMAQPC_QKEY, roce_info->qkey) |
    698		      FIELD_PREP(IRDMAQPC_DESTQP, roce_info->dest_qp));
    699	set_64bit_val(qp_ctx, 80,
    700		      FIELD_PREP(IRDMAQPC_PSNNXT, udp->psn_nxt) |
    701		      FIELD_PREP(IRDMAQPC_LSN, udp->lsn));
    702	set_64bit_val(qp_ctx, 88,
    703		      FIELD_PREP(IRDMAQPC_EPSN, udp->epsn));
    704	set_64bit_val(qp_ctx, 96,
    705		      FIELD_PREP(IRDMAQPC_PSNMAX, udp->psn_max) |
    706		      FIELD_PREP(IRDMAQPC_PSNUNA, udp->psn_una));
    707	set_64bit_val(qp_ctx, 112,
    708		      FIELD_PREP(IRDMAQPC_CWNDROCE, udp->cwnd));
    709	set_64bit_val(qp_ctx, 128,
    710		      FIELD_PREP(IRDMAQPC_ERR_RQ_IDX, roce_info->err_rq_idx) |
    711		      FIELD_PREP(IRDMAQPC_RNRNAK_THRESH, udp->rnr_nak_thresh) |
    712		      FIELD_PREP(IRDMAQPC_REXMIT_THRESH, udp->rexmit_thresh) |
    713		      FIELD_PREP(IRDMAQPC_RTOMIN, roce_info->rtomin));
    714	set_64bit_val(qp_ctx, 136,
    715		      FIELD_PREP(IRDMAQPC_TXCQNUM, info->send_cq_num) |
    716		      FIELD_PREP(IRDMAQPC_RXCQNUM, info->rcv_cq_num));
    717	set_64bit_val(qp_ctx, 144,
    718		      FIELD_PREP(IRDMAQPC_STAT_INDEX, info->stats_idx));
    719	set_64bit_val(qp_ctx, 152, ether_addr_to_u64(roce_info->mac_addr) << 16);
    720	set_64bit_val(qp_ctx, 160,
    721		      FIELD_PREP(IRDMAQPC_ORDSIZE, roce_info->ord_size) |
    722		      FIELD_PREP(IRDMAQPC_IRDSIZE, irdma_sc_get_encoded_ird_size(roce_info->ird_size)) |
    723		      FIELD_PREP(IRDMAQPC_WRRDRSPOK, roce_info->wr_rdresp_en) |
    724		      FIELD_PREP(IRDMAQPC_RDOK, roce_info->rd_en) |
    725		      FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, info->stats_idx_valid) |
    726		      FIELD_PREP(IRDMAQPC_BINDEN, roce_info->bind_en) |
    727		      FIELD_PREP(IRDMAQPC_FASTREGEN, roce_info->fast_reg_en) |
    728		      FIELD_PREP(IRDMAQPC_DCQCNENABLE, roce_info->dcqcn_en) |
    729		      FIELD_PREP(IRDMAQPC_RCVNOICRC, roce_info->rcv_no_icrc) |
    730		      FIELD_PREP(IRDMAQPC_FW_CC_ENABLE, roce_info->fw_cc_enable) |
    731		      FIELD_PREP(IRDMAQPC_UDPRIVCQENABLE, roce_info->udprivcq_en) |
    732		      FIELD_PREP(IRDMAQPC_PRIVEN, roce_info->priv_mode_en) |
    733		      FIELD_PREP(IRDMAQPC_TIMELYENABLE, roce_info->timely_en));
    734	set_64bit_val(qp_ctx, 168,
    735		      FIELD_PREP(IRDMAQPC_QPCOMPCTX, info->qp_compl_ctx));
    736	set_64bit_val(qp_ctx, 176,
    737		      FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) |
    738		      FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) |
    739		      FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle));
    740	set_64bit_val(qp_ctx, 184,
    741		      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR3, udp->local_ipaddr[3]) |
    742		      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR2, udp->local_ipaddr[2]));
    743	set_64bit_val(qp_ctx, 192,
    744		      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR1, udp->local_ipaddr[1]) |
    745		      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR0, udp->local_ipaddr[0]));
    746	set_64bit_val(qp_ctx, 200,
    747		      FIELD_PREP(IRDMAQPC_THIGH, roce_info->t_high) |
    748		      FIELD_PREP(IRDMAQPC_TLOW, roce_info->t_low));
    749	set_64bit_val(qp_ctx, 208,
    750		      FIELD_PREP(IRDMAQPC_REMENDPOINTIDX, info->rem_endpoint_idx));
    751
    752	print_hex_dump_debug("WQE: QP_HOST CTX WQE", DUMP_PREFIX_OFFSET, 16,
    753			     8, qp_ctx, IRDMA_QP_CTX_SIZE, false);
    754}
    755
    756/* irdma_sc_alloc_local_mac_entry - allocate a mac entry
    757 * @cqp: struct for cqp hw
    758 * @scratch: u64 saved to be used during cqp completion
    759 * @post_sq: flag for cqp db to ring
    760 */
    761static int irdma_sc_alloc_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
    762					  bool post_sq)
    763{
    764	__le64 *wqe;
    765	u64 hdr;
    766
    767	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
    768	if (!wqe)
    769		return -ENOMEM;
    770
    771	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE,
    772			 IRDMA_CQP_OP_ALLOCATE_LOC_MAC_TABLE_ENTRY) |
    773	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
    774
    775	dma_wmb(); /* make sure WQE is written before valid bit is set */
    776
    777	set_64bit_val(wqe, 24, hdr);
    778
    779	print_hex_dump_debug("WQE: ALLOCATE_LOCAL_MAC WQE",
    780			     DUMP_PREFIX_OFFSET, 16, 8, wqe,
    781			     IRDMA_CQP_WQE_SIZE * 8, false);
    782
    783	if (post_sq)
    784		irdma_sc_cqp_post_sq(cqp);
    785	return 0;
    786}
    787
    788/**
     789 * irdma_sc_add_local_mac_entry - add mac entry
     790 * @cqp: struct for cqp hw
     791 * @info: mac addr info
    792 * @scratch: u64 saved to be used during cqp completion
    793 * @post_sq: flag for cqp db to ring
    794 */
    795static int irdma_sc_add_local_mac_entry(struct irdma_sc_cqp *cqp,
    796					struct irdma_local_mac_entry_info *info,
    797					u64 scratch, bool post_sq)
    798{
    799	__le64 *wqe;
    800	u64 header;
    801
    802	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
    803	if (!wqe)
    804		return -ENOMEM;
    805
    806	set_64bit_val(wqe, 32, ether_addr_to_u64(info->mac_addr));
    807
    808	header = FIELD_PREP(IRDMA_CQPSQ_MLM_TABLEIDX, info->entry_idx) |
    809		 FIELD_PREP(IRDMA_CQPSQ_OPCODE,
    810			    IRDMA_CQP_OP_MANAGE_LOC_MAC_TABLE) |
    811		 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
    812
    813	dma_wmb(); /* make sure WQE is written before valid bit is set */
    814
    815	set_64bit_val(wqe, 24, header);
    816
    817	print_hex_dump_debug("WQE: ADD_LOCAL_MAC WQE", DUMP_PREFIX_OFFSET, 16,
    818			     8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
    819
    820	if (post_sq)
    821		irdma_sc_cqp_post_sq(cqp);
    822	return 0;
    823}
    824
    825/**
     826 * irdma_sc_del_local_mac_entry - cqp wqe to delete local mac
    827 * @cqp: struct for cqp hw
    828 * @scratch: u64 saved to be used during cqp completion
    829 * @entry_idx: index of mac entry
     830 * @ignore_ref_count: to force mac entry delete
    831 * @post_sq: flag for cqp db to ring
    832 */
    833static int irdma_sc_del_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
    834					u16 entry_idx, u8 ignore_ref_count,
    835					bool post_sq)
    836{
    837	__le64 *wqe;
    838	u64 header;
    839
    840	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
    841	if (!wqe)
    842		return -ENOMEM;
    843	header = FIELD_PREP(IRDMA_CQPSQ_MLM_TABLEIDX, entry_idx) |
    844		 FIELD_PREP(IRDMA_CQPSQ_OPCODE,
    845			    IRDMA_CQP_OP_MANAGE_LOC_MAC_TABLE) |
    846		 FIELD_PREP(IRDMA_CQPSQ_MLM_FREEENTRY, 1) |
    847		 FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity) |
    848		 FIELD_PREP(IRDMA_CQPSQ_MLM_IGNORE_REF_CNT, ignore_ref_count);
    849
    850	dma_wmb(); /* make sure WQE is written before valid bit is set */
    851
    852	set_64bit_val(wqe, 24, header);
    853
    854	print_hex_dump_debug("WQE: DEL_LOCAL_MAC_IPADDR WQE",
    855			     DUMP_PREFIX_OFFSET, 16, 8, wqe,
    856			     IRDMA_CQP_WQE_SIZE * 8, false);
    857
    858	if (post_sq)
    859		irdma_sc_cqp_post_sq(cqp);
    860	return 0;
    861}
    862
    863/**
    864 * irdma_sc_qp_setctx - set qp's context
    865 * @qp: sc qp
    866 * @qp_ctx: context ptr
    867 * @info: ctx info
    868 */
    869void irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 *qp_ctx,
    870			struct irdma_qp_host_ctx_info *info)
    871{
    872	struct irdma_iwarp_offload_info *iw;
    873	struct irdma_tcp_offload_info *tcp;
    874	struct irdma_sc_dev *dev;
    875	u8 push_mode_en;
    876	u32 push_idx;
    877	u64 qw0, qw3, qw7 = 0, qw16 = 0;
    878	u64 mac = 0;
    879
    880	iw = info->iwarp_info;
    881	tcp = info->tcp_info;
    882	dev = qp->dev;
    883	if (iw->rcv_mark_en) {
    884		qp->pfpdu.marker_len = 4;
    885		qp->pfpdu.rcv_start_seq = tcp->rcv_nxt;
    886	}
    887	qp->user_pri = info->user_pri;
    888	if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX) {
    889		push_mode_en = 0;
    890		push_idx = 0;
    891	} else {
    892		push_mode_en = 1;
    893		push_idx = qp->push_idx;
    894	}
    895	qw0 = FIELD_PREP(IRDMAQPC_RQWQESIZE, qp->qp_uk.rq_wqe_size) |
    896	      FIELD_PREP(IRDMAQPC_RCVTPHEN, qp->rcv_tph_en) |
    897	      FIELD_PREP(IRDMAQPC_XMITTPHEN, qp->xmit_tph_en) |
    898	      FIELD_PREP(IRDMAQPC_RQTPHEN, qp->rq_tph_en) |
    899	      FIELD_PREP(IRDMAQPC_SQTPHEN, qp->sq_tph_en) |
    900	      FIELD_PREP(IRDMAQPC_PPIDX, push_idx) |
    901	      FIELD_PREP(IRDMAQPC_PMENA, push_mode_en);
    902
    903	set_64bit_val(qp_ctx, 8, qp->sq_pa);
    904	set_64bit_val(qp_ctx, 16, qp->rq_pa);
    905
    906	qw3 = FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) |
    907	      FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size);
    908	if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
    909		qw3 |= FIELD_PREP(IRDMAQPC_GEN1_SRCMACADDRIDX,
    910				  qp->src_mac_addr_idx);
    911	set_64bit_val(qp_ctx, 136,
    912		      FIELD_PREP(IRDMAQPC_TXCQNUM, info->send_cq_num) |
    913		      FIELD_PREP(IRDMAQPC_RXCQNUM, info->rcv_cq_num));
    914	set_64bit_val(qp_ctx, 168,
    915		      FIELD_PREP(IRDMAQPC_QPCOMPCTX, info->qp_compl_ctx));
    916	set_64bit_val(qp_ctx, 176,
    917		      FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) |
    918		      FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) |
    919		      FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle) |
    920		      FIELD_PREP(IRDMAQPC_EXCEPTION_LAN_QUEUE, qp->ieq_qp));
    921	if (info->iwarp_info_valid) {
    922		qw0 |= FIELD_PREP(IRDMAQPC_DDP_VER, iw->ddp_ver) |
    923		       FIELD_PREP(IRDMAQPC_RDMAP_VER, iw->rdmap_ver) |
    924		       FIELD_PREP(IRDMAQPC_DC_TCP_EN, iw->dctcp_en) |
    925		       FIELD_PREP(IRDMAQPC_ECN_EN, iw->ecn_en) |
    926		       FIELD_PREP(IRDMAQPC_IBRDENABLE, iw->ib_rd_en) |
    927		       FIELD_PREP(IRDMAQPC_PDIDXHI, iw->pd_id >> 16) |
    928		       FIELD_PREP(IRDMAQPC_ERR_RQ_IDX_VALID,
    929				  iw->err_rq_idx_valid);
    930		qw7 |= FIELD_PREP(IRDMAQPC_PDIDX, iw->pd_id);
    931		qw16 |= FIELD_PREP(IRDMAQPC_ERR_RQ_IDX, iw->err_rq_idx) |
    932			FIELD_PREP(IRDMAQPC_RTOMIN, iw->rtomin);
    933		set_64bit_val(qp_ctx, 144,
    934			      FIELD_PREP(IRDMAQPC_Q2ADDR, qp->q2_pa >> 8) |
    935			      FIELD_PREP(IRDMAQPC_STAT_INDEX, info->stats_idx));
    936
    937		if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
    938			mac = ether_addr_to_u64(iw->mac_addr);
    939
    940		set_64bit_val(qp_ctx, 152,
    941			      mac << 16 | FIELD_PREP(IRDMAQPC_LASTBYTESENT, iw->last_byte_sent));
    942		set_64bit_val(qp_ctx, 160,
    943			      FIELD_PREP(IRDMAQPC_ORDSIZE, iw->ord_size) |
    944			      FIELD_PREP(IRDMAQPC_IRDSIZE, irdma_sc_get_encoded_ird_size(iw->ird_size)) |
    945			      FIELD_PREP(IRDMAQPC_WRRDRSPOK, iw->wr_rdresp_en) |
    946			      FIELD_PREP(IRDMAQPC_RDOK, iw->rd_en) |
    947			      FIELD_PREP(IRDMAQPC_SNDMARKERS, iw->snd_mark_en) |
    948			      FIELD_PREP(IRDMAQPC_BINDEN, iw->bind_en) |
    949			      FIELD_PREP(IRDMAQPC_FASTREGEN, iw->fast_reg_en) |
    950			      FIELD_PREP(IRDMAQPC_PRIVEN, iw->priv_mode_en) |
    951			      FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, info->stats_idx_valid) |
    952			      FIELD_PREP(IRDMAQPC_IWARPMODE, 1) |
    953			      FIELD_PREP(IRDMAQPC_RCVMARKERS, iw->rcv_mark_en) |
    954			      FIELD_PREP(IRDMAQPC_ALIGNHDRS, iw->align_hdrs) |
    955			      FIELD_PREP(IRDMAQPC_RCVNOMPACRC, iw->rcv_no_mpa_crc) |
    956			      FIELD_PREP(IRDMAQPC_RCVMARKOFFSET, iw->rcv_mark_offset || !tcp ? iw->rcv_mark_offset : tcp->rcv_nxt) |
    957			      FIELD_PREP(IRDMAQPC_SNDMARKOFFSET, iw->snd_mark_offset || !tcp ? iw->snd_mark_offset : tcp->snd_nxt) |
    958			      FIELD_PREP(IRDMAQPC_TIMELYENABLE, iw->timely_en));
    959	}
    960	if (info->tcp_info_valid) {
    961		qw0 |= FIELD_PREP(IRDMAQPC_IPV4, tcp->ipv4) |
    962		       FIELD_PREP(IRDMAQPC_NONAGLE, tcp->no_nagle) |
    963		       FIELD_PREP(IRDMAQPC_INSERTVLANTAG,
    964				  tcp->insert_vlan_tag) |
    965		       FIELD_PREP(IRDMAQPC_TIMESTAMP, tcp->time_stamp) |
    966		       FIELD_PREP(IRDMAQPC_LIMIT, tcp->cwnd_inc_limit) |
    967		       FIELD_PREP(IRDMAQPC_DROPOOOSEG, tcp->drop_ooo_seg) |
    968		       FIELD_PREP(IRDMAQPC_DUPACK_THRESH, tcp->dup_ack_thresh);
    969
    970		if ((iw->ecn_en || iw->dctcp_en) && !(tcp->tos & 0x03))
    971			tcp->tos |= ECN_CODE_PT_VAL;
    972
    973		qw3 |= FIELD_PREP(IRDMAQPC_TTL, tcp->ttl) |
    974		       FIELD_PREP(IRDMAQPC_AVOIDSTRETCHACK, tcp->avoid_stretch_ack) |
    975		       FIELD_PREP(IRDMAQPC_TOS, tcp->tos) |
    976		       FIELD_PREP(IRDMAQPC_SRCPORTNUM, tcp->src_port) |
    977		       FIELD_PREP(IRDMAQPC_DESTPORTNUM, tcp->dst_port);
    978		if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) {
    979			qw3 |= FIELD_PREP(IRDMAQPC_GEN1_SRCMACADDRIDX, tcp->src_mac_addr_idx);
    980
    981			qp->src_mac_addr_idx = tcp->src_mac_addr_idx;
    982		}
    983		set_64bit_val(qp_ctx, 32,
    984			      FIELD_PREP(IRDMAQPC_DESTIPADDR2, tcp->dest_ip_addr[2]) |
    985			      FIELD_PREP(IRDMAQPC_DESTIPADDR3, tcp->dest_ip_addr[3]));
    986		set_64bit_val(qp_ctx, 40,
    987			      FIELD_PREP(IRDMAQPC_DESTIPADDR0, tcp->dest_ip_addr[0]) |
    988			      FIELD_PREP(IRDMAQPC_DESTIPADDR1, tcp->dest_ip_addr[1]));
    989		set_64bit_val(qp_ctx, 48,
    990			      FIELD_PREP(IRDMAQPC_SNDMSS, tcp->snd_mss) |
    991			      FIELD_PREP(IRDMAQPC_SYN_RST_HANDLING, tcp->syn_rst_handling) |
    992			      FIELD_PREP(IRDMAQPC_VLANTAG, tcp->vlan_tag) |
    993			      FIELD_PREP(IRDMAQPC_ARPIDX, tcp->arp_idx));
    994		qw7 |= FIELD_PREP(IRDMAQPC_FLOWLABEL, tcp->flow_label) |
    995		       FIELD_PREP(IRDMAQPC_WSCALE, tcp->wscale) |
    996		       FIELD_PREP(IRDMAQPC_IGNORE_TCP_OPT,
    997				  tcp->ignore_tcp_opt) |
    998		       FIELD_PREP(IRDMAQPC_IGNORE_TCP_UNS_OPT,
    999				  tcp->ignore_tcp_uns_opt) |
   1000		       FIELD_PREP(IRDMAQPC_TCPSTATE, tcp->tcp_state) |
   1001		       FIELD_PREP(IRDMAQPC_RCVSCALE, tcp->rcv_wscale) |
   1002		       FIELD_PREP(IRDMAQPC_SNDSCALE, tcp->snd_wscale);
   1003		set_64bit_val(qp_ctx, 72,
   1004			      FIELD_PREP(IRDMAQPC_TIMESTAMP_RECENT, tcp->time_stamp_recent) |
   1005			      FIELD_PREP(IRDMAQPC_TIMESTAMP_AGE, tcp->time_stamp_age));
   1006		set_64bit_val(qp_ctx, 80,
   1007			      FIELD_PREP(IRDMAQPC_SNDNXT, tcp->snd_nxt) |
   1008			      FIELD_PREP(IRDMAQPC_SNDWND, tcp->snd_wnd));
   1009		set_64bit_val(qp_ctx, 88,
   1010			      FIELD_PREP(IRDMAQPC_RCVNXT, tcp->rcv_nxt) |
   1011			      FIELD_PREP(IRDMAQPC_RCVWND, tcp->rcv_wnd));
   1012		set_64bit_val(qp_ctx, 96,
   1013			      FIELD_PREP(IRDMAQPC_SNDMAX, tcp->snd_max) |
   1014			      FIELD_PREP(IRDMAQPC_SNDUNA, tcp->snd_una));
   1015		set_64bit_val(qp_ctx, 104,
   1016			      FIELD_PREP(IRDMAQPC_SRTT, tcp->srtt) |
   1017			      FIELD_PREP(IRDMAQPC_RTTVAR, tcp->rtt_var));
   1018		set_64bit_val(qp_ctx, 112,
   1019			      FIELD_PREP(IRDMAQPC_SSTHRESH, tcp->ss_thresh) |
   1020			      FIELD_PREP(IRDMAQPC_CWND, tcp->cwnd));
   1021		set_64bit_val(qp_ctx, 120,
   1022			      FIELD_PREP(IRDMAQPC_SNDWL1, tcp->snd_wl1) |
   1023			      FIELD_PREP(IRDMAQPC_SNDWL2, tcp->snd_wl2));
   1024		qw16 |= FIELD_PREP(IRDMAQPC_MAXSNDWND, tcp->max_snd_window) |
   1025			FIELD_PREP(IRDMAQPC_REXMIT_THRESH, tcp->rexmit_thresh);
   1026		set_64bit_val(qp_ctx, 184,
   1027			      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR3, tcp->local_ipaddr[3]) |
   1028			      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR2, tcp->local_ipaddr[2]));
   1029		set_64bit_val(qp_ctx, 192,
   1030			      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR1, tcp->local_ipaddr[1]) |
   1031			      FIELD_PREP(IRDMAQPC_LOCAL_IPADDR0, tcp->local_ipaddr[0]));
   1032		set_64bit_val(qp_ctx, 200,
   1033			      FIELD_PREP(IRDMAQPC_THIGH, iw->t_high) |
   1034			      FIELD_PREP(IRDMAQPC_TLOW, iw->t_low));
   1035		set_64bit_val(qp_ctx, 208,
   1036			      FIELD_PREP(IRDMAQPC_REMENDPOINTIDX, info->rem_endpoint_idx));
   1037	}
   1038
   1039	set_64bit_val(qp_ctx, 0, qw0);
   1040	set_64bit_val(qp_ctx, 24, qw3);
   1041	set_64bit_val(qp_ctx, 56, qw7);
   1042	set_64bit_val(qp_ctx, 128, qw16);
   1043
   1044	print_hex_dump_debug("WQE: QP_HOST CTX", DUMP_PREFIX_OFFSET, 16, 8,
   1045			     qp_ctx, IRDMA_QP_CTX_SIZE, false);
   1046}
   1047
   1048/**
   1049 * irdma_sc_alloc_stag - mr stag alloc
   1050 * @dev: sc device struct
   1051 * @info: stag info
   1052 * @scratch: u64 saved to be used during cqp completion
   1053 * @post_sq: flag for cqp db to ring
   1054 */
   1055static int irdma_sc_alloc_stag(struct irdma_sc_dev *dev,
   1056			       struct irdma_allocate_stag_info *info,
   1057			       u64 scratch, bool post_sq)
   1058{
   1059	__le64 *wqe;
   1060	struct irdma_sc_cqp *cqp;
   1061	u64 hdr;
   1062	enum irdma_page_size page_size;
   1063
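	/* map the region's host page size to the HW encoding:
	 * 0x40000000 (1 GiB) -> IRDMA_PAGE_SIZE_1G, 0x200000 (2 MiB) ->
	 * IRDMA_PAGE_SIZE_2M, anything else is treated as 4 KiB
	 */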
   1064	if (info->page_size == 0x40000000)
   1065		page_size = IRDMA_PAGE_SIZE_1G;
   1066	else if (info->page_size == 0x200000)
   1067		page_size = IRDMA_PAGE_SIZE_2M;
   1068	else
   1069		page_size = IRDMA_PAGE_SIZE_4K;
   1070
   1071	cqp = dev->cqp;
   1072	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
   1073	if (!wqe)
   1074		return -ENOMEM;
   1075
   1076	set_64bit_val(wqe, 8,
   1077		      FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID) |
   1078		      FIELD_PREP(IRDMA_CQPSQ_STAG_STAGLEN, info->total_len));
   1079	set_64bit_val(wqe, 16,
   1080		      FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));
   1081	set_64bit_val(wqe, 40,
   1082		      FIELD_PREP(IRDMA_CQPSQ_STAG_HMCFNIDX, info->hmc_fcn_index));
   1083
   1084	if (info->chunk_size)
   1085		set_64bit_val(wqe, 48,
   1086			      FIELD_PREP(IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX, info->first_pm_pbl_idx));
   1087
   1088	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_ALLOC_STAG) |
   1089	      FIELD_PREP(IRDMA_CQPSQ_STAG_MR, 1) |
   1090	      FIELD_PREP(IRDMA_CQPSQ_STAG_ARIGHTS, info->access_rights) |
   1091	      FIELD_PREP(IRDMA_CQPSQ_STAG_LPBLSIZE, info->chunk_size) |
   1092	      FIELD_PREP(IRDMA_CQPSQ_STAG_HPAGESIZE, page_size) |
   1093	      FIELD_PREP(IRDMA_CQPSQ_STAG_REMACCENABLED, info->remote_access) |
   1094	      FIELD_PREP(IRDMA_CQPSQ_STAG_USEHMCFNIDX, info->use_hmc_fcn_index) |
   1095	      FIELD_PREP(IRDMA_CQPSQ_STAG_USEPFRID, info->use_pf_rid) |
   1096	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
   1097	dma_wmb(); /* make sure WQE is written before valid bit is set */
   1098
   1099	set_64bit_val(wqe, 24, hdr);
   1100
   1101	print_hex_dump_debug("WQE: ALLOC_STAG WQE", DUMP_PREFIX_OFFSET, 16, 8,
   1102			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
   1103	if (post_sq)
   1104		irdma_sc_cqp_post_sq(cqp);
   1105
   1106	return 0;
   1107}
   1108
   1109/**
   1110 * irdma_sc_mr_reg_non_shared - non-shared mr registration
   1111 * @dev: sc device struct
   1112 * @info: mr info
   1113 * @scratch: u64 saved to be used during cqp completion
   1114 * @post_sq: flag for cqp db to ring
   1115 */
   1116static int irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev,
   1117				      struct irdma_reg_ns_stag_info *info,
   1118				      u64 scratch, bool post_sq)
   1119{
   1120	__le64 *wqe;
   1121	u64 fbo;
   1122	struct irdma_sc_cqp *cqp;
   1123	u64 hdr;
   1124	u32 pble_obj_cnt;
   1125	bool remote_access;
   1126	u8 addr_type;
   1127	enum irdma_page_size page_size;
   1128
   1129	if (info->page_size == 0x40000000)
   1130		page_size = IRDMA_PAGE_SIZE_1G;
   1131	else if (info->page_size == 0x200000)
   1132		page_size = IRDMA_PAGE_SIZE_2M;
   1133	else if (info->page_size == 0x1000)
   1134		page_size = IRDMA_PAGE_SIZE_4K;
   1135	else
   1136		return -EINVAL;
   1137
   1138	if (info->access_rights & (IRDMA_ACCESS_FLAGS_REMOTEREAD_ONLY |
   1139				   IRDMA_ACCESS_FLAGS_REMOTEWRITE_ONLY))
   1140		remote_access = true;
   1141	else
   1142		remote_access = false;
   1143
   1144	pble_obj_cnt = dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
   1145	if (info->chunk_size && info->first_pm_pbl_index >= pble_obj_cnt)
   1146		return -EINVAL;
   1147
   1148	cqp = dev->cqp;
   1149	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
   1150	if (!wqe)
   1151		return -ENOMEM;
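	/* fbo: first byte offset of the VA within its page; page_size is a
	 * power of two, so masking with (page_size - 1) yields the offset
	 */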
   1152	fbo = info->va & (info->page_size - 1);
   1153
   1154	set_64bit_val(wqe, 0,
   1155		      (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED ?
   1156		      info->va : fbo));
   1157	set_64bit_val(wqe, 8,
   1158		      FIELD_PREP(IRDMA_CQPSQ_STAG_STAGLEN, info->total_len) |
   1159		      FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
   1160	set_64bit_val(wqe, 16,
   1161		      FIELD_PREP(IRDMA_CQPSQ_STAG_KEY, info->stag_key) |
   1162		      FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));
   1163	if (!info->chunk_size) {
   1164		set_64bit_val(wqe, 32, info->reg_addr_pa);
   1165		set_64bit_val(wqe, 48, 0);
   1166	} else {
   1167		set_64bit_val(wqe, 32, 0);
   1168		set_64bit_val(wqe, 48,
   1169			      FIELD_PREP(IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX, info->first_pm_pbl_index));
   1170	}
   1171	set_64bit_val(wqe, 40, info->hmc_fcn_index);
   1172	set_64bit_val(wqe, 56, 0);
   1173
   1174	addr_type = (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED) ? 1 : 0;
   1175	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_REG_MR) |
   1176	      FIELD_PREP(IRDMA_CQPSQ_STAG_MR, 1) |
   1177	      FIELD_PREP(IRDMA_CQPSQ_STAG_LPBLSIZE, info->chunk_size) |
   1178	      FIELD_PREP(IRDMA_CQPSQ_STAG_HPAGESIZE, page_size) |
   1179	      FIELD_PREP(IRDMA_CQPSQ_STAG_ARIGHTS, info->access_rights) |
   1180	      FIELD_PREP(IRDMA_CQPSQ_STAG_REMACCENABLED, remote_access) |
   1181	      FIELD_PREP(IRDMA_CQPSQ_STAG_VABASEDTO, addr_type) |
   1182	      FIELD_PREP(IRDMA_CQPSQ_STAG_USEHMCFNIDX, info->use_hmc_fcn_index) |
   1183	      FIELD_PREP(IRDMA_CQPSQ_STAG_USEPFRID, info->use_pf_rid) |
   1184	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
   1185	dma_wmb(); /* make sure WQE is written before valid bit is set */
   1186
   1187	set_64bit_val(wqe, 24, hdr);
   1188
   1189	print_hex_dump_debug("WQE: MR_REG_NS WQE", DUMP_PREFIX_OFFSET, 16, 8,
   1190			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
   1191	if (post_sq)
   1192		irdma_sc_cqp_post_sq(cqp);
   1193
   1194	return 0;
   1195}
   1196
   1197/**
   1198 * irdma_sc_dealloc_stag - deallocate stag
   1199 * @dev: sc device struct
   1200 * @info: dealloc stag info
   1201 * @scratch: u64 saved to be used during cqp completion
   1202 * @post_sq: flag for cqp db to ring
   1203 */
   1204static int irdma_sc_dealloc_stag(struct irdma_sc_dev *dev,
   1205				 struct irdma_dealloc_stag_info *info,
   1206				 u64 scratch, bool post_sq)
   1207{
   1208	u64 hdr;
   1209	__le64 *wqe;
   1210	struct irdma_sc_cqp *cqp;
   1211
   1212	cqp = dev->cqp;
   1213	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
   1214	if (!wqe)
   1215		return -ENOMEM;
   1216
   1217	set_64bit_val(wqe, 8,
   1218		      FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
   1219	set_64bit_val(wqe, 16,
   1220		      FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));
   1221
   1222	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DEALLOC_STAG) |
   1223	      FIELD_PREP(IRDMA_CQPSQ_STAG_MR, info->mr) |
   1224	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
   1225	dma_wmb(); /* make sure WQE is written before valid bit is set */
   1226
   1227	set_64bit_val(wqe, 24, hdr);
   1228
   1229	print_hex_dump_debug("WQE: DEALLOC_STAG WQE", DUMP_PREFIX_OFFSET, 16,
   1230			     8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
   1231	if (post_sq)
   1232		irdma_sc_cqp_post_sq(cqp);
   1233
   1234	return 0;
   1235}
   1236
   1237/**
   1238 * irdma_sc_mw_alloc - mw allocate
   1239 * @dev: sc device struct
   1240 * @info: memory window allocation information
   1241 * @scratch: u64 saved to be used during cqp completion
   1242 * @post_sq: flag for cqp db to ring
   1243 */
   1244static int irdma_sc_mw_alloc(struct irdma_sc_dev *dev,
   1245			     struct irdma_mw_alloc_info *info, u64 scratch,
   1246			     bool post_sq)
   1247{
   1248	u64 hdr;
   1249	struct irdma_sc_cqp *cqp;
   1250	__le64 *wqe;
   1251
   1252	cqp = dev->cqp;
   1253	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
   1254	if (!wqe)
   1255		return -ENOMEM;
   1256
   1257	set_64bit_val(wqe, 8,
   1258		      FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
   1259	set_64bit_val(wqe, 16,
   1260		      FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->mw_stag_index));
   1261
   1262	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_ALLOC_STAG) |
   1263	      FIELD_PREP(IRDMA_CQPSQ_STAG_MWTYPE, info->mw_wide) |
   1264	      FIELD_PREP(IRDMA_CQPSQ_STAG_MW1_BIND_DONT_VLDT_KEY,
   1265			 info->mw1_bind_dont_vldt_key) |
   1266	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
   1267	dma_wmb(); /* make sure WQE is written before valid bit is set */
   1268
   1269	set_64bit_val(wqe, 24, hdr);
   1270
   1271	print_hex_dump_debug("WQE: MW_ALLOC WQE", DUMP_PREFIX_OFFSET, 16, 8,
   1272			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
   1273	if (post_sq)
   1274		irdma_sc_cqp_post_sq(cqp);
   1275
   1276	return 0;
   1277}
   1278
   1279/**
   1280 * irdma_sc_mr_fast_register - Posts RDMA fast register mr WR to iwarp qp
   1281 * @qp: sc qp struct
   1282 * @info: fast mr info
   1283 * @post_sq: flag for cqp db to ring
   1284 */
   1285int irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
   1286			      struct irdma_fast_reg_stag_info *info,
   1287			      bool post_sq)
   1288{
   1289	u64 temp, hdr;
   1290	__le64 *wqe;
   1291	u32 wqe_idx;
   1292	enum irdma_page_size page_size;
   1293	struct irdma_post_sq_info sq_info = {};
   1294
   1295	if (info->page_size == 0x40000000)
   1296		page_size = IRDMA_PAGE_SIZE_1G;
   1297	else if (info->page_size == 0x200000)
   1298		page_size = IRDMA_PAGE_SIZE_2M;
   1299	else
   1300		page_size = IRDMA_PAGE_SIZE_4K;
   1301
   1302	sq_info.wr_id = info->wr_id;
   1303	sq_info.signaled = info->signaled;
   1304	sq_info.push_wqe = info->push_wqe;
   1305
   1306	wqe = irdma_qp_get_next_send_wqe(&qp->qp_uk, &wqe_idx,
   1307					 IRDMA_QP_WQE_MIN_QUANTA, 0, &sq_info);
   1308	if (!wqe)
   1309		return -ENOMEM;
   1310
   1311	irdma_clr_wqes(&qp->qp_uk, wqe_idx);
   1312
   1313	ibdev_dbg(to_ibdev(qp->dev),
   1314		  "MR: wr_id[%llxh] wqe_idx[%04d] location[%p]\n",
   1315		  info->wr_id, wqe_idx,
   1316		  &qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid);
   1317
   1318	temp = (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED) ?
   1319		(uintptr_t)info->va : info->fbo;
   1320	set_64bit_val(wqe, 0, temp);
   1321
   1322	temp = FIELD_GET(IRDMAQPSQ_FIRSTPMPBLIDXHI,
   1323			 info->first_pm_pbl_index >> 16);
   1324	set_64bit_val(wqe, 8,
   1325		      FIELD_PREP(IRDMAQPSQ_FIRSTPMPBLIDXHI, temp) |
   1326		      FIELD_PREP(IRDMAQPSQ_PBLADDR >> IRDMA_HW_PAGE_SHIFT, info->reg_addr_pa));
   1327	set_64bit_val(wqe, 16,
   1328		      info->total_len |
   1329		      FIELD_PREP(IRDMAQPSQ_FIRSTPMPBLIDXLO, info->first_pm_pbl_index));
   1330
   1331	hdr = FIELD_PREP(IRDMAQPSQ_STAGKEY, info->stag_key) |
   1332	      FIELD_PREP(IRDMAQPSQ_STAGINDEX, info->stag_idx) |
   1333	      FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_FAST_REGISTER) |
   1334	      FIELD_PREP(IRDMAQPSQ_LPBLSIZE, info->chunk_size) |
   1335	      FIELD_PREP(IRDMAQPSQ_HPAGESIZE, page_size) |
   1336	      FIELD_PREP(IRDMAQPSQ_STAGRIGHTS, info->access_rights) |
   1337	      FIELD_PREP(IRDMAQPSQ_VABASEDTO, info->addr_type) |
   1338	      FIELD_PREP(IRDMAQPSQ_PUSHWQE, (sq_info.push_wqe ? 1 : 0)) |
   1339	      FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
   1340	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
   1341	      FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
   1342	      FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
   1343	dma_wmb(); /* make sure WQE is written before valid bit is set */
   1344
   1345	set_64bit_val(wqe, 24, hdr);
   1346
   1347	print_hex_dump_debug("WQE: FAST_REG WQE", DUMP_PREFIX_OFFSET, 16, 8,
   1348			     wqe, IRDMA_QP_WQE_MIN_SIZE, false);
   1349	if (sq_info.push_wqe) {
   1350		irdma_qp_push_wqe(&qp->qp_uk, wqe, IRDMA_QP_WQE_MIN_QUANTA,
   1351				  wqe_idx, post_sq);
   1352	} else {
   1353		if (post_sq)
   1354			irdma_uk_qp_post_wr(&qp->qp_uk);
   1355	}
   1356
   1357	return 0;
   1358}
   1359
   1360/**
   1361 * irdma_sc_gen_rts_ae - request AE generated after RTS
   1362 * @qp: sc qp struct
   1363 */
   1364static void irdma_sc_gen_rts_ae(struct irdma_sc_qp *qp)
   1365{
   1366	__le64 *wqe;
   1367	u64 hdr;
   1368	struct irdma_qp_uk *qp_uk;
   1369
   1370	qp_uk = &qp->qp_uk;
   1371
   1372	wqe = qp_uk->sq_base[1].elem;
   1373
   1374	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
   1375	      FIELD_PREP(IRDMAQPSQ_LOCALFENCE, 1) |
   1376	      FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
   1377	dma_wmb(); /* make sure WQE is written before valid bit is set */
   1378
   1379	set_64bit_val(wqe, 24, hdr);
   1380	print_hex_dump_debug("QP: NOP W/LOCAL FENCE WQE", DUMP_PREFIX_OFFSET,
   1381			     16, 8, wqe, IRDMA_QP_WQE_MIN_SIZE, false);
   1382
   1383	wqe = qp_uk->sq_base[2].elem;
   1384	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_GEN_RTS_AE) |
   1385	      FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
   1386	dma_wmb(); /* make sure WQE is written before valid bit is set */
   1387
   1388	set_64bit_val(wqe, 24, hdr);
   1389	print_hex_dump_debug("QP: CONN EST WQE", DUMP_PREFIX_OFFSET, 16, 8,
   1390			     wqe, IRDMA_QP_WQE_MIN_SIZE, false);
   1391}
   1392
   1393/**
   1394 * irdma_sc_send_lsmm - send last streaming mode message
   1395 * @qp: sc qp struct
   1396 * @lsmm_buf: buffer with lsmm message
   1397 * @size: size of lsmm buffer
   1398 * @stag: stag of lsmm buffer
   1399 */
   1400void irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size,
   1401			irdma_stag stag)
   1402{
   1403	__le64 *wqe;
   1404	u64 hdr;
   1405	struct irdma_qp_uk *qp_uk;
   1406
   1407	qp_uk = &qp->qp_uk;
   1408	wqe = qp_uk->sq_base->elem;
   1409
   1410	set_64bit_val(wqe, 0, (uintptr_t)lsmm_buf);
   1411	if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
   1412		set_64bit_val(wqe, 8,
   1413			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, size) |
   1414			      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, stag));
   1415	} else {
   1416		set_64bit_val(wqe, 8,
   1417			      FIELD_PREP(IRDMAQPSQ_FRAG_LEN, size) |
   1418			      FIELD_PREP(IRDMAQPSQ_FRAG_STAG, stag) |
   1419			      FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity));
   1420	}
   1421	set_64bit_val(wqe, 16, 0);
   1422
   1423	hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_SEND) |
   1424	      FIELD_PREP(IRDMAQPSQ_STREAMMODE, 1) |
   1425	      FIELD_PREP(IRDMAQPSQ_WAITFORRCVPDU, 1) |
   1426	      FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
   1427	dma_wmb(); /* make sure WQE is written before valid bit is set */
   1428
   1429	set_64bit_val(wqe, 24, hdr);
   1430
   1431	print_hex_dump_debug("WQE: SEND_LSMM WQE", DUMP_PREFIX_OFFSET, 16, 8,
   1432			     wqe, IRDMA_QP_WQE_MIN_SIZE, false);
   1433
   1434	if (qp->dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_RTS_AE)
   1435		irdma_sc_gen_rts_ae(qp);
   1436}
   1437
   1438/**
   1439 * irdma_sc_send_rtt - send last read0 or write0
   1440 * @qp: sc qp struct
   1441 * @read: Do read0 or write0
   1442 */
   1443void irdma_sc_send_rtt(struct irdma_sc_qp *qp, bool read)
   1444{
   1445	__le64 *wqe;
   1446	u64 hdr;
   1447	struct irdma_qp_uk *qp_uk;
   1448
   1449	qp_uk = &qp->qp_uk;
   1450	wqe = qp_uk->sq_base->elem;
   1451
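        	/* Build a zero-length RDMA read (using the dummy stag values
        	 * 0xabcd and 0x1234) or a zero-length RDMA write in SQ slot 0.
        	 */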
   1452	set_64bit_val(wqe, 0, 0);
   1453	set_64bit_val(wqe, 16, 0);
   1454	if (read) {
   1455		if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
   1456			set_64bit_val(wqe, 8,
   1457				      FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, 0xabcd));
   1458		} else {
   1459			set_64bit_val(wqe, 8,
   1460				      (u64)0xabcd | FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity));
   1461		}
   1462		hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, 0x1234) |
   1463		      FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_READ) |
   1464		      FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
   1465
   1466	} else {
   1467		if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
   1468			set_64bit_val(wqe, 8, 0);
   1469		} else {
   1470			set_64bit_val(wqe, 8,
   1471				      FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity));
   1472		}
   1473		hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_WRITE) |
   1474		      FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
   1475	}
   1476
   1477	dma_wmb(); /* make sure WQE is written before valid bit is set */
   1478
   1479	set_64bit_val(wqe, 24, hdr);
   1480
   1481	print_hex_dump_debug("WQE: RTR WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe,
   1482			     IRDMA_QP_WQE_MIN_SIZE, false);
   1483
   1484	if (qp->dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_RTS_AE)
   1485		irdma_sc_gen_rts_ae(qp);
   1486}
   1487
   1488/**
    1489 * irdma_iwarp_opcode - extract the iwarp opcode from an incoming packet
   1490 * @info: aeq info for the packet
   1491 * @pkt: packet for error
   1492 */
   1493static u32 irdma_iwarp_opcode(struct irdma_aeqe_info *info, u8 *pkt)
   1494{
   1495	__be16 *mpa;
   1496	u32 opcode = 0xffffffff;
   1497
   1498	if (info->q2_data_written) {
   1499		mpa = (__be16 *)pkt;
   1500		opcode = ntohs(mpa[1]) & 0xf;
   1501	}
   1502
   1503	return opcode;
   1504}
   1505
   1506/**
   1507 * irdma_locate_mpa - return pointer to mpa in the pkt
   1508 * @pkt: packet with data
   1509 */
   1510static u8 *irdma_locate_mpa(u8 *pkt)
   1511{
   1512	/* skip over ethernet header */
   1513	pkt += IRDMA_MAC_HLEN;
   1514
   1515	/* Skip over IP and TCP headers */
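        	/* pkt[0] low nibble is the IP header length and pkt[12] high
        	 * nibble is the TCP data offset; both are counted in 4-byte words.
        	 */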
   1516	pkt += 4 * (pkt[0] & 0x0f);
   1517	pkt += 4 * ((pkt[12] >> 4) & 0x0f);
   1518
   1519	return pkt;
   1520}
   1521
   1522/**
   1523 * irdma_bld_termhdr_ctrl - setup terminate hdr control fields
   1524 * @qp: sc qp ptr for pkt
   1525 * @hdr: term hdr
   1526 * @opcode: flush opcode for termhdr
   1527 * @layer_etype: error layer + error type
    1528 * @err: error code in the header
   1529 */
   1530static void irdma_bld_termhdr_ctrl(struct irdma_sc_qp *qp,
   1531				   struct irdma_terminate_hdr *hdr,
   1532				   enum irdma_flush_opcode opcode,
   1533				   u8 layer_etype, u8 err)
   1534{
   1535	qp->flush_code = opcode;
   1536	hdr->layer_etype = layer_etype;
   1537	hdr->error_code = err;
   1538}
   1539
   1540/**
   1541 * irdma_bld_termhdr_ddp_rdma - setup ddp and rdma hdrs in terminate hdr
   1542 * @pkt: ptr to mpa in offending pkt
   1543 * @hdr: term hdr
   1544 * @copy_len: offending pkt length to be copied to term hdr
   1545 * @is_tagged: DDP tagged or untagged
   1546 */
   1547static void irdma_bld_termhdr_ddp_rdma(u8 *pkt, struct irdma_terminate_hdr *hdr,
   1548				       int *copy_len, u8 *is_tagged)
   1549{
   1550	u16 ddp_seg_len;
   1551
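        	/* copy_len reports how many bytes of the offending frame (the MPA
        	 * length field, the DDP header and, for read requests, the RDMA
        	 * read request header) get echoed back in the terminate payload.
        	 */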
   1552	ddp_seg_len = ntohs(*(__be16 *)pkt);
   1553	if (ddp_seg_len) {
   1554		*copy_len = 2;
   1555		hdr->hdrct = DDP_LEN_FLAG;
   1556		if (pkt[2] & 0x80) {
   1557			*is_tagged = 1;
   1558			if (ddp_seg_len >= TERM_DDP_LEN_TAGGED) {
   1559				*copy_len += TERM_DDP_LEN_TAGGED;
   1560				hdr->hdrct |= DDP_HDR_FLAG;
   1561			}
   1562		} else {
   1563			if (ddp_seg_len >= TERM_DDP_LEN_UNTAGGED) {
   1564				*copy_len += TERM_DDP_LEN_UNTAGGED;
   1565				hdr->hdrct |= DDP_HDR_FLAG;
   1566			}
   1567			if (ddp_seg_len >= (TERM_DDP_LEN_UNTAGGED + TERM_RDMA_LEN) &&
   1568			    ((pkt[3] & RDMA_OPCODE_M) == RDMA_READ_REQ_OPCODE)) {
   1569				*copy_len += TERM_RDMA_LEN;
   1570				hdr->hdrct |= RDMA_HDR_FLAG;
   1571			}
   1572		}
   1573	}
   1574}
   1575
   1576/**
   1577 * irdma_bld_terminate_hdr - build terminate message header
   1578 * @qp: qp associated with received terminate AE
    1579 * @info: the struct containing AE information
   1580 */
   1581static int irdma_bld_terminate_hdr(struct irdma_sc_qp *qp,
   1582				   struct irdma_aeqe_info *info)
   1583{
   1584	u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
   1585	int copy_len = 0;
   1586	u8 is_tagged = 0;
   1587	u32 opcode;
   1588	struct irdma_terminate_hdr *termhdr;
   1589
   1590	termhdr = (struct irdma_terminate_hdr *)qp->q2_buf;
   1591	memset(termhdr, 0, Q2_BAD_FRAME_OFFSET);
   1592
   1593	if (info->q2_data_written) {
   1594		pkt = irdma_locate_mpa(pkt);
   1595		irdma_bld_termhdr_ddp_rdma(pkt, termhdr, &copy_len, &is_tagged);
   1596	}
   1597
   1598	opcode = irdma_iwarp_opcode(info, pkt);
   1599	qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
   1600	qp->sq_flush_code = info->sq;
   1601	qp->rq_flush_code = info->rq;
   1602
   1603	switch (info->ae_id) {
   1604	case IRDMA_AE_AMP_UNALLOCATED_STAG:
   1605		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
   1606		if (opcode == IRDMA_OP_TYPE_RDMA_WRITE)
   1607			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_PROT_ERR,
   1608					       (LAYER_DDP << 4) | DDP_TAGGED_BUF,
   1609					       DDP_TAGGED_INV_STAG);
   1610		else
   1611			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
   1612					       (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
   1613					       RDMAP_INV_STAG);
   1614		break;
   1615	case IRDMA_AE_AMP_BOUNDS_VIOLATION:
   1616		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
   1617		if (info->q2_data_written)
   1618			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_PROT_ERR,
   1619					       (LAYER_DDP << 4) | DDP_TAGGED_BUF,
   1620					       DDP_TAGGED_BOUNDS);
   1621		else
   1622			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
   1623					       (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
   1624					       RDMAP_INV_BOUNDS);
   1625		break;
   1626	case IRDMA_AE_AMP_BAD_PD:
   1627		switch (opcode) {
   1628		case IRDMA_OP_TYPE_RDMA_WRITE:
   1629			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_PROT_ERR,
   1630					       (LAYER_DDP << 4) | DDP_TAGGED_BUF,
   1631					       DDP_TAGGED_UNASSOC_STAG);
   1632			break;
   1633		case IRDMA_OP_TYPE_SEND_INV:
   1634		case IRDMA_OP_TYPE_SEND_SOL_INV:
   1635			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
   1636					       (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
   1637					       RDMAP_CANT_INV_STAG);
   1638			break;
   1639		default:
   1640			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
   1641					       (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
   1642					       RDMAP_UNASSOC_STAG);
   1643		}
   1644		break;
   1645	case IRDMA_AE_AMP_INVALID_STAG:
   1646		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
   1647		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
   1648				       (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
   1649				       RDMAP_INV_STAG);
   1650		break;
   1651	case IRDMA_AE_AMP_BAD_QP:
   1652		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_LOC_QP_OP_ERR,
   1653				       (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
   1654				       DDP_UNTAGGED_INV_QN);
   1655		break;
   1656	case IRDMA_AE_AMP_BAD_STAG_KEY:
   1657	case IRDMA_AE_AMP_BAD_STAG_INDEX:
   1658		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
   1659		switch (opcode) {
   1660		case IRDMA_OP_TYPE_SEND_INV:
   1661		case IRDMA_OP_TYPE_SEND_SOL_INV:
   1662			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_OP_ERR,
   1663					       (LAYER_RDMA << 4) | RDMAP_REMOTE_OP,
   1664					       RDMAP_CANT_INV_STAG);
   1665			break;
   1666		default:
   1667			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
   1668					       (LAYER_RDMA << 4) | RDMAP_REMOTE_OP,
   1669					       RDMAP_INV_STAG);
   1670		}
   1671		break;
   1672	case IRDMA_AE_AMP_RIGHTS_VIOLATION:
   1673	case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
   1674	case IRDMA_AE_PRIV_OPERATION_DENIED:
   1675		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
   1676		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
   1677				       (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
   1678				       RDMAP_ACCESS);
   1679		break;
   1680	case IRDMA_AE_AMP_TO_WRAP:
   1681		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
   1682		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
   1683				       (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
   1684				       RDMAP_TO_WRAP);
   1685		break;
   1686	case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
   1687		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
   1688				       (LAYER_MPA << 4) | DDP_LLP, MPA_CRC);
   1689		break;
   1690	case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
   1691		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_LOC_LEN_ERR,
   1692				       (LAYER_DDP << 4) | DDP_CATASTROPHIC,
   1693				       DDP_CATASTROPHIC_LOCAL);
   1694		break;
   1695	case IRDMA_AE_LCE_QP_CATASTROPHIC:
   1696	case IRDMA_AE_DDP_NO_L_BIT:
   1697		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_FATAL_ERR,
   1698				       (LAYER_DDP << 4) | DDP_CATASTROPHIC,
   1699				       DDP_CATASTROPHIC_LOCAL);
   1700		break;
   1701	case IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN:
   1702		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
   1703				       (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
   1704				       DDP_UNTAGGED_INV_MSN_RANGE);
   1705		break;
   1706	case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
   1707		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
   1708		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_LOC_LEN_ERR,
   1709				       (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
   1710				       DDP_UNTAGGED_INV_TOO_LONG);
   1711		break;
   1712	case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION:
   1713		if (is_tagged)
   1714			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
   1715					       (LAYER_DDP << 4) | DDP_TAGGED_BUF,
   1716					       DDP_TAGGED_INV_DDP_VER);
   1717		else
   1718			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
   1719					       (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
   1720					       DDP_UNTAGGED_INV_DDP_VER);
   1721		break;
   1722	case IRDMA_AE_DDP_UBE_INVALID_MO:
   1723		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
   1724				       (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
   1725				       DDP_UNTAGGED_INV_MO);
   1726		break;
   1727	case IRDMA_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
   1728		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_OP_ERR,
   1729				       (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
   1730				       DDP_UNTAGGED_INV_MSN_NO_BUF);
   1731		break;
   1732	case IRDMA_AE_DDP_UBE_INVALID_QN:
   1733		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
   1734				       (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
   1735				       DDP_UNTAGGED_INV_QN);
   1736		break;
   1737	case IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
   1738		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
   1739				       (LAYER_RDMA << 4) | RDMAP_REMOTE_OP,
   1740				       RDMAP_INV_RDMAP_VER);
   1741		break;
   1742	default:
   1743		irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_FATAL_ERR,
   1744				       (LAYER_RDMA << 4) | RDMAP_REMOTE_OP,
   1745				       RDMAP_UNSPECIFIED);
   1746		break;
   1747	}
   1748
   1749	if (copy_len)
   1750		memcpy(termhdr + 1, pkt, copy_len);
   1751
   1752	return sizeof(struct irdma_terminate_hdr) + copy_len;
   1753}
   1754
   1755/**
   1756 * irdma_terminate_send_fin() - Send fin for terminate message
   1757 * @qp: qp associated with received terminate AE
   1758 */
   1759void irdma_terminate_send_fin(struct irdma_sc_qp *qp)
   1760{
   1761	irdma_term_modify_qp(qp, IRDMA_QP_STATE_TERMINATE,
   1762			     IRDMAQP_TERM_SEND_FIN_ONLY, 0);
   1763}
   1764
   1765/**
    1766 * irdma_terminate_connection() - handle a bad AE and send a terminate to the remote QP
   1767 * @qp: qp associated with received terminate AE
    1768 * @info: the struct containing AE information
   1769 */
   1770void irdma_terminate_connection(struct irdma_sc_qp *qp,
   1771				struct irdma_aeqe_info *info)
   1772{
   1773	u8 termlen = 0;
   1774
   1775	if (qp->term_flags & IRDMA_TERM_SENT)
   1776		return;
   1777
   1778	termlen = irdma_bld_terminate_hdr(qp, info);
   1779	irdma_terminate_start_timer(qp);
   1780	qp->term_flags |= IRDMA_TERM_SENT;
   1781	irdma_term_modify_qp(qp, IRDMA_QP_STATE_TERMINATE,
   1782			     IRDMAQP_TERM_SEND_TERM_ONLY, termlen);
   1783}
   1784
   1785/**
   1786 * irdma_terminate_received - handle terminate received AE
   1787 * @qp: qp associated with received terminate AE
    1788 * @info: the struct containing AE information
   1789 */
   1790void irdma_terminate_received(struct irdma_sc_qp *qp,
   1791			      struct irdma_aeqe_info *info)
   1792{
   1793	u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
   1794	__be32 *mpa;
   1795	u8 ddp_ctl;
   1796	u8 rdma_ctl;
   1797	u16 aeq_id = 0;
   1798	struct irdma_terminate_hdr *termhdr;
   1799
   1800	mpa = (__be32 *)irdma_locate_mpa(pkt);
   1801	if (info->q2_data_written) {
   1802		/* did not validate the frame - do it now */
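        		/* Sanity-check the DDP/RDMAP control fields of the received
        		 * terminate: DDP version, untagged queue number (2), MSN (1),
        		 * MO (0) and RDMAP version.
        		 */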
   1803		ddp_ctl = (ntohl(mpa[0]) >> 8) & 0xff;
   1804		rdma_ctl = ntohl(mpa[0]) & 0xff;
   1805		if ((ddp_ctl & 0xc0) != 0x40)
   1806			aeq_id = IRDMA_AE_LCE_QP_CATASTROPHIC;
   1807		else if ((ddp_ctl & 0x03) != 1)
   1808			aeq_id = IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION;
   1809		else if (ntohl(mpa[2]) != 2)
   1810			aeq_id = IRDMA_AE_DDP_UBE_INVALID_QN;
   1811		else if (ntohl(mpa[3]) != 1)
   1812			aeq_id = IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN;
   1813		else if (ntohl(mpa[4]) != 0)
   1814			aeq_id = IRDMA_AE_DDP_UBE_INVALID_MO;
   1815		else if ((rdma_ctl & 0xc0) != 0x40)
   1816			aeq_id = IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION;
   1817
   1818		info->ae_id = aeq_id;
   1819		if (info->ae_id) {
   1820			/* Bad terminate recvd - send back a terminate */
   1821			irdma_terminate_connection(qp, info);
   1822			return;
   1823		}
   1824	}
   1825
   1826	qp->term_flags |= IRDMA_TERM_RCVD;
   1827	qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
   1828	termhdr = (struct irdma_terminate_hdr *)&mpa[5];
   1829	if (termhdr->layer_etype == RDMAP_REMOTE_PROT ||
   1830	    termhdr->layer_etype == RDMAP_REMOTE_OP) {
   1831		irdma_terminate_done(qp, 0);
   1832	} else {
   1833		irdma_terminate_start_timer(qp);
   1834		irdma_terminate_send_fin(qp);
   1835	}
   1836}
   1837
   1838static int irdma_null_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
   1839{
   1840	return 0;
   1841}
   1842
   1843static void irdma_null_ws_remove(struct irdma_sc_vsi *vsi, u8 user_pri)
   1844{
   1845	/* do nothing */
   1846}
   1847
   1848static void irdma_null_ws_reset(struct irdma_sc_vsi *vsi)
   1849{
   1850	/* do nothing */
   1851}
   1852
   1853/**
   1854 * irdma_sc_vsi_init - Init the vsi structure
   1855 * @vsi: pointer to vsi structure to initialize
   1856 * @info: the info used to initialize the vsi struct
   1857 */
   1858void irdma_sc_vsi_init(struct irdma_sc_vsi  *vsi,
   1859		       struct irdma_vsi_init_info *info)
   1860{
   1861	int i;
   1862
   1863	vsi->dev = info->dev;
   1864	vsi->back_vsi = info->back_vsi;
   1865	vsi->register_qset = info->register_qset;
   1866	vsi->unregister_qset = info->unregister_qset;
   1867	vsi->mtu = info->params->mtu;
   1868	vsi->exception_lan_q = info->exception_lan_q;
   1869	vsi->vsi_idx = info->pf_data_vsi_num;
   1870	if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
   1871		vsi->fcn_id = info->dev->hmc_fn_id;
   1872
   1873	irdma_set_qos_info(vsi, info->params);
   1874	for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
   1875		mutex_init(&vsi->qos[i].qos_mutex);
   1876		INIT_LIST_HEAD(&vsi->qos[i].qplist);
   1877	}
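        	/* Without a qset registration callback, fall back to the no-op
        	 * work scheduler hooks.
        	 */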
   1878	if (vsi->register_qset) {
   1879		vsi->dev->ws_add = irdma_ws_add;
   1880		vsi->dev->ws_remove = irdma_ws_remove;
   1881		vsi->dev->ws_reset = irdma_ws_reset;
   1882	} else {
   1883		vsi->dev->ws_add = irdma_null_ws_add;
   1884		vsi->dev->ws_remove = irdma_null_ws_remove;
   1885		vsi->dev->ws_reset = irdma_null_ws_reset;
   1886	}
   1887}
   1888
   1889/**
   1890 * irdma_get_fcn_id - Return the function id
   1891 * @vsi: pointer to the vsi
   1892 */
   1893static u8 irdma_get_fcn_id(struct irdma_sc_vsi *vsi)
   1894{
   1895	struct irdma_stats_inst_info stats_info = {};
   1896	struct irdma_sc_dev *dev = vsi->dev;
   1897	u8 fcn_id = IRDMA_INVALID_FCN_ID;
   1898	u8 start_idx, max_stats, i;
   1899
   1900	if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1) {
   1901		if (!irdma_cqp_stats_inst_cmd(vsi, IRDMA_OP_STATS_ALLOCATE,
   1902					      &stats_info))
   1903			return stats_info.stats_idx;
   1904	}
   1905
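        	/* GEN_1 (or CQP failure) fallback: claim the first free slot of
        	 * the 16 locally tracked stats function ids, skipping index 0.
        	 */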
   1906	start_idx = 1;
   1907	max_stats = 16;
   1908	for (i = start_idx; i < max_stats; i++)
   1909		if (!dev->fcn_id_array[i]) {
   1910			fcn_id = i;
   1911			dev->fcn_id_array[i] = true;
   1912			break;
   1913		}
   1914
   1915	return fcn_id;
   1916}
   1917
   1918/**
   1919 * irdma_vsi_stats_init - Initialize the vsi statistics
   1920 * @vsi: pointer to the vsi structure
   1921 * @info: The info structure used for initialization
   1922 */
   1923int irdma_vsi_stats_init(struct irdma_sc_vsi *vsi,
   1924			 struct irdma_vsi_stats_info *info)
   1925{
   1926	u8 fcn_id = info->fcn_id;
   1927	struct irdma_dma_mem *stats_buff_mem;
   1928
   1929	vsi->pestat = info->pestat;
   1930	vsi->pestat->hw = vsi->dev->hw;
   1931	vsi->pestat->vsi = vsi;
   1932	stats_buff_mem = &vsi->pestat->gather_info.stats_buff_mem;
   1933	stats_buff_mem->size = ALIGN(IRDMA_GATHER_STATS_BUF_SIZE * 2, 1);
   1934	stats_buff_mem->va = dma_alloc_coherent(vsi->pestat->hw->device,
   1935						stats_buff_mem->size,
   1936						&stats_buff_mem->pa,
   1937						GFP_KERNEL);
   1938	if (!stats_buff_mem->va)
   1939		return -ENOMEM;
   1940
   1941	vsi->pestat->gather_info.gather_stats_va = stats_buff_mem->va;
   1942	vsi->pestat->gather_info.last_gather_stats_va =
   1943		(void *)((uintptr_t)stats_buff_mem->va +
   1944			 IRDMA_GATHER_STATS_BUF_SIZE);
   1945
   1946	irdma_hw_stats_start_timer(vsi);
   1947	if (info->alloc_fcn_id)
   1948		fcn_id = irdma_get_fcn_id(vsi);
   1949	if (fcn_id == IRDMA_INVALID_FCN_ID)
   1950		goto stats_error;
   1951
   1952	vsi->stats_fcn_id_alloc = info->alloc_fcn_id;
   1953	vsi->fcn_id = fcn_id;
   1954	if (info->alloc_fcn_id) {
   1955		vsi->pestat->gather_info.use_stats_inst = true;
   1956		vsi->pestat->gather_info.stats_inst_index = fcn_id;
   1957	}
   1958
   1959	return 0;
   1960
   1961stats_error:
   1962	dma_free_coherent(vsi->pestat->hw->device, stats_buff_mem->size,
   1963			  stats_buff_mem->va, stats_buff_mem->pa);
   1964	stats_buff_mem->va = NULL;
   1965
   1966	return -EIO;
   1967}
   1968
   1969/**
   1970 * irdma_vsi_stats_free - Free the vsi stats
   1971 * @vsi: pointer to the vsi structure
   1972 */
   1973void irdma_vsi_stats_free(struct irdma_sc_vsi *vsi)
   1974{
   1975	struct irdma_stats_inst_info stats_info = {};
   1976	u8 fcn_id = vsi->fcn_id;
   1977	struct irdma_sc_dev *dev = vsi->dev;
   1978
   1979	if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1) {
   1980		if (vsi->stats_fcn_id_alloc) {
   1981			stats_info.stats_idx = vsi->fcn_id;
   1982			irdma_cqp_stats_inst_cmd(vsi, IRDMA_OP_STATS_FREE,
   1983						 &stats_info);
   1984		}
   1985	} else {
   1986		if (vsi->stats_fcn_id_alloc &&
   1987		    fcn_id < vsi->dev->hw_attrs.max_stat_inst)
   1988			vsi->dev->fcn_id_array[fcn_id] = false;
   1989	}
   1990
   1991	if (!vsi->pestat)
   1992		return;
   1993	irdma_hw_stats_stop_timer(vsi);
   1994	dma_free_coherent(vsi->pestat->hw->device,
   1995			  vsi->pestat->gather_info.stats_buff_mem.size,
   1996			  vsi->pestat->gather_info.stats_buff_mem.va,
   1997			  vsi->pestat->gather_info.stats_buff_mem.pa);
   1998	vsi->pestat->gather_info.stats_buff_mem.va = NULL;
   1999}
   2000
   2001/**
   2002 * irdma_get_encoded_wqe_size - given wq size, returns hardware encoded size
    2003 * @wqsize: size of the wq (sq, rq) to encode
   2004 * @queue_type: queue type selected for the calculation algorithm
   2005 */
   2006u8 irdma_get_encoded_wqe_size(u32 wqsize, enum irdma_queue_type queue_type)
   2007{
   2008	u8 encoded_size = 0;
   2009
   2010	/* cqp sq's hw coded value starts from 1 for size of 4
    2011	 * while it starts from 0 for qp's wq's.
   2012	 */
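        	/* e.g. a qp wq of 256 entries encodes to 6 (256 >> 2 = 64, then
        	 * six right shifts reach zero); the same size for a cqp sq
        	 * encodes to 7.
        	 */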
   2013	if (queue_type == IRDMA_QUEUE_TYPE_CQP)
   2014		encoded_size = 1;
   2015	wqsize >>= 2;
   2016	while (wqsize >>= 1)
   2017		encoded_size++;
   2018
   2019	return encoded_size;
   2020}
   2021
   2022/**
   2023 * irdma_sc_gather_stats - collect the statistics
   2024 * @cqp: struct for cqp hw
   2025 * @info: gather stats info structure
   2026 * @scratch: u64 saved to be used during cqp completion
   2027 */
   2028static int irdma_sc_gather_stats(struct irdma_sc_cqp *cqp,
   2029				 struct irdma_stats_gather_info *info,
   2030				 u64 scratch)
   2031{
   2032	__le64 *wqe;
   2033	u64 temp;
   2034
   2035	if (info->stats_buff_mem.size < IRDMA_GATHER_STATS_BUF_SIZE)
   2036		return -ENOMEM;
   2037
   2038	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
   2039	if (!wqe)
   2040		return -ENOMEM;
   2041
   2042	set_64bit_val(wqe, 40,
   2043		      FIELD_PREP(IRDMA_CQPSQ_STATS_HMC_FCN_INDEX, info->hmc_fcn_index));
   2044	set_64bit_val(wqe, 32, info->stats_buff_mem.pa);
   2045
   2046	temp = FIELD_PREP(IRDMA_CQPSQ_STATS_WQEVALID, cqp->polarity) |
   2047	       FIELD_PREP(IRDMA_CQPSQ_STATS_USE_INST, info->use_stats_inst) |
   2048	       FIELD_PREP(IRDMA_CQPSQ_STATS_INST_INDEX,
   2049			  info->stats_inst_index) |
   2050	       FIELD_PREP(IRDMA_CQPSQ_STATS_USE_HMC_FCN_INDEX,
   2051			  info->use_hmc_fcn_index) |
   2052	       FIELD_PREP(IRDMA_CQPSQ_STATS_OP, IRDMA_CQP_OP_GATHER_STATS);
   2053	dma_wmb(); /* make sure WQE is written before valid bit is set */
   2054
   2055	set_64bit_val(wqe, 24, temp);
   2056
   2057	print_hex_dump_debug("STATS: GATHER_STATS WQE", DUMP_PREFIX_OFFSET,
   2058			     16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
   2059
   2060	irdma_sc_cqp_post_sq(cqp);
   2061	ibdev_dbg(to_ibdev(cqp->dev),
   2062		  "STATS: CQP SQ head 0x%x tail 0x%x size 0x%x\n",
   2063		  cqp->sq_ring.head, cqp->sq_ring.tail, cqp->sq_ring.size);
   2064
   2065	return 0;
   2066}
   2067
   2068/**
   2069 * irdma_sc_manage_stats_inst - allocate or free stats instance
   2070 * @cqp: struct for cqp hw
   2071 * @info: stats info structure
   2072 * @alloc: alloc vs. delete flag
   2073 * @scratch: u64 saved to be used during cqp completion
   2074 */
   2075static int irdma_sc_manage_stats_inst(struct irdma_sc_cqp *cqp,
   2076				      struct irdma_stats_inst_info *info,
   2077				      bool alloc, u64 scratch)
   2078{
   2079	__le64 *wqe;
   2080	u64 temp;
   2081
   2082	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
   2083	if (!wqe)
   2084		return -ENOMEM;
   2085
   2086	set_64bit_val(wqe, 40,
   2087		      FIELD_PREP(IRDMA_CQPSQ_STATS_HMC_FCN_INDEX, info->hmc_fn_id));
   2088	temp = FIELD_PREP(IRDMA_CQPSQ_STATS_WQEVALID, cqp->polarity) |
   2089	       FIELD_PREP(IRDMA_CQPSQ_STATS_ALLOC_INST, alloc) |
   2090	       FIELD_PREP(IRDMA_CQPSQ_STATS_USE_HMC_FCN_INDEX,
   2091			  info->use_hmc_fcn_index) |
   2092	       FIELD_PREP(IRDMA_CQPSQ_STATS_INST_INDEX, info->stats_idx) |
   2093	       FIELD_PREP(IRDMA_CQPSQ_STATS_OP, IRDMA_CQP_OP_MANAGE_STATS);
   2094
   2095	dma_wmb(); /* make sure WQE is written before valid bit is set */
   2096
   2097	set_64bit_val(wqe, 24, temp);
   2098
   2099	print_hex_dump_debug("WQE: MANAGE_STATS WQE", DUMP_PREFIX_OFFSET, 16,
   2100			     8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
   2101
   2102	irdma_sc_cqp_post_sq(cqp);
   2103	return 0;
   2104}
   2105
   2106/**
   2107 * irdma_sc_set_up_map - set the up map table
   2108 * @cqp: struct for cqp hw
   2109 * @info: User priority map info
   2110 * @scratch: u64 saved to be used during cqp completion
   2111 */
   2112static int irdma_sc_set_up_map(struct irdma_sc_cqp *cqp,
   2113			       struct irdma_up_info *info, u64 scratch)
   2114{
   2115	__le64 *wqe;
   2116	u64 temp = 0;
   2117	int i;
   2118
   2119	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
   2120	if (!wqe)
   2121		return -ENOMEM;
   2122
   2123	for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++)
   2124		temp |= (u64)info->map[i] << (i * 8);
   2125
   2126	set_64bit_val(wqe, 0, temp);
   2127	set_64bit_val(wqe, 40,
   2128		      FIELD_PREP(IRDMA_CQPSQ_UP_CNPOVERRIDE, info->cnp_up_override) |
   2129		      FIELD_PREP(IRDMA_CQPSQ_UP_HMCFCNIDX, info->hmc_fcn_idx));
   2130
   2131	temp = FIELD_PREP(IRDMA_CQPSQ_UP_WQEVALID, cqp->polarity) |
   2132	       FIELD_PREP(IRDMA_CQPSQ_UP_USEVLAN, info->use_vlan) |
   2133	       FIELD_PREP(IRDMA_CQPSQ_UP_USEOVERRIDE,
   2134			  info->use_cnp_up_override) |
   2135	       FIELD_PREP(IRDMA_CQPSQ_UP_OP, IRDMA_CQP_OP_UP_MAP);
   2136	dma_wmb(); /* make sure WQE is written before valid bit is set */
   2137
   2138	set_64bit_val(wqe, 24, temp);
   2139
   2140	print_hex_dump_debug("WQE: UPMAP WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe,
   2141			     IRDMA_CQP_WQE_SIZE * 8, false);
   2142	irdma_sc_cqp_post_sq(cqp);
   2143
   2144	return 0;
   2145}
   2146
   2147/**
   2148 * irdma_sc_manage_ws_node - create/modify/destroy WS node
   2149 * @cqp: struct for cqp hw
   2150 * @info: node info structure
    2151 * @node_op: 0 for add, 1 for modify, 2 for delete
   2152 * @scratch: u64 saved to be used during cqp completion
   2153 */
   2154static int irdma_sc_manage_ws_node(struct irdma_sc_cqp *cqp,
   2155				   struct irdma_ws_node_info *info,
   2156				   enum irdma_ws_node_op node_op, u64 scratch)
   2157{
   2158	__le64 *wqe;
   2159	u64 temp = 0;
   2160
   2161	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
   2162	if (!wqe)
   2163		return -ENOMEM;
   2164
   2165	set_64bit_val(wqe, 32,
   2166		      FIELD_PREP(IRDMA_CQPSQ_WS_VSI, info->vsi) |
   2167		      FIELD_PREP(IRDMA_CQPSQ_WS_WEIGHT, info->weight));
   2168
   2169	temp = FIELD_PREP(IRDMA_CQPSQ_WS_WQEVALID, cqp->polarity) |
   2170	       FIELD_PREP(IRDMA_CQPSQ_WS_NODEOP, node_op) |
   2171	       FIELD_PREP(IRDMA_CQPSQ_WS_ENABLENODE, info->enable) |
   2172	       FIELD_PREP(IRDMA_CQPSQ_WS_NODETYPE, info->type_leaf) |
   2173	       FIELD_PREP(IRDMA_CQPSQ_WS_PRIOTYPE, info->prio_type) |
   2174	       FIELD_PREP(IRDMA_CQPSQ_WS_TC, info->tc) |
   2175	       FIELD_PREP(IRDMA_CQPSQ_WS_OP, IRDMA_CQP_OP_WORK_SCHED_NODE) |
   2176	       FIELD_PREP(IRDMA_CQPSQ_WS_PARENTID, info->parent_id) |
   2177	       FIELD_PREP(IRDMA_CQPSQ_WS_NODEID, info->id);
   2178	dma_wmb(); /* make sure WQE is written before valid bit is set */
   2179
   2180	set_64bit_val(wqe, 24, temp);
   2181
   2182	print_hex_dump_debug("WQE: MANAGE_WS WQE", DUMP_PREFIX_OFFSET, 16, 8,
   2183			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
   2184	irdma_sc_cqp_post_sq(cqp);
   2185
   2186	return 0;
   2187}
   2188
   2189/**
   2190 * irdma_sc_qp_flush_wqes - flush qp's wqe
   2191 * @qp: sc qp
    2192 * @info: flush information
   2193 * @scratch: u64 saved to be used during cqp completion
   2194 * @post_sq: flag for cqp db to ring
   2195 */
   2196int irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
   2197			   struct irdma_qp_flush_info *info, u64 scratch,
   2198			   bool post_sq)
   2199{
   2200	u64 temp = 0;
   2201	__le64 *wqe;
   2202	struct irdma_sc_cqp *cqp;
   2203	u64 hdr;
   2204	bool flush_sq = false, flush_rq = false;
   2205
   2206	if (info->rq && !qp->flush_rq)
   2207		flush_rq = true;
   2208	if (info->sq && !qp->flush_sq)
   2209		flush_sq = true;
   2210	qp->flush_sq |= flush_sq;
   2211	qp->flush_rq |= flush_rq;
   2212
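        	/* A repeat flush request for queues already being flushed is
        	 * treated as a no-op and reported as -EALREADY.
        	 */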
   2213	if (!flush_sq && !flush_rq) {
   2214		ibdev_dbg(to_ibdev(qp->dev),
   2215			  "CQP: Additional flush request ignored for qp %x\n",
   2216			  qp->qp_uk.qp_id);
   2217		return -EALREADY;
   2218	}
   2219
   2220	cqp = qp->pd->dev->cqp;
   2221	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
   2222	if (!wqe)
   2223		return -ENOMEM;
   2224
   2225	if (info->userflushcode) {
   2226		if (flush_rq)
   2227			temp |= FIELD_PREP(IRDMA_CQPSQ_FWQE_RQMNERR,
   2228					   info->rq_minor_code) |
   2229				FIELD_PREP(IRDMA_CQPSQ_FWQE_RQMJERR,
   2230					   info->rq_major_code);
   2231		if (flush_sq)
   2232			temp |= FIELD_PREP(IRDMA_CQPSQ_FWQE_SQMNERR,
   2233					   info->sq_minor_code) |
   2234				FIELD_PREP(IRDMA_CQPSQ_FWQE_SQMJERR,
   2235					   info->sq_major_code);
   2236	}
   2237	set_64bit_val(wqe, 16, temp);
   2238
   2239	temp = (info->generate_ae) ?
   2240		info->ae_code | FIELD_PREP(IRDMA_CQPSQ_FWQE_AESOURCE,
   2241					   info->ae_src) : 0;
   2242	set_64bit_val(wqe, 8, temp);
   2243
   2244	hdr = qp->qp_uk.qp_id |
   2245	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_FLUSH_WQES) |
   2246	      FIELD_PREP(IRDMA_CQPSQ_FWQE_GENERATE_AE, info->generate_ae) |
   2247	      FIELD_PREP(IRDMA_CQPSQ_FWQE_USERFLCODE, info->userflushcode) |
   2248	      FIELD_PREP(IRDMA_CQPSQ_FWQE_FLUSHSQ, flush_sq) |
   2249	      FIELD_PREP(IRDMA_CQPSQ_FWQE_FLUSHRQ, flush_rq) |
   2250	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
   2251	dma_wmb(); /* make sure WQE is written before valid bit is set */
   2252
   2253	set_64bit_val(wqe, 24, hdr);
   2254
   2255	print_hex_dump_debug("WQE: QP_FLUSH WQE", DUMP_PREFIX_OFFSET, 16, 8,
   2256			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
   2257	if (post_sq)
   2258		irdma_sc_cqp_post_sq(cqp);
   2259
   2260	return 0;
   2261}
   2262
   2263/**
   2264 * irdma_sc_gen_ae - generate AE, uses flush WQE CQP OP
   2265 * @qp: sc qp
   2266 * @info: gen ae information
   2267 * @scratch: u64 saved to be used during cqp completion
   2268 * @post_sq: flag for cqp db to ring
   2269 */
   2270static int irdma_sc_gen_ae(struct irdma_sc_qp *qp,
   2271			   struct irdma_gen_ae_info *info, u64 scratch,
   2272			   bool post_sq)
   2273{
   2274	u64 temp;
   2275	__le64 *wqe;
   2276	struct irdma_sc_cqp *cqp;
   2277	u64 hdr;
   2278
   2279	cqp = qp->pd->dev->cqp;
   2280	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
   2281	if (!wqe)
   2282		return -ENOMEM;
   2283
   2284	temp = info->ae_code | FIELD_PREP(IRDMA_CQPSQ_FWQE_AESOURCE,
   2285					  info->ae_src);
   2286	set_64bit_val(wqe, 8, temp);
   2287
   2288	hdr = qp->qp_uk.qp_id | FIELD_PREP(IRDMA_CQPSQ_OPCODE,
   2289					   IRDMA_CQP_OP_GEN_AE) |
   2290	      FIELD_PREP(IRDMA_CQPSQ_FWQE_GENERATE_AE, 1) |
   2291	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
   2292	dma_wmb(); /* make sure WQE is written before valid bit is set */
   2293
   2294	set_64bit_val(wqe, 24, hdr);
   2295
   2296	print_hex_dump_debug("WQE: GEN_AE WQE", DUMP_PREFIX_OFFSET, 16, 8,
   2297			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
   2298	if (post_sq)
   2299		irdma_sc_cqp_post_sq(cqp);
   2300
   2301	return 0;
   2302}
   2303
    2304/**
          * irdma_sc_qp_upload_context - upload qp's context
   2305 * @dev: sc device struct
   2306 * @info: upload context info ptr for return
   2307 * @scratch: u64 saved to be used during cqp completion
   2308 * @post_sq: flag for cqp db to ring
   2309 */
   2310static int irdma_sc_qp_upload_context(struct irdma_sc_dev *dev,
   2311				      struct irdma_upload_context_info *info,
   2312				      u64 scratch, bool post_sq)
   2313{
   2314	__le64 *wqe;
   2315	struct irdma_sc_cqp *cqp;
   2316	u64 hdr;
   2317
   2318	cqp = dev->cqp;
   2319	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
   2320	if (!wqe)
   2321		return -ENOMEM;
   2322
   2323	set_64bit_val(wqe, 16, info->buf_pa);
   2324
   2325	hdr = FIELD_PREP(IRDMA_CQPSQ_UCTX_QPID, info->qp_id) |
   2326	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_UPLOAD_CONTEXT) |
   2327	      FIELD_PREP(IRDMA_CQPSQ_UCTX_QPTYPE, info->qp_type) |
   2328	      FIELD_PREP(IRDMA_CQPSQ_UCTX_RAWFORMAT, info->raw_format) |
   2329	      FIELD_PREP(IRDMA_CQPSQ_UCTX_FREEZEQP, info->freeze_qp) |
   2330	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
   2331	dma_wmb(); /* make sure WQE is written before valid bit is set */
   2332
   2333	set_64bit_val(wqe, 24, hdr);
   2334
   2335	print_hex_dump_debug("WQE: QP_UPLOAD_CTX WQE", DUMP_PREFIX_OFFSET, 16,
   2336			     8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
   2337	if (post_sq)
   2338		irdma_sc_cqp_post_sq(cqp);
   2339
   2340	return 0;
   2341}
   2342
   2343/**
   2344 * irdma_sc_manage_push_page - Handle push page
   2345 * @cqp: struct for cqp hw
   2346 * @info: push page info
   2347 * @scratch: u64 saved to be used during cqp completion
   2348 * @post_sq: flag for cqp db to ring
   2349 */
   2350static int irdma_sc_manage_push_page(struct irdma_sc_cqp *cqp,
   2351				     struct irdma_cqp_manage_push_page_info *info,
   2352				     u64 scratch, bool post_sq)
   2353{
   2354	__le64 *wqe;
   2355	u64 hdr;
   2356
   2357	if (info->free_page &&
   2358	    info->push_idx >= cqp->dev->hw_attrs.max_hw_device_pages)
   2359		return -EINVAL;
   2360
   2361	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
   2362	if (!wqe)
   2363		return -ENOMEM;
   2364
   2365	set_64bit_val(wqe, 16, info->qs_handle);
   2366	hdr = FIELD_PREP(IRDMA_CQPSQ_MPP_PPIDX, info->push_idx) |
   2367	      FIELD_PREP(IRDMA_CQPSQ_MPP_PPTYPE, info->push_page_type) |
   2368	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_PUSH_PAGES) |
   2369	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity) |
   2370	      FIELD_PREP(IRDMA_CQPSQ_MPP_FREE_PAGE, info->free_page);
   2371	dma_wmb(); /* make sure WQE is written before valid bit is set */
   2372
   2373	set_64bit_val(wqe, 24, hdr);
   2374
   2375	print_hex_dump_debug("WQE: MANAGE_PUSH_PAGES WQE", DUMP_PREFIX_OFFSET,
   2376			     16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
   2377	if (post_sq)
   2378		irdma_sc_cqp_post_sq(cqp);
   2379
   2380	return 0;
   2381}
   2382
   2383/**
   2384 * irdma_sc_suspend_qp - suspend qp for param change
   2385 * @cqp: struct for cqp hw
   2386 * @qp: sc qp struct
   2387 * @scratch: u64 saved to be used during cqp completion
   2388 */
   2389static int irdma_sc_suspend_qp(struct irdma_sc_cqp *cqp, struct irdma_sc_qp *qp,
   2390			       u64 scratch)
   2391{
   2392	u64 hdr;
   2393	__le64 *wqe;
   2394
   2395	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
   2396	if (!wqe)
   2397		return -ENOMEM;
   2398
   2399	hdr = FIELD_PREP(IRDMA_CQPSQ_SUSPENDQP_QPID, qp->qp_uk.qp_id) |
   2400	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_SUSPEND_QP) |
   2401	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
   2402	dma_wmb(); /* make sure WQE is written before valid bit is set */
   2403
   2404	set_64bit_val(wqe, 24, hdr);
   2405
   2406	print_hex_dump_debug("WQE: SUSPEND_QP WQE", DUMP_PREFIX_OFFSET, 16, 8,
   2407			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
   2408	irdma_sc_cqp_post_sq(cqp);
   2409
   2410	return 0;
   2411}
   2412
   2413/**
   2414 * irdma_sc_resume_qp - resume qp after suspend
   2415 * @cqp: struct for cqp hw
   2416 * @qp: sc qp struct
   2417 * @scratch: u64 saved to be used during cqp completion
   2418 */
   2419static int irdma_sc_resume_qp(struct irdma_sc_cqp *cqp, struct irdma_sc_qp *qp,
   2420			      u64 scratch)
   2421{
   2422	u64 hdr;
   2423	__le64 *wqe;
   2424
   2425	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
   2426	if (!wqe)
   2427		return -ENOMEM;
   2428
   2429	set_64bit_val(wqe, 16,
   2430		      FIELD_PREP(IRDMA_CQPSQ_RESUMEQP_QSHANDLE, qp->qs_handle));
   2431
   2432	hdr = FIELD_PREP(IRDMA_CQPSQ_RESUMEQP_QPID, qp->qp_uk.qp_id) |
   2433	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_RESUME_QP) |
   2434	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
   2435	dma_wmb(); /* make sure WQE is written before valid bit is set */
   2436
   2437	set_64bit_val(wqe, 24, hdr);
   2438
   2439	print_hex_dump_debug("WQE: RESUME_QP WQE", DUMP_PREFIX_OFFSET, 16, 8,
   2440			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
   2441	irdma_sc_cqp_post_sq(cqp);
   2442
   2443	return 0;
   2444}
   2445
   2446/**
   2447 * irdma_sc_cq_ack - acknowledge completion q
   2448 * @cq: cq struct
   2449 */
   2450static inline void irdma_sc_cq_ack(struct irdma_sc_cq *cq)
   2451{
   2452	writel(cq->cq_uk.cq_id, cq->cq_uk.cq_ack_db);
   2453}
   2454
   2455/**
   2456 * irdma_sc_cq_init - initialize completion q
   2457 * @cq: cq struct
   2458 * @info: cq initialization info
   2459 */
   2460int irdma_sc_cq_init(struct irdma_sc_cq *cq, struct irdma_cq_init_info *info)
   2461{
   2462	u32 pble_obj_cnt;
   2463
   2464	pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
   2465	if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
   2466		return -EINVAL;
   2467
   2468	cq->cq_pa = info->cq_base_pa;
   2469	cq->dev = info->dev;
   2470	cq->ceq_id = info->ceq_id;
   2471	info->cq_uk_init_info.cqe_alloc_db = cq->dev->cq_arm_db;
   2472	info->cq_uk_init_info.cq_ack_db = cq->dev->cq_ack_db;
   2473	irdma_uk_cq_init(&cq->cq_uk, &info->cq_uk_init_info);
   2474
   2475	cq->virtual_map = info->virtual_map;
   2476	cq->pbl_chunk_size = info->pbl_chunk_size;
   2477	cq->ceqe_mask = info->ceqe_mask;
   2478	cq->cq_type = (info->type) ? info->type : IRDMA_CQ_TYPE_IWARP;
   2479	cq->shadow_area_pa = info->shadow_area_pa;
   2480	cq->shadow_read_threshold = info->shadow_read_threshold;
   2481	cq->ceq_id_valid = info->ceq_id_valid;
   2482	cq->tph_en = info->tph_en;
   2483	cq->tph_val = info->tph_val;
   2484	cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
   2485	cq->vsi = info->vsi;
   2486
   2487	return 0;
   2488}
   2489
   2490/**
   2491 * irdma_sc_cq_create - create completion q
   2492 * @cq: cq struct
   2493 * @scratch: u64 saved to be used during cqp completion
   2494 * @check_overflow: flag for overflow check
   2495 * @post_sq: flag for cqp db to ring
   2496 */
   2497static int irdma_sc_cq_create(struct irdma_sc_cq *cq, u64 scratch,
   2498			      bool check_overflow, bool post_sq)
   2499{
   2500	__le64 *wqe;
   2501	struct irdma_sc_cqp *cqp;
   2502	u64 hdr;
   2503	struct irdma_sc_ceq *ceq;
   2504	int ret_code = 0;
   2505
   2506	cqp = cq->dev->cqp;
   2507	if (cq->cq_uk.cq_id >= cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].max_cnt)
   2508		return -EINVAL;
   2509
   2510	if (cq->ceq_id >= cq->dev->hmc_fpm_misc.max_ceqs)
   2511		return -EINVAL;
   2512
   2513	ceq = cq->dev->ceq[cq->ceq_id];
   2514	if (ceq && ceq->reg_cq)
   2515		ret_code = irdma_sc_add_cq_ctx(ceq, cq);
   2516
   2517	if (ret_code)
   2518		return ret_code;
   2519
   2520	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
   2521	if (!wqe) {
   2522		if (ceq && ceq->reg_cq)
   2523			irdma_sc_remove_cq_ctx(ceq, cq);
   2524		return -ENOMEM;
   2525	}
   2526
   2527	set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
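        	/* Store the host cq pointer (shifted to fit the field) as the
        	 * handle the driver uses to map CEQ events back to this cq.
        	 */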
   2528	set_64bit_val(wqe, 8, (uintptr_t)cq >> 1);
   2529	set_64bit_val(wqe, 16,
   2530		      FIELD_PREP(IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD, cq->shadow_read_threshold));
   2531	set_64bit_val(wqe, 32, (cq->virtual_map ? 0 : cq->cq_pa));
   2532	set_64bit_val(wqe, 40, cq->shadow_area_pa);
   2533	set_64bit_val(wqe, 48,
   2534		      FIELD_PREP(IRDMA_CQPSQ_CQ_FIRSTPMPBLIDX, (cq->virtual_map ? cq->first_pm_pbl_idx : 0)));
   2535	set_64bit_val(wqe, 56,
   2536		      FIELD_PREP(IRDMA_CQPSQ_TPHVAL, cq->tph_val) |
   2537		      FIELD_PREP(IRDMA_CQPSQ_VSIIDX, cq->vsi->vsi_idx));
   2538
   2539	hdr = FLD_LS_64(cq->dev, cq->cq_uk.cq_id, IRDMA_CQPSQ_CQ_CQID) |
   2540	      FLD_LS_64(cq->dev, (cq->ceq_id_valid ? cq->ceq_id : 0),
   2541			IRDMA_CQPSQ_CQ_CEQID) |
   2542	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_CQ) |
   2543	      FIELD_PREP(IRDMA_CQPSQ_CQ_LPBLSIZE, cq->pbl_chunk_size) |
   2544	      FIELD_PREP(IRDMA_CQPSQ_CQ_CHKOVERFLOW, check_overflow) |
   2545	      FIELD_PREP(IRDMA_CQPSQ_CQ_VIRTMAP, cq->virtual_map) |
   2546	      FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, cq->ceqe_mask) |
   2547	      FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, cq->ceq_id_valid) |
   2548	      FIELD_PREP(IRDMA_CQPSQ_TPHEN, cq->tph_en) |
   2549	      FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT,
   2550			 cq->cq_uk.avoid_mem_cflct) |
   2551	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
   2552	dma_wmb(); /* make sure WQE is written before valid bit is set */
   2553
   2554	set_64bit_val(wqe, 24, hdr);
   2555
   2556	print_hex_dump_debug("WQE: CQ_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8,
   2557			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
   2558	if (post_sq)
   2559		irdma_sc_cqp_post_sq(cqp);
   2560
   2561	return 0;
   2562}
   2563
   2564/**
   2565 * irdma_sc_cq_destroy - destroy completion q
   2566 * @cq: cq struct
   2567 * @scratch: u64 saved to be used during cqp completion
   2568 * @post_sq: flag for cqp db to ring
   2569 */
   2570int irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch, bool post_sq)
   2571{
   2572	struct irdma_sc_cqp *cqp;
   2573	__le64 *wqe;
   2574	u64 hdr;
   2575	struct irdma_sc_ceq *ceq;
   2576
   2577	cqp = cq->dev->cqp;
   2578	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
   2579	if (!wqe)
   2580		return -ENOMEM;
   2581
   2582	ceq = cq->dev->ceq[cq->ceq_id];
   2583	if (ceq && ceq->reg_cq)
   2584		irdma_sc_remove_cq_ctx(ceq, cq);
   2585
   2586	set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
   2587	set_64bit_val(wqe, 8, (uintptr_t)cq >> 1);
   2588	set_64bit_val(wqe, 40, cq->shadow_area_pa);
   2589	set_64bit_val(wqe, 48,
   2590		      (cq->virtual_map ? cq->first_pm_pbl_idx : 0));
   2591
   2592	hdr = cq->cq_uk.cq_id |
   2593	      FLD_LS_64(cq->dev, (cq->ceq_id_valid ? cq->ceq_id : 0),
   2594			IRDMA_CQPSQ_CQ_CEQID) |
   2595	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_CQ) |
   2596	      FIELD_PREP(IRDMA_CQPSQ_CQ_LPBLSIZE, cq->pbl_chunk_size) |
   2597	      FIELD_PREP(IRDMA_CQPSQ_CQ_VIRTMAP, cq->virtual_map) |
   2598	      FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, cq->ceqe_mask) |
   2599	      FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, cq->ceq_id_valid) |
   2600	      FIELD_PREP(IRDMA_CQPSQ_TPHEN, cq->tph_en) |
   2601	      FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT, cq->cq_uk.avoid_mem_cflct) |
   2602	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
   2603	dma_wmb(); /* make sure WQE is written before valid bit is set */
   2604
   2605	set_64bit_val(wqe, 24, hdr);
   2606
   2607	print_hex_dump_debug("WQE: CQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16, 8,
   2608			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
   2609	if (post_sq)
   2610		irdma_sc_cqp_post_sq(cqp);
   2611
   2612	return 0;
   2613}
   2614
   2615/**
   2616 * irdma_sc_cq_resize - set resized cq buffer info
   2617 * @cq: resized cq
   2618 * @info: resized cq buffer info
   2619 */
   2620void irdma_sc_cq_resize(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *info)
   2621{
   2622	cq->virtual_map = info->virtual_map;
   2623	cq->cq_pa = info->cq_pa;
   2624	cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
   2625	cq->pbl_chunk_size = info->pbl_chunk_size;
   2626	irdma_uk_cq_resize(&cq->cq_uk, info->cq_base, info->cq_size);
   2627}
   2628
   2629/**
   2630 * irdma_sc_cq_modify - modify a Completion Queue
   2631 * @cq: cq struct
   2632 * @info: modification info struct
   2633 * @scratch: u64 saved to be used during cqp completion
   2634 * @post_sq: flag to post to sq
   2635 */
   2636static int irdma_sc_cq_modify(struct irdma_sc_cq *cq,
   2637			      struct irdma_modify_cq_info *info, u64 scratch,
   2638			      bool post_sq)
   2639{
   2640	struct irdma_sc_cqp *cqp;
   2641	__le64 *wqe;
   2642	u64 hdr;
   2643	u32 pble_obj_cnt;
   2644
   2645	pble_obj_cnt = cq->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
   2646	if (info->cq_resize && info->virtual_map &&
   2647	    info->first_pm_pbl_idx >= pble_obj_cnt)
   2648		return -EINVAL;
   2649
   2650	cqp = cq->dev->cqp;
   2651	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
   2652	if (!wqe)
   2653		return -ENOMEM;
   2654
   2655	set_64bit_val(wqe, 0, info->cq_size);
   2656	set_64bit_val(wqe, 8, (uintptr_t)cq >> 1);
   2657	set_64bit_val(wqe, 16,
   2658		      FIELD_PREP(IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD, info->shadow_read_threshold));
   2659	set_64bit_val(wqe, 32, info->cq_pa);
   2660	set_64bit_val(wqe, 40, cq->shadow_area_pa);
   2661	set_64bit_val(wqe, 48, info->first_pm_pbl_idx);
   2662	set_64bit_val(wqe, 56,
   2663		      FIELD_PREP(IRDMA_CQPSQ_TPHVAL, cq->tph_val) |
   2664		      FIELD_PREP(IRDMA_CQPSQ_VSIIDX, cq->vsi->vsi_idx));
   2665
   2666	hdr = cq->cq_uk.cq_id |
   2667	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MODIFY_CQ) |
   2668	      FIELD_PREP(IRDMA_CQPSQ_CQ_CQRESIZE, info->cq_resize) |
   2669	      FIELD_PREP(IRDMA_CQPSQ_CQ_LPBLSIZE, info->pbl_chunk_size) |
   2670	      FIELD_PREP(IRDMA_CQPSQ_CQ_CHKOVERFLOW, info->check_overflow) |
   2671	      FIELD_PREP(IRDMA_CQPSQ_CQ_VIRTMAP, info->virtual_map) |
   2672	      FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, cq->ceqe_mask) |
   2673	      FIELD_PREP(IRDMA_CQPSQ_TPHEN, cq->tph_en) |
   2674	      FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT,
   2675			 cq->cq_uk.avoid_mem_cflct) |
   2676	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
   2677	dma_wmb(); /* make sure WQE is written before valid bit is set */
   2678
   2679	set_64bit_val(wqe, 24, hdr);
   2680
   2681	print_hex_dump_debug("WQE: CQ_MODIFY WQE", DUMP_PREFIX_OFFSET, 16, 8,
   2682			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
   2683	if (post_sq)
   2684		irdma_sc_cqp_post_sq(cqp);
   2685
   2686	return 0;
   2687}
   2688
   2689/**
   2690 * irdma_check_cqp_progress - check cqp processing progress
   2691 * @timeout: timeout info struct
   2692 * @dev: sc device struct
   2693 */
   2694void irdma_check_cqp_progress(struct irdma_cqp_timeout *timeout, struct irdma_sc_dev *dev)
   2695{
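        	/* Reset the stall counter whenever new completions arrive; only
        	 * count up while requests remain outstanding with no progress.
        	 */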
   2696	if (timeout->compl_cqp_cmds != dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]) {
   2697		timeout->compl_cqp_cmds = dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS];
   2698		timeout->count = 0;
   2699	} else {
   2700		if (dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS] !=
   2701		    timeout->compl_cqp_cmds)
   2702			timeout->count++;
   2703	}
   2704}
   2705
   2706/**
   2707 * irdma_get_cqp_reg_info - get head and tail for cqp using registers
   2708 * @cqp: struct for cqp hw
   2709 * @val: cqp tail register value
   2710 * @tail: wqtail register value
   2711 * @error: cqp processing err
   2712 */
   2713static inline void irdma_get_cqp_reg_info(struct irdma_sc_cqp *cqp, u32 *val,
   2714					  u32 *tail, u32 *error)
   2715{
   2716	*val = readl(cqp->dev->hw_regs[IRDMA_CQPTAIL]);
   2717	*tail = FIELD_GET(IRDMA_CQPTAIL_WQTAIL, *val);
   2718	*error = FIELD_GET(IRDMA_CQPTAIL_CQP_OP_ERR, *val);
   2719}
   2720
   2721/**
   2722 * irdma_cqp_poll_registers - poll cqp registers
   2723 * @cqp: struct for cqp hw
   2724 * @tail: wqtail register value
   2725 * @count: how many times to try for completion
   2726 */
   2727static int irdma_cqp_poll_registers(struct irdma_sc_cqp *cqp, u32 tail,
   2728				    u32 count)
   2729{
   2730	u32 i = 0;
   2731	u32 newtail, error, val;
   2732
   2733	while (i++ < count) {
   2734		irdma_get_cqp_reg_info(cqp, &val, &newtail, &error);
   2735		if (error) {
   2736			error = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]);
   2737			ibdev_dbg(to_ibdev(cqp->dev),
   2738				  "CQP: CQPERRCODES error_code[x%08X]\n",
   2739				  error);
   2740			return -EIO;
   2741		}
   2742		if (newtail != tail) {
   2743			/* SUCCESS */
   2744			IRDMA_RING_MOVE_TAIL(cqp->sq_ring);
   2745			cqp->dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]++;
   2746			return 0;
   2747		}
   2748		udelay(cqp->dev->hw_attrs.max_sleep_count);
   2749	}
   2750
   2751	return -ETIMEDOUT;
   2752}
   2753
   2754/**
   2755 * irdma_sc_decode_fpm_commit - decode a 64 bit value into count and base
   2756 * @dev: sc device struct
   2757 * @buf: pointer to commit buffer
   2758 * @buf_idx: buffer index
   2759 * @obj_info: object info pointer
    2760 * @rsrc_idx: index of memory resource
   2761 */
   2762static u64 irdma_sc_decode_fpm_commit(struct irdma_sc_dev *dev, __le64 *buf,
   2763				      u32 buf_idx, struct irdma_hmc_obj_info *obj_info,
   2764				      u32 rsrc_idx)
   2765{
   2766	u64 temp;
   2767
   2768	get_64bit_val(buf, buf_idx, &temp);
   2769
   2770	switch (rsrc_idx) {
   2771	case IRDMA_HMC_IW_QP:
   2772		obj_info[rsrc_idx].cnt = (u32)FIELD_GET(IRDMA_COMMIT_FPM_QPCNT, temp);
   2773		break;
   2774	case IRDMA_HMC_IW_CQ:
   2775		obj_info[rsrc_idx].cnt = (u32)FLD_RS_64(dev, temp, IRDMA_COMMIT_FPM_CQCNT);
   2776		break;
   2777	case IRDMA_HMC_IW_APBVT_ENTRY:
   2778		obj_info[rsrc_idx].cnt = 1;
   2779		break;
   2780	default:
   2781		obj_info[rsrc_idx].cnt = (u32)temp;
   2782		break;
   2783	}
   2784
   2785	obj_info[rsrc_idx].base = (temp >> IRDMA_COMMIT_FPM_BASE_S) * 512;
   2786
   2787	return temp;
   2788}
   2789
   2790/**
   2791 * irdma_sc_parse_fpm_commit_buf - parse fpm commit buffer
   2792 * @dev: pointer to dev struct
   2793 * @buf: ptr to fpm commit buffer
   2794 * @info: ptr to irdma_hmc_obj_info struct
   2795 * @sd: number of SDs for HMC objects
   2796 *
    2797 * parses fpm commit info and copies the base values
    2798 * of hmc objects into hmc_info
   2799 */
   2800static void
   2801irdma_sc_parse_fpm_commit_buf(struct irdma_sc_dev *dev, __le64 *buf,
   2802			      struct irdma_hmc_obj_info *info, u32 *sd)
   2803{
   2804	u64 size;
   2805	u32 i;
   2806	u64 max_base = 0;
   2807	u32 last_hmc_obj = 0;
   2808
   2809	irdma_sc_decode_fpm_commit(dev, buf, 0, info,
   2810				   IRDMA_HMC_IW_QP);
   2811	irdma_sc_decode_fpm_commit(dev, buf, 8, info,
   2812				   IRDMA_HMC_IW_CQ);
    2813	/* skipping RSVD */
   2814	irdma_sc_decode_fpm_commit(dev, buf, 24, info,
   2815				   IRDMA_HMC_IW_HTE);
   2816	irdma_sc_decode_fpm_commit(dev, buf, 32, info,
   2817				   IRDMA_HMC_IW_ARP);
   2818	irdma_sc_decode_fpm_commit(dev, buf, 40, info,
   2819				   IRDMA_HMC_IW_APBVT_ENTRY);
   2820	irdma_sc_decode_fpm_commit(dev, buf, 48, info,
   2821				   IRDMA_HMC_IW_MR);
   2822	irdma_sc_decode_fpm_commit(dev, buf, 56, info,
   2823				   IRDMA_HMC_IW_XF);
   2824	irdma_sc_decode_fpm_commit(dev, buf, 64, info,
   2825				   IRDMA_HMC_IW_XFFL);
   2826	irdma_sc_decode_fpm_commit(dev, buf, 72, info,
   2827				   IRDMA_HMC_IW_Q1);
   2828	irdma_sc_decode_fpm_commit(dev, buf, 80, info,
   2829				   IRDMA_HMC_IW_Q1FL);
   2830	irdma_sc_decode_fpm_commit(dev, buf, 88, info,
   2831				   IRDMA_HMC_IW_TIMER);
   2832	irdma_sc_decode_fpm_commit(dev, buf, 112, info,
   2833				   IRDMA_HMC_IW_PBLE);
   2834	/* skipping RSVD. */
   2835	if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1) {
   2836		irdma_sc_decode_fpm_commit(dev, buf, 96, info,
   2837					   IRDMA_HMC_IW_FSIMC);
   2838		irdma_sc_decode_fpm_commit(dev, buf, 104, info,
   2839					   IRDMA_HMC_IW_FSIAV);
   2840		irdma_sc_decode_fpm_commit(dev, buf, 128, info,
   2841					   IRDMA_HMC_IW_RRF);
   2842		irdma_sc_decode_fpm_commit(dev, buf, 136, info,
   2843					   IRDMA_HMC_IW_RRFFL);
   2844		irdma_sc_decode_fpm_commit(dev, buf, 144, info,
   2845					   IRDMA_HMC_IW_HDR);
   2846		irdma_sc_decode_fpm_commit(dev, buf, 152, info,
   2847					   IRDMA_HMC_IW_MD);
   2848		irdma_sc_decode_fpm_commit(dev, buf, 160, info,
   2849					   IRDMA_HMC_IW_OOISC);
   2850		irdma_sc_decode_fpm_commit(dev, buf, 168, info,
   2851					   IRDMA_HMC_IW_OOISCFFL);
   2852	}
   2853
   2854	/* searching for the last object in HMC to find the size of the HMC area. */
   2855	for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++) {
   2856		if (info[i].base > max_base) {
   2857			max_base = info[i].base;
   2858			last_hmc_obj = i;
   2859		}
   2860	}
   2861
   2862	size = info[last_hmc_obj].cnt * info[last_hmc_obj].size +
   2863	       info[last_hmc_obj].base;
   2864
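        	/* Convert the total HMC footprint into segment descriptors, each
        	 * of which covers 2MB (hence the shift by 21), rounding up.
        	 */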
   2865	if (size & 0x1FFFFF)
   2866		*sd = (u32)((size >> 21) + 1); /* add 1 for remainder */
   2867	else
   2868		*sd = (u32)(size >> 21);
   2869
   2870}
   2871
   2872/**
   2873 * irdma_sc_decode_fpm_query() - Decode a 64 bit value into max count and size
   2874 * @buf: ptr to fpm query buffer
   2875 * @buf_idx: index into buf
   2876 * @obj_info: ptr to irdma_hmc_obj_info struct
   2877 * @rsrc_idx: resource index into info
   2878 *
   2879 * Decode a 64 bit value from fpm query buffer into max count and size
   2880 */
   2881static u64 irdma_sc_decode_fpm_query(__le64 *buf, u32 buf_idx,
   2882				     struct irdma_hmc_obj_info *obj_info,
   2883				     u32 rsrc_idx)
   2884{
   2885	u64 temp;
   2886	u32 size;
   2887
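        	/* Each query entry packs the max object count in the low 32 bits
        	 * and log2 of the per-object size in the high 32 bits.
        	 */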
   2888	get_64bit_val(buf, buf_idx, &temp);
   2889	obj_info[rsrc_idx].max_cnt = (u32)temp;
   2890	size = (u32)(temp >> 32);
   2891	obj_info[rsrc_idx].size = BIT_ULL(size);
   2892
   2893	return temp;
   2894}
   2895
   2896/**
   2897 * irdma_sc_parse_fpm_query_buf() - parses fpm query buffer
   2898 * @dev: ptr to shared code device
   2899 * @buf: ptr to fpm query buffer
   2900 * @hmc_info: ptr to irdma_hmc_obj_info struct
   2901 * @hmc_fpm_misc: ptr to fpm data
   2902 *
    2903 * parses fpm query buffer and copies the max_cnt and
    2904 * size values of hmc objects into hmc_info
   2905 */
   2906static int irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
   2907					struct irdma_hmc_info *hmc_info,
   2908					struct irdma_hmc_fpm_misc *hmc_fpm_misc)
   2909{
   2910	struct irdma_hmc_obj_info *obj_info;
   2911	u64 temp;
   2912	u32 size;
   2913	u16 max_pe_sds;
   2914
   2915	obj_info = hmc_info->hmc_obj;
   2916
   2917	get_64bit_val(buf, 0, &temp);
   2918	hmc_info->first_sd_index = (u16)FIELD_GET(IRDMA_QUERY_FPM_FIRST_PE_SD_INDEX, temp);
   2919	max_pe_sds = (u16)FIELD_GET(IRDMA_QUERY_FPM_MAX_PE_SDS, temp);
   2920
   2921	hmc_fpm_misc->max_sds = max_pe_sds;
   2922	hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index;
   2923	get_64bit_val(buf, 8, &temp);
   2924	obj_info[IRDMA_HMC_IW_QP].max_cnt = (u32)FIELD_GET(IRDMA_QUERY_FPM_MAX_QPS, temp);
   2925	size = (u32)(temp >> 32);
   2926	obj_info[IRDMA_HMC_IW_QP].size = BIT_ULL(size);
   2927
   2928	get_64bit_val(buf, 16, &temp);
   2929	obj_info[IRDMA_HMC_IW_CQ].max_cnt = (u32)FIELD_GET(IRDMA_QUERY_FPM_MAX_CQS, temp);
   2930	size = (u32)(temp >> 32);
   2931	obj_info[IRDMA_HMC_IW_CQ].size = BIT_ULL(size);
   2932
   2933	irdma_sc_decode_fpm_query(buf, 32, obj_info, IRDMA_HMC_IW_HTE);
   2934	irdma_sc_decode_fpm_query(buf, 40, obj_info, IRDMA_HMC_IW_ARP);
   2935
   2936	obj_info[IRDMA_HMC_IW_APBVT_ENTRY].size = 8192;
   2937	obj_info[IRDMA_HMC_IW_APBVT_ENTRY].max_cnt = 1;
   2938
   2939	irdma_sc_decode_fpm_query(buf, 48, obj_info, IRDMA_HMC_IW_MR);
   2940	irdma_sc_decode_fpm_query(buf, 56, obj_info, IRDMA_HMC_IW_XF);
   2941
   2942	get_64bit_val(buf, 64, &temp);
   2943	obj_info[IRDMA_HMC_IW_XFFL].max_cnt = (u32)temp;
   2944	obj_info[IRDMA_HMC_IW_XFFL].size = 4;
   2945	hmc_fpm_misc->xf_block_size = FIELD_GET(IRDMA_QUERY_FPM_XFBLOCKSIZE, temp);
   2946	if (!hmc_fpm_misc->xf_block_size)
   2947		return -EINVAL;
   2948
   2949	irdma_sc_decode_fpm_query(buf, 72, obj_info, IRDMA_HMC_IW_Q1);
   2950	get_64bit_val(buf, 80, &temp);
   2951	obj_info[IRDMA_HMC_IW_Q1FL].max_cnt = (u32)temp;
   2952	obj_info[IRDMA_HMC_IW_Q1FL].size = 4;
   2953
   2954	hmc_fpm_misc->q1_block_size = FIELD_GET(IRDMA_QUERY_FPM_Q1BLOCKSIZE, temp);
   2955	if (!hmc_fpm_misc->q1_block_size)
   2956		return -EINVAL;
   2957
   2958	irdma_sc_decode_fpm_query(buf, 88, obj_info, IRDMA_HMC_IW_TIMER);
   2959
   2960	get_64bit_val(buf, 112, &temp);
   2961	obj_info[IRDMA_HMC_IW_PBLE].max_cnt = (u32)temp;
   2962	obj_info[IRDMA_HMC_IW_PBLE].size = 8;
   2963
   2964	get_64bit_val(buf, 120, &temp);
   2965	hmc_fpm_misc->max_ceqs = FIELD_GET(IRDMA_QUERY_FPM_MAX_CEQS, temp);
   2966	hmc_fpm_misc->ht_multiplier = FIELD_GET(IRDMA_QUERY_FPM_HTMULTIPLIER, temp);
   2967	hmc_fpm_misc->timer_bucket = FIELD_GET(IRDMA_QUERY_FPM_TIMERBUCKET, temp);
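        	/* The remaining objects are only parsed for GEN_2 and later devices. */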
   2968	if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
   2969		return 0;
   2970	irdma_sc_decode_fpm_query(buf, 96, obj_info, IRDMA_HMC_IW_FSIMC);
   2971	irdma_sc_decode_fpm_query(buf, 104, obj_info, IRDMA_HMC_IW_FSIAV);
   2972	irdma_sc_decode_fpm_query(buf, 128, obj_info, IRDMA_HMC_IW_RRF);
   2973
   2974	get_64bit_val(buf, 136, &temp);
   2975	obj_info[IRDMA_HMC_IW_RRFFL].max_cnt = (u32)temp;
   2976	obj_info[IRDMA_HMC_IW_RRFFL].size = 4;
   2977	hmc_fpm_misc->rrf_block_size = FIELD_GET(IRDMA_QUERY_FPM_RRFBLOCKSIZE, temp);
   2978	if (!hmc_fpm_misc->rrf_block_size &&
   2979	    obj_info[IRDMA_HMC_IW_RRFFL].max_cnt)
   2980		return -EINVAL;
   2981
   2982	irdma_sc_decode_fpm_query(buf, 144, obj_info, IRDMA_HMC_IW_HDR);
   2983	irdma_sc_decode_fpm_query(buf, 152, obj_info, IRDMA_HMC_IW_MD);
   2984	irdma_sc_decode_fpm_query(buf, 160, obj_info, IRDMA_HMC_IW_OOISC);
   2985
   2986	get_64bit_val(buf, 168, &temp);
   2987	obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt = (u32)temp;
   2988	obj_info[IRDMA_HMC_IW_OOISCFFL].size = 4;
   2989	hmc_fpm_misc->ooiscf_block_size = FIELD_GET(IRDMA_QUERY_FPM_OOISCFBLOCKSIZE, temp);
   2990	if (!hmc_fpm_misc->ooiscf_block_size &&
   2991	    obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt)
   2992		return -EINVAL;
   2993
   2994	return 0;
   2995}
   2996
   2997/**
   2998 * irdma_sc_find_reg_cq - find cq ctx index
   2999 * @ceq: ceq sc structure
   3000 * @cq: cq sc structure
   3001 */
   3002static u32 irdma_sc_find_reg_cq(struct irdma_sc_ceq *ceq,
   3003				struct irdma_sc_cq *cq)
   3004{
   3005	u32 i;
   3006
   3007	for (i = 0; i < ceq->reg_cq_size; i++) {
   3008		if (cq == ceq->reg_cq[i])
   3009			return i;
   3010	}
   3011
   3012	return IRDMA_INVALID_CQ_IDX;
   3013}
   3014
   3015/**
   3016 * irdma_sc_add_cq_ctx - add cq ctx tracking for ceq
   3017 * @ceq: ceq sc structure
   3018 * @cq: cq sc structure
   3019 */
   3020int irdma_sc_add_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq)
   3021{
   3022	unsigned long flags;
   3023
   3024	spin_lock_irqsave(&ceq->req_cq_lock, flags);
   3025
   3026	if (ceq->reg_cq_size == ceq->elem_cnt) {
   3027		spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
   3028		return -ENOMEM;
   3029	}
   3030
   3031	ceq->reg_cq[ceq->reg_cq_size++] = cq;
   3032
   3033	spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
   3034
   3035	return 0;
   3036}
   3037
   3038/**
   3039 * irdma_sc_remove_cq_ctx - remove cq ctx tracking for ceq
   3040 * @ceq: ceq sc structure
   3041 * @cq: cq sc structure
   3042 */
   3043void irdma_sc_remove_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq)
   3044{
   3045	unsigned long flags;
   3046	u32 cq_ctx_idx;
   3047
   3048	spin_lock_irqsave(&ceq->req_cq_lock, flags);
   3049	cq_ctx_idx = irdma_sc_find_reg_cq(ceq, cq);
   3050	if (cq_ctx_idx == IRDMA_INVALID_CQ_IDX)
   3051		goto exit;
   3052
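        	/*
        	 * Keep the registered CQ array dense by moving the last entry
        	 * into the slot being vacated.
        	 */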
   3053	ceq->reg_cq_size--;
   3054	if (cq_ctx_idx != ceq->reg_cq_size)
   3055		ceq->reg_cq[cq_ctx_idx] = ceq->reg_cq[ceq->reg_cq_size];
   3056	ceq->reg_cq[ceq->reg_cq_size] = NULL;
   3057
   3058exit:
   3059	spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
   3060}
   3061
   3062/**
   3063 * irdma_sc_cqp_init - Initialize buffers for a control Queue Pair
   3064 * @cqp: IWARP control queue pair pointer
   3065 * @info: IWARP control queue pair init info pointer
   3066 *
   3067 * Initializes the object and context buffers for a control Queue Pair.
   3068 */
   3069int irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
   3070		      struct irdma_cqp_init_info *info)
   3071{
   3072	u8 hw_sq_size;
   3073
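        	/* The CQP SQ depth must be a power of two within the supported range. */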
   3074	if (info->sq_size > IRDMA_CQP_SW_SQSIZE_2048 ||
   3075	    info->sq_size < IRDMA_CQP_SW_SQSIZE_4 ||
   3076	    ((info->sq_size & (info->sq_size - 1))))
   3077		return -EINVAL;
   3078
   3079	hw_sq_size = irdma_get_encoded_wqe_size(info->sq_size,
   3080						IRDMA_QUEUE_TYPE_CQP);
   3081	cqp->size = sizeof(*cqp);
   3082	cqp->sq_size = info->sq_size;
   3083	cqp->hw_sq_size = hw_sq_size;
   3084	cqp->sq_base = info->sq;
   3085	cqp->host_ctx = info->host_ctx;
   3086	cqp->sq_pa = info->sq_pa;
   3087	cqp->host_ctx_pa = info->host_ctx_pa;
   3088	cqp->dev = info->dev;
   3089	cqp->struct_ver = info->struct_ver;
   3090	cqp->hw_maj_ver = info->hw_maj_ver;
   3091	cqp->hw_min_ver = info->hw_min_ver;
   3092	cqp->scratch_array = info->scratch_array;
   3093	cqp->polarity = 0;
   3094	cqp->en_datacenter_tcp = info->en_datacenter_tcp;
   3095	cqp->ena_vf_count = info->ena_vf_count;
   3096	cqp->hmc_profile = info->hmc_profile;
   3097	cqp->ceqs_per_vf = info->ceqs_per_vf;
   3098	cqp->disable_packed = info->disable_packed;
   3099	cqp->rocev2_rto_policy = info->rocev2_rto_policy;
   3100	cqp->protocol_used = info->protocol_used;
   3101	memcpy(&cqp->dcqcn_params, &info->dcqcn_params, sizeof(cqp->dcqcn_params));
   3102	info->dev->cqp = cqp;
   3103
   3104	IRDMA_RING_INIT(cqp->sq_ring, cqp->sq_size);
   3105	cqp->dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS] = 0;
   3106	cqp->dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS] = 0;
    3107	/* for the cqp command backlog. */
   3108	INIT_LIST_HEAD(&cqp->dev->cqp_cmd_head);
   3109
   3110	writel(0, cqp->dev->hw_regs[IRDMA_CQPTAIL]);
   3111	writel(0, cqp->dev->hw_regs[IRDMA_CQPDB]);
   3112	writel(0, cqp->dev->hw_regs[IRDMA_CCQPSTATUS]);
   3113
   3114	ibdev_dbg(to_ibdev(cqp->dev),
   3115		  "WQE: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%pK] cqp[%p] polarity[x%04x]\n",
   3116		  cqp->sq_size, cqp->hw_sq_size, cqp->sq_base,
   3117		  (u64 *)(uintptr_t)cqp->sq_pa, cqp, cqp->polarity);
   3118	return 0;
   3119}
   3120
   3121/**
   3122 * irdma_sc_cqp_create - create cqp during bringup
   3123 * @cqp: struct for cqp hw
   3124 * @maj_err: If error, major err number
   3125 * @min_err: If error, minor err number
   3126 */
   3127int irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, u16 *min_err)
   3128{
   3129	u64 temp;
   3130	u8 hw_rev;
   3131	u32 cnt = 0, p1, p2, val = 0, err_code;
   3132	int ret_code;
   3133
   3134	hw_rev = cqp->dev->hw_attrs.uk_attrs.hw_rev;
   3135	cqp->sdbuf.size = ALIGN(IRDMA_UPDATE_SD_BUFF_SIZE * cqp->sq_size,
   3136				IRDMA_SD_BUF_ALIGNMENT);
   3137	cqp->sdbuf.va = dma_alloc_coherent(cqp->dev->hw->device,
   3138					   cqp->sdbuf.size, &cqp->sdbuf.pa,
   3139					   GFP_KERNEL);
   3140	if (!cqp->sdbuf.va)
   3141		return -ENOMEM;
   3142
   3143	spin_lock_init(&cqp->dev->cqp_lock);
   3144
   3145	temp = FIELD_PREP(IRDMA_CQPHC_SQSIZE, cqp->hw_sq_size) |
   3146	       FIELD_PREP(IRDMA_CQPHC_SVER, cqp->struct_ver) |
   3147	       FIELD_PREP(IRDMA_CQPHC_DISABLE_PFPDUS, cqp->disable_packed) |
   3148	       FIELD_PREP(IRDMA_CQPHC_CEQPERVF, cqp->ceqs_per_vf);
   3149	if (hw_rev >= IRDMA_GEN_2) {
   3150		temp |= FIELD_PREP(IRDMA_CQPHC_ROCEV2_RTO_POLICY,
   3151				   cqp->rocev2_rto_policy) |
   3152			FIELD_PREP(IRDMA_CQPHC_PROTOCOL_USED,
   3153				   cqp->protocol_used);
   3154	}
   3155
   3156	set_64bit_val(cqp->host_ctx, 0, temp);
   3157	set_64bit_val(cqp->host_ctx, 8, cqp->sq_pa);
   3158
   3159	temp = FIELD_PREP(IRDMA_CQPHC_ENABLED_VFS, cqp->ena_vf_count) |
   3160	       FIELD_PREP(IRDMA_CQPHC_HMC_PROFILE, cqp->hmc_profile);
   3161	set_64bit_val(cqp->host_ctx, 16, temp);
   3162	set_64bit_val(cqp->host_ctx, 24, (uintptr_t)cqp);
   3163	temp = FIELD_PREP(IRDMA_CQPHC_HW_MAJVER, cqp->hw_maj_ver) |
   3164	       FIELD_PREP(IRDMA_CQPHC_HW_MINVER, cqp->hw_min_ver);
   3165	if (hw_rev >= IRDMA_GEN_2) {
   3166		temp |= FIELD_PREP(IRDMA_CQPHC_MIN_RATE, cqp->dcqcn_params.min_rate) |
   3167			FIELD_PREP(IRDMA_CQPHC_MIN_DEC_FACTOR, cqp->dcqcn_params.min_dec_factor);
   3168	}
   3169	set_64bit_val(cqp->host_ctx, 32, temp);
   3170	set_64bit_val(cqp->host_ctx, 40, 0);
   3171	temp = 0;
   3172	if (hw_rev >= IRDMA_GEN_2) {
   3173		temp |= FIELD_PREP(IRDMA_CQPHC_DCQCN_T, cqp->dcqcn_params.dcqcn_t) |
   3174			FIELD_PREP(IRDMA_CQPHC_RAI_FACTOR, cqp->dcqcn_params.rai_factor) |
   3175			FIELD_PREP(IRDMA_CQPHC_HAI_FACTOR, cqp->dcqcn_params.hai_factor);
   3176	}
   3177	set_64bit_val(cqp->host_ctx, 48, temp);
   3178	temp = 0;
   3179	if (hw_rev >= IRDMA_GEN_2) {
   3180		temp |= FIELD_PREP(IRDMA_CQPHC_DCQCN_B, cqp->dcqcn_params.dcqcn_b) |
   3181			FIELD_PREP(IRDMA_CQPHC_DCQCN_F, cqp->dcqcn_params.dcqcn_f) |
   3182			FIELD_PREP(IRDMA_CQPHC_CC_CFG_VALID, cqp->dcqcn_params.cc_cfg_valid) |
   3183			FIELD_PREP(IRDMA_CQPHC_RREDUCE_MPERIOD, cqp->dcqcn_params.rreduce_mperiod);
   3184	}
   3185	set_64bit_val(cqp->host_ctx, 56, temp);
   3186	print_hex_dump_debug("WQE: CQP_HOST_CTX WQE", DUMP_PREFIX_OFFSET, 16,
   3187			     8, cqp->host_ctx, IRDMA_CQP_CTX_SIZE * 8, false);
   3188	p1 = cqp->host_ctx_pa >> 32;
   3189	p2 = (u32)cqp->host_ctx_pa;
   3190
   3191	writel(p1, cqp->dev->hw_regs[IRDMA_CCQPHIGH]);
   3192	writel(p2, cqp->dev->hw_regs[IRDMA_CCQPLOW]);
   3193
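        	/*
        	 * Poll CCQPSTATUS until the hardware reports the CQP create as
        	 * done (or failed), giving up once the retry budget is exhausted.
        	 */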
   3194	do {
   3195		if (cnt++ > cqp->dev->hw_attrs.max_done_count) {
   3196			ret_code = -ETIMEDOUT;
   3197			goto err;
   3198		}
   3199		udelay(cqp->dev->hw_attrs.max_sleep_count);
   3200		val = readl(cqp->dev->hw_regs[IRDMA_CCQPSTATUS]);
   3201	} while (!val);
   3202
   3203	if (FLD_RS_32(cqp->dev, val, IRDMA_CCQPSTATUS_CCQP_ERR)) {
   3204		ret_code = -EOPNOTSUPP;
   3205		goto err;
   3206	}
   3207
   3208	cqp->process_cqp_sds = irdma_update_sds_noccq;
   3209	return 0;
   3210
   3211err:
   3212	dma_free_coherent(cqp->dev->hw->device, cqp->sdbuf.size,
   3213			  cqp->sdbuf.va, cqp->sdbuf.pa);
   3214	cqp->sdbuf.va = NULL;
   3215	err_code = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]);
   3216	*min_err = FIELD_GET(IRDMA_CQPERRCODES_CQP_MINOR_CODE, err_code);
   3217	*maj_err = FIELD_GET(IRDMA_CQPERRCODES_CQP_MAJOR_CODE, err_code);
   3218	return ret_code;
   3219}
   3220
   3221/**
    3222 * irdma_sc_cqp_post_sq - post to cqp's sq
   3223 * @cqp: struct for cqp hw
   3224 */
   3225void irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp)
   3226{
   3227	writel(IRDMA_RING_CURRENT_HEAD(cqp->sq_ring), cqp->dev->cqp_db);
   3228
   3229	ibdev_dbg(to_ibdev(cqp->dev),
   3230		  "WQE: CQP SQ head 0x%x tail 0x%x size 0x%x\n",
   3231		  cqp->sq_ring.head, cqp->sq_ring.tail, cqp->sq_ring.size);
   3232}
   3233
   3234/**
   3235 * irdma_sc_cqp_get_next_send_wqe_idx - get next wqe on cqp sq
   3236 * and pass back index
   3237 * @cqp: CQP HW structure
   3238 * @scratch: private data for CQP WQE
   3239 * @wqe_idx: WQE index of CQP SQ
   3240 */
   3241__le64 *irdma_sc_cqp_get_next_send_wqe_idx(struct irdma_sc_cqp *cqp, u64 scratch,
   3242					   u32 *wqe_idx)
   3243{
   3244	__le64 *wqe = NULL;
   3245	int ret_code;
   3246
   3247	if (IRDMA_RING_FULL_ERR(cqp->sq_ring)) {
   3248		ibdev_dbg(to_ibdev(cqp->dev),
   3249			  "WQE: CQP SQ is full, head 0x%x tail 0x%x size 0x%x\n",
   3250			  cqp->sq_ring.head, cqp->sq_ring.tail,
   3251			  cqp->sq_ring.size);
   3252		return NULL;
   3253	}
   3254	IRDMA_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, *wqe_idx, ret_code);
   3255	if (ret_code)
   3256		return NULL;
   3257
   3258	cqp->dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS]++;
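        	/* The valid-bit polarity flips each time the SQ ring wraps to index 0. */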
   3259	if (!*wqe_idx)
   3260		cqp->polarity = !cqp->polarity;
   3261	wqe = cqp->sq_base[*wqe_idx].elem;
   3262	cqp->scratch_array[*wqe_idx] = scratch;
   3263	IRDMA_CQP_INIT_WQE(wqe);
   3264
   3265	return wqe;
   3266}
   3267
   3268/**
   3269 * irdma_sc_cqp_destroy - destroy cqp during close
   3270 * @cqp: struct for cqp hw
   3271 */
   3272int irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp)
   3273{
   3274	u32 cnt = 0, val;
   3275	int ret_code = 0;
   3276
   3277	writel(0, cqp->dev->hw_regs[IRDMA_CCQPHIGH]);
   3278	writel(0, cqp->dev->hw_regs[IRDMA_CCQPLOW]);
   3279	do {
   3280		if (cnt++ > cqp->dev->hw_attrs.max_done_count) {
   3281			ret_code = -ETIMEDOUT;
   3282			break;
   3283		}
   3284		udelay(cqp->dev->hw_attrs.max_sleep_count);
   3285		val = readl(cqp->dev->hw_regs[IRDMA_CCQPSTATUS]);
   3286	} while (FLD_RS_32(cqp->dev, val, IRDMA_CCQPSTATUS_CCQP_DONE));
   3287
   3288	dma_free_coherent(cqp->dev->hw->device, cqp->sdbuf.size,
   3289			  cqp->sdbuf.va, cqp->sdbuf.pa);
   3290	cqp->sdbuf.va = NULL;
   3291	return ret_code;
   3292}
   3293
   3294/**
   3295 * irdma_sc_ccq_arm - enable intr for control cq
   3296 * @ccq: ccq sc struct
   3297 */
   3298void irdma_sc_ccq_arm(struct irdma_sc_cq *ccq)
   3299{
   3300	u64 temp_val;
   3301	u16 sw_cq_sel;
   3302	u8 arm_next_se;
   3303	u8 arm_seq_num;
   3304
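        	/*
        	 * Re-arm by bumping the arm sequence number in the CQ shadow
        	 * area and then ringing the CQ arm doorbell with the CQ id.
        	 */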
   3305	get_64bit_val(ccq->cq_uk.shadow_area, 32, &temp_val);
   3306	sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
   3307	arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
   3308	arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
   3309	arm_seq_num++;
   3310	temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
   3311		   FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
   3312		   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
   3313		   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, 1);
   3314	set_64bit_val(ccq->cq_uk.shadow_area, 32, temp_val);
   3315
   3316	dma_wmb(); /* make sure shadow area is updated before arming */
   3317
   3318	writel(ccq->cq_uk.cq_id, ccq->dev->cq_arm_db);
   3319}
   3320
   3321/**
   3322 * irdma_sc_ccq_get_cqe_info - get ccq's cq entry
   3323 * @ccq: ccq sc struct
   3324 * @info: completion q entry to return
   3325 */
   3326int irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
   3327			      struct irdma_ccq_cqe_info *info)
   3328{
   3329	u64 qp_ctx, temp, temp1;
   3330	__le64 *cqe;
   3331	struct irdma_sc_cqp *cqp;
   3332	u32 wqe_idx;
   3333	u32 error;
   3334	u8 polarity;
   3335	int ret_code = 0;
   3336
   3337	if (ccq->cq_uk.avoid_mem_cflct)
   3338		cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(&ccq->cq_uk);
   3339	else
   3340		cqe = IRDMA_GET_CURRENT_CQ_ELEM(&ccq->cq_uk);
   3341
   3342	get_64bit_val(cqe, 24, &temp);
   3343	polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, temp);
   3344	if (polarity != ccq->cq_uk.polarity)
   3345		return -ENOENT;
   3346
   3347	get_64bit_val(cqe, 8, &qp_ctx);
   3348	cqp = (struct irdma_sc_cqp *)(unsigned long)qp_ctx;
   3349	info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, temp);
   3350	info->maj_err_code = IRDMA_CQPSQ_MAJ_NO_ERROR;
   3351	info->min_err_code = (u16)FIELD_GET(IRDMA_CQ_MINERR, temp);
   3352	if (info->error) {
   3353		info->maj_err_code = (u16)FIELD_GET(IRDMA_CQ_MAJERR, temp);
   3354		error = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]);
   3355		ibdev_dbg(to_ibdev(cqp->dev),
   3356			  "CQP: CQPERRCODES error_code[x%08X]\n", error);
   3357	}
   3358
   3359	wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, temp);
   3360	info->scratch = cqp->scratch_array[wqe_idx];
   3361
   3362	get_64bit_val(cqe, 16, &temp1);
   3363	info->op_ret_val = (u32)FIELD_GET(IRDMA_CCQ_OPRETVAL, temp1);
   3364	get_64bit_val(cqp->sq_base[wqe_idx].elem, 24, &temp1);
   3365	info->op_code = (u8)FIELD_GET(IRDMA_CQPSQ_OPCODE, temp1);
   3366	info->cqp = cqp;
   3367
   3368	/*  move the head for cq */
   3369	IRDMA_RING_MOVE_HEAD(ccq->cq_uk.cq_ring, ret_code);
   3370	if (!IRDMA_RING_CURRENT_HEAD(ccq->cq_uk.cq_ring))
   3371		ccq->cq_uk.polarity ^= 1;
   3372
   3373	/* update cq tail in cq shadow memory also */
   3374	IRDMA_RING_MOVE_TAIL(ccq->cq_uk.cq_ring);
   3375	set_64bit_val(ccq->cq_uk.shadow_area, 0,
   3376		      IRDMA_RING_CURRENT_HEAD(ccq->cq_uk.cq_ring));
   3377
   3378	dma_wmb(); /* make sure shadow area is updated before moving tail */
   3379
   3380	IRDMA_RING_MOVE_TAIL(cqp->sq_ring);
   3381	ccq->dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]++;
   3382
   3383	return ret_code;
   3384}
   3385
   3386/**
   3387 * irdma_sc_poll_for_cqp_op_done - Waits for last write to complete in CQP SQ
   3388 * @cqp: struct for cqp hw
   3389 * @op_code: cqp opcode for completion
   3390 * @compl_info: completion q entry to return
   3391 */
   3392int irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u8 op_code,
   3393				  struct irdma_ccq_cqe_info *compl_info)
   3394{
   3395	struct irdma_ccq_cqe_info info = {};
   3396	struct irdma_sc_cq *ccq;
   3397	int ret_code = 0;
   3398	u32 cnt = 0;
   3399
   3400	ccq = cqp->dev->ccq;
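        	/*
        	 * Busy-poll the CCQ until a completion with the requested opcode
        	 * arrives or 100 * max_done_count attempts have been made.
        	 */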
   3401	while (1) {
   3402		if (cnt++ > 100 * cqp->dev->hw_attrs.max_done_count)
   3403			return -ETIMEDOUT;
   3404
   3405		if (irdma_sc_ccq_get_cqe_info(ccq, &info)) {
   3406			udelay(cqp->dev->hw_attrs.max_sleep_count);
   3407			continue;
   3408		}
   3409		if (info.error && info.op_code != IRDMA_CQP_OP_QUERY_STAG) {
   3410			ret_code = -EIO;
   3411			break;
   3412		}
    3413		/* make sure op code matches */
   3414		if (op_code == info.op_code)
   3415			break;
   3416		ibdev_dbg(to_ibdev(cqp->dev),
   3417			  "WQE: opcode mismatch for my op code 0x%x, returned opcode %x\n",
   3418			  op_code, info.op_code);
   3419	}
   3420
   3421	if (compl_info)
   3422		memcpy(compl_info, &info, sizeof(*compl_info));
   3423
   3424	return ret_code;
   3425}
   3426
   3427/**
    3428 * irdma_sc_manage_hmc_pm_func_table - manage hmc pm function table
   3429 * @cqp: struct for cqp hw
    3430 * @info: info for the manage function table operation
    3431 * @scratch: u64 saved to be used during cqp completion
   3432 * @post_sq: flag for cqp db to ring
   3433 */
   3434static int irdma_sc_manage_hmc_pm_func_table(struct irdma_sc_cqp *cqp,
   3435					     struct irdma_hmc_fcn_info *info,
   3436					     u64 scratch, bool post_sq)
   3437{
   3438	__le64 *wqe;
   3439	u64 hdr;
   3440
   3441	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
   3442	if (!wqe)
   3443		return -ENOMEM;
   3444
   3445	set_64bit_val(wqe, 0, 0);
   3446	set_64bit_val(wqe, 8, 0);
   3447	set_64bit_val(wqe, 16, 0);
   3448	set_64bit_val(wqe, 32, 0);
   3449	set_64bit_val(wqe, 40, 0);
   3450	set_64bit_val(wqe, 48, 0);
   3451	set_64bit_val(wqe, 56, 0);
   3452
   3453	hdr = FIELD_PREP(IRDMA_CQPSQ_MHMC_VFIDX, info->vf_id) |
   3454	      FIELD_PREP(IRDMA_CQPSQ_OPCODE,
   3455			 IRDMA_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE) |
   3456	      FIELD_PREP(IRDMA_CQPSQ_MHMC_FREEPMFN, info->free_fcn) |
   3457	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
   3458	dma_wmb(); /* make sure WQE is written before valid bit is set */
   3459
   3460	set_64bit_val(wqe, 24, hdr);
   3461
   3462	print_hex_dump_debug("WQE: MANAGE_HMC_PM_FUNC_TABLE WQE",
   3463			     DUMP_PREFIX_OFFSET, 16, 8, wqe,
   3464			     IRDMA_CQP_WQE_SIZE * 8, false);
   3465	if (post_sq)
   3466		irdma_sc_cqp_post_sq(cqp);
   3467
   3468	return 0;
   3469}
   3470
   3471/**
    3472 * irdma_sc_commit_fpm_val_done - wait for cqp cqe completion
   3473 * for fpm commit
   3474 * @cqp: struct for cqp hw
   3475 */
   3476static int irdma_sc_commit_fpm_val_done(struct irdma_sc_cqp *cqp)
   3477{
   3478	return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_COMMIT_FPM_VAL,
   3479					     NULL);
   3480}
   3481
   3482/**
   3483 * irdma_sc_commit_fpm_val - cqp wqe for commit fpm values
   3484 * @cqp: struct for cqp hw
   3485 * @scratch: u64 saved to be used during cqp completion
   3486 * @hmc_fn_id: hmc function id
   3487 * @commit_fpm_mem: Memory for fpm values
   3488 * @post_sq: flag for cqp db to ring
   3489 * @wait_type: poll ccq or cqp registers for cqp completion
   3490 */
   3491static int irdma_sc_commit_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch,
   3492				   u8 hmc_fn_id,
   3493				   struct irdma_dma_mem *commit_fpm_mem,
   3494				   bool post_sq, u8 wait_type)
   3495{
   3496	__le64 *wqe;
   3497	u64 hdr;
   3498	u32 tail, val, error;
   3499	int ret_code = 0;
   3500
   3501	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
   3502	if (!wqe)
   3503		return -ENOMEM;
   3504
   3505	set_64bit_val(wqe, 16, hmc_fn_id);
   3506	set_64bit_val(wqe, 32, commit_fpm_mem->pa);
   3507
   3508	hdr = FIELD_PREP(IRDMA_CQPSQ_BUFSIZE, IRDMA_COMMIT_FPM_BUF_SIZE) |
   3509	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_COMMIT_FPM_VAL) |
   3510	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
   3511
   3512	dma_wmb(); /* make sure WQE is written before valid bit is set */
   3513
   3514	set_64bit_val(wqe, 24, hdr);
   3515
   3516	print_hex_dump_debug("WQE: COMMIT_FPM_VAL WQE", DUMP_PREFIX_OFFSET,
   3517			     16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
   3518	irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
   3519
   3520	if (post_sq) {
   3521		irdma_sc_cqp_post_sq(cqp);
   3522		if (wait_type == IRDMA_CQP_WAIT_POLL_REGS)
   3523			ret_code = irdma_cqp_poll_registers(cqp, tail,
   3524							    cqp->dev->hw_attrs.max_done_count);
   3525		else if (wait_type == IRDMA_CQP_WAIT_POLL_CQ)
   3526			ret_code = irdma_sc_commit_fpm_val_done(cqp);
   3527	}
   3528
   3529	return ret_code;
   3530}
   3531
   3532/**
   3533 * irdma_sc_query_fpm_val_done - poll for cqp wqe completion for
   3534 * query fpm
   3535 * @cqp: struct for cqp hw
   3536 */
   3537static int irdma_sc_query_fpm_val_done(struct irdma_sc_cqp *cqp)
   3538{
   3539	return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_QUERY_FPM_VAL,
   3540					     NULL);
   3541}
   3542
   3543/**
   3544 * irdma_sc_query_fpm_val - cqp wqe query fpm values
   3545 * @cqp: struct for cqp hw
   3546 * @scratch: u64 saved to be used during cqp completion
   3547 * @hmc_fn_id: hmc function id
    3548 * @query_fpm_mem: memory for returned fpm values
   3549 * @post_sq: flag for cqp db to ring
   3550 * @wait_type: poll ccq or cqp registers for cqp completion
   3551 */
   3552static int irdma_sc_query_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch,
   3553				  u8 hmc_fn_id,
   3554				  struct irdma_dma_mem *query_fpm_mem,
   3555				  bool post_sq, u8 wait_type)
   3556{
   3557	__le64 *wqe;
   3558	u64 hdr;
   3559	u32 tail, val, error;
   3560	int ret_code = 0;
   3561
   3562	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
   3563	if (!wqe)
   3564		return -ENOMEM;
   3565
   3566	set_64bit_val(wqe, 16, hmc_fn_id);
   3567	set_64bit_val(wqe, 32, query_fpm_mem->pa);
   3568
   3569	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_QUERY_FPM_VAL) |
   3570	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
   3571	dma_wmb(); /* make sure WQE is written before valid bit is set */
   3572
   3573	set_64bit_val(wqe, 24, hdr);
   3574
   3575	print_hex_dump_debug("WQE: QUERY_FPM WQE", DUMP_PREFIX_OFFSET, 16, 8,
   3576			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
   3577	irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
   3578
   3579	if (post_sq) {
   3580		irdma_sc_cqp_post_sq(cqp);
   3581		if (wait_type == IRDMA_CQP_WAIT_POLL_REGS)
   3582			ret_code = irdma_cqp_poll_registers(cqp, tail,
   3583							    cqp->dev->hw_attrs.max_done_count);
   3584		else if (wait_type == IRDMA_CQP_WAIT_POLL_CQ)
   3585			ret_code = irdma_sc_query_fpm_val_done(cqp);
   3586	}
   3587
   3588	return ret_code;
   3589}
   3590
   3591/**
   3592 * irdma_sc_ceq_init - initialize ceq
   3593 * @ceq: ceq sc structure
   3594 * @info: ceq initialization info
   3595 */
   3596int irdma_sc_ceq_init(struct irdma_sc_ceq *ceq,
   3597		      struct irdma_ceq_init_info *info)
   3598{
   3599	u32 pble_obj_cnt;
   3600
   3601	if (info->elem_cnt < info->dev->hw_attrs.min_hw_ceq_size ||
   3602	    info->elem_cnt > info->dev->hw_attrs.max_hw_ceq_size)
   3603		return -EINVAL;
   3604
   3605	if (info->ceq_id >= info->dev->hmc_fpm_misc.max_ceqs)
   3606		return -EINVAL;
   3607	pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
   3608
   3609	if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
   3610		return -EINVAL;
   3611
   3612	ceq->size = sizeof(*ceq);
   3613	ceq->ceqe_base = (struct irdma_ceqe *)info->ceqe_base;
   3614	ceq->ceq_id = info->ceq_id;
   3615	ceq->dev = info->dev;
   3616	ceq->elem_cnt = info->elem_cnt;
   3617	ceq->ceq_elem_pa = info->ceqe_pa;
   3618	ceq->virtual_map = info->virtual_map;
   3619	ceq->itr_no_expire = info->itr_no_expire;
   3620	ceq->reg_cq = info->reg_cq;
   3621	ceq->reg_cq_size = 0;
   3622	spin_lock_init(&ceq->req_cq_lock);
   3623	ceq->pbl_chunk_size = (ceq->virtual_map ? info->pbl_chunk_size : 0);
   3624	ceq->first_pm_pbl_idx = (ceq->virtual_map ? info->first_pm_pbl_idx : 0);
   3625	ceq->pbl_list = (ceq->virtual_map ? info->pbl_list : NULL);
   3626	ceq->tph_en = info->tph_en;
   3627	ceq->tph_val = info->tph_val;
   3628	ceq->vsi = info->vsi;
   3629	ceq->polarity = 1;
   3630	IRDMA_RING_INIT(ceq->ceq_ring, ceq->elem_cnt);
   3631	ceq->dev->ceq[info->ceq_id] = ceq;
   3632
   3633	return 0;
   3634}
   3635
   3636/**
   3637 * irdma_sc_ceq_create - create ceq wqe
   3638 * @ceq: ceq sc structure
   3639 * @scratch: u64 saved to be used during cqp completion
   3640 * @post_sq: flag for cqp db to ring
   3641 */
   3642
   3643static int irdma_sc_ceq_create(struct irdma_sc_ceq *ceq, u64 scratch,
   3644			       bool post_sq)
   3645{
   3646	struct irdma_sc_cqp *cqp;
   3647	__le64 *wqe;
   3648	u64 hdr;
   3649
   3650	cqp = ceq->dev->cqp;
   3651	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
   3652	if (!wqe)
   3653		return -ENOMEM;
   3654	set_64bit_val(wqe, 16, ceq->elem_cnt);
   3655	set_64bit_val(wqe, 32,
   3656		      (ceq->virtual_map ? 0 : ceq->ceq_elem_pa));
   3657	set_64bit_val(wqe, 48,
   3658		      (ceq->virtual_map ? ceq->first_pm_pbl_idx : 0));
   3659	set_64bit_val(wqe, 56,
   3660		      FIELD_PREP(IRDMA_CQPSQ_TPHVAL, ceq->tph_val) |
   3661		      FIELD_PREP(IRDMA_CQPSQ_VSIIDX, ceq->vsi->vsi_idx));
   3662	hdr = FIELD_PREP(IRDMA_CQPSQ_CEQ_CEQID, ceq->ceq_id) |
   3663	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_CEQ) |
   3664	      FIELD_PREP(IRDMA_CQPSQ_CEQ_LPBLSIZE, ceq->pbl_chunk_size) |
   3665	      FIELD_PREP(IRDMA_CQPSQ_CEQ_VMAP, ceq->virtual_map) |
   3666	      FIELD_PREP(IRDMA_CQPSQ_CEQ_ITRNOEXPIRE, ceq->itr_no_expire) |
   3667	      FIELD_PREP(IRDMA_CQPSQ_TPHEN, ceq->tph_en) |
   3668	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
   3669	dma_wmb(); /* make sure WQE is written before valid bit is set */
   3670
   3671	set_64bit_val(wqe, 24, hdr);
   3672
   3673	print_hex_dump_debug("WQE: CEQ_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8,
   3674			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
   3675	if (post_sq)
   3676		irdma_sc_cqp_post_sq(cqp);
   3677
   3678	return 0;
   3679}
   3680
   3681/**
   3682 * irdma_sc_cceq_create_done - poll for control ceq wqe to complete
   3683 * @ceq: ceq sc structure
   3684 */
   3685static int irdma_sc_cceq_create_done(struct irdma_sc_ceq *ceq)
   3686{
   3687	struct irdma_sc_cqp *cqp;
   3688
   3689	cqp = ceq->dev->cqp;
   3690	return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_CREATE_CEQ,
   3691					     NULL);
   3692}
   3693
   3694/**
   3695 * irdma_sc_cceq_destroy_done - poll for destroy cceq to complete
   3696 * @ceq: ceq sc structure
   3697 */
   3698int irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq)
   3699{
   3700	struct irdma_sc_cqp *cqp;
   3701
   3702	if (ceq->reg_cq)
   3703		irdma_sc_remove_cq_ctx(ceq, ceq->dev->ccq);
   3704
   3705	cqp = ceq->dev->cqp;
   3706	cqp->process_cqp_sds = irdma_update_sds_noccq;
   3707
   3708	return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_DESTROY_CEQ,
   3709					     NULL);
   3710}
   3711
   3712/**
   3713 * irdma_sc_cceq_create - create cceq
   3714 * @ceq: ceq sc structure
   3715 * @scratch: u64 saved to be used during cqp completion
   3716 */
   3717int irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch)
   3718{
   3719	int ret_code;
   3720	struct irdma_sc_dev *dev = ceq->dev;
   3721
   3722	dev->ccq->vsi = ceq->vsi;
   3723	if (ceq->reg_cq) {
   3724		ret_code = irdma_sc_add_cq_ctx(ceq, ceq->dev->ccq);
   3725		if (ret_code)
   3726			return ret_code;
   3727	}
   3728
   3729	ret_code = irdma_sc_ceq_create(ceq, scratch, true);
   3730	if (!ret_code)
   3731		return irdma_sc_cceq_create_done(ceq);
   3732
   3733	return ret_code;
   3734}
   3735
   3736/**
   3737 * irdma_sc_ceq_destroy - destroy ceq
   3738 * @ceq: ceq sc structure
   3739 * @scratch: u64 saved to be used during cqp completion
   3740 * @post_sq: flag for cqp db to ring
   3741 */
   3742int irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratch, bool post_sq)
   3743{
   3744	struct irdma_sc_cqp *cqp;
   3745	__le64 *wqe;
   3746	u64 hdr;
   3747
   3748	cqp = ceq->dev->cqp;
   3749	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
   3750	if (!wqe)
   3751		return -ENOMEM;
   3752
   3753	set_64bit_val(wqe, 16, ceq->elem_cnt);
   3754	set_64bit_val(wqe, 48, ceq->first_pm_pbl_idx);
   3755	hdr = ceq->ceq_id |
   3756	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_CEQ) |
   3757	      FIELD_PREP(IRDMA_CQPSQ_CEQ_LPBLSIZE, ceq->pbl_chunk_size) |
   3758	      FIELD_PREP(IRDMA_CQPSQ_CEQ_VMAP, ceq->virtual_map) |
   3759	      FIELD_PREP(IRDMA_CQPSQ_TPHEN, ceq->tph_en) |
   3760	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
   3761	dma_wmb(); /* make sure WQE is written before valid bit is set */
   3762
   3763	set_64bit_val(wqe, 24, hdr);
   3764
   3765	print_hex_dump_debug("WQE: CEQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16,
   3766			     8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
   3767	if (post_sq)
   3768		irdma_sc_cqp_post_sq(cqp);
   3769
   3770	return 0;
   3771}
   3772
   3773/**
   3774 * irdma_sc_process_ceq - process ceq
   3775 * @dev: sc device struct
   3776 * @ceq: ceq sc structure
   3777 *
    3778 * It is expected the caller serializes this function with cleanup_ceqes()
   3779 * because these functions manipulate the same ceq
   3780 */
   3781void *irdma_sc_process_ceq(struct irdma_sc_dev *dev, struct irdma_sc_ceq *ceq)
   3782{
   3783	u64 temp;
   3784	__le64 *ceqe;
   3785	struct irdma_sc_cq *cq = NULL;
   3786	struct irdma_sc_cq *temp_cq;
   3787	u8 polarity;
   3788	u32 cq_idx;
   3789	unsigned long flags;
   3790
   3791	do {
   3792		cq_idx = 0;
   3793		ceqe = IRDMA_GET_CURRENT_CEQ_ELEM(ceq);
   3794		get_64bit_val(ceqe, 0, &temp);
   3795		polarity = (u8)FIELD_GET(IRDMA_CEQE_VALID, temp);
   3796		if (polarity != ceq->polarity)
   3797			return NULL;
   3798
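        		/*
        		 * The CQ context is programmed into the hardware shifted
        		 * right by one bit, so shift left to recover the pointer.
        		 */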
   3799		temp_cq = (struct irdma_sc_cq *)(unsigned long)(temp << 1);
   3800		if (!temp_cq) {
   3801			cq_idx = IRDMA_INVALID_CQ_IDX;
   3802			IRDMA_RING_MOVE_TAIL(ceq->ceq_ring);
   3803
   3804			if (!IRDMA_RING_CURRENT_TAIL(ceq->ceq_ring))
   3805				ceq->polarity ^= 1;
   3806			continue;
   3807		}
   3808
   3809		cq = temp_cq;
   3810		if (ceq->reg_cq) {
   3811			spin_lock_irqsave(&ceq->req_cq_lock, flags);
   3812			cq_idx = irdma_sc_find_reg_cq(ceq, cq);
   3813			spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
   3814		}
   3815
   3816		IRDMA_RING_MOVE_TAIL(ceq->ceq_ring);
   3817		if (!IRDMA_RING_CURRENT_TAIL(ceq->ceq_ring))
   3818			ceq->polarity ^= 1;
   3819	} while (cq_idx == IRDMA_INVALID_CQ_IDX);
   3820
   3821	if (cq)
   3822		irdma_sc_cq_ack(cq);
   3823	return cq;
   3824}
   3825
   3826/**
   3827 * irdma_sc_cleanup_ceqes - clear the valid ceqes ctx matching the cq
   3828 * @cq: cq for which the ceqes need to be cleaned up
   3829 * @ceq: ceq ptr
   3830 *
    3831 * The function is called after the cq is destroyed to clean up
    3832 * its pending ceqe entries. It is expected the caller serializes this
   3833 * function with process_ceq() in interrupt context.
   3834 */
   3835void irdma_sc_cleanup_ceqes(struct irdma_sc_cq *cq, struct irdma_sc_ceq *ceq)
   3836{
   3837	struct irdma_sc_cq *next_cq;
   3838	u8 ceq_polarity = ceq->polarity;
   3839	__le64 *ceqe;
   3840	u8 polarity;
   3841	u64 temp;
   3842	int next;
   3843	u32 i;
   3844
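        	/*
        	 * Walk forward from the current tail; any valid CEQE that points
        	 * at the CQ being destroyed has its CQ context cleared (only the
        	 * valid bit is kept) so process_ceq() will skip it.
        	 */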
   3845	next = IRDMA_RING_GET_NEXT_TAIL(ceq->ceq_ring, 0);
   3846
   3847	for (i = 1; i <= IRDMA_RING_SIZE(*ceq); i++) {
   3848		ceqe = IRDMA_GET_CEQ_ELEM_AT_POS(ceq, next);
   3849
   3850		get_64bit_val(ceqe, 0, &temp);
   3851		polarity = (u8)FIELD_GET(IRDMA_CEQE_VALID, temp);
   3852		if (polarity != ceq_polarity)
   3853			return;
   3854
   3855		next_cq = (struct irdma_sc_cq *)(unsigned long)(temp << 1);
   3856		if (cq == next_cq)
   3857			set_64bit_val(ceqe, 0, temp & IRDMA_CEQE_VALID);
   3858
   3859		next = IRDMA_RING_GET_NEXT_TAIL(ceq->ceq_ring, i);
   3860		if (!next)
   3861			ceq_polarity ^= 1;
   3862	}
   3863}
   3864
   3865/**
   3866 * irdma_sc_aeq_init - initialize aeq
   3867 * @aeq: aeq structure ptr
   3868 * @info: aeq initialization info
   3869 */
   3870int irdma_sc_aeq_init(struct irdma_sc_aeq *aeq,
   3871		      struct irdma_aeq_init_info *info)
   3872{
   3873	u32 pble_obj_cnt;
   3874
   3875	if (info->elem_cnt < info->dev->hw_attrs.min_hw_aeq_size ||
   3876	    info->elem_cnt > info->dev->hw_attrs.max_hw_aeq_size)
   3877		return -EINVAL;
   3878
   3879	pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
   3880
   3881	if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
   3882		return -EINVAL;
   3883
   3884	aeq->size = sizeof(*aeq);
   3885	aeq->polarity = 1;
   3886	aeq->aeqe_base = (struct irdma_sc_aeqe *)info->aeqe_base;
   3887	aeq->dev = info->dev;
   3888	aeq->elem_cnt = info->elem_cnt;
   3889	aeq->aeq_elem_pa = info->aeq_elem_pa;
   3890	IRDMA_RING_INIT(aeq->aeq_ring, aeq->elem_cnt);
   3891	aeq->virtual_map = info->virtual_map;
   3892	aeq->pbl_list = (aeq->virtual_map ? info->pbl_list : NULL);
   3893	aeq->pbl_chunk_size = (aeq->virtual_map ? info->pbl_chunk_size : 0);
   3894	aeq->first_pm_pbl_idx = (aeq->virtual_map ? info->first_pm_pbl_idx : 0);
   3895	aeq->msix_idx = info->msix_idx;
   3896	info->dev->aeq = aeq;
   3897
   3898	return 0;
   3899}
   3900
   3901/**
   3902 * irdma_sc_aeq_create - create aeq
   3903 * @aeq: aeq structure ptr
   3904 * @scratch: u64 saved to be used during cqp completion
   3905 * @post_sq: flag for cqp db to ring
   3906 */
   3907static int irdma_sc_aeq_create(struct irdma_sc_aeq *aeq, u64 scratch,
   3908			       bool post_sq)
   3909{
   3910	__le64 *wqe;
   3911	struct irdma_sc_cqp *cqp;
   3912	u64 hdr;
   3913
   3914	cqp = aeq->dev->cqp;
   3915	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
   3916	if (!wqe)
   3917		return -ENOMEM;
   3918	set_64bit_val(wqe, 16, aeq->elem_cnt);
   3919	set_64bit_val(wqe, 32,
   3920		      (aeq->virtual_map ? 0 : aeq->aeq_elem_pa));
   3921	set_64bit_val(wqe, 48,
   3922		      (aeq->virtual_map ? aeq->first_pm_pbl_idx : 0));
   3923
   3924	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_AEQ) |
   3925	      FIELD_PREP(IRDMA_CQPSQ_AEQ_LPBLSIZE, aeq->pbl_chunk_size) |
   3926	      FIELD_PREP(IRDMA_CQPSQ_AEQ_VMAP, aeq->virtual_map) |
   3927	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
   3928	dma_wmb(); /* make sure WQE is written before valid bit is set */
   3929
   3930	set_64bit_val(wqe, 24, hdr);
   3931
   3932	print_hex_dump_debug("WQE: AEQ_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8,
   3933			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
   3934	if (post_sq)
   3935		irdma_sc_cqp_post_sq(cqp);
   3936
   3937	return 0;
   3938}
   3939
   3940/**
   3941 * irdma_sc_aeq_destroy - destroy aeq during close
   3942 * @aeq: aeq structure ptr
   3943 * @scratch: u64 saved to be used during cqp completion
   3944 * @post_sq: flag for cqp db to ring
   3945 */
   3946static int irdma_sc_aeq_destroy(struct irdma_sc_aeq *aeq, u64 scratch,
   3947				bool post_sq)
   3948{
   3949	__le64 *wqe;
   3950	struct irdma_sc_cqp *cqp;
   3951	struct irdma_sc_dev *dev;
   3952	u64 hdr;
   3953
   3954	dev = aeq->dev;
   3955	writel(0, dev->hw_regs[IRDMA_PFINT_AEQCTL]);
   3956
   3957	cqp = dev->cqp;
   3958	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
   3959	if (!wqe)
   3960		return -ENOMEM;
   3961	set_64bit_val(wqe, 16, aeq->elem_cnt);
   3962	set_64bit_val(wqe, 48, aeq->first_pm_pbl_idx);
   3963	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_AEQ) |
   3964	      FIELD_PREP(IRDMA_CQPSQ_AEQ_LPBLSIZE, aeq->pbl_chunk_size) |
   3965	      FIELD_PREP(IRDMA_CQPSQ_AEQ_VMAP, aeq->virtual_map) |
   3966	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
   3967	dma_wmb(); /* make sure WQE is written before valid bit is set */
   3968
   3969	set_64bit_val(wqe, 24, hdr);
   3970
   3971	print_hex_dump_debug("WQE: AEQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16,
   3972			     8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
   3973	if (post_sq)
   3974		irdma_sc_cqp_post_sq(cqp);
   3975	return 0;
   3976}
   3977
   3978/**
   3979 * irdma_sc_get_next_aeqe - get next aeq entry
   3980 * @aeq: aeq structure ptr
   3981 * @info: aeqe info to be returned
   3982 */
   3983int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
   3984			   struct irdma_aeqe_info *info)
   3985{
   3986	u64 temp, compl_ctx;
   3987	__le64 *aeqe;
   3988	u16 wqe_idx;
   3989	u8 ae_src;
   3990	u8 polarity;
   3991
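        	/*
        	 * An AEQE is 16 bytes: the first quadword holds the completion
        	 * context, the second the event code, source and state flags.
        	 */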
   3992	aeqe = IRDMA_GET_CURRENT_AEQ_ELEM(aeq);
   3993	get_64bit_val(aeqe, 0, &compl_ctx);
   3994	get_64bit_val(aeqe, 8, &temp);
   3995	polarity = (u8)FIELD_GET(IRDMA_AEQE_VALID, temp);
   3996
   3997	if (aeq->polarity != polarity)
   3998		return -ENOENT;
   3999
   4000	print_hex_dump_debug("WQE: AEQ_ENTRY WQE", DUMP_PREFIX_OFFSET, 16, 8,
   4001			     aeqe, 16, false);
   4002
   4003	ae_src = (u8)FIELD_GET(IRDMA_AEQE_AESRC, temp);
   4004	wqe_idx = (u16)FIELD_GET(IRDMA_AEQE_WQDESCIDX, temp);
   4005	info->qp_cq_id = (u32)FIELD_GET(IRDMA_AEQE_QPCQID_LOW, temp) |
   4006			 ((u32)FIELD_GET(IRDMA_AEQE_QPCQID_HI, temp) << 18);
   4007	info->ae_id = (u16)FIELD_GET(IRDMA_AEQE_AECODE, temp);
   4008	info->tcp_state = (u8)FIELD_GET(IRDMA_AEQE_TCPSTATE, temp);
   4009	info->iwarp_state = (u8)FIELD_GET(IRDMA_AEQE_IWSTATE, temp);
   4010	info->q2_data_written = (u8)FIELD_GET(IRDMA_AEQE_Q2DATA, temp);
   4011	info->aeqe_overflow = (bool)FIELD_GET(IRDMA_AEQE_OVERFLOW, temp);
   4012
   4013	info->ae_src = ae_src;
   4014	switch (info->ae_id) {
   4015	case IRDMA_AE_PRIV_OPERATION_DENIED:
   4016	case IRDMA_AE_AMP_INVALIDATE_TYPE1_MW:
   4017	case IRDMA_AE_AMP_MWBIND_ZERO_BASED_TYPE1_MW:
   4018	case IRDMA_AE_AMP_FASTREG_INVALID_PBL_HPS_CFG:
   4019	case IRDMA_AE_AMP_FASTREG_PBLE_MISMATCH:
   4020	case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
   4021	case IRDMA_AE_UDA_XMIT_BAD_PD:
   4022	case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT:
   4023	case IRDMA_AE_BAD_CLOSE:
   4024	case IRDMA_AE_RDMA_READ_WHILE_ORD_ZERO:
   4025	case IRDMA_AE_STAG_ZERO_INVALID:
   4026	case IRDMA_AE_IB_RREQ_AND_Q1_FULL:
   4027	case IRDMA_AE_IB_INVALID_REQUEST:
   4028	case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
   4029	case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
   4030	case IRDMA_AE_IB_REMOTE_OP_ERROR:
   4031	case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION:
   4032	case IRDMA_AE_DDP_UBE_INVALID_MO:
   4033	case IRDMA_AE_DDP_UBE_INVALID_QN:
   4034	case IRDMA_AE_DDP_NO_L_BIT:
   4035	case IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
   4036	case IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
   4037	case IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST:
   4038	case IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
   4039	case IRDMA_AE_ROCE_RSP_LENGTH_ERROR:
   4040	case IRDMA_AE_INVALID_ARP_ENTRY:
   4041	case IRDMA_AE_INVALID_TCP_OPTION_RCVD:
   4042	case IRDMA_AE_STALE_ARP_ENTRY:
   4043	case IRDMA_AE_INVALID_AH_ENTRY:
   4044	case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
   4045	case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
   4046	case IRDMA_AE_LLP_TOO_MANY_RETRIES:
   4047	case IRDMA_AE_LLP_DOUBT_REACHABILITY:
   4048	case IRDMA_AE_LLP_CONNECTION_ESTABLISHED:
   4049	case IRDMA_AE_RESET_SENT:
   4050	case IRDMA_AE_TERMINATE_SENT:
   4051	case IRDMA_AE_RESET_NOT_SENT:
   4052	case IRDMA_AE_LCE_QP_CATASTROPHIC:
   4053	case IRDMA_AE_QP_SUSPEND_COMPLETE:
   4054	case IRDMA_AE_UDA_L4LEN_INVALID:
   4055		info->qp = true;
   4056		info->compl_ctx = compl_ctx;
   4057		break;
   4058	case IRDMA_AE_LCE_CQ_CATASTROPHIC:
   4059		info->cq = true;
   4060		info->compl_ctx = compl_ctx << 1;
   4061		ae_src = IRDMA_AE_SOURCE_RSVD;
   4062		break;
   4063	case IRDMA_AE_ROCE_EMPTY_MCG:
   4064	case IRDMA_AE_ROCE_BAD_MC_IP_ADDR:
   4065	case IRDMA_AE_ROCE_BAD_MC_QPID:
   4066	case IRDMA_AE_MCG_QP_PROTOCOL_MISMATCH:
   4067		fallthrough;
   4068	case IRDMA_AE_LLP_CONNECTION_RESET:
   4069	case IRDMA_AE_LLP_SYN_RECEIVED:
   4070	case IRDMA_AE_LLP_FIN_RECEIVED:
   4071	case IRDMA_AE_LLP_CLOSE_COMPLETE:
   4072	case IRDMA_AE_LLP_TERMINATE_RECEIVED:
   4073	case IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE:
   4074		ae_src = IRDMA_AE_SOURCE_RSVD;
   4075		info->qp = true;
   4076		info->compl_ctx = compl_ctx;
   4077		break;
   4078	default:
   4079		break;
   4080	}
   4081
   4082	switch (ae_src) {
   4083	case IRDMA_AE_SOURCE_RQ:
   4084	case IRDMA_AE_SOURCE_RQ_0011:
   4085		info->qp = true;
   4086		info->rq = true;
   4087		info->wqe_idx = wqe_idx;
   4088		info->compl_ctx = compl_ctx;
   4089		break;
   4090	case IRDMA_AE_SOURCE_CQ:
   4091	case IRDMA_AE_SOURCE_CQ_0110:
   4092	case IRDMA_AE_SOURCE_CQ_1010:
   4093	case IRDMA_AE_SOURCE_CQ_1110:
   4094		info->cq = true;
   4095		info->compl_ctx = compl_ctx << 1;
   4096		break;
   4097	case IRDMA_AE_SOURCE_SQ:
   4098	case IRDMA_AE_SOURCE_SQ_0111:
   4099		info->qp = true;
   4100		info->sq = true;
   4101		info->wqe_idx = wqe_idx;
   4102		info->compl_ctx = compl_ctx;
   4103		break;
   4104	case IRDMA_AE_SOURCE_IN_RR_WR:
   4105	case IRDMA_AE_SOURCE_IN_RR_WR_1011:
   4106		info->qp = true;
   4107		info->compl_ctx = compl_ctx;
   4108		info->in_rdrsp_wr = true;
   4109		break;
   4110	case IRDMA_AE_SOURCE_OUT_RR:
   4111	case IRDMA_AE_SOURCE_OUT_RR_1111:
   4112		info->qp = true;
   4113		info->compl_ctx = compl_ctx;
   4114		info->out_rdrsp = true;
   4115		break;
   4116	case IRDMA_AE_SOURCE_RSVD:
   4117	default:
   4118		break;
   4119	}
   4120
   4121	IRDMA_RING_MOVE_TAIL(aeq->aeq_ring);
   4122	if (!IRDMA_RING_CURRENT_TAIL(aeq->aeq_ring))
   4123		aeq->polarity ^= 1;
   4124
   4125	return 0;
   4126}
   4127
   4128/**
   4129 * irdma_sc_repost_aeq_entries - repost completed aeq entries
   4130 * @dev: sc device struct
   4131 * @count: allocate count
   4132 */
   4133void irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count)
   4134{
   4135	writel(count, dev->hw_regs[IRDMA_AEQALLOC]);
   4136}
   4137
   4138/**
   4139 * irdma_sc_ccq_init - initialize control cq
    4140 * @cq: sc's cq struct
   4141 * @info: info for control cq initialization
   4142 */
   4143int irdma_sc_ccq_init(struct irdma_sc_cq *cq, struct irdma_ccq_init_info *info)
   4144{
   4145	u32 pble_obj_cnt;
   4146
   4147	if (info->num_elem < info->dev->hw_attrs.uk_attrs.min_hw_cq_size ||
   4148	    info->num_elem > info->dev->hw_attrs.uk_attrs.max_hw_cq_size)
   4149		return -EINVAL;
   4150
   4151	if (info->ceq_id >= info->dev->hmc_fpm_misc.max_ceqs)
   4152		return -EINVAL;
   4153
   4154	pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
   4155
   4156	if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
   4157		return -EINVAL;
   4158
   4159	cq->cq_pa = info->cq_pa;
   4160	cq->cq_uk.cq_base = info->cq_base;
   4161	cq->shadow_area_pa = info->shadow_area_pa;
   4162	cq->cq_uk.shadow_area = info->shadow_area;
   4163	cq->shadow_read_threshold = info->shadow_read_threshold;
   4164	cq->dev = info->dev;
   4165	cq->ceq_id = info->ceq_id;
   4166	cq->cq_uk.cq_size = info->num_elem;
   4167	cq->cq_type = IRDMA_CQ_TYPE_CQP;
   4168	cq->ceqe_mask = info->ceqe_mask;
   4169	IRDMA_RING_INIT(cq->cq_uk.cq_ring, info->num_elem);
   4170	cq->cq_uk.cq_id = 0; /* control cq is id 0 always */
   4171	cq->ceq_id_valid = info->ceq_id_valid;
   4172	cq->tph_en = info->tph_en;
   4173	cq->tph_val = info->tph_val;
   4174	cq->cq_uk.avoid_mem_cflct = info->avoid_mem_cflct;
   4175	cq->pbl_list = info->pbl_list;
   4176	cq->virtual_map = info->virtual_map;
   4177	cq->pbl_chunk_size = info->pbl_chunk_size;
   4178	cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
   4179	cq->cq_uk.polarity = true;
   4180	cq->vsi = info->vsi;
   4181	cq->cq_uk.cq_ack_db = cq->dev->cq_ack_db;
   4182
   4183	/* Only applicable to CQs other than CCQ so initialize to zero */
   4184	cq->cq_uk.cqe_alloc_db = NULL;
   4185
   4186	info->dev->ccq = cq;
   4187	return 0;
   4188}
   4189
   4190/**
   4191 * irdma_sc_ccq_create_done - poll cqp for ccq create
   4192 * @ccq: ccq sc struct
   4193 */
   4194static inline int irdma_sc_ccq_create_done(struct irdma_sc_cq *ccq)
   4195{
   4196	struct irdma_sc_cqp *cqp;
   4197
   4198	cqp = ccq->dev->cqp;
   4199
   4200	return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_CREATE_CQ, NULL);
   4201}
   4202
   4203/**
   4204 * irdma_sc_ccq_create - create control cq
   4205 * @ccq: ccq sc struct
   4206 * @scratch: u64 saved to be used during cqp completion
    4207 * @check_overflow: overflow flag for ccq
   4208 * @post_sq: flag for cqp db to ring
   4209 */
   4210int irdma_sc_ccq_create(struct irdma_sc_cq *ccq, u64 scratch,
   4211			bool check_overflow, bool post_sq)
   4212{
   4213	int ret_code;
   4214
   4215	ret_code = irdma_sc_cq_create(ccq, scratch, check_overflow, post_sq);
   4216	if (ret_code)
   4217		return ret_code;
   4218
   4219	if (post_sq) {
   4220		ret_code = irdma_sc_ccq_create_done(ccq);
   4221		if (ret_code)
   4222			return ret_code;
   4223	}
   4224	ccq->dev->cqp->process_cqp_sds = irdma_cqp_sds_cmd;
   4225
   4226	return 0;
   4227}
   4228
   4229/**
   4230 * irdma_sc_ccq_destroy - destroy ccq during close
   4231 * @ccq: ccq sc struct
   4232 * @scratch: u64 saved to be used during cqp completion
   4233 * @post_sq: flag for cqp db to ring
   4234 */
   4235int irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch, bool post_sq)
   4236{
   4237	struct irdma_sc_cqp *cqp;
   4238	__le64 *wqe;
   4239	u64 hdr;
   4240	int ret_code = 0;
   4241	u32 tail, val, error;
   4242
   4243	cqp = ccq->dev->cqp;
   4244	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
   4245	if (!wqe)
   4246		return -ENOMEM;
   4247
   4248	set_64bit_val(wqe, 0, ccq->cq_uk.cq_size);
   4249	set_64bit_val(wqe, 8, (uintptr_t)ccq >> 1);
   4250	set_64bit_val(wqe, 40, ccq->shadow_area_pa);
   4251
   4252	hdr = ccq->cq_uk.cq_id |
   4253	      FLD_LS_64(ccq->dev, (ccq->ceq_id_valid ? ccq->ceq_id : 0),
   4254			IRDMA_CQPSQ_CQ_CEQID) |
   4255	      FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_CQ) |
   4256	      FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, ccq->ceqe_mask) |
   4257	      FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, ccq->ceq_id_valid) |
   4258	      FIELD_PREP(IRDMA_CQPSQ_TPHEN, ccq->tph_en) |
   4259	      FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT, ccq->cq_uk.avoid_mem_cflct) |
   4260	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
   4261	dma_wmb(); /* make sure WQE is written before valid bit is set */
   4262
   4263	set_64bit_val(wqe, 24, hdr);
   4264
   4265	print_hex_dump_debug("WQE: CCQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16,
   4266			     8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
   4267	irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
   4268
   4269	if (post_sq) {
   4270		irdma_sc_cqp_post_sq(cqp);
   4271		ret_code = irdma_cqp_poll_registers(cqp, tail,
   4272						    cqp->dev->hw_attrs.max_done_count);
   4273	}
   4274
   4275	cqp->process_cqp_sds = irdma_update_sds_noccq;
   4276
   4277	return ret_code;
   4278}
   4279
   4280/**
   4281 * irdma_sc_init_iw_hmc() - queries fpm values using cqp and populates hmc_info
    4282 * @dev: ptr to irdma_dev struct
   4283 * @hmc_fn_id: hmc function id
   4284 */
   4285int irdma_sc_init_iw_hmc(struct irdma_sc_dev *dev, u8 hmc_fn_id)
   4286{
   4287	struct irdma_hmc_info *hmc_info;
   4288	struct irdma_hmc_fpm_misc *hmc_fpm_misc;
   4289	struct irdma_dma_mem query_fpm_mem;
   4290	int ret_code = 0;
   4291	u8 wait_type;
   4292
   4293	hmc_info = dev->hmc_info;
   4294	hmc_fpm_misc = &dev->hmc_fpm_misc;
   4295	query_fpm_mem.pa = dev->fpm_query_buf_pa;
   4296	query_fpm_mem.va = dev->fpm_query_buf;
   4297	hmc_info->hmc_fn_id = hmc_fn_id;
   4298	wait_type = (u8)IRDMA_CQP_WAIT_POLL_REGS;
   4299
   4300	ret_code = irdma_sc_query_fpm_val(dev->cqp, 0, hmc_info->hmc_fn_id,
   4301					  &query_fpm_mem, true, wait_type);
   4302	if (ret_code)
   4303		return ret_code;
   4304
   4305	/* parse the fpm_query_buf and fill hmc obj info */
   4306	ret_code = irdma_sc_parse_fpm_query_buf(dev, query_fpm_mem.va, hmc_info,
   4307						hmc_fpm_misc);
   4308
   4309	print_hex_dump_debug("HMC: QUERY FPM BUFFER", DUMP_PREFIX_OFFSET, 16,
   4310			     8, query_fpm_mem.va, IRDMA_QUERY_FPM_BUF_SIZE,
   4311			     false);
   4312	return ret_code;
   4313}
   4314
   4315/**
   4316 * irdma_sc_cfg_iw_fpm() - commits hmc obj cnt values using cqp
   4317 * command and populates fpm base address in hmc_info
    4318 * @dev: ptr to irdma_dev struct
   4319 * @hmc_fn_id: hmc function id
   4320 */
   4321static int irdma_sc_cfg_iw_fpm(struct irdma_sc_dev *dev, u8 hmc_fn_id)
   4322{
   4323	struct irdma_hmc_info *hmc_info;
   4324	struct irdma_hmc_obj_info *obj_info;
   4325	__le64 *buf;
   4326	struct irdma_dma_mem commit_fpm_mem;
   4327	int ret_code = 0;
   4328	u8 wait_type;
   4329
   4330	hmc_info = dev->hmc_info;
   4331	obj_info = hmc_info->hmc_obj;
   4332	buf = dev->fpm_commit_buf;
   4333
   4334	set_64bit_val(buf, 0, (u64)obj_info[IRDMA_HMC_IW_QP].cnt);
   4335	set_64bit_val(buf, 8, (u64)obj_info[IRDMA_HMC_IW_CQ].cnt);
   4336	set_64bit_val(buf, 16, (u64)0); /* RSRVD */
   4337	set_64bit_val(buf, 24, (u64)obj_info[IRDMA_HMC_IW_HTE].cnt);
   4338	set_64bit_val(buf, 32, (u64)obj_info[IRDMA_HMC_IW_ARP].cnt);
   4339	set_64bit_val(buf, 40, (u64)0); /* RSVD */
   4340	set_64bit_val(buf, 48, (u64)obj_info[IRDMA_HMC_IW_MR].cnt);
   4341	set_64bit_val(buf, 56, (u64)obj_info[IRDMA_HMC_IW_XF].cnt);
   4342	set_64bit_val(buf, 64, (u64)obj_info[IRDMA_HMC_IW_XFFL].cnt);
   4343	set_64bit_val(buf, 72, (u64)obj_info[IRDMA_HMC_IW_Q1].cnt);
   4344	set_64bit_val(buf, 80, (u64)obj_info[IRDMA_HMC_IW_Q1FL].cnt);
   4345	set_64bit_val(buf, 88,
   4346		      (u64)obj_info[IRDMA_HMC_IW_TIMER].cnt);
   4347	set_64bit_val(buf, 96,
   4348		      (u64)obj_info[IRDMA_HMC_IW_FSIMC].cnt);
   4349	set_64bit_val(buf, 104,
   4350		      (u64)obj_info[IRDMA_HMC_IW_FSIAV].cnt);
   4351	set_64bit_val(buf, 112,
   4352		      (u64)obj_info[IRDMA_HMC_IW_PBLE].cnt);
   4353	set_64bit_val(buf, 120, (u64)0); /* RSVD */
   4354	set_64bit_val(buf, 128, (u64)obj_info[IRDMA_HMC_IW_RRF].cnt);
   4355	set_64bit_val(buf, 136,
   4356		      (u64)obj_info[IRDMA_HMC_IW_RRFFL].cnt);
   4357	set_64bit_val(buf, 144, (u64)obj_info[IRDMA_HMC_IW_HDR].cnt);
   4358	set_64bit_val(buf, 152, (u64)obj_info[IRDMA_HMC_IW_MD].cnt);
   4359	set_64bit_val(buf, 160,
   4360		      (u64)obj_info[IRDMA_HMC_IW_OOISC].cnt);
   4361	set_64bit_val(buf, 168,
   4362		      (u64)obj_info[IRDMA_HMC_IW_OOISCFFL].cnt);
   4363
   4364	commit_fpm_mem.pa = dev->fpm_commit_buf_pa;
   4365	commit_fpm_mem.va = dev->fpm_commit_buf;
   4366
   4367	wait_type = (u8)IRDMA_CQP_WAIT_POLL_REGS;
   4368	print_hex_dump_debug("HMC: COMMIT FPM BUFFER", DUMP_PREFIX_OFFSET, 16,
   4369			     8, commit_fpm_mem.va, IRDMA_COMMIT_FPM_BUF_SIZE,
   4370			     false);
   4371	ret_code = irdma_sc_commit_fpm_val(dev->cqp, 0, hmc_info->hmc_fn_id,
   4372					   &commit_fpm_mem, true, wait_type);
   4373	if (!ret_code)
   4374		irdma_sc_parse_fpm_commit_buf(dev, dev->fpm_commit_buf,
   4375					      hmc_info->hmc_obj,
   4376					      &hmc_info->sd_table.sd_cnt);
   4377	print_hex_dump_debug("HMC: COMMIT FPM BUFFER", DUMP_PREFIX_OFFSET, 16,
   4378			     8, commit_fpm_mem.va, IRDMA_COMMIT_FPM_BUF_SIZE,
   4379			     false);
   4380
   4381	return ret_code;
   4382}
   4383
   4384/**
    4385 * cqp_sds_wqe_fill - fill cqp wqe for sd
   4386 * @cqp: struct for cqp hw
   4387 * @info: sd info for wqe
   4388 * @scratch: u64 saved to be used during cqp completion
   4389 */
   4390static int cqp_sds_wqe_fill(struct irdma_sc_cqp *cqp,
   4391			    struct irdma_update_sds_info *info, u64 scratch)
   4392{
   4393	u64 data;
   4394	u64 hdr;
   4395	__le64 *wqe;
   4396	int mem_entries, wqe_entries;
   4397	struct irdma_dma_mem *sdbuf = &cqp->sdbuf;
   4398	u64 offset = 0;
   4399	u32 wqe_idx;
   4400
   4401	wqe = irdma_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
   4402	if (!wqe)
   4403		return -ENOMEM;
   4404
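        	/*
        	 * Up to three SD entries travel inline in the WQE; any remaining
        	 * entries are staged in the pre-allocated sdbuf and referenced by
        	 * physical address.
        	 */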
   4405	wqe_entries = (info->cnt > 3) ? 3 : info->cnt;
   4406	mem_entries = info->cnt - wqe_entries;
   4407
   4408	if (mem_entries) {
   4409		offset = wqe_idx * IRDMA_UPDATE_SD_BUFF_SIZE;
   4410		memcpy(((char *)sdbuf->va + offset), &info->entry[3], mem_entries << 4);
   4411
   4412		data = (u64)sdbuf->pa + offset;
   4413	} else {
   4414		data = 0;
   4415	}
   4416	data |= FIELD_PREP(IRDMA_CQPSQ_UPESD_HMCFNID, info->hmc_fn_id);
   4417	set_64bit_val(wqe, 16, data);
   4418
   4419	switch (wqe_entries) {
   4420	case 3:
   4421		set_64bit_val(wqe, 48,
   4422			      (FIELD_PREP(IRDMA_CQPSQ_UPESD_SDCMD, info->entry[2].cmd) |
   4423			       FIELD_PREP(IRDMA_CQPSQ_UPESD_ENTRY_VALID, 1)));
   4424
   4425		set_64bit_val(wqe, 56, info->entry[2].data);
   4426		fallthrough;
   4427	case 2:
   4428		set_64bit_val(wqe, 32,
   4429			      (FIELD_PREP(IRDMA_CQPSQ_UPESD_SDCMD, info->entry[1].cmd) |
   4430			       FIELD_PREP(IRDMA_CQPSQ_UPESD_ENTRY_VALID, 1)));
   4431
   4432		set_64bit_val(wqe, 40, info->entry[1].data);
   4433		fallthrough;
   4434	case 1:
   4435		set_64bit_val(wqe, 0,
   4436			      FIELD_PREP(IRDMA_CQPSQ_UPESD_SDCMD, info->entry[0].cmd));
   4437
   4438		set_64bit_val(wqe, 8, info->entry[0].data);
   4439		break;
   4440	default:
   4441		break;
   4442	}
   4443
   4444	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_UPDATE_PE_SDS) |
   4445	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity) |
   4446	      FIELD_PREP(IRDMA_CQPSQ_UPESD_ENTRY_COUNT, mem_entries);
   4447	dma_wmb(); /* make sure WQE is written before valid bit is set */
   4448
   4449	set_64bit_val(wqe, 24, hdr);
   4450
   4451	if (mem_entries)
   4452		print_hex_dump_debug("WQE: UPDATE_PE_SDS WQE Buffer",
   4453				     DUMP_PREFIX_OFFSET, 16, 8,
   4454				     (char *)sdbuf->va + offset,
   4455				     mem_entries << 4, false);
   4456
   4457	print_hex_dump_debug("WQE: UPDATE_PE_SDS WQE", DUMP_PREFIX_OFFSET, 16,
   4458			     8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
   4459
   4460	return 0;
   4461}
   4462
   4463/**
   4464 * irdma_update_pe_sds - cqp wqe for sd
   4465 * @dev: ptr to irdma_dev struct
   4466 * @info: sd info for sd's
   4467 * @scratch: u64 saved to be used during cqp completion
   4468 */
   4469static int irdma_update_pe_sds(struct irdma_sc_dev *dev,
   4470			       struct irdma_update_sds_info *info, u64 scratch)
   4471{
   4472	struct irdma_sc_cqp *cqp = dev->cqp;
   4473	int ret_code;
   4474
   4475	ret_code = cqp_sds_wqe_fill(cqp, info, scratch);
   4476	if (!ret_code)
   4477		irdma_sc_cqp_post_sq(cqp);
   4478
   4479	return ret_code;
   4480}
   4481
   4482/**
   4483 * irdma_update_sds_noccq - update sd before ccq created
   4484 * @dev: sc device struct
   4485 * @info: sd info for sd's
   4486 */
   4487int irdma_update_sds_noccq(struct irdma_sc_dev *dev,
   4488			   struct irdma_update_sds_info *info)
   4489{
   4490	u32 error, val, tail;
   4491	struct irdma_sc_cqp *cqp = dev->cqp;
   4492	int ret_code;
   4493
   4494	ret_code = cqp_sds_wqe_fill(cqp, info, 0);
   4495	if (ret_code)
   4496		return ret_code;
   4497
   4498	irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
   4499
   4500	irdma_sc_cqp_post_sq(cqp);
   4501	return irdma_cqp_poll_registers(cqp, tail,
   4502					cqp->dev->hw_attrs.max_done_count);
   4503}
   4504
   4505/**
   4506 * irdma_sc_static_hmc_pages_allocated - cqp wqe to allocate hmc pages
   4507 * @cqp: struct for cqp hw
   4508 * @scratch: u64 saved to be used during cqp completion
   4509 * @hmc_fn_id: hmc function id
   4510 * @post_sq: flag for cqp db to ring
   4511 * @poll_registers: flag to poll register for cqp completion
   4512 */
   4513int irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
   4514					u8 hmc_fn_id, bool post_sq,
   4515					bool poll_registers)
   4516{
   4517	u64 hdr;
   4518	__le64 *wqe;
   4519	u32 tail, val, error;
   4520
   4521	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
   4522	if (!wqe)
   4523		return -ENOMEM;
   4524
   4525	set_64bit_val(wqe, 16,
   4526		      FIELD_PREP(IRDMA_SHMC_PAGE_ALLOCATED_HMC_FN_ID, hmc_fn_id));
   4527
   4528	hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE,
   4529			 IRDMA_CQP_OP_SHMC_PAGES_ALLOCATED) |
   4530	      FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
   4531	dma_wmb(); /* make sure WQE is written before valid bit is set */
   4532
   4533	set_64bit_val(wqe, 24, hdr);
   4534
   4535	print_hex_dump_debug("WQE: SHMC_PAGES_ALLOCATED WQE",
   4536			     DUMP_PREFIX_OFFSET, 16, 8, wqe,
   4537			     IRDMA_CQP_WQE_SIZE * 8, false);
   4538	irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
   4539
   4540	if (post_sq) {
   4541		irdma_sc_cqp_post_sq(cqp);
   4542		if (poll_registers)
   4543			/* check for cqp sq tail update */
   4544			return irdma_cqp_poll_registers(cqp, tail,
   4545							cqp->dev->hw_attrs.max_done_count);
   4546		else
   4547			return irdma_sc_poll_for_cqp_op_done(cqp,
   4548							     IRDMA_CQP_OP_SHMC_PAGES_ALLOCATED,
   4549							     NULL);
   4550	}
   4551
   4552	return 0;
   4553}
   4554
   4555/**
   4556 * irdma_cqp_ring_full - check if cqp ring is full
   4557 * @cqp: struct for cqp hw
   4558 */
   4559static bool irdma_cqp_ring_full(struct irdma_sc_cqp *cqp)
   4560{
   4561	return IRDMA_RING_FULL_ERR(cqp->sq_ring);
   4562}
   4563
   4564/**
   4565 * irdma_est_sd - returns approximate number of SDs for HMC
   4566 * @dev: sc device struct
   4567 * @hmc_info: hmc structure, size and count for HMC objects
   4568 */
   4569static u32 irdma_est_sd(struct irdma_sc_dev *dev,
   4570			struct irdma_hmc_info *hmc_info)
   4571{
   4572	int i;
   4573	u64 size = 0;
   4574	u64 sd;
   4575
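        	/* Sum the 512-byte-aligned footprint of every HMC object
        	 * (PBLE counted separately below), then convert to SDs:
        	 * each SD covers 2MB (1 << 21 bytes), rounding up.
        	 */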
   4576	for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++)
   4577		if (i != IRDMA_HMC_IW_PBLE)
   4578			size += round_up(hmc_info->hmc_obj[i].cnt *
   4579					 hmc_info->hmc_obj[i].size, 512);
   4580	size += round_up(hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt *
   4581			 hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].size, 512);
   4582	if (size & 0x1FFFFF)
   4583		sd = (size >> 21) + 1; /* add 1 for remainder */
   4584	else
   4585		sd = size >> 21;
   4586	if (sd > 0xFFFFFFFF) {
   4587		ibdev_dbg(to_ibdev(dev), "HMC: sd overflow[%lld]\n", sd);
   4588		sd = 0xFFFFFFFF - 1;
   4589	}
   4590
   4591	return (u32)sd;
   4592}
   4593
   4594/**
   4595 * irdma_sc_query_rdma_features_done - poll cqp for query features done
   4596 * @cqp: struct for cqp hw
   4597 */
   4598static int irdma_sc_query_rdma_features_done(struct irdma_sc_cqp *cqp)
   4599{
   4600	return irdma_sc_poll_for_cqp_op_done(cqp,
   4601					     IRDMA_CQP_OP_QUERY_RDMA_FEATURES,
   4602					     NULL);
   4603}
   4604
   4605/**
   4606 * irdma_sc_query_rdma_features - query RDMA features and FW ver
   4607 * @cqp: struct for cqp hw
   4608 * @buf: buffer to hold query info
   4609 * @scratch: u64 saved to be used during cqp completion
   4610 */
   4611static int irdma_sc_query_rdma_features(struct irdma_sc_cqp *cqp,
   4612					struct irdma_dma_mem *buf, u64 scratch)
   4613{
   4614	__le64 *wqe;
   4615	u64 temp;
   4616
   4617	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
   4618	if (!wqe)
   4619		return -ENOMEM;
   4620
   4621	temp = buf->pa;
   4622	set_64bit_val(wqe, 32, temp);
   4623
   4624	temp = FIELD_PREP(IRDMA_CQPSQ_QUERY_RDMA_FEATURES_WQEVALID,
   4625			  cqp->polarity) |
   4626	       FIELD_PREP(IRDMA_CQPSQ_QUERY_RDMA_FEATURES_BUF_LEN, buf->size) |
   4627	       FIELD_PREP(IRDMA_CQPSQ_UP_OP, IRDMA_CQP_OP_QUERY_RDMA_FEATURES);
   4628	dma_wmb(); /* make sure WQE is written before valid bit is set */
   4629
   4630	set_64bit_val(wqe, 24, temp);
   4631
   4632	print_hex_dump_debug("WQE: QUERY RDMA FEATURES", DUMP_PREFIX_OFFSET,
   4633			     16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
   4634	irdma_sc_cqp_post_sq(cqp);
   4635
   4636	return 0;
   4637}
   4638
   4639/**
   4640 * irdma_get_rdma_features - get RDMA features
   4641 * @dev: sc device struct
   4642 */
   4643int irdma_get_rdma_features(struct irdma_sc_dev *dev)
   4644{
   4645	int ret_code;
   4646	struct irdma_dma_mem feat_buf;
   4647	u64 temp;
   4648	u16 byte_idx, feat_type, feat_cnt, feat_idx;
   4649
   4650	feat_buf.size = ALIGN(IRDMA_FEATURE_BUF_SIZE,
   4651			      IRDMA_FEATURE_BUF_ALIGNMENT);
   4652	feat_buf.va = dma_alloc_coherent(dev->hw->device, feat_buf.size,
   4653					 &feat_buf.pa, GFP_KERNEL);
   4654	if (!feat_buf.va)
   4655		return -ENOMEM;
   4656
   4657	ret_code = irdma_sc_query_rdma_features(dev->cqp, &feat_buf, 0);
   4658	if (!ret_code)
   4659		ret_code = irdma_sc_query_rdma_features_done(dev->cqp);
   4660	if (ret_code)
   4661		goto exit;
   4662
   4663	get_64bit_val(feat_buf.va, 0, &temp);
   4664	feat_cnt = (u16)FIELD_GET(IRDMA_FEATURE_CNT, temp);
   4665	if (feat_cnt < 2) {
   4666		ret_code = -EINVAL;
   4667		goto exit;
   4668	} else if (feat_cnt > IRDMA_MAX_FEATURES) {
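        		/* the default-sized buffer cannot hold all reported
        		 * features; reallocate based on feat_cnt and query again
        		 */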
   4669		ibdev_dbg(to_ibdev(dev),
   4670			  "DEV: feature buf size insufficient, retrying with larger buffer\n");
   4671		dma_free_coherent(dev->hw->device, feat_buf.size, feat_buf.va,
   4672				  feat_buf.pa);
   4673		feat_buf.va = NULL;
   4674		feat_buf.size = ALIGN(8 * feat_cnt,
   4675				      IRDMA_FEATURE_BUF_ALIGNMENT);
   4676		feat_buf.va = dma_alloc_coherent(dev->hw->device,
   4677						 feat_buf.size, &feat_buf.pa,
   4678						 GFP_KERNEL);
   4679		if (!feat_buf.va)
   4680			return -ENOMEM;
   4681
   4682		ret_code = irdma_sc_query_rdma_features(dev->cqp, &feat_buf, 0);
   4683		if (!ret_code)
   4684			ret_code = irdma_sc_query_rdma_features_done(dev->cqp);
   4685		if (ret_code)
   4686			goto exit;
   4687
   4688		get_64bit_val(feat_buf.va, 0, &temp);
   4689		feat_cnt = (u16)FIELD_GET(IRDMA_FEATURE_CNT, temp);
   4690		if (feat_cnt < 2) {
   4691			ret_code = -EINVAL;
   4692			goto exit;
   4693		}
   4694	}
   4695
   4696	print_hex_dump_debug("WQE: QUERY RDMA FEATURES", DUMP_PREFIX_OFFSET,
   4697			     16, 8, feat_buf.va, feat_cnt * 8, false);
   4698
   4699	for (byte_idx = 0, feat_idx = 0; feat_idx < min(feat_cnt, (u16)IRDMA_MAX_FEATURES);
   4700	     feat_idx++, byte_idx += 8) {
   4701		get_64bit_val(feat_buf.va, byte_idx, &temp);
   4702		feat_type = FIELD_GET(IRDMA_FEATURE_TYPE, temp);
   4703		if (feat_type >= IRDMA_MAX_FEATURES) {
   4704			ibdev_dbg(to_ibdev(dev),
   4705				  "DEV: found unrecognized feature type %d\n",
   4706				  feat_type);
   4707			continue;
   4708		}
   4709		dev->feature_info[feat_type] = temp;
   4710	}
   4711exit:
   4712	dma_free_coherent(dev->hw->device, feat_buf.size, feat_buf.va,
   4713			  feat_buf.pa);
   4714	feat_buf.va = NULL;
   4715	return ret_code;
   4716}
   4717
   4718static u32 irdma_q1_cnt(struct irdma_sc_dev *dev,
   4719			struct irdma_hmc_info *hmc_info, u32 qpwanted)
   4720{
   4721	u32 q1_cnt;
   4722
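        	/* Q1 scales with 2 * max_hw_ird per wanted QP, with extra
        	 * headroom and power-of-two rounding depending on HW gen
        	 * and protocol
        	 */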
   4723	if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) {
   4724		q1_cnt = roundup_pow_of_two(dev->hw_attrs.max_hw_ird * 2 * qpwanted);
   4725	} else {
   4726		if (dev->cqp->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
   4727			q1_cnt = roundup_pow_of_two(dev->hw_attrs.max_hw_ird * 2 * qpwanted + 512);
   4728		else
   4729			q1_cnt = dev->hw_attrs.max_hw_ird * 2 * qpwanted;
   4730	}
   4731
   4732	return q1_cnt;
   4733}
   4734
   4735static void cfg_fpm_value_gen_1(struct irdma_sc_dev *dev,
   4736				struct irdma_hmc_info *hmc_info, u32 qpwanted)
   4737{
   4738	hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt = roundup_pow_of_two(qpwanted * dev->hw_attrs.max_hw_wqes);
   4739}
   4740
   4741static void cfg_fpm_value_gen_2(struct irdma_sc_dev *dev,
   4742				struct irdma_hmc_info *hmc_info, u32 qpwanted)
   4743{
   4744	struct irdma_hmc_fpm_misc *hmc_fpm_misc = &dev->hmc_fpm_misc;
   4745
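        	/* GEN_2: scale XF, RRF and OOISC objects with the wanted QP
        	 * count; the corresponding *FFL objects are derived from their
        	 * block sizes
        	 */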
   4746	hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt =
   4747		4 * hmc_fpm_misc->xf_block_size * qpwanted;
   4748
   4749	hmc_info->hmc_obj[IRDMA_HMC_IW_HDR].cnt = qpwanted;
   4750
   4751	if (hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].max_cnt)
   4752		hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].cnt = 32 * qpwanted;
   4753	if (hmc_info->hmc_obj[IRDMA_HMC_IW_RRFFL].max_cnt)
   4754		hmc_info->hmc_obj[IRDMA_HMC_IW_RRFFL].cnt =
   4755			hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].cnt /
   4756			hmc_fpm_misc->rrf_block_size;
   4757	if (hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].max_cnt)
   4758		hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].cnt = 32 * qpwanted;
   4759	if (hmc_info->hmc_obj[IRDMA_HMC_IW_OOISCFFL].max_cnt)
   4760		hmc_info->hmc_obj[IRDMA_HMC_IW_OOISCFFL].cnt =
   4761			hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].cnt /
   4762			hmc_fpm_misc->ooiscf_block_size;
   4763}
   4764
   4765/**
   4766 * irdma_cfg_fpm_val - configure HMC objects
   4767 * @dev: sc device struct
   4768 * @qp_count: desired qp count
   4769 */
   4770int irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
   4771{
   4772	struct irdma_virt_mem virt_mem;
   4773	u32 i, mem_size;
   4774	u32 qpwanted, mrwanted, pblewanted;
   4775	u32 powerof2, hte;
   4776	u32 sd_needed;
   4777	u32 sd_diff;
   4778	u32 loop_count = 0;
   4779	struct irdma_hmc_info *hmc_info;
   4780	struct irdma_hmc_fpm_misc *hmc_fpm_misc;
   4781	int ret_code = 0;
   4782
   4783	hmc_info = dev->hmc_info;
   4784	hmc_fpm_misc = &dev->hmc_fpm_misc;
   4785
   4786	ret_code = irdma_sc_init_iw_hmc(dev, dev->hmc_fn_id);
   4787	if (ret_code) {
   4788		ibdev_dbg(to_ibdev(dev),
   4789			  "HMC: irdma_sc_init_iw_hmc returned error_code = %d\n",
   4790			  ret_code);
   4791		return ret_code;
   4792	}
   4793
   4794	for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++)
   4795		hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt;
   4796	sd_needed = irdma_est_sd(dev, hmc_info);
   4797	ibdev_dbg(to_ibdev(dev),
   4798		  "HMC: FW max resources sd_needed[%08d] first_sd_index[%04d]\n",
   4799		  sd_needed, hmc_info->first_sd_index);
   4800	ibdev_dbg(to_ibdev(dev), "HMC: sd count %d where max sd is %d\n",
   4801		  hmc_info->sd_table.sd_cnt, hmc_fpm_misc->max_sds);
   4802
   4803	qpwanted = min(qp_count, hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt);
   4804
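        	/* round the wanted QP count down to a power of two */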
   4805	powerof2 = 1;
   4806	while (powerof2 <= qpwanted)
   4807		powerof2 *= 2;
   4808	powerof2 /= 2;
   4809	qpwanted = powerof2;
   4810
   4811	mrwanted = hmc_info->hmc_obj[IRDMA_HMC_IW_MR].max_cnt;
   4812	pblewanted = hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].max_cnt;
   4813
   4814	ibdev_dbg(to_ibdev(dev),
   4815		  "HMC: req_qp=%d max_sd=%d, max_qp = %d, max_cq=%d, max_mr=%d, max_pble=%d, mc=%d, av=%d\n",
   4816		  qp_count, hmc_fpm_misc->max_sds,
   4817		  hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt,
   4818		  hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].max_cnt,
   4819		  hmc_info->hmc_obj[IRDMA_HMC_IW_MR].max_cnt,
   4820		  hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].max_cnt,
   4821		  hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].max_cnt,
   4822		  hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].max_cnt);
   4823	hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt =
   4824		hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].max_cnt;
   4825	hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt =
   4826		hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].max_cnt;
   4827	hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].cnt =
   4828		hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].max_cnt;
   4829
   4830	hmc_info->hmc_obj[IRDMA_HMC_IW_APBVT_ENTRY].cnt = 1;
   4831
   4832	while (irdma_q1_cnt(dev, hmc_info, qpwanted) > hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].max_cnt)
   4833		qpwanted /= 2;
   4834
   4835	do {
   4836		++loop_count;
   4837		hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt = qpwanted;
   4838		hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt =
   4839			min(2 * qpwanted, hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt);
   4840		hmc_info->hmc_obj[IRDMA_HMC_IW_RESERVED].cnt = 0; /* Reserved */
   4841		hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt = mrwanted;
   4842
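        		/* HTE count: QPs plus MC entries, rounded up to the next
        		 * power of two (minimum 512) and scaled by ht_multiplier
        		 */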
   4843		hte = round_up(qpwanted + hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt, 512);
   4844		powerof2 = 1;
   4845		while (powerof2 < hte)
   4846			powerof2 *= 2;
   4847		hmc_info->hmc_obj[IRDMA_HMC_IW_HTE].cnt =
   4848			powerof2 * hmc_fpm_misc->ht_multiplier;
   4849		if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
   4850			cfg_fpm_value_gen_1(dev, hmc_info, qpwanted);
   4851		else
   4852			cfg_fpm_value_gen_2(dev, hmc_info, qpwanted);
   4853
   4854		hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].cnt = irdma_q1_cnt(dev, hmc_info, qpwanted);
   4855		hmc_info->hmc_obj[IRDMA_HMC_IW_XFFL].cnt =
   4856			hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt / hmc_fpm_misc->xf_block_size;
   4857		hmc_info->hmc_obj[IRDMA_HMC_IW_Q1FL].cnt =
   4858			hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].cnt / hmc_fpm_misc->q1_block_size;
   4859		hmc_info->hmc_obj[IRDMA_HMC_IW_TIMER].cnt =
   4860			(round_up(qpwanted, 512) / 512 + 1) * hmc_fpm_misc->timer_bucket;
   4861
   4862		hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt = pblewanted;
   4863		sd_needed = irdma_est_sd(dev, hmc_info);
   4864		ibdev_dbg(to_ibdev(dev),
   4865			  "HMC: sd_needed = %d, hmc_fpm_misc->max_sds=%d, mrwanted=%d, pblewanted=%d qpwanted=%d\n",
   4866			  sd_needed, hmc_fpm_misc->max_sds, mrwanted,
   4867			  pblewanted, qpwanted);
   4868
   4869		/* Do not reduce resources further. All objects fit with max SDs */
   4870		if (sd_needed <= hmc_fpm_misc->max_sds)
   4871			break;
   4872
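        		/* Over the SD budget: scale back the largest consumers.
        		 * A big deficit halves MRs and PBLEs (and QPs when still
        		 * plentiful); smaller deficits shave PBLEs first, trim
        		 * MRs, and periodically halve QPs and MC/AV objects.
        		 */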
   4873		sd_diff = sd_needed - hmc_fpm_misc->max_sds;
   4874		if (sd_diff > 128) {
   4875			if (qpwanted > 128 && sd_diff > 144)
   4876				qpwanted /= 2;
   4877			mrwanted /= 2;
   4878			pblewanted /= 2;
   4879			continue;
   4880		}
   4881		if (dev->cqp->hmc_profile != IRDMA_HMC_PROFILE_FAVOR_VF &&
   4882		    pblewanted > (512 * FPM_MULTIPLIER * sd_diff)) {
   4883			pblewanted -= 256 * FPM_MULTIPLIER * sd_diff;
   4884			continue;
   4885		} else if (pblewanted > (100 * FPM_MULTIPLIER)) {
   4886			pblewanted -= 10 * FPM_MULTIPLIER;
   4887		} else if (pblewanted > FPM_MULTIPLIER) {
   4888			pblewanted -= FPM_MULTIPLIER;
   4889		} else if (qpwanted <= 128) {
   4890			if (hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt > 256)
   4891				hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt /= 2;
   4892			if (hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt > 256)
   4893				hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt /= 2;
   4894		}
   4895		if (mrwanted > FPM_MULTIPLIER)
   4896			mrwanted -= FPM_MULTIPLIER;
   4897		if (!(loop_count % 10) && qpwanted > 128) {
   4898			qpwanted /= 2;
   4899			if (hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt > 256)
   4900				hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt /= 2;
   4901		}
   4902	} while (loop_count < 2000);
   4903
   4904	if (sd_needed > hmc_fpm_misc->max_sds) {
   4905		ibdev_dbg(to_ibdev(dev),
   4906			  "HMC: cfg_fpm failed loop_cnt=%d, sd_needed=%d, max sd count %d\n",
   4907			  loop_count, sd_needed, hmc_info->sd_table.sd_cnt);
   4908		return -EINVAL;
   4909	}
   4910
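        	/* resources fit with SDs to spare; give the unused SD capacity
        	 * back to PBLEs and re-estimate
        	 */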
   4911	if (loop_count > 1 && sd_needed < hmc_fpm_misc->max_sds) {
   4912		pblewanted += (hmc_fpm_misc->max_sds - sd_needed) * 256 *
   4913			      FPM_MULTIPLIER;
   4914		hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt = pblewanted;
   4915		sd_needed = irdma_est_sd(dev, hmc_info);
   4916	}
   4917
   4918	ibdev_dbg(to_ibdev(dev),
   4919		  "HMC: loop_cnt=%d, sd_needed=%d, qpcnt = %d, cqcnt=%d, mrcnt=%d, pblecnt=%d, mc=%d, ah=%d, max sd count %d, first sd index %d\n",
   4920		  loop_count, sd_needed,
   4921		  hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt,
   4922		  hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt,
   4923		  hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt,
   4924		  hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt,
   4925		  hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt,
   4926		  hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt,
   4927		  hmc_info->sd_table.sd_cnt, hmc_info->first_sd_index);
   4928
   4929	ret_code = irdma_sc_cfg_iw_fpm(dev, dev->hmc_fn_id);
   4930	if (ret_code) {
   4931		ibdev_dbg(to_ibdev(dev),
   4932			  "HMC: cfg_iw_fpm returned error_code[x%08X]\n",
   4933			  readl(dev->hw_regs[IRDMA_CQPERRCODES]));
   4934		return ret_code;
   4935	}
   4936
   4937	mem_size = sizeof(struct irdma_hmc_sd_entry) *
   4938		   (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index + 1);
   4939	virt_mem.size = mem_size;
   4940	virt_mem.va = kzalloc(virt_mem.size, GFP_KERNEL);
   4941	if (!virt_mem.va) {
   4942		ibdev_dbg(to_ibdev(dev),
   4943			  "HMC: failed to allocate memory for sd_entry buffer\n");
   4944		return -ENOMEM;
   4945	}
   4946	hmc_info->sd_table.sd_entry = virt_mem.va;
   4947
   4948	return ret_code;
   4949}
   4950
   4951/**
    4952 * irdma_exec_cqp_cmd - execute a cqp command when a wqe is available
   4953 * @dev: rdma device
   4954 * @pcmdinfo: cqp command info
   4955 */
   4956static int irdma_exec_cqp_cmd(struct irdma_sc_dev *dev,
   4957			      struct cqp_cmds_info *pcmdinfo)
   4958{
   4959	int status;
   4960	struct irdma_dma_mem val_mem;
   4961	bool alloc = false;
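        	/* alloc marks IRDMA_OP_STATS_ALLOCATE, which falls through to
        	 * the shared stats-manage case below
        	 */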
   4962
   4963	dev->cqp_cmd_stats[pcmdinfo->cqp_cmd]++;
   4964	switch (pcmdinfo->cqp_cmd) {
   4965	case IRDMA_OP_CEQ_DESTROY:
   4966		status = irdma_sc_ceq_destroy(pcmdinfo->in.u.ceq_destroy.ceq,
   4967					      pcmdinfo->in.u.ceq_destroy.scratch,
   4968					      pcmdinfo->post_sq);
   4969		break;
   4970	case IRDMA_OP_AEQ_DESTROY:
   4971		status = irdma_sc_aeq_destroy(pcmdinfo->in.u.aeq_destroy.aeq,
   4972					      pcmdinfo->in.u.aeq_destroy.scratch,
   4973					      pcmdinfo->post_sq);
   4974
   4975		break;
   4976	case IRDMA_OP_CEQ_CREATE:
   4977		status = irdma_sc_ceq_create(pcmdinfo->in.u.ceq_create.ceq,
   4978					     pcmdinfo->in.u.ceq_create.scratch,
   4979					     pcmdinfo->post_sq);
   4980		break;
   4981	case IRDMA_OP_AEQ_CREATE:
   4982		status = irdma_sc_aeq_create(pcmdinfo->in.u.aeq_create.aeq,
   4983					     pcmdinfo->in.u.aeq_create.scratch,
   4984					     pcmdinfo->post_sq);
   4985		break;
   4986	case IRDMA_OP_QP_UPLOAD_CONTEXT:
   4987		status = irdma_sc_qp_upload_context(pcmdinfo->in.u.qp_upload_context.dev,
   4988						    &pcmdinfo->in.u.qp_upload_context.info,
   4989						    pcmdinfo->in.u.qp_upload_context.scratch,
   4990						    pcmdinfo->post_sq);
   4991		break;
   4992	case IRDMA_OP_CQ_CREATE:
   4993		status = irdma_sc_cq_create(pcmdinfo->in.u.cq_create.cq,
   4994					    pcmdinfo->in.u.cq_create.scratch,
   4995					    pcmdinfo->in.u.cq_create.check_overflow,
   4996					    pcmdinfo->post_sq);
   4997		break;
   4998	case IRDMA_OP_CQ_MODIFY:
   4999		status = irdma_sc_cq_modify(pcmdinfo->in.u.cq_modify.cq,
   5000					    &pcmdinfo->in.u.cq_modify.info,
   5001					    pcmdinfo->in.u.cq_modify.scratch,
   5002					    pcmdinfo->post_sq);
   5003		break;
   5004	case IRDMA_OP_CQ_DESTROY:
   5005		status = irdma_sc_cq_destroy(pcmdinfo->in.u.cq_destroy.cq,
   5006					     pcmdinfo->in.u.cq_destroy.scratch,
   5007					     pcmdinfo->post_sq);
   5008		break;
   5009	case IRDMA_OP_QP_FLUSH_WQES:
   5010		status = irdma_sc_qp_flush_wqes(pcmdinfo->in.u.qp_flush_wqes.qp,
   5011						&pcmdinfo->in.u.qp_flush_wqes.info,
   5012						pcmdinfo->in.u.qp_flush_wqes.scratch,
   5013						pcmdinfo->post_sq);
   5014		break;
   5015	case IRDMA_OP_GEN_AE:
   5016		status = irdma_sc_gen_ae(pcmdinfo->in.u.gen_ae.qp,
   5017					 &pcmdinfo->in.u.gen_ae.info,
   5018					 pcmdinfo->in.u.gen_ae.scratch,
   5019					 pcmdinfo->post_sq);
   5020		break;
   5021	case IRDMA_OP_MANAGE_PUSH_PAGE:
   5022		status = irdma_sc_manage_push_page(pcmdinfo->in.u.manage_push_page.cqp,
   5023						   &pcmdinfo->in.u.manage_push_page.info,
   5024						   pcmdinfo->in.u.manage_push_page.scratch,
   5025						   pcmdinfo->post_sq);
   5026		break;
   5027	case IRDMA_OP_UPDATE_PE_SDS:
   5028		status = irdma_update_pe_sds(pcmdinfo->in.u.update_pe_sds.dev,
   5029					     &pcmdinfo->in.u.update_pe_sds.info,
   5030					     pcmdinfo->in.u.update_pe_sds.scratch);
   5031		break;
   5032	case IRDMA_OP_MANAGE_HMC_PM_FUNC_TABLE:
   5033		/* switch to calling through the call table */
   5034		status =
   5035			irdma_sc_manage_hmc_pm_func_table(pcmdinfo->in.u.manage_hmc_pm.dev->cqp,
   5036							  &pcmdinfo->in.u.manage_hmc_pm.info,
   5037							  pcmdinfo->in.u.manage_hmc_pm.scratch,
   5038							  true);
   5039		break;
   5040	case IRDMA_OP_SUSPEND:
   5041		status = irdma_sc_suspend_qp(pcmdinfo->in.u.suspend_resume.cqp,
   5042					     pcmdinfo->in.u.suspend_resume.qp,
   5043					     pcmdinfo->in.u.suspend_resume.scratch);
   5044		break;
   5045	case IRDMA_OP_RESUME:
   5046		status = irdma_sc_resume_qp(pcmdinfo->in.u.suspend_resume.cqp,
   5047					    pcmdinfo->in.u.suspend_resume.qp,
   5048					    pcmdinfo->in.u.suspend_resume.scratch);
   5049		break;
   5050	case IRDMA_OP_QUERY_FPM_VAL:
   5051		val_mem.pa = pcmdinfo->in.u.query_fpm_val.fpm_val_pa;
   5052		val_mem.va = pcmdinfo->in.u.query_fpm_val.fpm_val_va;
   5053		status = irdma_sc_query_fpm_val(pcmdinfo->in.u.query_fpm_val.cqp,
   5054						pcmdinfo->in.u.query_fpm_val.scratch,
   5055						pcmdinfo->in.u.query_fpm_val.hmc_fn_id,
   5056						&val_mem, true, IRDMA_CQP_WAIT_EVENT);
   5057		break;
   5058	case IRDMA_OP_COMMIT_FPM_VAL:
   5059		val_mem.pa = pcmdinfo->in.u.commit_fpm_val.fpm_val_pa;
   5060		val_mem.va = pcmdinfo->in.u.commit_fpm_val.fpm_val_va;
   5061		status = irdma_sc_commit_fpm_val(pcmdinfo->in.u.commit_fpm_val.cqp,
   5062						 pcmdinfo->in.u.commit_fpm_val.scratch,
   5063						 pcmdinfo->in.u.commit_fpm_val.hmc_fn_id,
   5064						 &val_mem,
   5065						 true,
   5066						 IRDMA_CQP_WAIT_EVENT);
   5067		break;
   5068	case IRDMA_OP_STATS_ALLOCATE:
   5069		alloc = true;
   5070		fallthrough;
   5071	case IRDMA_OP_STATS_FREE:
   5072		status = irdma_sc_manage_stats_inst(pcmdinfo->in.u.stats_manage.cqp,
   5073						    &pcmdinfo->in.u.stats_manage.info,
   5074						    alloc,
   5075						    pcmdinfo->in.u.stats_manage.scratch);
   5076		break;
   5077	case IRDMA_OP_STATS_GATHER:
   5078		status = irdma_sc_gather_stats(pcmdinfo->in.u.stats_gather.cqp,
   5079					       &pcmdinfo->in.u.stats_gather.info,
   5080					       pcmdinfo->in.u.stats_gather.scratch);
   5081		break;
   5082	case IRDMA_OP_WS_MODIFY_NODE:
   5083		status = irdma_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp,
   5084						 &pcmdinfo->in.u.ws_node.info,
   5085						 IRDMA_MODIFY_NODE,
   5086						 pcmdinfo->in.u.ws_node.scratch);
   5087		break;
   5088	case IRDMA_OP_WS_DELETE_NODE:
   5089		status = irdma_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp,
   5090						 &pcmdinfo->in.u.ws_node.info,
   5091						 IRDMA_DEL_NODE,
   5092						 pcmdinfo->in.u.ws_node.scratch);
   5093		break;
   5094	case IRDMA_OP_WS_ADD_NODE:
   5095		status = irdma_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp,
   5096						 &pcmdinfo->in.u.ws_node.info,
   5097						 IRDMA_ADD_NODE,
   5098						 pcmdinfo->in.u.ws_node.scratch);
   5099		break;
   5100	case IRDMA_OP_SET_UP_MAP:
   5101		status = irdma_sc_set_up_map(pcmdinfo->in.u.up_map.cqp,
   5102					     &pcmdinfo->in.u.up_map.info,
   5103					     pcmdinfo->in.u.up_map.scratch);
   5104		break;
   5105	case IRDMA_OP_QUERY_RDMA_FEATURES:
   5106		status = irdma_sc_query_rdma_features(pcmdinfo->in.u.query_rdma.cqp,
   5107						      &pcmdinfo->in.u.query_rdma.query_buff_mem,
   5108						      pcmdinfo->in.u.query_rdma.scratch);
   5109		break;
   5110	case IRDMA_OP_DELETE_ARP_CACHE_ENTRY:
   5111		status = irdma_sc_del_arp_cache_entry(pcmdinfo->in.u.del_arp_cache_entry.cqp,
   5112						      pcmdinfo->in.u.del_arp_cache_entry.scratch,
   5113						      pcmdinfo->in.u.del_arp_cache_entry.arp_index,
   5114						      pcmdinfo->post_sq);
   5115		break;
   5116	case IRDMA_OP_MANAGE_APBVT_ENTRY:
   5117		status = irdma_sc_manage_apbvt_entry(pcmdinfo->in.u.manage_apbvt_entry.cqp,
   5118						     &pcmdinfo->in.u.manage_apbvt_entry.info,
   5119						     pcmdinfo->in.u.manage_apbvt_entry.scratch,
   5120						     pcmdinfo->post_sq);
   5121		break;
   5122	case IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY:
   5123		status = irdma_sc_manage_qhash_table_entry(pcmdinfo->in.u.manage_qhash_table_entry.cqp,
   5124							   &pcmdinfo->in.u.manage_qhash_table_entry.info,
   5125							   pcmdinfo->in.u.manage_qhash_table_entry.scratch,
   5126							   pcmdinfo->post_sq);
   5127		break;
   5128	case IRDMA_OP_QP_MODIFY:
   5129		status = irdma_sc_qp_modify(pcmdinfo->in.u.qp_modify.qp,
   5130					    &pcmdinfo->in.u.qp_modify.info,
   5131					    pcmdinfo->in.u.qp_modify.scratch,
   5132					    pcmdinfo->post_sq);
   5133		break;
   5134	case IRDMA_OP_QP_CREATE:
   5135		status = irdma_sc_qp_create(pcmdinfo->in.u.qp_create.qp,
   5136					    &pcmdinfo->in.u.qp_create.info,
   5137					    pcmdinfo->in.u.qp_create.scratch,
   5138					    pcmdinfo->post_sq);
   5139		break;
   5140	case IRDMA_OP_QP_DESTROY:
   5141		status = irdma_sc_qp_destroy(pcmdinfo->in.u.qp_destroy.qp,
   5142					     pcmdinfo->in.u.qp_destroy.scratch,
   5143					     pcmdinfo->in.u.qp_destroy.remove_hash_idx,
   5144					     pcmdinfo->in.u.qp_destroy.ignore_mw_bnd,
   5145					     pcmdinfo->post_sq);
   5146		break;
   5147	case IRDMA_OP_ALLOC_STAG:
   5148		status = irdma_sc_alloc_stag(pcmdinfo->in.u.alloc_stag.dev,
   5149					     &pcmdinfo->in.u.alloc_stag.info,
   5150					     pcmdinfo->in.u.alloc_stag.scratch,
   5151					     pcmdinfo->post_sq);
   5152		break;
   5153	case IRDMA_OP_MR_REG_NON_SHARED:
   5154		status = irdma_sc_mr_reg_non_shared(pcmdinfo->in.u.mr_reg_non_shared.dev,
   5155						    &pcmdinfo->in.u.mr_reg_non_shared.info,
   5156						    pcmdinfo->in.u.mr_reg_non_shared.scratch,
   5157						    pcmdinfo->post_sq);
   5158		break;
   5159	case IRDMA_OP_DEALLOC_STAG:
   5160		status = irdma_sc_dealloc_stag(pcmdinfo->in.u.dealloc_stag.dev,
   5161					       &pcmdinfo->in.u.dealloc_stag.info,
   5162					       pcmdinfo->in.u.dealloc_stag.scratch,
   5163					       pcmdinfo->post_sq);
   5164		break;
   5165	case IRDMA_OP_MW_ALLOC:
   5166		status = irdma_sc_mw_alloc(pcmdinfo->in.u.mw_alloc.dev,
   5167					   &pcmdinfo->in.u.mw_alloc.info,
   5168					   pcmdinfo->in.u.mw_alloc.scratch,
   5169					   pcmdinfo->post_sq);
   5170		break;
   5171	case IRDMA_OP_ADD_ARP_CACHE_ENTRY:
   5172		status = irdma_sc_add_arp_cache_entry(pcmdinfo->in.u.add_arp_cache_entry.cqp,
   5173						      &pcmdinfo->in.u.add_arp_cache_entry.info,
   5174						      pcmdinfo->in.u.add_arp_cache_entry.scratch,
   5175						      pcmdinfo->post_sq);
   5176		break;
   5177	case IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY:
   5178		status = irdma_sc_alloc_local_mac_entry(pcmdinfo->in.u.alloc_local_mac_entry.cqp,
   5179							pcmdinfo->in.u.alloc_local_mac_entry.scratch,
   5180							pcmdinfo->post_sq);
   5181		break;
   5182	case IRDMA_OP_ADD_LOCAL_MAC_ENTRY:
   5183		status = irdma_sc_add_local_mac_entry(pcmdinfo->in.u.add_local_mac_entry.cqp,
   5184						      &pcmdinfo->in.u.add_local_mac_entry.info,
   5185						      pcmdinfo->in.u.add_local_mac_entry.scratch,
   5186						      pcmdinfo->post_sq);
   5187		break;
   5188	case IRDMA_OP_DELETE_LOCAL_MAC_ENTRY:
   5189		status = irdma_sc_del_local_mac_entry(pcmdinfo->in.u.del_local_mac_entry.cqp,
   5190						      pcmdinfo->in.u.del_local_mac_entry.scratch,
   5191						      pcmdinfo->in.u.del_local_mac_entry.entry_idx,
   5192						      pcmdinfo->in.u.del_local_mac_entry.ignore_ref_count,
   5193						      pcmdinfo->post_sq);
   5194		break;
   5195	case IRDMA_OP_AH_CREATE:
   5196		status = irdma_sc_create_ah(pcmdinfo->in.u.ah_create.cqp,
   5197					    &pcmdinfo->in.u.ah_create.info,
   5198					    pcmdinfo->in.u.ah_create.scratch);
   5199		break;
   5200	case IRDMA_OP_AH_DESTROY:
   5201		status = irdma_sc_destroy_ah(pcmdinfo->in.u.ah_destroy.cqp,
   5202					     &pcmdinfo->in.u.ah_destroy.info,
   5203					     pcmdinfo->in.u.ah_destroy.scratch);
   5204		break;
   5205	case IRDMA_OP_MC_CREATE:
   5206		status = irdma_sc_create_mcast_grp(pcmdinfo->in.u.mc_create.cqp,
   5207						   &pcmdinfo->in.u.mc_create.info,
   5208						   pcmdinfo->in.u.mc_create.scratch);
   5209		break;
   5210	case IRDMA_OP_MC_DESTROY:
   5211		status = irdma_sc_destroy_mcast_grp(pcmdinfo->in.u.mc_destroy.cqp,
   5212						    &pcmdinfo->in.u.mc_destroy.info,
   5213						    pcmdinfo->in.u.mc_destroy.scratch);
   5214		break;
   5215	case IRDMA_OP_MC_MODIFY:
   5216		status = irdma_sc_modify_mcast_grp(pcmdinfo->in.u.mc_modify.cqp,
   5217						   &pcmdinfo->in.u.mc_modify.info,
   5218						   pcmdinfo->in.u.mc_modify.scratch);
   5219		break;
   5220	default:
   5221		status = -EOPNOTSUPP;
   5222		break;
   5223	}
   5224
   5225	return status;
   5226}
   5227
   5228/**
    5229 * irdma_process_cqp_cmd - execute or queue a cqp command
   5230 * @dev: sc device struct
   5231 * @pcmdinfo: cqp command info
   5232 */
   5233int irdma_process_cqp_cmd(struct irdma_sc_dev *dev,
   5234			  struct cqp_cmds_info *pcmdinfo)
   5235{
   5236	int status = 0;
   5237	unsigned long flags;
   5238
   5239	spin_lock_irqsave(&dev->cqp_lock, flags);
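        	/* execute now if nothing is queued and the CQP SQ has room;
        	 * otherwise queue the command for irdma_process_bh
        	 */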
   5240	if (list_empty(&dev->cqp_cmd_head) && !irdma_cqp_ring_full(dev->cqp))
   5241		status = irdma_exec_cqp_cmd(dev, pcmdinfo);
   5242	else
   5243		list_add_tail(&pcmdinfo->cqp_cmd_entry, &dev->cqp_cmd_head);
   5244	spin_unlock_irqrestore(&dev->cqp_lock, flags);
   5245	return status;
   5246}
   5247
   5248/**
   5249 * irdma_process_bh - called from tasklet for cqp list
   5250 * @dev: sc device struct
   5251 */
   5252int irdma_process_bh(struct irdma_sc_dev *dev)
   5253{
   5254	int status = 0;
   5255	struct cqp_cmds_info *pcmdinfo;
   5256	unsigned long flags;
   5257
   5258	spin_lock_irqsave(&dev->cqp_lock, flags);
   5259	while (!list_empty(&dev->cqp_cmd_head) &&
   5260	       !irdma_cqp_ring_full(dev->cqp)) {
   5261		pcmdinfo = (struct cqp_cmds_info *)irdma_remove_cqp_head(dev);
   5262		status = irdma_exec_cqp_cmd(dev, pcmdinfo);
   5263		if (status)
   5264			break;
   5265	}
   5266	spin_unlock_irqrestore(&dev->cqp_lock, flags);
   5267	return status;
   5268}
   5269
   5270/**
    5271 * irdma_cfg_aeq - Configure AEQ interrupt
   5272 * @dev: pointer to the device structure
   5273 * @idx: vector index
    5274 * @enable: True to enable, False to disable
   5275 */
   5276void irdma_cfg_aeq(struct irdma_sc_dev *dev, u32 idx, bool enable)
   5277{
   5278	u32 reg_val;
   5279
   5280	reg_val = FIELD_PREP(IRDMA_PFINT_AEQCTL_CAUSE_ENA, enable) |
   5281		  FIELD_PREP(IRDMA_PFINT_AEQCTL_MSIX_INDX, idx) |
   5282		  FIELD_PREP(IRDMA_PFINT_AEQCTL_ITR_INDX, 3);
   5283	writel(reg_val, dev->hw_regs[IRDMA_PFINT_AEQCTL]);
   5284}
   5285
   5286/**
   5287 * sc_vsi_update_stats - Update statistics
   5288 * @vsi: sc_vsi instance to update
   5289 */
   5290void sc_vsi_update_stats(struct irdma_sc_vsi *vsi)
   5291{
   5292	struct irdma_gather_stats *gather_stats;
   5293	struct irdma_gather_stats *last_gather_stats;
   5294
   5295	gather_stats = vsi->pestat->gather_info.gather_stats_va;
   5296	last_gather_stats = vsi->pestat->gather_info.last_gather_stats_va;
   5297	irdma_update_stats(&vsi->pestat->hw_stats, gather_stats,
   5298			   last_gather_stats);
   5299}
   5300
   5301/**
   5302 * irdma_wait_pe_ready - Check if firmware is ready
   5303 * @dev: provides access to registers
   5304 */
   5305static int irdma_wait_pe_ready(struct irdma_sc_dev *dev)
   5306{
   5307	u32 statuscpu0;
   5308	u32 statuscpu1;
   5309	u32 statuscpu2;
   5310	u32 retrycount = 0;
   5311
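        	/* poll the three PE CPU status registers until all report
        	 * ready (0x80), waiting up to max_pe_ready_count seconds
        	 */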
   5312	do {
   5313		statuscpu0 = readl(dev->hw_regs[IRDMA_GLPE_CPUSTATUS0]);
   5314		statuscpu1 = readl(dev->hw_regs[IRDMA_GLPE_CPUSTATUS1]);
   5315		statuscpu2 = readl(dev->hw_regs[IRDMA_GLPE_CPUSTATUS2]);
   5316		if (statuscpu0 == 0x80 && statuscpu1 == 0x80 &&
   5317		    statuscpu2 == 0x80)
   5318			return 0;
   5319		mdelay(1000);
   5320	} while (retrycount++ < dev->hw_attrs.max_pe_ready_count);
   5321	return -1;
   5322}
   5323
   5324static inline void irdma_sc_init_hw(struct irdma_sc_dev *dev)
   5325{
   5326	switch (dev->hw_attrs.uk_attrs.hw_rev) {
   5327	case IRDMA_GEN_1:
   5328		i40iw_init_hw(dev);
   5329		break;
   5330	case IRDMA_GEN_2:
   5331		icrdma_init_hw(dev);
   5332		break;
   5333	}
   5334}
   5335
   5336/**
   5337 * irdma_sc_dev_init - Initialize control part of device
   5338 * @ver: version
   5339 * @dev: Device pointer
   5340 * @info: Device init info
   5341 */
   5342int irdma_sc_dev_init(enum irdma_vers ver, struct irdma_sc_dev *dev,
   5343		      struct irdma_device_init_info *info)
   5344{
   5345	u32 val;
   5346	int ret_code = 0;
   5347	u8 db_size;
   5348
   5349	INIT_LIST_HEAD(&dev->cqp_cmd_head); /* for CQP command backlog */
   5350	mutex_init(&dev->ws_mutex);
   5351	dev->hmc_fn_id = info->hmc_fn_id;
   5352	dev->fpm_query_buf_pa = info->fpm_query_buf_pa;
   5353	dev->fpm_query_buf = info->fpm_query_buf;
   5354	dev->fpm_commit_buf_pa = info->fpm_commit_buf_pa;
   5355	dev->fpm_commit_buf = info->fpm_commit_buf;
   5356	dev->hw = info->hw;
   5357	dev->hw->hw_addr = info->bar0;
    5358	/* Set up the hardware limits; HMC may restrict them further */
   5359	dev->hw_attrs.min_hw_qp_id = IRDMA_MIN_IW_QP_ID;
   5360	dev->hw_attrs.min_hw_aeq_size = IRDMA_MIN_AEQ_ENTRIES;
   5361	dev->hw_attrs.max_hw_aeq_size = IRDMA_MAX_AEQ_ENTRIES;
   5362	dev->hw_attrs.min_hw_ceq_size = IRDMA_MIN_CEQ_ENTRIES;
   5363	dev->hw_attrs.max_hw_ceq_size = IRDMA_MAX_CEQ_ENTRIES;
   5364	dev->hw_attrs.uk_attrs.min_hw_cq_size = IRDMA_MIN_CQ_SIZE;
   5365	dev->hw_attrs.uk_attrs.max_hw_cq_size = IRDMA_MAX_CQ_SIZE;
   5366	dev->hw_attrs.uk_attrs.max_hw_wq_frags = IRDMA_MAX_WQ_FRAGMENT_COUNT;
   5367	dev->hw_attrs.uk_attrs.max_hw_read_sges = IRDMA_MAX_SGE_RD;
   5368	dev->hw_attrs.max_hw_outbound_msg_size = IRDMA_MAX_OUTBOUND_MSG_SIZE;
   5369	dev->hw_attrs.max_mr_size = IRDMA_MAX_MR_SIZE;
   5370	dev->hw_attrs.max_hw_inbound_msg_size = IRDMA_MAX_INBOUND_MSG_SIZE;
   5371	dev->hw_attrs.max_hw_device_pages = IRDMA_MAX_PUSH_PAGE_COUNT;
   5372	dev->hw_attrs.uk_attrs.max_hw_inline = IRDMA_MAX_INLINE_DATA_SIZE;
   5373	dev->hw_attrs.max_hw_wqes = IRDMA_MAX_WQ_ENTRIES;
   5374	dev->hw_attrs.max_qp_wr = IRDMA_MAX_QP_WRS(IRDMA_MAX_QUANTA_PER_WR);
   5375
   5376	dev->hw_attrs.uk_attrs.max_hw_rq_quanta = IRDMA_QP_SW_MAX_RQ_QUANTA;
   5377	dev->hw_attrs.uk_attrs.max_hw_wq_quanta = IRDMA_QP_SW_MAX_WQ_QUANTA;
   5378	dev->hw_attrs.max_hw_pds = IRDMA_MAX_PDS;
   5379	dev->hw_attrs.max_hw_ena_vf_count = IRDMA_MAX_PE_ENA_VF_COUNT;
   5380
   5381	dev->hw_attrs.max_pe_ready_count = 14;
   5382	dev->hw_attrs.max_done_count = IRDMA_DONE_COUNT;
   5383	dev->hw_attrs.max_sleep_count = IRDMA_SLEEP_COUNT;
   5384	dev->hw_attrs.max_cqp_compl_wait_time_ms = CQP_COMPL_WAIT_TIME_MS;
   5385
   5386	dev->hw_attrs.uk_attrs.hw_rev = ver;
   5387	irdma_sc_init_hw(dev);
   5388
   5389	if (irdma_wait_pe_ready(dev))
   5390		return -ETIMEDOUT;
   5391
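        	/* the doorbell size reported in GLPCI_LBARCTRL must be 4M or
        	 * 8M; anything else means the RDMA PE doorbell is not enabled
        	 */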
   5392	val = readl(dev->hw_regs[IRDMA_GLPCI_LBARCTRL]);
   5393	db_size = (u8)FIELD_GET(IRDMA_GLPCI_LBARCTRL_PE_DB_SIZE, val);
   5394	if (db_size != IRDMA_PE_DB_SIZE_4M && db_size != IRDMA_PE_DB_SIZE_8M) {
   5395		ibdev_dbg(to_ibdev(dev),
   5396			  "DEV: RDMA PE doorbell is not enabled in CSR val 0x%x db_size=%d\n",
   5397			  val, db_size);
   5398		return -ENODEV;
   5399	}
   5400	dev->db_addr = dev->hw->hw_addr + (uintptr_t)dev->hw_regs[IRDMA_DB_ADDR_OFFSET];
   5401
   5402	return ret_code;
   5403}
   5404
   5405/**
   5406 * irdma_update_stats - Update statistics
   5407 * @hw_stats: hw_stats instance to update
   5408 * @gather_stats: updated stat counters
   5409 * @last_gather_stats: last stat counters
   5410 */
   5411void irdma_update_stats(struct irdma_dev_hw_stats *hw_stats,
   5412			struct irdma_gather_stats *gather_stats,
   5413			struct irdma_gather_stats *last_gather_stats)
   5414{
   5415	u64 *stats_val = hw_stats->stats_val_32;
   5416
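        	/* accumulate deltas of the 32-bit hardware counters against
        	 * the previous snapshot
        	 */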
   5417	stats_val[IRDMA_HW_STAT_INDEX_RXVLANERR] +=
   5418		IRDMA_STATS_DELTA(gather_stats->rxvlanerr,
   5419				  last_gather_stats->rxvlanerr,
   5420				  IRDMA_MAX_STATS_32);
   5421	stats_val[IRDMA_HW_STAT_INDEX_IP4RXDISCARD] +=
   5422		IRDMA_STATS_DELTA(gather_stats->ip4rxdiscard,
   5423				  last_gather_stats->ip4rxdiscard,
   5424				  IRDMA_MAX_STATS_32);
   5425	stats_val[IRDMA_HW_STAT_INDEX_IP4RXTRUNC] +=
   5426		IRDMA_STATS_DELTA(gather_stats->ip4rxtrunc,
   5427				  last_gather_stats->ip4rxtrunc,
   5428				  IRDMA_MAX_STATS_32);
   5429	stats_val[IRDMA_HW_STAT_INDEX_IP4TXNOROUTE] +=
   5430		IRDMA_STATS_DELTA(gather_stats->ip4txnoroute,
   5431				  last_gather_stats->ip4txnoroute,
   5432				  IRDMA_MAX_STATS_32);
   5433	stats_val[IRDMA_HW_STAT_INDEX_IP6RXDISCARD] +=
   5434		IRDMA_STATS_DELTA(gather_stats->ip6rxdiscard,
   5435				  last_gather_stats->ip6rxdiscard,
   5436				  IRDMA_MAX_STATS_32);
   5437	stats_val[IRDMA_HW_STAT_INDEX_IP6RXTRUNC] +=
   5438		IRDMA_STATS_DELTA(gather_stats->ip6rxtrunc,
   5439				  last_gather_stats->ip6rxtrunc,
   5440				  IRDMA_MAX_STATS_32);
   5441	stats_val[IRDMA_HW_STAT_INDEX_IP6TXNOROUTE] +=
   5442		IRDMA_STATS_DELTA(gather_stats->ip6txnoroute,
   5443				  last_gather_stats->ip6txnoroute,
   5444				  IRDMA_MAX_STATS_32);
   5445	stats_val[IRDMA_HW_STAT_INDEX_TCPRTXSEG] +=
   5446		IRDMA_STATS_DELTA(gather_stats->tcprtxseg,
   5447				  last_gather_stats->tcprtxseg,
   5448				  IRDMA_MAX_STATS_32);
   5449	stats_val[IRDMA_HW_STAT_INDEX_TCPRXOPTERR] +=
   5450		IRDMA_STATS_DELTA(gather_stats->tcprxopterr,
   5451				  last_gather_stats->tcprxopterr,
   5452				  IRDMA_MAX_STATS_32);
   5453	stats_val[IRDMA_HW_STAT_INDEX_TCPRXPROTOERR] +=
   5454		IRDMA_STATS_DELTA(gather_stats->tcprxprotoerr,
   5455				  last_gather_stats->tcprxprotoerr,
   5456				  IRDMA_MAX_STATS_32);
   5457	stats_val[IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED] +=
   5458		IRDMA_STATS_DELTA(gather_stats->rxrpcnphandled,
   5459				  last_gather_stats->rxrpcnphandled,
   5460				  IRDMA_MAX_STATS_32);
   5461	stats_val[IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED] +=
   5462		IRDMA_STATS_DELTA(gather_stats->rxrpcnpignored,
   5463				  last_gather_stats->rxrpcnpignored,
   5464				  IRDMA_MAX_STATS_32);
   5465	stats_val[IRDMA_HW_STAT_INDEX_TXNPCNPSENT] +=
   5466		IRDMA_STATS_DELTA(gather_stats->txnpcnpsent,
   5467				  last_gather_stats->txnpcnpsent,
   5468				  IRDMA_MAX_STATS_32);
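        	/* remaining hardware counters are 48-bit; their deltas
        	 * accumulate in stats_val_64
        	 */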
   5469	stats_val = hw_stats->stats_val_64;
   5470	stats_val[IRDMA_HW_STAT_INDEX_IP4RXOCTS] +=
   5471		IRDMA_STATS_DELTA(gather_stats->ip4rxocts,
   5472				  last_gather_stats->ip4rxocts,
   5473				  IRDMA_MAX_STATS_48);
   5474	stats_val[IRDMA_HW_STAT_INDEX_IP4RXPKTS] +=
   5475		IRDMA_STATS_DELTA(gather_stats->ip4rxpkts,
   5476				  last_gather_stats->ip4rxpkts,
   5477				  IRDMA_MAX_STATS_48);
   5478	stats_val[IRDMA_HW_STAT_INDEX_IP4RXFRAGS] +=
   5479		IRDMA_STATS_DELTA(gather_stats->ip4txfrag,
   5480				  last_gather_stats->ip4txfrag,
   5481				  IRDMA_MAX_STATS_48);
   5482	stats_val[IRDMA_HW_STAT_INDEX_IP4RXMCPKTS] +=
   5483		IRDMA_STATS_DELTA(gather_stats->ip4rxmcpkts,
   5484				  last_gather_stats->ip4rxmcpkts,
   5485				  IRDMA_MAX_STATS_48);
   5486	stats_val[IRDMA_HW_STAT_INDEX_IP4TXOCTS] +=
   5487		IRDMA_STATS_DELTA(gather_stats->ip4txocts,
   5488				  last_gather_stats->ip4txocts,
   5489				  IRDMA_MAX_STATS_48);
   5490	stats_val[IRDMA_HW_STAT_INDEX_IP4TXPKTS] +=
   5491		IRDMA_STATS_DELTA(gather_stats->ip4txpkts,
   5492				  last_gather_stats->ip4txpkts,
   5493				  IRDMA_MAX_STATS_48);
   5494	stats_val[IRDMA_HW_STAT_INDEX_IP4TXFRAGS] +=
   5495		IRDMA_STATS_DELTA(gather_stats->ip4txfrag,
   5496				  last_gather_stats->ip4txfrag,
   5497				  IRDMA_MAX_STATS_48);
   5498	stats_val[IRDMA_HW_STAT_INDEX_IP4TXMCPKTS] +=
   5499		IRDMA_STATS_DELTA(gather_stats->ip4txmcpkts,
   5500				  last_gather_stats->ip4txmcpkts,
   5501				  IRDMA_MAX_STATS_48);
   5502	stats_val[IRDMA_HW_STAT_INDEX_IP6RXOCTS] +=
   5503		IRDMA_STATS_DELTA(gather_stats->ip6rxocts,
   5504				  last_gather_stats->ip6rxocts,
   5505				  IRDMA_MAX_STATS_48);
   5506	stats_val[IRDMA_HW_STAT_INDEX_IP6RXPKTS] +=
   5507		IRDMA_STATS_DELTA(gather_stats->ip6rxpkts,
   5508				  last_gather_stats->ip6rxpkts,
   5509				  IRDMA_MAX_STATS_48);
   5510	stats_val[IRDMA_HW_STAT_INDEX_IP6RXFRAGS] +=
   5511		IRDMA_STATS_DELTA(gather_stats->ip6txfrags,
   5512				  last_gather_stats->ip6txfrags,
   5513				  IRDMA_MAX_STATS_48);
   5514	stats_val[IRDMA_HW_STAT_INDEX_IP6RXMCPKTS] +=
   5515		IRDMA_STATS_DELTA(gather_stats->ip6rxmcpkts,
   5516				  last_gather_stats->ip6rxmcpkts,
   5517				  IRDMA_MAX_STATS_48);
   5518	stats_val[IRDMA_HW_STAT_INDEX_IP6TXOCTS] +=
   5519		IRDMA_STATS_DELTA(gather_stats->ip6txocts,
   5520				  last_gather_stats->ip6txocts,
   5521				  IRDMA_MAX_STATS_48);
   5522	stats_val[IRDMA_HW_STAT_INDEX_IP6TXPKTS] +=
   5523		IRDMA_STATS_DELTA(gather_stats->ip6txpkts,
   5524				  last_gather_stats->ip6txpkts,
   5525				  IRDMA_MAX_STATS_48);
   5526	stats_val[IRDMA_HW_STAT_INDEX_IP6TXFRAGS] +=
   5527		IRDMA_STATS_DELTA(gather_stats->ip6txfrags,
   5528				  last_gather_stats->ip6txfrags,
   5529				  IRDMA_MAX_STATS_48);
   5530	stats_val[IRDMA_HW_STAT_INDEX_IP6TXMCPKTS] +=
   5531		IRDMA_STATS_DELTA(gather_stats->ip6txmcpkts,
   5532				  last_gather_stats->ip6txmcpkts,
   5533				  IRDMA_MAX_STATS_48);
   5534	stats_val[IRDMA_HW_STAT_INDEX_TCPRXSEGS] +=
   5535		IRDMA_STATS_DELTA(gather_stats->tcprxsegs,
   5536				  last_gather_stats->tcprxsegs,
   5537				  IRDMA_MAX_STATS_48);
   5538	stats_val[IRDMA_HW_STAT_INDEX_TCPTXSEG] +=
   5539		IRDMA_STATS_DELTA(gather_stats->tcptxsegs,
   5540				  last_gather_stats->tcptxsegs,
   5541				  IRDMA_MAX_STATS_48);
   5542	stats_val[IRDMA_HW_STAT_INDEX_RDMARXRDS] +=
   5543		IRDMA_STATS_DELTA(gather_stats->rdmarxrds,
   5544				  last_gather_stats->rdmarxrds,
   5545				  IRDMA_MAX_STATS_48);
   5546	stats_val[IRDMA_HW_STAT_INDEX_RDMARXSNDS] +=
   5547		IRDMA_STATS_DELTA(gather_stats->rdmarxsnds,
   5548				  last_gather_stats->rdmarxsnds,
   5549				  IRDMA_MAX_STATS_48);
   5550	stats_val[IRDMA_HW_STAT_INDEX_RDMARXWRS] +=
   5551		IRDMA_STATS_DELTA(gather_stats->rdmarxwrs,
   5552				  last_gather_stats->rdmarxwrs,
   5553				  IRDMA_MAX_STATS_48);
   5554	stats_val[IRDMA_HW_STAT_INDEX_RDMATXRDS] +=
   5555		IRDMA_STATS_DELTA(gather_stats->rdmatxrds,
   5556				  last_gather_stats->rdmatxrds,
   5557				  IRDMA_MAX_STATS_48);
   5558	stats_val[IRDMA_HW_STAT_INDEX_RDMATXSNDS] +=
   5559		IRDMA_STATS_DELTA(gather_stats->rdmatxsnds,
   5560				  last_gather_stats->rdmatxsnds,
   5561				  IRDMA_MAX_STATS_48);
   5562	stats_val[IRDMA_HW_STAT_INDEX_RDMATXWRS] +=
   5563		IRDMA_STATS_DELTA(gather_stats->rdmatxwrs,
   5564				  last_gather_stats->rdmatxwrs,
   5565				  IRDMA_MAX_STATS_48);
   5566	stats_val[IRDMA_HW_STAT_INDEX_RDMAVBND] +=
   5567		IRDMA_STATS_DELTA(gather_stats->rdmavbn,
   5568				  last_gather_stats->rdmavbn,
   5569				  IRDMA_MAX_STATS_48);
   5570	stats_val[IRDMA_HW_STAT_INDEX_RDMAVINV] +=
   5571		IRDMA_STATS_DELTA(gather_stats->rdmavinv,
   5572				  last_gather_stats->rdmavinv,
   5573				  IRDMA_MAX_STATS_48);
   5574	stats_val[IRDMA_HW_STAT_INDEX_UDPRXPKTS] +=
   5575		IRDMA_STATS_DELTA(gather_stats->udprxpkts,
   5576				  last_gather_stats->udprxpkts,
   5577				  IRDMA_MAX_STATS_48);
   5578	stats_val[IRDMA_HW_STAT_INDEX_UDPTXPKTS] +=
   5579		IRDMA_STATS_DELTA(gather_stats->udptxpkts,
   5580				  last_gather_stats->udptxpkts,
   5581				  IRDMA_MAX_STATS_48);
   5582	stats_val[IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS] +=
   5583		IRDMA_STATS_DELTA(gather_stats->rxnpecnmrkpkts,
   5584				  last_gather_stats->rxnpecnmrkpkts,
   5585				  IRDMA_MAX_STATS_48);
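        	/* keep this gather as the baseline for the next delta */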
   5586	memcpy(last_gather_stats, gather_stats, sizeof(*last_gather_stats));
   5587}