cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

qed_rdma.c (58414B)


      1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
      2/* QLogic qed NIC Driver
      3 * Copyright (c) 2015-2017  QLogic Corporation
      4 * Copyright (c) 2019-2020 Marvell International Ltd.
      5 */
      6
      7#include <linux/types.h>
      8#include <asm/byteorder.h>
      9#include <linux/bitops.h>
     10#include <linux/delay.h>
     11#include <linux/dma-mapping.h>
     12#include <linux/errno.h>
     13#include <linux/io.h>
     14#include <linux/kernel.h>
     15#include <linux/list.h>
     16#include <linux/module.h>
     17#include <linux/mutex.h>
     18#include <linux/pci.h>
     19#include <linux/slab.h>
     20#include <linux/spinlock.h>
     21#include <linux/string.h>
     22#include <net/addrconf.h>
     23#include "qed.h"
     24#include "qed_cxt.h"
     25#include "qed_hsi.h"
     26#include "qed_iro_hsi.h"
     27#include "qed_hw.h"
     28#include "qed_init_ops.h"
     29#include "qed_int.h"
     30#include "qed_ll2.h"
     31#include "qed_mcp.h"
     32#include "qed_reg_addr.h"
     33#include <linux/qed/qed_rdma_if.h>
     34#include "qed_rdma.h"
     35#include "qed_roce.h"
     36#include "qed_sp.h"
     37
     38int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
     39			struct qed_bmap *bmap, u32 max_count, char *name)
     40{
     41	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "max_count = %08x\n", max_count);
     42
     43	bmap->max_count = max_count;
     44
     45	bmap->bitmap = kcalloc(BITS_TO_LONGS(max_count), sizeof(long),
     46			       GFP_KERNEL);
     47	if (!bmap->bitmap)
     48		return -ENOMEM;
     49
     50	snprintf(bmap->name, QED_RDMA_MAX_BMAP_NAME, "%s", name);
     51
     52	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n");
     53	return 0;
     54}
     55
     56int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn,
     57			   struct qed_bmap *bmap, u32 *id_num)
     58{
     59	*id_num = find_first_zero_bit(bmap->bitmap, bmap->max_count);
     60	if (*id_num >= bmap->max_count)
     61		return -EINVAL;
     62
     63	__set_bit(*id_num, bmap->bitmap);
     64
     65	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: allocated id %d\n",
     66		   bmap->name, *id_num);
     67
     68	return 0;
     69}
     70
     71void qed_bmap_set_id(struct qed_hwfn *p_hwfn,
     72		     struct qed_bmap *bmap, u32 id_num)
     73{
     74	if (id_num >= bmap->max_count)
     75		return;
     76
     77	__set_bit(id_num, bmap->bitmap);
     78}
     79
     80void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
     81			 struct qed_bmap *bmap, u32 id_num)
     82{
     83	bool b_acquired;
     84
     85	if (id_num >= bmap->max_count)
     86		return;
     87
     88	b_acquired = test_and_clear_bit(id_num, bmap->bitmap);
     89	if (!b_acquired) {
     90		DP_NOTICE(p_hwfn, "%s bitmap: id %d already released\n",
     91			  bmap->name, id_num);
     92		return;
     93	}
     94
     95	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: released id %d\n",
     96		   bmap->name, id_num);
     97}
     98
     99int qed_bmap_test_id(struct qed_hwfn *p_hwfn,
    100		     struct qed_bmap *bmap, u32 id_num)
    101{
    102	if (id_num >= bmap->max_count)
    103		return -1;
    104
    105	return test_bit(id_num, bmap->bitmap);
    106}
    107
    108static bool qed_bmap_is_empty(struct qed_bmap *bmap)
    109{
    110	return bmap->max_count == find_first_bit(bmap->bitmap, bmap->max_count);
    111}
    112
    113static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
    114{
    115	/* The first sb id for RoCE comes after all the l2 sbs */
    116	return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
    117}
    118
    119int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn)
    120{
    121	struct qed_rdma_info *p_rdma_info;
    122
    123	p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL);
    124	if (!p_rdma_info)
    125		return -ENOMEM;
    126
    127	spin_lock_init(&p_rdma_info->lock);
    128
    129	p_hwfn->p_rdma_info = p_rdma_info;
    130	return 0;
    131}
    132
    133void qed_rdma_info_free(struct qed_hwfn *p_hwfn)
    134{
    135	kfree(p_hwfn->p_rdma_info);
    136	p_hwfn->p_rdma_info = NULL;
    137}
    138
    139static int qed_rdma_alloc(struct qed_hwfn *p_hwfn)
    140{
    141	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
    142	u32 num_cons, num_tasks;
    143	int rc = -ENOMEM;
    144
    145	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n");
    146
    147	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
    148		p_rdma_info->proto = PROTOCOLID_IWARP;
    149	else
    150		p_rdma_info->proto = PROTOCOLID_ROCE;
    151
    152	num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto,
    153					       NULL);
    154
    155	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
    156		p_rdma_info->num_qps = num_cons;
    157	else
    158		p_rdma_info->num_qps = num_cons / 2; /* 2 cids per qp */
    159
    160	num_tasks = qed_cxt_get_proto_tid_count(p_hwfn, PROTOCOLID_ROCE);
    161
    162	/* Each MR uses a single task */
    163	p_rdma_info->num_mrs = num_tasks;
    164
    165	/* Queue zone lines are shared between RoCE and L2 in such a way that
    166	 * they can be used by each without obstructing the other.
    167	 */
    168	p_rdma_info->queue_zone_base = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
    169	p_rdma_info->max_queue_zones = (u16)RESC_NUM(p_hwfn, QED_L2_QUEUE);
    170
    171	/* Allocate a struct with device params and fill it */
    172	p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
    173	if (!p_rdma_info->dev)
    174		return rc;
    175
    176	/* Allocate a struct with port params and fill it */
    177	p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL);
    178	if (!p_rdma_info->port)
    179		goto free_rdma_dev;
    180
    181	/* Allocate bit map for pd's */
    182	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS,
    183				 "PD");
    184	if (rc) {
    185		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
    186			   "Failed to allocate pd_map, rc = %d\n",
    187			   rc);
    188		goto free_rdma_port;
    189	}
    190
    191	/* Allocate bit map for XRC Domains */
    192	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->xrcd_map,
    193				 QED_RDMA_MAX_XRCDS, "XRCD");
    194	if (rc) {
    195		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
    196			   "Failed to allocate xrcd_map,rc = %d\n", rc);
    197		goto free_pd_map;
    198	}
    199
    200	/* Allocate DPI bitmap */
    201	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map,
    202				 p_hwfn->dpi_count, "DPI");
    203	if (rc) {
    204		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
    205			   "Failed to allocate DPI bitmap, rc = %d\n", rc);
    206		goto free_xrcd_map;
    207	}
    208
    209	/* Allocate bitmap for cq's. The maximum number of CQs is bound to
    210	 * the number of connections we support. (num_qps in iWARP or
    211	 * num_qps/2 in RoCE).
    212	 */
    213	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map, num_cons, "CQ");
    214	if (rc) {
    215		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
    216			   "Failed to allocate cq bitmap, rc = %d\n", rc);
    217		goto free_dpi_map;
    218	}
    219
    220	/* Allocate bitmap for toggle bit for cq icids
    221	 * We toggle the bit every time we create or resize cq for a given icid.
    222	 * Size needs to equal the size of the cq bmap.
    223	 */
    224	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits,
    225				 num_cons, "Toggle");
    226	if (rc) {
    227		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
    228			   "Failed to allocate toggle bits, rc = %d\n", rc);
    229		goto free_cq_map;
    230	}
    231
    232	/* Allocate bitmap for itids */
    233	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->tid_map,
    234				 p_rdma_info->num_mrs, "MR");
    235	if (rc) {
    236		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
    237			   "Failed to allocate itids bitmaps, rc = %d\n", rc);
    238		goto free_toggle_map;
    239	}
    240
    241	/* Allocate bitmap for cids used for qps. */
    242	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons,
    243				 "CID");
    244	if (rc) {
    245		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
    246			   "Failed to allocate cid bitmap, rc = %d\n", rc);
    247		goto free_tid_map;
    248	}
    249
    250	/* Allocate bitmap for cids used for responders/requesters. */
    251	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->real_cid_map, num_cons,
    252				 "REAL_CID");
    253	if (rc) {
    254		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
    255			   "Failed to allocate real cid bitmap, rc = %d\n", rc);
    256		goto free_cid_map;
    257	}
    258
    259	/* The first SRQ follows the last XRC SRQ. This means that the
    260	 * SRQ IDs start from an offset equal to max_xrc_srqs.
    261	 */
    262	p_rdma_info->srq_id_offset = p_hwfn->p_cxt_mngr->xrc_srq_count;
    263	rc = qed_rdma_bmap_alloc(p_hwfn,
    264				 &p_rdma_info->xrc_srq_map,
    265				 p_hwfn->p_cxt_mngr->xrc_srq_count, "XRC SRQ");
    266	if (rc) {
    267		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
    268			   "Failed to allocate xrc srq bitmap, rc = %d\n", rc);
    269		goto free_real_cid_map;
    270	}
    271
    272	/* Allocate bitmap for srqs */
    273	p_rdma_info->num_srqs = p_hwfn->p_cxt_mngr->srq_count;
    274	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->srq_map,
    275				 p_rdma_info->num_srqs, "SRQ");
    276	if (rc) {
    277		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
    278			   "Failed to allocate srq bitmap, rc = %d\n", rc);
    279		goto free_xrc_srq_map;
    280	}
    281
    282	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
    283		rc = qed_iwarp_alloc(p_hwfn);
    284
    285	if (rc)
    286		goto free_srq_map;
    287
    288	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n");
    289	return 0;
    290
    291free_srq_map:
    292	kfree(p_rdma_info->srq_map.bitmap);
    293free_xrc_srq_map:
    294	kfree(p_rdma_info->xrc_srq_map.bitmap);
    295free_real_cid_map:
    296	kfree(p_rdma_info->real_cid_map.bitmap);
    297free_cid_map:
    298	kfree(p_rdma_info->cid_map.bitmap);
    299free_tid_map:
    300	kfree(p_rdma_info->tid_map.bitmap);
    301free_toggle_map:
    302	kfree(p_rdma_info->toggle_bits.bitmap);
    303free_cq_map:
    304	kfree(p_rdma_info->cq_map.bitmap);
    305free_dpi_map:
    306	kfree(p_rdma_info->dpi_map.bitmap);
    307free_xrcd_map:
    308	kfree(p_rdma_info->xrcd_map.bitmap);
    309free_pd_map:
    310	kfree(p_rdma_info->pd_map.bitmap);
    311free_rdma_port:
    312	kfree(p_rdma_info->port);
    313free_rdma_dev:
    314	kfree(p_rdma_info->dev);
    315
    316	return rc;
    317}
    318
    319void qed_rdma_bmap_free(struct qed_hwfn *p_hwfn,
    320			struct qed_bmap *bmap, bool check)
    321{
    322	unsigned int bit, weight, nbits;
    323	unsigned long *b;
    324
    325	if (!check)
    326		goto end;
    327
    328	weight = bitmap_weight(bmap->bitmap, bmap->max_count);
    329	if (!weight)
    330		goto end;
    331
    332	DP_NOTICE(p_hwfn,
    333		  "%s bitmap not free - size=%d, weight=%d, 512 bits per line\n",
    334		  bmap->name, bmap->max_count, weight);
    335
    336	for (bit = 0; bit < bmap->max_count; bit += 512) {
    337		b =  bmap->bitmap + BITS_TO_LONGS(bit);
    338		nbits = min(bmap->max_count - bit, 512U);
    339
    340		if (!bitmap_empty(b, nbits))
    341			DP_NOTICE(p_hwfn,
    342				  "line 0x%04x: %*pb\n", bit / 512, nbits, b);
    343	}
    344
    345end:
    346	kfree(bmap->bitmap);
    347	bmap->bitmap = NULL;
    348}
    349
    350static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
    351{
    352	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
    353
    354	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
    355		qed_iwarp_resc_free(p_hwfn);
    356
    357	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cid_map, 1);
    358	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->pd_map, 1);
    359	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, 1);
    360	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cq_map, 1);
    361	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->toggle_bits, 0);
    362	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tid_map, 1);
    363	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->srq_map, 1);
    364	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, 1);
    365	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->xrc_srq_map, 1);
    366	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->xrcd_map, 1);
    367
    368	kfree(p_rdma_info->port);
    369	kfree(p_rdma_info->dev);
    370}
    371
    372static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
    373{
    374	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
    375
    376	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
    377
    378	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
    379	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
    380	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
    381}
    382
    383static void qed_rdma_free_reserved_lkey(struct qed_hwfn *p_hwfn)
    384{
    385	qed_rdma_free_tid(p_hwfn, p_hwfn->p_rdma_info->dev->reserved_lkey);
    386}
    387
    388static void qed_rdma_free(struct qed_hwfn *p_hwfn)
    389{
    390	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n");
    391
    392	qed_rdma_free_reserved_lkey(p_hwfn);
    393	qed_cxt_free_proto_ilt(p_hwfn, p_hwfn->p_rdma_info->proto);
    394	qed_rdma_resc_free(p_hwfn);
    395}
    396
    397static void qed_rdma_init_events(struct qed_hwfn *p_hwfn,
    398				 struct qed_rdma_start_in_params *params)
    399{
    400	struct qed_rdma_events *events;
    401
    402	events = &p_hwfn->p_rdma_info->events;
    403
    404	events->unaffiliated_event = params->events->unaffiliated_event;
    405	events->affiliated_event = params->events->affiliated_event;
    406	events->context = params->events->context;
    407}
    408
    409static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
    410				  struct qed_rdma_start_in_params *params)
    411{
    412	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
    413	struct qed_dev *cdev = p_hwfn->cdev;
    414	u32 pci_status_control;
    415	u32 num_qps;
    416
    417	/* Vendor specific information */
    418	dev->vendor_id = cdev->vendor_id;
    419	dev->vendor_part_id = cdev->device_id;
    420	dev->hw_ver = cdev->chip_rev;
    421	dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) |
    422		      (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION);
    423
    424	addrconf_addr_eui48((u8 *)&dev->sys_image_guid,
    425			    p_hwfn->hw_info.hw_mac_addr);
    426
    427	dev->node_guid = dev->sys_image_guid;
    428
    429	dev->max_sge = min_t(u32, RDMA_MAX_SGE_PER_SQ_WQE,
    430			     RDMA_MAX_SGE_PER_RQ_WQE);
    431
    432	if (cdev->rdma_max_sge)
    433		dev->max_sge = min_t(u32, cdev->rdma_max_sge, dev->max_sge);
    434
    435	dev->max_srq_sge = QED_RDMA_MAX_SGE_PER_SRQ_WQE;
    436	if (p_hwfn->cdev->rdma_max_srq_sge) {
    437		dev->max_srq_sge = min_t(u32,
    438					 p_hwfn->cdev->rdma_max_srq_sge,
    439					 dev->max_srq_sge);
    440	}
    441	dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE;
    442
    443	dev->max_inline = (cdev->rdma_max_inline) ?
    444			  min_t(u32, cdev->rdma_max_inline, dev->max_inline) :
    445			  dev->max_inline;
    446
    447	dev->max_wqe = QED_RDMA_MAX_WQE;
    448	dev->max_cnq = (u8)FEAT_NUM(p_hwfn, QED_RDMA_CNQ);
    449
    450	/* The number of QPs may be higher than QED_ROCE_MAX_QPS, because
    451	 * it is up-aligned to 16 and then to ILT page size within qed cxt.
    452	 * This is OK in terms of ILT but we don't want to configure the FW
    453	 * above its abilities
    454	 */
    455	num_qps = ROCE_MAX_QPS;
    456	num_qps = min_t(u64, num_qps, p_hwfn->p_rdma_info->num_qps);
    457	dev->max_qp = num_qps;
    458
    459	/* CQs use the same icids that QPs use; hence they are limited by the
    460	 * number of icids. There are two icids per QP.
    461	 */
    462	dev->max_cq = num_qps * 2;
    463
    464	/* The number of mrs is smaller by 1 since the first is reserved */
    465	dev->max_mr = p_hwfn->p_rdma_info->num_mrs - 1;
    466	dev->max_mr_size = QED_RDMA_MAX_MR_SIZE;
    467
    468	/* The maximum CQE capacity per CQ supported.
    469	 * The maximum number of CQEs is reached with a two-level PBL,
    470	 * where 8 is the pointer size in bytes
    471	 * and 32 is the size of a CQ element in bytes.
    472	 */
    473	if (params->cq_mode == QED_RDMA_CQ_MODE_32_BITS)
    474		dev->max_cqe = QED_RDMA_MAX_CQE_32_BIT;
    475	else
    476		dev->max_cqe = QED_RDMA_MAX_CQE_16_BIT;
    477
    478	dev->max_mw = 0;
    479	dev->max_mr_mw_fmr_pbl = (PAGE_SIZE / 8) * (PAGE_SIZE / 8);
    480	dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE;
    481	if (QED_IS_ROCE_PERSONALITY(p_hwfn))
    482		dev->max_pkey = QED_RDMA_MAX_P_KEY;
    483
    484	dev->max_srq = p_hwfn->p_rdma_info->num_srqs;
    485	dev->max_srq_wr = QED_RDMA_MAX_SRQ_WQE_ELEM;
    486	dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
    487					  (RDMA_RESP_RD_ATOMIC_ELM_SIZE * 2);
    488	dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
    489					 RDMA_REQ_RD_ATOMIC_ELM_SIZE;
    490	dev->max_dev_resp_rd_atomic_resc = dev->max_qp_resp_rd_atomic_resc *
    491					   p_hwfn->p_rdma_info->num_qps;
    492	dev->page_size_caps = QED_RDMA_PAGE_SIZE_CAPS;
    493	dev->dev_ack_delay = QED_RDMA_ACK_DELAY;
    494	dev->max_pd = RDMA_MAX_PDS;
    495	dev->max_ah = p_hwfn->p_rdma_info->num_qps;
    496	dev->max_stats_queues = (u8)RESC_NUM(p_hwfn, QED_RDMA_STATS_QUEUE);
    497
    498	/* Set capabilities */
    499	dev->dev_caps = 0;
    500	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RNR_NAK, 1);
    501	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT, 1);
    502	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT, 1);
    503	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RESIZE_CQ, 1);
    504	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_MEMORY_EXT, 1);
    505	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_QUEUE_EXT, 1);
    506	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ZBVA, 1);
    507	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1);
    508
    509	/* Check atomic operations support in PCI configuration space. */
    510	pcie_capability_read_dword(cdev->pdev, PCI_EXP_DEVCTL2,
    511				   &pci_status_control);
    512
    513	if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN)
    514		SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1);
    515
    516	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
    517		qed_iwarp_init_devinfo(p_hwfn);
    518}
    519
    520static void qed_rdma_init_port(struct qed_hwfn *p_hwfn)
    521{
    522	struct qed_rdma_port *port = p_hwfn->p_rdma_info->port;
    523	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
    524
    525	port->port_state = p_hwfn->mcp_info->link_output.link_up ?
    526			   QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;
    527
    528	port->max_msg_size = min_t(u64,
    529				   (dev->max_mr_mw_fmr_size *
    530				    p_hwfn->cdev->rdma_max_sge),
    531				   BIT(31));
    532
    533	port->pkey_bad_counter = 0;
    534}
    535
    536static int qed_rdma_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
    537{
    538	int rc = 0;
    539
    540	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW\n");
    541	p_hwfn->b_rdma_enabled_in_prs = false;
    542
    543	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
    544		qed_iwarp_init_hw(p_hwfn, p_ptt);
    545	else
    546		rc = qed_roce_init_hw(p_hwfn, p_ptt);
    547
    548	return rc;
    549}
    550
    551static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
    552			     struct qed_rdma_start_in_params *params,
    553			     struct qed_ptt *p_ptt)
    554{
    555	struct rdma_init_func_ramrod_data *p_ramrod;
    556	struct qed_rdma_cnq_params *p_cnq_pbl_list;
    557	struct rdma_init_func_hdr *p_params_header;
    558	struct rdma_cnq_params *p_cnq_params;
    559	struct qed_sp_init_data init_data;
    560	struct qed_spq_entry *p_ent;
    561	u32 cnq_id, sb_id;
    562	u16 igu_sb_id;
    563	int rc;
    564
    565	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Starting FW\n");
    566
    567	/* Save the number of cnqs for the function close ramrod */
    568	p_hwfn->p_rdma_info->num_cnqs = params->desired_cnq;
    569
    570	/* Get SPQ entry */
    571	memset(&init_data, 0, sizeof(init_data));
    572	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
    573	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
    574
    575	rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_INIT,
    576				 p_hwfn->p_rdma_info->proto, &init_data);
    577	if (rc)
    578		return rc;
    579
    580	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
    581		qed_iwarp_init_fw_ramrod(p_hwfn,
    582					 &p_ent->ramrod.iwarp_init_func);
    583		p_ramrod = &p_ent->ramrod.iwarp_init_func.rdma;
    584	} else {
    585		p_ramrod = &p_ent->ramrod.roce_init_func.rdma;
    586	}
    587
    588	p_params_header = &p_ramrod->params_header;
    589	p_params_header->cnq_start_offset = (u8)RESC_START(p_hwfn,
    590							   QED_RDMA_CNQ_RAM);
    591	p_params_header->num_cnqs = params->desired_cnq;
    592	p_params_header->first_reg_srq_id =
    593	    cpu_to_le16(p_hwfn->p_rdma_info->srq_id_offset);
    594	p_params_header->reg_srq_base_addr =
    595	    cpu_to_le32(qed_cxt_get_ilt_page_size(p_hwfn, ILT_CLI_TSDM));
    596	if (params->cq_mode == QED_RDMA_CQ_MODE_16_BITS)
    597		p_params_header->cq_ring_mode = 1;
    598	else
    599		p_params_header->cq_ring_mode = 0;
    600
    601	for (cnq_id = 0; cnq_id < params->desired_cnq; cnq_id++) {
    602		sb_id = qed_rdma_get_sb_id(p_hwfn, cnq_id);
    603		igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id);
    604		p_ramrod->cnq_params[cnq_id].sb_num = cpu_to_le16(igu_sb_id);
    605		p_cnq_params = &p_ramrod->cnq_params[cnq_id];
    606		p_cnq_pbl_list = &params->cnq_pbl_list[cnq_id];
    607
    608		p_cnq_params->sb_index = p_hwfn->pf_params.rdma_pf_params.gl_pi;
    609		p_cnq_params->num_pbl_pages = p_cnq_pbl_list->num_pbl_pages;
    610
    611		DMA_REGPAIR_LE(p_cnq_params->pbl_base_addr,
    612			       p_cnq_pbl_list->pbl_ptr);
    613
    614		/* we assume here that cnq_id and qz_offset are the same */
    615		p_cnq_params->queue_zone_num =
    616			cpu_to_le16(p_hwfn->p_rdma_info->queue_zone_base +
    617				    cnq_id);
    618	}
    619
    620	return qed_spq_post(p_hwfn, p_ent, NULL);
    621}
    622
    623static int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
    624{
    625	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
    626	int rc;
    627
    628	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");
    629
    630	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
    631	rc = qed_rdma_bmap_alloc_id(p_hwfn,
    632				    &p_hwfn->p_rdma_info->tid_map, itid);
    633	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
    634	if (rc)
    635		goto out;
    636
    637	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
    638out:
    639	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
    640	return rc;
    641}
    642
    643static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
    644{
    645	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
    646
    647	/* Tid 0 will be used as the key for "reserved MR".
    648	 * The driver should allocate memory for it so it can be loaded but no
    649	 * ramrod should be passed on it.
    650	 */
    651	qed_rdma_alloc_tid(p_hwfn, &dev->reserved_lkey);
    652	if (dev->reserved_lkey != RDMA_RESERVED_LKEY) {
    653		DP_NOTICE(p_hwfn,
    654			  "Reserved lkey should be equal to RDMA_RESERVED_LKEY\n");
    655		return -EINVAL;
    656	}
    657
    658	return 0;
    659}
    660
    661static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
    662			  struct qed_ptt *p_ptt,
    663			  struct qed_rdma_start_in_params *params)
    664{
    665	int rc;
    666
    667	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n");
    668
    669	qed_rdma_init_devinfo(p_hwfn, params);
    670	qed_rdma_init_port(p_hwfn);
    671	qed_rdma_init_events(p_hwfn, params);
    672
    673	rc = qed_rdma_reserve_lkey(p_hwfn);
    674	if (rc)
    675		return rc;
    676
    677	rc = qed_rdma_init_hw(p_hwfn, p_ptt);
    678	if (rc)
    679		return rc;
    680
    681	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
    682		rc = qed_iwarp_setup(p_hwfn, params);
    683		if (rc)
    684			return rc;
    685	} else {
    686		rc = qed_roce_setup(p_hwfn);
    687		if (rc)
    688			return rc;
    689	}
    690
    691	return qed_rdma_start_fw(p_hwfn, params, p_ptt);
    692}
    693
    694static int qed_rdma_stop(void *rdma_cxt)
    695{
    696	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
    697	struct rdma_close_func_ramrod_data *p_ramrod;
    698	struct qed_sp_init_data init_data;
    699	struct qed_spq_entry *p_ent;
    700	struct qed_ptt *p_ptt;
    701	u32 ll2_ethertype_en;
    702	int rc = -EBUSY;
    703
    704	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop\n");
    705
    706	p_ptt = qed_ptt_acquire(p_hwfn);
    707	if (!p_ptt) {
    708		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to acquire PTT\n");
    709		return rc;
    710	}
    711
    712	/* Disable RoCE search */
    713	qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0);
    714	p_hwfn->b_rdma_enabled_in_prs = false;
    715	p_hwfn->p_rdma_info->active = 0;
    716	qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
    717
    718	ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
    719
    720	qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
    721	       (ll2_ethertype_en & 0xFFFE));
    722
    723	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
    724		rc = qed_iwarp_stop(p_hwfn);
    725		if (rc) {
    726			qed_ptt_release(p_hwfn, p_ptt);
    727			return rc;
    728		}
    729	} else {
    730		qed_roce_stop(p_hwfn);
    731	}
    732
    733	qed_ptt_release(p_hwfn, p_ptt);
    734
    735	/* Get SPQ entry */
    736	memset(&init_data, 0, sizeof(init_data));
    737	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
    738	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
    739
    740	/* Stop RoCE */
    741	rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_CLOSE,
    742				 p_hwfn->p_rdma_info->proto, &init_data);
    743	if (rc)
    744		goto out;
    745
    746	p_ramrod = &p_ent->ramrod.rdma_close_func;
    747
    748	p_ramrod->num_cnqs = p_hwfn->p_rdma_info->num_cnqs;
    749	p_ramrod->cnq_start_offset = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM);
    750
    751	rc = qed_spq_post(p_hwfn, p_ent, NULL);
    752
    753out:
    754	qed_rdma_free(p_hwfn);
    755
    756	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop done, rc = %d\n", rc);
    757	return rc;
    758}
    759
    760static int qed_rdma_add_user(void *rdma_cxt,
    761			     struct qed_rdma_add_user_out_params *out_params)
    762{
    763	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
    764	u32 dpi_start_offset;
    765	u32 returned_id = 0;
    766	int rc;
    767
    768	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding User\n");
    769
    770	/* Allocate DPI */
    771	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
    772	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map,
    773				    &returned_id);
    774	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
    775
    776	out_params->dpi = (u16)returned_id;
    777
    778	/* Calculate the corresponding DPI address */
    779	dpi_start_offset = p_hwfn->dpi_start_offset;
    780
    781	out_params->dpi_addr = p_hwfn->doorbells + dpi_start_offset +
    782			       out_params->dpi * p_hwfn->dpi_size;
    783
    784	out_params->dpi_phys_addr = p_hwfn->db_phys_addr +
    785				    dpi_start_offset +
    786				    ((out_params->dpi) * p_hwfn->dpi_size);
    787
    788	out_params->dpi_size = p_hwfn->dpi_size;
    789	out_params->wid_count = p_hwfn->wid_count;
    790
    791	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding user - done, rc = %d\n", rc);
    792	return rc;
    793}
    794
    795static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
    796{
    797	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
    798	struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port;
    799	struct qed_mcp_link_state *p_link_output;
    800
    801	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA Query port\n");
    802
    803	/* The link state is saved only for the leading hwfn */
    804	p_link_output = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output;
    805
    806	p_port->port_state = p_link_output->link_up ? QED_RDMA_PORT_UP
    807	    : QED_RDMA_PORT_DOWN;
    808
    809	p_port->link_speed = p_link_output->speed;
    810
    811	p_port->max_msg_size = RDMA_MAX_DATA_SIZE_IN_WQE;
    812
    813	return p_port;
    814}
    815
    816static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
    817{
    818	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
    819
    820	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query device\n");
    821
    822	/* Return struct with device parameters */
    823	return p_hwfn->p_rdma_info->dev;
    824}
    825
    826static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
    827{
    828	struct qed_hwfn *p_hwfn;
    829	u16 qz_num;
    830	u32 addr;
    831
    832	p_hwfn = (struct qed_hwfn *)rdma_cxt;
    833
    834	if (qz_offset > p_hwfn->p_rdma_info->max_queue_zones) {
    835		DP_NOTICE(p_hwfn,
    836			  "queue zone offset %d is too large (max is %d)\n",
    837			  qz_offset, p_hwfn->p_rdma_info->max_queue_zones);
    838		return;
    839	}
    840
    841	qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
    842	addr = GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM,
    843				USTORM_COMMON_QUEUE_CONS, qz_num);
    844
    845	REG_WR16(p_hwfn, addr, prod);
    846
    847	/* keep prod updates ordered */
    848	wmb();
    849}
    850
    851static int qed_fill_rdma_dev_info(struct qed_dev *cdev,
    852				  struct qed_dev_rdma_info *info)
    853{
    854	struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
    855
    856	memset(info, 0, sizeof(*info));
    857
    858	info->rdma_type = QED_IS_ROCE_PERSONALITY(p_hwfn) ?
    859	    QED_RDMA_TYPE_ROCE : QED_RDMA_TYPE_IWARP;
    860
    861	info->user_dpm_enabled = (p_hwfn->db_bar_no_edpm == 0);
    862
    863	qed_fill_dev_info(cdev, &info->common);
    864
    865	return 0;
    866}
    867
    868static int qed_rdma_get_sb_start(struct qed_dev *cdev)
    869{
    870	int feat_num;
    871
    872	if (cdev->num_hwfns > 1)
    873		feat_num = FEAT_NUM(QED_AFFIN_HWFN(cdev), QED_PF_L2_QUE);
    874	else
    875		feat_num = FEAT_NUM(QED_AFFIN_HWFN(cdev), QED_PF_L2_QUE) *
    876			   cdev->num_hwfns;
    877
    878	return feat_num;
    879}
    880
    881static int qed_rdma_get_min_cnq_msix(struct qed_dev *cdev)
    882{
    883	int n_cnq = FEAT_NUM(QED_AFFIN_HWFN(cdev), QED_RDMA_CNQ);
    884	int n_msix = cdev->int_params.rdma_msix_cnt;
    885
    886	return min_t(int, n_cnq, n_msix);
    887}
    888
    889static int qed_rdma_set_int(struct qed_dev *cdev, u16 cnt)
    890{
    891	int limit = 0;
    892
    893	/* Mark the fastpath as free/used */
    894	cdev->int_params.fp_initialized = cnt ? true : false;
    895
    896	if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) {
    897		DP_ERR(cdev,
    898		       "qed roce supports only MSI-X interrupts (detected %d).\n",
    899		       cdev->int_params.out.int_mode);
    900		return -EINVAL;
    901	} else if (cdev->int_params.fp_msix_cnt) {
    902		limit = cdev->int_params.rdma_msix_cnt;
    903	}
    904
    905	if (!limit)
    906		return -ENOMEM;
    907
    908	return min_t(int, cnt, limit);
    909}
    910
    911static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info)
    912{
    913	memset(info, 0, sizeof(*info));
    914
    915	if (!cdev->int_params.fp_initialized) {
    916		DP_INFO(cdev,
    917			"Protocol driver requested interrupt information, but its support is not yet configured\n");
    918		return -EINVAL;
    919	}
    920
    921	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
    922		int msix_base = cdev->int_params.rdma_msix_base;
    923
    924		info->msix_cnt = cdev->int_params.rdma_msix_cnt;
    925		info->msix = &cdev->int_params.msix_table[msix_base];
    926
    927		DP_VERBOSE(cdev, QED_MSG_RDMA, "msix_cnt = %d msix_base=%d\n",
    928			   info->msix_cnt, msix_base);
    929	}
    930
    931	return 0;
    932}
    933
    934static int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
    935{
    936	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
    937	u32 returned_id;
    938	int rc;
    939
    940	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD\n");
    941
    942	/* Allocates an unused protection domain */
    943	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
    944	rc = qed_rdma_bmap_alloc_id(p_hwfn,
    945				    &p_hwfn->p_rdma_info->pd_map, &returned_id);
    946	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
    947
    948	*pd = (u16)returned_id;
    949
    950	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD - done, rc = %d\n", rc);
    951	return rc;
    952}
    953
    954static void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
    955{
    956	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
    957
    958	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "pd = %08x\n", pd);
    959
    960	/* Returns a previously allocated protection domain for reuse */
    961	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
    962	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, pd);
    963	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
    964}
    965
    966static int qed_rdma_alloc_xrcd(void *rdma_cxt, u16 *xrcd_id)
    967{
    968	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
    969	u32 returned_id;
    970	int rc;
    971
    972	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc XRCD\n");
    973
    974	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
    975	rc = qed_rdma_bmap_alloc_id(p_hwfn,
    976				    &p_hwfn->p_rdma_info->xrcd_map,
    977				    &returned_id);
    978	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
    979	if (rc) {
    980		DP_NOTICE(p_hwfn, "Failed in allocating xrcd id\n");
    981		return rc;
    982	}
    983
    984	*xrcd_id = (u16)returned_id;
    985
    986	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc XRCD - done, rc = %d\n", rc);
    987	return rc;
    988}
    989
    990static void qed_rdma_free_xrcd(void *rdma_cxt, u16 xrcd_id)
    991{
    992	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
    993
    994	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "xrcd_id = %08x\n", xrcd_id);
    995
    996	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
    997	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->xrcd_map, xrcd_id);
    998	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
    999}
   1000
   1001static enum qed_rdma_toggle_bit
   1002qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid)
   1003{
   1004	struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
   1005	enum qed_rdma_toggle_bit toggle_bit;
   1006	u32 bmap_id;
   1007
   1008	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", icid);
   1009
   1010	/* the function toggles the bit that is related to a given icid
   1011	 * and returns the new toggle bit's value
   1012	 */
   1013	bmap_id = icid - qed_cxt_get_proto_cid_start(p_hwfn, p_info->proto);
   1014
   1015	spin_lock_bh(&p_info->lock);
   1016	toggle_bit = !test_and_change_bit(bmap_id,
   1017					  p_info->toggle_bits.bitmap);
   1018	spin_unlock_bh(&p_info->lock);
   1019
   1020	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QED_RDMA_TOGGLE_BIT_= %d\n",
   1021		   toggle_bit);
   1022
   1023	return toggle_bit;
   1024}
   1025
   1026static int qed_rdma_create_cq(void *rdma_cxt,
   1027			      struct qed_rdma_create_cq_in_params *params,
   1028			      u16 *icid)
   1029{
   1030	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
   1031	struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
   1032	struct rdma_create_cq_ramrod_data *p_ramrod;
   1033	enum qed_rdma_toggle_bit toggle_bit;
   1034	struct qed_sp_init_data init_data;
   1035	struct qed_spq_entry *p_ent;
   1036	u32 returned_id, start_cid;
   1037	int rc;
   1038
   1039	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "cq_handle = %08x%08x\n",
   1040		   params->cq_handle_hi, params->cq_handle_lo);
   1041
   1042	/* Allocate icid */
   1043	spin_lock_bh(&p_info->lock);
   1044	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_info->cq_map, &returned_id);
   1045	spin_unlock_bh(&p_info->lock);
   1046
   1047	if (rc) {
   1048		DP_NOTICE(p_hwfn, "Can't create CQ, rc = %d\n", rc);
   1049		return rc;
   1050	}
   1051
   1052	start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
   1053						p_info->proto);
   1054	*icid = returned_id + start_cid;
   1055
   1056	/* Check if icid requires a page allocation */
   1057	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *icid);
   1058	if (rc)
   1059		goto err;
   1060
   1061	/* Get SPQ entry */
   1062	memset(&init_data, 0, sizeof(init_data));
   1063	init_data.cid = *icid;
   1064	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
   1065	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
   1066
   1067	/* Send create CQ ramrod */
   1068	rc = qed_sp_init_request(p_hwfn, &p_ent,
   1069				 RDMA_RAMROD_CREATE_CQ,
   1070				 p_info->proto, &init_data);
   1071	if (rc)
   1072		goto err;
   1073
   1074	p_ramrod = &p_ent->ramrod.rdma_create_cq;
   1075
   1076	p_ramrod->cq_handle.hi = cpu_to_le32(params->cq_handle_hi);
   1077	p_ramrod->cq_handle.lo = cpu_to_le32(params->cq_handle_lo);
   1078	p_ramrod->dpi = cpu_to_le16(params->dpi);
   1079	p_ramrod->is_two_level_pbl = params->pbl_two_level;
   1080	p_ramrod->max_cqes = cpu_to_le32(params->cq_size);
   1081	DMA_REGPAIR_LE(p_ramrod->pbl_addr, params->pbl_ptr);
   1082	p_ramrod->pbl_num_pages = cpu_to_le16(params->pbl_num_pages);
   1083	p_ramrod->cnq_id = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM) +
   1084			   params->cnq_id;
   1085	p_ramrod->int_timeout = cpu_to_le16(params->int_timeout);
   1086
   1087	/* toggle the bit for every resize or create cq for a given icid */
   1088	toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
   1089
   1090	p_ramrod->toggle_bit = toggle_bit;
   1091
   1092	rc = qed_spq_post(p_hwfn, p_ent, NULL);
   1093	if (rc) {
   1094		/* restore toggle bit */
   1095		qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
   1096		goto err;
   1097	}
   1098
   1099	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Created CQ, rc = %d\n", rc);
   1100	return rc;
   1101
   1102err:
   1103	/* release allocated icid */
   1104	spin_lock_bh(&p_info->lock);
   1105	qed_bmap_release_id(p_hwfn, &p_info->cq_map, returned_id);
   1106	spin_unlock_bh(&p_info->lock);
   1107	DP_NOTICE(p_hwfn, "Create CQ failed, rc = %d\n", rc);
   1108
   1109	return rc;
   1110}
   1111
   1112static int
   1113qed_rdma_destroy_cq(void *rdma_cxt,
   1114		    struct qed_rdma_destroy_cq_in_params *in_params,
   1115		    struct qed_rdma_destroy_cq_out_params *out_params)
   1116{
   1117	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
   1118	struct rdma_destroy_cq_output_params *p_ramrod_res;
   1119	struct rdma_destroy_cq_ramrod_data *p_ramrod;
   1120	struct qed_sp_init_data init_data;
   1121	struct qed_spq_entry *p_ent;
   1122	dma_addr_t ramrod_res_phys;
   1123	enum protocol_type proto;
   1124	int rc = -ENOMEM;
   1125
   1126	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);
   1127
   1128	p_ramrod_res =
   1129	    dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
   1130			       sizeof(struct rdma_destroy_cq_output_params),
   1131			       &ramrod_res_phys, GFP_KERNEL);
   1132	if (!p_ramrod_res) {
   1133		DP_NOTICE(p_hwfn,
   1134			  "qed destroy cq failed: cannot allocate memory (ramrod)\n");
   1135		return rc;
   1136	}
   1137
   1138	/* Get SPQ entry */
   1139	memset(&init_data, 0, sizeof(init_data));
   1140	init_data.cid = in_params->icid;
   1141	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
   1142	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
   1143	proto = p_hwfn->p_rdma_info->proto;
   1144	/* Send destroy CQ ramrod */
   1145	rc = qed_sp_init_request(p_hwfn, &p_ent,
   1146				 RDMA_RAMROD_DESTROY_CQ,
   1147				 proto, &init_data);
   1148	if (rc)
   1149		goto err;
   1150
   1151	p_ramrod = &p_ent->ramrod.rdma_destroy_cq;
   1152	DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
   1153
   1154	rc = qed_spq_post(p_hwfn, p_ent, NULL);
   1155	if (rc)
   1156		goto err;
   1157
   1158	out_params->num_cq_notif = le16_to_cpu(p_ramrod_res->cnq_num);
   1159
   1160	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
   1161			  sizeof(struct rdma_destroy_cq_output_params),
   1162			  p_ramrod_res, ramrod_res_phys);
   1163
   1164	/* Free icid */
   1165	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
   1166
   1167	qed_bmap_release_id(p_hwfn,
   1168			    &p_hwfn->p_rdma_info->cq_map,
   1169			    (in_params->icid -
   1170			     qed_cxt_get_proto_cid_start(p_hwfn, proto)));
   1171
   1172	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
   1173
   1174	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroyed CQ, rc = %d\n", rc);
   1175	return rc;
   1176
   1177err:	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
   1178			  sizeof(struct rdma_destroy_cq_output_params),
   1179			  p_ramrod_res, ramrod_res_phys);
   1180
   1181	return rc;
   1182}
   1183
   1184void qed_rdma_set_fw_mac(__le16 *p_fw_mac, const u8 *p_qed_mac)
   1185{
   1186	p_fw_mac[0] = cpu_to_le16((p_qed_mac[0] << 8) + p_qed_mac[1]);
   1187	p_fw_mac[1] = cpu_to_le16((p_qed_mac[2] << 8) + p_qed_mac[3]);
   1188	p_fw_mac[2] = cpu_to_le16((p_qed_mac[4] << 8) + p_qed_mac[5]);
   1189}
   1190
   1191static int qed_rdma_query_qp(void *rdma_cxt,
   1192			     struct qed_rdma_qp *qp,
   1193			     struct qed_rdma_query_qp_out_params *out_params)
   1194{
   1195	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
   1196	int rc = 0;
   1197
   1198	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
   1199
   1200	/* The following fields are filled in from qp and not FW as they can't
   1201	 * be modified by FW
   1202	 */
   1203	out_params->mtu = qp->mtu;
   1204	out_params->dest_qp = qp->dest_qp;
   1205	out_params->incoming_atomic_en = qp->incoming_atomic_en;
   1206	out_params->e2e_flow_control_en = qp->e2e_flow_control_en;
   1207	out_params->incoming_rdma_read_en = qp->incoming_rdma_read_en;
   1208	out_params->incoming_rdma_write_en = qp->incoming_rdma_write_en;
   1209	out_params->dgid = qp->dgid;
   1210	out_params->flow_label = qp->flow_label;
   1211	out_params->hop_limit_ttl = qp->hop_limit_ttl;
   1212	out_params->traffic_class_tos = qp->traffic_class_tos;
   1213	out_params->timeout = qp->ack_timeout;
   1214	out_params->rnr_retry = qp->rnr_retry_cnt;
   1215	out_params->retry_cnt = qp->retry_cnt;
   1216	out_params->min_rnr_nak_timer = qp->min_rnr_nak_timer;
   1217	out_params->pkey_index = 0;
   1218	out_params->max_rd_atomic = qp->max_rd_atomic_req;
   1219	out_params->max_dest_rd_atomic = qp->max_rd_atomic_resp;
   1220	out_params->sqd_async = qp->sqd_async;
   1221
   1222	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
   1223		qed_iwarp_query_qp(qp, out_params);
   1224	else
   1225		rc = qed_roce_query_qp(p_hwfn, qp, out_params);
   1226
   1227	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query QP, rc = %d\n", rc);
   1228	return rc;
   1229}
   1230
   1231static int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
   1232{
   1233	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
   1234	int rc = 0;
   1235
   1236	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
   1237
   1238	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
   1239		rc = qed_iwarp_destroy_qp(p_hwfn, qp);
   1240	else
   1241		rc = qed_roce_destroy_qp(p_hwfn, qp);
   1242
   1243	/* free qp params struct */
   1244	kfree(qp);
   1245
   1246	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP destroyed\n");
   1247	return rc;
   1248}
   1249
   1250static struct qed_rdma_qp *
   1251qed_rdma_create_qp(void *rdma_cxt,
   1252		   struct qed_rdma_create_qp_in_params *in_params,
   1253		   struct qed_rdma_create_qp_out_params *out_params)
   1254{
   1255	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
   1256	struct qed_rdma_qp *qp;
   1257	u8 max_stats_queues;
   1258	int rc;
   1259
   1260	if (!rdma_cxt || !in_params || !out_params ||
   1261	    !p_hwfn->p_rdma_info->active) {
   1262		pr_err("qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?)\n",
   1263		       rdma_cxt, in_params, out_params);
   1264		return NULL;
   1265	}
   1266
   1267	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
   1268		   "qed rdma create qp called with qp_handle = %08x%08x\n",
   1269		   in_params->qp_handle_hi, in_params->qp_handle_lo);
   1270
   1271	/* Some sanity checks... */
   1272	max_stats_queues = p_hwfn->p_rdma_info->dev->max_stats_queues;
   1273	if (in_params->stats_queue >= max_stats_queues) {
   1274		DP_ERR(p_hwfn->cdev,
   1275		       "qed rdma create qp failed due to invalid statistics queue %d. maximum is %d\n",
   1276		       in_params->stats_queue, max_stats_queues);
   1277		return NULL;
   1278	}
   1279
   1280	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
   1281		if (in_params->sq_num_pages * sizeof(struct regpair) >
   1282		    IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE) {
   1283			DP_NOTICE(p_hwfn->cdev,
   1284				  "Sq num pages: %d exceeds maximum\n",
   1285				  in_params->sq_num_pages);
   1286			return NULL;
   1287		}
   1288		if (in_params->rq_num_pages * sizeof(struct regpair) >
   1289		    IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE) {
   1290			DP_NOTICE(p_hwfn->cdev,
   1291				  "Rq num pages: %d exceeds maximum\n",
   1292				  in_params->rq_num_pages);
   1293			return NULL;
   1294		}
   1295	}
   1296
   1297	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
   1298	if (!qp)
   1299		return NULL;
   1300
   1301	qp->cur_state = QED_ROCE_QP_STATE_RESET;
   1302	qp->qp_handle.hi = cpu_to_le32(in_params->qp_handle_hi);
   1303	qp->qp_handle.lo = cpu_to_le32(in_params->qp_handle_lo);
   1304	qp->qp_handle_async.hi = cpu_to_le32(in_params->qp_handle_async_hi);
   1305	qp->qp_handle_async.lo = cpu_to_le32(in_params->qp_handle_async_lo);
   1306	qp->use_srq = in_params->use_srq;
   1307	qp->signal_all = in_params->signal_all;
   1308	qp->fmr_and_reserved_lkey = in_params->fmr_and_reserved_lkey;
   1309	qp->pd = in_params->pd;
   1310	qp->dpi = in_params->dpi;
   1311	qp->sq_cq_id = in_params->sq_cq_id;
   1312	qp->sq_num_pages = in_params->sq_num_pages;
   1313	qp->sq_pbl_ptr = in_params->sq_pbl_ptr;
   1314	qp->rq_cq_id = in_params->rq_cq_id;
   1315	qp->rq_num_pages = in_params->rq_num_pages;
   1316	qp->rq_pbl_ptr = in_params->rq_pbl_ptr;
   1317	qp->srq_id = in_params->srq_id;
   1318	qp->req_offloaded = false;
   1319	qp->resp_offloaded = false;
   1320	qp->e2e_flow_control_en = qp->use_srq ? false : true;
   1321	qp->stats_queue = in_params->stats_queue;
   1322	qp->qp_type = in_params->qp_type;
   1323	qp->xrcd_id = in_params->xrcd_id;
   1324
   1325	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
   1326		rc = qed_iwarp_create_qp(p_hwfn, qp, out_params);
   1327		qp->qpid = qp->icid;
   1328	} else {
   1329		qp->edpm_mode = GET_FIELD(in_params->flags, QED_ROCE_EDPM_MODE);
   1330		rc = qed_roce_alloc_cid(p_hwfn, &qp->icid);
   1331		qp->qpid = ((0xFF << 16) | qp->icid);
   1332	}
   1333
   1334	if (rc) {
   1335		kfree(qp);
   1336		return NULL;
   1337	}
   1338
   1339	out_params->icid = qp->icid;
   1340	out_params->qp_id = qp->qpid;
   1341
   1342	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Create QP, rc = %d\n", rc);
   1343	return qp;
   1344}
   1345
   1346static int qed_rdma_modify_qp(void *rdma_cxt,
   1347			      struct qed_rdma_qp *qp,
   1348			      struct qed_rdma_modify_qp_in_params *params)
   1349{
   1350	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
   1351	enum qed_roce_qp_state prev_state;
   1352	int rc = 0;
   1353
   1354	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x params->new_state=%d\n",
   1355		   qp->icid, params->new_state);
   1356
   1357	if (rc) {
   1358		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
   1359		return rc;
   1360	}
   1361
   1362	if (GET_FIELD(params->modify_flags,
   1363		      QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN)) {
   1364		qp->incoming_rdma_read_en = params->incoming_rdma_read_en;
   1365		qp->incoming_rdma_write_en = params->incoming_rdma_write_en;
   1366		qp->incoming_atomic_en = params->incoming_atomic_en;
   1367	}
   1368
   1369	/* Update QP structure with the updated values */
   1370	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_ROCE_MODE))
   1371		qp->roce_mode = params->roce_mode;
   1372	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY))
   1373		qp->pkey = params->pkey;
   1374	if (GET_FIELD(params->modify_flags,
   1375		      QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN))
   1376		qp->e2e_flow_control_en = params->e2e_flow_control_en;
   1377	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_DEST_QP))
   1378		qp->dest_qp = params->dest_qp;
   1379	if (GET_FIELD(params->modify_flags,
   1380		      QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)) {
   1381		/* Indicates that the following parameters have changed:
   1382		 * Traffic class, flow label, hop limit, source GID,
   1383		 * destination GID, loopback indicator
   1384		 */
   1385		qp->traffic_class_tos = params->traffic_class_tos;
   1386		qp->flow_label = params->flow_label;
   1387		qp->hop_limit_ttl = params->hop_limit_ttl;
   1388
   1389		qp->sgid = params->sgid;
   1390		qp->dgid = params->dgid;
   1391		qp->udp_src_port = 0;
   1392		qp->vlan_id = params->vlan_id;
   1393		qp->mtu = params->mtu;
   1394		qp->lb_indication = params->lb_indication;
   1395		memcpy((u8 *)&qp->remote_mac_addr[0],
   1396		       (u8 *)&params->remote_mac_addr[0], ETH_ALEN);
   1397		if (params->use_local_mac) {
   1398			memcpy((u8 *)&qp->local_mac_addr[0],
   1399			       (u8 *)&params->local_mac_addr[0], ETH_ALEN);
   1400		} else {
   1401			memcpy((u8 *)&qp->local_mac_addr[0],
   1402			       (u8 *)&p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
   1403		}
   1404	}
   1405	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RQ_PSN))
   1406		qp->rq_psn = params->rq_psn;
   1407	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_SQ_PSN))
   1408		qp->sq_psn = params->sq_psn;
   1409	if (GET_FIELD(params->modify_flags,
   1410		      QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ))
   1411		qp->max_rd_atomic_req = params->max_rd_atomic_req;
   1412	if (GET_FIELD(params->modify_flags,
   1413		      QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP))
   1414		qp->max_rd_atomic_resp = params->max_rd_atomic_resp;
   1415	if (GET_FIELD(params->modify_flags,
   1416		      QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT))
   1417		qp->ack_timeout = params->ack_timeout;
   1418	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT))
   1419		qp->retry_cnt = params->retry_cnt;
   1420	if (GET_FIELD(params->modify_flags,
   1421		      QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT))
   1422		qp->rnr_retry_cnt = params->rnr_retry_cnt;
   1423	if (GET_FIELD(params->modify_flags,
   1424		      QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER))
   1425		qp->min_rnr_nak_timer = params->min_rnr_nak_timer;
   1426
   1427	qp->sqd_async = params->sqd_async;
   1428
   1429	prev_state = qp->cur_state;
   1430	if (GET_FIELD(params->modify_flags,
   1431		      QED_RDMA_MODIFY_QP_VALID_NEW_STATE)) {
   1432		qp->cur_state = params->new_state;
   1433		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "qp->cur_state=%d\n",
   1434			   qp->cur_state);
   1435	}
   1436
   1437	switch (qp->qp_type) {
   1438	case QED_RDMA_QP_TYPE_XRC_INI:
   1439		qp->has_req = true;
   1440		break;
   1441	case QED_RDMA_QP_TYPE_XRC_TGT:
   1442		qp->has_resp = true;
   1443		break;
   1444	default:
   1445		qp->has_req  = true;
   1446		qp->has_resp = true;
   1447	}
   1448
   1449	if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
   1450		enum qed_iwarp_qp_state new_state =
   1451		    qed_roce2iwarp_state(qp->cur_state);
   1452
   1453		rc = qed_iwarp_modify_qp(p_hwfn, qp, new_state, 0);
   1454	} else {
   1455		rc = qed_roce_modify_qp(p_hwfn, qp, prev_state, params);
   1456	}
   1457
   1458	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify QP, rc = %d\n", rc);
   1459	return rc;
   1460}
   1461
   1462static int
   1463qed_rdma_register_tid(void *rdma_cxt,
   1464		      struct qed_rdma_register_tid_in_params *params)
   1465{
   1466	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
   1467	struct rdma_register_tid_ramrod_data *p_ramrod;
   1468	struct qed_sp_init_data init_data;
   1469	struct qed_spq_entry *p_ent;
   1470	enum rdma_tid_type tid_type;
   1471	u8 fw_return_code;
   1472	u16 flags = 0;
   1473	int rc;
   1474
   1475	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", params->itid);
   1476
   1477	/* Get SPQ entry */
   1478	memset(&init_data, 0, sizeof(init_data));
   1479	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
   1480	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
   1481
   1482	rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_REGISTER_MR,
   1483				 p_hwfn->p_rdma_info->proto, &init_data);
   1484	if (rc) {
   1485		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
   1486		return rc;
   1487	}
   1488
   1489	if (p_hwfn->p_rdma_info->last_tid < params->itid)
   1490		p_hwfn->p_rdma_info->last_tid = params->itid;
   1491
   1492	SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL,
   1493		  params->pbl_two_level);
   1494
   1495	SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED,
   1496		  false);
   1497
   1498	SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR, params->phy_mr);
   1499
   1500	/* Don't initialize D/C field, as it may override other bits. */
   1501	if (!(params->tid_type == QED_RDMA_TID_FMR) && !(params->dma_mr))
   1502		SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG,
   1503			  params->page_size_log - 12);
   1504
   1505	SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ,
   1506		  params->remote_read);
   1507
   1508	SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE,
   1509		  params->remote_write);
   1510
   1511	SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC,
   1512		  params->remote_atomic);
   1513
   1514	SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE,
   1515		  params->local_write);
   1516
   1517	SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ,
   1518		  params->local_read);
   1519
   1520	SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND,
   1521		  params->mw_bind);
   1522
   1523	p_ramrod = &p_ent->ramrod.rdma_register_tid;
   1524	p_ramrod->flags = cpu_to_le16(flags);
   1525
   1526	SET_FIELD(p_ramrod->flags1,
   1527		  RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG,
   1528		  params->pbl_page_size_log - 12);
   1529
   1530	SET_FIELD(p_ramrod->flags2, RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR,
   1531		  params->dma_mr);
   1532
   1533	switch (params->tid_type) {
   1534	case QED_RDMA_TID_REGISTERED_MR:
   1535		tid_type = RDMA_TID_REGISTERED_MR;
   1536		break;
   1537	case QED_RDMA_TID_FMR:
   1538		tid_type = RDMA_TID_FMR;
   1539		break;
   1540	case QED_RDMA_TID_MW:
   1541		tid_type = RDMA_TID_MW;
   1542		break;
   1543	default:
   1544		rc = -EINVAL;
   1545		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
   1546		qed_sp_destroy_request(p_hwfn, p_ent);
   1547		return rc;
   1548	}
   1549
   1550	SET_FIELD(p_ramrod->flags1, RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE,
   1551		  tid_type);
   1552
   1553	p_ramrod->itid = cpu_to_le32(params->itid);
   1554	p_ramrod->key = params->key;
   1555	p_ramrod->pd = cpu_to_le16(params->pd);
   1556	p_ramrod->length_hi = (u8)(params->length >> 32);
   1557	p_ramrod->length_lo = DMA_LO_LE(params->length);
   1558	DMA_REGPAIR_LE(p_ramrod->va, params->vaddr);
   1559	DMA_REGPAIR_LE(p_ramrod->pbl_base, params->pbl_ptr);
   1560
   1561	/* DIF */
   1562	if (params->dif_enabled) {
   1563		SET_FIELD(p_ramrod->flags2,
   1564			  RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1);
   1565		DMA_REGPAIR_LE(p_ramrod->dif_error_addr,
   1566			       params->dif_error_addr);
   1567	}
   1568
   1569	rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
   1570	if (rc)
   1571		return rc;
   1572
   1573	if (fw_return_code != RDMA_RETURN_OK) {
   1574		DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
   1575		return -EINVAL;
   1576	}
   1577
   1578	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Register TID, rc = %d\n", rc);
   1579	return rc;
   1580}
   1581
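        /*
         * De-register a TID with the firmware. If the firmware reports that the
         * TID is still in use (RDMA_RETURN_NIG_DRAIN_REQ), the NIG is drained
         * via the MCP and the deregister ramrod is sent one more time.
         */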
   1582static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
   1583{
   1584	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
   1585	struct rdma_deregister_tid_ramrod_data *p_ramrod;
   1586	struct qed_sp_init_data init_data;
   1587	struct qed_spq_entry *p_ent;
   1588	struct qed_ptt *p_ptt;
   1589	u8 fw_return_code;
   1590	int rc;
   1591
   1592	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
   1593
   1594	/* Get SPQ entry */
   1595	memset(&init_data, 0, sizeof(init_data));
   1596	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
   1597	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
   1598
   1599	rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_DEREGISTER_MR,
   1600				 p_hwfn->p_rdma_info->proto, &init_data);
   1601	if (rc) {
   1602		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
   1603		return rc;
   1604	}
   1605
   1606	p_ramrod = &p_ent->ramrod.rdma_deregister_tid;
   1607	p_ramrod->itid = cpu_to_le32(itid);
   1608
   1609	rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
   1610	if (rc) {
   1611		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
   1612		return rc;
   1613	}
   1614
   1615	if (fw_return_code == RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR) {
   1616		DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
   1617		return -EINVAL;
   1618	} else if (fw_return_code == RDMA_RETURN_NIG_DRAIN_REQ) {
    1619		/* Return code indicating that the TID is still in use and a
    1620		 * NIG drain is required before sending the ramrod again.
    1621		 */
   1622		p_ptt = qed_ptt_acquire(p_hwfn);
   1623		if (!p_ptt) {
   1624			rc = -EBUSY;
   1625			DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
   1626				   "Failed to acquire PTT\n");
   1627			return rc;
   1628		}
   1629
   1630		rc = qed_mcp_drain(p_hwfn, p_ptt);
   1631		if (rc) {
   1632			qed_ptt_release(p_hwfn, p_ptt);
   1633			DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
   1634				   "Drain failed\n");
   1635			return rc;
   1636		}
   1637
   1638		qed_ptt_release(p_hwfn, p_ptt);
   1639
   1640		/* Resend the ramrod */
   1641		rc = qed_sp_init_request(p_hwfn, &p_ent,
   1642					 RDMA_RAMROD_DEREGISTER_MR,
   1643					 p_hwfn->p_rdma_info->proto,
   1644					 &init_data);
   1645		if (rc) {
   1646			DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
   1647				   "Failed to init sp-element\n");
   1648			return rc;
   1649		}
   1650
   1651		rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
   1652		if (rc) {
   1653			DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
   1654				   "Ramrod failed\n");
   1655			return rc;
   1656		}
   1657
   1658		if (fw_return_code != RDMA_RETURN_OK) {
   1659			DP_NOTICE(p_hwfn, "fw_return_code = %d\n",
   1660				  fw_return_code);
    1661			return -EINVAL;
   1662		}
   1663	}
   1664
   1665	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "De-registered TID, rc = %d\n", rc);
   1666	return rc;
   1667}
   1668
   1669static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
   1670{
   1671	return QED_AFFIN_HWFN(cdev);
   1672}
   1673
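        /* XRC SRQs and regular SRQs are tracked in separate id bitmaps. */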
   1674static struct qed_bmap *qed_rdma_get_srq_bmap(struct qed_hwfn *p_hwfn,
   1675					      bool is_xrc)
   1676{
   1677	if (is_xrc)
   1678		return &p_hwfn->p_rdma_info->xrc_srq_map;
   1679
   1680	return &p_hwfn->p_rdma_info->srq_map;
   1681}
   1682
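        /* Modify an existing SRQ: only the WQE limit is updated, via the
         * MODIFY_SRQ ramrod.
         */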
   1683static int qed_rdma_modify_srq(void *rdma_cxt,
   1684			       struct qed_rdma_modify_srq_in_params *in_params)
   1685{
   1686	struct rdma_srq_modify_ramrod_data *p_ramrod;
   1687	struct qed_sp_init_data init_data = {};
   1688	struct qed_hwfn *p_hwfn = rdma_cxt;
   1689	struct qed_spq_entry *p_ent;
   1690	u16 opaque_fid;
   1691	int rc;
   1692
   1693	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
   1694	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
   1695
   1696	rc = qed_sp_init_request(p_hwfn, &p_ent,
   1697				 RDMA_RAMROD_MODIFY_SRQ,
   1698				 p_hwfn->p_rdma_info->proto, &init_data);
   1699	if (rc)
   1700		return rc;
   1701
   1702	p_ramrod = &p_ent->ramrod.rdma_modify_srq;
   1703	p_ramrod->srq_id.srq_idx = cpu_to_le16(in_params->srq_id);
   1704	opaque_fid = p_hwfn->hw_info.opaque_fid;
   1705	p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid);
   1706	p_ramrod->wqe_limit = cpu_to_le32(in_params->wqe_limit);
   1707
   1708	rc = qed_spq_post(p_hwfn, p_ent, NULL);
   1709	if (rc)
   1710		return rc;
   1711
   1712	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "modified SRQ id = %x, is_xrc=%u\n",
   1713		   in_params->srq_id, in_params->is_xrc);
   1714
   1715	return rc;
   1716}
   1717
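        /*
         * Destroy an SRQ (or XRC SRQ): send the destroy ramrod and return the id
         * to the matching bitmap. Regular SRQ ids are stored relative to
         * srq_id_offset; XRC SRQ ids start at zero.
         */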
   1718static int
   1719qed_rdma_destroy_srq(void *rdma_cxt,
   1720		     struct qed_rdma_destroy_srq_in_params *in_params)
   1721{
   1722	struct rdma_srq_destroy_ramrod_data *p_ramrod;
   1723	struct qed_sp_init_data init_data = {};
   1724	struct qed_hwfn *p_hwfn = rdma_cxt;
   1725	struct qed_spq_entry *p_ent;
   1726	struct qed_bmap *bmap;
   1727	u16 opaque_fid;
   1728	u16 offset;
   1729	int rc;
   1730
   1731	opaque_fid = p_hwfn->hw_info.opaque_fid;
   1732
   1733	init_data.opaque_fid = opaque_fid;
   1734	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
   1735
   1736	rc = qed_sp_init_request(p_hwfn, &p_ent,
   1737				 RDMA_RAMROD_DESTROY_SRQ,
   1738				 p_hwfn->p_rdma_info->proto, &init_data);
   1739	if (rc)
   1740		return rc;
   1741
   1742	p_ramrod = &p_ent->ramrod.rdma_destroy_srq;
   1743	p_ramrod->srq_id.srq_idx = cpu_to_le16(in_params->srq_id);
   1744	p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid);
   1745
   1746	rc = qed_spq_post(p_hwfn, p_ent, NULL);
   1747	if (rc)
   1748		return rc;
   1749
   1750	bmap = qed_rdma_get_srq_bmap(p_hwfn, in_params->is_xrc);
   1751	offset = (in_params->is_xrc) ? 0 : p_hwfn->p_rdma_info->srq_id_offset;
   1752
   1753	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
   1754	qed_bmap_release_id(p_hwfn, bmap, in_params->srq_id - offset);
   1755	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
   1756
   1757	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
   1758		   "XRC/SRQ destroyed Id = %x, is_xrc=%u\n",
   1759		   in_params->srq_id, in_params->is_xrc);
   1760
   1761	return rc;
   1762}
   1763
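        /*
         * Create an SRQ (or XRC SRQ): allocate an id from the matching bitmap,
         * allocate the ILT line for it, then send the create ramrod. On failure
         * the id is released again.
         */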
   1764static int
   1765qed_rdma_create_srq(void *rdma_cxt,
   1766		    struct qed_rdma_create_srq_in_params *in_params,
   1767		    struct qed_rdma_create_srq_out_params *out_params)
   1768{
   1769	struct rdma_srq_create_ramrod_data *p_ramrod;
   1770	struct qed_sp_init_data init_data = {};
   1771	struct qed_hwfn *p_hwfn = rdma_cxt;
   1772	enum qed_cxt_elem_type elem_type;
   1773	struct qed_spq_entry *p_ent;
   1774	u16 opaque_fid, srq_id;
   1775	struct qed_bmap *bmap;
   1776	u32 returned_id;
   1777	u16 offset;
   1778	int rc;
   1779
   1780	bmap = qed_rdma_get_srq_bmap(p_hwfn, in_params->is_xrc);
   1781	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
   1782	rc = qed_rdma_bmap_alloc_id(p_hwfn, bmap, &returned_id);
   1783	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
   1784
   1785	if (rc) {
   1786		DP_NOTICE(p_hwfn,
   1787			  "failed to allocate xrc/srq id (is_xrc=%u)\n",
   1788			  in_params->is_xrc);
   1789		return rc;
   1790	}
   1791
   1792	elem_type = (in_params->is_xrc) ? (QED_ELEM_XRC_SRQ) : (QED_ELEM_SRQ);
   1793	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, elem_type, returned_id);
   1794	if (rc)
   1795		goto err;
   1796
   1797	opaque_fid = p_hwfn->hw_info.opaque_fid;
   1798
   1800	init_data.opaque_fid = opaque_fid;
   1801	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
   1802
   1803	rc = qed_sp_init_request(p_hwfn, &p_ent,
   1804				 RDMA_RAMROD_CREATE_SRQ,
   1805				 p_hwfn->p_rdma_info->proto, &init_data);
   1806	if (rc)
   1807		goto err;
   1808
   1809	p_ramrod = &p_ent->ramrod.rdma_create_srq;
   1810	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, in_params->pbl_base_addr);
   1811	p_ramrod->pages_in_srq_pbl = cpu_to_le16(in_params->num_pages);
   1812	p_ramrod->pd_id = cpu_to_le16(in_params->pd_id);
   1813	p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid);
   1814	p_ramrod->page_size = cpu_to_le16(in_params->page_size);
   1815	DMA_REGPAIR_LE(p_ramrod->producers_addr, in_params->prod_pair_addr);
   1816	offset = (in_params->is_xrc) ? 0 : p_hwfn->p_rdma_info->srq_id_offset;
   1817	srq_id = (u16)returned_id + offset;
   1818	p_ramrod->srq_id.srq_idx = cpu_to_le16(srq_id);
   1819
   1820	if (in_params->is_xrc) {
   1821		SET_FIELD(p_ramrod->flags,
   1822			  RDMA_SRQ_CREATE_RAMROD_DATA_XRC_FLAG, 1);
   1823		SET_FIELD(p_ramrod->flags,
   1824			  RDMA_SRQ_CREATE_RAMROD_DATA_RESERVED_KEY_EN,
   1825			  in_params->reserved_key_en);
   1826		p_ramrod->xrc_srq_cq_cid =
   1827			cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
   1828				     in_params->cq_cid);
   1829		p_ramrod->xrc_domain = cpu_to_le16(in_params->xrcd_id);
   1830	}
   1831	rc = qed_spq_post(p_hwfn, p_ent, NULL);
   1832	if (rc)
   1833		goto err;
   1834
   1835	out_params->srq_id = srq_id;
   1836
   1837	DP_VERBOSE(p_hwfn,
   1838		   QED_MSG_RDMA,
   1839		   "XRC/SRQ created Id = %x (is_xrc=%u)\n",
   1840		   out_params->srq_id, in_params->is_xrc);
   1841	return rc;
   1842
   1843err:
   1844	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
   1845	qed_bmap_release_id(p_hwfn, bmap, returned_id);
   1846	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
   1847
   1848	return rc;
   1849}
   1850
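        /* Report whether any RDMA QPs are currently allocated, i.e. whether the
         * cid_map bitmap is non-empty.
         */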
   1851bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn)
   1852{
   1853	bool result;
   1854
    1855	/* If RDMA wasn't activated yet, naturally there are no QPs */
   1856	if (!p_hwfn->p_rdma_info->active)
   1857		return false;
   1858
   1859	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
   1860	if (!p_hwfn->p_rdma_info->cid_map.bitmap)
   1861		result = false;
   1862	else
   1863		result = !qed_bmap_is_empty(&p_hwfn->p_rdma_info->cid_map);
   1864	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
   1865	return result;
   1866}
   1867
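        /*
         * Update the DORQ DPM enable bit for this PF: DPM/EDPM stays enabled
         * only if neither DCBX nor the doorbell-BAR configuration has disabled
         * it.
         */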
   1868void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
   1869{
   1870	u32 val;
   1871
   1872	val = (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) ? 0 : 1;
   1873
   1874	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPM_ENABLE, val);
   1875	DP_VERBOSE(p_hwfn, (QED_MSG_DCB | QED_MSG_RDMA),
   1876		   "Changing DPM_EN state to %d (DCBX=%d, DB_BAR=%d)\n",
   1877		   val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm);
   1878}
   1879
   1880void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
   1881{
   1882	p_hwfn->db_bar_no_edpm = true;
   1883
   1884	qed_rdma_dpm_conf(p_hwfn, p_ptt);
   1885}
   1886
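        /*
         * Bring up RDMA on this hwfn: allocate the RDMA resources, run the setup
         * sequence and mark the function active. Resources are unwound on
         * failure.
         */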
   1887static int qed_rdma_start(void *rdma_cxt,
   1888			  struct qed_rdma_start_in_params *params)
   1889{
   1890	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
   1891	struct qed_ptt *p_ptt;
   1892	int rc = -EBUSY;
   1893
   1894	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
   1895		   "desired_cnq = %08x\n", params->desired_cnq);
   1896
   1897	p_ptt = qed_ptt_acquire(p_hwfn);
   1898	if (!p_ptt)
   1899		goto err;
   1900
   1901	rc = qed_rdma_alloc(p_hwfn);
   1902	if (rc)
   1903		goto err1;
   1904
   1905	rc = qed_rdma_setup(p_hwfn, p_ptt, params);
   1906	if (rc)
   1907		goto err2;
   1908
   1909	qed_ptt_release(p_hwfn, p_ptt);
   1910	p_hwfn->p_rdma_info->active = 1;
   1911
   1912	return rc;
   1913
   1914err2:
   1915	qed_rdma_free(p_hwfn);
   1916err1:
   1917	qed_ptt_release(p_hwfn, p_ptt);
   1918err:
   1919	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA start - error, rc = %d\n", rc);
   1920	return rc;
   1921}
   1922
   1923static int qed_rdma_init(struct qed_dev *cdev,
   1924			 struct qed_rdma_start_in_params *params)
   1925{
   1926	return qed_rdma_start(QED_AFFIN_HWFN(cdev), params);
   1927}
   1928
   1929static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
   1930{
   1931	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
   1932
   1933	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "dpi = %08x\n", dpi);
   1934
   1935	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
   1936	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, dpi);
   1937	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
   1938}
   1939
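        /* Update the LLH MAC filter used by RoCE LL2: remove the old address
         * (if any) and add the new one (if any).
         */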
   1940static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev,
   1941				       u8 *old_mac_address,
   1942				       const u8 *new_mac_address)
   1943{
   1944	int rc = 0;
   1945
   1946	if (old_mac_address)
   1947		qed_llh_remove_mac_filter(cdev, 0, old_mac_address);
   1948	if (new_mac_address)
   1949		rc = qed_llh_add_mac_filter(cdev, 0, new_mac_address);
   1950
   1951	if (rc)
   1952		DP_ERR(cdev,
   1953		       "qed roce ll2 mac filter set: failed to add MAC filter\n");
   1954
   1955	return rc;
   1956}
   1957
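        /*
         * In iWARP CMT mode, direct non-RoCE traffic of ppfid 0 to the engine
         * hinted by l2_affin_hint, or back to both engines when b_reset is set.
         */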
   1958static int qed_iwarp_set_engine_affin(struct qed_dev *cdev, bool b_reset)
   1959{
   1960	enum qed_eng eng;
   1961	u8 ppfid = 0;
   1962	int rc;
   1963
    1964	/* Make sure iWARP CMT mode is enabled before setting affinity */
   1965	if (!cdev->iwarp_cmt)
   1966		return -EINVAL;
   1967
   1968	if (b_reset)
   1969		eng = QED_BOTH_ENG;
   1970	else
   1971		eng = cdev->l2_affin_hint ? QED_ENG1 : QED_ENG0;
   1972
   1973	rc = qed_llh_set_ppfid_affinity(cdev, ppfid, eng);
   1974	if (rc) {
   1975		DP_NOTICE(cdev,
   1976			  "Failed to set the engine affinity of ppfid %d\n",
   1977			  ppfid);
   1978		return rc;
   1979	}
   1980
   1981	DP_VERBOSE(cdev, (QED_MSG_RDMA | QED_MSG_SP),
   1982		   "LLH: Set the engine affinity of non-RoCE packets as %d\n",
   1983		   eng);
   1984
   1985	return 0;
   1986}
   1987
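        /*
         * Dispatch table handed to the upper-layer RDMA driver (e.g. qedr)
         * through qed_get_rdma_ops() below.
         */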
   1988static const struct qed_rdma_ops qed_rdma_ops_pass = {
   1989	.common = &qed_common_ops_pass,
   1990	.fill_dev_info = &qed_fill_rdma_dev_info,
   1991	.rdma_get_rdma_ctx = &qed_rdma_get_rdma_ctx,
   1992	.rdma_init = &qed_rdma_init,
   1993	.rdma_add_user = &qed_rdma_add_user,
   1994	.rdma_remove_user = &qed_rdma_remove_user,
   1995	.rdma_stop = &qed_rdma_stop,
   1996	.rdma_query_port = &qed_rdma_query_port,
   1997	.rdma_query_device = &qed_rdma_query_device,
   1998	.rdma_get_start_sb = &qed_rdma_get_sb_start,
   1999	.rdma_get_rdma_int = &qed_rdma_get_int,
   2000	.rdma_set_rdma_int = &qed_rdma_set_int,
   2001	.rdma_get_min_cnq_msix = &qed_rdma_get_min_cnq_msix,
   2002	.rdma_cnq_prod_update = &qed_rdma_cnq_prod_update,
   2003	.rdma_alloc_pd = &qed_rdma_alloc_pd,
   2004	.rdma_dealloc_pd = &qed_rdma_free_pd,
   2005	.rdma_alloc_xrcd = &qed_rdma_alloc_xrcd,
   2006	.rdma_dealloc_xrcd = &qed_rdma_free_xrcd,
   2007	.rdma_create_cq = &qed_rdma_create_cq,
   2008	.rdma_destroy_cq = &qed_rdma_destroy_cq,
   2009	.rdma_create_qp = &qed_rdma_create_qp,
   2010	.rdma_modify_qp = &qed_rdma_modify_qp,
   2011	.rdma_query_qp = &qed_rdma_query_qp,
   2012	.rdma_destroy_qp = &qed_rdma_destroy_qp,
   2013	.rdma_alloc_tid = &qed_rdma_alloc_tid,
   2014	.rdma_free_tid = &qed_rdma_free_tid,
   2015	.rdma_register_tid = &qed_rdma_register_tid,
   2016	.rdma_deregister_tid = &qed_rdma_deregister_tid,
   2017	.rdma_create_srq = &qed_rdma_create_srq,
   2018	.rdma_modify_srq = &qed_rdma_modify_srq,
   2019	.rdma_destroy_srq = &qed_rdma_destroy_srq,
   2020	.ll2_acquire_connection = &qed_ll2_acquire_connection,
   2021	.ll2_establish_connection = &qed_ll2_establish_connection,
   2022	.ll2_terminate_connection = &qed_ll2_terminate_connection,
   2023	.ll2_release_connection = &qed_ll2_release_connection,
   2024	.ll2_post_rx_buffer = &qed_ll2_post_rx_buffer,
   2025	.ll2_prepare_tx_packet = &qed_ll2_prepare_tx_packet,
   2026	.ll2_set_fragment_of_tx_packet = &qed_ll2_set_fragment_of_tx_packet,
   2027	.ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter,
   2028	.ll2_get_stats = &qed_ll2_get_stats,
   2029	.iwarp_set_engine_affin = &qed_iwarp_set_engine_affin,
   2030	.iwarp_connect = &qed_iwarp_connect,
   2031	.iwarp_create_listen = &qed_iwarp_create_listen,
   2032	.iwarp_destroy_listen = &qed_iwarp_destroy_listen,
   2033	.iwarp_accept = &qed_iwarp_accept,
   2034	.iwarp_reject = &qed_iwarp_reject,
   2035	.iwarp_send_rtr = &qed_iwarp_send_rtr,
   2036};
   2037
   2038const struct qed_rdma_ops *qed_get_rdma_ops(void)
   2039{
   2040	return &qed_rdma_ops_pass;
   2041}
   2042EXPORT_SYMBOL(qed_get_rdma_ops);
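        /*
         * Illustrative consumer sketch (not part of this file): an upper-layer
         * driver obtains the ops table and uses it to start RDMA on the
         * affinitized hwfn.
         *
         *	const struct qed_rdma_ops *ops = qed_get_rdma_ops();
         *	void *rdma_cxt = ops->rdma_get_rdma_ctx(cdev);
         *
         *	rc = ops->rdma_init(cdev, &start_params);
         *	if (!rc)
         *		ops->rdma_add_user(rdma_cxt, &out_params);
         */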