cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

conn.c (28559B)


      1/*
      2 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
      3 *
      4 * This software is available to you under a choice of one of two
      5 * licenses.  You may choose to be licensed under the terms of the GNU
      6 * General Public License (GPL) Version 2, available from the file
      7 * COPYING in the main directory of this source tree, or the
      8 * OpenIB.org BSD license below:
      9 *
     10 *     Redistribution and use in source and binary forms, with or
     11 *     without modification, are permitted provided that the following
     12 *     conditions are met:
     13 *
     14 *      - Redistributions of source code must retain the above
     15 *        copyright notice, this list of conditions and the following
     16 *        disclaimer.
     17 *
     18 *      - Redistributions in binary form must reproduce the above
     19 *        copyright notice, this list of conditions and the following
     20 *        disclaimer in the documentation and/or other materials
     21 *        provided with the distribution.
     22 *
     23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
     24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
     25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
     26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
     27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
     28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
     29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
     30 * SOFTWARE.
     31 *
     32 */
     33
     34#include <net/addrconf.h>
     35#include <linux/etherdevice.h>
     36#include <linux/mlx5/vport.h>
     37
     38#include "mlx5_core.h"
     39#include "lib/mlx5.h"
     40#include "fpga/conn.h"
     41
     42#define MLX5_FPGA_PKEY 0xFFFF
     43#define MLX5_FPGA_PKEY_INDEX 0 /* RoCE PKEY 0xFFFF is always at index 0 */
     44#define MLX5_FPGA_RECV_SIZE 2048
     45#define MLX5_FPGA_PORT_NUM 1
     46#define MLX5_FPGA_CQ_BUDGET 64
     47
     48static int mlx5_fpga_conn_map_buf(struct mlx5_fpga_conn *conn,
     49				  struct mlx5_fpga_dma_buf *buf)
     50{
     51	struct device *dma_device;
     52	int err = 0;
     53
     54	if (unlikely(!buf->sg[0].data))
     55		goto out;
     56
     57	dma_device = mlx5_core_dma_dev(conn->fdev->mdev);
     58	buf->sg[0].dma_addr = dma_map_single(dma_device, buf->sg[0].data,
     59					     buf->sg[0].size, buf->dma_dir);
     60	err = dma_mapping_error(dma_device, buf->sg[0].dma_addr);
     61	if (unlikely(err)) {
     62		mlx5_fpga_warn(conn->fdev, "DMA error on sg 0: %d\n", err);
     63		err = -ENOMEM;
     64		goto out;
     65	}
     66
     67	if (!buf->sg[1].data)
     68		goto out;
     69
     70	buf->sg[1].dma_addr = dma_map_single(dma_device, buf->sg[1].data,
     71					     buf->sg[1].size, buf->dma_dir);
     72	err = dma_mapping_error(dma_device, buf->sg[1].dma_addr);
     73	if (unlikely(err)) {
     74		mlx5_fpga_warn(conn->fdev, "DMA error on sg 1: %d\n", err);
     75		dma_unmap_single(dma_device, buf->sg[0].dma_addr,
     76				 buf->sg[0].size, buf->dma_dir);
     77		err = -ENOMEM;
     78	}
     79
     80out:
     81	return err;
     82}
     83
     84static void mlx5_fpga_conn_unmap_buf(struct mlx5_fpga_conn *conn,
     85				     struct mlx5_fpga_dma_buf *buf)
     86{
     87	struct device *dma_device;
     88
     89	dma_device = mlx5_core_dma_dev(conn->fdev->mdev);
     90	if (buf->sg[1].data)
     91		dma_unmap_single(dma_device, buf->sg[1].dma_addr,
     92				 buf->sg[1].size, buf->dma_dir);
     93
     94	if (likely(buf->sg[0].data))
     95		dma_unmap_single(dma_device, buf->sg[0].dma_addr,
     96				 buf->sg[0].size, buf->dma_dir);
     97}
     98
/* Post one receive WQE for @buf on the connection's RQ.
 *
 * Maps the buffer for DMA, fails with -EBUSY (after unmapping) when the
 * RQ is full, otherwise fills the next cyclic WQE slot and rings the RQ
 * doorbell. On success the RQ owns @buf until its completion arrives.
 */
static int mlx5_fpga_conn_post_recv(struct mlx5_fpga_conn *conn,
				    struct mlx5_fpga_dma_buf *buf)
{
	struct mlx5_wqe_data_seg *data;
	unsigned int ix;
	int err = 0;

	err = mlx5_fpga_conn_map_buf(conn, buf);
	if (unlikely(err))
		goto out;

	/* pc - cc is the number of outstanding WQEs; the RQ is full once
	 * it reaches the ring size.
	 */
	if (unlikely(conn->qp.rq.pc - conn->qp.rq.cc >= conn->qp.rq.size)) {
		mlx5_fpga_conn_unmap_buf(conn, buf);
		return -EBUSY;
	}

	/* rq.size is a power of two, so masking yields the cyclic index */
	ix = conn->qp.rq.pc & (conn->qp.rq.size - 1);
	data = mlx5_wq_cyc_get_wqe(&conn->qp.wq.rq, ix);
	data->byte_count = cpu_to_be32(buf->sg[0].size);
	data->lkey = cpu_to_be32(conn->fdev->conn_res.mkey);
	data->addr = cpu_to_be64(buf->sg[0].dma_addr);

	conn->qp.rq.pc++;
	conn->qp.rq.bufs[ix] = buf;	/* remembered for the completion path */

	/* Make sure that descriptors are written before doorbell record. */
	dma_wmb();
	*conn->qp.wq.rq.db = cpu_to_be32(conn->qp.rq.pc & 0xffff);
out:
	return err;
}
    130
/* Ring the SQ doorbell for the just-built WQE at @wqe.
 *
 * Ordering is critical: the WQE must be visible before the doorbell
 * record update, and the record must be visible before the UAR write
 * that tells the device to fetch the WQE.
 */
static void mlx5_fpga_conn_notify_hw(struct mlx5_fpga_conn *conn, void *wqe)
{
	/* ensure wqe is visible to device before updating doorbell record */
	dma_wmb();
	*conn->qp.wq.sq.db = cpu_to_be32(conn->qp.sq.pc);
	/* Make sure that doorbell record is visible before ringing */
	wmb();
	mlx5_write64(wqe, conn->fdev->conn_res.uar->map + MLX5_BF_OFFSET);
}
    140
/* Build and post one SEND WQE for @buf on the SQ.
 *
 * Caller must hold conn->qp.sq.lock and have verified there is room in
 * the SQ (pc - cc < size). One data segment is built per scatter entry;
 * entries after the first NULL .data are ignored.
 */
static void mlx5_fpga_conn_post_send(struct mlx5_fpga_conn *conn,
				     struct mlx5_fpga_dma_buf *buf)
{
	struct mlx5_wqe_ctrl_seg *ctrl;
	struct mlx5_wqe_data_seg *data;
	unsigned int ix, sgi;
	int size = 1;	/* WQE size in 16-byte units; 1 for the ctrl seg */

	ix = conn->qp.sq.pc & (conn->qp.sq.size - 1);

	ctrl = mlx5_wq_cyc_get_wqe(&conn->qp.wq.sq, ix);
	data = (void *)(ctrl + 1);	/* data segments follow the ctrl seg */

	for (sgi = 0; sgi < ARRAY_SIZE(buf->sg); sgi++) {
		if (!buf->sg[sgi].data)
			break;
		data->byte_count = cpu_to_be32(buf->sg[sgi].size);
		data->lkey = cpu_to_be32(conn->fdev->conn_res.mkey);
		data->addr = cpu_to_be64(buf->sg[sgi].dma_addr);
		data++;
		size++;
	}

	ctrl->imm = 0;
	ctrl->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;	/* request a CQE */
	ctrl->opmod_idx_opcode = cpu_to_be32(((conn->qp.sq.pc & 0xffff) << 8) |
					     MLX5_OPCODE_SEND);
	ctrl->qpn_ds = cpu_to_be32(size | (conn->qp.qpn << 8));

	conn->qp.sq.pc++;
	conn->qp.sq.bufs[ix] = buf;	/* retired by the SQ completion */
	mlx5_fpga_conn_notify_hw(conn, ctrl);
}
    174
    175int mlx5_fpga_conn_send(struct mlx5_fpga_conn *conn,
    176			struct mlx5_fpga_dma_buf *buf)
    177{
    178	unsigned long flags;
    179	int err;
    180
    181	if (!conn->qp.active)
    182		return -ENOTCONN;
    183
    184	buf->dma_dir = DMA_TO_DEVICE;
    185	err = mlx5_fpga_conn_map_buf(conn, buf);
    186	if (err)
    187		return err;
    188
    189	spin_lock_irqsave(&conn->qp.sq.lock, flags);
    190
    191	if (conn->qp.sq.pc - conn->qp.sq.cc >= conn->qp.sq.size) {
    192		list_add_tail(&buf->list, &conn->qp.sq.backlog);
    193		goto out_unlock;
    194	}
    195
    196	mlx5_fpga_conn_post_send(conn, buf);
    197
    198out_unlock:
    199	spin_unlock_irqrestore(&conn->qp.sq.lock, flags);
    200	return err;
    201}
    202
    203static int mlx5_fpga_conn_post_recv_buf(struct mlx5_fpga_conn *conn)
    204{
    205	struct mlx5_fpga_dma_buf *buf;
    206	int err;
    207
    208	buf = kzalloc(sizeof(*buf) + MLX5_FPGA_RECV_SIZE, 0);
    209	if (!buf)
    210		return -ENOMEM;
    211
    212	buf->sg[0].data = (void *)(buf + 1);
    213	buf->sg[0].size = MLX5_FPGA_RECV_SIZE;
    214	buf->dma_dir = DMA_FROM_DEVICE;
    215
    216	err = mlx5_fpga_conn_post_recv(conn, buf);
    217	if (err)
    218		kfree(buf);
    219
    220	return err;
    221}
    222
    223static int mlx5_fpga_conn_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
    224				      u32 *mkey)
    225{
    226	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
    227	void *mkc;
    228	u32 *in;
    229	int err;
    230
    231	in = kvzalloc(inlen, GFP_KERNEL);
    232	if (!in)
    233		return -ENOMEM;
    234
    235	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
    236	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
    237	MLX5_SET(mkc, mkc, lw, 1);
    238	MLX5_SET(mkc, mkc, lr, 1);
    239
    240	MLX5_SET(mkc, mkc, pd, pdn);
    241	MLX5_SET(mkc, mkc, length64, 1);
    242	MLX5_SET(mkc, mkc, qpn, 0xffffff);
    243
    244	err = mlx5_core_create_mkey(mdev, mkey, in, inlen);
    245
    246	kvfree(in);
    247	return err;
    248}
    249
/* Handle one receive completion.
 *
 * Retires the RQ WQE named by the CQE, delivers the payload to the
 * connection's recv_cb, and reposts the same buffer. On error status
 * or an inactive QP the buffer is freed and the QP marked inactive.
 */
static void mlx5_fpga_conn_rq_cqe(struct mlx5_fpga_conn *conn,
				  struct mlx5_cqe64 *cqe, u8 status)
{
	struct mlx5_fpga_dma_buf *buf;
	int ix, err;

	ix = be16_to_cpu(cqe->wqe_counter) & (conn->qp.rq.size - 1);
	buf = conn->qp.rq.bufs[ix];
	conn->qp.rq.bufs[ix] = NULL;
	conn->qp.rq.cc++;

	/* Flush errors during teardown are expected; log them quietly. */
	if (unlikely(status && (status != MLX5_CQE_SYNDROME_WR_FLUSH_ERR)))
		mlx5_fpga_warn(conn->fdev, "RQ buf %p on FPGA QP %u completion status %d\n",
			       buf, conn->fpga_qpn, status);
	else
		mlx5_fpga_dbg(conn->fdev, "RQ buf %p on FPGA QP %u completion status %d\n",
			      buf, conn->fpga_qpn, status);

	mlx5_fpga_conn_unmap_buf(conn, buf);

	if (unlikely(status || !conn->qp.active)) {
		conn->qp.active = false;
		kfree(buf);
		return;
	}

	/* Shrink the sg entry to the received length for the callback... */
	buf->sg[0].size = be32_to_cpu(cqe->byte_cnt);
	mlx5_fpga_dbg(conn->fdev, "Message with %u bytes received successfully\n",
		      buf->sg[0].size);
	conn->recv_cb(conn->cb_arg, buf);

	/* ...then restore the full size and recycle the buffer. */
	buf->sg[0].size = MLX5_FPGA_RECV_SIZE;
	err = mlx5_fpga_conn_post_recv(conn, buf);
	if (unlikely(err)) {
		mlx5_fpga_warn(conn->fdev,
			       "Failed to re-post recv buf: %d\n", err);
		kfree(buf);
	}
}
    289
/* Handle one send completion.
 *
 * Retires the SQ WQE named by the CQE, posts the next backlogged send
 * (if any) while still holding the SQ lock so message order is kept,
 * then unmaps the buffer and invokes its completion callback. Any
 * non-zero status deactivates the QP (flush errors are merely logged
 * at a lower level).
 */
static void mlx5_fpga_conn_sq_cqe(struct mlx5_fpga_conn *conn,
				  struct mlx5_cqe64 *cqe, u8 status)
{
	struct mlx5_fpga_dma_buf *buf, *nextbuf;
	unsigned long flags;
	int ix;

	spin_lock_irqsave(&conn->qp.sq.lock, flags);

	ix = be16_to_cpu(cqe->wqe_counter) & (conn->qp.sq.size - 1);
	buf = conn->qp.sq.bufs[ix];
	conn->qp.sq.bufs[ix] = NULL;
	conn->qp.sq.cc++;

	/* Handle backlog still under the spinlock to ensure message post order */
	if (unlikely(!list_empty(&conn->qp.sq.backlog))) {
		if (likely(conn->qp.active)) {
			nextbuf = list_first_entry(&conn->qp.sq.backlog,
						   struct mlx5_fpga_dma_buf, list);
			list_del(&nextbuf->list);
			mlx5_fpga_conn_post_send(conn, nextbuf);
		}
	}

	spin_unlock_irqrestore(&conn->qp.sq.lock, flags);

	if (unlikely(status && (status != MLX5_CQE_SYNDROME_WR_FLUSH_ERR)))
		mlx5_fpga_warn(conn->fdev, "SQ buf %p on FPGA QP %u completion status %d\n",
			       buf, conn->fpga_qpn, status);
	else
		mlx5_fpga_dbg(conn->fdev, "SQ buf %p on FPGA QP %u completion status %d\n",
			      buf, conn->fpga_qpn, status);

	mlx5_fpga_conn_unmap_buf(conn, buf);

	/* Completion callback runs outside the lock; buffer ownership
	 * returns to the sender here.
	 */
	if (likely(buf->complete))
		buf->complete(conn, conn->fdev, buf, status);

	if (unlikely(status))
		conn->qp.active = false;
}
    331
    332static void mlx5_fpga_conn_handle_cqe(struct mlx5_fpga_conn *conn,
    333				      struct mlx5_cqe64 *cqe)
    334{
    335	u8 opcode, status = 0;
    336
    337	opcode = get_cqe_opcode(cqe);
    338
    339	switch (opcode) {
    340	case MLX5_CQE_REQ_ERR:
    341		status = ((struct mlx5_err_cqe *)cqe)->syndrome;
    342		fallthrough;
    343	case MLX5_CQE_REQ:
    344		mlx5_fpga_conn_sq_cqe(conn, cqe, status);
    345		break;
    346
    347	case MLX5_CQE_RESP_ERR:
    348		status = ((struct mlx5_err_cqe *)cqe)->syndrome;
    349		fallthrough;
    350	case MLX5_CQE_RESP_SEND:
    351		mlx5_fpga_conn_rq_cqe(conn, cqe, status);
    352		break;
    353	default:
    354		mlx5_fpga_warn(conn->fdev, "Unexpected cqe opcode %u\n",
    355			       opcode);
    356	}
    357}
    358
/* Re-arm the CQ so the next completion generates an event, passing the
 * current consumer counter to avoid missed/spurious events.
 */
static void mlx5_fpga_conn_arm_cq(struct mlx5_fpga_conn *conn)
{
	mlx5_cq_arm(&conn->cq.mcq, MLX5_CQ_DB_REQ_NOT,
		    conn->fdev->conn_res.uar->map, conn->cq.wq.cc);
}
    364
/* Poll and handle up to @budget CQEs.
 *
 * If the budget is exhausted while CQEs may remain, the tasklet is
 * rescheduled to continue instead of re-arming, so one invocation
 * cannot monopolize the CPU. Otherwise the CQ is re-armed for the
 * next event.
 */
static inline void mlx5_fpga_conn_cqes(struct mlx5_fpga_conn *conn,
				       unsigned int budget)
{
	struct mlx5_cqe64 *cqe;

	while (budget) {
		cqe = mlx5_cqwq_get_cqe(&conn->cq.wq);
		if (!cqe)
			break;

		budget--;
		mlx5_cqwq_pop(&conn->cq.wq);
		mlx5_fpga_conn_handle_cqe(conn, cqe);
		mlx5_cqwq_update_db_record(&conn->cq.wq);
	}
	if (!budget) {
		/* Ran out of budget: defer further polling to the tasklet. */
		tasklet_schedule(&conn->cq.tasklet);
		return;
	}

	mlx5_fpga_dbg(conn->fdev, "Re-arming CQ with cc# %u\n", conn->cq.wq.cc);
	/* ensure cq space is freed before enabling more cqes */
	wmb();
	mlx5_fpga_conn_arm_cq(conn);
}
    390
    391static void mlx5_fpga_conn_cq_tasklet(struct tasklet_struct *t)
    392{
    393	struct mlx5_fpga_conn *conn = from_tasklet(conn, t, cq.tasklet);
    394
    395	if (unlikely(!conn->qp.active))
    396		return;
    397	mlx5_fpga_conn_cqes(conn, MLX5_FPGA_CQ_BUDGET);
    398}
    399
    400static void mlx5_fpga_conn_cq_complete(struct mlx5_core_cq *mcq,
    401				       struct mlx5_eqe *eqe)
    402{
    403	struct mlx5_fpga_conn *conn;
    404
    405	conn = container_of(mcq, struct mlx5_fpga_conn, cq.mcq);
    406	if (unlikely(!conn->qp.active))
    407		return;
    408	mlx5_fpga_conn_cqes(conn, MLX5_FPGA_CQ_BUDGET);
    409}
    410
/* Create the completion queue shared by the connection's SQ and RQ.
 *
 * Allocates the CQ work queue on the device NUMA node, marks every CQE
 * invalid/HW-owned, binds the CQ to the EQ of the current CPU, issues
 * CREATE_CQ, and initializes software state (doorbells, completion
 * handler, polling tasklet). Returns 0 on success, negative errno on
 * failure with everything unwound.
 */
static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
{
	struct mlx5_fpga_device *fdev = conn->fdev;
	struct mlx5_core_dev *mdev = fdev->mdev;
	u32 temp_cqc[MLX5_ST_SZ_DW(cqc)] = {0};
	u32 out[MLX5_ST_SZ_DW(create_cq_out)];
	struct mlx5_wq_param wqp;
	struct mlx5_cqe64 *cqe;
	int inlen, err, eqn;
	void *cqc, *in;
	__be64 *pas;
	u32 i;

	cq_size = roundup_pow_of_two(cq_size);
	MLX5_SET(cqc, temp_cqc, log_cq_size, ilog2(cq_size));

	wqp.buf_numa_node = mdev->priv.numa_node;
	wqp.db_numa_node  = mdev->priv.numa_node;

	err = mlx5_cqwq_create(mdev, &wqp, temp_cqc, &conn->cq.wq,
			       &conn->cq.wq_ctrl);
	if (err)
		return err;

	/* Hand all CQEs to hardware: invalid opcode + HW ownership bit. */
	for (i = 0; i < mlx5_cqwq_get_size(&conn->cq.wq); i++) {
		cqe = mlx5_cqwq_get_wqe(&conn->cq.wq, i);
		cqe->op_own = MLX5_CQE_INVALID << 4 | MLX5_CQE_OWNER_MASK;
	}

	/* Command input is followed by one PAS entry per buffer page. */
	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		sizeof(u64) * conn->cq.wq_ctrl.buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_cqwq;
	}

	err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn);
	if (err) {
		kvfree(in);
		goto err_cqwq;
	}

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
	MLX5_SET(cqc, cqc, log_cq_size, ilog2(cq_size));
	MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
	MLX5_SET(cqc, cqc, uar_page, fdev->conn_res.uar->index);
	MLX5_SET(cqc, cqc, log_page_size, conn->cq.wq_ctrl.buf.page_shift -
			   MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(cqc, cqc, dbr_addr, conn->cq.wq_ctrl.db.dma);

	pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas);
	mlx5_fill_page_frag_array(&conn->cq.wq_ctrl.buf, pas);

	err = mlx5_core_create_cq(mdev, &conn->cq.mcq, in, inlen, out, sizeof(out));
	kvfree(in);

	if (err)
		goto err_cqwq;

	/* Software CQ state: the doorbell record holds the consumer index
	 * and the arm counter in two consecutive dwords.
	 */
	conn->cq.mcq.cqe_sz     = 64;
	conn->cq.mcq.set_ci_db  = conn->cq.wq_ctrl.db.db;
	conn->cq.mcq.arm_db     = conn->cq.wq_ctrl.db.db + 1;
	*conn->cq.mcq.set_ci_db = 0;
	*conn->cq.mcq.arm_db    = 0;
	conn->cq.mcq.vector     = 0;
	conn->cq.mcq.comp       = mlx5_fpga_conn_cq_complete;
	conn->cq.mcq.uar        = fdev->conn_res.uar;
	tasklet_setup(&conn->cq.tasklet, mlx5_fpga_conn_cq_tasklet);

	mlx5_fpga_dbg(fdev, "Created CQ #0x%x\n", conn->cq.mcq.cqn);

	goto out;

err_cqwq:
	mlx5_wq_destroy(&conn->cq.wq_ctrl);
out:
	return err;
}
    490
/* Tear down the connection CQ. The polling tasklet is disabled and
 * killed first so no completion processing can run while the CQ and
 * its work queue are destroyed.
 */
static void mlx5_fpga_conn_destroy_cq(struct mlx5_fpga_conn *conn)
{
	tasklet_disable(&conn->cq.tasklet);
	tasklet_kill(&conn->cq.tasklet);
	mlx5_core_destroy_cq(conn->fdev->mdev, &conn->cq.mcq);
	mlx5_wq_destroy(&conn->cq.wq_ctrl);
}
    498
    499static int mlx5_fpga_conn_create_wq(struct mlx5_fpga_conn *conn, void *qpc)
    500{
    501	struct mlx5_fpga_device *fdev = conn->fdev;
    502	struct mlx5_core_dev *mdev = fdev->mdev;
    503	struct mlx5_wq_param wqp;
    504
    505	wqp.buf_numa_node = mdev->priv.numa_node;
    506	wqp.db_numa_node  = mdev->priv.numa_node;
    507
    508	return mlx5_wq_qp_create(mdev, &wqp, qpc, &conn->qp.wq,
    509				 &conn->qp.wq_ctrl);
    510}
    511
/* Create the host-side RC QP used to talk to the FPGA.
 *
 * Ring sizes are rounded up to powers of two (required by the cyclic
 * index masking in the post routines). Allocates the work queues and
 * the SQ/RQ shadow buffer arrays, builds the CREATE_QP command, and
 * stores the returned QPN. Error paths unwind in reverse order.
 */
static int mlx5_fpga_conn_create_qp(struct mlx5_fpga_conn *conn,
				    unsigned int tx_size, unsigned int rx_size)
{
	struct mlx5_fpga_device *fdev = conn->fdev;
	u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
	struct mlx5_core_dev *mdev = fdev->mdev;
	u32 temp_qpc[MLX5_ST_SZ_DW(qpc)] = {};
	void *in = NULL, *qpc;
	int err, inlen;

	conn->qp.rq.pc = 0;
	conn->qp.rq.cc = 0;
	conn->qp.rq.size = roundup_pow_of_two(rx_size);
	conn->qp.sq.pc = 0;
	conn->qp.sq.cc = 0;
	conn->qp.sq.size = roundup_pow_of_two(tx_size);

	/* Temporary qpc only carries the sizing fields the WQ code needs. */
	MLX5_SET(qpc, temp_qpc, log_rq_stride, ilog2(MLX5_SEND_WQE_DS) - 4);
	MLX5_SET(qpc, temp_qpc, log_rq_size, ilog2(conn->qp.rq.size));
	MLX5_SET(qpc, temp_qpc, log_sq_size, ilog2(conn->qp.sq.size));
	err = mlx5_fpga_conn_create_wq(conn, temp_qpc);
	if (err)
		goto out;

	/* Shadow arrays mapping WQE index -> posted buffer. */
	conn->qp.rq.bufs = kvcalloc(conn->qp.rq.size,
				    sizeof(conn->qp.rq.bufs[0]),
				    GFP_KERNEL);
	if (!conn->qp.rq.bufs) {
		err = -ENOMEM;
		goto err_wq;
	}

	conn->qp.sq.bufs = kvcalloc(conn->qp.sq.size,
				    sizeof(conn->qp.sq.bufs[0]),
				    GFP_KERNEL);
	if (!conn->qp.sq.bufs) {
		err = -ENOMEM;
		goto err_rq_bufs;
	}

	/* Command input is followed by one PAS entry per buffer page. */
	inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
		MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) *
		conn->qp.wq_ctrl.buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_sq_bufs;
	}

	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
	MLX5_SET(qpc, qpc, uar_page, fdev->conn_res.uar->index);
	MLX5_SET(qpc, qpc, log_page_size,
		 conn->qp.wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(qpc, qpc, fre, 1);
	MLX5_SET(qpc, qpc, rlky, 1);
	MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
	MLX5_SET(qpc, qpc, pd, fdev->conn_res.pdn);
	MLX5_SET(qpc, qpc, log_rq_stride, ilog2(MLX5_SEND_WQE_DS) - 4);
	MLX5_SET(qpc, qpc, log_rq_size, ilog2(conn->qp.rq.size));
	MLX5_SET(qpc, qpc, rq_type, MLX5_NON_ZERO_RQ);
	MLX5_SET(qpc, qpc, log_sq_size, ilog2(conn->qp.sq.size));
	MLX5_SET(qpc, qpc, cqn_snd, conn->cq.mcq.cqn);
	MLX5_SET(qpc, qpc, cqn_rcv, conn->cq.mcq.cqn);
	MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(mdev));
	MLX5_SET64(qpc, qpc, dbr_addr, conn->qp.wq_ctrl.db.dma);
	if (MLX5_CAP_GEN(mdev, cqe_version) == 1)
		MLX5_SET(qpc, qpc, user_index, 0xFFFFFF);

	mlx5_fill_page_frag_array(&conn->qp.wq_ctrl.buf,
				  (__be64 *)MLX5_ADDR_OF(create_qp_in, in, pas));

	MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
	err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
	if (err)
		goto err_sq_bufs;

	conn->qp.qpn = MLX5_GET(create_qp_out, out, qpn);
	mlx5_fpga_dbg(fdev, "Created QP #0x%x\n", conn->qp.qpn);

	goto out;

err_sq_bufs:
	kvfree(conn->qp.sq.bufs);
err_rq_bufs:
	kvfree(conn->qp.rq.bufs);
err_wq:
	mlx5_wq_destroy(&conn->qp.wq_ctrl);
out:
	kvfree(in);
	return err;
}
    604
    605static void mlx5_fpga_conn_free_recv_bufs(struct mlx5_fpga_conn *conn)
    606{
    607	int ix;
    608
    609	for (ix = 0; ix < conn->qp.rq.size; ix++) {
    610		if (!conn->qp.rq.bufs[ix])
    611			continue;
    612		mlx5_fpga_conn_unmap_buf(conn, conn->qp.rq.bufs[ix]);
    613		kfree(conn->qp.rq.bufs[ix]);
    614		conn->qp.rq.bufs[ix] = NULL;
    615	}
    616}
    617
/* Flush all pending send buffers: both those posted on the SQ and
 * those still waiting on the backlog. Each is unmapped and, if it has
 * a completion callback, completed with WR_FLUSH_ERR so its owner can
 * reclaim it. Buffers without a callback are only unmapped here.
 *
 * NOTE(review): backlog entries are not list_del()'d before their
 * callbacks run — this relies on the connection being torn down right
 * after, with the backlog never touched again; confirm against callers.
 */
static void mlx5_fpga_conn_flush_send_bufs(struct mlx5_fpga_conn *conn)
{
	struct mlx5_fpga_dma_buf *buf, *temp;
	int ix;

	for (ix = 0; ix < conn->qp.sq.size; ix++) {
		buf = conn->qp.sq.bufs[ix];
		if (!buf)
			continue;
		conn->qp.sq.bufs[ix] = NULL;
		mlx5_fpga_conn_unmap_buf(conn, buf);
		if (!buf->complete)
			continue;
		buf->complete(conn, conn->fdev, buf, MLX5_CQE_SYNDROME_WR_FLUSH_ERR);
	}
	list_for_each_entry_safe(buf, temp, &conn->qp.sq.backlog, list) {
		mlx5_fpga_conn_unmap_buf(conn, buf);
		if (!buf->complete)
			continue;
		buf->complete(conn, conn->fdev, buf, MLX5_CQE_SYNDROME_WR_FLUSH_ERR);
	}
}
    640
/* Destroy the host-side QP and release everything it owned: the QP
 * object itself first (so hardware stops using the queues), then all
 * outstanding receive and send buffers, the shadow arrays, and finally
 * the work queues.
 */
static void mlx5_fpga_conn_destroy_qp(struct mlx5_fpga_conn *conn)
{
	struct mlx5_core_dev *dev = conn->fdev->mdev;
	u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {};

	MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
	MLX5_SET(destroy_qp_in, in, qpn, conn->qp.qpn);
	mlx5_cmd_exec_in(dev, destroy_qp, in);

	mlx5_fpga_conn_free_recv_bufs(conn);
	mlx5_fpga_conn_flush_send_bufs(conn);
	kvfree(conn->qp.sq.bufs);
	kvfree(conn->qp.rq.bufs);
	mlx5_wq_destroy(&conn->qp.wq_ctrl);
}
    656
    657static int mlx5_fpga_conn_reset_qp(struct mlx5_fpga_conn *conn)
    658{
    659	struct mlx5_core_dev *mdev = conn->fdev->mdev;
    660	u32 in[MLX5_ST_SZ_DW(qp_2rst_in)] = {};
    661
    662	mlx5_fpga_dbg(conn->fdev, "Modifying QP %u to RST\n", conn->qp.qpn);
    663
    664	MLX5_SET(qp_2rst_in, in, opcode, MLX5_CMD_OP_2RST_QP);
    665	MLX5_SET(qp_2rst_in, in, qpn, conn->qp.qpn);
    666
    667	return mlx5_cmd_exec_in(mdev, qp_2rst, in);
    668}
    669
/* Move the host QP from RESET to INIT, establishing the static
 * attributes: RC service, PKEY/port, PD, and the shared CQ for both
 * directions.
 */
static int mlx5_fpga_conn_init_qp(struct mlx5_fpga_conn *conn)
{
	u32 in[MLX5_ST_SZ_DW(rst2init_qp_in)] = {};
	struct mlx5_fpga_device *fdev = conn->fdev;
	struct mlx5_core_dev *mdev = fdev->mdev;
	u32 *qpc;

	mlx5_fpga_dbg(conn->fdev, "Modifying QP %u to INIT\n", conn->qp.qpn);

	qpc = MLX5_ADDR_OF(rst2init_qp_in, in, qpc);

	MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
	MLX5_SET(qpc, qpc, primary_address_path.pkey_index, MLX5_FPGA_PKEY_INDEX);
	MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, MLX5_FPGA_PORT_NUM);
	MLX5_SET(qpc, qpc, pd, conn->fdev->conn_res.pdn);
	MLX5_SET(qpc, qpc, cqn_snd, conn->cq.mcq.cqn);
	MLX5_SET(qpc, qpc, cqn_rcv, conn->cq.mcq.cqn);
	MLX5_SET64(qpc, qpc, dbr_addr, conn->qp.wq_ctrl.db.dma);

	MLX5_SET(rst2init_qp_in, in, opcode, MLX5_CMD_OP_RST2INIT_QP);
	MLX5_SET(rst2init_qp_in, in, qpn, conn->qp.qpn);

	return mlx5_cmd_exec_in(mdev, rst2init_qp, in);
}
    695
/* Move the host QP from INIT to RTR (ready-to-receive).
 *
 * Programs the remote (FPGA) side of the path: remote QPN, the PSN we
 * expect to receive next (the FPGA's next_send_psn), and the RoCE v2
 * address vector built from the FPGA's MAC and IP stored in fpga_qpc.
 */
static int mlx5_fpga_conn_rtr_qp(struct mlx5_fpga_conn *conn)
{
	u32 in[MLX5_ST_SZ_DW(init2rtr_qp_in)] = {};
	struct mlx5_fpga_device *fdev = conn->fdev;
	struct mlx5_core_dev *mdev = fdev->mdev;
	u32 *qpc;

	mlx5_fpga_dbg(conn->fdev, "QP RTR\n");

	qpc = MLX5_ADDR_OF(init2rtr_qp_in, in, qpc);

	MLX5_SET(qpc, qpc, mtu, MLX5_QPC_MTU_1K_BYTES);
	MLX5_SET(qpc, qpc, log_msg_max, (u8)MLX5_CAP_GEN(mdev, log_max_msg));
	MLX5_SET(qpc, qpc, remote_qpn, conn->fpga_qpn);
	MLX5_SET(qpc, qpc, next_rcv_psn,
		 MLX5_GET(fpga_qpc, conn->fpga_qpc, next_send_psn));
	MLX5_SET(qpc, qpc, primary_address_path.pkey_index, MLX5_FPGA_PKEY_INDEX);
	MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, MLX5_FPGA_PORT_NUM);
	ether_addr_copy(MLX5_ADDR_OF(qpc, qpc, primary_address_path.rmac_47_32),
			MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, fpga_mac_47_32));
	MLX5_SET(qpc, qpc, primary_address_path.udp_sport,
		 MLX5_CAP_ROCE(mdev, r_roce_min_src_udp_port));
	MLX5_SET(qpc, qpc, primary_address_path.src_addr_index,
		 conn->qp.sgid_index);
	MLX5_SET(qpc, qpc, primary_address_path.hop_limit, 0);
	memcpy(MLX5_ADDR_OF(qpc, qpc, primary_address_path.rgid_rip),
	       MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, fpga_ip),
	       MLX5_FLD_SZ_BYTES(qpc, primary_address_path.rgid_rip));

	MLX5_SET(init2rtr_qp_in, in, opcode, MLX5_CMD_OP_INIT2RTR_QP);
	MLX5_SET(init2rtr_qp_in, in, qpn, conn->qp.qpn);

	return mlx5_cmd_exec_in(mdev, init2rtr_qp, in);
}
    730
/* Move the host QP from RTR to RTS (ready-to-send).
 *
 * Sets the transmit-side parameters: our first send PSN (the FPGA's
 * next_rcv_psn), ack/retry behavior, and RNR handling.
 */
static int mlx5_fpga_conn_rts_qp(struct mlx5_fpga_conn *conn)
{
	struct mlx5_fpga_device *fdev = conn->fdev;
	u32 in[MLX5_ST_SZ_DW(rtr2rts_qp_in)] = {};
	struct mlx5_core_dev *mdev = fdev->mdev;
	u32 *qpc;

	mlx5_fpga_dbg(conn->fdev, "QP RTS\n");

	qpc = MLX5_ADDR_OF(rtr2rts_qp_in, in, qpc);

	MLX5_SET(qpc, qpc, log_ack_req_freq, 8);
	MLX5_SET(qpc, qpc, min_rnr_nak, 0x12);
	MLX5_SET(qpc, qpc, primary_address_path.ack_timeout, 0x12); /* ~1.07s */
	MLX5_SET(qpc, qpc, next_send_psn,
		 MLX5_GET(fpga_qpc, conn->fpga_qpc, next_rcv_psn));
	MLX5_SET(qpc, qpc, retry_count, 7);
	MLX5_SET(qpc, qpc, rnr_retry, 7); /* Infinite retry if RNR NACK */

	MLX5_SET(rtr2rts_qp_in, in, opcode, MLX5_CMD_OP_RTR2RTS_QP);
	MLX5_SET(rtr2rts_qp_in, in, qpn, conn->qp.qpn);
	MLX5_SET(rtr2rts_qp_in, in, opt_param_mask, MLX5_QP_OPTPAR_RNR_TIMEOUT);

	return mlx5_cmd_exec_in(mdev, rtr2rts_qp, in);
}
    756
/* Bring both ends of the connection up.
 *
 * Activates the FPGA-side QP, then walks the host QP through
 * RESET -> INIT, fills the RQ with receive buffers, and continues
 * INIT -> RTR -> RTS. On failure the FPGA QP is reverted to INIT and
 * any posted receive buffers are freed.
 */
static int mlx5_fpga_conn_connect(struct mlx5_fpga_conn *conn)
{
	struct mlx5_fpga_device *fdev = conn->fdev;
	int err;

	MLX5_SET(fpga_qpc, conn->fpga_qpc, state, MLX5_FPGA_QPC_STATE_ACTIVE);
	err = mlx5_fpga_modify_qp(conn->fdev->mdev, conn->fpga_qpn,
				  MLX5_FPGA_QPC_STATE, &conn->fpga_qpc);
	if (err) {
		mlx5_fpga_err(fdev, "Failed to activate FPGA RC QP: %d\n", err);
		goto out;
	}

	err = mlx5_fpga_conn_reset_qp(conn);
	if (err) {
		mlx5_fpga_err(fdev, "Failed to change QP state to reset\n");
		goto err_fpga_qp;
	}

	err = mlx5_fpga_conn_init_qp(conn);
	if (err) {
		mlx5_fpga_err(fdev, "Failed to modify QP from RESET to INIT\n");
		goto err_fpga_qp;
	}
	conn->qp.active = true;

	/* Post receive buffers until the RQ is full (-EBUSY) or an
	 * allocation fails.
	 */
	while (!mlx5_fpga_conn_post_recv_buf(conn))
		;

	err = mlx5_fpga_conn_rtr_qp(conn);
	if (err) {
		mlx5_fpga_err(fdev, "Failed to change QP state from INIT to RTR\n");
		goto err_recv_bufs;
	}

	err = mlx5_fpga_conn_rts_qp(conn);
	if (err) {
		mlx5_fpga_err(fdev, "Failed to change QP state from RTR to RTS\n");
		goto err_recv_bufs;
	}
	goto out;

err_recv_bufs:
	mlx5_fpga_conn_free_recv_bufs(conn);
err_fpga_qp:
	MLX5_SET(fpga_qpc, conn->fpga_qpc, state, MLX5_FPGA_QPC_STATE_INIT);
	if (mlx5_fpga_modify_qp(conn->fdev->mdev, conn->fpga_qpn,
				MLX5_FPGA_QPC_STATE, &conn->fpga_qpc))
		mlx5_fpga_err(fdev, "Failed to revert FPGA QP to INIT\n");
out:
	return err;
}
    809
/* Create and connect an FPGA connection of the given @qp_type.
 *
 * Builds the full resource chain: SGID (link-local IPv6 derived from
 * the port MAC), CQ, host QP, FPGA QP, then connects both ends. On any
 * failure everything acquired so far is released in reverse order and
 * an ERR_PTR is returned. attr->recv_cb is mandatory; it receives all
 * inbound messages. The caller owns the returned connection and must
 * free it with mlx5_fpga_conn_destroy().
 */
struct mlx5_fpga_conn *mlx5_fpga_conn_create(struct mlx5_fpga_device *fdev,
					     struct mlx5_fpga_conn_attr *attr,
					     enum mlx5_ifc_fpga_qp_type qp_type)
{
	struct mlx5_fpga_conn *ret, *conn;
	u8 *remote_mac, *remote_ip;
	int err;

	if (!attr->recv_cb)
		return ERR_PTR(-EINVAL);

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn)
		return ERR_PTR(-ENOMEM);

	conn->fdev = fdev;
	INIT_LIST_HEAD(&conn->qp.sq.backlog);

	spin_lock_init(&conn->qp.sq.lock);

	conn->recv_cb = attr->recv_cb;
	conn->cb_arg = attr->cb_arg;

	/* "remote" from the FPGA's point of view, i.e. this host's MAC. */
	remote_mac = MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, remote_mac_47_32);
	err = mlx5_query_mac_address(fdev->mdev, remote_mac);
	if (err) {
		mlx5_fpga_err(fdev, "Failed to query local MAC: %d\n", err);
		ret = ERR_PTR(err);
		goto err;
	}

	/* Build Modified EUI-64 IPv6 address from the MAC address */
	remote_ip = MLX5_ADDR_OF(fpga_qpc, conn->fpga_qpc, remote_ip);
	remote_ip[0] = 0xfe;
	remote_ip[1] = 0x80;
	addrconf_addr_eui48(&remote_ip[8], remote_mac);

	err = mlx5_core_reserved_gid_alloc(fdev->mdev, &conn->qp.sgid_index);
	if (err) {
		mlx5_fpga_err(fdev, "Failed to allocate SGID: %d\n", err);
		ret = ERR_PTR(err);
		goto err;
	}

	err = mlx5_core_roce_gid_set(fdev->mdev, conn->qp.sgid_index,
				     MLX5_ROCE_VERSION_2,
				     MLX5_ROCE_L3_TYPE_IPV6,
				     remote_ip, remote_mac, true, 0,
				     MLX5_FPGA_PORT_NUM);
	if (err) {
		mlx5_fpga_err(fdev, "Failed to set SGID: %d\n", err);
		ret = ERR_PTR(err);
		goto err_rsvd_gid;
	}
	mlx5_fpga_dbg(fdev, "Reserved SGID index %u\n", conn->qp.sgid_index);

	/* Allow for one cqe per rx/tx wqe, plus one cqe for the next wqe,
	 * created during processing of the cqe
	 */
	err = mlx5_fpga_conn_create_cq(conn,
				       (attr->tx_size + attr->rx_size) * 2);
	if (err) {
		mlx5_fpga_err(fdev, "Failed to create CQ: %d\n", err);
		ret = ERR_PTR(err);
		goto err_gid;
	}

	mlx5_fpga_conn_arm_cq(conn);

	err = mlx5_fpga_conn_create_qp(conn, attr->tx_size, attr->rx_size);
	if (err) {
		mlx5_fpga_err(fdev, "Failed to create QP: %d\n", err);
		ret = ERR_PTR(err);
		goto err_cq;
	}

	/* Describe the FPGA-side endpoint of the RC connection. */
	MLX5_SET(fpga_qpc, conn->fpga_qpc, state, MLX5_FPGA_QPC_STATE_INIT);
	MLX5_SET(fpga_qpc, conn->fpga_qpc, qp_type, qp_type);
	MLX5_SET(fpga_qpc, conn->fpga_qpc, st, MLX5_FPGA_QPC_ST_RC);
	MLX5_SET(fpga_qpc, conn->fpga_qpc, ether_type, ETH_P_8021Q);
	MLX5_SET(fpga_qpc, conn->fpga_qpc, vid, 0);
	MLX5_SET(fpga_qpc, conn->fpga_qpc, next_rcv_psn, 1);
	MLX5_SET(fpga_qpc, conn->fpga_qpc, next_send_psn, 0);
	MLX5_SET(fpga_qpc, conn->fpga_qpc, pkey, MLX5_FPGA_PKEY);
	MLX5_SET(fpga_qpc, conn->fpga_qpc, remote_qpn, conn->qp.qpn);
	MLX5_SET(fpga_qpc, conn->fpga_qpc, rnr_retry, 7);
	MLX5_SET(fpga_qpc, conn->fpga_qpc, retry_count, 7);

	err = mlx5_fpga_create_qp(fdev->mdev, &conn->fpga_qpc,
				  &conn->fpga_qpn);
	if (err) {
		mlx5_fpga_err(fdev, "Failed to create FPGA RC QP: %d\n", err);
		ret = ERR_PTR(err);
		goto err_qp;
	}

	err = mlx5_fpga_conn_connect(conn);
	if (err) {
		ret = ERR_PTR(err);
		goto err_conn;
	}

	mlx5_fpga_dbg(fdev, "FPGA QPN is %u\n", conn->fpga_qpn);
	ret = conn;
	goto out;

err_conn:
	mlx5_fpga_destroy_qp(conn->fdev->mdev, conn->fpga_qpn);
err_qp:
	mlx5_fpga_conn_destroy_qp(conn);
err_cq:
	mlx5_fpga_conn_destroy_cq(conn);
err_gid:
	mlx5_core_roce_gid_set(fdev->mdev, conn->qp.sgid_index, 0, 0, NULL,
			       NULL, false, 0, MLX5_FPGA_PORT_NUM);
err_rsvd_gid:
	mlx5_core_reserved_gid_free(fdev->mdev, conn->qp.sgid_index);
err:
	kfree(conn);
out:
	return ret;
}
    932
/* Destroy a connection created by mlx5_fpga_conn_create().
 *
 * Deactivates the QP and quiesces completion processing (tasklet +
 * IRQ) before tearing down the FPGA QP, host QP, CQ, and GID, then
 * frees the connection itself.
 */
void mlx5_fpga_conn_destroy(struct mlx5_fpga_conn *conn)
{
	conn->qp.active = false;
	tasklet_disable(&conn->cq.tasklet);
	synchronize_irq(conn->cq.mcq.irqn);

	mlx5_fpga_destroy_qp(conn->fdev->mdev, conn->fpga_qpn);
	mlx5_fpga_conn_destroy_qp(conn);
	mlx5_fpga_conn_destroy_cq(conn);

	mlx5_core_roce_gid_set(conn->fdev->mdev, conn->qp.sgid_index, 0, 0,
			       NULL, NULL, false, 0, MLX5_FPGA_PORT_NUM);
	mlx5_core_reserved_gid_free(conn->fdev->mdev, conn->qp.sgid_index);
	kfree(conn);
}
    948
/* Set up the per-device resources shared by all FPGA connections:
 * RoCE enablement, a UAR page, a PD, and the DMA mkey. Returns 0 on
 * success; on failure everything acquired so far is released in
 * reverse order.
 */
int mlx5_fpga_conn_device_init(struct mlx5_fpga_device *fdev)
{
	int err;

	err = mlx5_nic_vport_enable_roce(fdev->mdev);
	if (err) {
		mlx5_fpga_err(fdev, "Failed to enable RoCE: %d\n", err);
		goto out;
	}

	fdev->conn_res.uar = mlx5_get_uars_page(fdev->mdev);
	if (IS_ERR(fdev->conn_res.uar)) {
		err = PTR_ERR(fdev->conn_res.uar);
		mlx5_fpga_err(fdev, "get_uars_page failed, %d\n", err);
		goto err_roce;
	}
	mlx5_fpga_dbg(fdev, "Allocated UAR index %u\n",
		      fdev->conn_res.uar->index);

	err = mlx5_core_alloc_pd(fdev->mdev, &fdev->conn_res.pdn);
	if (err) {
		mlx5_fpga_err(fdev, "alloc pd failed, %d\n", err);
		goto err_uar;
	}
	mlx5_fpga_dbg(fdev, "Allocated PD %u\n", fdev->conn_res.pdn);

	err = mlx5_fpga_conn_create_mkey(fdev->mdev, fdev->conn_res.pdn,
					 &fdev->conn_res.mkey);
	if (err) {
		mlx5_fpga_err(fdev, "create mkey failed, %d\n", err);
		goto err_dealloc_pd;
	}
	mlx5_fpga_dbg(fdev, "Created mkey 0x%x\n", fdev->conn_res.mkey);

	return 0;

err_dealloc_pd:
	mlx5_core_dealloc_pd(fdev->mdev, fdev->conn_res.pdn);
err_uar:
	mlx5_put_uars_page(fdev->mdev, fdev->conn_res.uar);
err_roce:
	mlx5_nic_vport_disable_roce(fdev->mdev);
out:
	return err;
}
    994
/* Release the per-device resources in reverse order of
 * mlx5_fpga_conn_device_init().
 */
void mlx5_fpga_conn_device_cleanup(struct mlx5_fpga_device *fdev)
{
	mlx5_core_destroy_mkey(fdev->mdev, fdev->conn_res.mkey);
	mlx5_core_dealloc_pd(fdev->mdev, fdev->conn_res.pdn);
	mlx5_put_uars_page(fdev->mdev, fdev->conn_res.uar);
	mlx5_nic_vport_disable_roce(fdev->mdev);
}