cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

srq_cmd.c (20478B)


// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2013-2018, Mellanox Technologies inc.  All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/mlx5/driver.h>
#include "mlx5_ib.h"
#include "srq.h"
#include "qp.h"

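/*
 * get_pas_size() computes the size in bytes of the physical address (PAS)
 * array that a CREATE_* command needs for this SRQ buffer. attr->log_page_size
 * is relative to MLX5_ADAPTER_PAGE_SHIFT (4 KiB pages), hence the "+ 12";
 * page_offset is quantized in 64ths of a page (po_quanta = page_size / 64),
 * matching the quantization in set_srq_page_size() below; and the "+ 4"
 * mirrors the log_wq_stride = wqe_shift + 4 encoding in set_wq(), i.e.
 * strides counted from a 16-byte base.
 *
 * Illustrative example (values invented for this comment, not from the
 * source): log_page_size = 0, log_size = 8, wqe_shift = 2 and page_offset = 0
 * give rq_sz = 1 << (8 + 4 + 2) = 16 KiB, i.e. 4 pages and a 32-byte PAS
 * array.
 */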
static int get_pas_size(struct mlx5_srq_attr *in)
{
	u32 log_page_size = in->log_page_size + 12;
	u32 log_srq_size  = in->log_size;
	u32 log_rq_stride = in->wqe_shift;
	u32 page_offset   = in->page_offset;
	u32 po_quanta	  = 1 << (log_page_size - 6);
	u32 rq_sz	  = 1 << (log_srq_size + 4 + log_rq_stride);
	u32 page_size	  = 1 << log_page_size;
	u32 rq_sz_po      = rq_sz + (page_offset * po_quanta);
	u32 rq_num_pas    = DIV_ROUND_UP(rq_sz_po, page_size);

	return rq_num_pas * sizeof(u64);
}

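/*
 * set_wq() and set_srqc() translate a software mlx5_srq_attr into the
 * firmware wq/srqc command layouts via the MLX5_SET() accessors. Note that
 * the stride encodings differ: the wq context stores log_wq_stride as
 * wqe_shift + 4, while the srqc stores wqe_shift directly in log_rq_stride;
 * get_wq() and get_srqc() below invert the same transforms when decoding
 * query output.
 */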
static void set_wq(void *wq, struct mlx5_srq_attr *in)
{
	MLX5_SET(wq,   wq, wq_signature,  !!(in->flags
		 & MLX5_SRQ_FLAG_WQ_SIG));
	MLX5_SET(wq,   wq, log_wq_pg_sz,  in->log_page_size);
	MLX5_SET(wq,   wq, log_wq_stride, in->wqe_shift + 4);
	MLX5_SET(wq,   wq, log_wq_sz,     in->log_size);
	MLX5_SET(wq,   wq, page_offset,   in->page_offset);
	MLX5_SET(wq,   wq, lwm,		  in->lwm);
	MLX5_SET(wq,   wq, pd,		  in->pd);
	MLX5_SET64(wq, wq, dbr_addr,	  in->db_record);
}

static void set_srqc(void *srqc, struct mlx5_srq_attr *in)
{
	MLX5_SET(srqc,   srqc, wq_signature,  !!(in->flags
		 & MLX5_SRQ_FLAG_WQ_SIG));
	MLX5_SET(srqc,   srqc, log_page_size, in->log_page_size);
	MLX5_SET(srqc,   srqc, log_rq_stride, in->wqe_shift);
	MLX5_SET(srqc,   srqc, log_srq_size,  in->log_size);
	MLX5_SET(srqc,   srqc, page_offset,   in->page_offset);
	MLX5_SET(srqc,	 srqc, lwm,	      in->lwm);
	MLX5_SET(srqc,	 srqc, pd,	      in->pd);
	MLX5_SET64(srqc, srqc, dbr_addr,      in->db_record);
	MLX5_SET(srqc,	 srqc, xrcd,	      in->xrcd);
	MLX5_SET(srqc,	 srqc, cqn,	      in->cqn);
}

static void get_wq(void *wq, struct mlx5_srq_attr *in)
{
	if (MLX5_GET(wq, wq, wq_signature))
		in->flags &= MLX5_SRQ_FLAG_WQ_SIG;
	in->log_page_size = MLX5_GET(wq,   wq, log_wq_pg_sz);
	in->wqe_shift	  = MLX5_GET(wq,   wq, log_wq_stride) - 4;
	in->log_size	  = MLX5_GET(wq,   wq, log_wq_sz);
	in->page_offset   = MLX5_GET(wq,   wq, page_offset);
	in->lwm		  = MLX5_GET(wq,   wq, lwm);
	in->pd		  = MLX5_GET(wq,   wq, pd);
	in->db_record	  = MLX5_GET64(wq, wq, dbr_addr);
}

static void get_srqc(void *srqc, struct mlx5_srq_attr *in)
{
	if (MLX5_GET(srqc, srqc, wq_signature))
		in->flags &= MLX5_SRQ_FLAG_WQ_SIG;
	in->log_page_size = MLX5_GET(srqc,   srqc, log_page_size);
	in->wqe_shift	  = MLX5_GET(srqc,   srqc, log_rq_stride);
	in->log_size	  = MLX5_GET(srqc,   srqc, log_srq_size);
	in->page_offset   = MLX5_GET(srqc,   srqc, page_offset);
	in->lwm		  = MLX5_GET(srqc,   srqc, lwm);
	in->pd		  = MLX5_GET(srqc,   srqc, pd);
	in->db_record	  = MLX5_GET64(srqc, srqc, dbr_addr);
}

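/*
 * Look up an SRQ by number under the table's XArray lock, taking a
 * reference on it. A minimal, hypothetical caller sketch (the put helper is
 * the same one used elsewhere in this file):
 *
 *	struct mlx5_core_srq *srq = mlx5_cmd_get_srq(dev, srqn);
 *
 *	if (srq) {
 *		... use srq ...
 *		mlx5_core_res_put(&srq->common);
 *	}
 */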
struct mlx5_core_srq *mlx5_cmd_get_srq(struct mlx5_ib_dev *dev, u32 srqn)
{
	struct mlx5_srq_table *table = &dev->srq_table;
	struct mlx5_core_srq *srq;

	xa_lock_irq(&table->array);
	srq = xa_load(&table->array, srqn);
	if (srq)
		refcount_inc(&srq->common.refcount);
	xa_unlock_irq(&table->array);

	return srq;
}

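/*
 * Page-size selection for user-memory (umem) SRQs: the set_srq_page_size()
 * macro asks mlx5_umem_find_best_quantized_pgoff() for the largest page
 * size that fits the context's log_pgsz_fld field, with the page offset
 * quantized in 64ths of a page; the WARN_ON() in __set_srq_page_size()
 * cross-checks that the resulting PAS count agrees with get_pas_size().
 */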
static int __set_srq_page_size(struct mlx5_srq_attr *in,
			       unsigned long page_size)
{
	if (!page_size)
		return -EINVAL;
	in->log_page_size = order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT;

	if (WARN_ON(get_pas_size(in) !=
		    ib_umem_num_dma_blocks(in->umem, page_size) * sizeof(u64)))
		return -EINVAL;
	return 0;
}

#define set_srq_page_size(in, typ, log_pgsz_fld)                               \
	__set_srq_page_size(in, mlx5_umem_find_best_quantized_pgoff(           \
					(in)->umem, typ, log_pgsz_fld,         \
					MLX5_ADAPTER_PAGE_SHIFT, page_offset,  \
					64, &(in)->page_offset))

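/*
 * Legacy (ISSI 0) SRQ creation: size an inbox for the command plus the PAS
 * array, fill the SRQ context, populate the PAS entries either from the
 * user memory region or from the caller-supplied list, and execute
 * CREATE_SRQ. On success the firmware-assigned srqn and the uid are
 * recorded in the core SRQ object.
 */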
static int create_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			  struct mlx5_srq_attr *in)
{
	u32 create_out[MLX5_ST_SZ_DW(create_srq_out)] = {0};
	void *create_in;
	void *srqc;
	void *pas;
	int pas_size;
	int inlen;
	int err;

	if (in->umem) {
		err = set_srq_page_size(in, srqc, log_page_size);
		if (err)
			return err;
	}

	pas_size  = get_pas_size(in);
	inlen	  = MLX5_ST_SZ_BYTES(create_srq_in) + pas_size;
	create_in = kvzalloc(inlen, GFP_KERNEL);
	if (!create_in)
		return -ENOMEM;

	MLX5_SET(create_srq_in, create_in, uid, in->uid);
	srqc = MLX5_ADDR_OF(create_srq_in, create_in, srq_context_entry);
	pas = MLX5_ADDR_OF(create_srq_in, create_in, pas);

	set_srqc(srqc, in);
	if (in->umem)
		mlx5_ib_populate_pas(
			in->umem,
			1UL << (in->log_page_size + MLX5_ADAPTER_PAGE_SHIFT),
			pas, 0);
	else
		memcpy(pas, in->pas, pas_size);

	MLX5_SET(create_srq_in, create_in, opcode,
		 MLX5_CMD_OP_CREATE_SRQ);

	err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out,
			    sizeof(create_out));
	kvfree(create_in);
	if (!err) {
		srq->srqn = MLX5_GET(create_srq_out, create_out, srqn);
		srq->uid = in->uid;
	}

	return err;
}

static int destroy_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
	u32 in[MLX5_ST_SZ_DW(destroy_srq_in)] = {};

	MLX5_SET(destroy_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_SRQ);
	MLX5_SET(destroy_srq_in, in, srqn, srq->srqn);
	MLX5_SET(destroy_srq_in, in, uid, srq->uid);

	return mlx5_cmd_exec_in(dev->mdev, destroy_srq, in);
}

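/*
 * Arm the SRQ limit event: lwm is the limit water mark, in WQEs. Once
 * armed, the device raises an SRQ_RQ_LIMIT event (handled by
 * srq_event_notifier() below) when the number of posted receive WQEs drops
 * below lwm.
 */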
static int arm_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
		       u16 lwm, int is_srq)
{
	u32 in[MLX5_ST_SZ_DW(arm_rq_in)] = {};

	MLX5_SET(arm_rq_in, in, opcode, MLX5_CMD_OP_ARM_RQ);
	MLX5_SET(arm_rq_in, in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_SRQ);
	MLX5_SET(arm_rq_in, in, srq_number, srq->srqn);
	MLX5_SET(arm_rq_in, in, lwm, lwm);
	MLX5_SET(arm_rq_in, in, uid, srq->uid);

	return mlx5_cmd_exec_in(dev->mdev, arm_rq, in);
}

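/*
 * Read the SRQ context back from firmware and decode it into a
 * mlx5_srq_attr; any context state other than GOOD is surfaced as
 * MLX5_SRQ_FLAG_ERR.
 */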
static int query_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_srq_attr *out)
{
	u32 in[MLX5_ST_SZ_DW(query_srq_in)] = {};
	u32 *srq_out;
	void *srqc;
	int err;

	srq_out = kvzalloc(MLX5_ST_SZ_BYTES(query_srq_out), GFP_KERNEL);
	if (!srq_out)
		return -ENOMEM;

	MLX5_SET(query_srq_in, in, opcode, MLX5_CMD_OP_QUERY_SRQ);
	MLX5_SET(query_srq_in, in, srqn, srq->srqn);
	err = mlx5_cmd_exec_inout(dev->mdev, query_srq, in, srq_out);
	if (err)
		goto out;

	srqc = MLX5_ADDR_OF(query_srq_out, srq_out, srq_context_entry);
	get_srqc(srqc, out);
	if (MLX5_GET(srqc, srqc, state) != MLX5_SRQC_STATE_GOOD)
		out->flags |= MLX5_SRQ_FLAG_ERR;
out:
	kvfree(srq_out);
	return err;
}

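/*
 * The XRC SRQ variants below mirror the plain SRQ commands, but carry the
 * context in xrc_srq_context_entry and additionally program user_index.
 */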
static int create_xrc_srq_cmd(struct mlx5_ib_dev *dev,
			      struct mlx5_core_srq *srq,
			      struct mlx5_srq_attr *in)
{
	u32 create_out[MLX5_ST_SZ_DW(create_xrc_srq_out)];
	void *create_in;
	void *xrc_srqc;
	void *pas;
	int pas_size;
	int inlen;
	int err;

	if (in->umem) {
		err = set_srq_page_size(in, xrc_srqc, log_page_size);
		if (err)
			return err;
	}

	pas_size  = get_pas_size(in);
	inlen	  = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size;
	create_in = kvzalloc(inlen, GFP_KERNEL);
	if (!create_in)
		return -ENOMEM;

	MLX5_SET(create_xrc_srq_in, create_in, uid, in->uid);
	xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in,
				xrc_srq_context_entry);
	pas	 = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas);

	set_srqc(xrc_srqc, in);
	MLX5_SET(xrc_srqc, xrc_srqc, user_index, in->user_index);
	if (in->umem)
		mlx5_ib_populate_pas(
			in->umem,
			1UL << (in->log_page_size + MLX5_ADAPTER_PAGE_SHIFT),
			pas, 0);
	else
		memcpy(pas, in->pas, pas_size);
	MLX5_SET(create_xrc_srq_in, create_in, opcode,
		 MLX5_CMD_OP_CREATE_XRC_SRQ);

	memset(create_out, 0, sizeof(create_out));
	err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out,
			    sizeof(create_out));
	if (err)
		goto out;

	srq->srqn = MLX5_GET(create_xrc_srq_out, create_out, xrc_srqn);
	srq->uid = in->uid;
out:
	kvfree(create_in);
	return err;
}

static int destroy_xrc_srq_cmd(struct mlx5_ib_dev *dev,
			       struct mlx5_core_srq *srq)
{
	u32 in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)] = {};

	MLX5_SET(destroy_xrc_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRC_SRQ);
	MLX5_SET(destroy_xrc_srq_in, in, xrc_srqn, srq->srqn);
	MLX5_SET(destroy_xrc_srq_in, in, uid, srq->uid);

	return mlx5_cmd_exec_in(dev->mdev, destroy_xrc_srq, in);
}

static int arm_xrc_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			   u16 lwm)
{
	u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {};

	MLX5_SET(arm_xrc_srq_in, in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
	MLX5_SET(arm_xrc_srq_in, in, op_mod,
		 MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
	MLX5_SET(arm_xrc_srq_in, in, xrc_srqn, srq->srqn);
	MLX5_SET(arm_xrc_srq_in, in, lwm, lwm);
	MLX5_SET(arm_xrc_srq_in, in, uid, srq->uid);

	return mlx5_cmd_exec_in(dev->mdev, arm_xrc_srq, in);
}

static int query_xrc_srq_cmd(struct mlx5_ib_dev *dev,
			     struct mlx5_core_srq *srq,
			     struct mlx5_srq_attr *out)
{
	u32 in[MLX5_ST_SZ_DW(query_xrc_srq_in)] = {};
	u32 *xrcsrq_out;
	void *xrc_srqc;
	int err;

	xrcsrq_out = kvzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out), GFP_KERNEL);
	if (!xrcsrq_out)
		return -ENOMEM;

	MLX5_SET(query_xrc_srq_in, in, opcode, MLX5_CMD_OP_QUERY_XRC_SRQ);
	MLX5_SET(query_xrc_srq_in, in, xrc_srqn, srq->srqn);

	err = mlx5_cmd_exec_inout(dev->mdev, query_xrc_srq, in, xrcsrq_out);
	if (err)
		goto out;

	xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, xrcsrq_out,
				xrc_srq_context_entry);
	get_srqc(xrc_srqc, out);
	if (MLX5_GET(xrc_srqc, xrc_srqc, state) != MLX5_XRC_SRQC_STATE_GOOD)
		out->flags |= MLX5_SRQ_FLAG_ERR;

out:
	kvfree(xrcsrq_out);
	return err;
}

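/*
 * RMP (receive memory pool) commands: on ISSI >= 1 devices a basic SRQ is
 * backed by an RMP, whose receive queue is described by a wq context nested
 * inside the rmpc.
 */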
static int create_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			  struct mlx5_srq_attr *in)
{
	void *create_out = NULL;
	void *create_in = NULL;
	void *rmpc;
	void *wq;
	void *pas;
	int pas_size;
	int outlen;
	int inlen;
	int err;

	if (in->umem) {
		err = set_srq_page_size(in, wq, log_wq_pg_sz);
		if (err)
			return err;
	}

	pas_size = get_pas_size(in);
	inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
	outlen = MLX5_ST_SZ_BYTES(create_rmp_out);
	create_in = kvzalloc(inlen, GFP_KERNEL);
	create_out = kvzalloc(outlen, GFP_KERNEL);
	if (!create_in || !create_out) {
		err = -ENOMEM;
		goto out;
	}

	rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx);
	wq = MLX5_ADDR_OF(rmpc, rmpc, wq);

	MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
	MLX5_SET(create_rmp_in, create_in, uid, in->uid);
	pas = MLX5_ADDR_OF(rmpc, rmpc, wq.pas);

	set_wq(wq, in);
	if (in->umem)
		mlx5_ib_populate_pas(
			in->umem,
			1UL << (in->log_page_size + MLX5_ADAPTER_PAGE_SHIFT),
			pas, 0);
	else
		memcpy(pas, in->pas, pas_size);

	MLX5_SET(create_rmp_in, create_in, opcode, MLX5_CMD_OP_CREATE_RMP);
	err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out, outlen);
	if (!err) {
		srq->srqn = MLX5_GET(create_rmp_out, create_out, rmpn);
		srq->uid = in->uid;
	}

out:
	kvfree(create_in);
	kvfree(create_out);
	return err;
}

static int destroy_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
	u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)] = {};

	MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP);
	MLX5_SET(destroy_rmp_in, in, rmpn, srq->srqn);
	MLX5_SET(destroy_rmp_in, in, uid, srq->uid);
	return mlx5_cmd_exec_in(dev->mdev, destroy_rmp, in);
}

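/*
 * Arming an RMP is a MODIFY_RMP command: write the new lwm into the nested
 * wq context and set the lwm bit in the modify bitmask so firmware applies
 * only that field.
 */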
static int arm_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
		       u16 lwm)
{
	void *out = NULL;
	void *in = NULL;
	void *rmpc;
	void *wq;
	void *bitmask;
	int outlen;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rmp_in);
	outlen = MLX5_ST_SZ_BYTES(modify_rmp_out);

	in = kvzalloc(inlen, GFP_KERNEL);
	out = kvzalloc(outlen, GFP_KERNEL);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	rmpc =	  MLX5_ADDR_OF(modify_rmp_in,   in,   ctx);
	bitmask = MLX5_ADDR_OF(modify_rmp_in,   in,   bitmask);
	wq   =	  MLX5_ADDR_OF(rmpc,	        rmpc, wq);

	MLX5_SET(modify_rmp_in, in,	 rmp_state, MLX5_RMPC_STATE_RDY);
	MLX5_SET(modify_rmp_in, in,	 rmpn,      srq->srqn);
	MLX5_SET(modify_rmp_in, in, uid, srq->uid);
	MLX5_SET(wq,		wq,	 lwm,	    lwm);
	MLX5_SET(rmp_bitmask,	bitmask, lwm,	    1);
	MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
	MLX5_SET(modify_rmp_in, in, opcode, MLX5_CMD_OP_MODIFY_RMP);

	err = mlx5_cmd_exec_inout(dev->mdev, modify_rmp, in, out);

out:
	kvfree(in);
	kvfree(out);
	return err;
}

static int query_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_srq_attr *out)
{
	u32 *rmp_out = NULL;
	u32 *rmp_in = NULL;
	void *rmpc;
	int outlen;
	int inlen;
	int err;

	outlen = MLX5_ST_SZ_BYTES(query_rmp_out);
	inlen = MLX5_ST_SZ_BYTES(query_rmp_in);

	rmp_out = kvzalloc(outlen, GFP_KERNEL);
	rmp_in = kvzalloc(inlen, GFP_KERNEL);
	if (!rmp_out || !rmp_in) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_rmp_in, rmp_in, opcode, MLX5_CMD_OP_QUERY_RMP);
	MLX5_SET(query_rmp_in, rmp_in, rmpn,   srq->srqn);
	err = mlx5_cmd_exec_inout(dev->mdev, query_rmp, rmp_in, rmp_out);
	if (err)
		goto out;

	rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context);
	get_wq(MLX5_ADDR_OF(rmpc, rmpc, wq), out);
	if (MLX5_GET(rmpc, rmpc, state) != MLX5_RMPC_STATE_RDY)
		out->flags |= MLX5_SRQ_FLAG_ERR;

out:
	kvfree(rmp_out);
	kvfree(rmp_in);
	return err;
}

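/*
 * XRQ commands back tag-matching SRQs (IB_SRQT_TM): the xrq context embeds
 * a wq plus a tag_matching_topology_context carrying the matching-list size
 * and, optionally, rendezvous offload.
 */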
static int create_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			  struct mlx5_srq_attr *in)
{
	u32 create_out[MLX5_ST_SZ_DW(create_xrq_out)] = {0};
	void *create_in;
	void *xrqc;
	void *wq;
	void *pas;
	int pas_size;
	int inlen;
	int err;

	if (in->umem) {
		err = set_srq_page_size(in, wq, log_wq_pg_sz);
		if (err)
			return err;
	}

	pas_size = get_pas_size(in);
	inlen = MLX5_ST_SZ_BYTES(create_xrq_in) + pas_size;
	create_in = kvzalloc(inlen, GFP_KERNEL);
	if (!create_in)
		return -ENOMEM;

	xrqc = MLX5_ADDR_OF(create_xrq_in, create_in, xrq_context);
	wq = MLX5_ADDR_OF(xrqc, xrqc, wq);
	pas = MLX5_ADDR_OF(xrqc, xrqc, wq.pas);

	set_wq(wq, in);
	if (in->umem)
		mlx5_ib_populate_pas(
			in->umem,
			1UL << (in->log_page_size + MLX5_ADAPTER_PAGE_SHIFT),
			pas, 0);
	else
		memcpy(pas, in->pas, pas_size);

	if (in->type == IB_SRQT_TM) {
		MLX5_SET(xrqc, xrqc, topology, MLX5_XRQC_TOPOLOGY_TAG_MATCHING);
		if (in->flags & MLX5_SRQ_FLAG_RNDV)
			MLX5_SET(xrqc, xrqc, offload, MLX5_XRQC_OFFLOAD_RNDV);
		MLX5_SET(xrqc, xrqc,
			 tag_matching_topology_context.log_matching_list_sz,
			 in->tm_log_list_size);
	}
	MLX5_SET(xrqc, xrqc, user_index, in->user_index);
	MLX5_SET(xrqc, xrqc, cqn, in->cqn);
	MLX5_SET(create_xrq_in, create_in, opcode, MLX5_CMD_OP_CREATE_XRQ);
	MLX5_SET(create_xrq_in, create_in, uid, in->uid);
	err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out,
			    sizeof(create_out));
	kvfree(create_in);
	if (!err) {
		srq->srqn = MLX5_GET(create_xrq_out, create_out, xrqn);
		srq->uid = in->uid;
	}

	return err;
}

static int destroy_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
	u32 in[MLX5_ST_SZ_DW(destroy_xrq_in)] = {};

	MLX5_SET(destroy_xrq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRQ);
	MLX5_SET(destroy_xrq_in, in, xrqn, srq->srqn);
	MLX5_SET(destroy_xrq_in, in, uid, srq->uid);

	return mlx5_cmd_exec_in(dev->mdev, destroy_xrq, in);
}

static int arm_xrq_cmd(struct mlx5_ib_dev *dev,
		       struct mlx5_core_srq *srq,
		       u16 lwm)
{
	u32 in[MLX5_ST_SZ_DW(arm_rq_in)] = {};

	MLX5_SET(arm_rq_in, in, opcode, MLX5_CMD_OP_ARM_RQ);
	MLX5_SET(arm_rq_in, in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_XRQ);
	MLX5_SET(arm_rq_in, in, srq_number, srq->srqn);
	MLX5_SET(arm_rq_in, in, lwm, lwm);
	MLX5_SET(arm_rq_in, in, uid, srq->uid);

	return mlx5_cmd_exec_in(dev->mdev, arm_rq, in);
}

static int query_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_srq_attr *out)
{
	u32 in[MLX5_ST_SZ_DW(query_xrq_in)] = {};
	u32 *xrq_out;
	int outlen = MLX5_ST_SZ_BYTES(query_xrq_out);
	void *xrqc;
	int err;

	xrq_out = kvzalloc(outlen, GFP_KERNEL);
	if (!xrq_out)
		return -ENOMEM;

	MLX5_SET(query_xrq_in, in, opcode, MLX5_CMD_OP_QUERY_XRQ);
	MLX5_SET(query_xrq_in, in, xrqn, srq->srqn);

	err = mlx5_cmd_exec_inout(dev->mdev, query_xrq, in, xrq_out);
	if (err)
		goto out;

	xrqc = MLX5_ADDR_OF(query_xrq_out, xrq_out, xrq_context);
	get_wq(MLX5_ADDR_OF(xrqc, xrqc, wq), out);
	if (MLX5_GET(xrqc, xrqc, state) != MLX5_XRQC_STATE_GOOD)
		out->flags |= MLX5_SRQ_FLAG_ERR;
	out->tm_next_tag =
		MLX5_GET(xrqc, xrqc,
			 tag_matching_topology_context.append_next_index);
	out->tm_hw_phase_cnt =
		MLX5_GET(xrqc, xrqc,
			 tag_matching_topology_context.hw_phase_cnt);
	out->tm_sw_phase_cnt =
		MLX5_GET(xrqc, xrqc,
			 tag_matching_topology_context.sw_phase_cnt);

out:
	kvfree(xrq_out);
	return err;
}

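/*
 * Dispatch helpers: ISSI 0 firmware speaks only the legacy SRQ commands;
 * otherwise the resource type chosen in mlx5_cmd_create_srq() selects the
 * XRC SRQ, XRQ or RMP flavour.
 */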
static int create_srq_split(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			    struct mlx5_srq_attr *in)
{
	if (!dev->mdev->issi)
		return create_srq_cmd(dev, srq, in);
	switch (srq->common.res) {
	case MLX5_RES_XSRQ:
		return create_xrc_srq_cmd(dev, srq, in);
	case MLX5_RES_XRQ:
		return create_xrq_cmd(dev, srq, in);
	default:
		return create_rmp_cmd(dev, srq, in);
	}
}

static int destroy_srq_split(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
	if (!dev->mdev->issi)
		return destroy_srq_cmd(dev, srq);
	switch (srq->common.res) {
	case MLX5_RES_XSRQ:
		return destroy_xrc_srq_cmd(dev, srq);
	case MLX5_RES_XRQ:
		return destroy_xrq_cmd(dev, srq);
	default:
		return destroy_rmp_cmd(dev, srq);
	}
}

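/*
 * Public entry points. Creation maps the SRQ type to a resource type,
 * creates the hardware object, then publishes it in the srqn-indexed XArray
 * so mlx5_cmd_get_srq() and srq_event_notifier() can find it.
 */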
int mlx5_cmd_create_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			struct mlx5_srq_attr *in)
{
	struct mlx5_srq_table *table = &dev->srq_table;
	int err;

	switch (in->type) {
	case IB_SRQT_XRC:
		srq->common.res = MLX5_RES_XSRQ;
		break;
	case IB_SRQT_TM:
		srq->common.res = MLX5_RES_XRQ;
		break;
	default:
		srq->common.res = MLX5_RES_SRQ;
	}

	err = create_srq_split(dev, srq, in);
	if (err)
		return err;

	refcount_set(&srq->common.refcount, 1);
	init_completion(&srq->common.free);

	err = xa_err(xa_store_irq(&table->array, srq->srqn, srq, GFP_KERNEL));
	if (err)
		goto err_destroy_srq_split;

	return 0;

err_destroy_srq_split:
	destroy_srq_split(dev, srq);

	return err;
}

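/*
 * Destruction first swaps the XArray entry for XA_ZERO_ENTRY, so concurrent
 * lookups miss while the index (and its allocated slot) stays reserved; on
 * failure the original entry can be restored without allocating.
 */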
int mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
	struct mlx5_srq_table *table = &dev->srq_table;
	struct mlx5_core_srq *tmp;
	int err;

	/* Delete entry, but leave index occupied */
	tmp = xa_cmpxchg_irq(&table->array, srq->srqn, srq, XA_ZERO_ENTRY, 0);
	if (WARN_ON(tmp != srq))
		return xa_err(tmp) ?: -EINVAL;

	err = destroy_srq_split(dev, srq);
	if (err) {
		/*
		 * No need to check the return value here: we are storing
		 * into a pre-allocated XArray slot, so restoring the entry
		 * cannot fail at this stage.
		 */
		xa_cmpxchg_irq(&table->array, srq->srqn, XA_ZERO_ENTRY, srq, 0);
		return err;
	}
	xa_erase_irq(&table->array, srq->srqn);

	mlx5_core_res_put(&srq->common);
	wait_for_completion(&srq->common.free);
	return 0;
}

int mlx5_cmd_query_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
		       struct mlx5_srq_attr *out)
{
	if (!dev->mdev->issi)
		return query_srq_cmd(dev, srq, out);
	switch (srq->common.res) {
	case MLX5_RES_XSRQ:
		return query_xrc_srq_cmd(dev, srq, out);
	case MLX5_RES_XRQ:
		return query_xrq_cmd(dev, srq, out);
	default:
		return query_rmp_cmd(dev, srq, out);
	}
}

int mlx5_cmd_arm_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
		     u16 lwm, int is_srq)
{
	if (!dev->mdev->issi)
		return arm_srq_cmd(dev, srq, lwm, is_srq);
	switch (srq->common.res) {
	case MLX5_RES_XSRQ:
		return arm_xrc_srq_cmd(dev, srq, lwm);
	case MLX5_RES_XRQ:
		return arm_xrq_cmd(dev, srq, lwm);
	default:
		return arm_rmp_cmd(dev, srq, lwm);
	}
}

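/*
 * EQE notifier for SRQ events: the SRQ number is the low 24 bits of
 * qp_srq_n. The lookup takes a reference so the SRQ cannot be freed while
 * its ->event() callback runs; the reference is dropped immediately after.
 */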
static int srq_event_notifier(struct notifier_block *nb,
			      unsigned long type, void *data)
{
	struct mlx5_srq_table *table;
	struct mlx5_core_srq *srq;
	struct mlx5_eqe *eqe;
	u32 srqn;

	if (type != MLX5_EVENT_TYPE_SRQ_CATAS_ERROR &&
	    type != MLX5_EVENT_TYPE_SRQ_RQ_LIMIT)
		return NOTIFY_DONE;

	table = container_of(nb, struct mlx5_srq_table, nb);

	eqe = data;
	srqn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;

	xa_lock(&table->array);
	srq = xa_load(&table->array, srqn);
	if (srq)
		refcount_inc(&srq->common.refcount);
	xa_unlock(&table->array);

	if (!srq)
		return NOTIFY_OK;

	srq->event(srq, eqe->type);

	mlx5_core_res_put(&srq->common);

	return NOTIFY_OK;
}

int mlx5_init_srq_table(struct mlx5_ib_dev *dev)
{
	struct mlx5_srq_table *table = &dev->srq_table;

	memset(table, 0, sizeof(*table));
	xa_init_flags(&table->array, XA_FLAGS_LOCK_IRQ);

	table->nb.notifier_call = srq_event_notifier;
	mlx5_notifier_register(dev->mdev, &table->nb);

	return 0;
}

void mlx5_cleanup_srq_table(struct mlx5_ib_dev *dev)
{
	struct mlx5_srq_table *table = &dev->srq_table;

	mlx5_notifier_unregister(dev->mdev, &table->nb);
}