cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

rxe_qp.c (19233B)


// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <rdma/uverbs_ioctl.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"

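/* rxe_qp_chk_cap() validates the capabilities requested for a QP
 * against the rxe device's advertised limits; the receive queue
 * limits are only checked when the QP has no SRQ attached.
 */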
static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
			  int has_srq)
{
	if (cap->max_send_wr > rxe->attr.max_qp_wr) {
		pr_warn("invalid send wr = %d > %d\n",
			cap->max_send_wr, rxe->attr.max_qp_wr);
		goto err1;
	}

	if (cap->max_send_sge > rxe->attr.max_send_sge) {
		pr_warn("invalid send sge = %d > %d\n",
			cap->max_send_sge, rxe->attr.max_send_sge);
		goto err1;
	}

	if (!has_srq) {
		if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
			pr_warn("invalid recv wr = %d > %d\n",
				cap->max_recv_wr, rxe->attr.max_qp_wr);
			goto err1;
		}

		if (cap->max_recv_sge > rxe->attr.max_recv_sge) {
			pr_warn("invalid recv sge = %d > %d\n",
				cap->max_recv_sge, rxe->attr.max_recv_sge);
			goto err1;
		}
	}

	if (cap->max_inline_data > rxe->max_inline_data) {
		pr_warn("invalid max inline data = %d > %d\n",
			cap->max_inline_data, rxe->max_inline_data);
		goto err1;
	}

	return 0;

err1:
	return -EINVAL;
}

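/* rxe_qp_chk_init() screens the ib_qp_init_attr passed to create_qp:
 * only GSI/RC/UC/UD QP types are supported, both CQs must be present,
 * and at most one GSI QP may exist per port.
 */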
int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
{
	struct ib_qp_cap *cap = &init->cap;
	struct rxe_port *port;
	int port_num = init->port_num;

	switch (init->qp_type) {
	case IB_QPT_GSI:
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (!init->recv_cq || !init->send_cq) {
		pr_warn("missing cq\n");
		goto err1;
	}

	if (rxe_qp_chk_cap(rxe, cap, !!init->srq))
		goto err1;

	if (init->qp_type == IB_QPT_GSI) {
		if (!rdma_is_port_valid(&rxe->ib_dev, port_num)) {
			pr_warn("invalid port = %d\n", port_num);
			goto err1;
		}

		port = &rxe->port;

		if (port->qp_gsi_index) {
			pr_warn("GSI QP exists for port %d\n", port_num);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

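/* Responder resources: one resp_res slot per outstanding inbound
 * RDMA READ or atomic operation, sized by max_dest_rd_atomic in
 * rxe_qp_from_attr() below.
 */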
static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
{
	qp->resp.res_head = 0;
	qp->resp.res_tail = 0;
	qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);

	if (!qp->resp.resources)
		return -ENOMEM;

	return 0;
}

static void free_rd_atomic_resources(struct rxe_qp *qp)
{
	if (qp->resp.resources) {
		int i;

		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			struct resp_res *res = &qp->resp.resources[i];

			free_rd_atomic_resource(qp, res);
		}
		kfree(qp->resp.resources);
		qp->resp.resources = NULL;
	}
}

void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res)
{
	if (res->type == RXE_ATOMIC_MASK)
		kfree_skb(res->atomic.skb);
	res->type = 0;
}

static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
{
	int i;
	struct resp_res *res;

	if (qp->resp.resources) {
		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			res = &qp->resp.resources[i];
			free_rd_atomic_resource(qp, res);
		}
	}
}

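/* rxe_qp_init_misc() sets the state common to all QP types: the
 * signalling mode, the initial path MTU, the QP number (fixed at 1
 * for the GSI QP), and the ssn/skb_out counters.
 */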
static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
			     struct ib_qp_init_attr *init)
{
	struct rxe_port *port;
	u32 qpn;

	qp->sq_sig_type		= init->sq_sig_type;
	qp->attr.path_mtu	= 1;
	qp->mtu			= ib_mtu_enum_to_int(qp->attr.path_mtu);

	qpn			= qp->elem.index;
	port			= &rxe->port;

	switch (init->qp_type) {
	case IB_QPT_GSI:
		qp->ibqp.qp_num		= 1;
		port->qp_gsi_index	= qpn;
		qp->attr.port_num	= init->port_num;
		break;

	default:
		qp->ibqp.qp_num		= qpn;
		break;
	}

	spin_lock_init(&qp->state_lock);

	atomic_set(&qp->ssn, 0);
	atomic_set(&qp->skb_out, 0);
}

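/* rxe_qp_init_req() builds the send side of the QP: a kernel UDP
 * socket for outgoing packets, the send work queue (shared with
 * userspace via mmap when udata is present), and the requester and
 * completer tasks. RC QPs additionally get the RNR-NAK and
 * retransmit timers.
 */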
static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
			   struct ib_qp_init_attr *init, struct ib_udata *udata,
			   struct rxe_create_qp_resp __user *uresp)
{
	int err;
	int wqe_size;
	enum queue_type type;

	err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
	if (err < 0)
		return err;
	qp->sk->sk->sk_user_data = qp;

	/* pick a source UDP port number for this QP based on
	 * the source QPN. this spreads traffic for different QPs
	 * across different NIC RX queues (while using a single
	 * flow for a given QP to maintain packet order).
	 * the port number must be in the Dynamic Ports range
	 * (0xc000 - 0xffff).
	 */
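	/* RXE_ROCE_V2_SPORT is the 0xc000 base of that range, and the
	 * masked 14-bit hash adds at most 0x3fff, so the result always
	 * lands in 0xc000 - 0xffff.
	 */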
	qp->src_port = RXE_ROCE_V2_SPORT + (hash_32(qp_num(qp), 14) & 0x3fff);
	qp->sq.max_wr		= init->cap.max_send_wr;

	/* These caps are limited by rxe_qp_chk_cap() done by the caller */
	wqe_size = max_t(int, init->cap.max_send_sge * sizeof(struct ib_sge),
			 init->cap.max_inline_data);
	qp->sq.max_sge = init->cap.max_send_sge =
		wqe_size / sizeof(struct ib_sge);
	qp->sq.max_inline = init->cap.max_inline_data = wqe_size;
	wqe_size += sizeof(struct rxe_send_wqe);

	type = QUEUE_TYPE_FROM_CLIENT;
	qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr,
				wqe_size, type);
	if (!qp->sq.queue)
		return -ENOMEM;

	err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, udata,
			   qp->sq.queue->buf, qp->sq.queue->buf_size,
			   &qp->sq.queue->ip);

	if (err) {
		vfree(qp->sq.queue->buf);
		kfree(qp->sq.queue);
		qp->sq.queue = NULL;
		return err;
	}

	qp->req.wqe_index = queue_get_producer(qp->sq.queue,
					       QUEUE_TYPE_FROM_CLIENT);

	qp->req.state		= QP_STATE_RESET;
	qp->req.opcode		= -1;
	qp->comp.opcode		= -1;

	spin_lock_init(&qp->sq.sq_lock);
	skb_queue_head_init(&qp->req_pkts);

	rxe_init_task(rxe, &qp->req.task, qp,
		      rxe_requester, "req");
	rxe_init_task(rxe, &qp->comp.task, qp,
		      rxe_completer, "comp");

	qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
	if (init->qp_type == IB_QPT_RC) {
		timer_setup(&qp->rnr_nak_timer, rnr_nak_timer, 0);
		timer_setup(&qp->retrans_timer, retransmit_timer, 0);
	}
	return 0;
}

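/* rxe_qp_init_resp() builds the receive side: a receive work queue
 * (skipped when the QP uses an SRQ) and the responder task.
 */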
static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
			    struct ib_qp_init_attr *init,
			    struct ib_udata *udata,
			    struct rxe_create_qp_resp __user *uresp)
{
	int err;
	int wqe_size;
	enum queue_type type;

	if (!qp->srq) {
		qp->rq.max_wr		= init->cap.max_recv_wr;
		qp->rq.max_sge		= init->cap.max_recv_sge;

		wqe_size = rcv_wqe_size(qp->rq.max_sge);

		pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
			 qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);

		type = QUEUE_TYPE_FROM_CLIENT;
		qp->rq.queue = rxe_queue_init(rxe, &qp->rq.max_wr,
					wqe_size, type);
		if (!qp->rq.queue)
			return -ENOMEM;

		err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, udata,
				   qp->rq.queue->buf, qp->rq.queue->buf_size,
				   &qp->rq.queue->ip);
		if (err) {
			vfree(qp->rq.queue->buf);
			kfree(qp->rq.queue);
			qp->rq.queue = NULL;
			return err;
		}
	}

	spin_lock_init(&qp->rq.producer_lock);
	spin_lock_init(&qp->rq.consumer_lock);

	skb_queue_head_init(&qp->resp_pkts);

	rxe_init_task(rxe, &qp->resp.task, qp,
		      rxe_responder, "resp");

	qp->resp.opcode		= OPCODE_NONE;
	qp->resp.msn		= 0;
	qp->resp.state		= QP_STATE_RESET;

	return 0;
}

/* called by the create qp verb */
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
		     struct ib_qp_init_attr *init,
		     struct rxe_create_qp_resp __user *uresp,
		     struct ib_pd *ibpd,
		     struct ib_udata *udata)
{
	int err;
	struct rxe_cq *rcq = to_rcq(init->recv_cq);
	struct rxe_cq *scq = to_rcq(init->send_cq);
	struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;

	rxe_get(pd);
	rxe_get(rcq);
	rxe_get(scq);
	if (srq)
		rxe_get(srq);

	qp->pd			= pd;
	qp->rcq			= rcq;
	qp->scq			= scq;
	qp->srq			= srq;

	atomic_inc(&rcq->num_wq);
	atomic_inc(&scq->num_wq);

	rxe_qp_init_misc(rxe, qp, init);

	err = rxe_qp_init_req(rxe, qp, init, udata, uresp);
	if (err)
		goto err1;

	err = rxe_qp_init_resp(rxe, qp, init, udata, uresp);
	if (err)
		goto err2;

	qp->attr.qp_state = IB_QPS_RESET;
	qp->valid = 1;

	return 0;

err2:
	rxe_queue_cleanup(qp->sq.queue);
	qp->sq.queue = NULL;
err1:
	atomic_dec(&rcq->num_wq);
	atomic_dec(&scq->num_wq);

	qp->pd = NULL;
	qp->rcq = NULL;
	qp->scq = NULL;
	qp->srq = NULL;

	if (srq)
		rxe_put(srq);
	rxe_put(scq);
	rxe_put(rcq);
	rxe_put(pd);

	return err;
}

/* called by the query qp verb */
int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
{
	init->event_handler		= qp->ibqp.event_handler;
	init->qp_context		= qp->ibqp.qp_context;
	init->send_cq			= qp->ibqp.send_cq;
	init->recv_cq			= qp->ibqp.recv_cq;
	init->srq			= qp->ibqp.srq;

	init->cap.max_send_wr		= qp->sq.max_wr;
	init->cap.max_send_sge		= qp->sq.max_sge;
	init->cap.max_inline_data	= qp->sq.max_inline;

	if (!qp->srq) {
		init->cap.max_recv_wr		= qp->rq.max_wr;
		init->cap.max_recv_sge		= qp->rq.max_sge;
	}

	init->sq_sig_type		= qp->sq_sig_type;

	init->qp_type			= qp->ibqp.qp_type;
	init->port_num			= 1;

	return 0;
}

/* called by the modify qp verb, this routine checks all the parameters before
 * making any changes
 */
int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
		    struct ib_qp_attr *attr, int mask)
{
	enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
					attr->cur_qp_state : qp->attr.qp_state;
	enum ib_qp_state new_state = (mask & IB_QP_STATE) ?
					attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask)) {
		pr_warn("invalid mask or state for qp\n");
		goto err1;
	}

	if (mask & IB_QP_STATE) {
		if (cur_state == IB_QPS_SQD) {
			if (qp->req.state == QP_STATE_DRAIN &&
			    new_state != IB_QPS_ERR)
				goto err1;
		}
	}

	if (mask & IB_QP_PORT) {
		if (!rdma_is_port_valid(&rxe->ib_dev, attr->port_num)) {
			pr_warn("invalid port %d\n", attr->port_num);
			goto err1;
		}
	}

	if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq))
		goto err1;

	if (mask & IB_QP_AV && rxe_av_chk_attr(rxe, &attr->ah_attr))
		goto err1;

	if (mask & IB_QP_ALT_PATH) {
		if (rxe_av_chk_attr(rxe, &attr->alt_ah_attr))
			goto err1;
		if (!rdma_is_port_valid(&rxe->ib_dev, attr->alt_port_num)) {
			pr_warn("invalid alt port %d\n", attr->alt_port_num);
			goto err1;
		}
		if (attr->alt_timeout > 31) {
			pr_warn("invalid QP alt timeout %d > 31\n",
				attr->alt_timeout);
			goto err1;
		}
	}

	if (mask & IB_QP_PATH_MTU) {
		struct rxe_port *port = &rxe->port;

		enum ib_mtu max_mtu = port->attr.max_mtu;
		enum ib_mtu mtu = attr->path_mtu;

		if (mtu > max_mtu) {
			pr_debug("invalid mtu (%d) > (%d)\n",
				 ib_mtu_enum_to_int(mtu),
				 ib_mtu_enum_to_int(max_mtu));
			goto err1;
		}
	}

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) {
			pr_warn("invalid max_rd_atomic %d > %d\n",
				attr->max_rd_atomic,
				rxe->attr.max_qp_rd_atom);
			goto err1;
		}
	}

	if (mask & IB_QP_TIMEOUT) {
		if (attr->timeout > 31) {
			pr_warn("invalid QP timeout %d > 31\n",
				attr->timeout);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

/* move the qp to the reset state */
static void rxe_qp_reset(struct rxe_qp *qp)
{
	/* stop tasks from running */
	rxe_disable_task(&qp->resp.task);

	/* stop request/comp */
	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_disable_task(&qp->comp.task);
		rxe_disable_task(&qp->req.task);
	}

	/* move qp to the reset state */
	qp->req.state = QP_STATE_RESET;
	qp->resp.state = QP_STATE_RESET;

	/* let state machines reset themselves, draining work and packet
	 * queues etc.
	 */
	__rxe_do_task(&qp->resp.task);

	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
		rxe_queue_reset(qp->sq.queue);
	}

	/* cleanup attributes */
	atomic_set(&qp->ssn, 0);
	qp->req.opcode = -1;
	qp->req.need_retry = 0;
	qp->req.noack_pkts = 0;
	qp->resp.msn = 0;
	qp->resp.opcode = -1;
	qp->resp.drop_msg = 0;
	qp->resp.goto_error = 0;
	qp->resp.sent_psn_nak = 0;

	if (qp->resp.mr) {
		rxe_put(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	cleanup_rd_atomic_resources(qp);

	/* reenable tasks */
	rxe_enable_task(&qp->resp.task);

	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_enable_task(&qp->comp.task);

		rxe_enable_task(&qp->req.task);
	}
}

/* drain the send queue */
static void rxe_qp_drain(struct rxe_qp *qp)
{
	if (qp->sq.queue) {
		if (qp->req.state != QP_STATE_DRAINED) {
			qp->req.state = QP_STATE_DRAIN;
			if (qp_type(qp) == IB_QPT_RC)
				rxe_run_task(&qp->comp.task, 1);
			else
				__rxe_do_task(&qp->comp.task);
			rxe_run_task(&qp->req.task, 1);
		}
	}
}

/* move the qp to the error state */
void rxe_qp_error(struct rxe_qp *qp)
{
	qp->req.state = QP_STATE_ERROR;
	qp->resp.state = QP_STATE_ERROR;
	qp->attr.qp_state = IB_QPS_ERR;

	/* drain work and packet queues */
	rxe_run_task(&qp->resp.task, 1);

	if (qp_type(qp) == IB_QPT_RC)
		rxe_run_task(&qp->comp.task, 1);
	else
		__rxe_do_task(&qp->comp.task);
	rxe_run_task(&qp->req.task, 1);
}

/* called by the modify qp verb */
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
		     struct ib_udata *udata)
{
	int err;

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		int max_rd_atomic = attr->max_rd_atomic ?
			roundup_pow_of_two(attr->max_rd_atomic) : 0;

		qp->attr.max_rd_atomic = max_rd_atomic;
		atomic_set(&qp->req.rd_atomic, max_rd_atomic);
	}

	if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		int max_dest_rd_atomic = attr->max_dest_rd_atomic ?
			roundup_pow_of_two(attr->max_dest_rd_atomic) : 0;

		qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;

		free_rd_atomic_resources(qp);

		err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
		if (err)
			return err;
	}

	if (mask & IB_QP_CUR_STATE)
		qp->attr.cur_qp_state = attr->qp_state;

	if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
		qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;

	if (mask & IB_QP_ACCESS_FLAGS)
		qp->attr.qp_access_flags = attr->qp_access_flags;

	if (mask & IB_QP_PKEY_INDEX)
		qp->attr.pkey_index = attr->pkey_index;

	if (mask & IB_QP_PORT)
		qp->attr.port_num = attr->port_num;

	if (mask & IB_QP_QKEY)
		qp->attr.qkey = attr->qkey;

	if (mask & IB_QP_AV)
		rxe_init_av(&attr->ah_attr, &qp->pri_av);

	if (mask & IB_QP_ALT_PATH) {
		rxe_init_av(&attr->alt_ah_attr, &qp->alt_av);
		qp->attr.alt_port_num = attr->alt_port_num;
		qp->attr.alt_pkey_index = attr->alt_pkey_index;
		qp->attr.alt_timeout = attr->alt_timeout;
	}

	if (mask & IB_QP_PATH_MTU) {
		qp->attr.path_mtu = attr->path_mtu;
		qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
	}

	if (mask & IB_QP_TIMEOUT) {
		qp->attr.timeout = attr->timeout;
		if (attr->timeout == 0) {
			qp->qp_timeout_jiffies = 0;
		} else {
			/* According to the spec, timeout = 4.096 * 2 ^ attr->timeout [us] */
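			/* e.g. attr->timeout = 14 gives 4096ns << 14,
			 * i.e. roughly 67.1 ms
			 */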
			int j = nsecs_to_jiffies(4096ULL << attr->timeout);

			qp->qp_timeout_jiffies = j ? j : 1;
		}
	}

	if (mask & IB_QP_RETRY_CNT) {
		qp->attr.retry_cnt = attr->retry_cnt;
		qp->comp.retry_cnt = attr->retry_cnt;
		pr_debug("qp#%d set retry count = %d\n", qp_num(qp),
			 attr->retry_cnt);
	}

	if (mask & IB_QP_RNR_RETRY) {
		qp->attr.rnr_retry = attr->rnr_retry;
		qp->comp.rnr_retry = attr->rnr_retry;
		pr_debug("qp#%d set rnr retry count = %d\n", qp_num(qp),
			 attr->rnr_retry);
	}

	if (mask & IB_QP_RQ_PSN) {
		qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
		qp->resp.psn = qp->attr.rq_psn;
		pr_debug("qp#%d set resp psn = 0x%x\n", qp_num(qp),
			 qp->resp.psn);
	}

	if (mask & IB_QP_MIN_RNR_TIMER) {
		qp->attr.min_rnr_timer = attr->min_rnr_timer;
		pr_debug("qp#%d set min rnr timer = 0x%x\n", qp_num(qp),
			 attr->min_rnr_timer);
	}

	if (mask & IB_QP_SQ_PSN) {
		qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
		qp->req.psn = qp->attr.sq_psn;
		qp->comp.psn = qp->attr.sq_psn;
		pr_debug("qp#%d set req psn = 0x%x\n", qp_num(qp), qp->req.psn);
	}

	if (mask & IB_QP_PATH_MIG_STATE)
		qp->attr.path_mig_state = attr->path_mig_state;

	if (mask & IB_QP_DEST_QPN)
		qp->attr.dest_qp_num = attr->dest_qp_num;

	if (mask & IB_QP_STATE) {
		qp->attr.qp_state = attr->qp_state;

		switch (attr->qp_state) {
		case IB_QPS_RESET:
			pr_debug("qp#%d state -> RESET\n", qp_num(qp));
			rxe_qp_reset(qp);
			break;

		case IB_QPS_INIT:
			pr_debug("qp#%d state -> INIT\n", qp_num(qp));
			qp->req.state = QP_STATE_INIT;
			qp->resp.state = QP_STATE_INIT;
			break;

		case IB_QPS_RTR:
			pr_debug("qp#%d state -> RTR\n", qp_num(qp));
			qp->resp.state = QP_STATE_READY;
			break;

		case IB_QPS_RTS:
			pr_debug("qp#%d state -> RTS\n", qp_num(qp));
			qp->req.state = QP_STATE_READY;
			break;

		case IB_QPS_SQD:
			pr_debug("qp#%d state -> SQD\n", qp_num(qp));
			rxe_qp_drain(qp);
			break;

		case IB_QPS_SQE:
			pr_warn("qp#%d state -> SQE !!?\n", qp_num(qp));
			/* Not possible from modify_qp. */
			break;

		case IB_QPS_ERR:
			pr_debug("qp#%d state -> ERR\n", qp_num(qp));
			rxe_qp_error(qp);
			break;
		}
	}

	return 0;
}

/* called by the query qp verb */
int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
{
	*attr = qp->attr;

	attr->rq_psn				= qp->resp.psn;
	attr->sq_psn				= qp->req.psn;

	attr->cap.max_send_wr			= qp->sq.max_wr;
	attr->cap.max_send_sge			= qp->sq.max_sge;
	attr->cap.max_inline_data		= qp->sq.max_inline;

	if (!qp->srq) {
		attr->cap.max_recv_wr		= qp->rq.max_wr;
		attr->cap.max_recv_sge		= qp->rq.max_sge;
	}

	rxe_av_to_attr(&qp->pri_av, &attr->ah_attr);
	rxe_av_to_attr(&qp->alt_av, &attr->alt_ah_attr);

	if (qp->req.state == QP_STATE_DRAIN) {
		attr->sq_draining = 1;
		/* applications that get this state
		 * typically spin on it. yield the
		 * processor
		 */
		cond_resched();
	} else {
		attr->sq_draining = 0;
	}

	pr_debug("attr->sq_draining = %d\n", attr->sq_draining);

	return 0;
}

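/* rxe_qp_chk_destroy() runs before destroy_qp and rejects the destroy
 * while the QP is still attached to a multicast group.
 */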
int rxe_qp_chk_destroy(struct rxe_qp *qp)
{
	/* See IBA o10-2.2.3
	 * An attempt to destroy a QP while attached to a mcast group
	 * will fail immediately.
	 */
	if (atomic_read(&qp->mcg_num)) {
		pr_debug("Attempt to destroy QP while attached to multicast group\n");
		return -EBUSY;
	}

	return 0;
}

/* called when the last reference to the qp is dropped */
static void rxe_qp_do_cleanup(struct work_struct *work)
{
	struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);

	qp->valid = 0;
	qp->qp_timeout_jiffies = 0;
	rxe_cleanup_task(&qp->resp.task);

	if (qp_type(qp) == IB_QPT_RC) {
		del_timer_sync(&qp->retrans_timer);
		del_timer_sync(&qp->rnr_nak_timer);
	}

	rxe_cleanup_task(&qp->req.task);
	rxe_cleanup_task(&qp->comp.task);

	/* flush out any receive wr's or pending requests */
	__rxe_do_task(&qp->req.task);
	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
	}

	if (qp->sq.queue)
		rxe_queue_cleanup(qp->sq.queue);

	if (qp->srq)
		rxe_put(qp->srq);

	if (qp->rq.queue)
		rxe_queue_cleanup(qp->rq.queue);

	/* check for NULL before touching num_wq; the atomic_dec must not
	 * dereference a cq that was never attached
	 */
	if (qp->scq) {
		atomic_dec(&qp->scq->num_wq);
		rxe_put(qp->scq);
	}

	if (qp->rcq) {
		atomic_dec(&qp->rcq->num_wq);
		rxe_put(qp->rcq);
	}

	if (qp->pd)
		rxe_put(qp->pd);

	if (qp->resp.mr)
		rxe_put(qp->resp.mr);

	if (qp_type(qp) == IB_QPT_RC)
		sk_dst_reset(qp->sk->sk);

	free_rd_atomic_resources(qp);

	kernel_sock_shutdown(qp->sk, SHUT_RDWR);
	sock_release(qp->sk);
}

/* called when the last reference to the qp is dropped */
void rxe_qp_cleanup(struct rxe_pool_elem *elem)
{
	struct rxe_qp *qp = container_of(elem, typeof(*qp), elem);

	execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
}