cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

rw.c (21104B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016 HGST, a Western Digital Company.
 */
#include <linux/memremap.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/pci-p2pdma.h>
#include <rdma/mr_pool.h>
#include <rdma/rw.h>

enum {
	RDMA_RW_SINGLE_WR,
	RDMA_RW_MULTI_WR,
	RDMA_RW_MR,
	RDMA_RW_SIG_MR,
};

static bool rdma_rw_force_mr;
module_param_named(force_mr, rdma_rw_force_mr, bool, 0);
MODULE_PARM_DESC(force_mr, "Force usage of MRs for RDMA READ/WRITE operations");
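
/*
 * Example (illustrative, not part of the original file): rw.c is built into
 * the ib_core module, so the debug knob above is assumed to be settable at
 * load time, e.g. "modprobe ib_core force_mr=1". With permission 0 the
 * parameter is not exposed in sysfs afterwards.
 */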

/*
 * Report whether memory registration should be used. Memory registration must
 * be used for iWARP devices because of iWARP-specific limitations. Memory
 * registration is also enabled if registering memory might yield better
 * performance than using multiple SGE entries; see rdma_rw_io_needs_mr().
 */
static inline bool rdma_rw_can_use_mr(struct ib_device *dev, u32 port_num)
{
	if (rdma_protocol_iwarp(dev, port_num))
		return true;
	if (dev->attrs.max_sgl_rd)
		return true;
	if (unlikely(rdma_rw_force_mr))
		return true;
	return false;
}

/*
 * Check if the device will use memory registration for this RW operation.
 * For RDMA READs we must use MRs on iWARP and can optionally use them as an
 * optimization otherwise.  Additionally we have a debug option to force usage
 * of MRs to help test this code path.
 */
static inline bool rdma_rw_io_needs_mr(struct ib_device *dev, u32 port_num,
		enum dma_data_direction dir, int dma_nents)
{
	if (dir == DMA_FROM_DEVICE) {
		if (rdma_protocol_iwarp(dev, port_num))
			return true;
		if (dev->attrs.max_sgl_rd && dma_nents > dev->attrs.max_sgl_rd)
			return true;
	}
	if (unlikely(rdma_rw_force_mr))
		return true;
	return false;
}

static inline u32 rdma_rw_fr_page_list_len(struct ib_device *dev,
					   bool pi_support)
{
	u32 max_pages;

	if (pi_support)
		max_pages = dev->attrs.max_pi_fast_reg_page_list_len;
	else
		max_pages = dev->attrs.max_fast_reg_page_list_len;

	/* arbitrary limit to avoid allocating gigantic resources */
	return min_t(u32, max_pages, 256);
}

static inline int rdma_rw_inv_key(struct rdma_rw_reg_ctx *reg)
{
	int count = 0;

	if (reg->mr->need_inval) {
		reg->inv_wr.opcode = IB_WR_LOCAL_INV;
		reg->inv_wr.ex.invalidate_rkey = reg->mr->lkey;
		reg->inv_wr.next = &reg->reg_wr.wr;
		count++;
	} else {
		reg->inv_wr.next = NULL;
	}

	return count;
}

/* Caller must have zero-initialized *reg. */
static int rdma_rw_init_one_mr(struct ib_qp *qp, u32 port_num,
		struct rdma_rw_reg_ctx *reg, struct scatterlist *sg,
		u32 sg_cnt, u32 offset)
{
	u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device,
						    qp->integrity_en);
	u32 nents = min(sg_cnt, pages_per_mr);
	int count = 0, ret;

	reg->mr = ib_mr_pool_get(qp, &qp->rdma_mrs);
	if (!reg->mr)
		return -EAGAIN;

	count += rdma_rw_inv_key(reg);

	ret = ib_map_mr_sg(reg->mr, sg, nents, &offset, PAGE_SIZE);
	if (ret < 0 || ret < nents) {
		ib_mr_pool_put(qp, &qp->rdma_mrs, reg->mr);
		return -EINVAL;
	}

	reg->reg_wr.wr.opcode = IB_WR_REG_MR;
	reg->reg_wr.mr = reg->mr;
	reg->reg_wr.access = IB_ACCESS_LOCAL_WRITE;
	if (rdma_protocol_iwarp(qp->device, port_num))
		reg->reg_wr.access |= IB_ACCESS_REMOTE_WRITE;
	count++;

	reg->sge.addr = reg->mr->iova;
	reg->sge.length = reg->mr->length;
	return count;
}

static int rdma_rw_init_mr_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u32 port_num, struct scatterlist *sg, u32 sg_cnt, u32 offset,
		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
	struct rdma_rw_reg_ctx *prev = NULL;
	u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device,
						    qp->integrity_en);
	int i, j, ret = 0, count = 0;

	ctx->nr_ops = DIV_ROUND_UP(sg_cnt, pages_per_mr);
	ctx->reg = kcalloc(ctx->nr_ops, sizeof(*ctx->reg), GFP_KERNEL);
	if (!ctx->reg) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < ctx->nr_ops; i++) {
		struct rdma_rw_reg_ctx *reg = &ctx->reg[i];
		u32 nents = min(sg_cnt, pages_per_mr);

		ret = rdma_rw_init_one_mr(qp, port_num, reg, sg, sg_cnt,
				offset);
		if (ret < 0)
			goto out_free;
		count += ret;

		if (prev) {
			if (reg->mr->need_inval)
				prev->wr.wr.next = &reg->inv_wr;
			else
				prev->wr.wr.next = &reg->reg_wr.wr;
		}

		reg->reg_wr.wr.next = &reg->wr.wr;

		reg->wr.wr.sg_list = &reg->sge;
		reg->wr.wr.num_sge = 1;
		reg->wr.remote_addr = remote_addr;
		reg->wr.rkey = rkey;
		if (dir == DMA_TO_DEVICE) {
			reg->wr.wr.opcode = IB_WR_RDMA_WRITE;
		} else if (!rdma_cap_read_inv(qp->device, port_num)) {
			reg->wr.wr.opcode = IB_WR_RDMA_READ;
		} else {
			reg->wr.wr.opcode = IB_WR_RDMA_READ_WITH_INV;
			reg->wr.wr.ex.invalidate_rkey = reg->mr->lkey;
		}
		count++;

		remote_addr += reg->sge.length;
		sg_cnt -= nents;
		for (j = 0; j < nents; j++)
			sg = sg_next(sg);
		prev = reg;
		offset = 0;
	}

	if (prev)
		prev->wr.wr.next = NULL;

	ctx->type = RDMA_RW_MR;
	return count;

out_free:
	while (--i >= 0)
		ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->reg[i].mr);
	kfree(ctx->reg);
out:
	return ret;
}

static int rdma_rw_init_map_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		struct scatterlist *sg, u32 sg_cnt, u32 offset,
		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
	u32 max_sge = dir == DMA_TO_DEVICE ? qp->max_write_sge :
		      qp->max_read_sge;
	struct ib_sge *sge;
	u32 total_len = 0, i, j;

	ctx->nr_ops = DIV_ROUND_UP(sg_cnt, max_sge);

	ctx->map.sges = sge = kcalloc(sg_cnt, sizeof(*sge), GFP_KERNEL);
	if (!ctx->map.sges)
		goto out;

	ctx->map.wrs = kcalloc(ctx->nr_ops, sizeof(*ctx->map.wrs), GFP_KERNEL);
	if (!ctx->map.wrs)
		goto out_free_sges;

	for (i = 0; i < ctx->nr_ops; i++) {
		struct ib_rdma_wr *rdma_wr = &ctx->map.wrs[i];
		u32 nr_sge = min(sg_cnt, max_sge);

		if (dir == DMA_TO_DEVICE)
			rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
		else
			rdma_wr->wr.opcode = IB_WR_RDMA_READ;
		rdma_wr->remote_addr = remote_addr + total_len;
		rdma_wr->rkey = rkey;
		rdma_wr->wr.num_sge = nr_sge;
		rdma_wr->wr.sg_list = sge;

		for (j = 0; j < nr_sge; j++, sg = sg_next(sg)) {
			sge->addr = sg_dma_address(sg) + offset;
			sge->length = sg_dma_len(sg) - offset;
			sge->lkey = qp->pd->local_dma_lkey;

			total_len += sge->length;
			sge++;
			sg_cnt--;
			offset = 0;
		}

		rdma_wr->wr.next = i + 1 < ctx->nr_ops ?
			&ctx->map.wrs[i + 1].wr : NULL;
	}

	ctx->type = RDMA_RW_MULTI_WR;
	return ctx->nr_ops;

out_free_sges:
	kfree(ctx->map.sges);
out:
	return -ENOMEM;
}

static int rdma_rw_init_single_wr(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		struct scatterlist *sg, u32 offset, u64 remote_addr, u32 rkey,
		enum dma_data_direction dir)
{
	struct ib_rdma_wr *rdma_wr = &ctx->single.wr;

	ctx->nr_ops = 1;

	ctx->single.sge.lkey = qp->pd->local_dma_lkey;
	ctx->single.sge.addr = sg_dma_address(sg) + offset;
	ctx->single.sge.length = sg_dma_len(sg) - offset;

	memset(rdma_wr, 0, sizeof(*rdma_wr));
	if (dir == DMA_TO_DEVICE)
		rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
	else
		rdma_wr->wr.opcode = IB_WR_RDMA_READ;
	rdma_wr->wr.sg_list = &ctx->single.sge;
	rdma_wr->wr.num_sge = 1;
	rdma_wr->remote_addr = remote_addr;
	rdma_wr->rkey = rkey;

	ctx->type = RDMA_RW_SINGLE_WR;
	return 1;
}

static void rdma_rw_unmap_sg(struct ib_device *dev, struct scatterlist *sg,
			     u32 sg_cnt, enum dma_data_direction dir)
{
	if (is_pci_p2pdma_page(sg_page(sg)))
		pci_p2pdma_unmap_sg(dev->dma_device, sg, sg_cnt, dir);
	else
		ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
}

static int rdma_rw_map_sgtable(struct ib_device *dev, struct sg_table *sgt,
			       enum dma_data_direction dir)
{
	int nents;

	if (is_pci_p2pdma_page(sg_page(sgt->sgl))) {
		if (WARN_ON_ONCE(ib_uses_virt_dma(dev)))
			return 0;
		nents = pci_p2pdma_map_sg(dev->dma_device, sgt->sgl,
					  sgt->orig_nents, dir);
		if (!nents)
			return -EIO;
		sgt->nents = nents;
		return 0;
	}
	return ib_dma_map_sgtable_attrs(dev, sgt, dir, 0);
}

/**
 * rdma_rw_ctx_init - initialize an RDMA READ/WRITE context
 * @ctx:	context to initialize
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @sg:		scatterlist to READ/WRITE from/to
 * @sg_cnt:	number of entries in @sg
 * @sg_offset:	current byte offset into @sg
 * @remote_addr: remote address to read/write (relative to @rkey)
 * @rkey:	remote key to operate on
 * @dir:	%DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 *
 * Returns the number of WQEs that will be needed on the send queue if
 * successful, or a negative error code.
 */
int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num,
		struct scatterlist *sg, u32 sg_cnt, u32 sg_offset,
		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
	struct ib_device *dev = qp->pd->device;
	struct sg_table sgt = {
		.sgl = sg,
		.orig_nents = sg_cnt,
	};
	int ret;

	ret = rdma_rw_map_sgtable(dev, &sgt, dir);
	if (ret)
		return ret;
	sg_cnt = sgt.nents;

	/*
	 * Skip to the S/G entry that sg_offset falls into:
	 */
	for (;;) {
		u32 len = sg_dma_len(sg);

		if (sg_offset < len)
			break;

		sg = sg_next(sg);
		sg_offset -= len;
		sg_cnt--;
	}

	ret = -EIO;
	if (WARN_ON_ONCE(sg_cnt == 0))
		goto out_unmap_sg;

	if (rdma_rw_io_needs_mr(qp->device, port_num, dir, sg_cnt)) {
		ret = rdma_rw_init_mr_wrs(ctx, qp, port_num, sg, sg_cnt,
				sg_offset, remote_addr, rkey, dir);
	} else if (sg_cnt > 1) {
		ret = rdma_rw_init_map_wrs(ctx, qp, sg, sg_cnt, sg_offset,
				remote_addr, rkey, dir);
	} else {
		ret = rdma_rw_init_single_wr(ctx, qp, sg, sg_offset,
				remote_addr, rkey, dir);
	}

	if (ret < 0)
		goto out_unmap_sg;
	return ret;

out_unmap_sg:
	rdma_rw_unmap_sg(dev, sgt.sgl, sgt.orig_nents, dir);
	return ret;
}
EXPORT_SYMBOL(rdma_rw_ctx_init);
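
/*
 * Example (illustrative sketch, not part of the original file): a typical
 * ULP lifecycle for one RDMA READ using the API above. The parameters qp,
 * port_num, sg, sg_cnt, remote_addr, rkey and cqe are hypothetical caller
 * state; error handling is abbreviated.
 */
#if 0
static int example_rdma_read(struct ib_qp *qp, u32 port_num,
		struct scatterlist *sg, u32 sg_cnt, u64 remote_addr, u32 rkey,
		struct ib_cqe *cqe)
{
	struct rdma_rw_ctx ctx;
	int ret;

	/* Map the local buffers and build the WR chain. */
	ret = rdma_rw_ctx_init(&ctx, qp, port_num, sg, sg_cnt, 0,
			remote_addr, rkey, DMA_FROM_DEVICE);
	if (ret < 0)
		return ret;

	/* Post the chain; @cqe->done runs when the last WR completes. */
	ret = rdma_rw_ctx_post(&ctx, qp, port_num, cqe, NULL);
	if (ret) {
		rdma_rw_ctx_destroy(&ctx, qp, port_num, sg, sg_cnt,
				DMA_FROM_DEVICE);
		return ret;
	}

	/*
	 * Once the completion for @cqe has been reaped, the caller releases
	 * the MRs and DMA mappings with rdma_rw_ctx_destroy().
	 */
	return 0;
}
#endif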

/**
 * rdma_rw_ctx_signature_init - initialize a RW context with signature offload
 * @ctx:	context to initialize
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @sg:		scatterlist to READ/WRITE from/to
 * @sg_cnt:	number of entries in @sg
 * @prot_sg:	scatterlist to READ/WRITE protection information from/to
 * @prot_sg_cnt: number of entries in @prot_sg
 * @sig_attrs:	signature offloading algorithms
 * @remote_addr: remote address to read/write (relative to @rkey)
 * @rkey:	remote key to operate on
 * @dir:	%DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 *
 * Returns the number of WQEs that will be needed on the send queue if
 * successful, or a negative error code.
 */
int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u32 port_num, struct scatterlist *sg, u32 sg_cnt,
		struct scatterlist *prot_sg, u32 prot_sg_cnt,
		struct ib_sig_attrs *sig_attrs,
		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
	struct ib_device *dev = qp->pd->device;
	u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device,
						    qp->integrity_en);
	struct sg_table sgt = {
		.sgl = sg,
		.orig_nents = sg_cnt,
	};
	struct sg_table prot_sgt = {
		.sgl = prot_sg,
		.orig_nents = prot_sg_cnt,
	};
	struct ib_rdma_wr *rdma_wr;
	int count = 0, ret;

	if (sg_cnt > pages_per_mr || prot_sg_cnt > pages_per_mr) {
		pr_err("SG count too large: sg_cnt=%u, prot_sg_cnt=%u, pages_per_mr=%u\n",
		       sg_cnt, prot_sg_cnt, pages_per_mr);
		return -EINVAL;
	}

	ret = rdma_rw_map_sgtable(dev, &sgt, dir);
	if (ret)
		return ret;

	if (prot_sg_cnt) {
		ret = rdma_rw_map_sgtable(dev, &prot_sgt, dir);
		if (ret)
			goto out_unmap_sg;
	}

	ctx->type = RDMA_RW_SIG_MR;
	ctx->nr_ops = 1;
	ctx->reg = kzalloc(sizeof(*ctx->reg), GFP_KERNEL);
	if (!ctx->reg) {
		ret = -ENOMEM;
		goto out_unmap_prot_sg;
	}

	ctx->reg->mr = ib_mr_pool_get(qp, &qp->sig_mrs);
	if (!ctx->reg->mr) {
		ret = -EAGAIN;
		goto out_free_ctx;
	}

	count += rdma_rw_inv_key(ctx->reg);

	memcpy(ctx->reg->mr->sig_attrs, sig_attrs, sizeof(struct ib_sig_attrs));

	ret = ib_map_mr_sg_pi(ctx->reg->mr, sg, sgt.nents, NULL, prot_sg,
			      prot_sgt.nents, NULL, SZ_4K);
	if (unlikely(ret)) {
		pr_err("failed to map PI sg (%u)\n",
		       sgt.nents + prot_sgt.nents);
		goto out_destroy_sig_mr;
	}

	ctx->reg->reg_wr.wr.opcode = IB_WR_REG_MR_INTEGRITY;
	ctx->reg->reg_wr.wr.wr_cqe = NULL;
	ctx->reg->reg_wr.wr.num_sge = 0;
	ctx->reg->reg_wr.wr.send_flags = 0;
	ctx->reg->reg_wr.access = IB_ACCESS_LOCAL_WRITE;
	if (rdma_protocol_iwarp(qp->device, port_num))
		ctx->reg->reg_wr.access |= IB_ACCESS_REMOTE_WRITE;
	ctx->reg->reg_wr.mr = ctx->reg->mr;
	ctx->reg->reg_wr.key = ctx->reg->mr->lkey;
	count++;

	ctx->reg->sge.addr = ctx->reg->mr->iova;
	ctx->reg->sge.length = ctx->reg->mr->length;
	if (sig_attrs->wire.sig_type == IB_SIG_TYPE_NONE)
		ctx->reg->sge.length -= ctx->reg->mr->sig_attrs->meta_length;

	rdma_wr = &ctx->reg->wr;
	rdma_wr->wr.sg_list = &ctx->reg->sge;
	rdma_wr->wr.num_sge = 1;
	rdma_wr->remote_addr = remote_addr;
	rdma_wr->rkey = rkey;
	if (dir == DMA_TO_DEVICE)
		rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
	else
		rdma_wr->wr.opcode = IB_WR_RDMA_READ;
	ctx->reg->reg_wr.wr.next = &rdma_wr->wr;
	count++;

	return count;

out_destroy_sig_mr:
	ib_mr_pool_put(qp, &qp->sig_mrs, ctx->reg->mr);
out_free_ctx:
	kfree(ctx->reg);
out_unmap_prot_sg:
	if (prot_sgt.nents)
		rdma_rw_unmap_sg(dev, prot_sgt.sgl, prot_sgt.orig_nents, dir);
out_unmap_sg:
	rdma_rw_unmap_sg(dev, sgt.sgl, sgt.orig_nents, dir);
	return ret;
}
EXPORT_SYMBOL(rdma_rw_ctx_signature_init);
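
/*
 * Example (illustrative sketch, not part of the original file): filling in a
 * minimal struct ib_sig_attrs for T10-DIF CRC protection on the wire and no
 * protection in local memory, roughly as block-layer ULPs do before calling
 * rdma_rw_ctx_signature_init(). The field values (512-byte protection
 * interval, LBA-seeded reference tag) are assumptions for illustration.
 */
#if 0
static void example_sig_attrs(struct ib_sig_attrs *sig_attrs, u32 lba)
{
	memset(sig_attrs, 0, sizeof(*sig_attrs));
	sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
	sig_attrs->wire.sig_type = IB_SIG_TYPE_T10_DIF;
	sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
	sig_attrs->wire.sig.dif.pi_interval = 512;
	sig_attrs->wire.sig.dif.ref_tag = lba;
	sig_attrs->wire.sig.dif.ref_remap = true;
	sig_attrs->check_mask = IB_SIG_CHECK_GUARD | IB_SIG_CHECK_REFTAG;
}
#endif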

/*
 * Now that we are going to post the WRs we can update the lkey and need_inval
 * state on the MRs.  If we were doing this at init time, we would get double
 * or missing invalidations if a context was initialized but not actually
 * posted.
 */
static void rdma_rw_update_lkey(struct rdma_rw_reg_ctx *reg, bool need_inval)
{
	reg->mr->need_inval = need_inval;
	ib_update_fast_reg_key(reg->mr, ib_inc_rkey(reg->mr->lkey));
	reg->reg_wr.key = reg->mr->lkey;
	reg->sge.lkey = reg->mr->lkey;
}

/**
 * rdma_rw_ctx_wrs - return chain of WRs for an RDMA READ or WRITE operation
 * @ctx:	context to operate on
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @cqe:	completion queue entry for the last WR
 * @chain_wr:	WR to append to the posted chain
 *
 * Return the WR chain for the set of RDMA READ/WRITE operations described by
 * @ctx, as well as any memory registration operations needed.  If @chain_wr
 * is non-NULL the WR it points to will be appended to the chain of WRs posted.
 * If @chain_wr is not set, @cqe must be set so that the caller gets a
 * completion notification.
 */
struct ib_send_wr *rdma_rw_ctx_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u32 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
{
	struct ib_send_wr *first_wr, *last_wr;
	int i;

	switch (ctx->type) {
	case RDMA_RW_SIG_MR:
	case RDMA_RW_MR:
		for (i = 0; i < ctx->nr_ops; i++) {
			rdma_rw_update_lkey(&ctx->reg[i],
				ctx->reg[i].wr.wr.opcode !=
					IB_WR_RDMA_READ_WITH_INV);
		}

		if (ctx->reg[0].inv_wr.next)
			first_wr = &ctx->reg[0].inv_wr;
		else
			first_wr = &ctx->reg[0].reg_wr.wr;
		last_wr = &ctx->reg[ctx->nr_ops - 1].wr.wr;
		break;
	case RDMA_RW_MULTI_WR:
		first_wr = &ctx->map.wrs[0].wr;
		last_wr = &ctx->map.wrs[ctx->nr_ops - 1].wr;
		break;
	case RDMA_RW_SINGLE_WR:
		first_wr = &ctx->single.wr.wr;
		last_wr = &ctx->single.wr.wr;
		break;
	default:
		BUG();
	}

	if (chain_wr) {
		last_wr->next = chain_wr;
	} else {
		last_wr->wr_cqe = cqe;
		last_wr->send_flags |= IB_SEND_SIGNALED;
	}

	return first_wr;
}
EXPORT_SYMBOL(rdma_rw_ctx_wrs);
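
/*
 * Example (illustrative sketch, not part of the original file): using
 * rdma_rw_ctx_wrs() instead of rdma_rw_ctx_post() to chain an extra WR,
 * e.g. a send carrying a response message, behind the RDMA operations.
 * @ctx, @qp and @send_wr are hypothetical caller state.
 */
#if 0
static int example_post_with_response(struct rdma_rw_ctx *ctx,
		struct ib_qp *qp, u32 port_num, struct ib_send_wr *send_wr)
{
	struct ib_send_wr *first_wr;

	/* The chained @send_wr carries the completion, so no @cqe is set. */
	first_wr = rdma_rw_ctx_wrs(ctx, qp, port_num, NULL, send_wr);
	return ib_post_send(qp, first_wr, NULL);
}
#endif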

/**
 * rdma_rw_ctx_post - post an RDMA READ or RDMA WRITE operation
 * @ctx:	context to operate on
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @cqe:	completion queue entry for the last WR
 * @chain_wr:	WR to append to the posted chain
 *
 * Post the set of RDMA READ/WRITE operations described by @ctx, as well as
 * any memory registration operations needed.  If @chain_wr is non-NULL the
 * WR it points to will be appended to the chain of WRs posted.  If @chain_wr
 * is not set, @cqe must be set so that the caller gets a completion
 * notification.
 */
int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num,
		struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
{
	struct ib_send_wr *first_wr;

	first_wr = rdma_rw_ctx_wrs(ctx, qp, port_num, cqe, chain_wr);
	return ib_post_send(qp, first_wr, NULL);
}
EXPORT_SYMBOL(rdma_rw_ctx_post);

/**
 * rdma_rw_ctx_destroy - release all resources allocated by rdma_rw_ctx_init
 * @ctx:	context to release
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @sg:		scatterlist that was used for the READ/WRITE
 * @sg_cnt:	number of entries in @sg
 * @dir:	%DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 */
void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
			 u32 port_num, struct scatterlist *sg, u32 sg_cnt,
			 enum dma_data_direction dir)
{
	int i;

	switch (ctx->type) {
	case RDMA_RW_MR:
		for (i = 0; i < ctx->nr_ops; i++)
			ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->reg[i].mr);
		kfree(ctx->reg);
		break;
	case RDMA_RW_MULTI_WR:
		kfree(ctx->map.wrs);
		kfree(ctx->map.sges);
		break;
	case RDMA_RW_SINGLE_WR:
		break;
	default:
		BUG();
		break;
	}

	rdma_rw_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
}
EXPORT_SYMBOL(rdma_rw_ctx_destroy);

/**
 * rdma_rw_ctx_destroy_signature - release all resources allocated by
 *	rdma_rw_ctx_signature_init
 * @ctx:	context to release
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @sg:		scatterlist that was used for the READ/WRITE
 * @sg_cnt:	number of entries in @sg
 * @prot_sg:	scatterlist that was used for the READ/WRITE of the PI
 * @prot_sg_cnt: number of entries in @prot_sg
 * @dir:	%DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 */
void rdma_rw_ctx_destroy_signature(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
		u32 port_num, struct scatterlist *sg, u32 sg_cnt,
		struct scatterlist *prot_sg, u32 prot_sg_cnt,
		enum dma_data_direction dir)
{
	if (WARN_ON_ONCE(ctx->type != RDMA_RW_SIG_MR))
		return;

	ib_mr_pool_put(qp, &qp->sig_mrs, ctx->reg->mr);
	kfree(ctx->reg);

	if (prot_sg_cnt)
		rdma_rw_unmap_sg(qp->pd->device, prot_sg, prot_sg_cnt, dir);
	rdma_rw_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
}
EXPORT_SYMBOL(rdma_rw_ctx_destroy_signature);

/**
 * rdma_rw_mr_factor - return number of MRs required for a payload
 * @device:	device handling the connection
 * @port_num:	port num to which the connection is bound
 * @maxpages:	maximum payload pages per rdma_rw_ctx
 *
 * Returns the number of MRs the device requires to move a payload of
 * @maxpages pages. The returned value is used during transport creation
 * to compute max_rdma_ctxs and the size of the transport's Send and
 * Send Completion Queues.
 */
unsigned int rdma_rw_mr_factor(struct ib_device *device, u32 port_num,
			       unsigned int maxpages)
{
	unsigned int mr_pages;

	if (rdma_rw_can_use_mr(device, port_num))
		mr_pages = rdma_rw_fr_page_list_len(device, false);
	else
		mr_pages = device->attrs.max_sge_rd;
	return DIV_ROUND_UP(maxpages, mr_pages);
}
EXPORT_SYMBOL(rdma_rw_mr_factor);
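
/*
 * Example (illustrative, not part of the original file): with the 256-page
 * cap from rdma_rw_fr_page_list_len() and 4 KiB pages, a 1 MiB payload is
 * 256 pages and rdma_rw_mr_factor() returns DIV_ROUND_UP(256, 256) = 1,
 * while a 4 MiB payload (1024 pages) needs 4 MRs. The payload sizes are
 * assumptions for illustration.
 */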

void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr)
{
	u32 factor;

	WARN_ON_ONCE(attr->port_num == 0);

	/*
	 * Each context needs at least one RDMA READ or WRITE WR.
	 *
	 * For some hardware we might need more, eventually we should ask the
	 * HCA driver for a multiplier here.
	 */
	factor = 1;

	/*
	 * If the device needs MRs to perform RDMA READ or WRITE operations,
	 * we'll need two additional MRs for the registrations and the
	 * invalidation.
	 */
	if (attr->create_flags & IB_QP_CREATE_INTEGRITY_EN ||
	    rdma_rw_can_use_mr(dev, attr->port_num))
		factor += 2;	/* inv + reg */

	attr->cap.max_send_wr += factor * attr->cap.max_rdma_ctxs;

	/*
	 * But maybe we were just too high in the sky and the device doesn't
	 * even support all we need, and we'll have to live with what we get...
	 */
	attr->cap.max_send_wr =
		min_t(u32, attr->cap.max_send_wr, dev->attrs.max_qp_wr);
}
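
/*
 * Example (illustrative, not part of the original file): for a QP created
 * with max_rdma_ctxs = 128 on a device that takes the MR path, factor is
 * 1 + 2 = 3, so 3 * 128 = 384 send WRs are reserved on top of the caller's
 * own max_send_wr, subject to the dev->attrs.max_qp_wr clamp above. The
 * numbers are assumptions for illustration.
 */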

int rdma_rw_init_mrs(struct ib_qp *qp, struct ib_qp_init_attr *attr)
{
	struct ib_device *dev = qp->pd->device;
	u32 nr_mrs = 0, nr_sig_mrs = 0, max_num_sg = 0;
	int ret = 0;

	if (attr->create_flags & IB_QP_CREATE_INTEGRITY_EN) {
		nr_sig_mrs = attr->cap.max_rdma_ctxs;
		nr_mrs = attr->cap.max_rdma_ctxs;
		max_num_sg = rdma_rw_fr_page_list_len(dev, true);
	} else if (rdma_rw_can_use_mr(dev, attr->port_num)) {
		nr_mrs = attr->cap.max_rdma_ctxs;
		max_num_sg = rdma_rw_fr_page_list_len(dev, false);
	}

	if (nr_mrs) {
		ret = ib_mr_pool_init(qp, &qp->rdma_mrs, nr_mrs,
				IB_MR_TYPE_MEM_REG,
				max_num_sg, 0);
		if (ret) {
			pr_err("%s: failed to allocate %u MRs\n",
				__func__, nr_mrs);
			return ret;
		}
	}

	if (nr_sig_mrs) {
		ret = ib_mr_pool_init(qp, &qp->sig_mrs, nr_sig_mrs,
				IB_MR_TYPE_INTEGRITY, max_num_sg, max_num_sg);
		if (ret) {
			pr_err("%s: failed to allocate %u SIG MRs\n",
				__func__, nr_sig_mrs);
			goto out_free_rdma_mrs;
		}
	}

	return 0;

out_free_rdma_mrs:
	ib_mr_pool_destroy(qp, &qp->rdma_mrs);
	return ret;
}

void rdma_rw_cleanup_mrs(struct ib_qp *qp)
{
	ib_mr_pool_destroy(qp, &qp->sig_mrs);
	ib_mr_pool_destroy(qp, &qp->rdma_mrs);
}