cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

rxe_mw.c (7111B)


// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2020 Hewlett Packard Enterprise, Inc. All rights reserved.
 */

/*
 * The rdma_rxe driver supports type 1 or type 2B memory windows.
 * Type 1 MWs are created by ibv_alloc_mw() verbs calls and bound by
 * ibv_bind_mw() calls. Type 2 MWs are also created by ibv_alloc_mw()
 * but bound by bind_mw work requests. The ibv_bind_mw() call is converted
 * by libibverbs to a bind_mw work request.
 */
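
/*
 * Illustrative userspace view (not part of this file): a rough sketch of
 * how an application might reach these paths through libibverbs. It
 * assumes an already-created PD "pd", a connected QP "qp", and an MR "mr"
 * registered with IBV_ACCESS_MW_BIND over a buffer "buf" of length "len";
 * error handling is omitted.
 *
 *	struct ibv_mw *mw = ibv_alloc_mw(pd, IBV_MW_TYPE_2);	// -> rxe_alloc_mw()
 *
 *	struct ibv_send_wr wr = {}, *bad_wr;
 *	wr.opcode = IBV_WR_BIND_MW;
 *	wr.send_flags = IBV_SEND_SIGNALED;
 *	wr.bind_mw.mw = mw;
 *	wr.bind_mw.rkey = ibv_inc_rkey(mw->rkey);	// new 8-bit key, checked in rxe_check_bind_mw()
 *	wr.bind_mw.bind_info.mr = mr;
 *	wr.bind_mw.bind_info.addr = (uintptr_t)buf;
 *	wr.bind_mw.bind_info.length = len;
 *	wr.bind_mw.bind_info.mw_access_flags = IBV_ACCESS_REMOTE_WRITE;
 *	ibv_post_send(qp, &wr, &bad_wr);	// bind_mw WQE -> rxe_bind_mw()
 *
 *	ibv_dealloc_mw(mw);			// -> rxe_dealloc_mw()
 *
 * A type 1 MW would instead be bound with ibv_bind_mw(qp, mw, &mw_bind),
 * which libibverbs converts into the same kind of bind_mw work request.
 */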

#include "rxe.h"

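/*
 * Verb handler behind ibv_alloc_mw(): take a reference on the PD, add the
 * MW to the device's MW pool, build an initial rkey from the pool index
 * and an 8-bit key, and start type 2 MWs in the FREE state (type 1 MWs
 * start out VALID).
 */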
int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
{
	struct rxe_mw *mw = to_rmw(ibmw);
	struct rxe_pd *pd = to_rpd(ibmw->pd);
	struct rxe_dev *rxe = to_rdev(ibmw->device);
	int ret;

	rxe_get(pd);

	ret = rxe_add_to_pool(&rxe->mw_pool, mw);
	if (ret) {
		rxe_put(pd);
		return ret;
	}

	mw->rkey = ibmw->rkey = (mw->elem.index << 8) | rxe_get_next_key(-1);
	mw->state = (mw->ibmw.type == IB_MW_TYPE_2) ?
			RXE_MW_STATE_FREE : RXE_MW_STATE_VALID;
	spin_lock_init(&mw->lock);

	return 0;
}

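/*
 * Verb handler behind ibv_dealloc_mw(): drop the reference taken at
 * allocation; the actual teardown is done by rxe_mw_cleanup() once the
 * last reference is gone.
 */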
int rxe_dealloc_mw(struct ib_mw *ibmw)
{
	struct rxe_mw *mw = to_rmw(ibmw);

	rxe_put(mw);

	return 0;
}

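/*
 * Validate a bind_mw work request against the IBA rules cited below:
 * MW type and state, matching PD, a key that differs from the current
 * one, and MR access and range checks.
 */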
static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
			 struct rxe_mw *mw, struct rxe_mr *mr)
{
	u32 key = wqe->wr.wr.mw.rkey & 0xff;

	if (mw->ibmw.type == IB_MW_TYPE_1) {
		if (unlikely(mw->state != RXE_MW_STATE_VALID)) {
			pr_err_once(
				"attempt to bind a type 1 MW not in the valid state\n");
			return -EINVAL;
		}

		/* o10-36.2.2 */
		if (unlikely((mw->access & IB_ZERO_BASED))) {
			pr_err_once("attempt to bind a zero based type 1 MW\n");
			return -EINVAL;
		}
	}

	if (mw->ibmw.type == IB_MW_TYPE_2) {
		/* o10-37.2.30 */
		if (unlikely(mw->state != RXE_MW_STATE_FREE)) {
			pr_err_once(
				"attempt to bind a type 2 MW not in the free state\n");
			return -EINVAL;
		}

		/* C10-72 */
		if (unlikely(qp->pd != to_rpd(mw->ibmw.pd))) {
			pr_err_once(
				"attempt to bind type 2 MW with qp with different PD\n");
			return -EINVAL;
		}

		/* o10-37.2.40 */
		if (unlikely(!mr || wqe->wr.wr.mw.length == 0)) {
			pr_err_once(
				"attempt to invalidate type 2 MW by binding with NULL or zero length MR\n");
			return -EINVAL;
		}
	}

	if (unlikely(key == (mw->rkey & 0xff))) {
		pr_err_once("attempt to bind MW with same key\n");
		return -EINVAL;
	}

	/* remaining checks only apply to a nonzero MR */
	if (!mr)
		return 0;

	if (unlikely(mr->access & IB_ZERO_BASED)) {
		pr_err_once("attempt to bind MW to zero based MR\n");
		return -EINVAL;
	}

	/* C10-73 */
	if (unlikely(!(mr->access & IB_ACCESS_MW_BIND))) {
		pr_err_once(
			"attempt to bind an MW to an MR without bind access\n");
		return -EINVAL;
	}

	/* C10-74 */
	if (unlikely((mw->access &
		      (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_ATOMIC)) &&
		     !(mr->access & IB_ACCESS_LOCAL_WRITE))) {
		pr_err_once(
			"attempt to bind a writable MW to an MR without local write access\n");
		return -EINVAL;
	}

	/* C10-75 */
	if (mw->access & IB_ZERO_BASED) {
		if (unlikely(wqe->wr.wr.mw.length > mr->cur_map_set->length)) {
			pr_err_once(
				"attempt to bind a ZB MW outside of the MR\n");
			return -EINVAL;
		}
	} else {
		if (unlikely((wqe->wr.wr.mw.addr < mr->cur_map_set->iova) ||
			     ((wqe->wr.wr.mw.addr + wqe->wr.wr.mw.length) >
			      (mr->cur_map_set->iova + mr->cur_map_set->length)))) {
			pr_err_once(
				"attempt to bind a VA MW outside of the MR\n");
			return -EINVAL;
		}
	}

	return 0;
}

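/*
 * Apply a validated bind: install the new 8-bit key in the rkey, record
 * access/addr/length, drop any previously bound MR, take a reference on
 * the new MR (if any), and for type 2 MWs remember the binding QP.
 * Called with mw->lock held.
 */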
static void rxe_do_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		      struct rxe_mw *mw, struct rxe_mr *mr)
{
	u32 key = wqe->wr.wr.mw.rkey & 0xff;

	mw->rkey = (mw->rkey & ~0xff) | key;
	mw->access = wqe->wr.wr.mw.access;
	mw->state = RXE_MW_STATE_VALID;
	mw->addr = wqe->wr.wr.mw.addr;
	mw->length = wqe->wr.wr.mw.length;

	if (mw->mr) {
		rxe_put(mw->mr);
		atomic_dec(&mw->mr->num_mw);
		mw->mr = NULL;
	}

	if (mw->length) {
		mw->mr = mr;
		atomic_inc(&mr->num_mw);
		rxe_get(mr);
	}

	if (mw->ibmw.type == IB_MW_TYPE_2) {
		rxe_get(qp);
		mw->qp = qp;
	}
}

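/*
 * Handle a bind_mw WQE: look up the MW by its rkey index and the MR by
 * its lkey index, validate the request, and perform the bind under the
 * MW lock. A zero-length bind carries no MR.
 */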
int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	int ret;
	struct rxe_mw *mw;
	struct rxe_mr *mr;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	u32 mw_rkey = wqe->wr.wr.mw.mw_rkey;
	u32 mr_lkey = wqe->wr.wr.mw.mr_lkey;

	mw = rxe_pool_get_index(&rxe->mw_pool, mw_rkey >> 8);
	if (unlikely(!mw)) {
		ret = -EINVAL;
		goto err;
	}

	if (unlikely(mw->rkey != mw_rkey)) {
		ret = -EINVAL;
		goto err_drop_mw;
	}

	if (likely(wqe->wr.wr.mw.length)) {
		mr = rxe_pool_get_index(&rxe->mr_pool, mr_lkey >> 8);
		if (unlikely(!mr)) {
			ret = -EINVAL;
			goto err_drop_mw;
		}

		if (unlikely(mr->lkey != mr_lkey)) {
			ret = -EINVAL;
			goto err_drop_mr;
		}
	} else {
		mr = NULL;
	}

	spin_lock_bh(&mw->lock);

	ret = rxe_check_bind_mw(qp, wqe, mw, mr);
	if (ret)
		goto err_unlock;

	rxe_do_bind_mw(qp, wqe, mw, mr);
err_unlock:
	spin_unlock_bh(&mw->lock);
err_drop_mr:
	if (mr)
		rxe_put(mr);
err_drop_mw:
	rxe_put(mw);
err:
	return ret;
}

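/*
 * Only a type 2 MW that is not already invalid may be invalidated;
 * invalidating a type 1 MW is prohibited (o10-37.2.26).
 */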
static int rxe_check_invalidate_mw(struct rxe_qp *qp, struct rxe_mw *mw)
{
	if (unlikely(mw->state == RXE_MW_STATE_INVALID))
		return -EINVAL;

	/* o10-37.2.26 */
	if (unlikely(mw->ibmw.type == IB_MW_TYPE_1))
		return -EINVAL;

	return 0;
}

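/*
 * Drop the QP and MR references held by a bound type 2 MW, clear its
 * binding, and return it to the FREE state. Called with mw->lock held.
 */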
static void rxe_do_invalidate_mw(struct rxe_mw *mw)
{
	struct rxe_qp *qp;
	struct rxe_mr *mr;

	/* valid type 2 MW will always have a QP pointer */
	qp = mw->qp;
	mw->qp = NULL;
	rxe_put(qp);

	/* valid type 2 MW will always have an MR pointer */
	mr = mw->mr;
	mw->mr = NULL;
	atomic_dec(&mr->num_mw);
	rxe_put(mr);

	mw->access = 0;
	mw->addr = 0;
	mw->length = 0;
	mw->state = RXE_MW_STATE_FREE;
}

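/*
 * Invalidate the MW named by rkey (local or remote invalidate): look it
 * up in the MW pool, check that the rkey matches, and invalidate it
 * under the MW lock.
 */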
int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_mw *mw;
	int ret;

	mw = rxe_pool_get_index(&rxe->mw_pool, rkey >> 8);
	if (!mw) {
		ret = -EINVAL;
		goto err;
	}

	if (rkey != mw->rkey) {
		ret = -EINVAL;
		goto err_drop_ref;
	}

	spin_lock_bh(&mw->lock);

	ret = rxe_check_invalidate_mw(qp, mw);
	if (ret)
		goto err_unlock;

	rxe_do_invalidate_mw(mw);
err_unlock:
	spin_unlock_bh(&mw->lock);
err_drop_ref:
	rxe_put(mw);
err:
	return ret;
}

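/*
 * Look up an MW by rkey for incoming requests. Returns NULL unless the
 * rkey, PD, QP (for type 2), length, requested access and state all
 * check out; on success the caller holds a reference on the returned MW.
 */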
struct rxe_mw *rxe_lookup_mw(struct rxe_qp *qp, int access, u32 rkey)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_pd *pd = to_rpd(qp->ibqp.pd);
	struct rxe_mw *mw;
	int index = rkey >> 8;

	mw = rxe_pool_get_index(&rxe->mw_pool, index);
	if (!mw)
		return NULL;

	if (unlikely((mw->rkey != rkey) || rxe_mw_pd(mw) != pd ||
		     (mw->ibmw.type == IB_MW_TYPE_2 && mw->qp != qp) ||
		     (mw->length == 0) ||
		     (access && !(access & mw->access)) ||
		     mw->state != RXE_MW_STATE_VALID)) {
		rxe_put(mw);
		return NULL;
	}

	return mw;
}

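/*
 * Pool element cleanup, run when the last MW reference is dropped:
 * release the PD, any bound MR and QP, and reset the MW to the INVALID
 * state.
 */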
void rxe_mw_cleanup(struct rxe_pool_elem *elem)
{
	struct rxe_mw *mw = container_of(elem, typeof(*mw), elem);
	struct rxe_pd *pd = to_rpd(mw->ibmw.pd);

	rxe_put(pd);

	if (mw->mr) {
		struct rxe_mr *mr = mw->mr;

		mw->mr = NULL;
		atomic_dec(&mr->num_mw);
		rxe_put(mr);
	}

	if (mw->qp) {
		struct rxe_qp *qp = mw->qp;

		mw->qp = NULL;
		rxe_put(qp);
	}

	mw->access = 0;
	mw->addr = 0;
	mw->length = 0;
	mw->state = RXE_MW_STATE_INVALID;
}