cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

odp.c (48575B)


      1/*
      2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
      3 *
      4 * This software is available to you under a choice of one of two
      5 * licenses.  You may choose to be licensed under the terms of the GNU
      6 * General Public License (GPL) Version 2, available from the file
      7 * COPYING in the main directory of this source tree, or the
      8 * OpenIB.org BSD license below:
      9 *
     10 *     Redistribution and use in source and binary forms, with or
     11 *     without modification, are permitted provided that the following
     12 *     conditions are met:
     13 *
     14 *      - Redistributions of source code must retain the above
     15 *        copyright notice, this list of conditions and the following
     16 *        disclaimer.
     17 *
     18 *      - Redistributions in binary form must reproduce the above
     19 *        copyright notice, this list of conditions and the following
     20 *        disclaimer in the documentation and/or other materials
     21 *        provided with the distribution.
     22 *
     23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
     24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
     25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
     26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
     27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
     28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
     29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
     30 * SOFTWARE.
     31 */
     32
     33#include <rdma/ib_umem.h>
     34#include <rdma/ib_umem_odp.h>
     35#include <linux/kernel.h>
     36#include <linux/dma-buf.h>
     37#include <linux/dma-resv.h>
     38
     39#include "mlx5_ib.h"
     40#include "cmd.h"
     41#include "umr.h"
     42#include "qp.h"
     43
     44#include <linux/mlx5/eq.h>
     45
     46/* Contains the details of a pagefault. */
     47struct mlx5_pagefault {
     48	u32			bytes_committed;
     49	u32			token;
     50	u8			event_subtype;
     51	u8			type;
     52	union {
     53		/* Initiator or send message responder pagefault details. */
     54		struct {
     55			/* Received packet size, only valid for responders. */
     56			u32	packet_size;
     57			/*
      58			 * Number of the resource holding the WQE; depends on type.
     59			 */
     60			u32	wq_num;
     61			/*
     62			 * WQE index. Refers to either the send queue or
     63			 * receive queue, according to event_subtype.
     64			 */
     65			u16	wqe_index;
     66		} wqe;
     67		/* RDMA responder pagefault details */
     68		struct {
     69			u32	r_key;
     70			/*
      71			 * Received packet size; the minimal size for which page fault
      72			 * resolution is required for forward progress.
     73			 */
     74			u32	packet_size;
     75			u32	rdma_op_len;
     76			u64	rdma_va;
     77		} rdma;
     78	};
     79
     80	struct mlx5_ib_pf_eq	*eq;
     81	struct work_struct	work;
     82};
     83
     84#define MAX_PREFETCH_LEN (4*1024*1024U)
     85
     86/* Timeout in ms to wait for an active mmu notifier to complete when handling
     87 * a pagefault. */
     88#define MMU_NOTIFIER_TIMEOUT 1000
     89
     90#define MLX5_IMR_MTT_BITS (30 - PAGE_SHIFT)
     91#define MLX5_IMR_MTT_SHIFT (MLX5_IMR_MTT_BITS + PAGE_SHIFT)
     92#define MLX5_IMR_MTT_ENTRIES BIT_ULL(MLX5_IMR_MTT_BITS)
     93#define MLX5_IMR_MTT_SIZE BIT_ULL(MLX5_IMR_MTT_SHIFT)
     94#define MLX5_IMR_MTT_MASK (~(MLX5_IMR_MTT_SIZE - 1))
     95
     96#define MLX5_KSM_PAGE_SHIFT MLX5_IMR_MTT_SHIFT
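/*
 * Worked example of the geometry above (a sketch, assuming 4 KiB pages,
 * i.e. PAGE_SHIFT = 12): MLX5_IMR_MTT_BITS = 30 - 12 = 18, so each implicit
 * child MR spans MLX5_IMR_MTT_SIZE = 2^30 bytes = 1 GiB and is described by
 * MLX5_IMR_MTT_ENTRIES = 2^18 = 262144 page-sized MTT entries. The parent
 * implicit MR is a KSM table whose entries each point at one such 1 GiB child.
 */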
     97
     98static u64 mlx5_imr_ksm_entries;
     99
    100static void populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
    101			struct mlx5_ib_mr *imr, int flags)
    102{
    103	struct mlx5_klm *end = pklm + nentries;
    104
    105	if (flags & MLX5_IB_UPD_XLT_ZAP) {
    106		for (; pklm != end; pklm++, idx++) {
    107			pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
    108			pklm->key = cpu_to_be32(mr_to_mdev(imr)->null_mkey);
    109			pklm->va = 0;
    110		}
    111		return;
    112	}
    113
    114	/*
    115	 * The locking here is pretty subtle. Ideally the implicit_children
    116	 * xarray would be protected by the umem_mutex, however that is not
    117	 * possible. Instead this uses a weaker update-then-lock pattern:
    118	 *
    119	 *    xa_store()
    120	 *    mutex_lock(umem_mutex)
    121	 *     mlx5r_umr_update_xlt()
    122	 *    mutex_unlock(umem_mutex)
    123	 *    destroy lkey
    124	 *
     125	 * i.e. any change to the xarray must be followed by the locked update_xlt
    126	 * before destroying.
    127	 *
    128	 * The umem_mutex provides the acquire/release semantic needed to make
    129	 * the xa_store() visible to a racing thread.
    130	 */
    131	lockdep_assert_held(&to_ib_umem_odp(imr->umem)->umem_mutex);
    132
    133	for (; pklm != end; pklm++, idx++) {
    134		struct mlx5_ib_mr *mtt = xa_load(&imr->implicit_children, idx);
    135
    136		pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
    137		if (mtt) {
    138			pklm->key = cpu_to_be32(mtt->ibmr.lkey);
    139			pklm->va = cpu_to_be64(idx * MLX5_IMR_MTT_SIZE);
    140		} else {
    141			pklm->key = cpu_to_be32(mr_to_mdev(imr)->null_mkey);
    142			pklm->va = 0;
    143		}
    144	}
    145}
    146
    147static u64 umem_dma_to_mtt(dma_addr_t umem_dma)
    148{
    149	u64 mtt_entry = umem_dma & ODP_DMA_ADDR_MASK;
    150
    151	if (umem_dma & ODP_READ_ALLOWED_BIT)
    152		mtt_entry |= MLX5_IB_MTT_READ;
    153	if (umem_dma & ODP_WRITE_ALLOWED_BIT)
    154		mtt_entry |= MLX5_IB_MTT_WRITE;
    155
    156	return mtt_entry;
    157}
    158
    159static void populate_mtt(__be64 *pas, size_t idx, size_t nentries,
    160			 struct mlx5_ib_mr *mr, int flags)
    161{
    162	struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
    163	dma_addr_t pa;
    164	size_t i;
    165
    166	if (flags & MLX5_IB_UPD_XLT_ZAP)
    167		return;
    168
    169	for (i = 0; i < nentries; i++) {
    170		pa = odp->dma_list[idx + i];
    171		pas[i] = cpu_to_be64(umem_dma_to_mtt(pa));
    172	}
    173}
    174
    175void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
    176			   struct mlx5_ib_mr *mr, int flags)
    177{
    178	if (flags & MLX5_IB_UPD_XLT_INDIRECT) {
    179		populate_klm(xlt, idx, nentries, mr, flags);
    180	} else {
    181		populate_mtt(xlt, idx, nentries, mr, flags);
    182	}
    183}
    184
    185/*
    186 * This must be called after the mr has been removed from implicit_children.
    187 * NOTE: The MR does not necessarily have to be
    188 * empty here, parallel page faults could have raced with the free process and
    189 * added pages to it.
    190 */
    191static void free_implicit_child_mr_work(struct work_struct *work)
    192{
    193	struct mlx5_ib_mr *mr =
    194		container_of(work, struct mlx5_ib_mr, odp_destroy.work);
    195	struct mlx5_ib_mr *imr = mr->parent;
    196	struct ib_umem_odp *odp_imr = to_ib_umem_odp(imr->umem);
    197	struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
    198
    199	mlx5r_deref_wait_odp_mkey(&mr->mmkey);
    200
    201	mutex_lock(&odp_imr->umem_mutex);
    202	mlx5r_umr_update_xlt(mr->parent,
    203			     ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT, 1, 0,
    204			     MLX5_IB_UPD_XLT_INDIRECT | MLX5_IB_UPD_XLT_ATOMIC);
    205	mutex_unlock(&odp_imr->umem_mutex);
    206	mlx5_ib_dereg_mr(&mr->ibmr, NULL);
    207
    208	mlx5r_deref_odp_mkey(&imr->mmkey);
    209}
    210
    211static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr)
    212{
    213	struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
    214	unsigned long idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT;
    215	struct mlx5_ib_mr *imr = mr->parent;
    216
    217	if (!refcount_inc_not_zero(&imr->mmkey.usecount))
    218		return;
    219
    220	xa_erase(&imr->implicit_children, idx);
    221
    222	/* Freeing a MR is a sleeping operation, so bounce to a work queue */
    223	INIT_WORK(&mr->odp_destroy.work, free_implicit_child_mr_work);
    224	queue_work(system_unbound_wq, &mr->odp_destroy.work);
    225}
    226
    227static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni,
    228				     const struct mmu_notifier_range *range,
    229				     unsigned long cur_seq)
    230{
    231	struct ib_umem_odp *umem_odp =
    232		container_of(mni, struct ib_umem_odp, notifier);
    233	struct mlx5_ib_mr *mr;
    234	const u64 umr_block_mask = (MLX5_UMR_MTT_ALIGNMENT /
    235				    sizeof(struct mlx5_mtt)) - 1;
    236	u64 idx = 0, blk_start_idx = 0;
    237	u64 invalidations = 0;
    238	unsigned long start;
    239	unsigned long end;
    240	int in_block = 0;
    241	u64 addr;
    242
    243	if (!mmu_notifier_range_blockable(range))
    244		return false;
    245
    246	mutex_lock(&umem_odp->umem_mutex);
    247	mmu_interval_set_seq(mni, cur_seq);
    248	/*
    249	 * If npages is zero then umem_odp->private may not be setup yet. This
    250	 * does not complete until after the first page is mapped for DMA.
    251	 */
    252	if (!umem_odp->npages)
    253		goto out;
    254	mr = umem_odp->private;
    255
    256	start = max_t(u64, ib_umem_start(umem_odp), range->start);
    257	end = min_t(u64, ib_umem_end(umem_odp), range->end);
    258
    259	/*
    260	 * Iteration one - zap the HW's MTTs. The notifiers_count ensures that
    261	 * while we are doing the invalidation, no page fault will attempt to
     262	 * overwrite the same MTTs.  Concurrent invalidations might race us,
    263	 * but they will write 0s as well, so no difference in the end result.
    264	 */
    265	for (addr = start; addr < end; addr += BIT(umem_odp->page_shift)) {
    266		idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
    267		/*
    268		 * Strive to write the MTTs in chunks, but avoid overwriting
     269		 * non-existing MTTs. The heuristic here can be improved to
    270		 * estimate the cost of another UMR vs. the cost of bigger
    271		 * UMR.
    272		 */
    273		if (umem_odp->dma_list[idx] &
    274		    (ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT)) {
    275			if (!in_block) {
    276				blk_start_idx = idx;
    277				in_block = 1;
    278			}
    279
    280			/* Count page invalidations */
    281			invalidations += idx - blk_start_idx + 1;
    282		} else {
    283			u64 umr_offset = idx & umr_block_mask;
    284
    285			if (in_block && umr_offset == 0) {
    286				mlx5r_umr_update_xlt(mr, blk_start_idx,
    287						     idx - blk_start_idx, 0,
    288						     MLX5_IB_UPD_XLT_ZAP |
    289						     MLX5_IB_UPD_XLT_ATOMIC);
    290				in_block = 0;
    291			}
    292		}
    293	}
    294	if (in_block)
    295		mlx5r_umr_update_xlt(mr, blk_start_idx,
    296				     idx - blk_start_idx + 1, 0,
    297				     MLX5_IB_UPD_XLT_ZAP |
    298				     MLX5_IB_UPD_XLT_ATOMIC);
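	/*
	 * Batching note (a sketch of the chunking above, assuming the usual
	 * 64-byte UMR translation alignment and an 8-byte struct mlx5_mtt):
	 * umr_block_mask would be 64 / 8 - 1 = 7, so accumulated ZAP ranges
	 * are flushed to the HW whenever the index crosses an 8-entry
	 * boundary, plus one final flush here for a partially filled block.
	 */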
    299
    300	mlx5_update_odp_stats(mr, invalidations, invalidations);
    301
    302	/*
    303	 * We are now sure that the device will not access the
    304	 * memory. We can safely unmap it, and mark it as dirty if
    305	 * needed.
    306	 */
    307
    308	ib_umem_odp_unmap_dma_pages(umem_odp, start, end);
    309
    310	if (unlikely(!umem_odp->npages && mr->parent))
    311		destroy_unused_implicit_child_mr(mr);
    312out:
    313	mutex_unlock(&umem_odp->umem_mutex);
    314	return true;
    315}
    316
    317const struct mmu_interval_notifier_ops mlx5_mn_ops = {
    318	.invalidate = mlx5_ib_invalidate_range,
    319};
    320
    321static void internal_fill_odp_caps(struct mlx5_ib_dev *dev)
    322{
    323	struct ib_odp_caps *caps = &dev->odp_caps;
    324
    325	memset(caps, 0, sizeof(*caps));
    326
    327	if (!MLX5_CAP_GEN(dev->mdev, pg) || !mlx5r_umr_can_load_pas(dev, 0))
    328		return;
    329
    330	caps->general_caps = IB_ODP_SUPPORT;
    331
    332	if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
    333		dev->odp_max_size = U64_MAX;
    334	else
    335		dev->odp_max_size = BIT_ULL(MLX5_MAX_UMR_SHIFT + PAGE_SHIFT);
    336
    337	if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.send))
    338		caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SEND;
    339
    340	if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.srq_receive))
    341		caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
    342
    343	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.send))
    344		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SEND;
    345
    346	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.receive))
    347		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_RECV;
    348
    349	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.write))
    350		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_WRITE;
    351
    352	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.read))
    353		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ;
    354
    355	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.atomic))
    356		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;
    357
    358	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.srq_receive))
    359		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
    360
    361	if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.send))
    362		caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_SEND;
    363
    364	if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.receive))
    365		caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_RECV;
    366
    367	if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.write))
    368		caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_WRITE;
    369
    370	if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.read))
    371		caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_READ;
    372
    373	if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.atomic))
    374		caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;
    375
    376	if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.srq_receive))
    377		caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
    378
    379	if (MLX5_CAP_GEN(dev->mdev, fixed_buffer_size) &&
    380	    MLX5_CAP_GEN(dev->mdev, null_mkey) &&
    381	    MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) &&
    382	    !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled))
    383		caps->general_caps |= IB_ODP_SUPPORT_IMPLICIT;
    384}
    385
    386static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
    387				      struct mlx5_pagefault *pfault,
    388				      int error)
    389{
    390	int wq_num = pfault->event_subtype == MLX5_PFAULT_SUBTYPE_WQE ?
    391		     pfault->wqe.wq_num : pfault->token;
    392	u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = {};
    393	int err;
    394
    395	MLX5_SET(page_fault_resume_in, in, opcode, MLX5_CMD_OP_PAGE_FAULT_RESUME);
    396	MLX5_SET(page_fault_resume_in, in, page_fault_type, pfault->type);
    397	MLX5_SET(page_fault_resume_in, in, token, pfault->token);
    398	MLX5_SET(page_fault_resume_in, in, wq_number, wq_num);
    399	MLX5_SET(page_fault_resume_in, in, error, !!error);
    400
    401	err = mlx5_cmd_exec_in(dev->mdev, page_fault_resume, in);
    402	if (err)
    403		mlx5_ib_err(dev, "Failed to resolve the page fault on WQ 0x%x err %d\n",
    404			    wq_num, err);
    405}
    406
    407static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
    408						unsigned long idx)
    409{
    410	struct mlx5_ib_dev *dev = mr_to_mdev(imr);
    411	struct ib_umem_odp *odp;
    412	struct mlx5_ib_mr *mr;
    413	struct mlx5_ib_mr *ret;
    414	int err;
    415
    416	odp = ib_umem_odp_alloc_child(to_ib_umem_odp(imr->umem),
    417				      idx * MLX5_IMR_MTT_SIZE,
    418				      MLX5_IMR_MTT_SIZE, &mlx5_mn_ops);
    419	if (IS_ERR(odp))
    420		return ERR_CAST(odp);
    421
    422	mr = mlx5_mr_cache_alloc(dev, &dev->cache.ent[MLX5_IMR_MTT_CACHE_ENTRY],
    423				 imr->access_flags);
    424	if (IS_ERR(mr)) {
    425		ib_umem_odp_release(odp);
    426		return mr;
    427	}
    428
    429	mr->access_flags = imr->access_flags;
    430	mr->ibmr.pd = imr->ibmr.pd;
    431	mr->ibmr.device = &mr_to_mdev(imr)->ib_dev;
    432	mr->umem = &odp->umem;
    433	mr->ibmr.lkey = mr->mmkey.key;
    434	mr->ibmr.rkey = mr->mmkey.key;
    435	mr->ibmr.iova = idx * MLX5_IMR_MTT_SIZE;
    436	mr->parent = imr;
    437	odp->private = mr;
    438
    439	/*
     440	 * First refcount is owned by the xarray and the second refcount
    441	 * is returned to the caller.
    442	 */
    443	refcount_set(&mr->mmkey.usecount, 2);
    444
    445	err = mlx5r_umr_update_xlt(mr, 0,
    446				   MLX5_IMR_MTT_ENTRIES,
    447				   PAGE_SHIFT,
    448				   MLX5_IB_UPD_XLT_ZAP |
    449				   MLX5_IB_UPD_XLT_ENABLE);
    450	if (err) {
    451		ret = ERR_PTR(err);
    452		goto out_mr;
    453	}
    454
    455	xa_lock(&imr->implicit_children);
    456	ret = __xa_cmpxchg(&imr->implicit_children, idx, NULL, mr,
    457			   GFP_KERNEL);
    458	if (unlikely(ret)) {
    459		if (xa_is_err(ret)) {
    460			ret = ERR_PTR(xa_err(ret));
    461			goto out_lock;
    462		}
    463		/*
    464		 * Another thread beat us to creating the child mr, use
    465		 * theirs.
    466		 */
    467		refcount_inc(&ret->mmkey.usecount);
    468		goto out_lock;
    469	}
    470	xa_unlock(&imr->implicit_children);
    471
    472	mlx5_ib_dbg(mr_to_mdev(imr), "key %x mr %p\n", mr->mmkey.key, mr);
    473	return mr;
    474
    475out_lock:
    476	xa_unlock(&imr->implicit_children);
    477out_mr:
    478	mlx5_ib_dereg_mr(&mr->ibmr, NULL);
    479	return ret;
    480}
    481
    482struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
    483					     int access_flags)
    484{
    485	struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device);
    486	struct ib_umem_odp *umem_odp;
    487	struct mlx5_ib_mr *imr;
    488	int err;
    489
    490	if (!mlx5r_umr_can_load_pas(dev, MLX5_IMR_MTT_ENTRIES * PAGE_SIZE))
    491		return ERR_PTR(-EOPNOTSUPP);
    492
    493	umem_odp = ib_umem_odp_alloc_implicit(&dev->ib_dev, access_flags);
    494	if (IS_ERR(umem_odp))
    495		return ERR_CAST(umem_odp);
    496
    497	imr = mlx5_mr_cache_alloc(dev,
    498				  &dev->cache.ent[MLX5_IMR_KSM_CACHE_ENTRY],
    499				  access_flags);
    500	if (IS_ERR(imr)) {
    501		ib_umem_odp_release(umem_odp);
    502		return imr;
    503	}
    504
    505	imr->access_flags = access_flags;
    506	imr->ibmr.pd = &pd->ibpd;
    507	imr->ibmr.iova = 0;
    508	imr->umem = &umem_odp->umem;
    509	imr->ibmr.lkey = imr->mmkey.key;
    510	imr->ibmr.rkey = imr->mmkey.key;
    511	imr->ibmr.device = &dev->ib_dev;
    512	imr->is_odp_implicit = true;
    513	xa_init(&imr->implicit_children);
    514
    515	err = mlx5r_umr_update_xlt(imr, 0,
    516				   mlx5_imr_ksm_entries,
    517				   MLX5_KSM_PAGE_SHIFT,
    518				   MLX5_IB_UPD_XLT_INDIRECT |
    519				   MLX5_IB_UPD_XLT_ZAP |
    520				   MLX5_IB_UPD_XLT_ENABLE);
    521	if (err)
    522		goto out_mr;
    523
    524	err = mlx5r_store_odp_mkey(dev, &imr->mmkey);
    525	if (err)
    526		goto out_mr;
    527
    528	mlx5_ib_dbg(dev, "key %x mr %p\n", imr->mmkey.key, imr);
    529	return imr;
    530out_mr:
    531	mlx5_ib_err(dev, "Failed to register MKEY %d\n", err);
    532	mlx5_ib_dereg_mr(&imr->ibmr, NULL);
    533	return ERR_PTR(err);
    534}
    535
    536void mlx5_ib_free_odp_mr(struct mlx5_ib_mr *mr)
    537{
    538	struct mlx5_ib_mr *mtt;
    539	unsigned long idx;
    540
    541	/*
    542	 * If this is an implicit MR it is already invalidated so we can just
    543	 * delete the children mkeys.
    544	 */
    545	xa_for_each(&mr->implicit_children, idx, mtt) {
    546		xa_erase(&mr->implicit_children, idx);
    547		mlx5_ib_dereg_mr(&mtt->ibmr, NULL);
    548	}
    549}
    550
    551#define MLX5_PF_FLAGS_DOWNGRADE BIT(1)
    552#define MLX5_PF_FLAGS_SNAPSHOT BIT(2)
    553#define MLX5_PF_FLAGS_ENABLE BIT(3)
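/*
 * Summary of how these flags are used below: DOWNGRADE maps the range
 * read-only even when the umem is writable (used for read prefetch),
 * SNAPSHOT only mirrors pages that are already present instead of faulting
 * them in, and ENABLE additionally sets MLX5_IB_UPD_XLT_ENABLE so the mkey
 * is enabled on its first population.
 */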
    554static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp,
    555			     u64 user_va, size_t bcnt, u32 *bytes_mapped,
    556			     u32 flags)
    557{
    558	int page_shift, ret, np;
    559	bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
    560	u64 access_mask;
    561	u64 start_idx;
    562	bool fault = !(flags & MLX5_PF_FLAGS_SNAPSHOT);
    563	u32 xlt_flags = MLX5_IB_UPD_XLT_ATOMIC;
    564
    565	if (flags & MLX5_PF_FLAGS_ENABLE)
    566		xlt_flags |= MLX5_IB_UPD_XLT_ENABLE;
    567
    568	page_shift = odp->page_shift;
    569	start_idx = (user_va - ib_umem_start(odp)) >> page_shift;
    570	access_mask = ODP_READ_ALLOWED_BIT;
    571
    572	if (odp->umem.writable && !downgrade)
    573		access_mask |= ODP_WRITE_ALLOWED_BIT;
    574
    575	np = ib_umem_odp_map_dma_and_lock(odp, user_va, bcnt, access_mask, fault);
    576	if (np < 0)
    577		return np;
    578
    579	/*
    580	 * No need to check whether the MTTs really belong to this MR, since
    581	 * ib_umem_odp_map_dma_and_lock already checks this.
    582	 */
    583	ret = mlx5r_umr_update_xlt(mr, start_idx, np, page_shift, xlt_flags);
    584	mutex_unlock(&odp->umem_mutex);
    585
    586	if (ret < 0) {
    587		if (ret != -EAGAIN)
    588			mlx5_ib_err(mr_to_mdev(mr),
    589				    "Failed to update mkey page tables\n");
    590		goto out;
    591	}
    592
    593	if (bytes_mapped) {
    594		u32 new_mappings = (np << page_shift) -
    595			(user_va - round_down(user_va, 1 << page_shift));
    596
    597		*bytes_mapped += min_t(u32, new_mappings, bcnt);
    598	}
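	/*
	 * Example of the new_mappings arithmetic above (assuming a 4 KiB
	 * page_shift and a fault at user_va = 0x1234 that mapped np = 3
	 * pages): round_down(user_va, 4096) = 0x1000, so new_mappings =
	 * 3 * 4096 - 0x234 = 0x2dcc bytes, clamped to bcnt if smaller.
	 */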
    599
    600	return np << (page_shift - PAGE_SHIFT);
    601
    602out:
    603	return ret;
    604}
    605
    606static int pagefault_implicit_mr(struct mlx5_ib_mr *imr,
    607				 struct ib_umem_odp *odp_imr, u64 user_va,
    608				 size_t bcnt, u32 *bytes_mapped, u32 flags)
    609{
    610	unsigned long end_idx = (user_va + bcnt - 1) >> MLX5_IMR_MTT_SHIFT;
    611	unsigned long upd_start_idx = end_idx + 1;
    612	unsigned long upd_len = 0;
    613	unsigned long npages = 0;
    614	int err;
    615	int ret;
    616
    617	if (unlikely(user_va >= mlx5_imr_ksm_entries * MLX5_IMR_MTT_SIZE ||
    618		     mlx5_imr_ksm_entries * MLX5_IMR_MTT_SIZE - user_va < bcnt))
    619		return -EFAULT;
    620
    621	/* Fault each child mr that intersects with our interval. */
    622	while (bcnt) {
    623		unsigned long idx = user_va >> MLX5_IMR_MTT_SHIFT;
    624		struct ib_umem_odp *umem_odp;
    625		struct mlx5_ib_mr *mtt;
    626		u64 len;
    627
    628		xa_lock(&imr->implicit_children);
    629		mtt = xa_load(&imr->implicit_children, idx);
    630		if (unlikely(!mtt)) {
    631			xa_unlock(&imr->implicit_children);
    632			mtt = implicit_get_child_mr(imr, idx);
    633			if (IS_ERR(mtt)) {
    634				ret = PTR_ERR(mtt);
    635				goto out;
    636			}
    637			upd_start_idx = min(upd_start_idx, idx);
    638			upd_len = idx - upd_start_idx + 1;
    639		} else {
    640			refcount_inc(&mtt->mmkey.usecount);
    641			xa_unlock(&imr->implicit_children);
    642		}
    643
    644		umem_odp = to_ib_umem_odp(mtt->umem);
    645		len = min_t(u64, user_va + bcnt, ib_umem_end(umem_odp)) -
    646		      user_va;
    647
    648		ret = pagefault_real_mr(mtt, umem_odp, user_va, len,
    649					bytes_mapped, flags);
    650
    651		mlx5r_deref_odp_mkey(&mtt->mmkey);
    652
    653		if (ret < 0)
    654			goto out;
    655		user_va += len;
    656		bcnt -= len;
    657		npages += ret;
    658	}
    659
    660	ret = npages;
    661
    662	/*
    663	 * Any time the implicit_children are changed we must perform an
    664	 * update of the xlt before exiting to ensure the HW and the
     665	 * implicit_children remain synchronized.
    666	 */
    667out:
    668	if (likely(!upd_len))
    669		return ret;
    670
    671	/*
     672	 * Notice the ordering here is not strict: the KSM is updated after
    673	 * the implicit_children is updated, so a parallel page fault could
    674	 * see a MR that is not yet visible in the KSM.  This is similar to a
    675	 * parallel page fault seeing a MR that is being concurrently removed
    676	 * from the KSM. Both of these improbable situations are resolved
    677	 * safely by resuming the HW and then taking another page fault. The
    678	 * next pagefault handler will see the new information.
    679	 */
    680	mutex_lock(&odp_imr->umem_mutex);
    681	err = mlx5r_umr_update_xlt(imr, upd_start_idx, upd_len, 0,
    682				   MLX5_IB_UPD_XLT_INDIRECT |
    683					  MLX5_IB_UPD_XLT_ATOMIC);
    684	mutex_unlock(&odp_imr->umem_mutex);
    685	if (err) {
    686		mlx5_ib_err(mr_to_mdev(imr), "Failed to update PAS\n");
    687		return err;
    688	}
    689	return ret;
    690}
    691
    692static int pagefault_dmabuf_mr(struct mlx5_ib_mr *mr, size_t bcnt,
    693			       u32 *bytes_mapped, u32 flags)
    694{
    695	struct ib_umem_dmabuf *umem_dmabuf = to_ib_umem_dmabuf(mr->umem);
    696	u32 xlt_flags = 0;
    697	int err;
    698	unsigned int page_size;
    699
    700	if (flags & MLX5_PF_FLAGS_ENABLE)
    701		xlt_flags |= MLX5_IB_UPD_XLT_ENABLE;
    702
    703	dma_resv_lock(umem_dmabuf->attach->dmabuf->resv, NULL);
    704	err = ib_umem_dmabuf_map_pages(umem_dmabuf);
    705	if (err) {
    706		dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
    707		return err;
    708	}
    709
    710	page_size = mlx5_umem_find_best_pgsz(&umem_dmabuf->umem, mkc,
    711					     log_page_size, 0,
    712					     umem_dmabuf->umem.iova);
    713	if (unlikely(page_size < PAGE_SIZE)) {
    714		ib_umem_dmabuf_unmap_pages(umem_dmabuf);
    715		err = -EINVAL;
    716	} else {
    717		err = mlx5r_umr_update_mr_pas(mr, xlt_flags);
    718	}
    719	dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
    720
    721	if (err)
    722		return err;
    723
    724	if (bytes_mapped)
    725		*bytes_mapped += bcnt;
    726
    727	return ib_umem_num_pages(mr->umem);
    728}
    729
    730/*
    731 * Returns:
    732 *  -EFAULT: The io_virt->bcnt is not within the MR, it covers pages that are
    733 *           not accessible, or the MR is no longer valid.
    734 *  -EAGAIN/-ENOMEM: The operation should be retried
    735 *
    736 *  -EINVAL/others: General internal malfunction
    737 *  >0: Number of pages mapped
    738 */
    739static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt,
    740			u32 *bytes_mapped, u32 flags)
    741{
    742	struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
    743
    744	if (unlikely(io_virt < mr->ibmr.iova))
    745		return -EFAULT;
    746
    747	if (mr->umem->is_dmabuf)
    748		return pagefault_dmabuf_mr(mr, bcnt, bytes_mapped, flags);
    749
    750	if (!odp->is_implicit_odp) {
    751		u64 user_va;
    752
    753		if (check_add_overflow(io_virt - mr->ibmr.iova,
    754				       (u64)odp->umem.address, &user_va))
    755			return -EFAULT;
    756		if (unlikely(user_va >= ib_umem_end(odp) ||
    757			     ib_umem_end(odp) - user_va < bcnt))
    758			return -EFAULT;
    759		return pagefault_real_mr(mr, odp, user_va, bcnt, bytes_mapped,
    760					 flags);
    761	}
    762	return pagefault_implicit_mr(mr, odp, io_virt, bcnt, bytes_mapped,
    763				     flags);
    764}
    765
    766int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr)
    767{
    768	int ret;
    769
    770	ret = pagefault_real_mr(mr, to_ib_umem_odp(mr->umem), mr->umem->address,
    771				mr->umem->length, NULL,
    772				MLX5_PF_FLAGS_SNAPSHOT | MLX5_PF_FLAGS_ENABLE);
    773	return ret >= 0 ? 0 : ret;
    774}
    775
    776int mlx5_ib_init_dmabuf_mr(struct mlx5_ib_mr *mr)
    777{
    778	int ret;
    779
    780	ret = pagefault_dmabuf_mr(mr, mr->umem->length, NULL,
    781				  MLX5_PF_FLAGS_ENABLE);
    782
    783	return ret >= 0 ? 0 : ret;
    784}
    785
    786struct pf_frame {
    787	struct pf_frame *next;
    788	u32 key;
    789	u64 io_virt;
    790	size_t bcnt;
    791	int depth;
    792};
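/*
 * Note on the structure above: pf_frame entries form a simple LIFO stack
 * that pagefault_single_data_segment() uses to walk indirect (KLM) mkeys
 * iteratively instead of recursively; frame->depth is checked against the
 * device's max_indirection capability before descending further.
 */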
    793
    794static bool mkey_is_eq(struct mlx5_ib_mkey *mmkey, u32 key)
    795{
    796	if (!mmkey)
    797		return false;
    798	if (mmkey->type == MLX5_MKEY_MW)
    799		return mlx5_base_mkey(mmkey->key) == mlx5_base_mkey(key);
    800	return mmkey->key == key;
    801}
    802
    803/*
    804 * Handle a single data segment in a page-fault WQE or RDMA region.
    805 *
    806 * Returns number of OS pages retrieved on success. The caller may continue to
    807 * the next data segment.
    808 * Can return the following error codes:
    809 * -EAGAIN to designate a temporary error. The caller will abort handling the
    810 *  page fault and resolve it.
    811 * -EFAULT when there's an error mapping the requested pages. The caller will
    812 *  abort the page fault handling.
    813 */
    814static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
    815					 struct ib_pd *pd, u32 key,
    816					 u64 io_virt, size_t bcnt,
    817					 u32 *bytes_committed,
    818					 u32 *bytes_mapped)
    819{
    820	int npages = 0, ret, i, outlen, cur_outlen = 0, depth = 0;
    821	struct pf_frame *head = NULL, *frame;
    822	struct mlx5_ib_mkey *mmkey;
    823	struct mlx5_ib_mr *mr;
    824	struct mlx5_klm *pklm;
    825	u32 *out = NULL;
    826	size_t offset;
    827
    828	io_virt += *bytes_committed;
    829	bcnt -= *bytes_committed;
    830
    831next_mr:
    832	xa_lock(&dev->odp_mkeys);
    833	mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(key));
    834	if (!mmkey) {
    835		xa_unlock(&dev->odp_mkeys);
    836		mlx5_ib_dbg(
    837			dev,
    838			"skipping non ODP MR (lkey=0x%06x) in page fault handler.\n",
    839			key);
    840		if (bytes_mapped)
    841			*bytes_mapped += bcnt;
    842		/*
    843		 * The user could specify a SGL with multiple lkeys and only
    844		 * some of them are ODP. Treat the non-ODP ones as fully
    845		 * faulted.
    846		 */
    847		ret = 0;
    848		goto end;
    849	}
    850	refcount_inc(&mmkey->usecount);
    851	xa_unlock(&dev->odp_mkeys);
    852
    853	if (!mkey_is_eq(mmkey, key)) {
    854		mlx5_ib_dbg(dev, "failed to find mkey %x\n", key);
    855		ret = -EFAULT;
    856		goto end;
    857	}
    858
    859	switch (mmkey->type) {
    860	case MLX5_MKEY_MR:
    861		mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
    862
    863		ret = pagefault_mr(mr, io_virt, bcnt, bytes_mapped, 0);
    864		if (ret < 0)
    865			goto end;
    866
    867		mlx5_update_odp_stats(mr, faults, ret);
    868
    869		npages += ret;
    870		ret = 0;
    871		break;
    872
    873	case MLX5_MKEY_MW:
    874	case MLX5_MKEY_INDIRECT_DEVX:
    875		if (depth >= MLX5_CAP_GEN(dev->mdev, max_indirection)) {
    876			mlx5_ib_dbg(dev, "indirection level exceeded\n");
    877			ret = -EFAULT;
    878			goto end;
    879		}
    880
    881		outlen = MLX5_ST_SZ_BYTES(query_mkey_out) +
    882			sizeof(*pklm) * (mmkey->ndescs - 2);
    883
    884		if (outlen > cur_outlen) {
    885			kfree(out);
    886			out = kzalloc(outlen, GFP_KERNEL);
    887			if (!out) {
    888				ret = -ENOMEM;
    889				goto end;
    890			}
    891			cur_outlen = outlen;
    892		}
    893
    894		pklm = (struct mlx5_klm *)MLX5_ADDR_OF(query_mkey_out, out,
    895						       bsf0_klm0_pas_mtt0_1);
    896
    897		ret = mlx5_core_query_mkey(dev->mdev, mmkey->key, out, outlen);
    898		if (ret)
    899			goto end;
    900
    901		offset = io_virt - MLX5_GET64(query_mkey_out, out,
    902					      memory_key_mkey_entry.start_addr);
    903
    904		for (i = 0; bcnt && i < mmkey->ndescs; i++, pklm++) {
    905			if (offset >= be32_to_cpu(pklm->bcount)) {
    906				offset -= be32_to_cpu(pklm->bcount);
    907				continue;
    908			}
    909
    910			frame = kzalloc(sizeof(*frame), GFP_KERNEL);
    911			if (!frame) {
    912				ret = -ENOMEM;
    913				goto end;
    914			}
    915
    916			frame->key = be32_to_cpu(pklm->key);
    917			frame->io_virt = be64_to_cpu(pklm->va) + offset;
    918			frame->bcnt = min_t(size_t, bcnt,
    919					    be32_to_cpu(pklm->bcount) - offset);
    920			frame->depth = depth + 1;
    921			frame->next = head;
    922			head = frame;
    923
    924			bcnt -= frame->bcnt;
    925			offset = 0;
    926		}
    927		break;
    928
    929	default:
    930		mlx5_ib_dbg(dev, "wrong mkey type %d\n", mmkey->type);
    931		ret = -EFAULT;
    932		goto end;
    933	}
    934
    935	if (head) {
    936		frame = head;
    937		head = frame->next;
    938
    939		key = frame->key;
    940		io_virt = frame->io_virt;
    941		bcnt = frame->bcnt;
    942		depth = frame->depth;
    943		kfree(frame);
    944
    945		mlx5r_deref_odp_mkey(mmkey);
    946		goto next_mr;
    947	}
    948
    949end:
    950	if (mmkey)
    951		mlx5r_deref_odp_mkey(mmkey);
    952	while (head) {
    953		frame = head;
    954		head = frame->next;
    955		kfree(frame);
    956	}
    957	kfree(out);
    958
    959	*bytes_committed = 0;
    960	return ret ? ret : npages;
    961}
    962
    963/*
    964 * Parse a series of data segments for page fault handling.
    965 *
    966 * @dev:  Pointer to mlx5 IB device
    967 * @pfault: contains page fault information.
    968 * @wqe: points at the first data segment in the WQE.
    969 * @wqe_end: points after the end of the WQE.
    970 * @bytes_mapped: receives the number of bytes that the function was able to
    971 *                map. This allows the caller to decide intelligently whether
    972 *                enough memory was mapped to resolve the page fault
    973 *                successfully (e.g. enough for the next MTU, or the entire
    974 *                WQE).
    975 * @total_wqe_bytes: receives the total data size of this WQE in bytes (minus
    976 *                   the committed bytes).
     977 * @receive_queue: true if this is a receive-queue WQE, whose sg list may end early
    978 *
    979 * Returns the number of pages loaded if positive, zero for an empty WQE, or a
    980 * negative error code.
    981 */
    982static int pagefault_data_segments(struct mlx5_ib_dev *dev,
    983				   struct mlx5_pagefault *pfault,
    984				   void *wqe,
    985				   void *wqe_end, u32 *bytes_mapped,
    986				   u32 *total_wqe_bytes, bool receive_queue)
    987{
    988	int ret = 0, npages = 0;
    989	u64 io_virt;
    990	u32 key;
    991	u32 byte_count;
    992	size_t bcnt;
    993	int inline_segment;
    994
    995	if (bytes_mapped)
    996		*bytes_mapped = 0;
    997	if (total_wqe_bytes)
    998		*total_wqe_bytes = 0;
    999
   1000	while (wqe < wqe_end) {
   1001		struct mlx5_wqe_data_seg *dseg = wqe;
   1002
   1003		io_virt = be64_to_cpu(dseg->addr);
   1004		key = be32_to_cpu(dseg->lkey);
   1005		byte_count = be32_to_cpu(dseg->byte_count);
   1006		inline_segment = !!(byte_count &  MLX5_INLINE_SEG);
   1007		bcnt	       = byte_count & ~MLX5_INLINE_SEG;
   1008
   1009		if (inline_segment) {
   1010			bcnt = bcnt & MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK;
   1011			wqe += ALIGN(sizeof(struct mlx5_wqe_inline_seg) + bcnt,
   1012				     16);
   1013		} else {
   1014			wqe += sizeof(*dseg);
   1015		}
   1016
   1017		/* receive WQE end of sg list. */
   1018		if (receive_queue && bcnt == 0 && key == MLX5_INVALID_LKEY &&
   1019		    io_virt == 0)
   1020			break;
   1021
   1022		if (!inline_segment && total_wqe_bytes) {
   1023			*total_wqe_bytes += bcnt - min_t(size_t, bcnt,
   1024					pfault->bytes_committed);
   1025		}
   1026
   1027		/* A zero length data segment designates a length of 2GB. */
   1028		if (bcnt == 0)
   1029			bcnt = 1U << 31;
   1030
   1031		if (inline_segment || bcnt <= pfault->bytes_committed) {
   1032			pfault->bytes_committed -=
   1033				min_t(size_t, bcnt,
   1034				      pfault->bytes_committed);
   1035			continue;
   1036		}
   1037
   1038		ret = pagefault_single_data_segment(dev, NULL, key,
   1039						    io_virt, bcnt,
   1040						    &pfault->bytes_committed,
   1041						    bytes_mapped);
   1042		if (ret < 0)
   1043			break;
   1044		npages += ret;
   1045	}
   1046
   1047	return ret < 0 ? ret : npages;
   1048}
   1049
   1050/*
   1051 * Parse initiator WQE. Advances the wqe pointer to point at the
    1052 * scatter-gather list, and sets wqe_end to the end of the WQE.
   1053 */
   1054static int mlx5_ib_mr_initiator_pfault_handler(
   1055	struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault,
   1056	struct mlx5_ib_qp *qp, void **wqe, void **wqe_end, int wqe_length)
   1057{
   1058	struct mlx5_wqe_ctrl_seg *ctrl = *wqe;
   1059	u16 wqe_index = pfault->wqe.wqe_index;
   1060	struct mlx5_base_av *av;
   1061	unsigned ds, opcode;
   1062	u32 qpn = qp->trans_qp.base.mqp.qpn;
   1063
   1064	ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
   1065	if (ds * MLX5_WQE_DS_UNITS > wqe_length) {
   1066		mlx5_ib_err(dev, "Unable to read the complete WQE. ds = 0x%x, ret = 0x%x\n",
   1067			    ds, wqe_length);
   1068		return -EFAULT;
   1069	}
   1070
   1071	if (ds == 0) {
   1072		mlx5_ib_err(dev, "Got WQE with zero DS. wqe_index=%x, qpn=%x\n",
   1073			    wqe_index, qpn);
   1074		return -EFAULT;
   1075	}
   1076
   1077	*wqe_end = *wqe + ds * MLX5_WQE_DS_UNITS;
   1078	*wqe += sizeof(*ctrl);
   1079
   1080	opcode = be32_to_cpu(ctrl->opmod_idx_opcode) &
   1081		 MLX5_WQE_CTRL_OPCODE_MASK;
   1082
   1083	if (qp->type == IB_QPT_XRC_INI)
   1084		*wqe += sizeof(struct mlx5_wqe_xrc_seg);
   1085
   1086	if (qp->type == IB_QPT_UD || qp->type == MLX5_IB_QPT_DCI) {
   1087		av = *wqe;
   1088		if (av->dqp_dct & cpu_to_be32(MLX5_EXTENDED_UD_AV))
   1089			*wqe += sizeof(struct mlx5_av);
   1090		else
   1091			*wqe += sizeof(struct mlx5_base_av);
   1092	}
   1093
   1094	switch (opcode) {
   1095	case MLX5_OPCODE_RDMA_WRITE:
   1096	case MLX5_OPCODE_RDMA_WRITE_IMM:
   1097	case MLX5_OPCODE_RDMA_READ:
   1098		*wqe += sizeof(struct mlx5_wqe_raddr_seg);
   1099		break;
   1100	case MLX5_OPCODE_ATOMIC_CS:
   1101	case MLX5_OPCODE_ATOMIC_FA:
   1102		*wqe += sizeof(struct mlx5_wqe_raddr_seg);
   1103		*wqe += sizeof(struct mlx5_wqe_atomic_seg);
   1104		break;
   1105	}
   1106
   1107	return 0;
   1108}
   1109
   1110/*
   1111 * Parse responder WQE and set wqe_end to the end of the WQE.
   1112 */
   1113static int mlx5_ib_mr_responder_pfault_handler_srq(struct mlx5_ib_dev *dev,
   1114						   struct mlx5_ib_srq *srq,
   1115						   void **wqe, void **wqe_end,
   1116						   int wqe_length)
   1117{
   1118	int wqe_size = 1 << srq->msrq.wqe_shift;
   1119
   1120	if (wqe_size > wqe_length) {
   1121		mlx5_ib_err(dev, "Couldn't read all of the receive WQE's content\n");
   1122		return -EFAULT;
   1123	}
   1124
   1125	*wqe_end = *wqe + wqe_size;
   1126	*wqe += sizeof(struct mlx5_wqe_srq_next_seg);
   1127
   1128	return 0;
   1129}
   1130
   1131static int mlx5_ib_mr_responder_pfault_handler_rq(struct mlx5_ib_dev *dev,
   1132						  struct mlx5_ib_qp *qp,
   1133						  void *wqe, void **wqe_end,
   1134						  int wqe_length)
   1135{
   1136	struct mlx5_ib_wq *wq = &qp->rq;
   1137	int wqe_size = 1 << wq->wqe_shift;
   1138
   1139	if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE) {
   1140		mlx5_ib_err(dev, "ODP fault with WQE signatures is not supported\n");
   1141		return -EFAULT;
   1142	}
   1143
   1144	if (wqe_size > wqe_length) {
   1145		mlx5_ib_err(dev, "Couldn't read all of the receive WQE's content\n");
   1146		return -EFAULT;
   1147	}
   1148
   1149	*wqe_end = wqe + wqe_size;
   1150
   1151	return 0;
   1152}
   1153
   1154static inline struct mlx5_core_rsc_common *odp_get_rsc(struct mlx5_ib_dev *dev,
   1155						       u32 wq_num, int pf_type)
   1156{
   1157	struct mlx5_core_rsc_common *common = NULL;
   1158	struct mlx5_core_srq *srq;
   1159
   1160	switch (pf_type) {
   1161	case MLX5_WQE_PF_TYPE_RMP:
   1162		srq = mlx5_cmd_get_srq(dev, wq_num);
   1163		if (srq)
   1164			common = &srq->common;
   1165		break;
   1166	case MLX5_WQE_PF_TYPE_REQ_SEND_OR_WRITE:
   1167	case MLX5_WQE_PF_TYPE_RESP:
   1168	case MLX5_WQE_PF_TYPE_REQ_READ_OR_ATOMIC:
   1169		common = mlx5_core_res_hold(dev, wq_num, MLX5_RES_QP);
   1170		break;
   1171	default:
   1172		break;
   1173	}
   1174
   1175	return common;
   1176}
   1177
   1178static inline struct mlx5_ib_qp *res_to_qp(struct mlx5_core_rsc_common *res)
   1179{
   1180	struct mlx5_core_qp *mqp = (struct mlx5_core_qp *)res;
   1181
   1182	return to_mibqp(mqp);
   1183}
   1184
   1185static inline struct mlx5_ib_srq *res_to_srq(struct mlx5_core_rsc_common *res)
   1186{
   1187	struct mlx5_core_srq *msrq =
   1188		container_of(res, struct mlx5_core_srq, common);
   1189
   1190	return to_mibsrq(msrq);
   1191}
   1192
   1193static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
   1194					  struct mlx5_pagefault *pfault)
   1195{
   1196	bool sq = pfault->type & MLX5_PFAULT_REQUESTOR;
   1197	u16 wqe_index = pfault->wqe.wqe_index;
   1198	void *wqe, *wqe_start = NULL, *wqe_end = NULL;
   1199	u32 bytes_mapped, total_wqe_bytes;
   1200	struct mlx5_core_rsc_common *res;
   1201	int resume_with_error = 1;
   1202	struct mlx5_ib_qp *qp;
   1203	size_t bytes_copied;
   1204	int ret = 0;
   1205
   1206	res = odp_get_rsc(dev, pfault->wqe.wq_num, pfault->type);
   1207	if (!res) {
   1208		mlx5_ib_dbg(dev, "wqe page fault for missing resource %d\n", pfault->wqe.wq_num);
   1209		return;
   1210	}
   1211
   1212	if (res->res != MLX5_RES_QP && res->res != MLX5_RES_SRQ &&
   1213	    res->res != MLX5_RES_XSRQ) {
   1214		mlx5_ib_err(dev, "wqe page fault for unsupported type %d\n",
   1215			    pfault->type);
   1216		goto resolve_page_fault;
   1217	}
   1218
   1219	wqe_start = (void *)__get_free_page(GFP_KERNEL);
   1220	if (!wqe_start) {
   1221		mlx5_ib_err(dev, "Error allocating memory for IO page fault handling.\n");
   1222		goto resolve_page_fault;
   1223	}
   1224
   1225	wqe = wqe_start;
   1226	qp = (res->res == MLX5_RES_QP) ? res_to_qp(res) : NULL;
   1227	if (qp && sq) {
   1228		ret = mlx5_ib_read_wqe_sq(qp, wqe_index, wqe, PAGE_SIZE,
   1229					  &bytes_copied);
   1230		if (ret)
   1231			goto read_user;
   1232		ret = mlx5_ib_mr_initiator_pfault_handler(
   1233			dev, pfault, qp, &wqe, &wqe_end, bytes_copied);
   1234	} else if (qp && !sq) {
   1235		ret = mlx5_ib_read_wqe_rq(qp, wqe_index, wqe, PAGE_SIZE,
   1236					  &bytes_copied);
   1237		if (ret)
   1238			goto read_user;
   1239		ret = mlx5_ib_mr_responder_pfault_handler_rq(
   1240			dev, qp, wqe, &wqe_end, bytes_copied);
   1241	} else if (!qp) {
   1242		struct mlx5_ib_srq *srq = res_to_srq(res);
   1243
   1244		ret = mlx5_ib_read_wqe_srq(srq, wqe_index, wqe, PAGE_SIZE,
   1245					   &bytes_copied);
   1246		if (ret)
   1247			goto read_user;
   1248		ret = mlx5_ib_mr_responder_pfault_handler_srq(
   1249			dev, srq, &wqe, &wqe_end, bytes_copied);
   1250	}
   1251
   1252	if (ret < 0 || wqe >= wqe_end)
   1253		goto resolve_page_fault;
   1254
   1255	ret = pagefault_data_segments(dev, pfault, wqe, wqe_end, &bytes_mapped,
   1256				      &total_wqe_bytes, !sq);
   1257	if (ret == -EAGAIN)
   1258		goto out;
   1259
   1260	if (ret < 0 || total_wqe_bytes > bytes_mapped)
   1261		goto resolve_page_fault;
   1262
   1263out:
   1264	ret = 0;
   1265	resume_with_error = 0;
   1266
   1267read_user:
   1268	if (ret)
   1269		mlx5_ib_err(
   1270			dev,
   1271			"Failed reading a WQE following page fault, error %d, wqe_index %x, qpn %x\n",
   1272			ret, wqe_index, pfault->token);
   1273
   1274resolve_page_fault:
   1275	mlx5_ib_page_fault_resume(dev, pfault, resume_with_error);
   1276	mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x resume_with_error=%d, type: 0x%x\n",
   1277		    pfault->wqe.wq_num, resume_with_error,
   1278		    pfault->type);
   1279	mlx5_core_res_put(res);
   1280	free_page((unsigned long)wqe_start);
   1281}
   1282
   1283static int pages_in_range(u64 address, u32 length)
   1284{
   1285	return (ALIGN(address + length, PAGE_SIZE) -
   1286		(address & PAGE_MASK)) >> PAGE_SHIFT;
   1287}
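/*
 * Example for pages_in_range(), assuming 4 KiB pages: address = 0x1800 and
 * length = 0x1000 touch the two pages at 0x1000 and 0x2000, so
 * (ALIGN(0x2800, 0x1000) - 0x1000) >> 12 = (0x3000 - 0x1000) >> 12 = 2.
 */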
   1288
   1289static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
   1290					   struct mlx5_pagefault *pfault)
   1291{
   1292	u64 address;
   1293	u32 length;
   1294	u32 prefetch_len = pfault->bytes_committed;
   1295	int prefetch_activated = 0;
   1296	u32 rkey = pfault->rdma.r_key;
   1297	int ret;
   1298
   1299	/* The RDMA responder handler handles the page fault in two parts.
   1300	 * First it brings the necessary pages for the current packet
   1301	 * (and uses the pfault context), and then (after resuming the QP)
   1302	 * prefetches more pages. The second operation cannot use the pfault
   1303	 * context and therefore uses the dummy_pfault context allocated on
   1304	 * the stack */
   1305	pfault->rdma.rdma_va += pfault->bytes_committed;
   1306	pfault->rdma.rdma_op_len -= min(pfault->bytes_committed,
   1307					 pfault->rdma.rdma_op_len);
   1308	pfault->bytes_committed = 0;
   1309
   1310	address = pfault->rdma.rdma_va;
   1311	length  = pfault->rdma.rdma_op_len;
   1312
   1313	/* For some operations, the hardware cannot tell the exact message
   1314	 * length, and in those cases it reports zero. Use prefetch
   1315	 * logic. */
   1316	if (length == 0) {
   1317		prefetch_activated = 1;
   1318		length = pfault->rdma.packet_size;
   1319		prefetch_len = min(MAX_PREFETCH_LEN, prefetch_len);
   1320	}
   1321
   1322	ret = pagefault_single_data_segment(dev, NULL, rkey, address, length,
   1323					    &pfault->bytes_committed, NULL);
   1324	if (ret == -EAGAIN) {
   1325		/* We're racing with an invalidation, don't prefetch */
   1326		prefetch_activated = 0;
   1327	} else if (ret < 0 || pages_in_range(address, length) > ret) {
   1328		mlx5_ib_page_fault_resume(dev, pfault, 1);
   1329		if (ret != -ENOENT)
   1330			mlx5_ib_dbg(dev, "PAGE FAULT error %d. QP 0x%x, type: 0x%x\n",
   1331				    ret, pfault->token, pfault->type);
   1332		return;
   1333	}
   1334
   1335	mlx5_ib_page_fault_resume(dev, pfault, 0);
   1336	mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x, type: 0x%x, prefetch_activated: %d\n",
   1337		    pfault->token, pfault->type,
   1338		    prefetch_activated);
   1339
   1340	/* At this point, there might be a new pagefault already arriving in
   1341	 * the eq, switch to the dummy pagefault for the rest of the
   1342	 * processing. We're still OK with the objects being alive as the
   1343	 * work-queue is being fenced. */
   1344
   1345	if (prefetch_activated) {
   1346		u32 bytes_committed = 0;
   1347
   1348		ret = pagefault_single_data_segment(dev, NULL, rkey, address,
   1349						    prefetch_len,
   1350						    &bytes_committed, NULL);
   1351		if (ret < 0 && ret != -EAGAIN) {
   1352			mlx5_ib_dbg(dev, "Prefetch failed. ret: %d, QP 0x%x, address: 0x%.16llx, length = 0x%.16x\n",
   1353				    ret, pfault->token, address, prefetch_len);
   1354		}
   1355	}
   1356}
   1357
   1358static void mlx5_ib_pfault(struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault)
   1359{
   1360	u8 event_subtype = pfault->event_subtype;
   1361
   1362	switch (event_subtype) {
   1363	case MLX5_PFAULT_SUBTYPE_WQE:
   1364		mlx5_ib_mr_wqe_pfault_handler(dev, pfault);
   1365		break;
   1366	case MLX5_PFAULT_SUBTYPE_RDMA:
   1367		mlx5_ib_mr_rdma_pfault_handler(dev, pfault);
   1368		break;
   1369	default:
   1370		mlx5_ib_err(dev, "Invalid page fault event subtype: 0x%x\n",
   1371			    event_subtype);
   1372		mlx5_ib_page_fault_resume(dev, pfault, 1);
   1373	}
   1374}
   1375
   1376static void mlx5_ib_eqe_pf_action(struct work_struct *work)
   1377{
   1378	struct mlx5_pagefault *pfault = container_of(work,
   1379						     struct mlx5_pagefault,
   1380						     work);
   1381	struct mlx5_ib_pf_eq *eq = pfault->eq;
   1382
   1383	mlx5_ib_pfault(eq->dev, pfault);
   1384	mempool_free(pfault, eq->pool);
   1385}
   1386
   1387static void mlx5_ib_eq_pf_process(struct mlx5_ib_pf_eq *eq)
   1388{
   1389	struct mlx5_eqe_page_fault *pf_eqe;
   1390	struct mlx5_pagefault *pfault;
   1391	struct mlx5_eqe *eqe;
   1392	int cc = 0;
   1393
   1394	while ((eqe = mlx5_eq_get_eqe(eq->core, cc))) {
   1395		pfault = mempool_alloc(eq->pool, GFP_ATOMIC);
   1396		if (!pfault) {
   1397			schedule_work(&eq->work);
   1398			break;
   1399		}
   1400
   1401		pf_eqe = &eqe->data.page_fault;
   1402		pfault->event_subtype = eqe->sub_type;
   1403		pfault->bytes_committed = be32_to_cpu(pf_eqe->bytes_committed);
   1404
   1405		mlx5_ib_dbg(eq->dev,
   1406			    "PAGE_FAULT: subtype: 0x%02x, bytes_committed: 0x%06x\n",
   1407			    eqe->sub_type, pfault->bytes_committed);
   1408
   1409		switch (eqe->sub_type) {
   1410		case MLX5_PFAULT_SUBTYPE_RDMA:
   1411			/* RDMA based event */
   1412			pfault->type =
   1413				be32_to_cpu(pf_eqe->rdma.pftype_token) >> 24;
   1414			pfault->token =
   1415				be32_to_cpu(pf_eqe->rdma.pftype_token) &
   1416				MLX5_24BIT_MASK;
   1417			pfault->rdma.r_key =
   1418				be32_to_cpu(pf_eqe->rdma.r_key);
   1419			pfault->rdma.packet_size =
   1420				be16_to_cpu(pf_eqe->rdma.packet_length);
   1421			pfault->rdma.rdma_op_len =
   1422				be32_to_cpu(pf_eqe->rdma.rdma_op_len);
   1423			pfault->rdma.rdma_va =
   1424				be64_to_cpu(pf_eqe->rdma.rdma_va);
   1425			mlx5_ib_dbg(eq->dev,
   1426				    "PAGE_FAULT: type:0x%x, token: 0x%06x, r_key: 0x%08x\n",
   1427				    pfault->type, pfault->token,
   1428				    pfault->rdma.r_key);
   1429			mlx5_ib_dbg(eq->dev,
   1430				    "PAGE_FAULT: rdma_op_len: 0x%08x, rdma_va: 0x%016llx\n",
   1431				    pfault->rdma.rdma_op_len,
   1432				    pfault->rdma.rdma_va);
   1433			break;
   1434
   1435		case MLX5_PFAULT_SUBTYPE_WQE:
   1436			/* WQE based event */
   1437			pfault->type =
   1438				(be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24) & 0x7;
   1439			pfault->token =
   1440				be32_to_cpu(pf_eqe->wqe.token);
   1441			pfault->wqe.wq_num =
   1442				be32_to_cpu(pf_eqe->wqe.pftype_wq) &
   1443				MLX5_24BIT_MASK;
   1444			pfault->wqe.wqe_index =
   1445				be16_to_cpu(pf_eqe->wqe.wqe_index);
   1446			pfault->wqe.packet_size =
   1447				be16_to_cpu(pf_eqe->wqe.packet_length);
   1448			mlx5_ib_dbg(eq->dev,
   1449				    "PAGE_FAULT: type:0x%x, token: 0x%06x, wq_num: 0x%06x, wqe_index: 0x%04x\n",
   1450				    pfault->type, pfault->token,
   1451				    pfault->wqe.wq_num,
   1452				    pfault->wqe.wqe_index);
   1453			break;
   1454
   1455		default:
   1456			mlx5_ib_warn(eq->dev,
   1457				     "Unsupported page fault event sub-type: 0x%02hhx\n",
   1458				     eqe->sub_type);
   1459			/* Unsupported page faults should still be
   1460			 * resolved by the page fault handler
   1461			 */
   1462		}
   1463
   1464		pfault->eq = eq;
   1465		INIT_WORK(&pfault->work, mlx5_ib_eqe_pf_action);
   1466		queue_work(eq->wq, &pfault->work);
   1467
   1468		cc = mlx5_eq_update_cc(eq->core, ++cc);
   1469	}
   1470
   1471	mlx5_eq_update_ci(eq->core, cc, 1);
   1472}
   1473
   1474static int mlx5_ib_eq_pf_int(struct notifier_block *nb, unsigned long type,
   1475			     void *data)
   1476{
   1477	struct mlx5_ib_pf_eq *eq =
   1478		container_of(nb, struct mlx5_ib_pf_eq, irq_nb);
   1479	unsigned long flags;
   1480
   1481	if (spin_trylock_irqsave(&eq->lock, flags)) {
   1482		mlx5_ib_eq_pf_process(eq);
   1483		spin_unlock_irqrestore(&eq->lock, flags);
   1484	} else {
   1485		schedule_work(&eq->work);
   1486	}
   1487
   1488	return IRQ_HANDLED;
   1489}
   1490
   1491/* mempool_refill() was proposed but unfortunately wasn't accepted
   1492 * http://lkml.iu.edu/hypermail/linux/kernel/1512.1/05073.html
   1493 * Cheap workaround.
   1494 */
   1495static void mempool_refill(mempool_t *pool)
   1496{
   1497	while (pool->curr_nr < pool->min_nr)
   1498		mempool_free(mempool_alloc(pool, GFP_KERNEL), pool);
   1499}
   1500
   1501static void mlx5_ib_eq_pf_action(struct work_struct *work)
   1502{
   1503	struct mlx5_ib_pf_eq *eq =
   1504		container_of(work, struct mlx5_ib_pf_eq, work);
   1505
   1506	mempool_refill(eq->pool);
   1507
   1508	spin_lock_irq(&eq->lock);
   1509	mlx5_ib_eq_pf_process(eq);
   1510	spin_unlock_irq(&eq->lock);
   1511}
   1512
   1513enum {
   1514	MLX5_IB_NUM_PF_EQE	= 0x1000,
   1515	MLX5_IB_NUM_PF_DRAIN	= 64,
   1516};
   1517
   1518int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
   1519{
   1520	struct mlx5_eq_param param = {};
   1521	int err = 0;
   1522
   1523	mutex_lock(&dev->odp_eq_mutex);
   1524	if (eq->core)
   1525		goto unlock;
   1526	INIT_WORK(&eq->work, mlx5_ib_eq_pf_action);
   1527	spin_lock_init(&eq->lock);
   1528	eq->dev = dev;
   1529
   1530	eq->pool = mempool_create_kmalloc_pool(MLX5_IB_NUM_PF_DRAIN,
   1531					       sizeof(struct mlx5_pagefault));
   1532	if (!eq->pool) {
   1533		err = -ENOMEM;
   1534		goto unlock;
   1535	}
   1536
   1537	eq->wq = alloc_workqueue("mlx5_ib_page_fault",
   1538				 WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM,
   1539				 MLX5_NUM_CMD_EQE);
   1540	if (!eq->wq) {
   1541		err = -ENOMEM;
   1542		goto err_mempool;
   1543	}
   1544
   1545	eq->irq_nb.notifier_call = mlx5_ib_eq_pf_int;
   1546	param = (struct mlx5_eq_param) {
   1547		.nent = MLX5_IB_NUM_PF_EQE,
   1548	};
   1549	param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT;
   1550	eq->core = mlx5_eq_create_generic(dev->mdev, &param);
   1551	if (IS_ERR(eq->core)) {
   1552		err = PTR_ERR(eq->core);
   1553		goto err_wq;
   1554	}
   1555	err = mlx5_eq_enable(dev->mdev, eq->core, &eq->irq_nb);
   1556	if (err) {
   1557		mlx5_ib_err(dev, "failed to enable odp EQ %d\n", err);
   1558		goto err_eq;
   1559	}
   1560
   1561	mutex_unlock(&dev->odp_eq_mutex);
   1562	return 0;
   1563err_eq:
   1564	mlx5_eq_destroy_generic(dev->mdev, eq->core);
   1565err_wq:
   1566	eq->core = NULL;
   1567	destroy_workqueue(eq->wq);
   1568err_mempool:
   1569	mempool_destroy(eq->pool);
   1570unlock:
   1571	mutex_unlock(&dev->odp_eq_mutex);
   1572	return err;
   1573}
   1574
   1575static int
   1576mlx5_ib_odp_destroy_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
   1577{
   1578	int err;
   1579
   1580	if (!eq->core)
   1581		return 0;
   1582	mlx5_eq_disable(dev->mdev, eq->core, &eq->irq_nb);
   1583	err = mlx5_eq_destroy_generic(dev->mdev, eq->core);
   1584	cancel_work_sync(&eq->work);
   1585	destroy_workqueue(eq->wq);
   1586	mempool_destroy(eq->pool);
   1587
   1588	return err;
   1589}
   1590
   1591void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent)
   1592{
   1593	if (!(ent->dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
   1594		return;
   1595
   1596	switch (ent->order - 2) {
   1597	case MLX5_IMR_MTT_CACHE_ENTRY:
   1598		ent->page = PAGE_SHIFT;
   1599		ent->ndescs = MLX5_IMR_MTT_ENTRIES;
   1600		ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
   1601		ent->limit = 0;
   1602		break;
   1603
   1604	case MLX5_IMR_KSM_CACHE_ENTRY:
   1605		ent->page = MLX5_KSM_PAGE_SHIFT;
   1606		ent->ndescs = mlx5_imr_ksm_entries;
   1607		ent->access_mode = MLX5_MKC_ACCESS_MODE_KSM;
   1608		ent->limit = 0;
   1609		break;
   1610	}
   1611}
   1612
   1613static const struct ib_device_ops mlx5_ib_dev_odp_ops = {
   1614	.advise_mr = mlx5_ib_advise_mr,
   1615};
   1616
   1617int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
   1618{
   1619	int ret = 0;
   1620
   1621	internal_fill_odp_caps(dev);
   1622
   1623	if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))
   1624		return ret;
   1625
   1626	ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_odp_ops);
   1627
   1628	if (dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT) {
   1629		ret = mlx5_cmd_null_mkey(dev->mdev, &dev->null_mkey);
   1630		if (ret) {
   1631			mlx5_ib_err(dev, "Error getting null_mkey %d\n", ret);
   1632			return ret;
   1633		}
   1634	}
   1635
   1636	mutex_init(&dev->odp_eq_mutex);
   1637	return ret;
   1638}
   1639
   1640void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *dev)
   1641{
   1642	if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))
   1643		return;
   1644
   1645	mlx5_ib_odp_destroy_eq(dev, &dev->odp_pf_eq);
   1646}
   1647
   1648int mlx5_ib_odp_init(void)
   1649{
   1650	mlx5_imr_ksm_entries = BIT_ULL(get_order(TASK_SIZE) -
   1651				       MLX5_IMR_MTT_BITS);
   1652
   1653	return 0;
   1654}
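/*
 * Worked example of the computation above (a sketch, assuming x86_64 with
 * 4 KiB pages and TASK_SIZE just under 2^47): get_order(TASK_SIZE) = 35 and
 * MLX5_IMR_MTT_BITS = 18, so mlx5_imr_ksm_entries = 2^17 = 131072 KSM
 * entries, each covering a 1 GiB child MR, i.e. 128 TiB of address space.
 */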
   1655
   1656struct prefetch_mr_work {
   1657	struct work_struct work;
   1658	u32 pf_flags;
   1659	u32 num_sge;
   1660	struct {
   1661		u64 io_virt;
   1662		struct mlx5_ib_mr *mr;
   1663		size_t length;
   1664	} frags[];
   1665};
   1666
   1667static void destroy_prefetch_work(struct prefetch_mr_work *work)
   1668{
   1669	u32 i;
   1670
   1671	for (i = 0; i < work->num_sge; ++i)
   1672		mlx5r_deref_odp_mkey(&work->frags[i].mr->mmkey);
   1673
   1674	kvfree(work);
   1675}
   1676
   1677static struct mlx5_ib_mr *
   1678get_prefetchable_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
   1679		    u32 lkey)
   1680{
   1681	struct mlx5_ib_dev *dev = to_mdev(pd->device);
   1682	struct mlx5_ib_mr *mr = NULL;
   1683	struct mlx5_ib_mkey *mmkey;
   1684
   1685	xa_lock(&dev->odp_mkeys);
   1686	mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(lkey));
   1687	if (!mmkey || mmkey->key != lkey) {
   1688		mr = ERR_PTR(-ENOENT);
   1689		goto end;
   1690	}
   1691	if (mmkey->type != MLX5_MKEY_MR) {
   1692		mr = ERR_PTR(-EINVAL);
   1693		goto end;
   1694	}
   1695
   1696	mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
   1697
   1698	if (mr->ibmr.pd != pd) {
   1699		mr = ERR_PTR(-EPERM);
   1700		goto end;
   1701	}
   1702
   1703	/* prefetch with write-access must be supported by the MR */
   1704	if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE &&
   1705	    !mr->umem->writable) {
   1706		mr = ERR_PTR(-EPERM);
   1707		goto end;
   1708	}
   1709
   1710	refcount_inc(&mmkey->usecount);
   1711end:
   1712	xa_unlock(&dev->odp_mkeys);
   1713	return mr;
   1714}
   1715
   1716static void mlx5_ib_prefetch_mr_work(struct work_struct *w)
   1717{
   1718	struct prefetch_mr_work *work =
   1719		container_of(w, struct prefetch_mr_work, work);
   1720	u32 bytes_mapped = 0;
   1721	int ret;
   1722	u32 i;
   1723
    1724	/* We rely on IB/core to only execute this work when num_sge != 0. */
   1725	WARN_ON(!work->num_sge);
   1726	for (i = 0; i < work->num_sge; ++i) {
   1727		ret = pagefault_mr(work->frags[i].mr, work->frags[i].io_virt,
   1728				   work->frags[i].length, &bytes_mapped,
   1729				   work->pf_flags);
   1730		if (ret <= 0)
   1731			continue;
   1732		mlx5_update_odp_stats(work->frags[i].mr, prefetch, ret);
   1733	}
   1734
   1735	destroy_prefetch_work(work);
   1736}
   1737
   1738static int init_prefetch_work(struct ib_pd *pd,
   1739			       enum ib_uverbs_advise_mr_advice advice,
   1740			       u32 pf_flags, struct prefetch_mr_work *work,
   1741			       struct ib_sge *sg_list, u32 num_sge)
   1742{
   1743	u32 i;
   1744
   1745	INIT_WORK(&work->work, mlx5_ib_prefetch_mr_work);
   1746	work->pf_flags = pf_flags;
   1747
   1748	for (i = 0; i < num_sge; ++i) {
   1749		struct mlx5_ib_mr *mr;
   1750
   1751		mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey);
   1752		if (IS_ERR(mr)) {
   1753			work->num_sge = i;
   1754			return PTR_ERR(mr);
   1755		}
   1756		work->frags[i].io_virt = sg_list[i].addr;
   1757		work->frags[i].length = sg_list[i].length;
   1758		work->frags[i].mr = mr;
   1759	}
   1760	work->num_sge = num_sge;
   1761	return 0;
   1762}
   1763
   1764static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd,
   1765				    enum ib_uverbs_advise_mr_advice advice,
   1766				    u32 pf_flags, struct ib_sge *sg_list,
   1767				    u32 num_sge)
   1768{
   1769	u32 bytes_mapped = 0;
   1770	int ret = 0;
   1771	u32 i;
   1772
   1773	for (i = 0; i < num_sge; ++i) {
   1774		struct mlx5_ib_mr *mr;
   1775
   1776		mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey);
   1777		if (IS_ERR(mr))
   1778			return PTR_ERR(mr);
   1779		ret = pagefault_mr(mr, sg_list[i].addr, sg_list[i].length,
   1780				   &bytes_mapped, pf_flags);
   1781		if (ret < 0) {
   1782			mlx5r_deref_odp_mkey(&mr->mmkey);
   1783			return ret;
   1784		}
   1785		mlx5_update_odp_stats(mr, prefetch, ret);
   1786		mlx5r_deref_odp_mkey(&mr->mmkey);
   1787	}
   1788
   1789	return 0;
   1790}
   1791
   1792int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
   1793			       enum ib_uverbs_advise_mr_advice advice,
   1794			       u32 flags, struct ib_sge *sg_list, u32 num_sge)
   1795{
   1796	u32 pf_flags = 0;
   1797	struct prefetch_mr_work *work;
   1798	int rc;
   1799
   1800	if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH)
   1801		pf_flags |= MLX5_PF_FLAGS_DOWNGRADE;
   1802
   1803	if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT)
   1804		pf_flags |= MLX5_PF_FLAGS_SNAPSHOT;
   1805
   1806	if (flags & IB_UVERBS_ADVISE_MR_FLAG_FLUSH)
   1807		return mlx5_ib_prefetch_sg_list(pd, advice, pf_flags, sg_list,
   1808						num_sge);
   1809
   1810	work = kvzalloc(struct_size(work, frags, num_sge), GFP_KERNEL);
   1811	if (!work)
   1812		return -ENOMEM;
   1813
   1814	rc = init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge);
   1815	if (rc) {
   1816		destroy_prefetch_work(work);
   1817		return rc;
   1818	}
   1819	queue_work(system_unbound_wq, &work->work);
   1820	return 0;
   1821}