cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

rxe_mmap.c (3524B)


      1// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
      2/*
      3 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
      4 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
      5 */
      6
      7#include <linux/vmalloc.h>
      8#include <linux/mm.h>
      9#include <linux/errno.h>
     10#include <rdma/uverbs_ioctl.h>
     11
     12#include "rxe.h"
     13#include "rxe_loc.h"
     14#include "rxe_queue.h"
     15
     16void rxe_mmap_release(struct kref *ref)
     17{
     18	struct rxe_mmap_info *ip = container_of(ref,
     19					struct rxe_mmap_info, ref);
     20	struct rxe_dev *rxe = to_rdev(ip->context->device);
     21
     22	spin_lock_bh(&rxe->pending_lock);
     23
     24	if (!list_empty(&ip->pending_mmaps))
     25		list_del(&ip->pending_mmaps);
     26
     27	spin_unlock_bh(&rxe->pending_lock);
     28
     29	vfree(ip->obj);		/* buf */
     30	kfree(ip);
     31}
     32
     33/*
     34 * open and close keep track of how many times the memory region is mapped,
     35 * to avoid releasing it.
     36 */
     37static void rxe_vma_open(struct vm_area_struct *vma)
     38{
     39	struct rxe_mmap_info *ip = vma->vm_private_data;
     40
     41	kref_get(&ip->ref);
     42}
     43
     44static void rxe_vma_close(struct vm_area_struct *vma)
     45{
     46	struct rxe_mmap_info *ip = vma->vm_private_data;
     47
     48	kref_put(&ip->ref, rxe_mmap_release);
     49}
     50
/*
 * VMA callbacks: open/close only maintain the mmap_info refcount, so
 * the region outlives every live mapping of it.
 */
static const struct vm_operations_struct rxe_vm_ops = {
	.open = rxe_vma_open,
	.close = rxe_vma_close,
};
     55
/**
 * rxe_mmap - create a new mmap region
 * @context: the IB user context of the process making the mmap() call
 * @vma: the VMA to be initialized
 *
 * Matches the mmap() file offset against the device's list of objects
 * waiting to be mapped (queued there by rxe_create_mmap_info()) and, on
 * a match, maps the object's vmalloc'ed buffer into the VMA.
 *
 * Return zero if the mmap is OK. Otherwise, return an errno.
 */
int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct rxe_dev *rxe = to_rdev(context->device);
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct rxe_mmap_info *ip, *pp;
	int ret;

	/*
	 * Search the device's list of objects waiting for a mmap call.
	 * Normally, this list is very short since a call to create a
	 * CQ, QP, or SRQ is soon followed by a call to mmap().
	 */
	spin_lock_bh(&rxe->pending_lock);
	list_for_each_entry_safe(ip, pp, &rxe->pending_mmaps, pending_mmaps) {
		/* Match on both the owning context and the offset cookie. */
		if (context != ip->context || (__u64)offset != ip->info.offset)
			continue;

		/* Don't allow a mmap larger than the object. */
		if (size > ip->info.size) {
			pr_err("mmap region is larger than the object!\n");
			spin_unlock_bh(&rxe->pending_lock);
			ret = -EINVAL;
			goto done;
		}

		goto found_it;
	}
	pr_warn("unable to find pending mmap info\n");
	spin_unlock_bh(&rxe->pending_lock);
	ret = -EINVAL;
	goto done;

found_it:
	/* Claim the entry; each pending mmap_info may be mapped only once. */
	list_del_init(&ip->pending_mmaps);
	spin_unlock_bh(&rxe->pending_lock);

	ret = remap_vmalloc_range(vma, ip->obj, 0);
	if (ret) {
		/*
		 * NOTE(review): on this path the entry has already been
		 * removed from the pending list but its reference is not
		 * dropped here — presumably freed at context teardown;
		 * confirm against the callers.
		 */
		pr_err("err %d from remap_vmalloc_range\n", ret);
		goto done;
	}

	/* Install the refcounting vm_ops and take the mapping's reference. */
	vma->vm_ops = &rxe_vm_ops;
	vma->vm_private_data = ip;
	rxe_vma_open(vma);
done:
	return ret;
}
    111
    112/*
    113 * Allocate information for rxe_mmap
    114 */
    115struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe, u32 size,
    116					   struct ib_udata *udata, void *obj)
    117{
    118	struct rxe_mmap_info *ip;
    119
    120	if (!udata)
    121		return ERR_PTR(-EINVAL);
    122
    123	ip = kmalloc(sizeof(*ip), GFP_KERNEL);
    124	if (!ip)
    125		return ERR_PTR(-ENOMEM);
    126
    127	size = PAGE_ALIGN(size);
    128
    129	spin_lock_bh(&rxe->mmap_offset_lock);
    130
    131	if (rxe->mmap_offset == 0)
    132		rxe->mmap_offset = ALIGN(PAGE_SIZE, SHMLBA);
    133
    134	ip->info.offset = rxe->mmap_offset;
    135	rxe->mmap_offset += ALIGN(size, SHMLBA);
    136
    137	spin_unlock_bh(&rxe->mmap_offset_lock);
    138
    139	INIT_LIST_HEAD(&ip->pending_mmaps);
    140	ip->info.size = size;
    141	ip->context =
    142		container_of(udata, struct uverbs_attr_bundle, driver_udata)
    143			->context;
    144	ip->obj = obj;
    145	kref_init(&ip->ref);
    146
    147	return ip;
    148}