cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

umem_odp.c (15247B)


/*
 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/pid.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
#include <linux/interval_tree.h>
#include <linux/hmm.h>
#include <linux/pagemap.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>

#include "uverbs.h"

static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
				   const struct mmu_interval_notifier_ops *ops)
{
	int ret;

	umem_odp->umem.is_odp = 1;
	mutex_init(&umem_odp->umem_mutex);

	if (!umem_odp->is_implicit_odp) {
		size_t page_size = 1UL << umem_odp->page_shift;
		unsigned long start;
		unsigned long end;
		size_t ndmas, npfns;

		start = ALIGN_DOWN(umem_odp->umem.address, page_size);
		if (check_add_overflow(umem_odp->umem.address,
				       (unsigned long)umem_odp->umem.length,
				       &end))
			return -EOVERFLOW;
		end = ALIGN(end, page_size);
		if (unlikely(end < page_size))
			return -EOVERFLOW;

		ndmas = (end - start) >> umem_odp->page_shift;
		if (!ndmas)
			return -EINVAL;

		npfns = (end - start) >> PAGE_SHIFT;
		umem_odp->pfn_list = kvcalloc(
			npfns, sizeof(*umem_odp->pfn_list), GFP_KERNEL);
		if (!umem_odp->pfn_list)
			return -ENOMEM;

		umem_odp->dma_list = kvcalloc(
			ndmas, sizeof(*umem_odp->dma_list), GFP_KERNEL);
		if (!umem_odp->dma_list) {
			ret = -ENOMEM;
			goto out_pfn_list;
		}

		ret = mmu_interval_notifier_insert(&umem_odp->notifier,
						   umem_odp->umem.owning_mm,
						   start, end - start, ops);
		if (ret)
			goto out_dma_list;
	}

	return 0;

out_dma_list:
	kvfree(umem_odp->dma_list);
out_pfn_list:
	kvfree(umem_odp->pfn_list);
	return ret;
}
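
/*
 * Illustrative worked example (not part of the upstream file): the two
 * allocations above are sized at different granularities. Assuming a 4 KiB
 * PAGE_SHIFT and a 2 MiB huge-page MR (page_shift == 21) covering a 4 MiB
 * aligned range:
 *
 *	ndmas = (end - start) >> 21 = 2       entries in dma_list
 *	npfns = (end - start) >> 12 = 1024    entries in pfn_list
 *
 * That is, pfn_list always holds one PFN per system page, while dma_list
 * holds one mapping per device-visible (possibly huge) page.
 */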

/**
 * ib_umem_odp_alloc_implicit - Allocate a parent implicit ODP umem
 *
 * Implicit ODP umems do not have a VA range and do not have any page lists.
 * They exist only to hold the per_mm reference to help the driver create
 * child umems.
 *
 * @device: IB device to create the UMEM on
 * @access: ib_reg_mr access flags
 */
struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device,
					       int access)
{
	struct ib_umem *umem;
	struct ib_umem_odp *umem_odp;
	int ret;

	if (access & IB_ACCESS_HUGETLB)
		return ERR_PTR(-EINVAL);

	umem_odp = kzalloc(sizeof(*umem_odp), GFP_KERNEL);
	if (!umem_odp)
		return ERR_PTR(-ENOMEM);
	umem = &umem_odp->umem;
	umem->ibdev = device;
	umem->writable = ib_access_writable(access);
	umem->owning_mm = current->mm;
	umem_odp->is_implicit_odp = 1;
	umem_odp->page_shift = PAGE_SHIFT;

	umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
	ret = ib_init_umem_odp(umem_odp, NULL);
	if (ret) {
		put_pid(umem_odp->tgid);
		kfree(umem_odp);
		return ERR_PTR(ret);
	}
	return umem_odp;
}
EXPORT_SYMBOL(ib_umem_odp_alloc_implicit);
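
/*
 * Illustrative sketch (not a verbatim driver excerpt): a driver supporting
 * implicit ODP might allocate the parent umem like this when registering a
 * whole-address-space MR. "ibdev" and the surrounding error handling are
 * assumptions for the example.
 *
 *	struct ib_umem_odp *root;
 *
 *	root = ib_umem_odp_alloc_implicit(ibdev, IB_ACCESS_ON_DEMAND);
 *	if (IS_ERR(root))
 *		return PTR_ERR(root);
 */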

/**
 * ib_umem_odp_alloc_child - Allocate a child ODP umem under an implicit
 *                           parent ODP umem
 *
 * @root: The parent umem enclosing the child. This must be allocated using
 *        ib_umem_odp_alloc_implicit()
 * @addr: The starting userspace VA
 * @size: The length of the userspace VA
 * @ops: MMU interval ops, currently only @invalidate
 */
struct ib_umem_odp *
ib_umem_odp_alloc_child(struct ib_umem_odp *root, unsigned long addr,
			size_t size,
			const struct mmu_interval_notifier_ops *ops)
{
	/*
	 * Caller must ensure that root cannot be freed during the call to
	 * ib_umem_odp_alloc_child().
	 */
	struct ib_umem_odp *odp_data;
	struct ib_umem *umem;
	int ret;

	if (WARN_ON(!root->is_implicit_odp))
		return ERR_PTR(-EINVAL);

	odp_data = kzalloc(sizeof(*odp_data), GFP_KERNEL);
	if (!odp_data)
		return ERR_PTR(-ENOMEM);
	umem = &odp_data->umem;
	umem->ibdev = root->umem.ibdev;
	umem->length     = size;
	umem->address    = addr;
	umem->writable   = root->umem.writable;
	umem->owning_mm  = root->umem.owning_mm;
	odp_data->page_shift = PAGE_SHIFT;
	odp_data->notifier.ops = ops;

	/*
	 * An mmget must be held when registering a notifier; the owning_mm
	 * only has an mmgrab at this point.
	 */
	if (!mmget_not_zero(umem->owning_mm)) {
		ret = -EFAULT;
		goto out_free;
	}

	odp_data->tgid = get_pid(root->tgid);
	ret = ib_init_umem_odp(odp_data, ops);
	if (ret)
		goto out_tgid;
	mmput(umem->owning_mm);
	return odp_data;

out_tgid:
	put_pid(odp_data->tgid);
	mmput(umem->owning_mm);
out_free:
	kfree(odp_data);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_umem_odp_alloc_child);
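
/*
 * Illustrative sketch: once an implicit parent exists, per-range children
 * can be hung off it on demand, e.g. from a page-fault path. "root",
 * "fault_addr" and "my_mmu_interval_ops" are assumptions for the example,
 * not code from this file.
 *
 *	struct ib_umem_odp *child;
 *
 *	child = ib_umem_odp_alloc_child(root, fault_addr & ~(SZ_2M - 1),
 *					SZ_2M, &my_mmu_interval_ops);
 *	if (IS_ERR(child))
 *		return PTR_ERR(child);
 */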

/**
 * ib_umem_odp_get - Create a umem_odp for a userspace va
 *
 * @device: IB device to create the UMEM on
 * @addr: userspace virtual address to start at
 * @size: length of region to pin
 * @access: IB_ACCESS_xxx flags for memory being pinned
 * @ops: MMU interval ops, currently only @invalidate
 *
 * The driver should use this function when the access flags indicate ODP
 * memory. It avoids pinning; instead, it stores the mm for future page-fault
 * handling in conjunction with MMU notifiers.
 */
struct ib_umem_odp *ib_umem_odp_get(struct ib_device *device,
				    unsigned long addr, size_t size, int access,
				    const struct mmu_interval_notifier_ops *ops)
{
	struct ib_umem_odp *umem_odp;
	int ret;

	if (WARN_ON_ONCE(!(access & IB_ACCESS_ON_DEMAND)))
		return ERR_PTR(-EINVAL);

	umem_odp = kzalloc(sizeof(struct ib_umem_odp), GFP_KERNEL);
	if (!umem_odp)
		return ERR_PTR(-ENOMEM);

	umem_odp->umem.ibdev = device;
	umem_odp->umem.length = size;
	umem_odp->umem.address = addr;
	umem_odp->umem.writable = ib_access_writable(access);
	umem_odp->umem.owning_mm = current->mm;
	umem_odp->notifier.ops = ops;

	umem_odp->page_shift = PAGE_SHIFT;
#ifdef CONFIG_HUGETLB_PAGE
	if (access & IB_ACCESS_HUGETLB)
		umem_odp->page_shift = HPAGE_SHIFT;
#endif

	umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
	ret = ib_init_umem_odp(umem_odp, ops);
	if (ret)
		goto err_put_pid;
	return umem_odp;

err_put_pid:
	put_pid(umem_odp->tgid);
	kfree(umem_odp);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_umem_odp_get);
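
/*
 * Illustrative sketch: a typical explicit-ODP registration path. The access
 * flags come from userspace via ib_reg_mr(); "my_mmu_interval_ops" is an
 * assumed driver-supplied ops table whose ->invalidate unmaps the range.
 *
 *	struct ib_umem_odp *odp;
 *
 *	odp = ib_umem_odp_get(ibdev, user_va, length,
 *			      IB_ACCESS_ON_DEMAND | IB_ACCESS_LOCAL_WRITE,
 *			      &my_mmu_interval_ops);
 *	if (IS_ERR(odp))
 *		return PTR_ERR(odp);
 */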

void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
{
	/*
	 * Ensure that no more pages are mapped in the umem.
	 *
	 * It is the driver's responsibility to ensure, before calling us,
	 * that the hardware will not attempt to access the MR any more.
	 */
	if (!umem_odp->is_implicit_odp) {
		mutex_lock(&umem_odp->umem_mutex);
		ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
					    ib_umem_end(umem_odp));
		mutex_unlock(&umem_odp->umem_mutex);
		mmu_interval_notifier_remove(&umem_odp->notifier);
		kvfree(umem_odp->dma_list);
		kvfree(umem_odp->pfn_list);
	}
	put_pid(umem_odp->tgid);
	kfree(umem_odp);
}
EXPORT_SYMBOL(ib_umem_odp_release);

/*
 * Map for DMA and insert a single page into the on-demand paging page tables.
 *
 * @umem_odp: the umem to insert the page to.
 * @dma_index: index in the umem to add the dma to.
 * @page: the page struct to map and add.
 * @access_mask: access permissions needed for this page.
 *
 * The function returns -EFAULT if the DMA mapping operation fails.
 */
static int ib_umem_odp_map_dma_single_page(
		struct ib_umem_odp *umem_odp,
		unsigned int dma_index,
		struct page *page,
		u64 access_mask)
{
	struct ib_device *dev = umem_odp->umem.ibdev;
	dma_addr_t *dma_addr = &umem_odp->dma_list[dma_index];

	if (*dma_addr) {
		/*
		 * If the page is already dma mapped it means it went through
		 * a non-invalidating transition, like read-only to writable.
		 * Resync the flags.
		 */
		*dma_addr = (*dma_addr & ODP_DMA_ADDR_MASK) | access_mask;
		return 0;
	}

	*dma_addr = ib_dma_map_page(dev, page, 0, 1 << umem_odp->page_shift,
				    DMA_BIDIRECTIONAL);
	if (ib_dma_mapping_error(dev, *dma_addr)) {
		*dma_addr = 0;
		return -EFAULT;
	}
	umem_odp->npages++;
	*dma_addr |= access_mask;
	return 0;
}
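
/*
 * Illustrative sketch of the dma_list encoding used above: each entry packs
 * the DMA address and the ODP access bits into a single dma_addr_t. Helpers
 * to split an entry back out could look like this (hypothetical, not part
 * of this file):
 *
 *	static dma_addr_t odp_entry_addr(dma_addr_t entry)
 *	{
 *		return entry & ODP_DMA_ADDR_MASK;
 *	}
 *
 *	static bool odp_entry_writable(dma_addr_t entry)
 *	{
 *		return entry & ODP_WRITE_ALLOWED_BIT;
 *	}
 */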

/**
 * ib_umem_odp_map_dma_and_lock - DMA map userspace memory in an ODP MR and lock it.
 *
 * Maps the range passed in the argument to DMA addresses.
 * The DMA addresses of the mapped pages are updated in umem_odp->dma_list.
 * Upon success the ODP MR will be locked to let the caller complete its
 * device page table update.
 *
 * Returns the number of pages mapped on success, or a negative error code
 * on failure.
 * @umem_odp: the umem to map and pin
 * @user_virt: the address from which we need to map.
 * @bcnt: the minimal number of bytes to pin and map. The mapping might be
 *        bigger due to alignment, and may also be smaller in case of an error
 *        pinning or mapping a page. The actual number of pages mapped is
 *        returned in the return value.
 * @access_mask: bit mask of the requested access permissions for the given
 *               range.
 * @fault: is faulting required for the given range
 */
int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 user_virt,
				 u64 bcnt, u64 access_mask, bool fault)
			__acquires(&umem_odp->umem_mutex)
{
	struct task_struct *owning_process = NULL;
	struct mm_struct *owning_mm = umem_odp->umem.owning_mm;
	int pfn_index, dma_index, ret = 0, start_idx;
	unsigned int page_shift, hmm_order, pfn_start_idx;
	unsigned long num_pfns, current_seq;
	struct hmm_range range = {};
	unsigned long timeout;

	if (access_mask == 0)
		return -EINVAL;

	if (user_virt < ib_umem_start(umem_odp) ||
	    user_virt + bcnt > ib_umem_end(umem_odp))
		return -EFAULT;

	page_shift = umem_odp->page_shift;

	/*
	 * owning_process is allowed to be NULL; this means the mm somehow
	 * exists beyond the lifetime of the originating process. Presumably
	 * mmget_not_zero() will fail in this case.
	 */
	owning_process = get_pid_task(umem_odp->tgid, PIDTYPE_PID);
	if (!owning_process || !mmget_not_zero(owning_mm)) {
		ret = -EINVAL;
		goto out_put_task;
	}

	range.notifier = &umem_odp->notifier;
	range.start = ALIGN_DOWN(user_virt, 1UL << page_shift);
	range.end = ALIGN(user_virt + bcnt, 1UL << page_shift);
	pfn_start_idx = (range.start - ib_umem_start(umem_odp)) >> PAGE_SHIFT;
	num_pfns = (range.end - range.start) >> PAGE_SHIFT;
	if (fault) {
		range.default_flags = HMM_PFN_REQ_FAULT;

		if (access_mask & ODP_WRITE_ALLOWED_BIT)
			range.default_flags |= HMM_PFN_REQ_WRITE;
	}

	range.hmm_pfns = &(umem_odp->pfn_list[pfn_start_idx]);
	timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);

retry:
	current_seq = range.notifier_seq =
		mmu_interval_read_begin(&umem_odp->notifier);

	mmap_read_lock(owning_mm);
	ret = hmm_range_fault(&range);
	mmap_read_unlock(owning_mm);
	if (unlikely(ret)) {
		if (ret == -EBUSY && !time_after(jiffies, timeout))
			goto retry;
		goto out_put_mm;
	}

	start_idx = (range.start - ib_umem_start(umem_odp)) >> page_shift;
	dma_index = start_idx;

	mutex_lock(&umem_odp->umem_mutex);
	if (mmu_interval_read_retry(&umem_odp->notifier, current_seq)) {
		mutex_unlock(&umem_odp->umem_mutex);
		goto retry;
	}

	for (pfn_index = 0; pfn_index < num_pfns;
		pfn_index += 1 << (page_shift - PAGE_SHIFT), dma_index++) {

		if (fault) {
			/*
			 * Since we asked hmm_range_fault() to populate
			 * pages it shouldn't return an error entry on success.
			 */
			WARN_ON(range.hmm_pfns[pfn_index] & HMM_PFN_ERROR);
			WARN_ON(!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID));
		} else {
			if (!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID)) {
				WARN_ON(umem_odp->dma_list[dma_index]);
				continue;
			}
			access_mask = ODP_READ_ALLOWED_BIT;
			if (range.hmm_pfns[pfn_index] & HMM_PFN_WRITE)
				access_mask |= ODP_WRITE_ALLOWED_BIT;
		}

		hmm_order = hmm_pfn_to_map_order(range.hmm_pfns[pfn_index]);
		/* If a hugepage was detected and ODP wasn't set for it, the
		 * umem page_shift will be used; the opposite case is an error.
		 */
		if (hmm_order + PAGE_SHIFT < page_shift) {
			ret = -EINVAL;
			ibdev_dbg(umem_odp->umem.ibdev,
				  "%s: unexpected hmm_order %u, page_shift %u\n",
				  __func__, hmm_order, page_shift);
			break;
		}

		ret = ib_umem_odp_map_dma_single_page(
				umem_odp, dma_index, hmm_pfn_to_page(range.hmm_pfns[pfn_index]),
				access_mask);
		if (ret < 0) {
			ibdev_dbg(umem_odp->umem.ibdev,
				  "ib_umem_odp_map_dma_single_page failed with error %d\n", ret);
			break;
		}
	}
	/* Upon success the lock stays held for the caller. */
	if (!ret)
		ret = dma_index - start_idx;
	else
		mutex_unlock(&umem_odp->umem_mutex);

out_put_mm:
	mmput(owning_mm);
out_put_task:
	if (owning_process)
		put_task_struct(owning_process);
	return ret;
}
EXPORT_SYMBOL(ib_umem_odp_map_dma_and_lock);
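
/*
 * Illustrative sketch of the intended calling pattern: on success the
 * function returns with umem_mutex held, so the caller programs its device
 * page table from dma_list and only then drops the lock. "my_program_ptes"
 * is a hypothetical driver helper, not code from this file.
 *
 *	npages = ib_umem_odp_map_dma_and_lock(odp, io_virt, bcnt,
 *					      access_mask, true);
 *	if (npages < 0)
 *		return npages;
 *
 *	my_program_ptes(odp, io_virt, npages);
 *	mutex_unlock(&odp->umem_mutex);
 */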

void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
				 u64 bound)
{
	dma_addr_t dma_addr;
	dma_addr_t dma;
	int idx;
	u64 addr;
	struct ib_device *dev = umem_odp->umem.ibdev;

	lockdep_assert_held(&umem_odp->umem_mutex);

	virt = max_t(u64, virt, ib_umem_start(umem_odp));
	bound = min_t(u64, bound, ib_umem_end(umem_odp));
	for (addr = virt; addr < bound; addr += BIT(umem_odp->page_shift)) {
		idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
		dma = umem_odp->dma_list[idx];

		/*
		 * A zero entry means the page was never DMA mapped; the
		 * access flags guarantee a valid DMA address otherwise.
		 */
		if (dma) {
			unsigned long pfn_idx = (addr - ib_umem_start(umem_odp)) >> PAGE_SHIFT;
			struct page *page = hmm_pfn_to_page(umem_odp->pfn_list[pfn_idx]);

			dma_addr = dma & ODP_DMA_ADDR_MASK;
			ib_dma_unmap_page(dev, dma_addr,
					  BIT(umem_odp->page_shift),
					  DMA_BIDIRECTIONAL);
			if (dma & ODP_WRITE_ALLOWED_BIT) {
				struct page *head_page = compound_head(page);
				/*
				 * set_page_dirty prefers being called with
				 * the page lock. However, MMU notifiers are
				 * called sometimes with and sometimes without
				 * the lock. We rely on the umem_mutex instead
				 * to prevent other mmu notifiers from
				 * continuing and allowing the page mapping to
				 * be removed.
				 */
				set_page_dirty(head_page);
			}
			umem_odp->dma_list[idx] = 0;
			umem_odp->npages--;
		}
	}
}
EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
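
/*
 * Illustrative sketch: ib_umem_odp_unmap_dma_pages() is normally driven from
 * the driver's mmu_interval_notifier_ops->invalidate callback, under
 * umem_mutex and after bumping the notifier sequence. A minimal handler,
 * modelled loosely on in-tree drivers (names assumed, device page-table
 * teardown elided), might look like:
 *
 *	static bool my_invalidate(struct mmu_interval_notifier *mni,
 *				  const struct mmu_notifier_range *range,
 *				  unsigned long cur_seq)
 *	{
 *		struct ib_umem_odp *umem_odp =
 *			container_of(mni, struct ib_umem_odp, notifier);
 *
 *		if (!mmu_notifier_range_blockable(range))
 *			return false;
 *
 *		mutex_lock(&umem_odp->umem_mutex);
 *		mmu_interval_set_seq(mni, cur_seq);
 *		// ... tear down device PTEs for the overlapping range ...
 *		ib_umem_odp_unmap_dma_pages(umem_odp,
 *			max_t(u64, ib_umem_start(umem_odp), range->start),
 *			min_t(u64, ib_umem_end(umem_odp), range->end));
 *		mutex_unlock(&umem_odp->umem_mutex);
 *		return true;
 *	}
 */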