cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

dfl-afu-dma-region.c (10543B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for FPGA Accelerated Function Unit (AFU) DMA Region Management
 *
 * Copyright (C) 2017-2018 Intel Corporation, Inc.
 *
 * Authors:
 *   Wu Hao <hao.wu@intel.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 */

#include <linux/dma-mapping.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <linux/mm.h>

#include "dfl-afu.h"

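/**
 * afu_dma_region_init - initialize the dma region rbtree
 * @pdata: feature device platform data
 */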
void afu_dma_region_init(struct dfl_feature_platform_data *pdata)
{
	struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);

	afu->dma_regions = RB_ROOT;
}

/**
 * afu_dma_pin_pages - pin pages of given dma memory region
 * @pdata: feature device platform data
 * @region: dma memory region to be pinned
 *
 * Pin all the pages of given dfl_afu_dma_region.
 * Return 0 for success or negative error code.
 */
static int afu_dma_pin_pages(struct dfl_feature_platform_data *pdata,
			     struct dfl_afu_dma_region *region)
{
	int npages = region->length >> PAGE_SHIFT;
	struct device *dev = &pdata->dev->dev;
	int ret, pinned;

	/* Charge the pages against the caller's RLIMIT_MEMLOCK. */
	ret = account_locked_vm(current->mm, npages, true);
	if (ret)
		return ret;

	region->pages = kcalloc(npages, sizeof(struct page *), GFP_KERNEL);
	if (!region->pages) {
		ret = -ENOMEM;
		goto unlock_vm;
	}

	pinned = pin_user_pages_fast(region->user_addr, npages, FOLL_WRITE,
				     region->pages);
	if (pinned < 0) {
		ret = pinned;
		goto free_pages;
	} else if (pinned != npages) {
		/* Partial pin: release what was pinned and report -EFAULT. */
		ret = -EFAULT;
		goto unpin_pages;
	}

	dev_dbg(dev, "%d pages pinned\n", pinned);

	return 0;

unpin_pages:
	unpin_user_pages(region->pages, pinned);
free_pages:
	kfree(region->pages);
unlock_vm:
	account_locked_vm(current->mm, npages, false);
	return ret;
}
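
/*
 * Note (illustrative, not part of the original file): account_locked_vm()
 * above charges the pinned pages against the caller's RLIMIT_MEMLOCK, so a
 * userspace client mapping a large buffer may first need to raise that
 * limit. A minimal sketch using standard POSIX calls:
 *
 *	#include <sys/resource.h>
 *
 *	struct rlimit rl = { RLIM_INFINITY, RLIM_INFINITY };
 *	if (setrlimit(RLIMIT_MEMLOCK, &rl))
 *		perror("setrlimit");
 *
 * If the limit would be exceeded, account_locked_vm() fails and the whole
 * map request is rejected before any page is pinned.
 */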

/**
 * afu_dma_unpin_pages - unpin pages of given dma memory region
 * @pdata: feature device platform data
 * @region: dma memory region to be unpinned
 *
 * Unpin all the pages of given dfl_afu_dma_region.
 */
static void afu_dma_unpin_pages(struct dfl_feature_platform_data *pdata,
				struct dfl_afu_dma_region *region)
{
	long npages = region->length >> PAGE_SHIFT;
	struct device *dev = &pdata->dev->dev;

	unpin_user_pages(region->pages, npages);
	kfree(region->pages);
	account_locked_vm(current->mm, npages, false);

	dev_dbg(dev, "%ld pages unpinned\n", npages);
}

/**
 * afu_dma_check_continuous_pages - check if pages are physically contiguous
 * @region: dma memory region
 *
 * Return true if the pages of the given dma memory region have contiguous
 * physical addresses, otherwise return false.
 */
static bool afu_dma_check_continuous_pages(struct dfl_afu_dma_region *region)
{
	int npages = region->length >> PAGE_SHIFT;
	int i;

	for (i = 0; i < npages - 1; i++)
		if (page_to_pfn(region->pages[i]) + 1 !=
				page_to_pfn(region->pages[i + 1]))
			return false;

	return true;
}
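
/*
 * Note (illustrative assumption, not from the original file): multi-page
 * anonymous mappings are rarely physically contiguous, so buffers that span
 * several pages are typically backed by hugepages, which satisfy the check
 * above. A minimal sketch:
 *
 *	#include <sys/mman.h>
 *
 *	void *buf = mmap(NULL, 2 * 1024 * 1024, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
 *
 * A 2 MiB hugepage is physically contiguous by construction, so its PFNs
 * increase by exactly one from page to page.
 */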

/**
 * dma_region_check_iova - check if memory area is fully contained in the region
 * @region: dma memory region
 * @iova: address of the dma memory area
 * @size: size of the dma memory area
 *
 * Compare the dma memory area defined by @iova and @size with given dma region.
 * Return true if memory area is fully contained in the region, otherwise false.
 */
static bool dma_region_check_iova(struct dfl_afu_dma_region *region,
				  u64 iova, u64 size)
{
	/* a size of zero only matches a region that starts exactly at @iova */
	if (!size && region->iova != iova)
		return false;

	return (region->iova <= iova) &&
		(region->length + region->iova >= iova + size);
}
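
/*
 * Worked example (values invented for illustration): for a region with
 * iova = 0x1000 and length = 0x2000 (covering [0x1000, 0x3000)), the area
 * iova = 0x1800, size = 0x800 is contained, since 0x1000 <= 0x1800 and
 * 0x1000 + 0x2000 >= 0x1800 + 0x800. The area iova = 0x2800, size = 0x1000
 * is not, since it ends at 0x3800, past the end of the region.
 */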

/**
 * afu_dma_region_add - add given dma region to rbtree
 * @pdata: feature device platform data
 * @region: dma region to be added
 *
 * Return 0 for success, -EEXIST if dma region has already been added.
 *
 * Needs to be called with pdata->lock held.
 */
static int afu_dma_region_add(struct dfl_feature_platform_data *pdata,
			      struct dfl_afu_dma_region *region)
{
	struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
	struct rb_node **new, *parent = NULL;

	dev_dbg(&pdata->dev->dev, "add region (iova = %llx)\n",
		(unsigned long long)region->iova);

	new = &afu->dma_regions.rb_node;

	while (*new) {
		struct dfl_afu_dma_region *this;

		this = container_of(*new, struct dfl_afu_dma_region, node);

		parent = *new;

		/* Reject the new region if an existing one fully covers it. */
		if (dma_region_check_iova(this, region->iova, region->length))
			return -EEXIST;

		if (region->iova < this->iova)
			new = &((*new)->rb_left);
		else if (region->iova > this->iova)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&region->node, parent, new);
	rb_insert_color(&region->node, &afu->dma_regions);

	return 0;
}

/**
 * afu_dma_region_remove - remove given dma region from rbtree
 * @pdata: feature device platform data
 * @region: dma region to be removed
 *
 * Needs to be called with pdata->lock held.
 */
static void afu_dma_region_remove(struct dfl_feature_platform_data *pdata,
				  struct dfl_afu_dma_region *region)
{
	struct dfl_afu *afu;

	dev_dbg(&pdata->dev->dev, "del region (iova = %llx)\n",
		(unsigned long long)region->iova);

	afu = dfl_fpga_pdata_get_private(pdata);
	rb_erase(&region->node, &afu->dma_regions);
}

/**
 * afu_dma_region_destroy - destroy all regions in rbtree
 * @pdata: feature device platform data
 *
 * Needs to be called with pdata->lock held.
 */
void afu_dma_region_destroy(struct dfl_feature_platform_data *pdata)
{
	struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
	struct rb_node *node = rb_first(&afu->dma_regions);
	struct dfl_afu_dma_region *region;

	while (node) {
		region = container_of(node, struct dfl_afu_dma_region, node);

		dev_dbg(&pdata->dev->dev, "del region (iova = %llx)\n",
			(unsigned long long)region->iova);

		/*
		 * Fetch the next node before erasing this one, so we never
		 * walk the links of a node already unlinked from the tree.
		 */
		node = rb_next(node);
		rb_erase(&region->node, &afu->dma_regions);

		if (region->iova)
			dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
				       region->iova, region->length,
				       DMA_BIDIRECTIONAL);

		if (region->pages)
			afu_dma_unpin_pages(pdata, region);

		kfree(region);
	}
}

/**
 * afu_dma_region_find - find the dma region from rbtree based on iova and size
 * @pdata: feature device platform data
 * @iova: address of the dma memory area
 * @size: size of the dma memory area
 *
 * It finds the dma region from the rbtree based on @iova and @size:
 * - if @size == 0, it finds the dma region which starts from @iova
 * - otherwise, it finds the dma region which fully contains
 *   [@iova, @iova+size)
 * If nothing matches, it returns NULL.
 *
 * Needs to be called with pdata->lock held.
 */
struct dfl_afu_dma_region *
afu_dma_region_find(struct dfl_feature_platform_data *pdata, u64 iova, u64 size)
{
	struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
	struct rb_node *node = afu->dma_regions.rb_node;
	struct device *dev = &pdata->dev->dev;

	while (node) {
		struct dfl_afu_dma_region *region;

		region = container_of(node, struct dfl_afu_dma_region, node);

		if (dma_region_check_iova(region, iova, size)) {
			dev_dbg(dev, "find region (iova = %llx)\n",
				(unsigned long long)region->iova);
			return region;
		}

		if (iova < region->iova)
			node = node->rb_left;
		else if (iova > region->iova)
			node = node->rb_right;
		else
			/*
			 * @iova matches this region's start, but the area is
			 * not fully covered by it.
			 */
			break;
	}

	dev_dbg(dev, "region with iova %llx and size %llx is not found\n",
		(unsigned long long)iova, (unsigned long long)size);

	return NULL;
}
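
/*
 * Worked example (values invented for illustration): with a single region
 * covering iova 0x1000 with length 0x2000, afu_dma_region_find(pdata,
 * 0x1000, 0) returns the region; afu_dma_region_find(pdata, 0x1800, 0)
 * returns NULL, since a zero size requires an exact start match; and
 * afu_dma_region_find(pdata, 0x1800, 0x100) returns the region, since
 * [0x1800, 0x1900) lies inside [0x1000, 0x3000).
 */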

/**
 * afu_dma_region_find_iova - find the dma region from rbtree by iova
 * @pdata: feature device platform data
 * @iova: address of the dma region
 *
 * Needs to be called with pdata->lock held.
 */
static struct dfl_afu_dma_region *
afu_dma_region_find_iova(struct dfl_feature_platform_data *pdata, u64 iova)
{
	return afu_dma_region_find(pdata, iova, 0);
}

/**
 * afu_dma_map_region - map memory region for dma
 * @pdata: feature device platform data
 * @user_addr: address of the memory region
 * @length: size of the memory region
 * @iova: pointer of iova address
 *
 * Map memory region defined by @user_addr and @length, and return dma address
 * of the memory region via @iova.
 * Return 0 for success, otherwise error code.
 */
int afu_dma_map_region(struct dfl_feature_platform_data *pdata,
		       u64 user_addr, u64 length, u64 *iova)
{
	struct dfl_afu_dma_region *region;
	int ret;

	/*
	 * Check inputs: only accept a page-aligned user memory region with
	 * valid length.
	 */
	if (!PAGE_ALIGNED(user_addr) || !PAGE_ALIGNED(length) || !length)
		return -EINVAL;

	/* Check overflow */
	if (user_addr + length < user_addr)
		return -EINVAL;

	region = kzalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	region->user_addr = user_addr;
	region->length = length;

	/* Pin the user memory region */
	ret = afu_dma_pin_pages(pdata, region);
	if (ret) {
		dev_err(&pdata->dev->dev, "failed to pin memory region\n");
		goto free_region;
	}

	/* Only accept physically contiguous pages; otherwise return an error */
	if (!afu_dma_check_continuous_pages(region)) {
		dev_err(&pdata->dev->dev, "pages are not continuous\n");
		ret = -EINVAL;
		goto unpin_pages;
	}

	/* The pages are contiguous, so the whole region maps in one call */
	region->iova = dma_map_page(dfl_fpga_pdata_to_parent(pdata),
				    region->pages[0], 0,
				    region->length,
				    DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dfl_fpga_pdata_to_parent(pdata), region->iova)) {
		dev_err(&pdata->dev->dev, "failed to map for dma\n");
		ret = -EFAULT;
		goto unpin_pages;
	}

	*iova = region->iova;

	mutex_lock(&pdata->lock);
	ret = afu_dma_region_add(pdata, region);
	mutex_unlock(&pdata->lock);
	if (ret) {
		dev_err(&pdata->dev->dev, "failed to add dma region\n");
		goto unmap_dma;
	}

	return 0;

unmap_dma:
	dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
		       region->iova, region->length, DMA_BIDIRECTIONAL);
unpin_pages:
	afu_dma_unpin_pages(pdata, region);
free_region:
	kfree(region);
	return ret;
}
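
/*
 * Example (illustrative sketch, not part of the original file): userspace
 * reaches afu_dma_map_region() through the DFL_FPGA_PORT_DMA_MAP ioctl
 * declared in the uapi header <linux/fpga-dfl.h>. Here fd is assumed to be
 * an open AFU port device (e.g. /dev/dfl-port.0), and buf/len must be
 * page-aligned:
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fpga-dfl.h>
 *
 *	struct dfl_fpga_port_dma_map map = {
 *		.argsz = sizeof(map),
 *		.user_addr = (uint64_t)(uintptr_t)buf,
 *		.length = len,
 *	};
 *
 *	if (ioctl(fd, DFL_FPGA_PORT_DMA_MAP, &map) == 0)
 *		printf("mapped at iova %llx\n",
 *		       (unsigned long long)map.iova);
 */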

/**
 * afu_dma_unmap_region - unmap dma memory region
 * @pdata: feature device platform data
 * @iova: dma address of the region
 *
 * Unmap dma memory region based on @iova.
 * Return 0 for success, otherwise error code.
 */
int afu_dma_unmap_region(struct dfl_feature_platform_data *pdata, u64 iova)
{
	struct dfl_afu_dma_region *region;

	mutex_lock(&pdata->lock);
	region = afu_dma_region_find_iova(pdata, iova);
	if (!region) {
		mutex_unlock(&pdata->lock);
		return -EINVAL;
	}

	if (region->in_use) {
		mutex_unlock(&pdata->lock);
		return -EBUSY;
	}

	afu_dma_region_remove(pdata, region);
	mutex_unlock(&pdata->lock);

	dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
		       region->iova, region->length, DMA_BIDIRECTIONAL);
	afu_dma_unpin_pages(pdata, region);
	kfree(region);

	return 0;
}
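
/*
 * Example (illustrative sketch, not part of the original file): the matching
 * teardown goes through the DFL_FPGA_PORT_DMA_UNMAP ioctl, passing back the
 * iova returned by the map call above:
 *
 *	struct dfl_fpga_port_dma_unmap unmap = {
 *		.argsz = sizeof(unmap),
 *		.iova = map.iova,
 *	};
 *
 *	if (ioctl(fd, DFL_FPGA_PORT_DMA_UNMAP, &unmap))
 *		perror("DFL_FPGA_PORT_DMA_UNMAP");
 */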