cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

dmabounce.c (15422B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 *  arch/arm/common/dmabounce.c
 *
 *  Special dma_{map/unmap/dma_sync}_* routines for systems that have
 *  limited DMA windows. These functions utilize bounce buffers to
 *  copy data to/from buffers located outside the DMA region. This
 *  only works for systems in which DMA memory is at the bottom of
 *  RAM, the remainder of memory is at the top and the DMA memory
 *  can be marked as ZONE_DMA. Anything beyond that such as discontiguous
 *  DMA windows will require custom implementations that reserve memory
 *  areas at early bootup.
 *
 *  Original version by Brad Parker (brad@heeltoe.com)
 *  Re-written by Christopher Hoover <ch@murgatroid.com>
 *  Made generic by Deepak Saxena <dsaxena@plexity.net>
 *
 *  Copyright (C) 2002 Hewlett Packard Company.
 *  Copyright (C) 2004 MontaVista Software, Inc.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/page-flags.h>
#include <linux/device.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/dmapool.h>
#include <linux/list.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>
#include <asm/dma-iommu.h>

#undef STATS

#ifdef STATS
#define DO_STATS(X) do { X ; } while (0)
#else
#define DO_STATS(X) do { } while (0)
#endif

/* ************************************************** */

struct safe_buffer {
	struct list_head node;

	/* original request */
	void		*ptr;
	size_t		size;
	int		direction;

	/* safe buffer info */
	struct dmabounce_pool *pool;
	void		*safe;
	dma_addr_t	safe_dma_addr;
};

struct dmabounce_pool {
	unsigned long	size;
	struct dma_pool	*pool;
#ifdef STATS
	unsigned long	allocs;
#endif
};

struct dmabounce_device_info {
	struct device *dev;
	struct list_head safe_buffers;
#ifdef STATS
	unsigned long total_allocs;
	unsigned long map_op_count;
	unsigned long bounce_count;
	int attr_res;
#endif
	struct dmabounce_pool	small;
	struct dmabounce_pool	large;

	rwlock_t lock;

	int (*needs_bounce)(struct device *, dma_addr_t, size_t);
};

#ifdef STATS
static ssize_t dmabounce_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	return sprintf(buf, "%lu %lu %lu %lu %lu %lu\n",
		device_info->small.allocs,
		device_info->large.allocs,
		device_info->total_allocs - device_info->small.allocs -
			device_info->large.allocs,
		device_info->total_allocs,
		device_info->map_op_count,
		device_info->bounce_count);
}

static DEVICE_ATTR(dmabounce_stats, 0400, dmabounce_show, NULL);
#endif


/* allocate a 'safe' buffer and keep track of it */
static inline struct safe_buffer *
alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
		  size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf;
	struct dmabounce_pool *pool;
	struct device *dev = device_info->dev;
	unsigned long flags;

	dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
		__func__, ptr, size, dir);

	if (size <= device_info->small.size) {
		pool = &device_info->small;
	} else if (size <= device_info->large.size) {
		pool = &device_info->large;
	} else {
		pool = NULL;
	}

	buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
	if (buf == NULL) {
		dev_warn(dev, "%s: kmalloc failed\n", __func__);
		return NULL;
	}

	buf->ptr = ptr;
	buf->size = size;
	buf->direction = dir;
	buf->pool = pool;

	if (pool) {
		buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
					   &buf->safe_dma_addr);
	} else {
		buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
					       GFP_ATOMIC);
	}

	if (buf->safe == NULL) {
		dev_warn(dev,
			 "%s: could not alloc dma memory (size=%d)\n",
			 __func__, size);
		kfree(buf);
		return NULL;
	}

#ifdef STATS
	if (pool)
		pool->allocs++;
	device_info->total_allocs++;
#endif

	write_lock_irqsave(&device_info->lock, flags);
	list_add(&buf->node, &device_info->safe_buffers);
	write_unlock_irqrestore(&device_info->lock, flags);

	return buf;
}

/* determine if a buffer is from our "safe" pool */
static inline struct safe_buffer *
find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
{
	struct safe_buffer *b, *rb = NULL;
	unsigned long flags;

	read_lock_irqsave(&device_info->lock, flags);

	list_for_each_entry(b, &device_info->safe_buffers, node)
		if (b->safe_dma_addr <= safe_dma_addr &&
		    b->safe_dma_addr + b->size > safe_dma_addr) {
			rb = b;
			break;
		}

	read_unlock_irqrestore(&device_info->lock, flags);
	return rb;
}

static inline void
free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
{
	unsigned long flags;

	dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);

	write_lock_irqsave(&device_info->lock, flags);

	list_del(&buf->node);

	write_unlock_irqrestore(&device_info->lock, flags);

	if (buf->pool)
		dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
	else
		dma_free_coherent(device_info->dev, buf->size, buf->safe,
				    buf->safe_dma_addr);

	kfree(buf);
}

/* ************************************************** */

static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
		dma_addr_t dma_addr, const char *where)
{
	if (!dev || !dev->archdata.dmabounce)
		return NULL;
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "Trying to %s invalid mapping\n", where);
		return NULL;
	}
	return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
}

static int needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
{
	if (!dev || !dev->archdata.dmabounce)
		return 0;

	if (dev->dma_mask) {
		unsigned long limit, mask = *dev->dma_mask;

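		/*
		 * For a contiguous low-bits mask, (mask + 1) & ~mask is the
		 * size of the addressable window; for an all-ones mask the
		 * addition overflows to 0, and limit == 0 below means "no
		 * size limit".
		 */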
		limit = (mask + 1) & ~mask;
		if (limit && size > limit) {
			dev_err(dev, "DMA mapping too big (requested %#x "
				"mask %#Lx)\n", size, *dev->dma_mask);
			return -E2BIG;
		}

		/* Figure out if we need to bounce from the DMA mask. */
		if ((dma_addr | (dma_addr + size - 1)) & ~mask)
			return 1;
	}

	return !!dev->archdata.dmabounce->needs_bounce(dev, dma_addr, size);
}

static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
				    enum dma_data_direction dir,
				    unsigned long attrs)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	struct safe_buffer *buf;

	if (device_info)
		DO_STATS ( device_info->map_op_count++ );

	buf = alloc_safe_buffer(device_info, ptr, size, dir);
	if (buf == NULL) {
		dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
		       __func__, ptr);
		return DMA_MAPPING_ERROR;
	}

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	if ((dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) &&
	    !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
		dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
			__func__, ptr, buf->safe, size);
		memcpy(buf->safe, ptr, size);
	}

	return buf->safe_dma_addr;
}

static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
				size_t size, enum dma_data_direction dir,
				unsigned long attrs)
{
	BUG_ON(buf->size != size);
	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if ((dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) &&
	    !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
		void *ptr = buf->ptr;

		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
			__func__, buf->safe, ptr, size);
		memcpy(ptr, buf->safe, size);

		/*
		 * Since we may have written to a page cache page,
		 * we need to ensure that the data will be coherent
		 * with user mappings.
		 */
		__cpuc_flush_dcache_area(ptr, size);
	}
	free_safe_buffer(dev->archdata.dmabounce, buf);
}

/* ************************************************** */

/*
 * see if a buffer address is in an 'unsafe' range.  if it is
 * allocate a 'safe' buffer and copy the unsafe buffer into it.
 * substitute the safe buffer for the unsafe one.
 * (basically move the buffer from an unsafe area to a safe one)
 */
static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	dma_addr_t dma_addr;
	int ret;

	dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
		__func__, page, offset, size, dir);

	dma_addr = pfn_to_dma(dev, page_to_pfn(page)) + offset;

	ret = needs_bounce(dev, dma_addr, size);
	if (ret < 0)
		return DMA_MAPPING_ERROR;

	if (ret == 0) {
		arm_dma_ops.sync_single_for_device(dev, dma_addr, size, dir);
		return dma_addr;
	}

	if (PageHighMem(page)) {
		dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
		return DMA_MAPPING_ERROR;
	}

	return map_single(dev, page_address(page) + offset, size, dir, attrs);
}

/*
 * see if a mapped address was really a "safe" buffer and if so, copy
 * the data from the safe buffer back to the unsafe buffer and free up
 * the safe buffer.  (basically return things back to the way they
 * should be)
 */
static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct safe_buffer *buf;

	dev_dbg(dev, "%s(dma=%#x,size=%d,dir=%x)\n",
		__func__, dma_addr, size, dir);

	buf = find_safe_buffer_dev(dev, dma_addr, __func__);
	if (!buf) {
		arm_dma_ops.sync_single_for_cpu(dev, dma_addr, size, dir);
		return;
	}

	unmap_single(dev, buf, size, dir, attrs);
}

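/*
 * Returns 0 if the sync was handled here (the address belongs to a bounce
 * buffer and the data has been copied back to the original buffer), or 1 if
 * the caller should fall back to the regular arm_dma_ops sync.
 */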
static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
		size_t sz, enum dma_data_direction dir)
{
	struct safe_buffer *buf;
	unsigned long off;

	dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
		__func__, addr, sz, dir);

	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
		return 1;

	off = addr - buf->safe_dma_addr;

	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
			__func__, buf->safe + off, buf->ptr + off, sz);
		memcpy(buf->ptr + off, buf->safe + off, sz);
	}
	return 0;
}

static void dmabounce_sync_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (!__dmabounce_sync_for_cpu(dev, handle, size, dir))
		return;

	arm_dma_ops.sync_single_for_cpu(dev, handle, size, dir);
}

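/*
 * Mirror of __dmabounce_sync_for_cpu: copy CPU-side data into the bounce
 * buffer before the device reads it. Same return convention (0 = handled
 * here, 1 = fall back to arm_dma_ops).
 */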
static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
		size_t sz, enum dma_data_direction dir)
{
	struct safe_buffer *buf;
	unsigned long off;

	dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
		__func__, addr, sz, dir);

	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
		return 1;

	off = addr - buf->safe_dma_addr;

	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy out unsafe %p to safe %p, size %d\n",
			__func__, buf->ptr + off, buf->safe + off, sz);
		memcpy(buf->safe + off, buf->ptr + off, sz);
	}
	return 0;
}

static void dmabounce_sync_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (!__dmabounce_sync_for_device(dev, handle, size, dir))
		return;

	arm_dma_ops.sync_single_for_device(dev, handle, size, dir);
}

static int dmabounce_dma_supported(struct device *dev, u64 dma_mask)
{
	if (dev->archdata.dmabounce)
		return 0;

	return arm_dma_ops.dma_supported(dev, dma_mask);
}

static const struct dma_map_ops dmabounce_ops = {
	.alloc			= arm_dma_alloc,
	.free			= arm_dma_free,
	.mmap			= arm_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= dmabounce_map_page,
	.unmap_page		= dmabounce_unmap_page,
	.sync_single_for_cpu	= dmabounce_sync_for_cpu,
	.sync_single_for_device	= dmabounce_sync_for_device,
	.map_sg			= arm_dma_map_sg,
	.unmap_sg		= arm_dma_unmap_sg,
	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
	.dma_supported		= dmabounce_dma_supported,
};

static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
		const char *name, unsigned long size)
{
	pool->size = size;
	DO_STATS(pool->allocs = 0);
	pool->pool = dma_pool_create(name, dev, size,
				     0 /* byte alignment */,
				     0 /* no page-crossing issues */);

	return pool->pool ? 0 : -ENOMEM;
}

int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
		unsigned long large_buffer_size,
		int (*needs_bounce_fn)(struct device *, dma_addr_t, size_t))
{
	struct dmabounce_device_info *device_info;
	int ret;

	device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
	if (!device_info) {
		dev_err(dev,
			"Could not allocate dmabounce_device_info\n");
		return -ENOMEM;
	}

	ret = dmabounce_init_pool(&device_info->small, dev,
				  "small_dmabounce_pool", small_buffer_size);
	if (ret) {
		dev_err(dev,
			"dmabounce: could not allocate DMA pool for %ld byte objects\n",
			small_buffer_size);
		goto err_free;
	}

	if (large_buffer_size) {
		ret = dmabounce_init_pool(&device_info->large, dev,
					  "large_dmabounce_pool",
					  large_buffer_size);
		if (ret) {
			dev_err(dev,
				"dmabounce: could not allocate DMA pool for %ld byte objects\n",
				large_buffer_size);
			goto err_destroy;
		}
	}

	device_info->dev = dev;
	INIT_LIST_HEAD(&device_info->safe_buffers);
	rwlock_init(&device_info->lock);
	device_info->needs_bounce = needs_bounce_fn;

#ifdef STATS
	device_info->total_allocs = 0;
	device_info->map_op_count = 0;
	device_info->bounce_count = 0;
	device_info->attr_res = device_create_file(dev, &dev_attr_dmabounce_stats);
#endif

	dev->archdata.dmabounce = device_info;
	set_dma_ops(dev, &dmabounce_ops);

	dev_info(dev, "dmabounce: registered device\n");

	return 0;

 err_destroy:
	dma_pool_destroy(device_info->small.pool);
 err_free:
	kfree(device_info);
	return ret;
}
EXPORT_SYMBOL(dmabounce_register_dev);

void dmabounce_unregister_dev(struct device *dev)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;

	dev->archdata.dmabounce = NULL;
	set_dma_ops(dev, NULL);

	if (!device_info) {
		dev_warn(dev,
			 "Never registered with dmabounce but attempting "
			 "to unregister!\n");
		return;
	}

	if (!list_empty(&device_info->safe_buffers)) {
		dev_err(dev,
			"Removing from dmabounce with pending buffers!\n");
		BUG();
	}

	if (device_info->small.pool)
		dma_pool_destroy(device_info->small.pool);
	if (device_info->large.pool)
		dma_pool_destroy(device_info->large.pool);

#ifdef STATS
	if (device_info->attr_res == 0)
		device_remove_file(dev, &dev_attr_dmabounce_stats);
#endif

	kfree(device_info);

	dev_info(dev, "dmabounce: device unregistered\n");
}
EXPORT_SYMBOL(dmabounce_unregister_dev);

MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>");
MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows");
MODULE_LICENSE("GPL");
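
The file above only provides the mechanism; a platform (for example a machine or bus driver) opts a device in by calling dmabounce_register_dev() with two pool sizes and a needs_bounce callback, and tears it down with dmabounce_unregister_dev(). The sketch below is a minimal, hypothetical illustration of that call sequence: the device, the 64 MiB window, the pool sizes, and the header assumed to carry the prototypes are illustrative assumptions, not something this file defines.

/*
 * Hypothetical platform glue (not part of dmabounce.c): register a device
 * whose DMA engine can only address the first 64 MiB of RAM. The window
 * size and pool sizes are assumptions for illustration only.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <asm/dma-mapping.h>	/* assumed home of the dmabounce_register_dev() prototype */

#define EXAMPLE_DMA_WINDOW_TOP	0x04000000UL	/* assumed 64 MiB DMA window */

/* Bounce any buffer that ends beyond the window the device can reach. */
static int example_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
{
	return (addr + size) > EXAMPLE_DMA_WINDOW_TOP;
}

static int example_platform_setup(struct device *dev)
{
	/*
	 * 512-byte pool for small transfers, 4 KiB pool for page-sized ones;
	 * anything larger falls back to dma_alloc_coherent() inside dmabounce.
	 */
	return dmabounce_register_dev(dev, 512, 4096, example_needs_bounce);
}

static void example_platform_teardown(struct device *dev)
{
	dmabounce_unregister_dev(dev);
}

In-tree users of this API, such as the SA1111 support code, follow the same register/unregister pattern with a bus-specific needs_bounce callback.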