cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

direct.c (17261B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2020 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-map-ops.h>
#include <linux/scatterlist.h>
#include <linux/pfn.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include "direct.h"

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but some use
 * it for entirely different regions. In that case the arch code needs to
 * override the variable below for dma-direct to work properly.
 */
unsigned int zone_dma_bits __ro_after_init = 24;

static inline dma_addr_t phys_to_dma_direct(struct device *dev,
		phys_addr_t phys)
{
	if (force_dma_unencrypted(dev))
		return phys_to_dma_unencrypted(dev, phys);
	return phys_to_dma(dev, phys);
}

static inline struct page *dma_direct_to_page(struct device *dev,
		dma_addr_t dma_addr)
{
	return pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_addr)));
}

u64 dma_direct_get_required_mask(struct device *dev)
{
	phys_addr_t phys = (phys_addr_t)(max_pfn - 1) << PAGE_SHIFT;
	u64 max_dma = phys_to_dma_direct(dev, phys);

	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}

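/*
 * Worked example for the mask computation above: if the highest reachable
 * DMA address is 0x13fffffff (5 GiB - 1), fls64() returns 33 and the
 * expression evaluates to (1ULL << 32) * 2 - 1 = 0x1ffffffff, i.e.
 * DMA_BIT_MASK(33) -- the smallest all-ones mask that still covers every
 * address dma-direct may hand out.
 */
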
static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
				  u64 *phys_limit)
{
	u64 dma_limit = min_not_zero(dma_mask, dev->bus_dma_limit);

	/*
	 * Optimistically try the zone that the physical address mask falls
	 * into first.  If that returns memory that isn't actually addressable
	 * we will fall back to the next lower zone and try again.
	 *
	 * Note that GFP_DMA32 and GFP_DMA are no-ops without the corresponding
	 * zones.
	 */
	*phys_limit = dma_to_phys(dev, dma_limit);
	if (*phys_limit <= DMA_BIT_MASK(zone_dma_bits))
		return GFP_DMA;
	if (*phys_limit <= DMA_BIT_MASK(32))
		return GFP_DMA32;
	return 0;
}

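/*
 * Example of the zone selection above, assuming a 1:1 dma_to_phys()
 * translation and the default zone_dma_bits of 24: a device limited to
 * 24-bit DMA gets GFP_DMA, a 32-bit limit gets GFP_DMA32, and an
 * unrestricted 64-bit device gets no extra zone flag and may allocate
 * from any zone.
 */
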
static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
	dma_addr_t dma_addr = phys_to_dma_direct(dev, phys);

	if (dma_addr == DMA_MAPPING_ERROR)
		return false;
	return dma_addr + size - 1 <=
		min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
}

static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
{
	if (!force_dma_unencrypted(dev))
		return 0;
	return set_memory_decrypted((unsigned long)vaddr, PFN_UP(size));
}

static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
{
	int ret;

	if (!force_dma_unencrypted(dev))
		return 0;
	ret = set_memory_encrypted((unsigned long)vaddr, PFN_UP(size));
	if (ret)
		pr_warn_ratelimited("leaking DMA memory that can't be re-encrypted\n");
	return ret;
}

static void __dma_direct_free_pages(struct device *dev, struct page *page,
				    size_t size)
{
	if (swiotlb_free(dev, page, size))
		return;
	dma_free_contiguous(dev, page, size);
}

static struct page *dma_direct_alloc_swiotlb(struct device *dev, size_t size)
{
	struct page *page = swiotlb_alloc(dev, size);

	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		swiotlb_free(dev, page, size);
		return NULL;
	}

	return page;
}

static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
		gfp_t gfp, bool allow_highmem)
{
	int node = dev_to_node(dev);
	struct page *page = NULL;
	u64 phys_limit;

	WARN_ON_ONCE(!PAGE_ALIGNED(size));

	if (is_swiotlb_for_alloc(dev))
		return dma_direct_alloc_swiotlb(dev, size);

	gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
					   &phys_limit);
	page = dma_alloc_contiguous(dev, size, gfp);
	if (page) {
		if (!dma_coherent_ok(dev, page_to_phys(page), size) ||
		    (!allow_highmem && PageHighMem(page))) {
			dma_free_contiguous(dev, page, size);
			page = NULL;
		}
	}
again:
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(size));
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		dma_free_contiguous(dev, page, size);
		page = NULL;

		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
		    phys_limit < DMA_BIT_MASK(64) &&
		    !(gfp & (GFP_DMA32 | GFP_DMA))) {
			gfp |= GFP_DMA32;
			goto again;
		}

		if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}

	return page;
}

/*
 * Check if a potentially blocking operation needs to dip into the atomic
 * pools for the given device/gfp.
 */
static bool dma_direct_use_pool(struct device *dev, gfp_t gfp)
{
	return !gfpflags_allow_blocking(gfp) && !is_swiotlb_for_alloc(dev);
}

static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	struct page *page;
	u64 phys_mask;
	void *ret;

	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_DMA_COHERENT_POOL)))
		return NULL;

	gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
					   &phys_mask);
	page = dma_alloc_from_pool(dev, size, &ret, gfp, dma_coherent_ok);
	if (!page)
		return NULL;
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return ret;
}

static void *dma_direct_alloc_no_mapping(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	struct page *page;

	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
	if (!page)
		return NULL;

	/* remove any dirty cache lines on the kernel alias */
	if (!PageHighMem(page))
		arch_dma_prep_coherent(page, size);

	/* return the page pointer as the opaque cookie */
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return page;
}

void *dma_direct_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	bool remap = false, set_uncached = false;
	struct page *page;
	void *ret;

	size = PAGE_ALIGN(size);
	if (attrs & DMA_ATTR_NO_WARN)
		gfp |= __GFP_NOWARN;

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev))
		return dma_direct_alloc_no_mapping(dev, size, dma_handle, gfp);

	if (!dev_is_dma_coherent(dev)) {
		/*
		 * Fall back to the arch handler if it exists.  This should
		 * eventually go away.
		 */
		if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
		    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
		    !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
		    !is_swiotlb_for_alloc(dev))
			return arch_dma_alloc(dev, size, dma_handle, gfp,
					      attrs);

		/*
		 * If there is a global pool, always allocate from it for
		 * non-coherent devices.
		 */
		if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL))
			return dma_alloc_from_global_coherent(dev, size,
					dma_handle);

		/*
		 * Otherwise remap if the architecture is asking for it.  But
		 * given that remapping memory is a blocking operation we'll
		 * instead have to dip into the atomic pools.
		 */
		remap = IS_ENABLED(CONFIG_DMA_DIRECT_REMAP);
		if (remap) {
			if (dma_direct_use_pool(dev, gfp))
				return dma_direct_alloc_from_pool(dev, size,
						dma_handle, gfp);
		} else {
			if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED))
				return NULL;
			set_uncached = true;
		}
	}

	/*
	 * Decrypting memory may block, so allocate the memory from the atomic
	 * pools if we can't block.
	 */
	if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);

	/* we always manually zero the memory once we are done */
	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
	if (!page)
		return NULL;

	/*
	 * dma_alloc_contiguous can return highmem pages depending on a
	 * combination of the cma= arguments and per-arch setup.  These need
	 * to be remapped to return a kernel virtual address.
	 */
	if (PageHighMem(page)) {
		remap = true;
		set_uncached = false;
	}

	if (remap) {
		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);

		if (force_dma_unencrypted(dev))
			prot = pgprot_decrypted(prot);

		/* remove any dirty cache lines on the kernel alias */
		arch_dma_prep_coherent(page, size);

		/* create a coherent mapping */
		ret = dma_common_contiguous_remap(page, size, prot,
				__builtin_return_address(0));
		if (!ret)
			goto out_free_pages;
	} else {
		ret = page_address(page);
		if (dma_set_decrypted(dev, ret, size))
			goto out_free_pages;
	}

	memset(ret, 0, size);

	if (set_uncached) {
		arch_dma_prep_coherent(page, size);
		ret = arch_dma_set_uncached(ret, size);
		if (IS_ERR(ret))
			goto out_encrypt_pages;
	}

	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return ret;

out_encrypt_pages:
	if (dma_set_encrypted(dev, page_address(page), size))
		return NULL;
out_free_pages:
	__dma_direct_free_pages(dev, page, size);
	return NULL;
}

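/*
 * A minimal sketch of how the allocator above is reached, assuming a device
 * "dev" obtained in a driver's probe routine: drivers call
 * dma_alloc_coherent(), which typically dispatches to dma_direct_alloc()
 * when no other dma_map_ops (e.g. from an IOMMU) are attached to the
 * device.  The helper below is hypothetical and only illustrates the
 * calling convention.
 */
static int dma_direct_alloc_example(struct device *dev)
{
	dma_addr_t handle;
	void *buf;

	/* one page of coherent memory, zeroed by dma_direct_alloc() */
	buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* ... program "handle" into the device, access "buf" from the CPU ... */

	dma_free_coherent(dev, PAGE_SIZE, buf, handle);
	return 0;
}
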
void dma_direct_free(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int page_order = get_order(size);

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev)) {
		/* cpu_addr is a struct page cookie, not a kernel address */
		dma_free_contiguous(dev, cpu_addr, size);
		return;
	}

	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
	    !dev_is_dma_coherent(dev) &&
	    !is_swiotlb_for_alloc(dev)) {
		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
		return;
	}

	if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
	    !dev_is_dma_coherent(dev)) {
		if (!dma_release_from_global_coherent(page_order, cpu_addr))
			WARN_ON_ONCE(1);
		return;
	}

	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
		return;

	if (is_vmalloc_addr(cpu_addr)) {
		vunmap(cpu_addr);
	} else {
		if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
			arch_dma_clear_uncached(cpu_addr, size);
		if (dma_set_encrypted(dev, cpu_addr, size))
			return;
	}

	__dma_direct_free_pages(dev, dma_direct_to_page(dev, dma_addr), size);
}

struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page;
	void *ret;

	if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);

	page = __dma_direct_alloc_pages(dev, size, gfp, false);
	if (!page)
		return NULL;

	ret = page_address(page);
	if (dma_set_decrypted(dev, ret, size))
		goto out_free_pages;
	memset(ret, 0, size);
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return page;
out_free_pages:
	__dma_direct_free_pages(dev, page, size);
	return NULL;
}

void dma_direct_free_pages(struct device *dev, size_t size,
		struct page *page, dma_addr_t dma_addr,
		enum dma_data_direction dir)
{
	void *vaddr = page_address(page);

	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
	    dma_free_from_pool(dev, vaddr, size))
		return;

	if (dma_set_encrypted(dev, vaddr, size))
		return;
	__dma_direct_free_pages(dev, page, size);
}

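/*
 * Usage sketch for the page allocator pair above, assuming a device "dev"
 * from a driver's probe routine: dma_alloc_pages()/dma_free_pages() end up
 * in dma_direct_alloc_pages()/dma_direct_free_pages() for direct-mapped
 * devices.  The helper below is hypothetical; the returned memory is not
 * guaranteed to be cache coherent, so streaming users sync it with
 * dma_sync_single_for_device()/dma_sync_single_for_cpu() as needed.
 */
static void dma_direct_pages_example(struct device *dev)
{
	dma_addr_t handle;
	struct page *page;

	page = dma_alloc_pages(dev, PAGE_SIZE, &handle, DMA_BIDIRECTIONAL,
			       GFP_KERNEL);
	if (!page)
		return;

	/* ... stream data through page_address(page), syncing as needed ... */

	dma_free_pages(dev, PAGE_SIZE, page, handle, DMA_BIDIRECTIONAL);
}
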
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		if (unlikely(is_swiotlb_buffer(dev, paddr)))
			swiotlb_sync_single_for_device(dev, paddr, sg->length,
						       dir);

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_device(paddr, sg->length,
					dir);
	}
}
#endif

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_cpu(paddr, sg->length, dir);

		if (unlikely(is_swiotlb_buffer(dev, paddr)))
			swiotlb_sync_single_for_cpu(dev, paddr, sg->length,
						    dir);

		if (dir == DMA_FROM_DEVICE)
			arch_dma_mark_clean(paddr, sg->length);
	}

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu_all();
}

void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		dma_direct_unmap_page(dev, sg->dma_address, sg_dma_len(sg), dir,
			     attrs);
}
#endif

int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = dma_direct_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			goto out_unmap;
		sg_dma_len(sg) = sg->length;
	}

	return nents;

out_unmap:
	dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	return -EIO;
}

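/*
 * A minimal streaming-DMA sketch for the scatterlist path above, assuming a
 * device "dev" and two kernel buffers "a" and "b" (all hypothetical):
 * dma_map_sg() dispatches to dma_direct_map_sg() for direct-mapped devices,
 * and the matching unmap call reaches dma_direct_unmap_sg().
 */
static int dma_direct_map_sg_example(struct device *dev, void *a, void *b,
		size_t len)
{
	struct scatterlist sgl[2];
	int count;

	sg_init_table(sgl, 2);
	sg_set_buf(&sgl[0], a, len);
	sg_set_buf(&sgl[1], b, len);

	/* dma_map_sg() returns 0 on failure and the mapped segment count otherwise */
	count = dma_map_sg(dev, sgl, 2, DMA_TO_DEVICE);
	if (!count)
		return -EIO;

	/* ... program the device with the "count" mapped segments ... */

	dma_unmap_sg(dev, sgl, 2, DMA_TO_DEVICE);
	return 0;
}
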
dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t dma_addr = paddr;

	if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
		dev_err_once(dev,
			     "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
			     &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
		WARN_ON_ONCE(1);
		return DMA_MAPPING_ERROR;
	}

	return dma_addr;
}

int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page = dma_direct_to_page(dev, dma_addr);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

bool dma_direct_can_mmap(struct device *dev)
{
	return dev_is_dma_coherent(dev) ||
		IS_ENABLED(CONFIG_DMA_NONCOHERENT_MMAP);
}

int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = PHYS_PFN(dma_to_phys(dev, dma_addr));
	int ret = -ENXIO;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
	if (force_dma_unencrypted(dev))
		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;
	if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
		return ret;

	if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff)
		return -ENXIO;
	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			user_count << PAGE_SHIFT, vma->vm_page_prot);
}

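/*
 * Sketch of the usual caller of dma_direct_mmap(), assuming a hypothetical
 * driver that exposes a coherent buffer ("buf"/"handle", allocated with
 * dma_alloc_coherent()) to user space from its mmap file operation:
 */
static int dma_direct_mmap_example(struct device *dev,
		struct vm_area_struct *vma, void *buf, dma_addr_t handle,
		size_t size)
{
	/* dma_mmap_coherent() reaches dma_direct_mmap() for direct-mapped devices */
	return dma_mmap_coherent(dev, vma, buf, handle, size);
}
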
int dma_direct_supported(struct device *dev, u64 mask)
{
	u64 min_mask = (max_pfn - 1) << PAGE_SHIFT;

	/*
	 * Because 32-bit DMA masks are so common we expect every architecture
	 * to be able to satisfy them - either by not supporting more physical
	 * memory, or by providing a ZONE_DMA32.  If neither is the case, the
	 * architecture needs to use an IOMMU instead of the direct mapping.
	 */
	if (mask >= DMA_BIT_MASK(32))
		return 1;

	/*
	 * This check needs to be against the actual bit mask value, so use
	 * phys_to_dma_unencrypted() here so that the SME encryption mask isn't
	 * part of the check.
	 */
	if (IS_ENABLED(CONFIG_ZONE_DMA))
		min_mask = min_t(u64, min_mask, DMA_BIT_MASK(zone_dma_bits));
	return mask >= phys_to_dma_unencrypted(dev, min_mask);
}

size_t dma_direct_max_mapping_size(struct device *dev)
{
	/* If SWIOTLB is active, use its maximum mapping size */
	if (is_swiotlb_active(dev) &&
	    (dma_addressing_limited(dev) || is_swiotlb_force_bounce(dev)))
		return swiotlb_max_mapping_size(dev);
	return SIZE_MAX;
}

bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return !dev_is_dma_coherent(dev) ||
	       is_swiotlb_buffer(dev, dma_to_phys(dev, dma_addr));
}

/**
 * dma_direct_set_offset - Assign scalar offset for a single DMA range.
 * @dev:	device pointer; needed to "own" the alloced memory.
 * @cpu_start:  beginning of memory region covered by this offset.
 * @dma_start:  beginning of DMA/PCI region covered by this offset.
 * @size:	size of the region.
 *
 * This is for the simple case of a uniform offset which cannot
 * be discovered by "dma-ranges".
 *
 * It returns -ENOMEM if out of memory, -EINVAL if a map
 * already exists, 0 otherwise.
 *
 * Note: any call to this from a driver is a bug.  The mapping needs
 * to be described by the device tree or other firmware interfaces.
 */
int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
			 dma_addr_t dma_start, u64 size)
{
	struct bus_dma_region *map;
	u64 offset = (u64)cpu_start - (u64)dma_start;

	if (dev->dma_range_map) {
		dev_err(dev, "attempt to add DMA range to existing map\n");
		return -EINVAL;
	}

	if (!offset)
		return 0;

	map = kcalloc(2, sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;
	map[0].cpu_start = cpu_start;
	map[0].dma_start = dma_start;
	map[0].offset = offset;
	map[0].size = size;
	dev->dma_range_map = map;
	return 0;
}
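
/*
 * Worked example for the offset map above: with cpu_start == 0x80000000,
 * dma_start == 0x00000000 and a size covering 1 GiB, offset is 0x80000000,
 * so a buffer at physical address 0x80001000 is presented to the device as
 * bus address 0x00001000.  The second, zero-filled element of the
 * kcalloc()'d array terminates the dma_range_map list.
 */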