cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

dma-mapping.h (20289B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>
#include <linux/mem_encrypt.h>

/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/core-api/dma-attributes.rst.
 */

/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is, reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN	(1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED		(1UL << 9)
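
/*
 * Illustrative sketch (not part of the upstream header): the attributes
 * above are OR-ed together and passed through the attrs argument of the
 * *_attrs allocation/mapping variants declared below.  The device pointer,
 * size and gfp flags are placeholders chosen for the example.
 *
 *	dma_addr_t dma_handle;
 *	void *cpu_addr;
 *
 *	cpu_addr = dma_alloc_attrs(dev, SZ_64K, &dma_handle, GFP_KERNEL,
 *				   DMA_ATTR_WRITE_COMBINE | DMA_ATTR_NO_WARN);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	...
 *	dma_free_attrs(dev, SZ_64K, cpu_addr, dma_handle,
 *		       DMA_ATTR_WRITE_COMBINE | DMA_ATTR_NO_WARN);
 */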

/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.  It can
 * be given to a device to use as a DMA source or target.  It is specific to a
 * given device and there may be a translation between the CPU physical address
 * space and the bus address space.
 *
 * DMA_MAPPING_ERROR is the magic error code if a mapping failed.  It should not
 * be used directly in drivers, but checked for using dma_mapping_error()
 * instead.
 */
#define DMA_MAPPING_ERROR		(~(dma_addr_t)0)

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

#ifdef CONFIG_DMA_API_DEBUG
void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
void debug_dma_map_single(struct device *dev, const void *addr,
		unsigned long len);
#else
static inline void debug_dma_mapping_error(struct device *dev,
		dma_addr_t dma_addr)
{
}
static inline void debug_dma_map_single(struct device *dev, const void *addr,
		unsigned long len)
{
}
#endif /* CONFIG_DMA_API_DEBUG */

#ifdef CONFIG_HAS_DMA
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (unlikely(dma_addr == DMA_MAPPING_ERROR))
		return -ENOMEM;
	return 0;
}

dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs);
void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      unsigned long attrs);
int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs);
dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir);
void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir);
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir);
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir);
void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs);
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle);
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_can_mmap(struct device *dev);
int dma_supported(struct device *dev, u64 mask);
int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
u64 dma_get_required_mask(struct device *dev);
size_t dma_max_mapping_size(struct device *dev);
bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
unsigned long dma_get_merge_boundary(struct device *dev);
struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs);
void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir);
void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt);
void dma_vunmap_noncontiguous(struct device *dev, void *vaddr);
int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct sg_table *sgt);
#else /* CONFIG_HAS_DMA */
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
		struct page *page, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline unsigned int dma_map_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
	return 0;
}
static inline void dma_unmap_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	return -EOPNOTSUPP;
}
static inline dma_addr_t dma_map_resource(struct device *dev,
		phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return -ENOMEM;
}
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
	return NULL;
}
static inline void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs)
{
}
static inline void *dmam_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	return NULL;
}
static inline void dmam_free_coherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle)
{
}
static inline int dma_get_sgtable_attrs(struct device *dev,
		struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
		size_t size, unsigned long attrs)
{
	return -ENXIO;
}
static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	return -ENXIO;
}
static inline bool dma_can_mmap(struct device *dev)
{
	return false;
}
static inline int dma_supported(struct device *dev, u64 mask)
{
	return 0;
}
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline u64 dma_get_required_mask(struct device *dev)
{
	return 0;
}
static inline size_t dma_max_mapping_size(struct device *dev)
{
	return 0;
}
static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return false;
}
static inline unsigned long dma_get_merge_boundary(struct device *dev)
{
	return 0;
}
static inline struct sg_table *dma_alloc_noncontiguous(struct device *dev,
		size_t size, enum dma_data_direction dir, gfp_t gfp,
		unsigned long attrs)
{
	return NULL;
}
static inline void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
}
static inline void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt)
{
	return NULL;
}
static inline void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
{
}
static inline int dma_mmap_noncontiguous(struct device *dev,
		struct vm_area_struct *vma, size_t size, struct sg_table *sgt)
{
	return -EINVAL;
}
#endif /* CONFIG_HAS_DMA */

struct page *dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir);
int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct page *page);

static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page = dma_alloc_pages(dev, size, dma_handle, dir, gfp);
	return page ? page_address(page) : NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle, enum dma_data_direction dir)
{
	dma_free_pages(dev, size, virt_to_page(vaddr), dma_handle, dir);
}
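
/*
 * Illustrative sketch (not part of the upstream header): a non-coherent
 * allocation returns a kernel virtual address, but the caller owns the
 * cache maintenance and must bracket CPU accesses with the dma_sync_*
 * calls on the returned handle.  dev, size and dma_handle are placeholders.
 *
 *	void *vaddr = dma_alloc_noncoherent(dev, size, &dma_handle,
 *					    DMA_BIDIRECTIONAL, GFP_KERNEL);
 *	if (!vaddr)
 *		return -ENOMEM;
 *	... fill vaddr on the CPU ...
 *	dma_sync_single_for_device(dev, dma_handle, size, DMA_BIDIRECTIONAL);
 *	... device DMA ...
 *	dma_sync_single_for_cpu(dev, dma_handle, size, DMA_BIDIRECTIONAL);
 *	dma_free_noncoherent(dev, size, vaddr, dma_handle, DMA_BIDIRECTIONAL);
 */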

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	/* DMA must never operate on areas that might be remapped. */
	if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr),
			  "rejecting DMA map of vmalloc memory\n"))
		return DMA_MAPPING_ERROR;
	debug_dma_map_single(dev, ptr, size);
	return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
			size, dir, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return dma_unmap_page_attrs(dev, addr, size, dir, attrs);
}
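
/*
 * Illustrative sketch (not part of the upstream header): the usual
 * streaming-mapping pattern for a kmalloc()ed buffer; buf and len are
 * placeholders.  Every mapping must be checked with dma_mapping_error()
 * and eventually unmapped with the same size and direction.
 *
 *	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, addr))
 *		return -ENOMEM;
 *	... hand addr to the device and wait for the transfer ...
 *	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
 */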

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_device(dev, addr + offset, size, dir);
}
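
/*
 * Illustrative sketch (not part of the upstream header): syncing only the
 * part of a mapping that the CPU actually touches, e.g. a header at the
 * start of a receive buffer; addr, offset and hdr_len are placeholders.
 *
 *	dma_sync_single_range_for_cpu(dev, addr, offset, hdr_len,
 *				      DMA_FROM_DEVICE);
 *	... CPU inspects the freshly DMA-ed header ...
 *	dma_sync_single_range_for_device(dev, addr, offset, hdr_len,
 *					 DMA_FROM_DEVICE);
 */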

/**
 * dma_unmap_sgtable - Unmap the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 * @attrs:	Optional DMA attributes for the unmap operation
 *
 * Unmaps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After this function
 * the ownership of the buffer is transferred back to the CPU domain.
 */
static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
}

/**
 * dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the CPU domain, so it is safe to perform any access to it
 * by the CPU. Before doing any further DMA operations, one has to transfer
 * the ownership of the buffer back to the DMA domain by calling the
 * dma_sync_sgtable_for_device().
 */
static inline void dma_sync_sgtable_for_cpu(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
}

/**
 * dma_sync_sgtable_for_device - Synchronize the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the DMA domain, so it is safe to perform the DMA operation.
 * Once finished, one has to call dma_sync_sgtable_for_cpu() or
 * dma_unmap_sgtable().
 */
static inline void dma_sync_sgtable_for_device(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
}
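
/*
 * Illustrative sketch (not part of the upstream header): the sg_table
 * helpers above pair with dma_map_sgtable() for a full buffer lifecycle;
 * sgt is assumed to be an already populated struct sg_table.
 *
 *	ret = dma_map_sgtable(dev, sgt, DMA_FROM_DEVICE, 0);
 *	if (ret)
 *		return ret;
 *	... device writes into the buffer ...
 *	dma_sync_sgtable_for_cpu(dev, sgt, DMA_FROM_DEVICE);
 *	... CPU reads the data ...
 *	dma_sync_sgtable_for_device(dev, sgt, DMA_FROM_DEVICE);
 *	... more device DMA ...
 *	dma_unmap_sgtable(dev, sgt, DMA_FROM_DEVICE, 0);
 */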

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}
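
/*
 * Illustrative sketch (not part of the upstream header): coherent memory
 * is typically used for long-lived descriptor rings that both CPU and
 * device access without explicit syncing; RING_SIZE is a placeholder.
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, RING_SIZE, &ring_dma, GFP_KERNEL);
 *
 *	if (!ring)
 *		return -ENOMEM;
 *	... program ring_dma into the device, use ring from the CPU ...
 *	dma_free_coherent(dev, RING_SIZE, ring, ring_dma);
 */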

static inline u64 dma_get_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);
	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}
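
/*
 * Illustrative sketch (not part of the upstream header): a typical probe()
 * sets both masks once, falling back to 32 bits if the wider mask is not
 * supported; the 48-bit value is a placeholder for the device's real
 * addressing capability.
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)) &&
 *	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */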

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}

/**
 * dma_addressing_limited - return if the device is addressing limited
 * @dev:	device to check
 *
 * Return %true if the device's DMA mask is too small to address all memory in
 * the system, else %false.  Lack of addressing bits is the prime reason for
 * bounce buffering, but might not be the only one.
 */
static inline bool dma_addressing_limited(struct device *dev)
{
	return min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
			    dma_get_required_mask(dev);
}

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return ULONG_MAX;
}

/**
 * dma_get_seg_boundary_nr_pages - return the segment boundary in "page" units
 * @dev: device to query the boundary for
 * @page_shift: ilog2() of the IOMMU page size
 *
 * Return the segment boundary in IOMMU page units (which may be different from
 * the CPU page size) for the passed in device.
 *
 * If @dev is NULL a boundary of U32_MAX is assumed, this case is just for
 * non-DMA API callers.
 */
static inline unsigned long dma_get_seg_boundary_nr_pages(struct device *dev,
		unsigned int page_shift)
{
	if (!dev)
		return (U32_MAX >> page_shift) + 1;
	return (dma_get_seg_boundary(dev) >> page_shift) + 1;
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}
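
/*
 * Illustrative sketch (not part of the upstream header): buses or drivers
 * constrain scatter-gather merging via dev->dma_parms; both setters fail
 * with -EIO when dma_parms has not been allocated by the bus code.  The
 * values below are placeholders.
 *
 *	dma_set_max_seg_size(dev, SZ_64K);
 *	dma_set_seg_boundary(dev, SZ_1G - 1);
 */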

static inline unsigned int dma_get_min_align_mask(struct device *dev)
{
	if (dev->dma_parms)
		return dev->dma_parms->min_align_mask;
	return 0;
}

static inline int dma_set_min_align_mask(struct device *dev,
		unsigned int min_align_mask)
{
	if (WARN_ON_ONCE(!dev->dma_parms))
		return -EIO;
	dev->dma_parms->min_align_mask = min_align_mask;
	return 0;
}

static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}

static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dmam_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void *dma_alloc_wc(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t gfp)
{
	unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

	if (gfp & __GFP_NOWARN)
		attrs |= DMA_ATTR_NO_WARN;

	return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}

static inline void dma_free_wc(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			      DMA_ATTR_WRITE_COMBINE);
}

static inline int dma_mmap_wc(struct device *dev,
			      struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr,
			      size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			      DMA_ATTR_WRITE_COMBINE);
}
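
/*
 * Illustrative sketch (not part of the upstream header): the *_wc helpers
 * wrap DMA_ATTR_WRITE_COMBINE, e.g. for a framebuffer that is also mapped
 * to user space from the driver's mmap handler; fb_size is a placeholder.
 *
 *	dma_addr_t fb_dma;
 *	void *fb = dma_alloc_wc(dev, fb_size, &fb_dma, GFP_KERNEL);
 *
 *	if (!fb)
 *		return -ENOMEM;
 *	...
 *	in the fops->mmap() handler:
 *		return dma_mmap_wc(dev, vma, fb, fb_dma, fb_size);
 *	...
 *	dma_free_wc(dev, fb_size, fb, fb_dma);
 */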

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)           ((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  (((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)             ((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)    (((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)           (0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)             (0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)    do { } while (0)
#endif
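
/*
 * Illustrative sketch (not part of the upstream header): drivers embed the
 * DEFINE_DMA_UNMAP_* fields in their descriptor structures so the unmap
 * bookkeeping compiles away when CONFIG_NEED_DMA_MAP_STATE is not set;
 * struct tx_desc and its users are placeholders.
 *
 *	struct tx_desc {
 *		DEFINE_DMA_UNMAP_ADDR(mapping);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(desc, mapping, addr);
 *	dma_unmap_len_set(desc, len, size);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(desc, mapping),
 *			 dma_unmap_len(desc, len), DMA_TO_DEVICE);
 */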

#endif /* _LINUX_DMA_MAPPING_H */