From 543c37cb165049c3be24a0d4733e67caa2b33eef Mon Sep 17 00:00:00 2001 From: Emese Revfy Date: Tue, 24 May 2016 00:11:37 +0200 Subject: Add sancov plugin The sancov gcc plugin inserts a __sanitizer_cov_trace_pc() call at the start of basic blocks. This plugin is a helper plugin for the kcov feature. It supports all gcc versions with plugin support (from gcc-4.5 on). It is based on the gcc commit "Add fuzzing coverage support" by Dmitry Vyukov (https://gcc.gnu.org/viewcvs/gcc?limit_changes=0&view=revision&revision=231296). Signed-off-by: Emese Revfy Acked-by: Kees Cook Signed-off-by: Michal Marek --- lib/Kconfig.debug | 2 ++ 1 file changed, 2 insertions(+) (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 77d7d034bac3..b7827dca3fec 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -708,6 +708,8 @@ config KCOV bool "Code coverage for fuzzing" depends on ARCH_HAS_KCOV select DEBUG_FS + select GCC_PLUGINS + select GCC_PLUGIN_SANCOV help KCOV exposes kernel code coverage information in a form suitable for coverage-guided fuzzing (randomized testing). -- cgit v1.2.3-71-gd317 From a519167e753e6a89476115375b65a7eb6ec485b3 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Sat, 11 Jun 2016 09:09:28 -0700 Subject: gcc-plugins: disable under COMPILE_TEST Since adding the gcc plugin development headers is required for the gcc plugin support, we should ease into this new kernel build dependency more slowly. For now, disable the gcc plugins under COMPILE_TEST so that all*config builds will skip it. Signed-off-by: Kees Cook Signed-off-by: Michal Marek --- arch/Kconfig | 1 + lib/Kconfig.debug | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'lib') diff --git a/arch/Kconfig b/arch/Kconfig index 05f1e95b796d..cae4bc587eae 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -366,6 +366,7 @@ config HAVE_GCC_PLUGINS menuconfig GCC_PLUGINS bool "GCC plugins" depends on HAVE_GCC_PLUGINS + depends on !COMPILE_TEST help GCC plugins are loadable modules that provide extra features to the compiler. They are useful for runtime instrumentation and static analysis. diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index b7827dca3fec..7936e5e4da9d 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -708,8 +708,8 @@ config KCOV bool "Code coverage for fuzzing" depends on ARCH_HAS_KCOV select DEBUG_FS - select GCC_PLUGINS - select GCC_PLUGIN_SANCOV + select GCC_PLUGINS if !COMPILE_TEST + select GCC_PLUGIN_SANCOV if !COMPILE_TEST help KCOV exposes kernel code coverage information in a form suitable for coverage-guided fuzzing (randomized testing). -- cgit v1.2.3-71-gd317 From 4816c9406430d0d3d4fa58a212a7a869d429b315 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Thu, 28 Jul 2016 13:29:17 +0800 Subject: lib/mpi: Fix SG miter leak In mpi_read_raw_from_sgl we may leak the SG miter resouces after reading the leading zeroes. This patch fixes this by stopping the iteration once the leading zeroes have been read. 
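For context, every sg_miter_start() must be paired with an sg_miter_stop(); the stop call is what releases the kmap/atomic state taken by sg_miter_next(). A minimal sketch of that pattern (the helper name and its arguments are hypothetical, this is not the mpicoder.c code itself):

    #include <linux/scatterlist.h>

    /* Count leading zero bytes, then release the iterator before the
     * scatterlist is walked again.  Skipping sg_miter_stop() here is
     * exactly the leak described in this patch. */
    static size_t count_leading_zeros_sketch(struct scatterlist *sgl,
                                             unsigned int nents, size_t nbytes)
    {
            struct sg_mapping_iter miter;
            size_t lzeros = 0;
            size_t i;

            sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC | SG_MITER_FROM_SG);

            while (lzeros < nbytes && sg_miter_next(&miter)) {
                    const u8 *buf = miter.addr;

                    for (i = 0; i < miter.length && lzeros < nbytes; i++) {
                            if (buf[i])
                                    goto out;
                            lzeros++;
                    }
            }
    out:
            sg_miter_stop(&miter);
            return lzeros;
    }

The fix below does the equivalent in mpi_read_raw_from_sgl(): it records miter.consumed and calls sg_miter_stop() once the leading zeroes have been counted, so the later sg_miter_next() loop resumes from a cleanly stopped iterator.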
Fixes: 127827b9c295 ("lib/mpi: Do not do sg_virt") Reported-by: Nicolai Stange Tested-by: Nicolai Stange Signed-off-by: Herbert Xu --- lib/mpi/mpicoder.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'lib') diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c index c6272ae2015e..5a0f75a3bf01 100644 --- a/lib/mpi/mpicoder.c +++ b/lib/mpi/mpicoder.c @@ -363,6 +363,9 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes) lzeros = 0; } + miter.consumed = lzeros; + sg_miter_stop(&miter); + nbytes -= lzeros; nbits = nbytes * 8; if (nbits > MAX_EXTERN_MPI_BITS) { @@ -390,7 +393,10 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes) z = BYTES_PER_MPI_LIMB - nbytes % BYTES_PER_MPI_LIMB; z %= BYTES_PER_MPI_LIMB; - for (;;) { + while (sg_miter_next(&miter)) { + buff = miter.addr; + len = miter.length; + for (x = 0; x < len; x++) { a <<= 8; a |= *buff++; @@ -400,12 +406,6 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes) } } z += x; - - if (!sg_miter_next(&miter)) - break; - - buff = miter.addr; - len = miter.length; } return val; -- cgit v1.2.3-71-gd317 From 05eb6e7263185a6bb0de9501ccf2addc52429414 Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Tue, 2 Aug 2016 14:03:01 -0700 Subject: radix-tree: account nodes to memcg only if explicitly requested Radix trees may be used not only for storing page cache pages, so unconditionally accounting radix tree nodes to the current memory cgroup is bad: if a radix tree node is used for storing data shared among different cgroups we risk pinning dead memory cgroups forever. So let's only account radix tree nodes if it was explicitly requested by passing __GFP_ACCOUNT to INIT_RADIX_TREE. Currently, we only want to account page cache entries, so mark mapping->page_tree so. Fixes: 58e698af4c63 ("radix-tree: account radix_tree_node to memory cgroup") Link: http://lkml.kernel.org/r/1470057188-7864-1-git-send-email-vdavydov@virtuozzo.com Signed-off-by: Vladimir Davydov Acked-by: Johannes Weiner Acked-by: Michal Hocko Cc: [4.6+] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/inode.c | 2 +- lib/radix-tree.c | 14 ++++++++++---- 2 files changed, 11 insertions(+), 5 deletions(-) (limited to 'lib') diff --git a/fs/inode.c b/fs/inode.c index 9cef4e16aeda..ad445542c285 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -345,7 +345,7 @@ EXPORT_SYMBOL(inc_nlink); void address_space_init_once(struct address_space *mapping) { memset(mapping, 0, sizeof(*mapping)); - INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC); + INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC | __GFP_ACCOUNT); spin_lock_init(&mapping->tree_lock); init_rwsem(&mapping->i_mmap_rwsem); INIT_LIST_HEAD(&mapping->private_list); diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 61b8fb529cef..1b7bf7314141 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -277,10 +277,11 @@ radix_tree_node_alloc(struct radix_tree_root *root) /* * Even if the caller has preloaded, try to allocate from the - * cache first for the new node to get accounted. + * cache first for the new node to get accounted to the memory + * cgroup. 
*/ ret = kmem_cache_alloc(radix_tree_node_cachep, - gfp_mask | __GFP_ACCOUNT | __GFP_NOWARN); + gfp_mask | __GFP_NOWARN); if (ret) goto out; @@ -303,8 +304,7 @@ radix_tree_node_alloc(struct radix_tree_root *root) kmemleak_update_trace(ret); goto out; } - ret = kmem_cache_alloc(radix_tree_node_cachep, - gfp_mask | __GFP_ACCOUNT); + ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask); out: BUG_ON(radix_tree_is_internal_node(ret)); return ret; @@ -351,6 +351,12 @@ static int __radix_tree_preload(gfp_t gfp_mask, int nr) struct radix_tree_node *node; int ret = -ENOMEM; + /* + * Nodes preloaded by one cgroup can be be used by another cgroup, so + * they should never be accounted to any particular memory cgroup. + */ + gfp_mask &= ~__GFP_ACCOUNT; + preempt_disable(); rtp = this_cpu_ptr(&radix_tree_preloads); while (rtp->nr < nr) { -- cgit v1.2.3-71-gd317 From 901d805c33fc4c029fc6b2d94ee5fb7d30278045 Mon Sep 17 00:00:00 2001 From: Nicolas Iooss Date: Tue, 2 Aug 2016 14:03:10 -0700 Subject: UBSAN: fix typo in format string handle_object_size_mismatch() used %pk to format a kernel pointer with pr_err(). This seemed to be a misspelling for %pK, but using this to format a kernel pointer does not make much sence here. Therefore use %p instead, like in handle_missaligned_access(). Link: http://lkml.kernel.org/r/20160730083010.11569-1-nicolas.iooss_linux@m4x.org Signed-off-by: Nicolas Iooss Acked-by: Andrey Ryabinin Cc: Joe Perches Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/ubsan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/ubsan.c b/lib/ubsan.c index 8799ae5e2e42..fb0409df1bcf 100644 --- a/lib/ubsan.c +++ b/lib/ubsan.c @@ -308,7 +308,7 @@ static void handle_object_size_mismatch(struct type_mismatch_data *data, return; ubsan_prologue(&data->location, &flags); - pr_err("%s address %pk with insufficient space\n", + pr_err("%s address %p with insufficient space\n", type_check_kinds[data->type_check_kind], (void *) ptr); pr_err("for an object of type %s\n", data->type->type_name); -- cgit v1.2.3-71-gd317 From 6b1d174b0c27b5de421eda55c2731f32b6bd9852 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Tue, 2 Aug 2016 14:04:04 -0700 Subject: ratelimit: extend to print suppressed messages on release MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Extend the ratelimiting facility to print the amount of suppressed lines when it is being released. This use case is aimed at short-termed, burst-like users for which we want to output the suppressed lines stats only once, after it has been disposed of. For an example, see /dev/kmsg usage in a follow-on patch. Also, change the printk() line we issue on release to not use "callbacks" as it is misleading: we're not suppressing callbacks but printk() calls. This has been separated from a previous patch by Linus. 
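A minimal usage sketch of the new flag follows (the burst_*() helpers and the file-scope state are hypothetical): a short-lived, burst-like user arms RATELIMIT_MSG_ON_RELEASE so that the suppressed-line summary is printed exactly once, at teardown, instead of on a later __ratelimit() call.

    #include <linux/ratelimit.h>
    #include <linux/printk.h>

    static struct ratelimit_state burst_rs;

    static void burst_open(void)
    {
            ratelimit_default_init(&burst_rs);  /* DEFAULT_RATELIMIT_{INTERVAL,BURST} */
            ratelimit_set_flags(&burst_rs, RATELIMIT_MSG_ON_RELEASE);
    }

    static void burst_log(const char *msg)
    {
            /* Past the burst limit, lines are only counted in ->missed ... */
            if (__ratelimit(&burst_rs))
                    pr_info("%s\n", msg);
    }

    static void burst_close(void)
    {
            /* ... and the "N output lines suppressed" warning is emitted here. */
            ratelimit_state_exit(&burst_rs);
    }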
Link: http://lkml.kernel.org/r/20160716061745.15795-2-bp@alien8.de Signed-off-by: Borislav Petkov Cc: Dave Young Cc: Franck Bui Cc: Greg Kroah-Hartman Cc: Ingo Molnar Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Steven Rostedt Cc: Uwe Kleine-König Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/ratelimit.h | 38 +++++++++++++++++++++++++++++++++----- lib/ratelimit.c | 10 ++++++---- 2 files changed, 39 insertions(+), 9 deletions(-) (limited to 'lib') diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h index 18102529254e..57c9e0622a38 100644 --- a/include/linux/ratelimit.h +++ b/include/linux/ratelimit.h @@ -2,11 +2,15 @@ #define _LINUX_RATELIMIT_H #include +#include #include #define DEFAULT_RATELIMIT_INTERVAL (5 * HZ) #define DEFAULT_RATELIMIT_BURST 10 +/* issue num suppressed message on exit */ +#define RATELIMIT_MSG_ON_RELEASE BIT(0) + struct ratelimit_state { raw_spinlock_t lock; /* protect the state */ @@ -15,6 +19,7 @@ struct ratelimit_state { int printed; int missed; unsigned long begin; + unsigned long flags; }; #define RATELIMIT_STATE_INIT(name, interval_init, burst_init) { \ @@ -34,12 +39,35 @@ struct ratelimit_state { static inline void ratelimit_state_init(struct ratelimit_state *rs, int interval, int burst) { + memset(rs, 0, sizeof(*rs)); + raw_spin_lock_init(&rs->lock); - rs->interval = interval; - rs->burst = burst; - rs->printed = 0; - rs->missed = 0; - rs->begin = 0; + rs->interval = interval; + rs->burst = burst; +} + +static inline void ratelimit_default_init(struct ratelimit_state *rs) +{ + return ratelimit_state_init(rs, DEFAULT_RATELIMIT_INTERVAL, + DEFAULT_RATELIMIT_BURST); +} + +static inline void ratelimit_state_exit(struct ratelimit_state *rs) +{ + if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) + return; + + if (rs->missed) { + pr_warn("%s: %d output lines suppressed due to ratelimiting\n", + current->comm, rs->missed); + rs->missed = 0; + } +} + +static inline void +ratelimit_set_flags(struct ratelimit_state *rs, unsigned long flags) +{ + rs->flags = flags; } extern struct ratelimit_state printk_ratelimit_state; diff --git a/lib/ratelimit.c b/lib/ratelimit.c index 2c5de86460c5..08f8043cac61 100644 --- a/lib/ratelimit.c +++ b/lib/ratelimit.c @@ -46,12 +46,14 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func) rs->begin = jiffies; if (time_is_before_jiffies(rs->begin + rs->interval)) { - if (rs->missed) - printk(KERN_WARNING "%s: %d callbacks suppressed\n", - func, rs->missed); + if (rs->missed) { + if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) { + pr_warn("%s: %d callbacks suppressed\n", func, rs->missed); + rs->missed = 0; + } + } rs->begin = jiffies; rs->printed = 0; - rs->missed = 0; } if (rs->burst && rs->burst > rs->printed) { rs->printed++; -- cgit v1.2.3-71-gd317 From f003a1f182bb821f13775338a4bf8711830f927a Mon Sep 17 00:00:00 2001 From: Sebastian Ott Date: Tue, 2 Aug 2016 14:04:13 -0700 Subject: lib/iommu-helper: skip to next segment When a large enough area in the iommu bitmap is found but would span a boundary we continue the search starting from the next bit position. For large allocations this can lead to several useless invocations of bitmap_find_next_zero_area() and iommu_is_span_boundary(). Continue the search from the start of the next segment (which is the next bit position such that we'll not cross the same segment boundary again). 
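To see what the new restart buys, here is a worked example with made-up numbers (boundary_size, shift and index below are purely illustrative):

    #include <linux/kernel.h>   /* ALIGN() */

    /* Assume 0x1000-bit segments, shift = 0, and that
     * bitmap_find_next_zero_area() returned index = 0x0ff0 for an
     * nr = 0x40 area, which iommu_is_span_boundary() rejects because
     * it crosses the 0x1000 boundary. */
    static unsigned long restart_example(void)
    {
            unsigned long shift = 0;
            unsigned long boundary_size = 0x1000;
            unsigned long index = 0x0ff0;
            unsigned long old_start, new_start;

            old_start = index + 1;                                    /* 0x0ff1 */
            new_start = ALIGN(shift + index, boundary_size) - shift;  /* 0x1000 */

            /* Every candidate in 0x0ff1..0x0fff still spans the same
             * boundary; the new computation jumps straight to the first
             * bit of the next segment. */
            return new_start - old_start;   /* 15 useless retries skipped */
    }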
Link: http://lkml.kernel.org/r/alpine.LFD.2.20.1606081910070.3211@schleppi Signed-off-by: Sebastian Ott Reviewed-by: Gerald Schaefer Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/iommu-helper.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'lib') diff --git a/lib/iommu-helper.c b/lib/iommu-helper.c index c27e269210c4..a816f3a80625 100644 --- a/lib/iommu-helper.c +++ b/lib/iommu-helper.c @@ -29,8 +29,7 @@ again: index = bitmap_find_next_zero_area(map, size, start, nr, align_mask); if (index < size) { if (iommu_is_span_boundary(index, nr, shift, boundary_size)) { - /* we could do more effectively */ - start = index + 1; + start = ALIGN(shift + index, boundary_size) - shift; goto again; } bitmap_set(map, index, nr); -- cgit v1.2.3-71-gd317 From a9bfd3321713ecec86282dd2bec04212189f91f1 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 2 Aug 2016 14:04:16 -0700 Subject: crc32: use ktime_get_ns() for measurement The crc32 test function measures the elapsed time in nanoseconds, but uses 'struct timespec' for that. We want to remove timespec from the kernel for y2038 compatibility, and ktime_get_ns() also helps make the code simpler here. It is also slightly better to use monontonic time, as we are only interested in the time difference. Link: http://lkml.kernel.org/r/20160617143932.3289626-1-arnd@arndb.de Signed-off-by: Arnd Bergmann Cc: "David S . Miller" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/crc32.c | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) (limited to 'lib') diff --git a/lib/crc32.c b/lib/crc32.c index 9a907d489d95..7fbd1a112b9d 100644 --- a/lib/crc32.c +++ b/lib/crc32.c @@ -979,7 +979,6 @@ static int __init crc32c_test(void) int i; int errors = 0; int bytes = 0; - struct timespec start, stop; u64 nsec; unsigned long flags; @@ -999,20 +998,17 @@ static int __init crc32c_test(void) local_irq_save(flags); local_irq_disable(); - getnstimeofday(&start); + nsec = ktime_get_ns(); for (i = 0; i < 100; i++) { if (test[i].crc32c_le != __crc32c_le(test[i].crc, test_buf + test[i].start, test[i].length)) errors++; } - getnstimeofday(&stop); + nsec = ktime_get_ns() - nsec; local_irq_restore(flags); local_irq_enable(); - nsec = stop.tv_nsec - start.tv_nsec + - 1000000000 * (stop.tv_sec - start.tv_sec); - pr_info("crc32c: CRC_LE_BITS = %d\n", CRC_LE_BITS); if (errors) @@ -1065,7 +1061,6 @@ static int __init crc32_test(void) int i; int errors = 0; int bytes = 0; - struct timespec start, stop; u64 nsec; unsigned long flags; @@ -1088,7 +1083,7 @@ static int __init crc32_test(void) local_irq_save(flags); local_irq_disable(); - getnstimeofday(&start); + nsec = ktime_get_ns(); for (i = 0; i < 100; i++) { if (test[i].crc_le != crc32_le(test[i].crc, test_buf + test[i].start, test[i].length)) @@ -1098,14 +1093,11 @@ static int __init crc32_test(void) test[i].start, test[i].length)) errors++; } - getnstimeofday(&stop); + nsec = ktime_get_ns() - nsec; local_irq_restore(flags); local_irq_enable(); - nsec = stop.tv_nsec - start.tv_nsec + - 1000000000 * (stop.tv_sec - start.tv_sec); - pr_info("crc32: CRC_LE_BITS = %d, CRC_BE BITS = %d\n", CRC_LE_BITS, CRC_BE_BITS); -- cgit v1.2.3-71-gd317 From a4691deabf284a601149a067525759939cc563b2 Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Tue, 2 Aug 2016 14:07:30 -0700 Subject: kcov: allow more fine-grained coverage instrumentation For more targeted fuzzing, it's better to disable kernel-wide instrumentation and instead enable it on a per-subsystem basis. 
This follows the pattern of UBSAN and allows you to compile in the kcov driver without instrumenting the whole kernel. To instrument a part of the kernel, you can use either # for a single file in the current directory KCOV_INSTRUMENT_filename.o := y or # for all the files in the current directory (excluding subdirectories) KCOV_INSTRUMENT := y or # (same as above) ccflags-y += $(CFLAGS_KCOV) or # for all the files in the current directory (including subdirectories) subdir-ccflags-y += $(CFLAGS_KCOV) Link: http://lkml.kernel.org/r/1464008380-11405-1-git-send-email-vegard.nossum@oracle.com Signed-off-by: Vegard Nossum Cc: Dmitry Vyukov Cc: Quentin Casasnovas Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/Kconfig.debug | 11 +++++++++++ scripts/Makefile.lib | 2 +- 2 files changed, 12 insertions(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index f07842e2d69f..cc02f282d05b 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -719,6 +719,17 @@ config KCOV For more details, see Documentation/kcov.txt. +config KCOV_INSTRUMENT_ALL + bool "Instrument all code by default" + depends on KCOV + default y if KCOV + help + If you are doing generic system call fuzzing (like e.g. syzkaller), + then you will want to instrument the whole kernel and you should + say y here. If you are doing more targeted fuzzing (like e.g. + filesystem fuzzing with AFL) then you will want to enable coverage + for more specific subsets of files, and should say n here. + config DEBUG_SHIRQ bool "Debug shared IRQ handlers" depends on DEBUG_KERNEL diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index e7df0f5db7ec..76494e15417b 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib @@ -138,7 +138,7 @@ endif ifeq ($(CONFIG_KCOV),y) _c_flags += $(if $(patsubst n%,, \ - $(KCOV_INSTRUMENT_$(basetarget).o)$(KCOV_INSTRUMENT)y), \ + $(KCOV_INSTRUMENT_$(basetarget).o)$(KCOV_INSTRUMENT)$(CONFIG_KCOV_INSTRUMENT_ALL)), \ $(CFLAGS_KCOV)) endif -- cgit v1.2.3-71-gd317 From 00085f1efa387a8ce100e3734920f7639c80caa3 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Wed, 3 Aug 2016 13:46:00 -0700 Subject: dma-mapping: use unsigned long for dma_attrs The dma-mapping core and the implementations do not change the DMA attributes passed by pointer. Thus the pointer can point to const data. However the attributes do not have to be a bitfield. Instead unsigned long will do fine: 1. This is just simpler. Both in terms of reading the code and setting attributes. Instead of initializing local attributes on the stack and passing pointer to it to dma_set_attr(), just set the bits. 2. It brings safeness and checking for const correctness because the attributes are passed by value. Semantic patches for this change (at least most of them): virtual patch virtual context @r@ identifier f, attrs; @@ f(..., - struct dma_attrs *attrs + unsigned long attrs , ...) { ... 
} @@ identifier r.f; @@ f(..., - NULL + 0 ) and // Options: --all-includes virtual patch virtual context @r@ identifier f, attrs; type t; @@ t f(..., struct dma_attrs *attrs); @@ identifier r.f; @@ f(..., - NULL + 0 ) Link: http://lkml.kernel.org/r/1468399300-5399-2-git-send-email-k.kozlowski@samsung.com Signed-off-by: Krzysztof Kozlowski Acked-by: Vineet Gupta Acked-by: Robin Murphy Acked-by: Hans-Christian Noren Egtvedt Acked-by: Mark Salter [c6x] Acked-by: Jesper Nilsson [cris] Acked-by: Daniel Vetter [drm] Reviewed-by: Bart Van Assche Acked-by: Joerg Roedel [iommu] Acked-by: Fabien Dessenne [bdisp] Reviewed-by: Marek Szyprowski [vb2-core] Acked-by: David Vrabel [xen] Acked-by: Konrad Rzeszutek Wilk [xen swiotlb] Acked-by: Joerg Roedel [iommu] Acked-by: Richard Kuo [hexagon] Acked-by: Geert Uytterhoeven [m68k] Acked-by: Gerald Schaefer [s390] Acked-by: Bjorn Andersson Acked-by: Hans-Christian Noren Egtvedt [avr32] Acked-by: Vineet Gupta [arc] Acked-by: Robin Murphy [arm64 and dma-iommu] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/DMA-API.txt | 33 +++--- Documentation/DMA-attributes.txt | 2 +- arch/alpha/include/asm/dma-mapping.h | 2 - arch/alpha/kernel/pci-noop.c | 2 +- arch/alpha/kernel/pci_iommu.c | 12 +- arch/arc/mm/dma.c | 12 +- arch/arm/common/dmabounce.c | 4 +- arch/arm/include/asm/dma-mapping.h | 13 +-- arch/arm/include/asm/xen/page-coherent.h | 16 +-- arch/arm/mm/dma-mapping.c | 129 ++++++++++----------- arch/arm/xen/mm.c | 8 +- arch/arm64/mm/dma-mapping.c | 66 +++++------ arch/avr32/mm/dma-coherent.c | 12 +- arch/blackfin/kernel/dma-mapping.c | 8 +- arch/c6x/include/asm/dma-mapping.h | 4 +- arch/c6x/kernel/dma.c | 9 +- arch/c6x/mm/dma-coherent.c | 4 +- arch/cris/arch-v32/drivers/pci/dma.c | 9 +- arch/frv/mb93090-mb00/pci-dma-nommu.c | 8 +- arch/frv/mb93090-mb00/pci-dma.c | 9 +- arch/h8300/kernel/dma.c | 8 +- arch/hexagon/include/asm/dma-mapping.h | 1 - arch/hexagon/kernel/dma.c | 8 +- arch/ia64/hp/common/sba_iommu.c | 22 ++-- arch/ia64/include/asm/machvec.h | 1 - arch/ia64/kernel/pci-swiotlb.c | 4 +- arch/ia64/sn/pci/pci_dma.c | 22 ++-- arch/m68k/kernel/dma.c | 12 +- arch/metag/kernel/dma.c | 16 +-- arch/microblaze/include/asm/dma-mapping.h | 1 - arch/microblaze/kernel/dma.c | 12 +- arch/mips/cavium-octeon/dma-octeon.c | 8 +- arch/mips/loongson64/common/dma-swiotlb.c | 10 +- arch/mips/mm/dma-default.c | 20 ++-- arch/mips/netlogic/common/nlm-dma.c | 4 +- arch/mn10300/mm/dma-alloc.c | 8 +- arch/nios2/mm/dma-mapping.c | 12 +- arch/openrisc/kernel/dma.c | 21 ++-- arch/parisc/kernel/pci-dma.c | 18 +-- arch/powerpc/include/asm/dma-mapping.h | 7 +- arch/powerpc/include/asm/iommu.h | 10 +- arch/powerpc/kernel/dma-iommu.c | 12 +- arch/powerpc/kernel/dma.c | 18 +-- arch/powerpc/kernel/ibmebus.c | 12 +- arch/powerpc/kernel/iommu.c | 12 +- arch/powerpc/kernel/vio.c | 12 +- arch/powerpc/platforms/cell/iommu.c | 28 ++--- arch/powerpc/platforms/pasemi/iommu.c | 2 +- arch/powerpc/platforms/powernv/npu-dma.c | 8 +- arch/powerpc/platforms/powernv/pci-ioda.c | 4 +- arch/powerpc/platforms/powernv/pci.c | 2 +- arch/powerpc/platforms/powernv/pci.h | 2 +- arch/powerpc/platforms/ps3/system-bus.c | 18 +-- arch/powerpc/platforms/pseries/iommu.c | 6 +- arch/powerpc/sysdev/dart_iommu.c | 2 +- arch/s390/include/asm/dma-mapping.h | 1 - arch/s390/pci/pci_dma.c | 23 ++-- arch/sh/include/asm/dma-mapping.h | 4 +- arch/sh/kernel/dma-nommu.c | 4 +- arch/sh/mm/consistent.c | 4 +- arch/sparc/kernel/iommu.c | 12 +- arch/sparc/kernel/ioport.c | 24 ++-- arch/sparc/kernel/pci_sun4v.c | 
12 +- arch/tile/kernel/pci-dma.c | 28 ++--- arch/unicore32/mm/dma-swiotlb.c | 4 +- arch/x86/include/asm/dma-mapping.h | 5 +- arch/x86/include/asm/swiotlb.h | 4 +- arch/x86/include/asm/xen/page-coherent.h | 9 +- arch/x86/kernel/amd_gart_64.c | 20 ++-- arch/x86/kernel/pci-calgary_64.c | 14 +-- arch/x86/kernel/pci-dma.c | 4 +- arch/x86/kernel/pci-nommu.c | 4 +- arch/x86/kernel/pci-swiotlb.c | 4 +- arch/x86/pci/sta2x11-fixup.c | 2 +- arch/x86/pci/vmd.c | 16 +-- arch/xtensa/kernel/pci-dma.c | 12 +- drivers/gpu/drm/exynos/exynos_drm_fbdev.c | 2 +- drivers/gpu/drm/exynos/exynos_drm_g2d.c | 12 +- drivers/gpu/drm/exynos/exynos_drm_gem.c | 20 ++-- drivers/gpu/drm/exynos/exynos_drm_gem.h | 2 +- drivers/gpu/drm/mediatek/mtk_drm_gem.c | 13 +-- drivers/gpu/drm/mediatek/mtk_drm_gem.h | 2 +- drivers/gpu/drm/msm/msm_drv.c | 13 +-- .../gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c | 13 +-- drivers/gpu/drm/rockchip/rockchip_drm_gem.c | 17 ++- drivers/gpu/drm/rockchip/rockchip_drm_gem.h | 2 +- drivers/infiniband/core/umem.c | 7 +- drivers/iommu/amd_iommu.c | 12 +- drivers/iommu/dma-iommu.c | 8 +- drivers/iommu/intel-iommu.c | 12 +- drivers/media/platform/sti/bdisp/bdisp-hw.c | 26 ++--- drivers/media/v4l2-core/videobuf2-dma-contig.c | 28 ++--- drivers/media/v4l2-core/videobuf2-dma-sg.c | 21 +--- drivers/media/v4l2-core/videobuf2-vmalloc.c | 2 +- drivers/misc/mic/host/mic_boot.c | 20 ++-- drivers/parisc/ccio-dma.c | 16 +-- drivers/parisc/sba_iommu.c | 16 +-- drivers/remoteproc/qcom_q6v5_pil.c | 7 +- drivers/video/fbdev/omap2/omapfb/omapfb-main.c | 12 +- drivers/video/fbdev/omap2/omapfb/omapfb.h | 3 +- drivers/xen/swiotlb-xen.c | 14 +-- include/linux/dma-attrs.h | 71 ------------ include/linux/dma-iommu.h | 6 +- include/linux/dma-mapping.h | 128 +++++++++++++------- include/linux/swiotlb.h | 10 +- include/media/videobuf2-core.h | 6 +- include/media/videobuf2-dma-contig.h | 2 - include/rdma/ib_verbs.h | 17 +-- include/xen/swiotlb-xen.h | 12 +- lib/dma-noop.c | 9 +- lib/swiotlb.c | 13 ++- 111 files changed, 705 insertions(+), 804 deletions(-) delete mode 100644 include/linux/dma-attrs.h (limited to 'lib') diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt index 45ef3f279c3b..1d26eeb6b5f6 100644 --- a/Documentation/DMA-API.txt +++ b/Documentation/DMA-API.txt @@ -369,35 +369,32 @@ See also dma_map_single(). dma_addr_t dma_map_single_attrs(struct device *dev, void *cpu_addr, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) void dma_unmap_single_attrs(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) int dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) The four functions above are just like the counterpart functions without the _attrs suffixes, except that they pass an optional -struct dma_attrs*. - -struct dma_attrs encapsulates a set of "DMA attributes". For the -definition of struct dma_attrs see linux/dma-attrs.h. +dma_attrs. The interpretation of DMA attributes is architecture-specific, and each attribute should be documented in Documentation/DMA-attributes.txt. 
-If struct dma_attrs* is NULL, the semantics of each of these -functions is identical to those of the corresponding function +If dma_attrs are 0, the semantics of each of these functions +is identical to those of the corresponding function without the _attrs suffix. As a result dma_map_single_attrs() can generally replace dma_map_single(), etc. @@ -405,15 +402,15 @@ As an example of the use of the *_attrs functions, here's how you could pass an attribute DMA_ATTR_FOO when mapping memory for DMA: -#include -/* DMA_ATTR_FOO should be defined in linux/dma-attrs.h and +#include +/* DMA_ATTR_FOO should be defined in linux/dma-mapping.h and * documented in Documentation/DMA-attributes.txt */ ... - DEFINE_DMA_ATTRS(attrs); - dma_set_attr(DMA_ATTR_FOO, &attrs); + unsigned long attr; + attr |= DMA_ATTR_FOO; .... - n = dma_map_sg_attrs(dev, sg, nents, DMA_TO_DEVICE, &attr); + n = dma_map_sg_attrs(dev, sg, nents, DMA_TO_DEVICE, attr); .... Architectures that care about DMA_ATTR_FOO would check for its @@ -422,12 +419,10 @@ routines, e.g.: void whizco_dma_map_sg_attrs(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { .... - int foo = dma_get_attr(DMA_ATTR_FOO, attrs); - .... - if (foo) + if (attrs & DMA_ATTR_FOO) /* twizzle the frobnozzle */ .... diff --git a/Documentation/DMA-attributes.txt b/Documentation/DMA-attributes.txt index e8cf9cf873b3..2d455a5cf671 100644 --- a/Documentation/DMA-attributes.txt +++ b/Documentation/DMA-attributes.txt @@ -2,7 +2,7 @@ ============== This document describes the semantics of the DMA attributes that are -defined in linux/dma-attrs.h. +defined in linux/dma-mapping.h. DMA_ATTR_WRITE_BARRIER ---------------------- diff --git a/arch/alpha/include/asm/dma-mapping.h b/arch/alpha/include/asm/dma-mapping.h index 3c3451f58ff4..c63b6ac19ee5 100644 --- a/arch/alpha/include/asm/dma-mapping.h +++ b/arch/alpha/include/asm/dma-mapping.h @@ -1,8 +1,6 @@ #ifndef _ALPHA_DMA_MAPPING_H #define _ALPHA_DMA_MAPPING_H -#include - extern struct dma_map_ops *dma_ops; static inline struct dma_map_ops *get_dma_ops(struct device *dev) diff --git a/arch/alpha/kernel/pci-noop.c b/arch/alpha/kernel/pci-noop.c index 8e735b5e56bd..bb152e21e5ae 100644 --- a/arch/alpha/kernel/pci-noop.c +++ b/arch/alpha/kernel/pci-noop.c @@ -109,7 +109,7 @@ sys_pciconfig_write(unsigned long bus, unsigned long dfn, static void *alpha_noop_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, - struct dma_attrs *attrs) + unsigned long attrs) { void *ret; diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c index 8969bf2dfe3a..451fc9cdd323 100644 --- a/arch/alpha/kernel/pci_iommu.c +++ b/arch/alpha/kernel/pci_iommu.c @@ -349,7 +349,7 @@ static struct pci_dev *alpha_gendev_to_pci(struct device *dev) static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { struct pci_dev *pdev = alpha_gendev_to_pci(dev); int dac_allowed; @@ -369,7 +369,7 @@ static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page, static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { unsigned long flags; struct pci_dev *pdev = alpha_gendev_to_pci(dev); @@ -433,7 +433,7 @@ static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr, static void 
*alpha_pci_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp, - struct dma_attrs *attrs) + unsigned long attrs) { struct pci_dev *pdev = alpha_gendev_to_pci(dev); void *cpu_addr; @@ -478,7 +478,7 @@ try_again: static void alpha_pci_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_addr, - struct dma_attrs *attrs) + unsigned long attrs) { struct pci_dev *pdev = alpha_gendev_to_pci(dev); pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL); @@ -651,7 +651,7 @@ sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end, static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { struct pci_dev *pdev = alpha_gendev_to_pci(dev); struct scatterlist *start, *end, *out; @@ -729,7 +729,7 @@ static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg, static void alpha_pci_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { struct pci_dev *pdev = alpha_gendev_to_pci(dev); unsigned long flags; diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c index ab74b5d9186c..20afc65e22dc 100644 --- a/arch/arc/mm/dma.c +++ b/arch/arc/mm/dma.c @@ -22,7 +22,7 @@ static void *arc_dma_alloc(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) + dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) { unsigned long order = get_order(size); struct page *page; @@ -46,7 +46,7 @@ static void *arc_dma_alloc(struct device *dev, size_t size, * (vs. always going to memory - thus are faster) */ if ((is_isa_arcv2() && ioc_exists) || - dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) + (attrs & DMA_ATTR_NON_CONSISTENT)) need_coh = 0; /* @@ -90,13 +90,13 @@ static void *arc_dma_alloc(struct device *dev, size_t size, } static void arc_dma_free(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle, struct dma_attrs *attrs) + dma_addr_t dma_handle, unsigned long attrs) { phys_addr_t paddr = plat_dma_to_phys(dev, dma_handle); struct page *page = virt_to_page(paddr); int is_non_coh = 1; - is_non_coh = dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs) || + is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT) || (is_isa_arcv2() && ioc_exists); if (PageHighMem(page) || !is_non_coh) @@ -130,7 +130,7 @@ static void _dma_cache_sync(phys_addr_t paddr, size_t size, static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { phys_addr_t paddr = page_to_phys(page) + offset; _dma_cache_sync(paddr, size, dir); @@ -138,7 +138,7 @@ static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page, } static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction dir, struct dma_attrs *attrs) + int nents, enum dma_data_direction dir, unsigned long attrs) { struct scatterlist *s; int i; diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c index 1143c4d5c567..301281645d08 100644 --- a/arch/arm/common/dmabounce.c +++ b/arch/arm/common/dmabounce.c @@ -310,7 +310,7 @@ static inline void unmap_single(struct device *dev, struct safe_buffer *buf, */ static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned 
long attrs) { dma_addr_t dma_addr; int ret; @@ -344,7 +344,7 @@ static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page, * should be) */ static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, - enum dma_data_direction dir, struct dma_attrs *attrs) + enum dma_data_direction dir, unsigned long attrs) { struct safe_buffer *buf; diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index a83570f10124..d009f7911ffc 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -5,7 +5,6 @@ #include #include -#include #include #include @@ -174,7 +173,7 @@ static inline void dma_mark_clean(void *addr, size_t size) { } * to be the device-viewed address. */ extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, - gfp_t gfp, struct dma_attrs *attrs); + gfp_t gfp, unsigned long attrs); /** * arm_dma_free - free memory allocated by arm_dma_alloc @@ -191,7 +190,7 @@ extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, * during and after this call executing are illegal. */ extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr, - dma_addr_t handle, struct dma_attrs *attrs); + dma_addr_t handle, unsigned long attrs); /** * arm_dma_mmap - map a coherent DMA allocation into user space @@ -208,7 +207,7 @@ extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr, */ extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size, - struct dma_attrs *attrs); + unsigned long attrs); /* * This can be called during early boot to increase the size of the atomic @@ -262,16 +261,16 @@ extern void dmabounce_unregister_dev(struct device *); * The scatter list versions of the above methods. 
*/ extern int arm_dma_map_sg(struct device *, struct scatterlist *, int, - enum dma_data_direction, struct dma_attrs *attrs); + enum dma_data_direction, unsigned long attrs); extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int, - enum dma_data_direction, struct dma_attrs *attrs); + enum dma_data_direction, unsigned long attrs); extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int, enum dma_data_direction); extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int, enum dma_data_direction); extern int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr, size_t size, - struct dma_attrs *attrs); + unsigned long attrs); #endif /* __KERNEL__ */ #endif diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h index 9408a994cc91..95ce6ac3a971 100644 --- a/arch/arm/include/asm/xen/page-coherent.h +++ b/arch/arm/include/asm/xen/page-coherent.h @@ -2,15 +2,14 @@ #define _ASM_ARM_XEN_PAGE_COHERENT_H #include -#include #include void __xen_dma_map_page(struct device *hwdev, struct page *page, dma_addr_t dev_addr, unsigned long offset, size_t size, - enum dma_data_direction dir, struct dma_attrs *attrs); + enum dma_data_direction dir, unsigned long attrs); void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs); + unsigned long attrs); void __xen_dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t handle, size_t size, enum dma_data_direction dir); @@ -18,22 +17,20 @@ void __xen_dma_sync_single_for_device(struct device *hwdev, dma_addr_t handle, size_t size, enum dma_data_direction dir); static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, - dma_addr_t *dma_handle, gfp_t flags, - struct dma_attrs *attrs) + dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs) { return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs); } static inline void xen_free_coherent_pages(struct device *hwdev, size_t size, - void *cpu_addr, dma_addr_t dma_handle, - struct dma_attrs *attrs) + void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs) { __generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs); } static inline void xen_dma_map_page(struct device *hwdev, struct page *page, dma_addr_t dev_addr, unsigned long offset, size_t size, - enum dma_data_direction dir, struct dma_attrs *attrs) + enum dma_data_direction dir, unsigned long attrs) { unsigned long page_pfn = page_to_xen_pfn(page); unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr); @@ -58,8 +55,7 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page, } static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, - size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + size_t size, enum dma_data_direction dir, unsigned long attrs) { unsigned long pfn = PFN_DOWN(handle); /* diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index b7eed75960fe..c6834c0cfd1c 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -128,16 +128,16 @@ static void __dma_page_dev_to_cpu(struct page *, unsigned long, */ static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { - if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) + if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) 
__dma_page_cpu_to_dev(page, offset, size, dir); return pfn_to_dma(dev, page_to_pfn(page)) + offset; } static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { return pfn_to_dma(dev, page_to_pfn(page)) + offset; } @@ -157,10 +157,9 @@ static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *pag * whatever the device wrote there. */ static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle, - size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + size_t size, enum dma_data_direction dir, unsigned long attrs) { - if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) + if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)), handle & ~PAGE_MASK, size, dir); } @@ -198,12 +197,12 @@ struct dma_map_ops arm_dma_ops = { EXPORT_SYMBOL(arm_dma_ops); static void *arm_coherent_dma_alloc(struct device *dev, size_t size, - dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs); + dma_addr_t *handle, gfp_t gfp, unsigned long attrs); static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr, - dma_addr_t handle, struct dma_attrs *attrs); + dma_addr_t handle, unsigned long attrs); static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size, - struct dma_attrs *attrs); + unsigned long attrs); struct dma_map_ops arm_coherent_dma_ops = { .alloc = arm_coherent_dma_alloc, @@ -639,11 +638,11 @@ static void __free_from_contiguous(struct device *dev, struct page *page, dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT); } -static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot) +static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot) { - prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ? - pgprot_writecombine(prot) : - pgprot_dmacoherent(prot); + prot = (attrs & DMA_ATTR_WRITE_COMBINE) ? + pgprot_writecombine(prot) : + pgprot_dmacoherent(prot); return prot; } @@ -751,7 +750,7 @@ static struct arm_dma_allocator remap_allocator = { static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, pgprot_t prot, bool is_coherent, - struct dma_attrs *attrs, const void *caller) + unsigned long attrs, const void *caller) { u64 mask = get_coherent_dma_mask(dev); struct page *page = NULL; @@ -764,7 +763,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, .gfp = gfp, .prot = prot, .caller = caller, - .want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs), + .want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0), .coherent_flag = is_coherent ? COHERENT : NORMAL, }; @@ -834,7 +833,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, * virtual and bus address for that space. 
*/ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, - gfp_t gfp, struct dma_attrs *attrs) + gfp_t gfp, unsigned long attrs) { pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL); @@ -843,7 +842,7 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, } static void *arm_coherent_dma_alloc(struct device *dev, size_t size, - dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) + dma_addr_t *handle, gfp_t gfp, unsigned long attrs) { return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true, attrs, __builtin_return_address(0)); @@ -851,7 +850,7 @@ static void *arm_coherent_dma_alloc(struct device *dev, size_t size, static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size, - struct dma_attrs *attrs) + unsigned long attrs) { int ret = -ENXIO; #ifdef CONFIG_MMU @@ -879,14 +878,14 @@ static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, */ static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size, - struct dma_attrs *attrs) + unsigned long attrs) { return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs); } int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size, - struct dma_attrs *attrs) + unsigned long attrs) { #ifdef CONFIG_MMU vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot); @@ -898,7 +897,7 @@ int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, * Free a buffer as defined by the above mapping. */ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr, - dma_addr_t handle, struct dma_attrs *attrs, + dma_addr_t handle, unsigned long attrs, bool is_coherent) { struct page *page = pfn_to_page(dma_to_pfn(dev, handle)); @@ -908,7 +907,7 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr, .size = PAGE_ALIGN(size), .cpu_addr = cpu_addr, .page = page, - .want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs), + .want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0), }; buf = arm_dma_buffer_find(cpu_addr); @@ -920,20 +919,20 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr, } void arm_dma_free(struct device *dev, size_t size, void *cpu_addr, - dma_addr_t handle, struct dma_attrs *attrs) + dma_addr_t handle, unsigned long attrs) { __arm_dma_free(dev, size, cpu_addr, handle, attrs, false); } static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr, - dma_addr_t handle, struct dma_attrs *attrs) + dma_addr_t handle, unsigned long attrs) { __arm_dma_free(dev, size, cpu_addr, handle, attrs, true); } int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr, dma_addr_t handle, size_t size, - struct dma_attrs *attrs) + unsigned long attrs) { struct page *page = pfn_to_page(dma_to_pfn(dev, handle)); int ret; @@ -1066,7 +1065,7 @@ static void __dma_page_dev_to_cpu(struct page *page, unsigned long off, * here. */ int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction dir, struct dma_attrs *attrs) + enum dma_data_direction dir, unsigned long attrs) { struct dma_map_ops *ops = get_dma_ops(dev); struct scatterlist *s; @@ -1100,7 +1099,7 @@ int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, * rules concerning calls here are the same as for dma_unmap_single(). 
*/ void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction dir, struct dma_attrs *attrs) + enum dma_data_direction dir, unsigned long attrs) { struct dma_map_ops *ops = get_dma_ops(dev); struct scatterlist *s; @@ -1273,7 +1272,7 @@ static inline void __free_iova(struct dma_iommu_mapping *mapping, static const int iommu_order_array[] = { 9, 8, 4, 0 }; static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, - gfp_t gfp, struct dma_attrs *attrs, + gfp_t gfp, unsigned long attrs, int coherent_flag) { struct page **pages; @@ -1289,7 +1288,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, if (!pages) return NULL; - if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) + if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) { unsigned long order = get_order(size); struct page *page; @@ -1307,7 +1306,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, } /* Go straight to 4K chunks if caller says it's OK. */ - if (dma_get_attr(DMA_ATTR_ALLOC_SINGLE_PAGES, attrs)) + if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES) order_idx = ARRAY_SIZE(iommu_order_array) - 1; /* @@ -1363,12 +1362,12 @@ error: } static int __iommu_free_buffer(struct device *dev, struct page **pages, - size_t size, struct dma_attrs *attrs) + size_t size, unsigned long attrs) { int count = size >> PAGE_SHIFT; int i; - if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) { + if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) { dma_release_from_contiguous(dev, pages[0], count); } else { for (i = 0; i < count; i++) @@ -1460,14 +1459,14 @@ static struct page **__atomic_get_pages(void *addr) return (struct page **)page; } -static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs) +static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs) { struct vm_struct *area; if (__in_atomic_pool(cpu_addr, PAGE_SIZE)) return __atomic_get_pages(cpu_addr); - if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) + if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) return cpu_addr; area = find_vm_area(cpu_addr); @@ -1511,7 +1510,7 @@ static void __iommu_free_atomic(struct device *dev, void *cpu_addr, } static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs, + dma_addr_t *handle, gfp_t gfp, unsigned long attrs, int coherent_flag) { pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL); @@ -1542,7 +1541,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size, if (*handle == DMA_ERROR_CODE) goto err_buffer; - if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) + if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) return pages; addr = __iommu_alloc_remap(pages, size, gfp, prot, @@ -1560,20 +1559,20 @@ err_buffer: } static void *arm_iommu_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) + dma_addr_t *handle, gfp_t gfp, unsigned long attrs) { return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, NORMAL); } static void *arm_coherent_iommu_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) + dma_addr_t *handle, gfp_t gfp, unsigned long attrs) { return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, COHERENT); } static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size, - struct dma_attrs *attrs) + unsigned long attrs) { unsigned long uaddr = vma->vm_start; unsigned long usize = 
vma->vm_end - vma->vm_start; @@ -1603,7 +1602,7 @@ static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma } static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, - dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs) + dma_addr_t dma_addr, size_t size, unsigned long attrs) { vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot); @@ -1612,7 +1611,7 @@ static int arm_iommu_mmap_attrs(struct device *dev, static int arm_coherent_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, - dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs) + dma_addr_t dma_addr, size_t size, unsigned long attrs) { return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs); } @@ -1622,7 +1621,7 @@ static int arm_coherent_iommu_mmap_attrs(struct device *dev, * Must not be called with IRQs disabled. */ void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, - dma_addr_t handle, struct dma_attrs *attrs, int coherent_flag) + dma_addr_t handle, unsigned long attrs, int coherent_flag) { struct page **pages; size = PAGE_ALIGN(size); @@ -1638,7 +1637,7 @@ void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, return; } - if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) { + if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0) { dma_common_free_remap(cpu_addr, size, VM_ARM_DMA_CONSISTENT | VM_USERMAP); } @@ -1648,20 +1647,20 @@ void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, } void arm_iommu_free_attrs(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs) + void *cpu_addr, dma_addr_t handle, unsigned long attrs) { __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL); } void arm_coherent_iommu_free_attrs(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs) + void *cpu_addr, dma_addr_t handle, unsigned long attrs) { __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, COHERENT); } static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr, - size_t size, struct dma_attrs *attrs) + size_t size, unsigned long attrs) { unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; struct page **pages = __iommu_get_pages(cpu_addr, attrs); @@ -1699,7 +1698,7 @@ static int __dma_direction_to_prot(enum dma_data_direction dir) */ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg, size_t size, dma_addr_t *handle, - enum dma_data_direction dir, struct dma_attrs *attrs, + enum dma_data_direction dir, unsigned long attrs, bool is_coherent) { struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); @@ -1720,8 +1719,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg, phys_addr_t phys = page_to_phys(sg_page(s)); unsigned int len = PAGE_ALIGN(s->offset + s->length); - if (!is_coherent && - !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) + if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); prot = __dma_direction_to_prot(dir); @@ -1742,7 +1740,7 @@ fail: } static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction dir, struct dma_attrs *attrs, + enum dma_data_direction dir, unsigned long attrs, bool is_coherent) { struct scatterlist *s = sg, *dma = sg, *start = sg; @@ -1800,7 +1798,7 @@ bad_mapping: * obtained via sg_dma_{address,length}. 
*/ int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction dir, struct dma_attrs *attrs) + int nents, enum dma_data_direction dir, unsigned long attrs) { return __iommu_map_sg(dev, sg, nents, dir, attrs, true); } @@ -1818,14 +1816,14 @@ int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg, * sg_dma_{address,length}. */ int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction dir, struct dma_attrs *attrs) + int nents, enum dma_data_direction dir, unsigned long attrs) { return __iommu_map_sg(dev, sg, nents, dir, attrs, false); } static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction dir, struct dma_attrs *attrs, - bool is_coherent) + int nents, enum dma_data_direction dir, + unsigned long attrs, bool is_coherent) { struct scatterlist *s; int i; @@ -1834,8 +1832,7 @@ static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg, if (sg_dma_len(s)) __iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s)); - if (!is_coherent && - !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) + if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir); } @@ -1852,7 +1849,8 @@ static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg, * rules concerning calls here are the same as for dma_unmap_single(). */ void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction dir, struct dma_attrs *attrs) + int nents, enum dma_data_direction dir, + unsigned long attrs) { __iommu_unmap_sg(dev, sg, nents, dir, attrs, true); } @@ -1868,7 +1866,8 @@ void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, * rules concerning calls here are the same as for dma_unmap_single(). 
*/ void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction dir, struct dma_attrs *attrs) + enum dma_data_direction dir, + unsigned long attrs) { __iommu_unmap_sg(dev, sg, nents, dir, attrs, false); } @@ -1921,7 +1920,7 @@ void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg, */ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); dma_addr_t dma_addr; @@ -1955,9 +1954,9 @@ fail: */ static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { - if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) + if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) __dma_page_cpu_to_dev(page, offset, size, dir); return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs); @@ -1973,8 +1972,7 @@ static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page, * Coherent IOMMU aware version of arm_dma_unmap_page() */ static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle, - size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + size_t size, enum dma_data_direction dir, unsigned long attrs) { struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); dma_addr_t iova = handle & PAGE_MASK; @@ -1998,8 +1996,7 @@ static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle, * IOMMU aware version of arm_dma_unmap_page() */ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle, - size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + size_t size, enum dma_data_direction dir, unsigned long attrs) { struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); dma_addr_t iova = handle & PAGE_MASK; @@ -2010,7 +2007,7 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle, if (!iova) return; - if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) + if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) __dma_page_dev_to_cpu(page, offset, size, dir); iommu_unmap(mapping->domain, iova, len); diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c index c5f9a9e3d1f3..d062f08f5020 100644 --- a/arch/arm/xen/mm.c +++ b/arch/arm/xen/mm.c @@ -98,11 +98,11 @@ static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle, void __xen_dma_map_page(struct device *hwdev, struct page *page, dma_addr_t dev_addr, unsigned long offset, size_t size, - enum dma_data_direction dir, struct dma_attrs *attrs) + enum dma_data_direction dir, unsigned long attrs) { if (is_device_dma_coherent(hwdev)) return; - if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) + if (attrs & DMA_ATTR_SKIP_CPU_SYNC) return; __xen_dma_page_cpu_to_dev(hwdev, dev_addr, size, dir); @@ -110,12 +110,12 @@ void __xen_dma_map_page(struct device *hwdev, struct page *page, void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { if (is_device_dma_coherent(hwdev)) return; - if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) + if (attrs & DMA_ATTR_SKIP_CPU_SYNC) return; __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir); diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index f6c55afab3e2..c4284c432ae8 100644 --- 
a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -32,10 +32,10 @@ static int swiotlb __read_mostly; -static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot, +static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot, bool coherent) { - if (!coherent || dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs)) + if (!coherent || (attrs & DMA_ATTR_WRITE_COMBINE)) return pgprot_writecombine(prot); return prot; } @@ -91,7 +91,7 @@ static int __free_from_pool(void *start, size_t size) static void *__dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags, - struct dma_attrs *attrs) + unsigned long attrs) { if (dev == NULL) { WARN_ONCE(1, "Use an actual device structure for DMA allocation\n"); @@ -121,7 +121,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size, static void __dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, - struct dma_attrs *attrs) + unsigned long attrs) { bool freed; phys_addr_t paddr = dma_to_phys(dev, dma_handle); @@ -140,7 +140,7 @@ static void __dma_free_coherent(struct device *dev, size_t size, static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags, - struct dma_attrs *attrs) + unsigned long attrs) { struct page *page; void *ptr, *coherent_ptr; @@ -188,7 +188,7 @@ no_mem: static void __dma_free(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, - struct dma_attrs *attrs) + unsigned long attrs) { void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle)); @@ -205,7 +205,7 @@ static void __dma_free(struct device *dev, size_t size, static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { dma_addr_t dev_addr; @@ -219,7 +219,7 @@ static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page, static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { if (!is_device_dma_coherent(dev)) __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir); @@ -228,7 +228,7 @@ static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr, static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nelems, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { struct scatterlist *sg; int i, ret; @@ -245,7 +245,7 @@ static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl, static void __swiotlb_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl, int nelems, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { struct scatterlist *sg; int i; @@ -306,7 +306,7 @@ static void __swiotlb_sync_sg_for_device(struct device *dev, static int __swiotlb_mmap(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size, - struct dma_attrs *attrs) + unsigned long attrs) { int ret = -ENXIO; unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> @@ -333,7 +333,7 @@ static int __swiotlb_mmap(struct device *dev, static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr, dma_addr_t handle, size_t size, - struct dma_attrs *attrs) + unsigned long attrs) { int ret = sg_alloc_table(sgt, 1, GFP_KERNEL); @@ -435,21 +435,21 @@ out: static void *__dummy_alloc(struct device *dev, size_t size, 
dma_addr_t *dma_handle, gfp_t flags, - struct dma_attrs *attrs) + unsigned long attrs) { return NULL; } static void __dummy_free(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, - struct dma_attrs *attrs) + unsigned long attrs) { } static int __dummy_mmap(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size, - struct dma_attrs *attrs) + unsigned long attrs) { return -ENXIO; } @@ -457,20 +457,20 @@ static int __dummy_mmap(struct device *dev, static dma_addr_t __dummy_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { return DMA_ERROR_CODE; } static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { } static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl, int nelems, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { return 0; } @@ -478,7 +478,7 @@ static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl, static void __dummy_unmap_sg(struct device *dev, struct scatterlist *sgl, int nelems, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { } @@ -553,7 +553,7 @@ static void flush_page(struct device *dev, const void *virt, phys_addr_t phys) static void *__iommu_alloc_attrs(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, - struct dma_attrs *attrs) + unsigned long attrs) { bool coherent = is_device_dma_coherent(dev); int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent); @@ -613,7 +613,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size, } static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, - dma_addr_t handle, struct dma_attrs *attrs) + dma_addr_t handle, unsigned long attrs) { size_t iosize = size; @@ -629,7 +629,7 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, * Hence how dodgy the below logic looks... 
*/ if (__in_atomic_pool(cpu_addr, size)) { - iommu_dma_unmap_page(dev, handle, iosize, 0, NULL); + iommu_dma_unmap_page(dev, handle, iosize, 0, 0); __free_from_pool(cpu_addr, size); } else if (is_vmalloc_addr(cpu_addr)){ struct vm_struct *area = find_vm_area(cpu_addr); @@ -639,14 +639,14 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, iommu_dma_free(dev, area->pages, iosize, &handle); dma_common_free_remap(cpu_addr, size, VM_USERMAP); } else { - iommu_dma_unmap_page(dev, handle, iosize, 0, NULL); + iommu_dma_unmap_page(dev, handle, iosize, 0, 0); __free_pages(virt_to_page(cpu_addr), get_order(size)); } } static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size, - struct dma_attrs *attrs) + unsigned long attrs) { struct vm_struct *area; int ret; @@ -666,7 +666,7 @@ static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma, static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr, - size_t size, struct dma_attrs *attrs) + size_t size, unsigned long attrs) { unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; struct vm_struct *area = find_vm_area(cpu_addr); @@ -707,14 +707,14 @@ static void __iommu_sync_single_for_device(struct device *dev, static dma_addr_t __iommu_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { bool coherent = is_device_dma_coherent(dev); int prot = dma_direction_to_prot(dir, coherent); dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot); if (!iommu_dma_mapping_error(dev, dev_addr) && - !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) + (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) __iommu_sync_single_for_device(dev, dev_addr, size, dir); return dev_addr; @@ -722,9 +722,9 @@ static dma_addr_t __iommu_map_page(struct device *dev, struct page *page, static void __iommu_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { - if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) + if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) __iommu_sync_single_for_cpu(dev, dev_addr, size, dir); iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs); @@ -760,11 +760,11 @@ static void __iommu_sync_sg_for_device(struct device *dev, static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nelems, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { bool coherent = is_device_dma_coherent(dev); - if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) + if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) __iommu_sync_sg_for_device(dev, sgl, nelems, dir); return iommu_dma_map_sg(dev, sgl, nelems, @@ -774,9 +774,9 @@ static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl, static void __iommu_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl, int nelems, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { - if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) + if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) __iommu_sync_sg_for_cpu(dev, sgl, nelems, dir); iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs); diff --git a/arch/avr32/mm/dma-coherent.c b/arch/avr32/mm/dma-coherent.c index 92cf1fb2b3e6..58610d0df7ed 100644 --- a/arch/avr32/mm/dma-coherent.c +++ b/arch/avr32/mm/dma-coherent.c @@ -99,7 +99,7 @@ static void __dma_free(struct 
device *dev, size_t size, } static void *avr32_dma_alloc(struct device *dev, size_t size, - dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) + dma_addr_t *handle, gfp_t gfp, unsigned long attrs) { struct page *page; dma_addr_t phys; @@ -109,7 +109,7 @@ static void *avr32_dma_alloc(struct device *dev, size_t size, return NULL; phys = page_to_phys(page); - if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs)) { + if (attrs & DMA_ATTR_WRITE_COMBINE) { /* Now, map the page into P3 with write-combining turned on */ *handle = phys; return __ioremap(phys, size, _PAGE_BUFFER); @@ -119,11 +119,11 @@ static void *avr32_dma_alloc(struct device *dev, size_t size, } static void avr32_dma_free(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs) + void *cpu_addr, dma_addr_t handle, unsigned long attrs) { struct page *page; - if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs)) { + if (attrs & DMA_ATTR_WRITE_COMBINE) { iounmap(cpu_addr); page = phys_to_page(handle); @@ -142,7 +142,7 @@ static void avr32_dma_free(struct device *dev, size_t size, static dma_addr_t avr32_dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, - enum dma_data_direction direction, struct dma_attrs *attrs) + enum dma_data_direction direction, unsigned long attrs) { void *cpu_addr = page_address(page) + offset; @@ -152,7 +152,7 @@ static dma_addr_t avr32_dma_map_page(struct device *dev, struct page *page, static int avr32_dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { int i; struct scatterlist *sg; diff --git a/arch/blackfin/kernel/dma-mapping.c b/arch/blackfin/kernel/dma-mapping.c index 771afe6e4264..53fbbb61aa86 100644 --- a/arch/blackfin/kernel/dma-mapping.c +++ b/arch/blackfin/kernel/dma-mapping.c @@ -79,7 +79,7 @@ static void __free_dma_pages(unsigned long addr, unsigned int pages) } static void *bfin_dma_alloc(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) + dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) { void *ret; @@ -94,7 +94,7 @@ static void *bfin_dma_alloc(struct device *dev, size_t size, } static void bfin_dma_free(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle, struct dma_attrs *attrs) + dma_addr_t dma_handle, unsigned long attrs) { __free_dma_pages((unsigned long)vaddr, get_pages(size)); } @@ -111,7 +111,7 @@ EXPORT_SYMBOL(__dma_sync); static int bfin_dma_map_sg(struct device *dev, struct scatterlist *sg_list, int nents, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { struct scatterlist *sg; int i; @@ -139,7 +139,7 @@ static void bfin_dma_sync_sg_for_device(struct device *dev, static dma_addr_t bfin_dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { dma_addr_t handle = (dma_addr_t)(page_address(page) + offset); diff --git a/arch/c6x/include/asm/dma-mapping.h b/arch/c6x/include/asm/dma-mapping.h index 6b5cd7b0cf32..5717b1e52d96 100644 --- a/arch/c6x/include/asm/dma-mapping.h +++ b/arch/c6x/include/asm/dma-mapping.h @@ -26,8 +26,8 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev) extern void coherent_mem_init(u32 start, u32 size); void *c6x_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, - gfp_t gfp, struct dma_attrs *attrs); + gfp_t gfp, unsigned long attrs); void 
c6x_dma_free(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle, struct dma_attrs *attrs); + dma_addr_t dma_handle, unsigned long attrs); #endif /* _ASM_C6X_DMA_MAPPING_H */ diff --git a/arch/c6x/kernel/dma.c b/arch/c6x/kernel/dma.c index 8a80f3a250c0..db4a6a301f5e 100644 --- a/arch/c6x/kernel/dma.c +++ b/arch/c6x/kernel/dma.c @@ -38,7 +38,7 @@ static void c6x_dma_sync(dma_addr_t handle, size_t size, static dma_addr_t c6x_dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { dma_addr_t handle = virt_to_phys(page_address(page) + offset); @@ -47,13 +47,13 @@ static dma_addr_t c6x_dma_map_page(struct device *dev, struct page *page, } static void c6x_dma_unmap_page(struct device *dev, dma_addr_t handle, - size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) + size_t size, enum dma_data_direction dir, unsigned long attrs) { c6x_dma_sync(handle, size, dir); } static int c6x_dma_map_sg(struct device *dev, struct scatterlist *sglist, - int nents, enum dma_data_direction dir, struct dma_attrs *attrs) + int nents, enum dma_data_direction dir, unsigned long attrs) { struct scatterlist *sg; int i; @@ -67,8 +67,7 @@ static int c6x_dma_map_sg(struct device *dev, struct scatterlist *sglist, } static void c6x_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, - int nents, enum dma_data_direction dir, - struct dma_attrs *attrs) + int nents, enum dma_data_direction dir, unsigned long attrs) { struct scatterlist *sg; int i; diff --git a/arch/c6x/mm/dma-coherent.c b/arch/c6x/mm/dma-coherent.c index f7ee63af2541..95e38ad27c69 100644 --- a/arch/c6x/mm/dma-coherent.c +++ b/arch/c6x/mm/dma-coherent.c @@ -74,7 +74,7 @@ static void __free_dma_pages(u32 addr, int order) * virtual and DMA address for that space. */ void *c6x_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, - gfp_t gfp, struct dma_attrs *attrs) + gfp_t gfp, unsigned long attrs) { u32 paddr; int order; @@ -99,7 +99,7 @@ void *c6x_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, * Free DMA coherent memory as defined by the above mapping. 
*/ void c6x_dma_free(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle, struct dma_attrs *attrs) + dma_addr_t dma_handle, unsigned long attrs) { int order; diff --git a/arch/cris/arch-v32/drivers/pci/dma.c b/arch/cris/arch-v32/drivers/pci/dma.c index 8d5efa58cce1..1f0636793f0c 100644 --- a/arch/cris/arch-v32/drivers/pci/dma.c +++ b/arch/cris/arch-v32/drivers/pci/dma.c @@ -17,7 +17,7 @@ #include static void *v32_dma_alloc(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) + dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) { void *ret; @@ -37,22 +37,21 @@ static void *v32_dma_alloc(struct device *dev, size_t size, } static void v32_dma_free(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle, struct dma_attrs *attrs) + dma_addr_t dma_handle, unsigned long attrs) { free_pages((unsigned long)vaddr, get_order(size)); } static inline dma_addr_t v32_dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, - enum dma_data_direction direction, - struct dma_attrs *attrs) + enum dma_data_direction direction, unsigned long attrs) { return page_to_phys(page) + offset; } static inline int v32_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { printk("Map sg\n"); return nents; diff --git a/arch/frv/mb93090-mb00/pci-dma-nommu.c b/arch/frv/mb93090-mb00/pci-dma-nommu.c index 082be49b5df0..90f2e4cb33d6 100644 --- a/arch/frv/mb93090-mb00/pci-dma-nommu.c +++ b/arch/frv/mb93090-mb00/pci-dma-nommu.c @@ -35,7 +35,7 @@ static DEFINE_SPINLOCK(dma_alloc_lock); static LIST_HEAD(dma_alloc_list); static void *frv_dma_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle, - gfp_t gfp, struct dma_attrs *attrs) + gfp_t gfp, unsigned long attrs) { struct dma_alloc_record *new; struct list_head *this = &dma_alloc_list; @@ -86,7 +86,7 @@ static void *frv_dma_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_ha } static void frv_dma_free(struct device *hwdev, size_t size, void *vaddr, - dma_addr_t dma_handle, struct dma_attrs *attrs) + dma_addr_t dma_handle, unsigned long attrs) { struct dma_alloc_record *rec; unsigned long flags; @@ -107,7 +107,7 @@ static void frv_dma_free(struct device *hwdev, size_t size, void *vaddr, static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { int i; struct scatterlist *sg; @@ -124,7 +124,7 @@ static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist, static dma_addr_t frv_dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, - enum dma_data_direction direction, struct dma_attrs *attrs) + enum dma_data_direction direction, unsigned long attrs) { BUG_ON(direction == DMA_NONE); flush_dcache_page(page); diff --git a/arch/frv/mb93090-mb00/pci-dma.c b/arch/frv/mb93090-mb00/pci-dma.c index 316b7b65348d..f585745b1abc 100644 --- a/arch/frv/mb93090-mb00/pci-dma.c +++ b/arch/frv/mb93090-mb00/pci-dma.c @@ -19,8 +19,7 @@ #include static void *frv_dma_alloc(struct device *hwdev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp, - struct dma_attrs *attrs) + dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) { void *ret; @@ -32,14 +31,14 @@ static void *frv_dma_alloc(struct device *hwdev, size_t size, } static void frv_dma_free(struct device *hwdev, size_t size, void *vaddr, - dma_addr_t dma_handle, struct 
dma_attrs *attrs) + dma_addr_t dma_handle, unsigned long attrs) { consistent_free(vaddr); } static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { unsigned long dampr2; void *vaddr; @@ -69,7 +68,7 @@ static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist, static dma_addr_t frv_dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, - enum dma_data_direction direction, struct dma_attrs *attrs) + enum dma_data_direction direction, unsigned long attrs) { flush_dcache_page(page); return (dma_addr_t) page_to_phys(page) + offset; diff --git a/arch/h8300/kernel/dma.c b/arch/h8300/kernel/dma.c index eeb13d3f2424..3651da045806 100644 --- a/arch/h8300/kernel/dma.c +++ b/arch/h8300/kernel/dma.c @@ -12,7 +12,7 @@ static void *dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, - struct dma_attrs *attrs) + unsigned long attrs) { void *ret; @@ -32,7 +32,7 @@ static void *dma_alloc(struct device *dev, size_t size, static void dma_free(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, - struct dma_attrs *attrs) + unsigned long attrs) { free_pages((unsigned long)vaddr, get_order(size)); @@ -41,14 +41,14 @@ static void dma_free(struct device *dev, size_t size, static dma_addr_t map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { return page_to_phys(page) + offset; } static int map_sg(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { struct scatterlist *sg; int i; diff --git a/arch/hexagon/include/asm/dma-mapping.h b/arch/hexagon/include/asm/dma-mapping.h index aa6203464520..7ef58df909fc 100644 --- a/arch/hexagon/include/asm/dma-mapping.h +++ b/arch/hexagon/include/asm/dma-mapping.h @@ -26,7 +26,6 @@ #include #include #include -#include #include struct device; diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c index 9e3ddf792bd3..b9017785fb71 100644 --- a/arch/hexagon/kernel/dma.c +++ b/arch/hexagon/kernel/dma.c @@ -51,7 +51,7 @@ static struct gen_pool *coherent_pool; static void *hexagon_dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr, gfp_t flag, - struct dma_attrs *attrs) + unsigned long attrs) { void *ret; @@ -84,7 +84,7 @@ static void *hexagon_dma_alloc_coherent(struct device *dev, size_t size, } static void hexagon_free_coherent(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_addr, struct dma_attrs *attrs) + dma_addr_t dma_addr, unsigned long attrs) { gen_pool_free(coherent_pool, (unsigned long) vaddr, size); } @@ -105,7 +105,7 @@ static int check_addr(const char *name, struct device *hwdev, static int hexagon_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { struct scatterlist *s; int i; @@ -172,7 +172,7 @@ static inline void dma_sync(void *addr, size_t size, static dma_addr_t hexagon_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { dma_addr_t bus = page_to_phys(page) + offset; WARN_ON(size == 0); diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c index a6d6190c9d24..630ee8073899 100644 --- 
a/arch/ia64/hp/common/sba_iommu.c +++ b/arch/ia64/hp/common/sba_iommu.c @@ -919,7 +919,7 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt) static dma_addr_t sba_map_page(struct device *dev, struct page *page, unsigned long poff, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { struct ioc *ioc; void *addr = page_address(page) + poff; @@ -1005,7 +1005,7 @@ static dma_addr_t sba_map_page(struct device *dev, struct page *page, static dma_addr_t sba_map_single_attrs(struct device *dev, void *addr, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { return sba_map_page(dev, virt_to_page(addr), (unsigned long)addr & ~PAGE_MASK, size, dir, attrs); @@ -1046,7 +1046,7 @@ sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size) * See Documentation/DMA-API-HOWTO.txt */ static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size, - enum dma_data_direction dir, struct dma_attrs *attrs) + enum dma_data_direction dir, unsigned long attrs) { struct ioc *ioc; #if DELAYED_RESOURCE_CNT > 0 @@ -1115,7 +1115,7 @@ static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size, } void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size, - enum dma_data_direction dir, struct dma_attrs *attrs) + enum dma_data_direction dir, unsigned long attrs) { sba_unmap_page(dev, iova, size, dir, attrs); } @@ -1130,7 +1130,7 @@ void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size, */ static void * sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, - gfp_t flags, struct dma_attrs *attrs) + gfp_t flags, unsigned long attrs) { struct ioc *ioc; void *addr; @@ -1175,7 +1175,7 @@ sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, * device to map single to get an iova mapping. */ *dma_handle = sba_map_single_attrs(&ioc->sac_only_dev->dev, addr, - size, 0, NULL); + size, 0, 0); return addr; } @@ -1191,9 +1191,9 @@ sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, * See Documentation/DMA-API-HOWTO.txt */ static void sba_free_coherent(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle, struct dma_attrs *attrs) + dma_addr_t dma_handle, unsigned long attrs) { - sba_unmap_single_attrs(dev, dma_handle, size, 0, NULL); + sba_unmap_single_attrs(dev, dma_handle, size, 0, 0); free_pages((unsigned long) vaddr, get_order(size)); } @@ -1442,7 +1442,7 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev, static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction dir, - struct dma_attrs *attrs); + unsigned long attrs); /** * sba_map_sg - map Scatter/Gather list * @dev: instance of PCI owned by the driver that's asking. 
@@ -1455,7 +1455,7 @@ static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, */ static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { struct ioc *ioc; int coalesced, filled = 0; @@ -1551,7 +1551,7 @@ static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, */ static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { #ifdef ASSERT_PDIR_SANITY struct ioc *ioc; diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h index 9c39bdfc2da8..ed7f09089f12 100644 --- a/arch/ia64/include/asm/machvec.h +++ b/arch/ia64/include/asm/machvec.h @@ -22,7 +22,6 @@ struct pci_bus; struct task_struct; struct pci_dev; struct msi_desc; -struct dma_attrs; typedef void ia64_mv_setup_t (char **); typedef void ia64_mv_cpu_init_t (void); diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c index 939260aeac98..2933208c0285 100644 --- a/arch/ia64/kernel/pci-swiotlb.c +++ b/arch/ia64/kernel/pci-swiotlb.c @@ -16,7 +16,7 @@ EXPORT_SYMBOL(swiotlb); static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, - struct dma_attrs *attrs) + unsigned long attrs) { if (dev->coherent_dma_mask != DMA_BIT_MASK(64)) gfp |= GFP_DMA; @@ -25,7 +25,7 @@ static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size, static void ia64_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_addr, - struct dma_attrs *attrs) + unsigned long attrs) { swiotlb_free_coherent(dev, size, vaddr, dma_addr); } diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c index 8f59907007cb..74c934a997bb 100644 --- a/arch/ia64/sn/pci/pci_dma.c +++ b/arch/ia64/sn/pci/pci_dma.c @@ -77,7 +77,7 @@ EXPORT_SYMBOL(sn_dma_set_mask); */ static void *sn_dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t * dma_handle, gfp_t flags, - struct dma_attrs *attrs) + unsigned long attrs) { void *cpuaddr; unsigned long phys_addr; @@ -138,7 +138,7 @@ static void *sn_dma_alloc_coherent(struct device *dev, size_t size, * any associated IOMMU mappings. 
*/ static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, - dma_addr_t dma_handle, struct dma_attrs *attrs) + dma_addr_t dma_handle, unsigned long attrs) { struct pci_dev *pdev = to_pci_dev(dev); struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); @@ -176,21 +176,18 @@ static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { void *cpu_addr = page_address(page) + offset; dma_addr_t dma_addr; unsigned long phys_addr; struct pci_dev *pdev = to_pci_dev(dev); struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); - int dmabarr; - - dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs); BUG_ON(!dev_is_pci(dev)); phys_addr = __pa(cpu_addr); - if (dmabarr) + if (attrs & DMA_ATTR_WRITE_BARRIER) dma_addr = provider->dma_map_consistent(pdev, phys_addr, size, SN_DMA_ADDR_PHYS); else @@ -218,7 +215,7 @@ static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page, */ static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { struct pci_dev *pdev = to_pci_dev(dev); struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); @@ -240,7 +237,7 @@ static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, */ static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl, int nhwentries, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { int i; struct pci_dev *pdev = to_pci_dev(dev); @@ -273,16 +270,13 @@ static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl, */ static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, int nhwentries, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { unsigned long phys_addr; struct scatterlist *saved_sg = sgl, *sg; struct pci_dev *pdev = to_pci_dev(dev); struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); int i; - int dmabarr; - - dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs); BUG_ON(!dev_is_pci(dev)); @@ -292,7 +286,7 @@ static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, for_each_sg(sgl, sg, nhwentries, i) { dma_addr_t dma_addr; phys_addr = SG_ENT_PHYS_ADDRESS(sg); - if (dmabarr) + if (attrs & DMA_ATTR_WRITE_BARRIER) dma_addr = provider->dma_map_consistent(pdev, phys_addr, sg->length, diff --git a/arch/m68k/kernel/dma.c b/arch/m68k/kernel/dma.c index cbc78b4117b5..8cf97cbadc91 100644 --- a/arch/m68k/kernel/dma.c +++ b/arch/m68k/kernel/dma.c @@ -19,7 +19,7 @@ #if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE) static void *m68k_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, - gfp_t flag, struct dma_attrs *attrs) + gfp_t flag, unsigned long attrs) { struct page *page, **map; pgprot_t pgprot; @@ -62,7 +62,7 @@ static void *m68k_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, } static void m68k_dma_free(struct device *dev, size_t size, void *addr, - dma_addr_t handle, struct dma_attrs *attrs) + dma_addr_t handle, unsigned long attrs) { pr_debug("dma_free_coherent: %p, %x\n", addr, handle); vfree(addr); @@ -73,7 +73,7 @@ static void m68k_dma_free(struct device *dev, size_t size, void *addr, #include static void *m68k_dma_alloc(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs 
*attrs) + dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) { void *ret; /* ignore region specifiers */ @@ -91,7 +91,7 @@ static void *m68k_dma_alloc(struct device *dev, size_t size, } static void m68k_dma_free(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle, struct dma_attrs *attrs) + dma_addr_t dma_handle, unsigned long attrs) { free_pages((unsigned long)vaddr, get_order(size)); } @@ -130,7 +130,7 @@ static void m68k_dma_sync_sg_for_device(struct device *dev, static dma_addr_t m68k_dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { dma_addr_t handle = page_to_phys(page) + offset; @@ -139,7 +139,7 @@ static dma_addr_t m68k_dma_map_page(struct device *dev, struct page *page, } static int m68k_dma_map_sg(struct device *dev, struct scatterlist *sglist, - int nents, enum dma_data_direction dir, struct dma_attrs *attrs) + int nents, enum dma_data_direction dir, unsigned long attrs) { int i; struct scatterlist *sg; diff --git a/arch/metag/kernel/dma.c b/arch/metag/kernel/dma.c index e12368d02155..0db31e24c541 100644 --- a/arch/metag/kernel/dma.c +++ b/arch/metag/kernel/dma.c @@ -172,7 +172,7 @@ out: * virtual and bus address for that space. */ static void *metag_dma_alloc(struct device *dev, size_t size, - dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) + dma_addr_t *handle, gfp_t gfp, unsigned long attrs) { struct page *page; struct metag_vm_region *c; @@ -268,7 +268,7 @@ no_page: * free a page as defined by the above mapping. */ static void metag_dma_free(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle, struct dma_attrs *attrs) + dma_addr_t dma_handle, unsigned long attrs) { struct metag_vm_region *c; unsigned long flags, addr; @@ -331,13 +331,13 @@ no_area: static int metag_dma_mmap(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size, - struct dma_attrs *attrs) + unsigned long attrs) { unsigned long flags, user_size, kern_size; struct metag_vm_region *c; int ret = -ENXIO; - if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs)) + if (attrs & DMA_ATTR_WRITE_COMBINE) vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); else vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); @@ -482,7 +482,7 @@ static void dma_sync_for_cpu(void *vaddr, size_t size, int dma_direction) static dma_addr_t metag_dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, - enum dma_data_direction direction, struct dma_attrs *attrs) + enum dma_data_direction direction, unsigned long attrs) { dma_sync_for_device((void *)(page_to_phys(page) + offset), size, direction); @@ -491,14 +491,14 @@ static dma_addr_t metag_dma_map_page(struct device *dev, struct page *page, static void metag_dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { dma_sync_for_cpu(phys_to_virt(dma_address), size, direction); } static int metag_dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { struct scatterlist *sg; int i; @@ -516,7 +516,7 @@ static int metag_dma_map_sg(struct device *dev, struct scatterlist *sglist, static void metag_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nhwentries, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long 
attrs) { struct scatterlist *sg; int i; diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h index 1884783d15c0..1768d4bdc8d3 100644 --- a/arch/microblaze/include/asm/dma-mapping.h +++ b/arch/microblaze/include/asm/dma-mapping.h @@ -25,7 +25,6 @@ #include #include #include -#include #include #include diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c index bf4dec229437..ec04dc1e2527 100644 --- a/arch/microblaze/kernel/dma.c +++ b/arch/microblaze/kernel/dma.c @@ -17,7 +17,7 @@ static void *dma_direct_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, - struct dma_attrs *attrs) + unsigned long attrs) { #ifdef NOT_COHERENT_CACHE return consistent_alloc(flag, size, dma_handle); @@ -42,7 +42,7 @@ static void *dma_direct_alloc_coherent(struct device *dev, size_t size, static void dma_direct_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, - struct dma_attrs *attrs) + unsigned long attrs) { #ifdef NOT_COHERENT_CACHE consistent_free(size, vaddr); @@ -53,7 +53,7 @@ static void dma_direct_free_coherent(struct device *dev, size_t size, static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { struct scatterlist *sg; int i; @@ -78,7 +78,7 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev, unsigned long offset, size_t size, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { __dma_sync(page_to_phys(page) + offset, size, direction); return page_to_phys(page) + offset; @@ -88,7 +88,7 @@ static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { /* There is not necessary to do cache cleanup * @@ -157,7 +157,7 @@ dma_direct_sync_sg_for_device(struct device *dev, static int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t handle, size_t size, - struct dma_attrs *attrs) + unsigned long attrs) { #ifdef CONFIG_MMU unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c index 2cd45f5f9481..fd69528b24fb 100644 --- a/arch/mips/cavium-octeon/dma-octeon.c +++ b/arch/mips/cavium-octeon/dma-octeon.c @@ -125,7 +125,7 @@ static phys_addr_t octeon_small_dma_to_phys(struct device *dev, static dma_addr_t octeon_dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { dma_addr_t daddr = swiotlb_map_page(dev, page, offset, size, direction, attrs); @@ -135,7 +135,7 @@ static dma_addr_t octeon_dma_map_page(struct device *dev, struct page *page, } static int octeon_dma_map_sg(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction direction, struct dma_attrs *attrs) + int nents, enum dma_data_direction direction, unsigned long attrs) { int r = swiotlb_map_sg_attrs(dev, sg, nents, direction, attrs); mb(); @@ -157,7 +157,7 @@ static void octeon_dma_sync_sg_for_device(struct device *dev, } static void *octeon_dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) + dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) { void *ret; @@ -189,7 +189,7 @@ static 
void *octeon_dma_alloc_coherent(struct device *dev, size_t size, } static void octeon_dma_free_coherent(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs) + void *vaddr, dma_addr_t dma_handle, unsigned long attrs) { swiotlb_free_coherent(dev, size, vaddr, dma_handle); } diff --git a/arch/mips/loongson64/common/dma-swiotlb.c b/arch/mips/loongson64/common/dma-swiotlb.c index 4ffa6fc81c8f..1a80b6f73ab2 100644 --- a/arch/mips/loongson64/common/dma-swiotlb.c +++ b/arch/mips/loongson64/common/dma-swiotlb.c @@ -10,7 +10,7 @@ #include static void *loongson_dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) + dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) { void *ret; @@ -41,7 +41,7 @@ static void *loongson_dma_alloc_coherent(struct device *dev, size_t size, } static void loongson_dma_free_coherent(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs) + void *vaddr, dma_addr_t dma_handle, unsigned long attrs) { swiotlb_free_coherent(dev, size, vaddr, dma_handle); } @@ -49,7 +49,7 @@ static void loongson_dma_free_coherent(struct device *dev, size_t size, static dma_addr_t loongson_dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { dma_addr_t daddr = swiotlb_map_page(dev, page, offset, size, dir, attrs); @@ -59,9 +59,9 @@ static dma_addr_t loongson_dma_map_page(struct device *dev, struct page *page, static int loongson_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { - int r = swiotlb_map_sg_attrs(dev, sg, nents, dir, NULL); + int r = swiotlb_map_sg_attrs(dev, sg, nents, dir, 0); mb(); return r; diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c index cb557d28cb21..b2eadd6fa9a1 100644 --- a/arch/mips/mm/dma-default.c +++ b/arch/mips/mm/dma-default.c @@ -131,7 +131,7 @@ static void *mips_dma_alloc_noncoherent(struct device *dev, size_t size, } static void *mips_dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t * dma_handle, gfp_t gfp, struct dma_attrs *attrs) + dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) { void *ret; struct page *page = NULL; @@ -141,7 +141,7 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size, * XXX: seems like the coherent and non-coherent implementations could * be consolidated. 
*/ - if (dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) + if (attrs & DMA_ATTR_NON_CONSISTENT) return mips_dma_alloc_noncoherent(dev, size, dma_handle, gfp); gfp = massage_gfp_flags(dev, gfp); @@ -176,13 +176,13 @@ static void mips_dma_free_noncoherent(struct device *dev, size_t size, } static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle, struct dma_attrs *attrs) + dma_addr_t dma_handle, unsigned long attrs) { unsigned long addr = (unsigned long) vaddr; unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; struct page *page = NULL; - if (dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) { + if (attrs & DMA_ATTR_NON_CONSISTENT) { mips_dma_free_noncoherent(dev, size, vaddr, dma_handle); return; } @@ -200,7 +200,7 @@ static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr, static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size, - struct dma_attrs *attrs) + unsigned long attrs) { unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; @@ -214,7 +214,7 @@ static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma, pfn = page_to_pfn(virt_to_page((void *)addr)); - if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs)) + if (attrs & DMA_ATTR_WRITE_COMBINE) vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); else vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); @@ -291,7 +291,7 @@ static inline void __dma_sync(struct page *page, } static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, - size_t size, enum dma_data_direction direction, struct dma_attrs *attrs) + size_t size, enum dma_data_direction direction, unsigned long attrs) { if (cpu_needs_post_dma_flush(dev)) __dma_sync(dma_addr_to_page(dev, dma_addr), @@ -301,7 +301,7 @@ static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, } static int mips_dma_map_sg(struct device *dev, struct scatterlist *sglist, - int nents, enum dma_data_direction direction, struct dma_attrs *attrs) + int nents, enum dma_data_direction direction, unsigned long attrs) { int i; struct scatterlist *sg; @@ -322,7 +322,7 @@ static int mips_dma_map_sg(struct device *dev, struct scatterlist *sglist, static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { if (!plat_device_is_coherent(dev)) __dma_sync(page, offset, size, direction); @@ -332,7 +332,7 @@ static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page, static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nhwentries, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { int i; struct scatterlist *sg; diff --git a/arch/mips/netlogic/common/nlm-dma.c b/arch/mips/netlogic/common/nlm-dma.c index 3758715d4ab6..0630693bec2a 100644 --- a/arch/mips/netlogic/common/nlm-dma.c +++ b/arch/mips/netlogic/common/nlm-dma.c @@ -45,7 +45,7 @@ static char *nlm_swiotlb; static void *nlm_dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) + dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) { /* ignore region specifiers */ gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM); @@ -62,7 +62,7 @@ static void *nlm_dma_alloc_coherent(struct device *dev, size_t size, } static void nlm_dma_free_coherent(struct 
device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs) + void *vaddr, dma_addr_t dma_handle, unsigned long attrs) { swiotlb_free_coherent(dev, size, vaddr, dma_handle); } diff --git a/arch/mn10300/mm/dma-alloc.c b/arch/mn10300/mm/dma-alloc.c index 8842394cb49a..4f4b9029f0ea 100644 --- a/arch/mn10300/mm/dma-alloc.c +++ b/arch/mn10300/mm/dma-alloc.c @@ -21,7 +21,7 @@ static unsigned long pci_sram_allocated = 0xbc000000; static void *mn10300_dma_alloc(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) + dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) { unsigned long addr; void *ret; @@ -63,7 +63,7 @@ done: } static void mn10300_dma_free(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle, struct dma_attrs *attrs) + dma_addr_t dma_handle, unsigned long attrs) { unsigned long addr = (unsigned long) vaddr & ~0x20000000; @@ -75,7 +75,7 @@ static void mn10300_dma_free(struct device *dev, size_t size, void *vaddr, static int mn10300_dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { struct scatterlist *sg; int i; @@ -92,7 +92,7 @@ static int mn10300_dma_map_sg(struct device *dev, struct scatterlist *sglist, static dma_addr_t mn10300_dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, - enum dma_data_direction direction, struct dma_attrs *attrs) + enum dma_data_direction direction, unsigned long attrs) { return page_to_bus(page) + offset; } diff --git a/arch/nios2/mm/dma-mapping.c b/arch/nios2/mm/dma-mapping.c index 90422c367ed3..d800fad87896 100644 --- a/arch/nios2/mm/dma-mapping.c +++ b/arch/nios2/mm/dma-mapping.c @@ -59,7 +59,7 @@ static inline void __dma_sync_for_cpu(void *vaddr, size_t size, } static void *nios2_dma_alloc(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) + dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) { void *ret; @@ -84,7 +84,7 @@ static void *nios2_dma_alloc(struct device *dev, size_t size, } static void nios2_dma_free(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle, struct dma_attrs *attrs) + dma_addr_t dma_handle, unsigned long attrs) { unsigned long addr = (unsigned long) CAC_ADDR((unsigned long) vaddr); @@ -93,7 +93,7 @@ static void nios2_dma_free(struct device *dev, size_t size, void *vaddr, static int nios2_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { int i; @@ -113,7 +113,7 @@ static int nios2_dma_map_sg(struct device *dev, struct scatterlist *sg, static dma_addr_t nios2_dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { void *addr = page_address(page) + offset; @@ -123,14 +123,14 @@ static dma_addr_t nios2_dma_map_page(struct device *dev, struct page *page, static void nios2_dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { __dma_sync_for_cpu(phys_to_virt(dma_address), size, direction); } static void nios2_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { void *addr; int i; diff --git 
a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c index 0b77ddb1ee07..140c99140649 100644 --- a/arch/openrisc/kernel/dma.c +++ b/arch/openrisc/kernel/dma.c @@ -22,7 +22,6 @@ #include #include #include -#include #include #include @@ -83,7 +82,7 @@ page_clear_nocache(pte_t *pte, unsigned long addr, static void * or1k_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, - struct dma_attrs *attrs) + unsigned long attrs) { unsigned long va; void *page; @@ -101,7 +100,7 @@ or1k_dma_alloc(struct device *dev, size_t size, va = (unsigned long)page; - if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) { + if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) { /* * We need to iterate through the pages, clearing the dcache for * them and setting the cache-inhibit bit. @@ -117,7 +116,7 @@ or1k_dma_alloc(struct device *dev, size_t size, static void or1k_dma_free(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle, struct dma_attrs *attrs) + dma_addr_t dma_handle, unsigned long attrs) { unsigned long va = (unsigned long)vaddr; struct mm_walk walk = { @@ -125,7 +124,7 @@ or1k_dma_free(struct device *dev, size_t size, void *vaddr, .mm = &init_mm }; - if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) { + if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) { /* walk_page_range shouldn't be able to fail here */ WARN_ON(walk_page_range(va, va + size, &walk)); } @@ -137,7 +136,7 @@ static dma_addr_t or1k_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { unsigned long cl; dma_addr_t addr = page_to_phys(page) + offset; @@ -170,7 +169,7 @@ or1k_map_page(struct device *dev, struct page *page, static void or1k_unmap_page(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { /* Nothing special to do here... 
*/ } @@ -178,14 +177,14 @@ or1k_unmap_page(struct device *dev, dma_addr_t dma_handle, static int or1k_map_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { struct scatterlist *s; int i; for_each_sg(sg, s, nents, i) { s->dma_address = or1k_map_page(dev, sg_page(s), s->offset, - s->length, dir, NULL); + s->length, dir, 0); } return nents; @@ -194,13 +193,13 @@ or1k_map_sg(struct device *dev, struct scatterlist *sg, static void or1k_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { struct scatterlist *s; int i; for_each_sg(sg, s, nents, i) { - or1k_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, NULL); + or1k_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, 0); } } diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c index a27e4928bf73..02d9ed0f3949 100644 --- a/arch/parisc/kernel/pci-dma.c +++ b/arch/parisc/kernel/pci-dma.c @@ -414,7 +414,7 @@ pcxl_dma_init(void) __initcall(pcxl_dma_init); static void *pa11_dma_alloc(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs) + dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs) { unsigned long vaddr; unsigned long paddr; @@ -441,7 +441,7 @@ static void *pa11_dma_alloc(struct device *dev, size_t size, } static void pa11_dma_free(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle, struct dma_attrs *attrs) + dma_addr_t dma_handle, unsigned long attrs) { int order; @@ -454,7 +454,7 @@ static void pa11_dma_free(struct device *dev, size_t size, void *vaddr, static dma_addr_t pa11_dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, - enum dma_data_direction direction, struct dma_attrs *attrs) + enum dma_data_direction direction, unsigned long attrs) { void *addr = page_address(page) + offset; BUG_ON(direction == DMA_NONE); @@ -465,7 +465,7 @@ static dma_addr_t pa11_dma_map_page(struct device *dev, struct page *page, static void pa11_dma_unmap_page(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { BUG_ON(direction == DMA_NONE); @@ -484,7 +484,7 @@ static void pa11_dma_unmap_page(struct device *dev, dma_addr_t dma_handle, static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { int i; struct scatterlist *sg; @@ -503,7 +503,7 @@ static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist, static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { int i; struct scatterlist *sg; @@ -577,11 +577,11 @@ struct dma_map_ops pcxl_dma_ops = { }; static void *pcx_dma_alloc(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs) + dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs) { void *addr; - if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) + if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) return NULL; addr = (void *)__get_free_pages(flag, get_order(size)); @@ -592,7 +592,7 @@ static void *pcx_dma_alloc(struct device *dev, size_t size, } static void pcx_dma_free(struct device *dev, size_t size, void *vaddr, - dma_addr_t iova, struct dma_attrs *attrs) + dma_addr_t iova, 
unsigned long attrs) { free_pages((unsigned long)vaddr, get_order(size)); return; diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h index 77816acd4fd9..84e3f8dd5e4f 100644 --- a/arch/powerpc/include/asm/dma-mapping.h +++ b/arch/powerpc/include/asm/dma-mapping.h @@ -13,7 +13,6 @@ /* need struct page definitions */ #include #include -#include #include #include #include @@ -25,14 +24,14 @@ /* Some dma direct funcs must be visible for use in other dma_ops */ extern void *__dma_direct_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, - struct dma_attrs *attrs); + unsigned long attrs); extern void __dma_direct_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, - struct dma_attrs *attrs); + unsigned long attrs); extern int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t handle, - size_t size, struct dma_attrs *attrs); + size_t size, unsigned long attrs); #ifdef CONFIG_NOT_COHERENT_CACHE /* diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h index f49a72a9062d..2c1d50792944 100644 --- a/arch/powerpc/include/asm/iommu.h +++ b/arch/powerpc/include/asm/iommu.h @@ -53,7 +53,7 @@ struct iommu_table_ops { long index, long npages, unsigned long uaddr, enum dma_data_direction direction, - struct dma_attrs *attrs); + unsigned long attrs); #ifdef CONFIG_IOMMU_API /* * Exchanges existing TCE with new TCE plus direction bits; @@ -248,12 +248,12 @@ extern int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl, struct scatterlist *sglist, int nelems, unsigned long mask, enum dma_data_direction direction, - struct dma_attrs *attrs); + unsigned long attrs); extern void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist, int nelems, enum dma_data_direction direction, - struct dma_attrs *attrs); + unsigned long attrs); extern void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl, size_t size, dma_addr_t *dma_handle, @@ -264,10 +264,10 @@ extern dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl, struct page *page, unsigned long offset, size_t size, unsigned long mask, enum dma_data_direction direction, - struct dma_attrs *attrs); + unsigned long attrs); extern void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction, - struct dma_attrs *attrs); + unsigned long attrs); extern void iommu_init_early_pSeries(void); extern void iommu_init_early_dart(struct pci_controller_ops *controller_ops); diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c index 41a7d9d49a5a..fb7cbaa37658 100644 --- a/arch/powerpc/kernel/dma-iommu.c +++ b/arch/powerpc/kernel/dma-iommu.c @@ -18,7 +18,7 @@ */ static void *dma_iommu_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, - struct dma_attrs *attrs) + unsigned long attrs) { return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size, dma_handle, dev->coherent_dma_mask, flag, @@ -27,7 +27,7 @@ static void *dma_iommu_alloc_coherent(struct device *dev, size_t size, static void dma_iommu_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, - struct dma_attrs *attrs) + unsigned long attrs) { iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle); } @@ -40,7 +40,7 @@ static void dma_iommu_free_coherent(struct device *dev, size_t size, static dma_addr_t dma_iommu_map_page(struct 
device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { return iommu_map_page(dev, get_iommu_table_base(dev), page, offset, size, device_to_mask(dev), direction, attrs); @@ -49,7 +49,7 @@ static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page, static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size, direction, attrs); @@ -58,7 +58,7 @@ static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle, static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist, int nelems, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { return ppc_iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems, device_to_mask(dev), direction, attrs); @@ -66,7 +66,7 @@ static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist, static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist, int nelems, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { ppc_iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems, direction, attrs); diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c index 3f1472a78f39..e64a6016fba7 100644 --- a/arch/powerpc/kernel/dma.c +++ b/arch/powerpc/kernel/dma.c @@ -64,7 +64,7 @@ static int dma_direct_dma_supported(struct device *dev, u64 mask) void *__dma_direct_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, - struct dma_attrs *attrs) + unsigned long attrs) { void *ret; #ifdef CONFIG_NOT_COHERENT_CACHE @@ -121,7 +121,7 @@ void *__dma_direct_alloc_coherent(struct device *dev, size_t size, void __dma_direct_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, - struct dma_attrs *attrs) + unsigned long attrs) { #ifdef CONFIG_NOT_COHERENT_CACHE __dma_free_coherent(size, vaddr); @@ -132,7 +132,7 @@ void __dma_direct_free_coherent(struct device *dev, size_t size, static void *dma_direct_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, - struct dma_attrs *attrs) + unsigned long attrs) { struct iommu_table *iommu; @@ -156,7 +156,7 @@ static void *dma_direct_alloc_coherent(struct device *dev, size_t size, static void dma_direct_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, - struct dma_attrs *attrs) + unsigned long attrs) { struct iommu_table *iommu; @@ -177,7 +177,7 @@ static void dma_direct_free_coherent(struct device *dev, size_t size, int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t handle, size_t size, - struct dma_attrs *attrs) + unsigned long attrs) { unsigned long pfn; @@ -195,7 +195,7 @@ int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma, static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { struct scatterlist *sg; int i; @@ -211,7 +211,7 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { } @@ -232,7 +232,7 @@ static inline 
dma_addr_t dma_direct_map_page(struct device *dev, unsigned long offset, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { BUG_ON(dir == DMA_NONE); __dma_sync_page(page, offset, size, dir); @@ -243,7 +243,7 @@ static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { } diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c index a89f4f7a66bd..c1ca9282f4a0 100644 --- a/arch/powerpc/kernel/ibmebus.c +++ b/arch/powerpc/kernel/ibmebus.c @@ -65,7 +65,7 @@ static void *ibmebus_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, - struct dma_attrs *attrs) + unsigned long attrs) { void *mem; @@ -78,7 +78,7 @@ static void *ibmebus_alloc_coherent(struct device *dev, static void ibmebus_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, - struct dma_attrs *attrs) + unsigned long attrs) { kfree(vaddr); } @@ -88,7 +88,7 @@ static dma_addr_t ibmebus_map_page(struct device *dev, unsigned long offset, size_t size, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { return (dma_addr_t)(page_address(page) + offset); } @@ -97,7 +97,7 @@ static void ibmebus_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { return; } @@ -105,7 +105,7 @@ static void ibmebus_unmap_page(struct device *dev, static int ibmebus_map_sg(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { struct scatterlist *sg; int i; @@ -121,7 +121,7 @@ static int ibmebus_map_sg(struct device *dev, static void ibmebus_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { return; } diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index a8e3490b54e3..37d6e741be82 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c @@ -307,7 +307,7 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl, void *page, unsigned int npages, enum dma_data_direction direction, unsigned long mask, unsigned int align_order, - struct dma_attrs *attrs) + unsigned long attrs) { unsigned long entry; dma_addr_t ret = DMA_ERROR_CODE; @@ -431,7 +431,7 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl, struct scatterlist *sglist, int nelems, unsigned long mask, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { dma_addr_t dma_next = 0, dma_addr; struct scatterlist *s, *outs, *segstart; @@ -574,7 +574,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl, void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist, int nelems, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { struct scatterlist *sg; @@ -753,7 +753,7 @@ void iommu_free_table(struct iommu_table *tbl, const char *node_name) dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl, struct page *page, unsigned long offset, size_t size, unsigned long mask, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { dma_addr_t dma_handle = DMA_ERROR_CODE; void 
*vaddr; @@ -790,7 +790,7 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl, void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { unsigned int npages; @@ -845,7 +845,7 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl, nio_pages = size >> tbl->it_page_shift; io_order = get_iommu_order(size, tbl); mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL, - mask >> tbl->it_page_shift, io_order, NULL); + mask >> tbl->it_page_shift, io_order, 0); if (mapping == DMA_ERROR_CODE) { free_pages((unsigned long)ret, order); return NULL; diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c index 8d7358f3a273..b3813ddb2fb4 100644 --- a/arch/powerpc/kernel/vio.c +++ b/arch/powerpc/kernel/vio.c @@ -482,7 +482,7 @@ static void vio_cmo_balance(struct work_struct *work) static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, - struct dma_attrs *attrs) + unsigned long attrs) { struct vio_dev *viodev = to_vio_dev(dev); void *ret; @@ -503,7 +503,7 @@ static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size, static void vio_dma_iommu_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, - struct dma_attrs *attrs) + unsigned long attrs) { struct vio_dev *viodev = to_vio_dev(dev); @@ -515,7 +515,7 @@ static void vio_dma_iommu_free_coherent(struct device *dev, size_t size, static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { struct vio_dev *viodev = to_vio_dev(dev); struct iommu_table *tbl; @@ -539,7 +539,7 @@ static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page, static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { struct vio_dev *viodev = to_vio_dev(dev); struct iommu_table *tbl; @@ -552,7 +552,7 @@ static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle, static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist, int nelems, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { struct vio_dev *viodev = to_vio_dev(dev); struct iommu_table *tbl; @@ -588,7 +588,7 @@ static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist, static void vio_dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist, int nelems, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { struct vio_dev *viodev = to_vio_dev(dev); struct iommu_table *tbl; diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c index 9027d7c48507..f7d1a4953ea0 100644 --- a/arch/powerpc/platforms/cell/iommu.c +++ b/arch/powerpc/platforms/cell/iommu.c @@ -166,7 +166,7 @@ static void invalidate_tce_cache(struct cbe_iommu *iommu, unsigned long *pte, static int tce_build_cell(struct iommu_table *tbl, long index, long npages, unsigned long uaddr, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { int i; unsigned long *io_pte, base_pte; @@ -193,7 +193,7 @@ static int tce_build_cell(struct iommu_table *tbl, long index, long npages, base_pte = CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M | 
CBE_IOPTE_SO_RW | (window->ioid & CBE_IOPTE_IOID_Mask); #endif - if (unlikely(dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))) + if (unlikely(attrs & DMA_ATTR_WEAK_ORDERING)) base_pte &= ~CBE_IOPTE_SO_RW; io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset); @@ -526,7 +526,7 @@ cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np, __set_bit(0, window->table.it_map); tce_build_cell(&window->table, window->table.it_offset, 1, - (unsigned long)iommu->pad_page, DMA_TO_DEVICE, NULL); + (unsigned long)iommu->pad_page, DMA_TO_DEVICE, 0); return window; } @@ -572,7 +572,7 @@ static struct iommu_table *cell_get_iommu_table(struct device *dev) static void *dma_fixed_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, - struct dma_attrs *attrs) + unsigned long attrs) { if (iommu_fixed_is_weak) return iommu_alloc_coherent(dev, cell_get_iommu_table(dev), @@ -586,7 +586,7 @@ static void *dma_fixed_alloc_coherent(struct device *dev, size_t size, static void dma_fixed_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, - struct dma_attrs *attrs) + unsigned long attrs) { if (iommu_fixed_is_weak) iommu_free_coherent(cell_get_iommu_table(dev), size, vaddr, @@ -598,9 +598,9 @@ static void dma_fixed_free_coherent(struct device *dev, size_t size, static dma_addr_t dma_fixed_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { - if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs)) + if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING)) return dma_direct_ops.map_page(dev, page, offset, size, direction, attrs); else @@ -611,9 +611,9 @@ static dma_addr_t dma_fixed_map_page(struct device *dev, struct page *page, static void dma_fixed_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { - if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs)) + if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING)) dma_direct_ops.unmap_page(dev, dma_addr, size, direction, attrs); else @@ -623,9 +623,9 @@ static void dma_fixed_unmap_page(struct device *dev, dma_addr_t dma_addr, static int dma_fixed_map_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { - if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs)) + if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING)) return dma_direct_ops.map_sg(dev, sg, nents, direction, attrs); else return ppc_iommu_map_sg(dev, cell_get_iommu_table(dev), sg, @@ -635,9 +635,9 @@ static int dma_fixed_map_sg(struct device *dev, struct scatterlist *sg, static void dma_fixed_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { - if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs)) + if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING)) dma_direct_ops.unmap_sg(dev, sg, nents, direction, attrs); else ppc_iommu_unmap_sg(cell_get_iommu_table(dev), sg, nents, @@ -1162,7 +1162,7 @@ static int __init setup_iommu_fixed(char *str) pciep = of_find_node_by_type(NULL, "pcie-endpoint"); if (strcmp(str, "weak") == 0 || (pciep && strcmp(str, "strong") != 0)) - iommu_fixed_is_weak = 1; + iommu_fixed_is_weak = DMA_ATTR_WEAK_ORDERING; 
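
The cell IOMMU hunks above also show how attribute queries change under the new calling convention: dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs) becomes a plain bit test, attrs & DMA_ATTR_WEAK_ORDERING, and iommu_fixed_is_weak can now hold the attribute bit itself. Below is a minimal sketch of that pattern, assuming only that DMA_ATTR_WEAK_ORDERING is provided by <linux/dma-mapping.h> after this series; the function name is hypothetical and is not part of this patch.

#include <linux/types.h>
#include <linux/dma-mapping.h>

/*
 * Illustrative sketch only, not code from any driver touched here.
 * Old style (removed by this series):
 *	return dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs);
 * New style: DMA attributes are bit flags in an unsigned long.
 */
static bool example_is_weakly_ordered(unsigned long attrs)
{
	return attrs & DMA_ATTR_WEAK_ORDERING;
}
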
of_node_put(pciep); diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c index 43dd3fb514e0..309d9ccccd50 100644 --- a/arch/powerpc/platforms/pasemi/iommu.c +++ b/arch/powerpc/platforms/pasemi/iommu.c @@ -88,7 +88,7 @@ static int iommu_table_iobmap_inited; static int iobmap_build(struct iommu_table *tbl, long index, long npages, unsigned long uaddr, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { u32 *ip; u32 rpn; diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c index 4383a5ff82ba..00e1a0195c78 100644 --- a/arch/powerpc/platforms/powernv/npu-dma.c +++ b/arch/powerpc/platforms/powernv/npu-dma.c @@ -73,7 +73,7 @@ EXPORT_SYMBOL(pnv_pci_get_npu_dev); static void *dma_npu_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, - struct dma_attrs *attrs) + unsigned long attrs) { NPU_DMA_OP_UNSUPPORTED(); return NULL; @@ -81,7 +81,7 @@ static void *dma_npu_alloc(struct device *dev, size_t size, static void dma_npu_free(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, - struct dma_attrs *attrs) + unsigned long attrs) { NPU_DMA_OP_UNSUPPORTED(); } @@ -89,7 +89,7 @@ static void dma_npu_free(struct device *dev, size_t size, static dma_addr_t dma_npu_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { NPU_DMA_OP_UNSUPPORTED(); return 0; @@ -97,7 +97,7 @@ static dma_addr_t dma_npu_map_page(struct device *dev, struct page *page, static int dma_npu_map_sg(struct device *dev, struct scatterlist *sglist, int nelems, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { NPU_DMA_OP_UNSUPPORTED(); return 0; diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 891fc4a453df..6b9528307f62 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -1806,7 +1806,7 @@ static void pnv_pci_p7ioc_tce_invalidate(struct iommu_table *tbl, static int pnv_ioda1_tce_build(struct iommu_table *tbl, long index, long npages, unsigned long uaddr, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { int ret = pnv_tce_build(tbl, index, npages, uaddr, direction, attrs); @@ -1950,7 +1950,7 @@ static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl, static int pnv_ioda2_tce_build(struct iommu_table *tbl, long index, long npages, unsigned long uaddr, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { int ret = pnv_tce_build(tbl, index, npages, uaddr, direction, attrs); diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index 6701dd5ded20..a21d831c1114 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c @@ -704,7 +704,7 @@ static __be64 *pnv_tce(struct iommu_table *tbl, long idx) int pnv_tce_build(struct iommu_table *tbl, long index, long npages, unsigned long uaddr, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { u64 proto_tce = iommu_direction_to_tce_perm(direction); u64 rpn = __pa(uaddr) >> tbl->it_page_shift; diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index d088d4f06116..e64df7894d6e 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h @@ -181,7 
+181,7 @@ struct pnv_phb { extern struct pci_ops pnv_pci_ops; extern int pnv_tce_build(struct iommu_table *tbl, long index, long npages, unsigned long uaddr, enum dma_data_direction direction, - struct dma_attrs *attrs); + unsigned long attrs); extern void pnv_tce_free(struct iommu_table *tbl, long index, long npages); extern int pnv_tce_xchg(struct iommu_table *tbl, long index, unsigned long *hpa, enum dma_data_direction *direction); diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c index 5606fe36faf2..8af1c15aef85 100644 --- a/arch/powerpc/platforms/ps3/system-bus.c +++ b/arch/powerpc/platforms/ps3/system-bus.c @@ -516,7 +516,7 @@ core_initcall(ps3_system_bus_init); */ static void * ps3_alloc_coherent(struct device *_dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, - struct dma_attrs *attrs) + unsigned long attrs) { int result; struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); @@ -553,7 +553,7 @@ clean_none: } static void ps3_free_coherent(struct device *_dev, size_t size, void *vaddr, - dma_addr_t dma_handle, struct dma_attrs *attrs) + dma_addr_t dma_handle, unsigned long attrs) { struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); @@ -569,7 +569,7 @@ static void ps3_free_coherent(struct device *_dev, size_t size, void *vaddr, static dma_addr_t ps3_sb_map_page(struct device *_dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); int result; @@ -592,7 +592,7 @@ static dma_addr_t ps3_sb_map_page(struct device *_dev, struct page *page, static dma_addr_t ps3_ioc0_map_page(struct device *_dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); int result; @@ -626,7 +626,7 @@ static dma_addr_t ps3_ioc0_map_page(struct device *_dev, struct page *page, } static void ps3_unmap_page(struct device *_dev, dma_addr_t dma_addr, - size_t size, enum dma_data_direction direction, struct dma_attrs *attrs) + size_t size, enum dma_data_direction direction, unsigned long attrs) { struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); int result; @@ -640,7 +640,7 @@ static void ps3_unmap_page(struct device *_dev, dma_addr_t dma_addr, } static int ps3_sb_map_sg(struct device *_dev, struct scatterlist *sgl, - int nents, enum dma_data_direction direction, struct dma_attrs *attrs) + int nents, enum dma_data_direction direction, unsigned long attrs) { #if defined(CONFIG_PS3_DYNAMIC_DMA) BUG_ON("do"); @@ -670,14 +670,14 @@ static int ps3_sb_map_sg(struct device *_dev, struct scatterlist *sgl, static int ps3_ioc0_map_sg(struct device *_dev, struct scatterlist *sg, int nents, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { BUG(); return 0; } static void ps3_sb_unmap_sg(struct device *_dev, struct scatterlist *sg, - int nents, enum dma_data_direction direction, struct dma_attrs *attrs) + int nents, enum dma_data_direction direction, unsigned long attrs) { #if defined(CONFIG_PS3_DYNAMIC_DMA) BUG_ON("do"); @@ -686,7 +686,7 @@ static void ps3_sb_unmap_sg(struct device *_dev, struct scatterlist *sg, static void ps3_ioc0_unmap_sg(struct device *_dev, struct scatterlist *sg, int nents, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { 
BUG(); } diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index 770a753b52c9..0024e451bb36 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -123,7 +123,7 @@ static void iommu_pseries_free_group(struct iommu_table_group *table_group, static int tce_build_pSeries(struct iommu_table *tbl, long index, long npages, unsigned long uaddr, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { u64 proto_tce; __be64 *tcep, *tces; @@ -173,7 +173,7 @@ static void tce_freemulti_pSeriesLP(struct iommu_table*, long, long); static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages, unsigned long uaddr, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { u64 rc = 0; u64 proto_tce, tce; @@ -216,7 +216,7 @@ static DEFINE_PER_CPU(__be64 *, tce_page); static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages, unsigned long uaddr, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { u64 rc = 0; u64 proto_tce; diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c index 26904f4879ec..3573d54b2770 100644 --- a/arch/powerpc/sysdev/dart_iommu.c +++ b/arch/powerpc/sysdev/dart_iommu.c @@ -185,7 +185,7 @@ static void dart_flush(struct iommu_table *tbl) static int dart_build(struct iommu_table *tbl, long index, long npages, unsigned long uaddr, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { unsigned int *dp, *orig_dp; unsigned int rpn; diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h index 3249b7464889..ffaba07f50ab 100644 --- a/arch/s390/include/asm/dma-mapping.h +++ b/arch/s390/include/asm/dma-mapping.h @@ -5,7 +5,6 @@ #include #include #include -#include #include #include diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c index 070f1ae5cfad..7297fce9bf80 100644 --- a/arch/s390/pci/pci_dma.c +++ b/arch/s390/pci/pci_dma.c @@ -286,7 +286,7 @@ static inline void zpci_err_dma(unsigned long rc, unsigned long addr) static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); unsigned long nr_pages, iommu_page_index; @@ -332,7 +332,7 @@ out_err: static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); unsigned long iommu_page_index; @@ -355,7 +355,7 @@ static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr, static void *s390_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, - struct dma_attrs *attrs) + unsigned long attrs) { struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); struct page *page; @@ -370,7 +370,7 @@ static void *s390_dma_alloc(struct device *dev, size_t size, pa = page_to_phys(page); memset((void *) pa, 0, size); - map = s390_dma_map_pages(dev, page, 0, size, DMA_BIDIRECTIONAL, NULL); + map = s390_dma_map_pages(dev, page, 0, size, DMA_BIDIRECTIONAL, 0); if (dma_mapping_error(dev, map)) { free_pages(pa, get_order(size)); return NULL; @@ -384,19 +384,19 @@ static void *s390_dma_alloc(struct device *dev, size_t size, static void s390_dma_free(struct device *dev, 
size_t size, void *pa, dma_addr_t dma_handle, - struct dma_attrs *attrs) + unsigned long attrs) { struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); size = PAGE_ALIGN(size); atomic64_sub(size / PAGE_SIZE, &zdev->allocated_pages); - s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, NULL); + s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, 0); free_pages((unsigned long) pa, get_order(size)); } static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg, int nr_elements, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { int mapped_elements = 0; struct scatterlist *s; @@ -405,7 +405,7 @@ static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg, for_each_sg(sg, s, nr_elements, i) { struct page *page = sg_page(s); s->dma_address = s390_dma_map_pages(dev, page, s->offset, - s->length, dir, NULL); + s->length, dir, 0); if (!dma_mapping_error(dev, s->dma_address)) { s->dma_length = s->length; mapped_elements++; @@ -419,7 +419,7 @@ unmap: for_each_sg(sg, s, mapped_elements, i) { if (s->dma_address) s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, - dir, NULL); + dir, 0); s->dma_address = 0; s->dma_length = 0; } @@ -429,13 +429,14 @@ unmap: static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nr_elements, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { struct scatterlist *s; int i; for_each_sg(sg, s, nr_elements, i) { - s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, dir, NULL); + s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, dir, + 0); s->dma_address = 0; s->dma_length = 0; } diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h index e11cf0c8206b..0052ad40e86d 100644 --- a/arch/sh/include/asm/dma-mapping.h +++ b/arch/sh/include/asm/dma-mapping.h @@ -17,9 +17,9 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size, /* arch/sh/mm/consistent.c */ extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr, gfp_t flag, - struct dma_attrs *attrs); + unsigned long attrs); extern void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, - struct dma_attrs *attrs); + unsigned long attrs); #endif /* __ASM_SH_DMA_MAPPING_H */ diff --git a/arch/sh/kernel/dma-nommu.c b/arch/sh/kernel/dma-nommu.c index 5b0bfcda6d0b..eadb669a7329 100644 --- a/arch/sh/kernel/dma-nommu.c +++ b/arch/sh/kernel/dma-nommu.c @@ -13,7 +13,7 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { dma_addr_t addr = page_to_phys(page) + offset; @@ -25,7 +25,7 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page, static int nommu_map_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { struct scatterlist *s; int i; diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c index b81d9dbf9fef..92b6976fde59 100644 --- a/arch/sh/mm/consistent.c +++ b/arch/sh/mm/consistent.c @@ -34,7 +34,7 @@ fs_initcall(dma_init); void *dma_generic_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, - struct dma_attrs *attrs) + unsigned long attrs) { void *ret, *ret_nocache; int order = get_order(size); @@ -66,7 +66,7 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size, void 
dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, - struct dma_attrs *attrs) + unsigned long attrs) { int order = get_order(size); unsigned long pfn = dma_handle >> PAGE_SHIFT; diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c index 37686828c3d9..5c615abff030 100644 --- a/arch/sparc/kernel/iommu.c +++ b/arch/sparc/kernel/iommu.c @@ -196,7 +196,7 @@ static inline void iommu_free_ctx(struct iommu *iommu, int ctx) static void *dma_4u_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp, - struct dma_attrs *attrs) + unsigned long attrs) { unsigned long order, first_page; struct iommu *iommu; @@ -245,7 +245,7 @@ static void *dma_4u_alloc_coherent(struct device *dev, size_t size, static void dma_4u_free_coherent(struct device *dev, size_t size, void *cpu, dma_addr_t dvma, - struct dma_attrs *attrs) + unsigned long attrs) { struct iommu *iommu; unsigned long order, npages; @@ -263,7 +263,7 @@ static void dma_4u_free_coherent(struct device *dev, size_t size, static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page, unsigned long offset, size_t sz, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { struct iommu *iommu; struct strbuf *strbuf; @@ -385,7 +385,7 @@ do_flush_sync: static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr, size_t sz, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { struct iommu *iommu; struct strbuf *strbuf; @@ -431,7 +431,7 @@ static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr, static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist, int nelems, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { struct scatterlist *s, *outs, *segstart; unsigned long flags, handle, prot, ctx; @@ -607,7 +607,7 @@ static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg) static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist, int nelems, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { unsigned long flags, ctx; struct scatterlist *sg; diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c index ffd5ff4678cf..2344103414d1 100644 --- a/arch/sparc/kernel/ioport.c +++ b/arch/sparc/kernel/ioport.c @@ -260,7 +260,7 @@ EXPORT_SYMBOL(sbus_set_sbus64); */ static void *sbus_alloc_coherent(struct device *dev, size_t len, dma_addr_t *dma_addrp, gfp_t gfp, - struct dma_attrs *attrs) + unsigned long attrs) { struct platform_device *op = to_platform_device(dev); unsigned long len_total = PAGE_ALIGN(len); @@ -315,7 +315,7 @@ err_nopages: } static void sbus_free_coherent(struct device *dev, size_t n, void *p, - dma_addr_t ba, struct dma_attrs *attrs) + dma_addr_t ba, unsigned long attrs) { struct resource *res; struct page *pgv; @@ -355,7 +355,7 @@ static void sbus_free_coherent(struct device *dev, size_t n, void *p, static dma_addr_t sbus_map_page(struct device *dev, struct page *page, unsigned long offset, size_t len, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { void *va = page_address(page) + offset; @@ -371,20 +371,20 @@ static dma_addr_t sbus_map_page(struct device *dev, struct page *page, } static void sbus_unmap_page(struct device *dev, dma_addr_t ba, size_t n, - enum dma_data_direction dir, struct dma_attrs *attrs) + enum dma_data_direction dir, unsigned long attrs) { mmu_release_scsi_one(dev, ba, n); } static int 
sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, - enum dma_data_direction dir, struct dma_attrs *attrs) + enum dma_data_direction dir, unsigned long attrs) { mmu_get_scsi_sgl(dev, sg, n); return n; } static void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n, - enum dma_data_direction dir, struct dma_attrs *attrs) + enum dma_data_direction dir, unsigned long attrs) { mmu_release_scsi_sgl(dev, sg, n); } @@ -429,7 +429,7 @@ arch_initcall(sparc_register_ioport); */ static void *pci32_alloc_coherent(struct device *dev, size_t len, dma_addr_t *pba, gfp_t gfp, - struct dma_attrs *attrs) + unsigned long attrs) { unsigned long len_total = PAGE_ALIGN(len); void *va; @@ -482,7 +482,7 @@ err_nopages: * past this call are illegal. */ static void pci32_free_coherent(struct device *dev, size_t n, void *p, - dma_addr_t ba, struct dma_attrs *attrs) + dma_addr_t ba, unsigned long attrs) { struct resource *res; @@ -518,14 +518,14 @@ static void pci32_free_coherent(struct device *dev, size_t n, void *p, static dma_addr_t pci32_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { /* IIep is write-through, not flushing. */ return page_to_phys(page) + offset; } static void pci32_unmap_page(struct device *dev, dma_addr_t ba, size_t size, - enum dma_data_direction dir, struct dma_attrs *attrs) + enum dma_data_direction dir, unsigned long attrs) { if (dir != PCI_DMA_TODEVICE) dma_make_coherent(ba, PAGE_ALIGN(size)); @@ -548,7 +548,7 @@ static void pci32_unmap_page(struct device *dev, dma_addr_t ba, size_t size, */ static int pci32_map_sg(struct device *device, struct scatterlist *sgl, int nents, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { struct scatterlist *sg; int n; @@ -567,7 +567,7 @@ static int pci32_map_sg(struct device *device, struct scatterlist *sgl, */ static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { struct scatterlist *sg; int n; diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c index 836e8cef47e2..61c6f935accc 100644 --- a/arch/sparc/kernel/pci_sun4v.c +++ b/arch/sparc/kernel/pci_sun4v.c @@ -130,7 +130,7 @@ static inline long iommu_batch_end(void) static void *dma_4v_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp, - struct dma_attrs *attrs) + unsigned long attrs) { unsigned long flags, order, first_page, npages, n; struct iommu *iommu; @@ -213,7 +213,7 @@ static void dma_4v_iommu_demap(void *demap_arg, unsigned long entry, } static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu, - dma_addr_t dvma, struct dma_attrs *attrs) + dma_addr_t dvma, unsigned long attrs) { struct pci_pbm_info *pbm; struct iommu *iommu; @@ -235,7 +235,7 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu, static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page, unsigned long offset, size_t sz, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { struct iommu *iommu; unsigned long flags, npages, oaddr; @@ -294,7 +294,7 @@ iommu_map_fail: static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr, size_t sz, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { struct pci_pbm_info *pbm; struct iommu *iommu; @@ -322,7 +322,7 @@ static void 
dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr, static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, int nelems, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { struct scatterlist *s, *outs, *segstart; unsigned long flags, handle, prot; @@ -466,7 +466,7 @@ iommu_map_failed: static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, int nelems, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { struct pci_pbm_info *pbm; struct scatterlist *sg; diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c index b6bc0547a4f6..09bb774b39cd 100644 --- a/arch/tile/kernel/pci-dma.c +++ b/arch/tile/kernel/pci-dma.c @@ -34,7 +34,7 @@ static void *tile_dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, - struct dma_attrs *attrs) + unsigned long attrs) { u64 dma_mask = (dev && dev->coherent_dma_mask) ? dev->coherent_dma_mask : DMA_BIT_MASK(32); @@ -78,7 +78,7 @@ static void *tile_dma_alloc_coherent(struct device *dev, size_t size, */ static void tile_dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, - struct dma_attrs *attrs) + unsigned long attrs) { homecache_free_pages((unsigned long)vaddr, get_order(size)); } @@ -202,7 +202,7 @@ static void __dma_complete_pa_range(dma_addr_t dma_addr, size_t size, static int tile_dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { struct scatterlist *sg; int i; @@ -224,7 +224,7 @@ static int tile_dma_map_sg(struct device *dev, struct scatterlist *sglist, static void tile_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { struct scatterlist *sg; int i; @@ -240,7 +240,7 @@ static void tile_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, static dma_addr_t tile_dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { BUG_ON(!valid_dma_direction(direction)); @@ -252,7 +252,7 @@ static dma_addr_t tile_dma_map_page(struct device *dev, struct page *page, static void tile_dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { BUG_ON(!valid_dma_direction(direction)); @@ -343,7 +343,7 @@ EXPORT_SYMBOL(tile_dma_map_ops); static void *tile_pci_dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, - struct dma_attrs *attrs) + unsigned long attrs) { int node = dev_to_node(dev); int order = get_order(size); @@ -368,14 +368,14 @@ static void *tile_pci_dma_alloc_coherent(struct device *dev, size_t size, */ static void tile_pci_dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, - struct dma_attrs *attrs) + unsigned long attrs) { homecache_free_pages((unsigned long)vaddr, get_order(size)); } static int tile_pci_dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { struct scatterlist *sg; int i; @@ -400,7 +400,7 @@ static int tile_pci_dma_map_sg(struct device *dev, struct scatterlist *sglist, static void tile_pci_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int 
nents, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { struct scatterlist *sg; int i; @@ -416,7 +416,7 @@ static void tile_pci_dma_unmap_sg(struct device *dev, static dma_addr_t tile_pci_dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { BUG_ON(!valid_dma_direction(direction)); @@ -429,7 +429,7 @@ static dma_addr_t tile_pci_dma_map_page(struct device *dev, struct page *page, static void tile_pci_dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { BUG_ON(!valid_dma_direction(direction)); @@ -531,7 +531,7 @@ EXPORT_SYMBOL(gx_pci_dma_map_ops); #ifdef CONFIG_SWIOTLB static void *tile_swiotlb_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, - struct dma_attrs *attrs) + unsigned long attrs) { gfp |= GFP_DMA; return swiotlb_alloc_coherent(dev, size, dma_handle, gfp); @@ -539,7 +539,7 @@ static void *tile_swiotlb_alloc_coherent(struct device *dev, size_t size, static void tile_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_addr, - struct dma_attrs *attrs) + unsigned long attrs) { swiotlb_free_coherent(dev, size, vaddr, dma_addr); } diff --git a/arch/unicore32/mm/dma-swiotlb.c b/arch/unicore32/mm/dma-swiotlb.c index 16c08b2143a7..3e9f6489ba38 100644 --- a/arch/unicore32/mm/dma-swiotlb.c +++ b/arch/unicore32/mm/dma-swiotlb.c @@ -19,14 +19,14 @@ static void *unicore_swiotlb_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags, - struct dma_attrs *attrs) + unsigned long attrs) { return swiotlb_alloc_coherent(dev, size, dma_handle, flags); } static void unicore_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_addr, - struct dma_attrs *attrs) + unsigned long attrs) { swiotlb_free_coherent(dev, size, vaddr, dma_addr); } diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index 3a27b93e6261..44461626830e 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h @@ -9,7 +9,6 @@ #include #include #include -#include #include #include #include @@ -48,11 +47,11 @@ extern int dma_supported(struct device *hwdev, u64 mask); extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr, gfp_t flag, - struct dma_attrs *attrs); + unsigned long attrs); extern void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_addr, - struct dma_attrs *attrs); + unsigned long attrs); #ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */ extern bool dma_capable(struct device *dev, dma_addr_t addr, size_t size); diff --git a/arch/x86/include/asm/swiotlb.h b/arch/x86/include/asm/swiotlb.h index ab05d73e2bb7..d2f69b9ff732 100644 --- a/arch/x86/include/asm/swiotlb.h +++ b/arch/x86/include/asm/swiotlb.h @@ -31,9 +31,9 @@ static inline void dma_mark_clean(void *addr, size_t size) {} extern void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags, - struct dma_attrs *attrs); + unsigned long attrs); extern void x86_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_addr, - struct dma_attrs *attrs); + unsigned long attrs); #endif /* _ASM_X86_SWIOTLB_H */ diff --git a/arch/x86/include/asm/xen/page-coherent.h 
b/arch/x86/include/asm/xen/page-coherent.h index acd844c017d3..f02f025ff988 100644 --- a/arch/x86/include/asm/xen/page-coherent.h +++ b/arch/x86/include/asm/xen/page-coherent.h @@ -2,12 +2,11 @@ #define _ASM_X86_XEN_PAGE_COHERENT_H #include -#include #include static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags, - struct dma_attrs *attrs) + unsigned long attrs) { void *vstart = (void*)__get_free_pages(flags, get_order(size)); *dma_handle = virt_to_phys(vstart); @@ -16,18 +15,18 @@ static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, static inline void xen_free_coherent_pages(struct device *hwdev, size_t size, void *cpu_addr, dma_addr_t dma_handle, - struct dma_attrs *attrs) + unsigned long attrs) { free_pages((unsigned long) cpu_addr, get_order(size)); } static inline void xen_dma_map_page(struct device *hwdev, struct page *page, dma_addr_t dev_addr, unsigned long offset, size_t size, - enum dma_data_direction dir, struct dma_attrs *attrs) { } + enum dma_data_direction dir, unsigned long attrs) { } static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) { } + unsigned long attrs) { } static inline void xen_dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t handle, size_t size, enum dma_data_direction dir) { } diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c index 42d27a62a404..63ff468a7986 100644 --- a/arch/x86/kernel/amd_gart_64.c +++ b/arch/x86/kernel/amd_gart_64.c @@ -241,7 +241,7 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem, static dma_addr_t gart_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { unsigned long bus; phys_addr_t paddr = page_to_phys(page) + offset; @@ -263,7 +263,7 @@ static dma_addr_t gart_map_page(struct device *dev, struct page *page, */ static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { unsigned long iommu_page; int npages; @@ -285,7 +285,7 @@ static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr, * Wrapper for pci_unmap_single working with scatterlists. */ static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction dir, struct dma_attrs *attrs) + enum dma_data_direction dir, unsigned long attrs) { struct scatterlist *s; int i; @@ -293,7 +293,7 @@ static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, for_each_sg(sg, s, nents, i) { if (!s->dma_length || !s->length) break; - gart_unmap_page(dev, s->dma_address, s->dma_length, dir, NULL); + gart_unmap_page(dev, s->dma_address, s->dma_length, dir, 0); } } @@ -315,7 +315,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg, addr = dma_map_area(dev, addr, s->length, dir, 0); if (addr == bad_dma_addr) { if (i > 0) - gart_unmap_sg(dev, sg, i, dir, NULL); + gart_unmap_sg(dev, sg, i, dir, 0); nents = 0; sg[0].dma_length = 0; break; @@ -386,7 +386,7 @@ dma_map_cont(struct device *dev, struct scatterlist *start, int nelems, * Merge chunks that have page aligned sizes into a continuous mapping. 
*/ static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction dir, struct dma_attrs *attrs) + enum dma_data_direction dir, unsigned long attrs) { struct scatterlist *s, *ps, *start_sg, *sgmap; int need = 0, nextneed, i, out, start; @@ -456,7 +456,7 @@ static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, error: flush_gart(); - gart_unmap_sg(dev, sg, out, dir, NULL); + gart_unmap_sg(dev, sg, out, dir, 0); /* When it was forced or merged try again in a dumb way */ if (force_iommu || iommu_merge) { @@ -476,7 +476,7 @@ error: /* allocate and map a coherent mapping */ static void * gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr, - gfp_t flag, struct dma_attrs *attrs) + gfp_t flag, unsigned long attrs) { dma_addr_t paddr; unsigned long align_mask; @@ -508,9 +508,9 @@ gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr, /* free a coherent mapping */ static void gart_free_coherent(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_addr, struct dma_attrs *attrs) + dma_addr_t dma_addr, unsigned long attrs) { - gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, NULL); + gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, 0); dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs); } diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c index 833b1d329c47..5d400ba1349d 100644 --- a/arch/x86/kernel/pci-calgary_64.c +++ b/arch/x86/kernel/pci-calgary_64.c @@ -340,7 +340,7 @@ static inline struct iommu_table *find_iommu_table(struct device *dev) static void calgary_unmap_sg(struct device *dev, struct scatterlist *sglist, int nelems,enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { struct iommu_table *tbl = find_iommu_table(dev); struct scatterlist *s; @@ -364,7 +364,7 @@ static void calgary_unmap_sg(struct device *dev, struct scatterlist *sglist, static int calgary_map_sg(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { struct iommu_table *tbl = find_iommu_table(dev); struct scatterlist *s; @@ -396,7 +396,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg, return nelems; error: - calgary_unmap_sg(dev, sg, nelems, dir, NULL); + calgary_unmap_sg(dev, sg, nelems, dir, 0); for_each_sg(sg, s, nelems, i) { sg->dma_address = DMA_ERROR_CODE; sg->dma_length = 0; @@ -407,7 +407,7 @@ error: static dma_addr_t calgary_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { void *vaddr = page_address(page) + offset; unsigned long uaddr; @@ -422,7 +422,7 @@ static dma_addr_t calgary_map_page(struct device *dev, struct page *page, static void calgary_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { struct iommu_table *tbl = find_iommu_table(dev); unsigned int npages; @@ -432,7 +432,7 @@ static void calgary_unmap_page(struct device *dev, dma_addr_t dma_addr, } static void* calgary_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs) + dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs) { void *ret = NULL; dma_addr_t mapping; @@ -466,7 +466,7 @@ error: static void calgary_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, 
- struct dma_attrs *attrs) + unsigned long attrs) { unsigned int npages; struct iommu_table *tbl = find_iommu_table(dev); diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index 6ba014c61d62..d30c37750765 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c @@ -77,7 +77,7 @@ void __init pci_iommu_alloc(void) } void *dma_generic_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr, gfp_t flag, - struct dma_attrs *attrs) + unsigned long attrs) { unsigned long dma_mask; struct page *page; @@ -120,7 +120,7 @@ again: } void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_addr, struct dma_attrs *attrs) + dma_addr_t dma_addr, unsigned long attrs) { unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; struct page *page = virt_to_page(vaddr); diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c index da15918d1c81..00e71ce396a8 100644 --- a/arch/x86/kernel/pci-nommu.c +++ b/arch/x86/kernel/pci-nommu.c @@ -28,7 +28,7 @@ check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size) static dma_addr_t nommu_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { dma_addr_t bus = page_to_phys(page) + offset; WARN_ON(size == 0); @@ -55,7 +55,7 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page, */ static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { struct scatterlist *s; int i; diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c index 5069ef560d83..b47edb8f5256 100644 --- a/arch/x86/kernel/pci-swiotlb.c +++ b/arch/x86/kernel/pci-swiotlb.c @@ -16,7 +16,7 @@ int swiotlb __read_mostly; void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags, - struct dma_attrs *attrs) + unsigned long attrs) { void *vaddr; @@ -37,7 +37,7 @@ void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, void x86_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_addr, - struct dma_attrs *attrs) + unsigned long attrs) { if (is_swiotlb_buffer(dma_to_phys(dev, dma_addr))) swiotlb_free_coherent(dev, size, vaddr, dma_addr); diff --git a/arch/x86/pci/sta2x11-fixup.c b/arch/x86/pci/sta2x11-fixup.c index 5ceda85b8687..052c1cb76305 100644 --- a/arch/x86/pci/sta2x11-fixup.c +++ b/arch/x86/pci/sta2x11-fixup.c @@ -169,7 +169,7 @@ static void *sta2x11_swiotlb_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags, - struct dma_attrs *attrs) + unsigned long attrs) { void *vaddr; diff --git a/arch/x86/pci/vmd.c b/arch/x86/pci/vmd.c index e88b4176260f..b814ca675131 100644 --- a/arch/x86/pci/vmd.c +++ b/arch/x86/pci/vmd.c @@ -274,14 +274,14 @@ static struct dma_map_ops *vmd_dma_ops(struct device *dev) } static void *vmd_alloc(struct device *dev, size_t size, dma_addr_t *addr, - gfp_t flag, struct dma_attrs *attrs) + gfp_t flag, unsigned long attrs) { return vmd_dma_ops(dev)->alloc(to_vmd_dev(dev), size, addr, flag, attrs); } static void vmd_free(struct device *dev, size_t size, void *vaddr, - dma_addr_t addr, struct dma_attrs *attrs) + dma_addr_t addr, unsigned long attrs) { return vmd_dma_ops(dev)->free(to_vmd_dev(dev), size, vaddr, addr, attrs); @@ -289,7 +289,7 @@ static void vmd_free(struct device *dev, size_t size, void *vaddr, static int 
vmd_mmap(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t addr, size_t size, - struct dma_attrs *attrs) + unsigned long attrs) { return vmd_dma_ops(dev)->mmap(to_vmd_dev(dev), vma, cpu_addr, addr, size, attrs); @@ -297,7 +297,7 @@ static int vmd_mmap(struct device *dev, struct vm_area_struct *vma, static int vmd_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr, dma_addr_t addr, size_t size, - struct dma_attrs *attrs) + unsigned long attrs) { return vmd_dma_ops(dev)->get_sgtable(to_vmd_dev(dev), sgt, cpu_addr, addr, size, attrs); @@ -306,26 +306,26 @@ static int vmd_get_sgtable(struct device *dev, struct sg_table *sgt, static dma_addr_t vmd_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { return vmd_dma_ops(dev)->map_page(to_vmd_dev(dev), page, offset, size, dir, attrs); } static void vmd_unmap_page(struct device *dev, dma_addr_t addr, size_t size, - enum dma_data_direction dir, struct dma_attrs *attrs) + enum dma_data_direction dir, unsigned long attrs) { vmd_dma_ops(dev)->unmap_page(to_vmd_dev(dev), addr, size, dir, attrs); } static int vmd_map_sg(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction dir, struct dma_attrs *attrs) + enum dma_data_direction dir, unsigned long attrs) { return vmd_dma_ops(dev)->map_sg(to_vmd_dev(dev), sg, nents, dir, attrs); } static void vmd_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction dir, struct dma_attrs *attrs) + enum dma_data_direction dir, unsigned long attrs) { vmd_dma_ops(dev)->unmap_sg(to_vmd_dev(dev), sg, nents, dir, attrs); } diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c index cd66698348ca..1e68806d6695 100644 --- a/arch/xtensa/kernel/pci-dma.c +++ b/arch/xtensa/kernel/pci-dma.c @@ -142,7 +142,7 @@ static void xtensa_sync_sg_for_device(struct device *dev, static void *xtensa_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t flag, - struct dma_attrs *attrs) + unsigned long attrs) { unsigned long ret; unsigned long uncached = 0; @@ -171,7 +171,7 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size, } static void xtensa_dma_free(struct device *hwdev, size_t size, void *vaddr, - dma_addr_t dma_handle, struct dma_attrs *attrs) + dma_addr_t dma_handle, unsigned long attrs) { unsigned long addr = (unsigned long)vaddr + XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR; @@ -185,7 +185,7 @@ static void xtensa_dma_free(struct device *hwdev, size_t size, void *vaddr, static dma_addr_t xtensa_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { dma_addr_t dma_handle = page_to_phys(page) + offset; @@ -195,14 +195,14 @@ static dma_addr_t xtensa_map_page(struct device *dev, struct page *page, static void xtensa_unmap_page(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { xtensa_sync_single_for_cpu(dev, dma_handle, size, dir); } static int xtensa_map_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { struct scatterlist *s; int i; @@ -217,7 +217,7 @@ static int xtensa_map_sg(struct device *dev, struct scatterlist *sg, static void xtensa_unmap_sg(struct device *dev, struct scatterlist *sg, int 
nents, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { struct scatterlist *s; int i; diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c index fb49443bfd32..4cfb39d543b4 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c @@ -52,7 +52,7 @@ static int exynos_drm_fb_mmap(struct fb_info *info, ret = dma_mmap_attrs(to_dma_dev(helper->dev), vma, exynos_gem->cookie, exynos_gem->dma_addr, exynos_gem->size, - &exynos_gem->dma_attrs); + exynos_gem->dma_attrs); if (ret < 0) { DRM_ERROR("failed to mmap.\n"); return ret; diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 8564c3da0d22..4bf00f57ffe8 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -17,7 +17,6 @@ #include #include #include -#include #include #include @@ -235,7 +234,7 @@ struct g2d_data { struct mutex cmdlist_mutex; dma_addr_t cmdlist_pool; void *cmdlist_pool_virt; - struct dma_attrs cmdlist_dma_attrs; + unsigned long cmdlist_dma_attrs; /* runqueue*/ struct g2d_runqueue_node *runqueue_node; @@ -256,13 +255,12 @@ static int g2d_init_cmdlist(struct g2d_data *g2d) int ret; struct g2d_buf_info *buf_info; - init_dma_attrs(&g2d->cmdlist_dma_attrs); - dma_set_attr(DMA_ATTR_WRITE_COMBINE, &g2d->cmdlist_dma_attrs); + g2d->cmdlist_dma_attrs = DMA_ATTR_WRITE_COMBINE; g2d->cmdlist_pool_virt = dma_alloc_attrs(to_dma_dev(subdrv->drm_dev), G2D_CMDLIST_POOL_SIZE, &g2d->cmdlist_pool, GFP_KERNEL, - &g2d->cmdlist_dma_attrs); + g2d->cmdlist_dma_attrs); if (!g2d->cmdlist_pool_virt) { dev_err(dev, "failed to allocate dma memory\n"); return -ENOMEM; @@ -295,7 +293,7 @@ static int g2d_init_cmdlist(struct g2d_data *g2d) err: dma_free_attrs(to_dma_dev(subdrv->drm_dev), G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt, - g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs); + g2d->cmdlist_pool, g2d->cmdlist_dma_attrs); return ret; } @@ -309,7 +307,7 @@ static void g2d_fini_cmdlist(struct g2d_data *g2d) dma_free_attrs(to_dma_dev(subdrv->drm_dev), G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt, - g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs); + g2d->cmdlist_pool, g2d->cmdlist_dma_attrs); } } diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index cdf9f1af4347..f2ae72ba7d5a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -24,7 +24,7 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem) { struct drm_device *dev = exynos_gem->base.dev; - enum dma_attr attr; + unsigned long attr; unsigned int nr_pages; struct sg_table sgt; int ret = -ENOMEM; @@ -34,7 +34,7 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem) return 0; } - init_dma_attrs(&exynos_gem->dma_attrs); + exynos_gem->dma_attrs = 0; /* * if EXYNOS_BO_CONTIG, fully physically contiguous memory @@ -42,7 +42,7 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem) * as possible. 
*/ if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG)) - dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &exynos_gem->dma_attrs); + exynos_gem->dma_attrs |= DMA_ATTR_FORCE_CONTIGUOUS; /* * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping @@ -54,8 +54,8 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem) else attr = DMA_ATTR_NON_CONSISTENT; - dma_set_attr(attr, &exynos_gem->dma_attrs); - dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &exynos_gem->dma_attrs); + exynos_gem->dma_attrs |= attr; + exynos_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING; nr_pages = exynos_gem->size >> PAGE_SHIFT; @@ -67,7 +67,7 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem) exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size, &exynos_gem->dma_addr, GFP_KERNEL, - &exynos_gem->dma_attrs); + exynos_gem->dma_attrs); if (!exynos_gem->cookie) { DRM_ERROR("failed to allocate buffer.\n"); goto err_free; @@ -75,7 +75,7 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem) ret = dma_get_sgtable_attrs(to_dma_dev(dev), &sgt, exynos_gem->cookie, exynos_gem->dma_addr, exynos_gem->size, - &exynos_gem->dma_attrs); + exynos_gem->dma_attrs); if (ret < 0) { DRM_ERROR("failed to get sgtable.\n"); goto err_dma_free; @@ -99,7 +99,7 @@ err_sgt_free: sg_free_table(&sgt); err_dma_free: dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie, - exynos_gem->dma_addr, &exynos_gem->dma_attrs); + exynos_gem->dma_addr, exynos_gem->dma_attrs); err_free: drm_free_large(exynos_gem->pages); @@ -120,7 +120,7 @@ static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem) dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie, (dma_addr_t)exynos_gem->dma_addr, - &exynos_gem->dma_attrs); + exynos_gem->dma_attrs); drm_free_large(exynos_gem->pages); } @@ -346,7 +346,7 @@ static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem, ret = dma_mmap_attrs(to_dma_dev(drm_dev), vma, exynos_gem->cookie, exynos_gem->dma_addr, exynos_gem->size, - &exynos_gem->dma_attrs); + exynos_gem->dma_attrs); if (ret < 0) { DRM_ERROR("failed to mmap.\n"); return ret; diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h index 78100742281d..df7c543d6558 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.h +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h @@ -50,7 +50,7 @@ struct exynos_drm_gem { void *cookie; void __iomem *kvaddr; dma_addr_t dma_addr; - struct dma_attrs dma_attrs; + unsigned long dma_attrs; struct page **pages; struct sg_table *sgt; }; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c index fa2ec0cd00e8..7abc550ebc00 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c @@ -54,15 +54,14 @@ struct mtk_drm_gem_obj *mtk_drm_gem_create(struct drm_device *dev, obj = &mtk_gem->base; - init_dma_attrs(&mtk_gem->dma_attrs); - dma_set_attr(DMA_ATTR_WRITE_COMBINE, &mtk_gem->dma_attrs); + mtk_gem->dma_attrs = DMA_ATTR_WRITE_COMBINE; if (!alloc_kmap) - dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &mtk_gem->dma_attrs); + mtk_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING; mtk_gem->cookie = dma_alloc_attrs(priv->dma_dev, obj->size, &mtk_gem->dma_addr, GFP_KERNEL, - &mtk_gem->dma_attrs); + mtk_gem->dma_attrs); if (!mtk_gem->cookie) { DRM_ERROR("failed to allocate %zx byte dma buffer", obj->size); ret = -ENOMEM; @@ -93,7 +92,7 @@ void mtk_drm_gem_free_object(struct drm_gem_object *obj) drm_prime_gem_destroy(obj, mtk_gem->sg); else 
dma_free_attrs(priv->dma_dev, obj->size, mtk_gem->cookie, - mtk_gem->dma_addr, &mtk_gem->dma_attrs); + mtk_gem->dma_addr, mtk_gem->dma_attrs); /* release file pointer to gem object. */ drm_gem_object_release(obj); @@ -173,7 +172,7 @@ static int mtk_drm_gem_object_mmap(struct drm_gem_object *obj, vma->vm_pgoff = 0; ret = dma_mmap_attrs(priv->dma_dev, vma, mtk_gem->cookie, - mtk_gem->dma_addr, obj->size, &mtk_gem->dma_attrs); + mtk_gem->dma_addr, obj->size, mtk_gem->dma_attrs); if (ret) drm_gem_vm_close(vma); @@ -224,7 +223,7 @@ struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj) ret = dma_get_sgtable_attrs(priv->dma_dev, sgt, mtk_gem->cookie, mtk_gem->dma_addr, obj->size, - &mtk_gem->dma_attrs); + mtk_gem->dma_attrs); if (ret) { DRM_ERROR("failed to allocate sgt, %d\n", ret); kfree(sgt); diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.h b/drivers/gpu/drm/mediatek/mtk_drm_gem.h index 3a2a5624a1cb..2752718fa5b2 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_gem.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.h @@ -35,7 +35,7 @@ struct mtk_drm_gem_obj { void *cookie; void *kvaddr; dma_addr_t dma_addr; - struct dma_attrs dma_attrs; + unsigned long dma_attrs; struct sg_table *sg; }; diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 26f859ec24b3..8a0237008f74 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -238,11 +238,10 @@ static int msm_drm_uninit(struct device *dev) } if (priv->vram.paddr) { - DEFINE_DMA_ATTRS(attrs); - dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs); + unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING; drm_mm_takedown(&priv->vram.mm); dma_free_attrs(dev, priv->vram.size, NULL, - priv->vram.paddr, &attrs); + priv->vram.paddr, attrs); } component_unbind_all(dev, ddev); @@ -310,21 +309,21 @@ static int msm_init_vram(struct drm_device *dev) } if (size) { - DEFINE_DMA_ATTRS(attrs); + unsigned long attrs = 0; void *p; priv->vram.size = size; drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1); - dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs); - dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); + attrs |= DMA_ATTR_NO_KERNEL_MAPPING; + attrs |= DMA_ATTR_WRITE_COMBINE; /* note that for no-kernel-mapping, the vaddr returned * is bogus, but non-null if allocation succeeded: */ p = dma_alloc_attrs(dev->dev, size, - &priv->vram.paddr, GFP_KERNEL, &attrs); + &priv->vram.paddr, GFP_KERNEL, attrs); if (!p) { dev_err(dev->dev, "failed to allocate VRAM\n"); priv->vram.paddr = 0; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c index 6b8f2a19b2d9..a6a7fa0d7679 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c @@ -109,7 +109,7 @@ struct gk20a_instmem { u16 iommu_bit; /* Only used by DMA API */ - struct dma_attrs attrs; + unsigned long attrs; }; #define gk20a_instmem(p) container_of((p), struct gk20a_instmem, base) @@ -293,7 +293,7 @@ gk20a_instobj_dtor_dma(struct nvkm_memory *memory) goto out; dma_free_attrs(dev, node->base.mem.size << PAGE_SHIFT, node->base.vaddr, - node->handle, &imem->attrs); + node->handle, imem->attrs); out: return node; @@ -386,7 +386,7 @@ gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align, node->base.vaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT, &node->handle, GFP_KERNEL, - &imem->attrs); + imem->attrs); if (!node->base.vaddr) { nvkm_error(subdev, "cannot allocate DMA memory\n"); return -ENOMEM; @@ -597,10 +597,9 @@ 
gk20a_instmem_new(struct nvkm_device *device, int index, nvkm_info(&imem->base.subdev, "using IOMMU\n"); } else { - init_dma_attrs(&imem->attrs); - dma_set_attr(DMA_ATTR_NON_CONSISTENT, &imem->attrs); - dma_set_attr(DMA_ATTR_WEAK_ORDERING, &imem->attrs); - dma_set_attr(DMA_ATTR_WRITE_COMBINE, &imem->attrs); + imem->attrs = DMA_ATTR_NON_CONSISTENT | + DMA_ATTR_WEAK_ORDERING | + DMA_ATTR_WRITE_COMBINE; nvkm_info(&imem->base.subdev, "using DMA API\n"); } diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c index 059e902f872d..b70f9423379c 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c @@ -17,8 +17,6 @@ #include #include -#include - #include "rockchip_drm_drv.h" #include "rockchip_drm_gem.h" @@ -28,15 +26,14 @@ static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj, struct drm_gem_object *obj = &rk_obj->base; struct drm_device *drm = obj->dev; - init_dma_attrs(&rk_obj->dma_attrs); - dma_set_attr(DMA_ATTR_WRITE_COMBINE, &rk_obj->dma_attrs); + rk_obj->dma_attrs = DMA_ATTR_WRITE_COMBINE; if (!alloc_kmap) - dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &rk_obj->dma_attrs); + rk_obj->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING; rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size, &rk_obj->dma_addr, GFP_KERNEL, - &rk_obj->dma_attrs); + rk_obj->dma_attrs); if (!rk_obj->kvaddr) { DRM_ERROR("failed to allocate %zu byte dma buffer", obj->size); return -ENOMEM; @@ -51,7 +48,7 @@ static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj) struct drm_device *drm = obj->dev; dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr, rk_obj->dma_addr, - &rk_obj->dma_attrs); + rk_obj->dma_attrs); } static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj, @@ -70,7 +67,7 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj, vma->vm_pgoff = 0; ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr, - obj->size, &rk_obj->dma_attrs); + obj->size, rk_obj->dma_attrs); if (ret) drm_gem_vm_close(vma); @@ -262,7 +259,7 @@ struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj) ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->kvaddr, rk_obj->dma_addr, obj->size, - &rk_obj->dma_attrs); + rk_obj->dma_attrs); if (ret) { DRM_ERROR("failed to allocate sgt, %d\n", ret); kfree(sgt); @@ -276,7 +273,7 @@ void *rockchip_gem_prime_vmap(struct drm_gem_object *obj) { struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); - if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, &rk_obj->dma_attrs)) + if (rk_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING) return NULL; return rk_obj->kvaddr; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h index ad22618473a4..18b3488db4ec 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h @@ -23,7 +23,7 @@ struct rockchip_gem_object { void *kvaddr; dma_addr_t dma_addr; - struct dma_attrs dma_attrs; + unsigned long dma_attrs; }; struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj); diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index fe4d2e1a8b58..c68746ce6624 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c @@ -37,7 +37,6 @@ #include #include #include -#include #include #include @@ -92,12 +91,12 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, unsigned long npages; int ret; int i; - DEFINE_DMA_ATTRS(attrs); 
+ unsigned long dma_attrs = 0; struct scatterlist *sg, *sg_list_start; int need_release = 0; if (dmasync) - dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs); + dma_attrs |= DMA_ATTR_WRITE_BARRIER; if (!size) return ERR_PTR(-EINVAL); @@ -215,7 +214,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, umem->sg_head.sgl, umem->npages, DMA_BIDIRECTIONAL, - &attrs); + dma_attrs); if (umem->nmap <= 0) { ret = -ENOMEM; diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 33c177ba93be..96de97a46079 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -2375,7 +2375,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom, static dma_addr_t map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { phys_addr_t paddr = page_to_phys(page) + offset; struct protection_domain *domain; @@ -2398,7 +2398,7 @@ static dma_addr_t map_page(struct device *dev, struct page *page, * The exported unmap_single function for dma_ops. */ static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, - enum dma_data_direction dir, struct dma_attrs *attrs) + enum dma_data_direction dir, unsigned long attrs) { struct protection_domain *domain; struct dma_ops_domain *dma_dom; @@ -2444,7 +2444,7 @@ static int sg_num_pages(struct device *dev, */ static int map_sg(struct device *dev, struct scatterlist *sglist, int nelems, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { int mapped_pages = 0, npages = 0, prot = 0, i; struct protection_domain *domain; @@ -2525,7 +2525,7 @@ out_err: */ static void unmap_sg(struct device *dev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { struct protection_domain *domain; struct dma_ops_domain *dma_dom; @@ -2548,7 +2548,7 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist, */ static void *alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr, gfp_t flag, - struct dma_attrs *attrs) + unsigned long attrs) { u64 dma_mask = dev->coherent_dma_mask; struct protection_domain *domain; @@ -2604,7 +2604,7 @@ out_free: */ static void free_coherent(struct device *dev, size_t size, void *virt_addr, dma_addr_t dma_addr, - struct dma_attrs *attrs) + unsigned long attrs) { struct protection_domain *domain; struct dma_ops_domain *dma_dom; diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index ea5a9ebf0f78..08a1e2f3690f 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -286,7 +286,7 @@ void iommu_dma_free(struct device *dev, struct page **pages, size_t size, * or NULL on failure. 
*/ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp, - struct dma_attrs *attrs, int prot, dma_addr_t *handle, + unsigned long attrs, int prot, dma_addr_t *handle, void (*flush_page)(struct device *, const void *, phys_addr_t)) { struct iommu_domain *domain = iommu_get_domain_for_dev(dev); @@ -306,7 +306,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp, } else { size = ALIGN(size, min_size); } - if (dma_get_attr(DMA_ATTR_ALLOC_SINGLE_PAGES, attrs)) + if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES) alloc_sizes = min_size; count = PAGE_ALIGN(size) >> PAGE_SHIFT; @@ -400,7 +400,7 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, } void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size, - enum dma_data_direction dir, struct dma_attrs *attrs) + enum dma_data_direction dir, unsigned long attrs) { __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle); } @@ -560,7 +560,7 @@ out_restore_sg: } void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction dir, struct dma_attrs *attrs) + enum dma_data_direction dir, unsigned long attrs) { /* * The scatterlist segments are mapped into a single diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index afbaa2c69a59..ebb5bf3ddbd9 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -3552,7 +3552,7 @@ error: static dma_addr_t intel_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { return __intel_map_single(dev, page_to_phys(page) + offset, size, dir, *dev->dma_mask); @@ -3711,14 +3711,14 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size) static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { intel_unmap(dev, dev_addr, size); } static void *intel_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags, - struct dma_attrs *attrs) + unsigned long attrs) { struct page *page = NULL; int order; @@ -3764,7 +3764,7 @@ static void *intel_alloc_coherent(struct device *dev, size_t size, } static void intel_free_coherent(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle, struct dma_attrs *attrs) + dma_addr_t dma_handle, unsigned long attrs) { int order; struct page *page = virt_to_page(vaddr); @@ -3779,7 +3779,7 @@ static void intel_free_coherent(struct device *dev, size_t size, void *vaddr, static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { dma_addr_t startaddr = sg_dma_address(sglist) & PAGE_MASK; unsigned long nrpages = 0; @@ -3808,7 +3808,7 @@ static int intel_nontranslate_map_sg(struct device *hddev, } static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems, - enum dma_data_direction dir, struct dma_attrs *attrs) + enum dma_data_direction dir, unsigned long attrs) { int i; struct dmar_domain *domain; diff --git a/drivers/media/platform/sti/bdisp/bdisp-hw.c b/drivers/media/platform/sti/bdisp/bdisp-hw.c index 3df66d11c795..b7892f3efd98 100644 --- a/drivers/media/platform/sti/bdisp/bdisp-hw.c +++ b/drivers/media/platform/sti/bdisp/bdisp-hw.c @@ -430,14 +430,11 @@ int bdisp_hw_get_and_clear_irq(struct bdisp_dev *bdisp) */ void bdisp_hw_free_nodes(struct 
bdisp_ctx *ctx) { - if (ctx && ctx->node[0]) { - DEFINE_DMA_ATTRS(attrs); - - dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); + if (ctx && ctx->node[0]) dma_free_attrs(ctx->bdisp_dev->dev, sizeof(struct bdisp_node) * MAX_NB_NODE, - ctx->node[0], ctx->node_paddr[0], &attrs); - } + ctx->node[0], ctx->node_paddr[0], + DMA_ATTR_WRITE_COMBINE); } /** @@ -455,12 +452,10 @@ int bdisp_hw_alloc_nodes(struct bdisp_ctx *ctx) unsigned int i, node_size = sizeof(struct bdisp_node); void *base; dma_addr_t paddr; - DEFINE_DMA_ATTRS(attrs); /* Allocate all the nodes within a single memory page */ - dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); base = dma_alloc_attrs(dev, node_size * MAX_NB_NODE, &paddr, - GFP_KERNEL | GFP_DMA, &attrs); + GFP_KERNEL | GFP_DMA, DMA_ATTR_WRITE_COMBINE); if (!base) { dev_err(dev, "%s no mem\n", __func__); return -ENOMEM; @@ -493,13 +488,9 @@ void bdisp_hw_free_filters(struct device *dev) { int size = (BDISP_HF_NB * NB_H_FILTER) + (BDISP_VF_NB * NB_V_FILTER); - if (bdisp_h_filter[0].virt) { - DEFINE_DMA_ATTRS(attrs); - - dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); + if (bdisp_h_filter[0].virt) dma_free_attrs(dev, size, bdisp_h_filter[0].virt, - bdisp_h_filter[0].paddr, &attrs); - } + bdisp_h_filter[0].paddr, DMA_ATTR_WRITE_COMBINE); } /** @@ -516,12 +507,11 @@ int bdisp_hw_alloc_filters(struct device *dev) unsigned int i, size; void *base; dma_addr_t paddr; - DEFINE_DMA_ATTRS(attrs); /* Allocate all the filters within a single memory page */ size = (BDISP_HF_NB * NB_H_FILTER) + (BDISP_VF_NB * NB_V_FILTER); - dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); - base = dma_alloc_attrs(dev, size, &paddr, GFP_KERNEL | GFP_DMA, &attrs); + base = dma_alloc_attrs(dev, size, &paddr, GFP_KERNEL | GFP_DMA, + DMA_ATTR_WRITE_COMBINE); if (!base) return -ENOMEM; diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c index 863f658a3fa1..b09b2c9b6b63 100644 --- a/drivers/media/v4l2-core/videobuf2-dma-contig.c +++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c @@ -27,7 +27,7 @@ struct vb2_dc_buf { unsigned long size; void *cookie; dma_addr_t dma_addr; - struct dma_attrs attrs; + unsigned long attrs; enum dma_data_direction dma_dir; struct sg_table *dma_sgt; struct frame_vector *vec; @@ -130,12 +130,12 @@ static void vb2_dc_put(void *buf_priv) kfree(buf->sgt_base); } dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr, - &buf->attrs); + buf->attrs); put_device(buf->dev); kfree(buf); } -static void *vb2_dc_alloc(struct device *dev, const struct dma_attrs *attrs, +static void *vb2_dc_alloc(struct device *dev, unsigned long attrs, unsigned long size, enum dma_data_direction dma_dir, gfp_t gfp_flags) { @@ -146,16 +146,16 @@ static void *vb2_dc_alloc(struct device *dev, const struct dma_attrs *attrs, return ERR_PTR(-ENOMEM); if (attrs) - buf->attrs = *attrs; + buf->attrs = attrs; buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr, - GFP_KERNEL | gfp_flags, &buf->attrs); + GFP_KERNEL | gfp_flags, buf->attrs); if (!buf->cookie) { dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size); kfree(buf); return ERR_PTR(-ENOMEM); } - if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->attrs)) + if ((buf->attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0) buf->vaddr = buf->cookie; /* Prevent the device from being released while the buffer is used */ @@ -189,7 +189,7 @@ static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma) vma->vm_pgoff = 0; ret = dma_mmap_attrs(buf->dev, vma, buf->cookie, - buf->dma_addr, buf->size, &buf->attrs); + 
buf->dma_addr, buf->size, buf->attrs); if (ret) { pr_err("Remapping memory failed, error: %d\n", ret); @@ -372,7 +372,7 @@ static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf) } ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr, - buf->size, &buf->attrs); + buf->size, buf->attrs); if (ret < 0) { dev_err(buf->dev, "failed to get scatterlist from DMA API\n"); kfree(sgt); @@ -421,15 +421,12 @@ static void vb2_dc_put_userptr(void *buf_priv) struct page **pages; if (sgt) { - DEFINE_DMA_ATTRS(attrs); - - dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); /* * No need to sync to CPU, it's already synced to the CPU * since the finish() memop will have been called before this. */ dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, - buf->dma_dir, &attrs); + buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC); pages = frame_vector_pages(buf->vec); /* sgt should exist only if vector contains pages... */ BUG_ON(IS_ERR(pages)); @@ -484,9 +481,6 @@ static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr, struct sg_table *sgt; unsigned long contig_size; unsigned long dma_align = dma_get_cache_alignment(); - DEFINE_DMA_ATTRS(attrs); - - dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); /* Only cache aligned DMA transfers are reliable */ if (!IS_ALIGNED(vaddr | size, dma_align)) { @@ -548,7 +542,7 @@ static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr, * prepare() memop is called. */ sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, - buf->dma_dir, &attrs); + buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC); if (sgt->nents <= 0) { pr_err("failed to map scatterlist\n"); ret = -EIO; @@ -572,7 +566,7 @@ out: fail_map_sg: dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, - buf->dma_dir, &attrs); + buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC); fail_sgt_init: sg_free_table(sgt); diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c index a39db8a6db7a..bd82d709ee82 100644 --- a/drivers/media/v4l2-core/videobuf2-dma-sg.c +++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c @@ -95,7 +95,7 @@ static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf, return 0; } -static void *vb2_dma_sg_alloc(struct device *dev, const struct dma_attrs *dma_attrs, +static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs, unsigned long size, enum dma_data_direction dma_dir, gfp_t gfp_flags) { @@ -103,9 +103,6 @@ static void *vb2_dma_sg_alloc(struct device *dev, const struct dma_attrs *dma_at struct sg_table *sgt; int ret; int num_pages; - DEFINE_DMA_ATTRS(attrs); - - dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); if (WARN_ON(dev == NULL)) return NULL; @@ -144,7 +141,7 @@ static void *vb2_dma_sg_alloc(struct device *dev, const struct dma_attrs *dma_at * prepare() memop is called. 
*/ sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, - buf->dma_dir, &attrs); + buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC); if (!sgt->nents) goto fail_map; @@ -179,13 +176,10 @@ static void vb2_dma_sg_put(void *buf_priv) int i = buf->num_pages; if (atomic_dec_and_test(&buf->refcount)) { - DEFINE_DMA_ATTRS(attrs); - - dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); dprintk(1, "%s: Freeing buffer of %d pages\n", __func__, buf->num_pages); dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, - buf->dma_dir, &attrs); + buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC); if (buf->vaddr) vm_unmap_ram(buf->vaddr, buf->num_pages); sg_free_table(buf->dma_sgt); @@ -228,10 +222,8 @@ static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr, { struct vb2_dma_sg_buf *buf; struct sg_table *sgt; - DEFINE_DMA_ATTRS(attrs); struct frame_vector *vec; - dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); buf = kzalloc(sizeof *buf, GFP_KERNEL); if (!buf) return NULL; @@ -262,7 +254,7 @@ static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr, * prepare() memop is called. */ sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, - buf->dma_dir, &attrs); + buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC); if (!sgt->nents) goto userptr_fail_map; @@ -286,14 +278,11 @@ static void vb2_dma_sg_put_userptr(void *buf_priv) struct vb2_dma_sg_buf *buf = buf_priv; struct sg_table *sgt = &buf->sg_table; int i = buf->num_pages; - DEFINE_DMA_ATTRS(attrs); - - dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); dprintk(1, "%s: Releasing userspace buffer of %d pages\n", __func__, buf->num_pages); dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir, - &attrs); + DMA_ATTR_SKIP_CPU_SYNC); if (buf->vaddr) vm_unmap_ram(buf->vaddr, buf->num_pages); sg_free_table(buf->dma_sgt); diff --git a/drivers/media/v4l2-core/videobuf2-vmalloc.c b/drivers/media/v4l2-core/videobuf2-vmalloc.c index 7e8a07ed8d82..c2820a6e164d 100644 --- a/drivers/media/v4l2-core/videobuf2-vmalloc.c +++ b/drivers/media/v4l2-core/videobuf2-vmalloc.c @@ -33,7 +33,7 @@ struct vb2_vmalloc_buf { static void vb2_vmalloc_put(void *buf_priv); -static void *vb2_vmalloc_alloc(struct device *dev, const struct dma_attrs *attrs, +static void *vb2_vmalloc_alloc(struct device *dev, unsigned long attrs, unsigned long size, enum dma_data_direction dma_dir, gfp_t gfp_flags) { diff --git a/drivers/misc/mic/host/mic_boot.c b/drivers/misc/mic/host/mic_boot.c index e047efd83f57..9599d732aff3 100644 --- a/drivers/misc/mic/host/mic_boot.c +++ b/drivers/misc/mic/host/mic_boot.c @@ -38,7 +38,7 @@ static inline struct mic_device *vpdev_to_mdev(struct device *dev) static dma_addr_t _mic_dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, - enum dma_data_direction dir, struct dma_attrs *attrs) + enum dma_data_direction dir, unsigned long attrs) { void *va = phys_to_virt(page_to_phys(page)) + offset; struct mic_device *mdev = vpdev_to_mdev(dev); @@ -48,7 +48,7 @@ _mic_dma_map_page(struct device *dev, struct page *page, static void _mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { struct mic_device *mdev = vpdev_to_mdev(dev); @@ -144,7 +144,7 @@ static inline struct mic_device *scdev_to_mdev(struct scif_hw_dev *scdev) static void *__mic_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, - struct dma_attrs *attrs) + unsigned long attrs) { struct scif_hw_dev *scdev = dev_get_drvdata(dev); 
struct mic_device *mdev = scdev_to_mdev(scdev); @@ -164,7 +164,7 @@ static void *__mic_dma_alloc(struct device *dev, size_t size, } static void __mic_dma_free(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle, struct dma_attrs *attrs) + dma_addr_t dma_handle, unsigned long attrs) { struct scif_hw_dev *scdev = dev_get_drvdata(dev); struct mic_device *mdev = scdev_to_mdev(scdev); @@ -176,7 +176,7 @@ static void __mic_dma_free(struct device *dev, size_t size, void *vaddr, static dma_addr_t __mic_dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { void *va = phys_to_virt(page_to_phys(page)) + offset; struct scif_hw_dev *scdev = dev_get_drvdata(dev); @@ -188,7 +188,7 @@ __mic_dma_map_page(struct device *dev, struct page *page, unsigned long offset, static void __mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { struct scif_hw_dev *scdev = dev_get_drvdata(dev); struct mic_device *mdev = scdev_to_mdev(scdev); @@ -198,7 +198,7 @@ __mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, static int __mic_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { struct scif_hw_dev *scdev = dev_get_drvdata(dev); struct mic_device *mdev = scdev_to_mdev(scdev); @@ -229,7 +229,7 @@ err: static void __mic_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { struct scif_hw_dev *scdev = dev_get_drvdata(dev); struct mic_device *mdev = scdev_to_mdev(scdev); @@ -327,7 +327,7 @@ static inline struct mic_device *mbdev_to_mdev(struct mbus_device *mbdev) static dma_addr_t mic_dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { void *va = phys_to_virt(page_to_phys(page)) + offset; struct mic_device *mdev = dev_get_drvdata(dev->parent); @@ -338,7 +338,7 @@ mic_dma_map_page(struct device *dev, struct page *page, static void mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) + unsigned long attrs) { struct mic_device *mdev = dev_get_drvdata(dev->parent); mic_unmap_single(mdev, dma_addr, size); diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index e24b05996a1b..3ed6238f8f6e 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c @@ -790,7 +790,7 @@ ccio_map_single(struct device *dev, void *addr, size_t size, static dma_addr_t ccio_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { return ccio_map_single(dev, page_address(page) + offset, size, direction); @@ -806,7 +806,7 @@ ccio_map_page(struct device *dev, struct page *page, unsigned long offset, */ static void ccio_unmap_page(struct device *dev, dma_addr_t iova, size_t size, - enum dma_data_direction direction, struct dma_attrs *attrs) + enum dma_data_direction direction, unsigned long attrs) { struct ioc *ioc; unsigned long flags; @@ -844,7 +844,7 @@ ccio_unmap_page(struct device *dev, dma_addr_t iova, size_t size, */ static void * ccio_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t 
flag, - struct dma_attrs *attrs) + unsigned long attrs) { void *ret; #if 0 @@ -878,9 +878,9 @@ ccio_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, */ static void ccio_free(struct device *dev, size_t size, void *cpu_addr, - dma_addr_t dma_handle, struct dma_attrs *attrs) + dma_addr_t dma_handle, unsigned long attrs) { - ccio_unmap_page(dev, dma_handle, size, 0, NULL); + ccio_unmap_page(dev, dma_handle, size, 0, 0); free_pages((unsigned long)cpu_addr, get_order(size)); } @@ -907,7 +907,7 @@ ccio_free(struct device *dev, size_t size, void *cpu_addr, */ static int ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents, - enum dma_data_direction direction, struct dma_attrs *attrs) + enum dma_data_direction direction, unsigned long attrs) { struct ioc *ioc; int coalesced, filled = 0; @@ -984,7 +984,7 @@ ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents, */ static void ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, - enum dma_data_direction direction, struct dma_attrs *attrs) + enum dma_data_direction direction, unsigned long attrs) { struct ioc *ioc; @@ -1004,7 +1004,7 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT; #endif ccio_unmap_page(dev, sg_dma_address(sglist), - sg_dma_len(sglist), direction, NULL); + sg_dma_len(sglist), direction, 0); ++sglist; } diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index 42ec4600b7e4..151b86b6d2e2 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c @@ -783,7 +783,7 @@ sba_map_single(struct device *dev, void *addr, size_t size, static dma_addr_t sba_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction, - struct dma_attrs *attrs) + unsigned long attrs) { return sba_map_single(dev, page_address(page) + offset, size, direction); @@ -801,7 +801,7 @@ sba_map_page(struct device *dev, struct page *page, unsigned long offset, */ static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size, - enum dma_data_direction direction, struct dma_attrs *attrs) + enum dma_data_direction direction, unsigned long attrs) { struct ioc *ioc; #if DELAYED_RESOURCE_CNT > 0 @@ -876,7 +876,7 @@ sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size, * See Documentation/DMA-API-HOWTO.txt */ static void *sba_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle, - gfp_t gfp, struct dma_attrs *attrs) + gfp_t gfp, unsigned long attrs) { void *ret; @@ -908,9 +908,9 @@ static void *sba_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle */ static void sba_free(struct device *hwdev, size_t size, void *vaddr, - dma_addr_t dma_handle, struct dma_attrs *attrs) + dma_addr_t dma_handle, unsigned long attrs) { - sba_unmap_page(hwdev, dma_handle, size, 0, NULL); + sba_unmap_page(hwdev, dma_handle, size, 0, 0); free_pages((unsigned long) vaddr, get_order(size)); } @@ -943,7 +943,7 @@ int dump_run_sg = 0; */ static int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, - enum dma_data_direction direction, struct dma_attrs *attrs) + enum dma_data_direction direction, unsigned long attrs) { struct ioc *ioc; int coalesced, filled = 0; @@ -1026,7 +1026,7 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, */ static void sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, - enum dma_data_direction direction, struct dma_attrs *attrs) + 
enum dma_data_direction direction, unsigned long attrs) { struct ioc *ioc; #ifdef ASSERT_PDIR_SANITY @@ -1051,7 +1051,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, while (sg_dma_len(sglist) && nents--) { sba_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist), - direction, NULL); + direction, 0); #ifdef SBA_COLLECT_STATS ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT; ioc->usingle_calls--; /* kluge since call is unmap_sg() */ diff --git a/drivers/remoteproc/qcom_q6v5_pil.c b/drivers/remoteproc/qcom_q6v5_pil.c index 24791886219a..2a1b2c7d8f2c 100644 --- a/drivers/remoteproc/qcom_q6v5_pil.c +++ b/drivers/remoteproc/qcom_q6v5_pil.c @@ -349,13 +349,12 @@ static void q6v5proc_halt_axi_port(struct q6v5 *qproc, static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw) { - DEFINE_DMA_ATTRS(attrs); + unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS; dma_addr_t phys; void *ptr; int ret; - dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &attrs); - ptr = dma_alloc_attrs(qproc->dev, fw->size, &phys, GFP_KERNEL, &attrs); + ptr = dma_alloc_attrs(qproc->dev, fw->size, &phys, GFP_KERNEL, dma_attrs); if (!ptr) { dev_err(qproc->dev, "failed to allocate mdt buffer\n"); return -ENOMEM; @@ -372,7 +371,7 @@ static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw) else if (ret < 0) dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret); - dma_free_attrs(qproc->dev, fw->size, ptr, phys, &attrs); + dma_free_attrs(qproc->dev, fw->size, ptr, phys, dma_attrs); return ret < 0 ? ret : 0; } diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c index 2fb90cb6803f..1d7c012f09db 100644 --- a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c +++ b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c @@ -1332,7 +1332,7 @@ static void omapfb_free_fbmem(struct fb_info *fbi) } dma_free_attrs(fbdev->dev, rg->size, rg->token, rg->dma_handle, - &rg->attrs); + rg->attrs); rg->token = NULL; rg->vaddr = NULL; @@ -1370,7 +1370,7 @@ static int omapfb_alloc_fbmem(struct fb_info *fbi, unsigned long size, struct omapfb2_device *fbdev = ofbi->fbdev; struct omapfb2_mem_region *rg; void *token; - DEFINE_DMA_ATTRS(attrs); + unsigned long attrs; dma_addr_t dma_handle; int r; @@ -1386,15 +1386,15 @@ static int omapfb_alloc_fbmem(struct fb_info *fbi, unsigned long size, size = PAGE_ALIGN(size); - dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); + attrs = DMA_ATTR_WRITE_COMBINE; if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) - dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs); + attrs |= DMA_ATTR_NO_KERNEL_MAPPING; DBG("allocating %lu bytes for fb %d\n", size, ofbi->id); token = dma_alloc_attrs(fbdev->dev, size, &dma_handle, - GFP_KERNEL, &attrs); + GFP_KERNEL, attrs); if (token == NULL) { dev_err(fbdev->dev, "failed to allocate framebuffer\n"); @@ -1408,7 +1408,7 @@ static int omapfb_alloc_fbmem(struct fb_info *fbi, unsigned long size, r = omap_vrfb_request_ctx(&rg->vrfb); if (r) { dma_free_attrs(fbdev->dev, size, token, dma_handle, - &attrs); + attrs); dev_err(fbdev->dev, "vrfb create ctx failed\n"); return r; } diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb.h b/drivers/video/fbdev/omap2/omapfb/omapfb.h index bcb9ff4a607d..555487d6dbea 100644 --- a/drivers/video/fbdev/omap2/omapfb/omapfb.h +++ b/drivers/video/fbdev/omap2/omapfb/omapfb.h @@ -28,7 +28,6 @@ #endif #include -#include #include #include
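
All of the hunks above apply one mechanical conversion: the caller-built struct dma_attrs is replaced by a plain unsigned long bitmask of DMA_ATTR_* flags passed by value, so init_dma_attrs()/DEFINE_DMA_ATTRS() and dma_set_attr() collapse into ordinary assignment and bitwise OR, and dma_get_attr() becomes a simple mask test. Below is a minimal sketch of the driver-side pattern before and after the conversion; the device, size and flag choice are placeholders, and the two helpers target the tree before and after this series respectively, so they would not build side by side.

	#include <linux/dma-mapping.h>

	/* Before this series: attributes are a struct built via helpers and
	 * passed by pointer (DEFINE_DMA_ATTRS came from <linux/dma-attrs.h>).
	 */
	static void *alloc_wc_old(struct device *dev, size_t size,
				  dma_addr_t *handle)
	{
		DEFINE_DMA_ATTRS(attrs);

		dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

		return dma_alloc_attrs(dev, size, handle, GFP_KERNEL, &attrs);
	}

	/* After this series: attributes are a plain bitmask; 0 means "no
	 * attributes", and a flag is queried with "attrs & DMA_ATTR_FOO"
	 * instead of dma_get_attr().
	 */
	static void *alloc_wc_new(struct device *dev, size_t size,
				  dma_addr_t *handle)
	{
		unsigned long attrs = DMA_ATTR_WRITE_COMBINE |
				      DMA_ATTR_NO_KERNEL_MAPPING;

		return dma_alloc_attrs(dev, size, handle, GFP_KERNEL, attrs);
	}

Passing the bitmask by value also removes the NULL-versus-empty distinction visible in the parisc hunks, where a NULL attrs argument to ccio_unmap_page()/sba_unmap_page() becomes a literal 0.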