cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

at_xdmac.c (71267B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * Driver for the Atmel Extensible DMA Controller (aka XDMAC on AT91 systems)
      4 *
      5 * Copyright (C) 2014 Atmel Corporation
      6 *
      7 * Author: Ludovic Desroches <ludovic.desroches@atmel.com>
      8 */
      9
     10#include <asm/barrier.h>
     11#include <dt-bindings/dma/at91.h>
     12#include <linux/clk.h>
     13#include <linux/dmaengine.h>
     14#include <linux/dmapool.h>
     15#include <linux/interrupt.h>
     16#include <linux/irq.h>
     17#include <linux/kernel.h>
     18#include <linux/list.h>
     19#include <linux/module.h>
     20#include <linux/of_dma.h>
     21#include <linux/of_platform.h>
     22#include <linux/platform_device.h>
     23#include <linux/pm.h>
     24
     25#include "dmaengine.h"
     26
     27/* Global registers */
     28#define AT_XDMAC_GTYPE		0x00	/* Global Type Register */
     29#define		AT_XDMAC_NB_CH(i)	(((i) & 0x1F) + 1)		/* Number of Channels Minus One */
     30#define		AT_XDMAC_FIFO_SZ(i)	(((i) >> 5) & 0x7FF)		/* Number of Bytes */
     31#define		AT_XDMAC_NB_REQ(i)	((((i) >> 16) & 0x3F) + 1)	/* Number of Peripheral Requests Minus One */
     32#define AT_XDMAC_GCFG		0x04	/* Global Configuration Register */
     33#define		AT_XDMAC_WRHP(i)		(((i) & 0xF) << 4)
     34#define		AT_XDMAC_WRMP(i)		(((i) & 0xF) << 8)
     35#define		AT_XDMAC_WRLP(i)		(((i) & 0xF) << 12)
     36#define		AT_XDMAC_RDHP(i)		(((i) & 0xF) << 16)
     37#define		AT_XDMAC_RDMP(i)		(((i) & 0xF) << 20)
     38#define		AT_XDMAC_RDLP(i)		(((i) & 0xF) << 24)
     39#define		AT_XDMAC_RDSG(i)		(((i) & 0xF) << 28)
     40#define AT_XDMAC_GCFG_M2M	(AT_XDMAC_RDLP(0xF) | AT_XDMAC_WRLP(0xF))
     41#define AT_XDMAC_GCFG_P2M	(AT_XDMAC_RDSG(0x1) | AT_XDMAC_RDHP(0x3) | \
     42				AT_XDMAC_WRHP(0x5))
     43#define AT_XDMAC_GWAC		0x08	/* Global Weighted Arbiter Configuration Register */
     44#define		AT_XDMAC_PW0(i)		(((i) & 0xF) << 0)
     45#define		AT_XDMAC_PW1(i)		(((i) & 0xF) << 4)
     46#define		AT_XDMAC_PW2(i)		(((i) & 0xF) << 8)
     47#define		AT_XDMAC_PW3(i)		(((i) & 0xF) << 12)
     48#define AT_XDMAC_GWAC_M2M	0
     49#define AT_XDMAC_GWAC_P2M	(AT_XDMAC_PW0(0xF) | AT_XDMAC_PW2(0xF))
     50
     51#define AT_XDMAC_GIE		0x0C	/* Global Interrupt Enable Register */
     52#define AT_XDMAC_GID		0x10	/* Global Interrupt Disable Register */
     53#define AT_XDMAC_GIM		0x14	/* Global Interrupt Mask Register */
     54#define AT_XDMAC_GIS		0x18	/* Global Interrupt Status Register */
     55#define AT_XDMAC_GE		0x1C	/* Global Channel Enable Register */
     56#define AT_XDMAC_GD		0x20	/* Global Channel Disable Register */
     57#define AT_XDMAC_GS		0x24	/* Global Channel Status Register */
     58#define AT_XDMAC_VERSION	0xFFC	/* XDMAC Version Register */
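        /*
         * Illustration (hypothetical GTYPE value, not taken from a datasheet):
         * the field macros above decode the packed hardware description. If
         * AT_XDMAC_GTYPE read back as 0x0031400f, then
         *   AT_XDMAC_NB_CH(0x0031400f)   = (0x0f & 0x1f) + 1               = 16 channels,
         *   AT_XDMAC_FIFO_SZ(0x0031400f) = ((0x0031400f >> 5) & 0x7ff)     = 512 bytes,
         *   AT_XDMAC_NB_REQ(0x0031400f)  = ((0x0031400f >> 16) & 0x3f) + 1 = 50 requests.
         */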
     59
      60/* Channel relative register offsets */
     61#define AT_XDMAC_CIE		0x00	/* Channel Interrupt Enable Register */
     62#define		AT_XDMAC_CIE_BIE	BIT(0)	/* End of Block Interrupt Enable Bit */
     63#define		AT_XDMAC_CIE_LIE	BIT(1)	/* End of Linked List Interrupt Enable Bit */
     64#define		AT_XDMAC_CIE_DIE	BIT(2)	/* End of Disable Interrupt Enable Bit */
     65#define		AT_XDMAC_CIE_FIE	BIT(3)	/* End of Flush Interrupt Enable Bit */
     66#define		AT_XDMAC_CIE_RBEIE	BIT(4)	/* Read Bus Error Interrupt Enable Bit */
     67#define		AT_XDMAC_CIE_WBEIE	BIT(5)	/* Write Bus Error Interrupt Enable Bit */
     68#define		AT_XDMAC_CIE_ROIE	BIT(6)	/* Request Overflow Interrupt Enable Bit */
     69#define AT_XDMAC_CID		0x04	/* Channel Interrupt Disable Register */
     70#define		AT_XDMAC_CID_BID	BIT(0)	/* End of Block Interrupt Disable Bit */
     71#define		AT_XDMAC_CID_LID	BIT(1)	/* End of Linked List Interrupt Disable Bit */
     72#define		AT_XDMAC_CID_DID	BIT(2)	/* End of Disable Interrupt Disable Bit */
     73#define		AT_XDMAC_CID_FID	BIT(3)	/* End of Flush Interrupt Disable Bit */
     74#define		AT_XDMAC_CID_RBEID	BIT(4)	/* Read Bus Error Interrupt Disable Bit */
     75#define		AT_XDMAC_CID_WBEID	BIT(5)	/* Write Bus Error Interrupt Disable Bit */
     76#define		AT_XDMAC_CID_ROID	BIT(6)	/* Request Overflow Interrupt Disable Bit */
     77#define AT_XDMAC_CIM		0x08	/* Channel Interrupt Mask Register */
     78#define		AT_XDMAC_CIM_BIM	BIT(0)	/* End of Block Interrupt Mask Bit */
     79#define		AT_XDMAC_CIM_LIM	BIT(1)	/* End of Linked List Interrupt Mask Bit */
     80#define		AT_XDMAC_CIM_DIM	BIT(2)	/* End of Disable Interrupt Mask Bit */
     81#define		AT_XDMAC_CIM_FIM	BIT(3)	/* End of Flush Interrupt Mask Bit */
     82#define		AT_XDMAC_CIM_RBEIM	BIT(4)	/* Read Bus Error Interrupt Mask Bit */
     83#define		AT_XDMAC_CIM_WBEIM	BIT(5)	/* Write Bus Error Interrupt Mask Bit */
     84#define		AT_XDMAC_CIM_ROIM	BIT(6)	/* Request Overflow Interrupt Mask Bit */
     85#define AT_XDMAC_CIS		0x0C	/* Channel Interrupt Status Register */
     86#define		AT_XDMAC_CIS_BIS	BIT(0)	/* End of Block Interrupt Status Bit */
     87#define		AT_XDMAC_CIS_LIS	BIT(1)	/* End of Linked List Interrupt Status Bit */
     88#define		AT_XDMAC_CIS_DIS	BIT(2)	/* End of Disable Interrupt Status Bit */
     89#define		AT_XDMAC_CIS_FIS	BIT(3)	/* End of Flush Interrupt Status Bit */
     90#define		AT_XDMAC_CIS_RBEIS	BIT(4)	/* Read Bus Error Interrupt Status Bit */
     91#define		AT_XDMAC_CIS_WBEIS	BIT(5)	/* Write Bus Error Interrupt Status Bit */
     92#define		AT_XDMAC_CIS_ROIS	BIT(6)	/* Request Overflow Interrupt Status Bit */
     93#define AT_XDMAC_CSA		0x10	/* Channel Source Address Register */
     94#define AT_XDMAC_CDA		0x14	/* Channel Destination Address Register */
     95#define AT_XDMAC_CNDA		0x18	/* Channel Next Descriptor Address Register */
     96#define		AT_XDMAC_CNDA_NDAIF(i)	((i) & 0x1)			/* Channel x Next Descriptor Interface */
     97#define		AT_XDMAC_CNDA_NDA(i)	((i) & 0xfffffffc)		/* Channel x Next Descriptor Address */
     98#define AT_XDMAC_CNDC		0x1C	/* Channel Next Descriptor Control Register */
     99#define		AT_XDMAC_CNDC_NDE		(0x1 << 0)		/* Channel x Next Descriptor Enable */
    100#define		AT_XDMAC_CNDC_NDSUP		(0x1 << 1)		/* Channel x Next Descriptor Source Update */
    101#define		AT_XDMAC_CNDC_NDDUP		(0x1 << 2)		/* Channel x Next Descriptor Destination Update */
    102#define		AT_XDMAC_CNDC_NDVIEW_MASK	GENMASK(28, 27)
    103#define		AT_XDMAC_CNDC_NDVIEW_NDV0	(0x0 << 3)		/* Channel x Next Descriptor View 0 */
    104#define		AT_XDMAC_CNDC_NDVIEW_NDV1	(0x1 << 3)		/* Channel x Next Descriptor View 1 */
    105#define		AT_XDMAC_CNDC_NDVIEW_NDV2	(0x2 << 3)		/* Channel x Next Descriptor View 2 */
    106#define		AT_XDMAC_CNDC_NDVIEW_NDV3	(0x3 << 3)		/* Channel x Next Descriptor View 3 */
    107#define AT_XDMAC_CUBC		0x20	/* Channel Microblock Control Register */
    108#define AT_XDMAC_CBC		0x24	/* Channel Block Control Register */
    109#define AT_XDMAC_CC		0x28	/* Channel Configuration Register */
    110#define		AT_XDMAC_CC_TYPE	(0x1 << 0)	/* Channel Transfer Type */
    111#define			AT_XDMAC_CC_TYPE_MEM_TRAN	(0x0 << 0)	/* Memory to Memory Transfer */
    112#define			AT_XDMAC_CC_TYPE_PER_TRAN	(0x1 << 0)	/* Peripheral to Memory or Memory to Peripheral Transfer */
    113#define		AT_XDMAC_CC_MBSIZE_MASK	(0x3 << 1)
    114#define			AT_XDMAC_CC_MBSIZE_SINGLE	(0x0 << 1)
    115#define			AT_XDMAC_CC_MBSIZE_FOUR		(0x1 << 1)
    116#define			AT_XDMAC_CC_MBSIZE_EIGHT	(0x2 << 1)
    117#define			AT_XDMAC_CC_MBSIZE_SIXTEEN	(0x3 << 1)
    118#define		AT_XDMAC_CC_DSYNC	(0x1 << 4)	/* Channel Synchronization */
    119#define			AT_XDMAC_CC_DSYNC_PER2MEM	(0x0 << 4)
    120#define			AT_XDMAC_CC_DSYNC_MEM2PER	(0x1 << 4)
    121#define		AT_XDMAC_CC_PROT	(0x1 << 5)	/* Channel Protection */
    122#define			AT_XDMAC_CC_PROT_SEC		(0x0 << 5)
    123#define			AT_XDMAC_CC_PROT_UNSEC		(0x1 << 5)
    124#define		AT_XDMAC_CC_SWREQ	(0x1 << 6)	/* Channel Software Request Trigger */
    125#define			AT_XDMAC_CC_SWREQ_HWR_CONNECTED	(0x0 << 6)
    126#define			AT_XDMAC_CC_SWREQ_SWR_CONNECTED	(0x1 << 6)
    127#define		AT_XDMAC_CC_MEMSET	(0x1 << 7)	/* Channel Fill Block of memory */
    128#define			AT_XDMAC_CC_MEMSET_NORMAL_MODE	(0x0 << 7)
    129#define			AT_XDMAC_CC_MEMSET_HW_MODE	(0x1 << 7)
    130#define		AT_XDMAC_CC_CSIZE(i)	((0x7 & (i)) << 8)	/* Channel Chunk Size */
    131#define		AT_XDMAC_CC_DWIDTH_OFFSET	11
    132#define		AT_XDMAC_CC_DWIDTH_MASK	(0x3 << AT_XDMAC_CC_DWIDTH_OFFSET)
    133#define		AT_XDMAC_CC_DWIDTH(i)	((0x3 & (i)) << AT_XDMAC_CC_DWIDTH_OFFSET)	/* Channel Data Width */
    134#define			AT_XDMAC_CC_DWIDTH_BYTE		0x0
    135#define			AT_XDMAC_CC_DWIDTH_HALFWORD	0x1
    136#define			AT_XDMAC_CC_DWIDTH_WORD		0x2
    137#define			AT_XDMAC_CC_DWIDTH_DWORD	0x3
    138#define		AT_XDMAC_CC_SIF(i)	((0x1 & (i)) << 13)	/* Channel Source Interface Identifier */
    139#define		AT_XDMAC_CC_DIF(i)	((0x1 & (i)) << 14)	/* Channel Destination Interface Identifier */
    140#define		AT_XDMAC_CC_SAM_MASK	(0x3 << 16)	/* Channel Source Addressing Mode */
    141#define			AT_XDMAC_CC_SAM_FIXED_AM	(0x0 << 16)
    142#define			AT_XDMAC_CC_SAM_INCREMENTED_AM	(0x1 << 16)
    143#define			AT_XDMAC_CC_SAM_UBS_AM		(0x2 << 16)
    144#define			AT_XDMAC_CC_SAM_UBS_DS_AM	(0x3 << 16)
    145#define		AT_XDMAC_CC_DAM_MASK	(0x3 << 18)	/* Channel Source Addressing Mode */
    146#define			AT_XDMAC_CC_DAM_FIXED_AM	(0x0 << 18)
    147#define			AT_XDMAC_CC_DAM_INCREMENTED_AM	(0x1 << 18)
    148#define			AT_XDMAC_CC_DAM_UBS_AM		(0x2 << 18)
    149#define			AT_XDMAC_CC_DAM_UBS_DS_AM	(0x3 << 18)
    150#define		AT_XDMAC_CC_INITD	(0x1 << 21)	/* Channel Initialization Terminated (read only) */
    151#define			AT_XDMAC_CC_INITD_TERMINATED	(0x0 << 21)
    152#define			AT_XDMAC_CC_INITD_IN_PROGRESS	(0x1 << 21)
    153#define		AT_XDMAC_CC_RDIP	(0x1 << 22)	/* Read in Progress (read only) */
    154#define			AT_XDMAC_CC_RDIP_DONE		(0x0 << 22)
    155#define			AT_XDMAC_CC_RDIP_IN_PROGRESS	(0x1 << 22)
    156#define		AT_XDMAC_CC_WRIP	(0x1 << 23)	/* Write in Progress (read only) */
    157#define			AT_XDMAC_CC_WRIP_DONE		(0x0 << 23)
    158#define			AT_XDMAC_CC_WRIP_IN_PROGRESS	(0x1 << 23)
    159#define		AT_XDMAC_CC_PERID(i)	((0x7f & (i)) << 24)	/* Channel Peripheral Identifier */
    160#define AT_XDMAC_CDS_MSP	0x2C	/* Channel Data Stride Memory Set Pattern */
    161#define AT_XDMAC_CSUS		0x30	/* Channel Source Microblock Stride */
    162#define AT_XDMAC_CDUS		0x34	/* Channel Destination Microblock Stride */
    163
    164/* Microblock control members */
    165#define AT_XDMAC_MBR_UBC_UBLEN_MAX	0xFFFFFFUL	/* Maximum Microblock Length */
    166#define AT_XDMAC_MBR_UBC_NDE		(0x1 << 24)	/* Next Descriptor Enable */
    167#define AT_XDMAC_MBR_UBC_NSEN		(0x1 << 25)	/* Next Descriptor Source Update */
    168#define AT_XDMAC_MBR_UBC_NDEN		(0x1 << 26)	/* Next Descriptor Destination Update */
    169#define AT_XDMAC_MBR_UBC_NDV0		(0x0 << 27)	/* Next Descriptor View 0 */
    170#define AT_XDMAC_MBR_UBC_NDV1		(0x1 << 27)	/* Next Descriptor View 1 */
    171#define AT_XDMAC_MBR_UBC_NDV2		(0x2 << 27)	/* Next Descriptor View 2 */
    172#define AT_XDMAC_MBR_UBC_NDV3		(0x3 << 27)	/* Next Descriptor View 3 */
    173
    174#define AT_XDMAC_MAX_CHAN	0x20
    175#define AT_XDMAC_MAX_CSIZE	16	/* 16 data */
    176#define AT_XDMAC_MAX_DWIDTH	8	/* 64 bits */
    177#define AT_XDMAC_RESIDUE_MAX_RETRIES	5
    178
    179#define AT_XDMAC_DMA_BUSWIDTHS\
    180	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
    181	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
    182	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
    183	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |\
    184	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
    185
    186enum atc_status {
    187	AT_XDMAC_CHAN_IS_CYCLIC = 0,
    188	AT_XDMAC_CHAN_IS_PAUSED,
    189};
    190
    191struct at_xdmac_layout {
    192	/* Global Channel Read Suspend Register */
    193	u8				grs;
    194	/* Global Write Suspend Register */
    195	u8				gws;
    196	/* Global Channel Read Write Suspend Register */
    197	u8				grws;
    198	/* Global Channel Read Write Resume Register */
    199	u8				grwr;
    200	/* Global Channel Software Request Register */
    201	u8				gswr;
    202	/* Global channel Software Request Status Register */
    203	u8				gsws;
    204	/* Global Channel Software Flush Request Register */
    205	u8				gswf;
    206	/* Channel reg base */
    207	u8				chan_cc_reg_base;
    208	/* Source/Destination Interface must be specified or not */
    209	bool				sdif;
    210	/* AXI queue priority configuration supported */
    211	bool				axi_config;
    212};
    213
    214/* ----- Channels ----- */
    215struct at_xdmac_chan {
    216	struct dma_chan			chan;
    217	void __iomem			*ch_regs;
    218	u32				mask;		/* Channel Mask */
    219	u32				cfg;		/* Channel Configuration Register */
    220	u8				perid;		/* Peripheral ID */
    221	u8				perif;		/* Peripheral Interface */
    222	u8				memif;		/* Memory Interface */
    223	u32				save_cc;
    224	u32				save_cim;
    225	u32				save_cnda;
    226	u32				save_cndc;
    227	u32				irq_status;
    228	unsigned long			status;
    229	struct tasklet_struct		tasklet;
    230	struct dma_slave_config		sconfig;
    231
    232	spinlock_t			lock;
    233
    234	struct list_head		xfers_list;
    235	struct list_head		free_descs_list;
    236};
    237
    238
    239/* ----- Controller ----- */
    240struct at_xdmac {
    241	struct dma_device	dma;
    242	void __iomem		*regs;
    243	int			irq;
    244	struct clk		*clk;
    245	u32			save_gim;
    246	struct dma_pool		*at_xdmac_desc_pool;
    247	const struct at_xdmac_layout	*layout;
    248	struct at_xdmac_chan	chan[];
    249};
    250
    251
    252/* ----- Descriptors ----- */
    253
    254/* Linked List Descriptor */
    255struct at_xdmac_lld {
    256	u32 mbr_nda;	/* Next Descriptor Member */
    257	u32 mbr_ubc;	/* Microblock Control Member */
    258	u32 mbr_sa;	/* Source Address Member */
    259	u32 mbr_da;	/* Destination Address Member */
    260	u32 mbr_cfg;	/* Configuration Register */
    261	u32 mbr_bc;	/* Block Control Register */
    262	u32 mbr_ds;	/* Data Stride Register */
    263	u32 mbr_sus;	/* Source Microblock Stride Register */
    264	u32 mbr_dus;	/* Destination Microblock Stride Register */
    265};
    266
    267/* 64-bit alignment needed to update CNDA and CUBC registers in an atomic way. */
    268struct at_xdmac_desc {
    269	struct at_xdmac_lld		lld;
    270	enum dma_transfer_direction	direction;
    271	struct dma_async_tx_descriptor	tx_dma_desc;
    272	struct list_head		desc_node;
    273	/* Following members are only used by the first descriptor */
    274	bool				active_xfer;
    275	unsigned int			xfer_size;
    276	struct list_head		descs_list;
    277	struct list_head		xfer_node;
    278} __aligned(sizeof(u64));
    279
    280static const struct at_xdmac_layout at_xdmac_sama5d4_layout = {
    281	.grs = 0x28,
    282	.gws = 0x2C,
    283	.grws = 0x30,
    284	.grwr = 0x34,
    285	.gswr = 0x38,
    286	.gsws = 0x3C,
    287	.gswf = 0x40,
    288	.chan_cc_reg_base = 0x50,
    289	.sdif = true,
    290	.axi_config = false,
    291};
    292
    293static const struct at_xdmac_layout at_xdmac_sama7g5_layout = {
    294	.grs = 0x30,
    295	.gws = 0x38,
    296	.grws = 0x40,
    297	.grwr = 0x44,
    298	.gswr = 0x48,
    299	.gsws = 0x4C,
    300	.gswf = 0x50,
    301	.chan_cc_reg_base = 0x60,
    302	.sdif = false,
    303	.axi_config = true,
    304};
    305
    306static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb)
    307{
    308	return atxdmac->regs + (atxdmac->layout->chan_cc_reg_base + chan_nb * 0x40);
    309}
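        /*
         * Worked example (values from the layouts above): with the sama5d4
         * layout, chan_cc_reg_base is 0x50 and every channel occupies 0x40
         * bytes of register space, so channel 2 lives at
         * regs + 0x50 + 2 * 0x40 = regs + 0xd0. With the sama7g5 layout the
         * same channel sits at regs + 0xe0.
         */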
    310
    311#define at_xdmac_read(atxdmac, reg) readl_relaxed((atxdmac)->regs + (reg))
    312#define at_xdmac_write(atxdmac, reg, value) \
    313	writel_relaxed((value), (atxdmac)->regs + (reg))
    314
    315#define at_xdmac_chan_read(atchan, reg) readl_relaxed((atchan)->ch_regs + (reg))
    316#define at_xdmac_chan_write(atchan, reg, value) writel_relaxed((value), (atchan)->ch_regs + (reg))
    317
    318static inline struct at_xdmac_chan *to_at_xdmac_chan(struct dma_chan *dchan)
    319{
    320	return container_of(dchan, struct at_xdmac_chan, chan);
    321}
    322
    323static struct device *chan2dev(struct dma_chan *chan)
    324{
    325	return &chan->dev->device;
    326}
    327
    328static inline struct at_xdmac *to_at_xdmac(struct dma_device *ddev)
    329{
    330	return container_of(ddev, struct at_xdmac, dma);
    331}
    332
    333static inline struct at_xdmac_desc *txd_to_at_desc(struct dma_async_tx_descriptor *txd)
    334{
    335	return container_of(txd, struct at_xdmac_desc, tx_dma_desc);
    336}
    337
    338static inline int at_xdmac_chan_is_cyclic(struct at_xdmac_chan *atchan)
    339{
    340	return test_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
    341}
    342
    343static inline int at_xdmac_chan_is_paused(struct at_xdmac_chan *atchan)
    344{
    345	return test_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
    346}
    347
    348static inline bool at_xdmac_chan_is_peripheral_xfer(u32 cfg)
    349{
    350	return cfg & AT_XDMAC_CC_TYPE_PER_TRAN;
    351}
    352
    353static inline u8 at_xdmac_get_dwidth(u32 cfg)
    354{
    355	return (cfg & AT_XDMAC_CC_DWIDTH_MASK) >> AT_XDMAC_CC_DWIDTH_OFFSET;
    356};
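        /*
         * Example: for a CC value whose DWIDTH field holds
         * AT_XDMAC_CC_DWIDTH_WORD (0x2), at_xdmac_get_dwidth() returns 2,
         * i.e. each data item moved by the channel is 1 << 2 = 4 bytes wide.
         * The prep routines below use this shift to convert byte lengths
         * into microblock lengths.
         */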
    357
    358static unsigned int init_nr_desc_per_channel = 64;
    359module_param(init_nr_desc_per_channel, uint, 0644);
    360MODULE_PARM_DESC(init_nr_desc_per_channel,
    361		 "initial descriptors per channel (default: 64)");
    362
    363
    364static bool at_xdmac_chan_is_enabled(struct at_xdmac_chan *atchan)
    365{
    366	return at_xdmac_chan_read(atchan, AT_XDMAC_GS) & atchan->mask;
    367}
    368
    369static void at_xdmac_off(struct at_xdmac *atxdmac)
    370{
    371	at_xdmac_write(atxdmac, AT_XDMAC_GD, -1L);
    372
     373	/* Wait until all channels are disabled. */
    374	while (at_xdmac_read(atxdmac, AT_XDMAC_GS))
    375		cpu_relax();
    376
    377	at_xdmac_write(atxdmac, AT_XDMAC_GID, -1L);
    378}
    379
     380/* Must be called with the channel lock held. */
    381static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
    382				struct at_xdmac_desc *first)
    383{
    384	struct at_xdmac	*atxdmac = to_at_xdmac(atchan->chan.device);
    385	u32		reg;
    386
    387	dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, first);
    388
     389	/* Mark the transfer as active so we do not try to start it again. */
    390	first->active_xfer = true;
    391
    392	/* Tell xdmac where to get the first descriptor. */
    393	reg = AT_XDMAC_CNDA_NDA(first->tx_dma_desc.phys);
    394	if (atxdmac->layout->sdif)
    395		reg |= AT_XDMAC_CNDA_NDAIF(atchan->memif);
    396
    397	at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, reg);
    398
     399	/*
     400	 * When doing a non-cyclic transfer we need to use next descriptor
     401	 * view 2 (or higher) since some fields of the configuration register
     402	 * depend on the transfer size and the src/dest addresses.
     403	 */
    404	if (at_xdmac_chan_is_cyclic(atchan))
    405		reg = AT_XDMAC_CNDC_NDVIEW_NDV1;
    406	else if ((first->lld.mbr_ubc &
    407		  AT_XDMAC_CNDC_NDVIEW_MASK) == AT_XDMAC_MBR_UBC_NDV3)
    408		reg = AT_XDMAC_CNDC_NDVIEW_NDV3;
    409	else
    410		reg = AT_XDMAC_CNDC_NDVIEW_NDV2;
     411	/*
     412	 * Even though the register will be updated from the configuration in
     413	 * the descriptor when using view 2 or higher, the PROT bit won't be
     414	 * set properly. This bit can only be modified through the channel
     415	 * configuration register.
     416	 */
    417	at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg);
    418
    419	reg |= AT_XDMAC_CNDC_NDDUP
    420	       | AT_XDMAC_CNDC_NDSUP
    421	       | AT_XDMAC_CNDC_NDE;
    422	at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, reg);
    423
    424	dev_vdbg(chan2dev(&atchan->chan),
    425		 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
    426		 __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
    427		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
    428		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
    429		 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
    430		 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
    431		 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
    432
    433	at_xdmac_chan_write(atchan, AT_XDMAC_CID, 0xffffffff);
    434	reg = AT_XDMAC_CIE_RBEIE | AT_XDMAC_CIE_WBEIE;
    435	/*
    436	 * Request Overflow Error is only for peripheral synchronized transfers
    437	 */
    438	if (at_xdmac_chan_is_peripheral_xfer(first->lld.mbr_cfg))
    439		reg |= AT_XDMAC_CIE_ROIE;
    440
     441	/*
     442	 * There is no end of list when doing cyclic DMA, so we need to get
     443	 * an interrupt after each period.
     444	 */
    445	if (at_xdmac_chan_is_cyclic(atchan))
    446		at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
    447				    reg | AT_XDMAC_CIE_BIE);
    448	else
    449		at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
    450				    reg | AT_XDMAC_CIE_LIE);
    451	at_xdmac_write(atxdmac, AT_XDMAC_GIE, atchan->mask);
    452	dev_vdbg(chan2dev(&atchan->chan),
    453		 "%s: enable channel (0x%08x)\n", __func__, atchan->mask);
    454	wmb();
    455	at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
    456
    457	dev_vdbg(chan2dev(&atchan->chan),
    458		 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
    459		 __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
    460		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
    461		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
    462		 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
    463		 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
    464		 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
    465
    466}
    467
    468static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
    469{
    470	struct at_xdmac_desc	*desc = txd_to_at_desc(tx);
    471	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(tx->chan);
    472	dma_cookie_t		cookie;
    473	unsigned long		irqflags;
    474
    475	spin_lock_irqsave(&atchan->lock, irqflags);
    476	cookie = dma_cookie_assign(tx);
    477
    478	list_add_tail(&desc->xfer_node, &atchan->xfers_list);
    479	spin_unlock_irqrestore(&atchan->lock, irqflags);
    480
    481	dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n",
    482		 __func__, atchan, desc);
    483
    484	return cookie;
    485}
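        /*
         * Typical client-side flow, for context (a sketch based on the
         * generic dmaengine API, not code from this driver): a slave driver
         * prepares a descriptor and submits it, which ends up calling
         * at_xdmac_tx_submit() above to queue it on xfers_list:
         *
         *	txd = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
         *				      DMA_PREP_INTERRUPT);
         *	cookie = dmaengine_submit(txd);
         *	dma_async_issue_pending(chan);
         *
         * The transfer itself is only started later, from the controller's
         * issue_pending callback (not part of this excerpt).
         */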
    486
    487static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan,
    488						 gfp_t gfp_flags)
    489{
    490	struct at_xdmac_desc	*desc;
    491	struct at_xdmac		*atxdmac = to_at_xdmac(chan->device);
    492	dma_addr_t		phys;
    493
    494	desc = dma_pool_zalloc(atxdmac->at_xdmac_desc_pool, gfp_flags, &phys);
    495	if (desc) {
    496		INIT_LIST_HEAD(&desc->descs_list);
    497		dma_async_tx_descriptor_init(&desc->tx_dma_desc, chan);
    498		desc->tx_dma_desc.tx_submit = at_xdmac_tx_submit;
    499		desc->tx_dma_desc.phys = phys;
    500	}
    501
    502	return desc;
    503}
    504
    505static void at_xdmac_init_used_desc(struct at_xdmac_desc *desc)
    506{
    507	memset(&desc->lld, 0, sizeof(desc->lld));
    508	INIT_LIST_HEAD(&desc->descs_list);
    509	desc->direction = DMA_TRANS_NONE;
    510	desc->xfer_size = 0;
    511	desc->active_xfer = false;
    512}
    513
     514/* Caller must hold the channel lock. */
    515static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
    516{
    517	struct at_xdmac_desc *desc;
    518
    519	if (list_empty(&atchan->free_descs_list)) {
    520		desc = at_xdmac_alloc_desc(&atchan->chan, GFP_NOWAIT);
    521	} else {
    522		desc = list_first_entry(&atchan->free_descs_list,
    523					struct at_xdmac_desc, desc_node);
    524		list_del(&desc->desc_node);
    525		at_xdmac_init_used_desc(desc);
    526	}
    527
    528	return desc;
    529}
    530
    531static void at_xdmac_queue_desc(struct dma_chan *chan,
    532				struct at_xdmac_desc *prev,
    533				struct at_xdmac_desc *desc)
    534{
    535	if (!prev || !desc)
    536		return;
    537
    538	prev->lld.mbr_nda = desc->tx_dma_desc.phys;
    539	prev->lld.mbr_ubc |= AT_XDMAC_MBR_UBC_NDE;
    540
    541	dev_dbg(chan2dev(chan),	"%s: chain lld: prev=0x%p, mbr_nda=%pad\n",
    542		__func__, prev, &prev->lld.mbr_nda);
    543}
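        /*
         * Illustration: after at_xdmac_queue_desc(chan, d1, d2), d1's linked
         * list descriptor points at d2 (d1->lld.mbr_nda = d2->tx_dma_desc.phys)
         * and has AT_XDMAC_MBR_UBC_NDE set, so the controller fetches d2 by
         * itself once d1's microblock completes.
         */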
    544
    545static inline void at_xdmac_increment_block_count(struct dma_chan *chan,
    546						  struct at_xdmac_desc *desc)
    547{
    548	if (!desc)
    549		return;
    550
    551	desc->lld.mbr_bc++;
    552
    553	dev_dbg(chan2dev(chan),
    554		"%s: incrementing the block count of the desc 0x%p\n",
    555		__func__, desc);
    556}
    557
    558static struct dma_chan *at_xdmac_xlate(struct of_phandle_args *dma_spec,
    559				       struct of_dma *of_dma)
    560{
    561	struct at_xdmac		*atxdmac = of_dma->of_dma_data;
    562	struct at_xdmac_chan	*atchan;
    563	struct dma_chan		*chan;
    564	struct device		*dev = atxdmac->dma.dev;
    565
    566	if (dma_spec->args_count != 1) {
     567		dev_err(dev, "dma phandle args: bad number of args\n");
    568		return NULL;
    569	}
    570
    571	chan = dma_get_any_slave_channel(&atxdmac->dma);
    572	if (!chan) {
    573		dev_err(dev, "can't get a dma channel\n");
    574		return NULL;
    575	}
    576
    577	atchan = to_at_xdmac_chan(chan);
    578	atchan->memif = AT91_XDMAC_DT_GET_MEM_IF(dma_spec->args[0]);
    579	atchan->perif = AT91_XDMAC_DT_GET_PER_IF(dma_spec->args[0]);
    580	atchan->perid = AT91_XDMAC_DT_GET_PERID(dma_spec->args[0]);
    581	dev_dbg(dev, "chan dt cfg: memif=%u perif=%u perid=%u\n",
    582		 atchan->memif, atchan->perif, atchan->perid);
    583
    584	return chan;
    585}
    586
    587static int at_xdmac_compute_chan_conf(struct dma_chan *chan,
    588				      enum dma_transfer_direction direction)
    589{
    590	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
    591	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
    592	int			csize, dwidth;
    593
    594	if (direction == DMA_DEV_TO_MEM) {
    595		atchan->cfg =
    596			AT91_XDMAC_DT_PERID(atchan->perid)
    597			| AT_XDMAC_CC_DAM_INCREMENTED_AM
    598			| AT_XDMAC_CC_SAM_FIXED_AM
    599			| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
    600			| AT_XDMAC_CC_DSYNC_PER2MEM
    601			| AT_XDMAC_CC_MBSIZE_SIXTEEN
    602			| AT_XDMAC_CC_TYPE_PER_TRAN;
    603		if (atxdmac->layout->sdif)
    604			atchan->cfg |= AT_XDMAC_CC_DIF(atchan->memif) |
    605				       AT_XDMAC_CC_SIF(atchan->perif);
    606
    607		csize = ffs(atchan->sconfig.src_maxburst) - 1;
    608		if (csize < 0) {
    609			dev_err(chan2dev(chan), "invalid src maxburst value\n");
    610			return -EINVAL;
    611		}
    612		atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
    613		dwidth = ffs(atchan->sconfig.src_addr_width) - 1;
    614		if (dwidth < 0) {
    615			dev_err(chan2dev(chan), "invalid src addr width value\n");
    616			return -EINVAL;
    617		}
    618		atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
    619	} else if (direction == DMA_MEM_TO_DEV) {
    620		atchan->cfg =
    621			AT91_XDMAC_DT_PERID(atchan->perid)
    622			| AT_XDMAC_CC_DAM_FIXED_AM
    623			| AT_XDMAC_CC_SAM_INCREMENTED_AM
    624			| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
    625			| AT_XDMAC_CC_DSYNC_MEM2PER
    626			| AT_XDMAC_CC_MBSIZE_SIXTEEN
    627			| AT_XDMAC_CC_TYPE_PER_TRAN;
    628		if (atxdmac->layout->sdif)
    629			atchan->cfg |= AT_XDMAC_CC_DIF(atchan->perif) |
    630				       AT_XDMAC_CC_SIF(atchan->memif);
    631
    632		csize = ffs(atchan->sconfig.dst_maxburst) - 1;
    633		if (csize < 0) {
     634			dev_err(chan2dev(chan), "invalid dst maxburst value\n");
    635			return -EINVAL;
    636		}
    637		atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
    638		dwidth = ffs(atchan->sconfig.dst_addr_width) - 1;
    639		if (dwidth < 0) {
    640			dev_err(chan2dev(chan), "invalid dst addr width value\n");
    641			return -EINVAL;
    642		}
    643		atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
    644	}
    645
    646	dev_dbg(chan2dev(chan),	"%s: cfg=0x%08x\n", __func__, atchan->cfg);
    647
    648	return 0;
    649}
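        /*
         * Worked example (hypothetical slave configuration): for
         * DMA_DEV_TO_MEM with src_maxburst = 16 and src_addr_width =
         * DMA_SLAVE_BUSWIDTH_4_BYTES, the code above computes
         * csize = ffs(16) - 1 = 4 (chunks of 16 data) and
         * dwidth = ffs(4) - 1 = 2 = AT_XDMAC_CC_DWIDTH_WORD, which are then
         * merged into atchan->cfg via AT_XDMAC_CC_CSIZE() and
         * AT_XDMAC_CC_DWIDTH().
         */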
    650
     651/*
     652 * Only check that the maxburst and addr width values are supported by
     653 * the controller; we cannot check that the configuration is suitable
     654 * for the transfer since we don't know the direction at this stage.
     655 */
    656static int at_xdmac_check_slave_config(struct dma_slave_config *sconfig)
    657{
    658	if ((sconfig->src_maxburst > AT_XDMAC_MAX_CSIZE)
    659	    || (sconfig->dst_maxburst > AT_XDMAC_MAX_CSIZE))
    660		return -EINVAL;
    661
    662	if ((sconfig->src_addr_width > AT_XDMAC_MAX_DWIDTH)
    663	    || (sconfig->dst_addr_width > AT_XDMAC_MAX_DWIDTH))
    664		return -EINVAL;
    665
    666	return 0;
    667}
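        /*
         * Example of a configuration that passes this check (a sketch; the
         * peripheral FIFO address is hypothetical): both maxburst values are
         * <= AT_XDMAC_MAX_CSIZE and both address widths are
         * <= AT_XDMAC_MAX_DWIDTH.
         *
         *	struct dma_slave_config cfg = {
         *		.direction	= DMA_DEV_TO_MEM,
         *		.src_addr	= periph_rx_fifo_addr,
         *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
         *		.src_maxburst	= 1,
         *	};
         *	dmaengine_slave_config(chan, &cfg);
         */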
    668
    669static int at_xdmac_set_slave_config(struct dma_chan *chan,
    670				      struct dma_slave_config *sconfig)
    671{
    672	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
    673
    674	if (at_xdmac_check_slave_config(sconfig)) {
    675		dev_err(chan2dev(chan), "invalid slave configuration\n");
    676		return -EINVAL;
    677	}
    678
    679	memcpy(&atchan->sconfig, sconfig, sizeof(atchan->sconfig));
    680
    681	return 0;
    682}
    683
    684static struct dma_async_tx_descriptor *
    685at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
    686		       unsigned int sg_len, enum dma_transfer_direction direction,
    687		       unsigned long flags, void *context)
    688{
    689	struct at_xdmac_chan		*atchan = to_at_xdmac_chan(chan);
    690	struct at_xdmac_desc		*first = NULL, *prev = NULL;
    691	struct scatterlist		*sg;
    692	int				i;
    693	unsigned int			xfer_size = 0;
    694	unsigned long			irqflags;
    695	struct dma_async_tx_descriptor	*ret = NULL;
    696
    697	if (!sgl)
    698		return NULL;
    699
    700	if (!is_slave_direction(direction)) {
    701		dev_err(chan2dev(chan), "invalid DMA direction\n");
    702		return NULL;
    703	}
    704
    705	dev_dbg(chan2dev(chan), "%s: sg_len=%d, dir=%s, flags=0x%lx\n",
    706		 __func__, sg_len,
    707		 direction == DMA_MEM_TO_DEV ? "to device" : "from device",
    708		 flags);
    709
     710	/* Protect the sconfig field, which can be modified by set_slave_config. */
    711	spin_lock_irqsave(&atchan->lock, irqflags);
    712
    713	if (at_xdmac_compute_chan_conf(chan, direction))
    714		goto spin_unlock;
    715
    716	/* Prepare descriptors. */
    717	for_each_sg(sgl, sg, sg_len, i) {
    718		struct at_xdmac_desc	*desc = NULL;
    719		u32			len, mem, dwidth, fixed_dwidth;
    720
    721		len = sg_dma_len(sg);
    722		mem = sg_dma_address(sg);
    723		if (unlikely(!len)) {
    724			dev_err(chan2dev(chan), "sg data length is zero\n");
    725			goto spin_unlock;
    726		}
    727		dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n",
    728			 __func__, i, len, mem);
    729
    730		desc = at_xdmac_get_desc(atchan);
    731		if (!desc) {
    732			dev_err(chan2dev(chan), "can't get descriptor\n");
    733			if (first)
    734				list_splice_tail_init(&first->descs_list,
    735						      &atchan->free_descs_list);
    736			goto spin_unlock;
    737		}
    738
    739		/* Linked list descriptor setup. */
    740		if (direction == DMA_DEV_TO_MEM) {
    741			desc->lld.mbr_sa = atchan->sconfig.src_addr;
    742			desc->lld.mbr_da = mem;
    743		} else {
    744			desc->lld.mbr_sa = mem;
    745			desc->lld.mbr_da = atchan->sconfig.dst_addr;
    746		}
    747		dwidth = at_xdmac_get_dwidth(atchan->cfg);
    748		fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
    749			       ? dwidth
    750			       : AT_XDMAC_CC_DWIDTH_BYTE;
    751		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2			/* next descriptor view */
    752			| AT_XDMAC_MBR_UBC_NDEN					/* next descriptor dst parameter update */
    753			| AT_XDMAC_MBR_UBC_NSEN					/* next descriptor src parameter update */
    754			| (len >> fixed_dwidth);				/* microblock length */
    755		desc->lld.mbr_cfg = (atchan->cfg & ~AT_XDMAC_CC_DWIDTH_MASK) |
    756				    AT_XDMAC_CC_DWIDTH(fixed_dwidth);
    757		dev_dbg(chan2dev(chan),
    758			 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
    759			 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);
    760
    761		/* Chain lld. */
    762		if (prev)
    763			at_xdmac_queue_desc(chan, prev, desc);
    764
    765		prev = desc;
    766		if (!first)
    767			first = desc;
    768
    769		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
    770			 __func__, desc, first);
    771		list_add_tail(&desc->desc_node, &first->descs_list);
    772		xfer_size += len;
    773	}
    774
    775
    776	first->tx_dma_desc.flags = flags;
    777	first->xfer_size = xfer_size;
    778	first->direction = direction;
    779	ret = &first->tx_dma_desc;
    780
    781spin_unlock:
    782	spin_unlock_irqrestore(&atchan->lock, irqflags);
    783	return ret;
    784}
    785
    786static struct dma_async_tx_descriptor *
    787at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
    788			 size_t buf_len, size_t period_len,
    789			 enum dma_transfer_direction direction,
    790			 unsigned long flags)
    791{
    792	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
    793	struct at_xdmac_desc	*first = NULL, *prev = NULL;
    794	unsigned int		periods = buf_len / period_len;
    795	int			i;
    796	unsigned long		irqflags;
    797
    798	dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n",
    799		__func__, &buf_addr, buf_len, period_len,
    800		direction == DMA_MEM_TO_DEV ? "mem2per" : "per2mem", flags);
    801
    802	if (!is_slave_direction(direction)) {
    803		dev_err(chan2dev(chan), "invalid DMA direction\n");
    804		return NULL;
    805	}
    806
    807	if (test_and_set_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status)) {
    808		dev_err(chan2dev(chan), "channel currently used\n");
    809		return NULL;
    810	}
    811
    812	if (at_xdmac_compute_chan_conf(chan, direction))
    813		return NULL;
    814
    815	for (i = 0; i < periods; i++) {
    816		struct at_xdmac_desc	*desc = NULL;
    817
    818		spin_lock_irqsave(&atchan->lock, irqflags);
    819		desc = at_xdmac_get_desc(atchan);
    820		if (!desc) {
    821			dev_err(chan2dev(chan), "can't get descriptor\n");
    822			if (first)
    823				list_splice_tail_init(&first->descs_list,
    824						      &atchan->free_descs_list);
    825			spin_unlock_irqrestore(&atchan->lock, irqflags);
    826			return NULL;
    827		}
    828		spin_unlock_irqrestore(&atchan->lock, irqflags);
    829		dev_dbg(chan2dev(chan),
    830			"%s: desc=0x%p, tx_dma_desc.phys=%pad\n",
    831			__func__, desc, &desc->tx_dma_desc.phys);
    832
    833		if (direction == DMA_DEV_TO_MEM) {
    834			desc->lld.mbr_sa = atchan->sconfig.src_addr;
    835			desc->lld.mbr_da = buf_addr + i * period_len;
    836		} else {
    837			desc->lld.mbr_sa = buf_addr + i * period_len;
    838			desc->lld.mbr_da = atchan->sconfig.dst_addr;
    839		}
    840		desc->lld.mbr_cfg = atchan->cfg;
    841		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1
    842			| AT_XDMAC_MBR_UBC_NDEN
    843			| AT_XDMAC_MBR_UBC_NSEN
    844			| period_len >> at_xdmac_get_dwidth(desc->lld.mbr_cfg);
    845
    846		dev_dbg(chan2dev(chan),
    847			 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
    848			 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);
    849
    850		/* Chain lld. */
    851		if (prev)
    852			at_xdmac_queue_desc(chan, prev, desc);
    853
    854		prev = desc;
    855		if (!first)
    856			first = desc;
    857
    858		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
    859			 __func__, desc, first);
    860		list_add_tail(&desc->desc_node, &first->descs_list);
    861	}
    862
    863	at_xdmac_queue_desc(chan, prev, first);
    864	first->tx_dma_desc.flags = flags;
    865	first->xfer_size = buf_len;
    866	first->direction = direction;
    867
    868	return &first->tx_dma_desc;
    869}
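        /*
         * Example (hypothetical sizes): calling at_xdmac_prep_dma_cyclic()
         * with buf_len = 4096 and period_len = 1024 builds 4 descriptors, one
         * per period, each covering 1024 >> dwidth data. The final
         * at_xdmac_queue_desc(chan, prev, first) call above links the last
         * descriptor back to the first so the ring repeats forever, and
         * at_xdmac_start_xfer() enables the end-of-block interrupt (BIE) so
         * the client gets a callback after every period.
         */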
    870
    871static inline u32 at_xdmac_align_width(struct dma_chan *chan, dma_addr_t addr)
    872{
    873	u32 width;
    874
     875	/*
     876	 * Check address alignment to select the greatest data width we
     877	 * can use.
     878	 *
     879	 * Some XDMAC implementations don't provide dword transfers; in
     880	 * that case selecting dword has the same behavior as
     881	 * selecting word transfers.
     882	 */
    883	if (!(addr & 7)) {
    884		width = AT_XDMAC_CC_DWIDTH_DWORD;
    885		dev_dbg(chan2dev(chan), "%s: dwidth: double word\n", __func__);
    886	} else if (!(addr & 3)) {
    887		width = AT_XDMAC_CC_DWIDTH_WORD;
    888		dev_dbg(chan2dev(chan), "%s: dwidth: word\n", __func__);
    889	} else if (!(addr & 1)) {
    890		width = AT_XDMAC_CC_DWIDTH_HALFWORD;
    891		dev_dbg(chan2dev(chan), "%s: dwidth: half word\n", __func__);
    892	} else {
    893		width = AT_XDMAC_CC_DWIDTH_BYTE;
    894		dev_dbg(chan2dev(chan), "%s: dwidth: byte\n", __func__);
    895	}
    896
    897	return width;
    898}
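        /*
         * Worked example: an address such as 0x20000004 fails the 8-byte
         * check (0x20000004 & 7 == 4) but passes the 4-byte one
         * (0x20000004 & 3 == 0), so word width is selected. Callers typically
         * OR the source address, destination address and length together (see
         * the memcpy and interleaved helpers below) so that the least aligned
         * of the three constrains the data width.
         */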
    899
    900static struct at_xdmac_desc *
    901at_xdmac_interleaved_queue_desc(struct dma_chan *chan,
    902				struct at_xdmac_chan *atchan,
    903				struct at_xdmac_desc *prev,
    904				dma_addr_t src, dma_addr_t dst,
    905				struct dma_interleaved_template *xt,
    906				struct data_chunk *chunk)
    907{
    908	struct at_xdmac_desc	*desc;
    909	u32			dwidth;
    910	unsigned long		flags;
    911	size_t			ublen;
     912	/*
     913	 * WARNING: The channel configuration is set here since there is no
     914	 * dmaengine_slave_config call in this case. Moreover, we don't know
     915	 * the direction, which means we can't dynamically set the source and
     916	 * dest interfaces, so we have to use the same one for both. Only
     917	 * interface 0 allows EBI access. Fortunately we can access DDR
     918	 * through both ports (at least on SAMA5D4x), so using the same
     919	 * interface for source and dest works around the unknown direction.
     920	 * ERRATA: Even though it is unused for memory transfers, the PERID
     921	 * must not match the one of another channel, otherwise it could lead
     922	 * to spurious flag status.
     923	 * For the SAMA7G5x case, the SIF and DIF fields are no longer used,
     924	 * so there is no need to set the SIF/DIF interfaces here.
     925	 * For SAMA5D4x and SAMA5D2x the SIF and DIF are already configured as
     926	 * zero.
     927	 */
    928	u32			chan_cc = AT_XDMAC_CC_PERID(0x7f)
    929					| AT_XDMAC_CC_MBSIZE_SIXTEEN
    930					| AT_XDMAC_CC_TYPE_MEM_TRAN;
    931
    932	dwidth = at_xdmac_align_width(chan, src | dst | chunk->size);
    933	if (chunk->size >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) {
    934		dev_dbg(chan2dev(chan),
    935			"%s: chunk too big (%zu, max size %lu)...\n",
    936			__func__, chunk->size,
    937			AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth);
    938		return NULL;
    939	}
    940
    941	if (prev)
    942		dev_dbg(chan2dev(chan),
    943			"Adding items at the end of desc 0x%p\n", prev);
    944
    945	if (xt->src_inc) {
    946		if (xt->src_sgl)
    947			chan_cc |=  AT_XDMAC_CC_SAM_UBS_AM;
    948		else
    949			chan_cc |=  AT_XDMAC_CC_SAM_INCREMENTED_AM;
    950	}
    951
    952	if (xt->dst_inc) {
    953		if (xt->dst_sgl)
    954			chan_cc |=  AT_XDMAC_CC_DAM_UBS_AM;
    955		else
    956			chan_cc |=  AT_XDMAC_CC_DAM_INCREMENTED_AM;
    957	}
    958
    959	spin_lock_irqsave(&atchan->lock, flags);
    960	desc = at_xdmac_get_desc(atchan);
    961	spin_unlock_irqrestore(&atchan->lock, flags);
    962	if (!desc) {
    963		dev_err(chan2dev(chan), "can't get descriptor\n");
    964		return NULL;
    965	}
    966
    967	chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);
    968
    969	ublen = chunk->size >> dwidth;
    970
    971	desc->lld.mbr_sa = src;
    972	desc->lld.mbr_da = dst;
    973	desc->lld.mbr_sus = dmaengine_get_src_icg(xt, chunk);
    974	desc->lld.mbr_dus = dmaengine_get_dst_icg(xt, chunk);
    975
    976	desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3
    977		| AT_XDMAC_MBR_UBC_NDEN
    978		| AT_XDMAC_MBR_UBC_NSEN
    979		| ublen;
    980	desc->lld.mbr_cfg = chan_cc;
    981
    982	dev_dbg(chan2dev(chan),
    983		"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
    984		__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da,
    985		desc->lld.mbr_ubc, desc->lld.mbr_cfg);
    986
    987	/* Chain lld. */
    988	if (prev)
    989		at_xdmac_queue_desc(chan, prev, desc);
    990
    991	return desc;
    992}
    993
    994static struct dma_async_tx_descriptor *
    995at_xdmac_prep_interleaved(struct dma_chan *chan,
    996			  struct dma_interleaved_template *xt,
    997			  unsigned long flags)
    998{
    999	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
   1000	struct at_xdmac_desc	*prev = NULL, *first = NULL;
   1001	dma_addr_t		dst_addr, src_addr;
   1002	size_t			src_skip = 0, dst_skip = 0, len = 0;
   1003	struct data_chunk	*chunk;
   1004	int			i;
   1005
   1006	if (!xt || !xt->numf || (xt->dir != DMA_MEM_TO_MEM))
   1007		return NULL;
   1008
   1009	/*
   1010	 * TODO: Handle the case where we have to repeat a chain of
   1011	 * descriptors...
   1012	 */
   1013	if ((xt->numf > 1) && (xt->frame_size > 1))
   1014		return NULL;
   1015
   1016	dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, numf=%zu, frame_size=%zu, flags=0x%lx\n",
   1017		__func__, &xt->src_start, &xt->dst_start,	xt->numf,
   1018		xt->frame_size, flags);
   1019
   1020	src_addr = xt->src_start;
   1021	dst_addr = xt->dst_start;
   1022
   1023	if (xt->numf > 1) {
   1024		first = at_xdmac_interleaved_queue_desc(chan, atchan,
   1025							NULL,
   1026							src_addr, dst_addr,
   1027							xt, xt->sgl);
   1028
   1029		/* Length of the block is (BLEN+1) microblocks. */
   1030		for (i = 0; i < xt->numf - 1; i++)
   1031			at_xdmac_increment_block_count(chan, first);
   1032
   1033		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
   1034			__func__, first, first);
   1035		list_add_tail(&first->desc_node, &first->descs_list);
   1036	} else {
   1037		for (i = 0; i < xt->frame_size; i++) {
   1038			size_t src_icg = 0, dst_icg = 0;
   1039			struct at_xdmac_desc *desc;
   1040
   1041			chunk = xt->sgl + i;
   1042
   1043			dst_icg = dmaengine_get_dst_icg(xt, chunk);
   1044			src_icg = dmaengine_get_src_icg(xt, chunk);
   1045
   1046			src_skip = chunk->size + src_icg;
   1047			dst_skip = chunk->size + dst_icg;
   1048
   1049			dev_dbg(chan2dev(chan),
   1050				"%s: chunk size=%zu, src icg=%zu, dst icg=%zu\n",
   1051				__func__, chunk->size, src_icg, dst_icg);
   1052
   1053			desc = at_xdmac_interleaved_queue_desc(chan, atchan,
   1054							       prev,
   1055							       src_addr, dst_addr,
   1056							       xt, chunk);
   1057			if (!desc) {
   1058				list_splice_tail_init(&first->descs_list,
   1059						      &atchan->free_descs_list);
   1060				return NULL;
   1061			}
   1062
   1063			if (!first)
   1064				first = desc;
   1065
   1066			dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
   1067				__func__, desc, first);
   1068			list_add_tail(&desc->desc_node, &first->descs_list);
   1069
   1070			if (xt->src_sgl)
   1071				src_addr += src_skip;
   1072
   1073			if (xt->dst_sgl)
   1074				dst_addr += dst_skip;
   1075
   1076			len += chunk->size;
   1077			prev = desc;
   1078		}
   1079	}
   1080
   1081	first->tx_dma_desc.cookie = -EBUSY;
   1082	first->tx_dma_desc.flags = flags;
   1083	first->xfer_size = len;
   1084
   1085	return &first->tx_dma_desc;
   1086}
   1087
   1088static struct dma_async_tx_descriptor *
   1089at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
   1090			 size_t len, unsigned long flags)
   1091{
   1092	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
   1093	struct at_xdmac_desc	*first = NULL, *prev = NULL;
   1094	size_t			remaining_size = len, xfer_size = 0, ublen;
   1095	dma_addr_t		src_addr = src, dst_addr = dest;
   1096	u32			dwidth;
    1097	/*
    1098	 * WARNING: We don't know the direction, which means we can't
    1099	 * dynamically set the source and dest interfaces, so we have to use
    1100	 * the same one for both. Only interface 0 allows EBI access.
    1101	 * Fortunately we can access DDR through both ports (at least on
    1102	 * SAMA5D4x), so using the same interface for source and dest works
    1103	 * around the unknown direction.
    1104	 * ERRATA: Even though it is unused for memory transfers, the PERID
    1105	 * must not match the one of another channel, otherwise it could lead
    1106	 * to spurious flag status.
    1107	 * For the SAMA7G5x case, the SIF and DIF fields are no longer used,
    1108	 * so there is no need to set the SIF/DIF interfaces here.
    1109	 * For SAMA5D4x and SAMA5D2x the SIF and DIF are already configured as
    1110	 * zero.
    1111	 */
   1112	u32			chan_cc = AT_XDMAC_CC_PERID(0x7f)
   1113					| AT_XDMAC_CC_DAM_INCREMENTED_AM
   1114					| AT_XDMAC_CC_SAM_INCREMENTED_AM
   1115					| AT_XDMAC_CC_MBSIZE_SIXTEEN
   1116					| AT_XDMAC_CC_TYPE_MEM_TRAN;
   1117	unsigned long		irqflags;
   1118
   1119	dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, len=%zd, flags=0x%lx\n",
   1120		__func__, &src, &dest, len, flags);
   1121
   1122	if (unlikely(!len))
   1123		return NULL;
   1124
   1125	dwidth = at_xdmac_align_width(chan, src_addr | dst_addr);
   1126
   1127	/* Prepare descriptors. */
   1128	while (remaining_size) {
   1129		struct at_xdmac_desc	*desc = NULL;
   1130
   1131		dev_dbg(chan2dev(chan), "%s: remaining_size=%zu\n", __func__, remaining_size);
   1132
   1133		spin_lock_irqsave(&atchan->lock, irqflags);
   1134		desc = at_xdmac_get_desc(atchan);
   1135		spin_unlock_irqrestore(&atchan->lock, irqflags);
   1136		if (!desc) {
   1137			dev_err(chan2dev(chan), "can't get descriptor\n");
   1138			if (first)
   1139				list_splice_tail_init(&first->descs_list,
   1140						      &atchan->free_descs_list);
   1141			return NULL;
   1142		}
   1143
   1144		/* Update src and dest addresses. */
   1145		src_addr += xfer_size;
   1146		dst_addr += xfer_size;
   1147
   1148		if (remaining_size >= AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)
   1149			xfer_size = AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth;
   1150		else
   1151			xfer_size = remaining_size;
   1152
   1153		dev_dbg(chan2dev(chan), "%s: xfer_size=%zu\n", __func__, xfer_size);
   1154
   1155		/* Check remaining length and change data width if needed. */
   1156		dwidth = at_xdmac_align_width(chan,
   1157					      src_addr | dst_addr | xfer_size);
   1158		chan_cc &= ~AT_XDMAC_CC_DWIDTH_MASK;
   1159		chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);
   1160
   1161		ublen = xfer_size >> dwidth;
   1162		remaining_size -= xfer_size;
   1163
   1164		desc->lld.mbr_sa = src_addr;
   1165		desc->lld.mbr_da = dst_addr;
   1166		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2
   1167			| AT_XDMAC_MBR_UBC_NDEN
   1168			| AT_XDMAC_MBR_UBC_NSEN
   1169			| ublen;
   1170		desc->lld.mbr_cfg = chan_cc;
   1171
   1172		dev_dbg(chan2dev(chan),
   1173			 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
   1174			 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc, desc->lld.mbr_cfg);
   1175
   1176		/* Chain lld. */
   1177		if (prev)
   1178			at_xdmac_queue_desc(chan, prev, desc);
   1179
   1180		prev = desc;
   1181		if (!first)
   1182			first = desc;
   1183
   1184		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
   1185			 __func__, desc, first);
   1186		list_add_tail(&desc->desc_node, &first->descs_list);
   1187	}
   1188
   1189	first->tx_dma_desc.flags = flags;
   1190	first->xfer_size = len;
   1191
   1192	return &first->tx_dma_desc;
   1193}
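        /*
         * Example of how a copy is split (hypothetical sizes): with
         * byte-aligned (odd) source and destination addresses, dwidth is
         * AT_XDMAC_CC_DWIDTH_BYTE, so a single descriptor covers at most
         * AT_XDMAC_MBR_UBC_UBLEN_MAX << 0 = 16777215 bytes. A 20 MiB
         * (20971520 byte) memcpy is therefore described by two chained
         * descriptors of 16777215 and 4194305 bytes, with dwidth re-evaluated
         * for each chunk as done in the loop above.
         */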
   1194
   1195static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan,
   1196							 struct at_xdmac_chan *atchan,
   1197							 dma_addr_t dst_addr,
   1198							 size_t len,
   1199							 int value)
   1200{
   1201	struct at_xdmac_desc	*desc;
   1202	unsigned long		flags;
   1203	size_t			ublen;
   1204	u32			dwidth;
   1205	char			pattern;
    1206	/*
    1207	 * WARNING: The channel configuration is set here since there is no
    1208	 * dmaengine_slave_config call in this case. Moreover, we don't know
    1209	 * the direction, which means we can't dynamically set the source and
    1210	 * dest interfaces, so we have to use the same one for both. Only
    1211	 * interface 0 allows EBI access. Fortunately we can access DDR
    1212	 * through both ports (at least on SAMA5D4x), so using the same
    1213	 * interface for source and dest works around the unknown direction.
    1214	 * ERRATA: Even though it is unused for memory transfers, the PERID
    1215	 * must not match the one of another channel, otherwise it could lead
    1216	 * to spurious flag status.
    1217	 * For the SAMA7G5x case, the SIF and DIF fields are no longer used,
    1218	 * so there is no need to set the SIF/DIF interfaces here.
    1219	 * For SAMA5D4x and SAMA5D2x the SIF and DIF are already configured as
    1220	 * zero.
    1221	 */
   1222	u32			chan_cc = AT_XDMAC_CC_PERID(0x7f)
   1223					| AT_XDMAC_CC_DAM_UBS_AM
   1224					| AT_XDMAC_CC_SAM_INCREMENTED_AM
   1225					| AT_XDMAC_CC_MBSIZE_SIXTEEN
   1226					| AT_XDMAC_CC_MEMSET_HW_MODE
   1227					| AT_XDMAC_CC_TYPE_MEM_TRAN;
   1228
   1229	dwidth = at_xdmac_align_width(chan, dst_addr);
   1230
   1231	if (len >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) {
   1232		dev_err(chan2dev(chan),
   1233			"%s: Transfer too large, aborting...\n",
   1234			__func__);
   1235		return NULL;
   1236	}
   1237
   1238	spin_lock_irqsave(&atchan->lock, flags);
   1239	desc = at_xdmac_get_desc(atchan);
   1240	spin_unlock_irqrestore(&atchan->lock, flags);
   1241	if (!desc) {
   1242		dev_err(chan2dev(chan), "can't get descriptor\n");
   1243		return NULL;
   1244	}
   1245
   1246	chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);
   1247
   1248	/* Only the first byte of value is to be used according to dmaengine */
   1249	pattern = (char)value;
   1250
   1251	ublen = len >> dwidth;
   1252
   1253	desc->lld.mbr_da = dst_addr;
   1254	desc->lld.mbr_ds = (pattern << 24) |
   1255			   (pattern << 16) |
   1256			   (pattern << 8) |
   1257			   pattern;
   1258	desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3
   1259		| AT_XDMAC_MBR_UBC_NDEN
   1260		| AT_XDMAC_MBR_UBC_NSEN
   1261		| ublen;
   1262	desc->lld.mbr_cfg = chan_cc;
   1263
   1264	dev_dbg(chan2dev(chan),
   1265		"%s: lld: mbr_da=%pad, mbr_ds=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
   1266		__func__, &desc->lld.mbr_da, desc->lld.mbr_ds, desc->lld.mbr_ubc,
   1267		desc->lld.mbr_cfg);
   1268
   1269	return desc;
   1270}
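        /*
         * Example: for value 0x5a, only the low byte is kept and replicated
         * into every byte lane of the Channel Data Stride Memory Set Pattern
         * register, so mbr_ds ends up as 0x5a5a5a5a and the controller fills
         * the destination with that pattern in hardware memset mode
         * (AT_XDMAC_CC_MEMSET_HW_MODE).
         */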
   1271
   1272static struct dma_async_tx_descriptor *
   1273at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
   1274			 size_t len, unsigned long flags)
   1275{
   1276	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
   1277	struct at_xdmac_desc	*desc;
   1278
   1279	dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%zu, pattern=0x%x, flags=0x%lx\n",
   1280		__func__, &dest, len, value, flags);
   1281
   1282	if (unlikely(!len))
   1283		return NULL;
   1284
    1285	desc = at_xdmac_memset_create_desc(chan, atchan, dest, len, value);
        	if (!desc)
        		return NULL;
    1286	list_add_tail(&desc->desc_node, &desc->descs_list);
   1287
   1288	desc->tx_dma_desc.cookie = -EBUSY;
   1289	desc->tx_dma_desc.flags = flags;
   1290	desc->xfer_size = len;
   1291
   1292	return &desc->tx_dma_desc;
   1293}
   1294
   1295static struct dma_async_tx_descriptor *
   1296at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl,
   1297			    unsigned int sg_len, int value,
   1298			    unsigned long flags)
   1299{
   1300	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
   1301	struct at_xdmac_desc	*desc, *pdesc = NULL,
   1302				*ppdesc = NULL, *first = NULL;
   1303	struct scatterlist	*sg, *psg = NULL, *ppsg = NULL;
   1304	size_t			stride = 0, pstride = 0, len = 0;
   1305	int			i;
   1306
   1307	if (!sgl)
   1308		return NULL;
   1309
   1310	dev_dbg(chan2dev(chan), "%s: sg_len=%d, value=0x%x, flags=0x%lx\n",
   1311		__func__, sg_len, value, flags);
   1312
   1313	/* Prepare descriptors. */
   1314	for_each_sg(sgl, sg, sg_len, i) {
   1315		dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%d, pattern=0x%x, flags=0x%lx\n",
   1316			__func__, &sg_dma_address(sg), sg_dma_len(sg),
   1317			value, flags);
   1318		desc = at_xdmac_memset_create_desc(chan, atchan,
   1319						   sg_dma_address(sg),
   1320						   sg_dma_len(sg),
   1321						   value);
    1322		if (!desc) {
    1323			if (first)
    1324				list_splice_tail_init(&first->descs_list,
        						      &atchan->free_descs_list);
        			return NULL;
        		}
   1325
   1326		if (!first)
   1327			first = desc;
   1328
   1329		/* Update our strides */
   1330		pstride = stride;
   1331		if (psg)
   1332			stride = sg_dma_address(sg) -
   1333				(sg_dma_address(psg) + sg_dma_len(psg));
   1334
    1335		/*
    1336		 * The scatterlist API gives us only the address and
    1337		 * length of each element.
    1338		 *
    1339		 * Unfortunately, we don't have the stride, which we
    1340		 * will need to compute.
    1341		 *
    1342		 * That puts us in a situation like this one:
    1343		 *    len    stride    len    stride    len
    1344		 * +-------+        +-------+        +-------+
    1345		 * |  N-2  |        |  N-1  |        |   N   |
    1346		 * +-------+        +-------+        +-------+
    1347		 *
    1348		 * We need all three of these elements (N-2, N-1 and N)
    1349		 * to decide whether we need to queue N-1 or
    1350		 * reuse N-2.
    1351		 *
    1352		 * We will only consider N if it is the last element.
    1353		 */
   1354		if (ppdesc && pdesc) {
   1355			if ((stride == pstride) &&
   1356			    (sg_dma_len(ppsg) == sg_dma_len(psg))) {
   1357				dev_dbg(chan2dev(chan),
   1358					"%s: desc 0x%p can be merged with desc 0x%p\n",
   1359					__func__, pdesc, ppdesc);
   1360
   1361				/*
   1362				 * Increment the block count of the
   1363				 * N-2 descriptor
   1364				 */
   1365				at_xdmac_increment_block_count(chan, ppdesc);
   1366				ppdesc->lld.mbr_dus = stride;
   1367
   1368				/*
   1369				 * Put back the N-1 descriptor in the
   1370				 * free descriptor list
   1371				 */
   1372				list_add_tail(&pdesc->desc_node,
   1373					      &atchan->free_descs_list);
   1374
   1375				/*
   1376				 * Make our N-1 descriptor pointer
   1377				 * point to the N-2 since they were
   1378				 * actually merged.
   1379				 */
   1380				pdesc = ppdesc;
   1381
   1382			/*
   1383			 * Rule out the case where we don't have
   1384			 * pstride computed yet (our second sg
   1385			 * element)
   1386			 *
   1387			 * We also want to catch the case where there
   1388			 * would be a negative stride,
   1389			 */
   1390			} else if (pstride ||
   1391				   sg_dma_address(sg) < sg_dma_address(psg)) {
   1392				/*
   1393				 * Queue the N-1 descriptor after the
   1394				 * N-2
   1395				 */
   1396				at_xdmac_queue_desc(chan, ppdesc, pdesc);
   1397
   1398				/*
   1399				 * Add the N-1 descriptor to the list
   1400				 * of the descriptors used for this
   1401				 * transfer
   1402				 */
   1403				list_add_tail(&desc->desc_node,
   1404					      &first->descs_list);
   1405				dev_dbg(chan2dev(chan),
   1406					"%s: add desc 0x%p to descs_list 0x%p\n",
   1407					__func__, desc, first);
   1408			}
   1409		}
   1410
   1411		/*
   1412		 * If we are the last element, just see if we have the
    1413		 * same size as the previous element.
   1414		 *
   1415		 * If so, we can merge it with the previous descriptor
   1416		 * since we don't care about the stride anymore.
   1417		 */
   1418		if ((i == (sg_len - 1)) &&
   1419		    sg_dma_len(psg) == sg_dma_len(sg)) {
   1420			dev_dbg(chan2dev(chan),
   1421				"%s: desc 0x%p can be merged with desc 0x%p\n",
   1422				__func__, desc, pdesc);
   1423
   1424			/*
   1425			 * Increment the block count of the N-1
   1426			 * descriptor
   1427			 */
   1428			at_xdmac_increment_block_count(chan, pdesc);
   1429			pdesc->lld.mbr_dus = stride;
   1430
   1431			/*
   1432			 * Put back the N descriptor in the free
   1433			 * descriptor list
   1434			 */
   1435			list_add_tail(&desc->desc_node,
   1436				      &atchan->free_descs_list);
   1437		}
   1438
   1439		/* Update our descriptors */
   1440		ppdesc = pdesc;
   1441		pdesc = desc;
   1442
   1443		/* Update our scatter pointers */
   1444		ppsg = psg;
   1445		psg = sg;
   1446
   1447		len += sg_dma_len(sg);
   1448	}
   1449
   1450	first->tx_dma_desc.cookie = -EBUSY;
   1451	first->tx_dma_desc.flags = flags;
   1452	first->xfer_size = len;
   1453
   1454	return &first->tx_dma_desc;
   1455}
   1456
   1457static enum dma_status
   1458at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
   1459		struct dma_tx_state *txstate)
   1460{
   1461	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
   1462	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
   1463	struct at_xdmac_desc	*desc, *_desc, *iter;
   1464	struct list_head	*descs_list;
   1465	enum dma_status		ret;
   1466	int			residue, retry;
   1467	u32			cur_nda, check_nda, cur_ubc, mask, value;
   1468	u8			dwidth = 0;
   1469	unsigned long		flags;
   1470	bool			initd;
   1471
   1472	ret = dma_cookie_status(chan, cookie, txstate);
   1473	if (ret == DMA_COMPLETE)
   1474		return ret;
   1475
   1476	if (!txstate)
   1477		return ret;
   1478
   1479	spin_lock_irqsave(&atchan->lock, flags);
   1480
   1481	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
   1482
    1483	/*
    1484	 * If the transfer has not been started yet, there is no need to
    1485	 * compute the residue: it is simply the transfer length.
    1486	 */
   1487	if (!desc->active_xfer) {
   1488		dma_set_residue(txstate, desc->xfer_size);
   1489		goto spin_unlock;
   1490	}
   1491
   1492	residue = desc->xfer_size;
    1493	/*
    1494	 * Flush the FIFO: only relevant when the transfer is source
    1495	 * peripheral synchronized. The flush is needed before reading CUBC
    1496	 * because data sitting in the FIFO are not reported by CUBC.
    1497	 * Reporting a residue equal to the transfer length while data are
    1498	 * still in the FIFO can cause problems. Use case: the Atmel USART
    1499	 * raises a timeout when characters have been received but no new
    1500	 * character arrives for a while. On timeout, it requests the
    1501	 * residue. If the data are still in the DMA FIFO, we would return a
    1502	 * residue equal to the transfer length, i.e. "no data received". An
    1503	 * application waiting for these data would then hang, since no
    1504	 * further USART timeout occurs without new incoming data.
    1505	 */
   1506	mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
   1507	value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
   1508	if ((desc->lld.mbr_cfg & mask) == value) {
   1509		at_xdmac_write(atxdmac, atxdmac->layout->gswf, atchan->mask);
   1510		while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
   1511			cpu_relax();
   1512	}
   1513
    1514	/*
    1515	 * The easiest way to compute the residue would be to pause the DMA,
    1516	 * but doing so can lead to losing data, as some devices don't have
    1517	 * a FIFO.
    1518	 * We need to read several registers because:
    1519	 * - the DMA is running, therefore a descriptor change is possible
    1520	 * while reading these registers;
    1521	 * - when a block transfer is done, the value of the CUBC register
    1522	 * is set back to its initial value until the next descriptor is
    1523	 * fetched. This value would corrupt the residue calculation, so we
    1524	 * have to skip it.
    1525	 *
    1526	 * INITD --------                    ------------
    1527	 *              |____________________|
    1528	 *       _______________________  _______________
    1529	 * NDA       @desc2             \/   @desc3
    1530	 *       _______________________/\_______________
    1531	 *       __________  ___________  _______________
    1532	 * CUBC       0    \/ MAX desc1 \/  MAX desc2
    1533	 *       __________/\___________/\_______________
    1534	 *
    1535	 * Since descriptors are aligned on 64 bits, we can assume that the
    1536	 * updates of NDA and CUBC are atomic. Memory barriers are used to
    1537	 * enforce the read order of the registers. A maximum number of retries
    1538	 * is set because, unlikely as it is, the loop could otherwise never end.
    1539	 */
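	/*
	 * Summary of the computation done below: with UBLEN_i the microblock
	 * length programmed in descriptor i, dwidth_i its data width and CUBC
	 * the remaining microblock length of the descriptor currently being
	 * processed,
	 *
	 *	residue = xfer_size
	 *		  - sum(UBLEN_i << dwidth_i)  (descriptors up to and
	 *					       including the current one)
	 *		  + CUBC << dwidth	      (current descriptor)
	 */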
   1540	for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
   1541		check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
   1542		rmb();
   1543		cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
   1544		rmb();
   1545		initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
   1546		rmb();
   1547		cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
   1548		rmb();
   1549
   1550		if ((check_nda == cur_nda) && initd)
   1551			break;
   1552	}
   1553
   1554	if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) {
   1555		ret = DMA_ERROR;
   1556		goto spin_unlock;
   1557	}
   1558
    1559	/*
    1560	 * Flush the FIFO: only relevant when the transfer is source
    1561	 * peripheral synchronized. Another flush is needed here because CUBC
    1562	 * is updated when the controller issues the data write command, which
    1563	 * can lead to reporting data that have not yet been written to memory
    1564	 * or to the device. The FIFO flush ensures the data really are written.
    1565	 */
   1566	if ((desc->lld.mbr_cfg & mask) == value) {
   1567		at_xdmac_write(atxdmac, atxdmac->layout->gswf, atchan->mask);
   1568		while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
   1569			cpu_relax();
   1570	}
   1571
    1572	/*
    1573	 * Subtract the size of all microblocks already transferred, including
    1574	 * the current one, then add back the size still to be transferred in
    1575	 * the current microblock.
    1576	 */
   1577	descs_list = &desc->descs_list;
   1578	list_for_each_entry_safe(iter, _desc, descs_list, desc_node) {
   1579		dwidth = at_xdmac_get_dwidth(iter->lld.mbr_cfg);
   1580		residue -= (iter->lld.mbr_ubc & 0xffffff) << dwidth;
   1581		if ((iter->lld.mbr_nda & 0xfffffffc) == cur_nda) {
   1582			desc = iter;
   1583			break;
   1584		}
   1585	}
   1586	residue += cur_ubc << dwidth;
   1587
   1588	dma_set_residue(txstate, residue);
   1589
   1590	dev_dbg(chan2dev(chan),
   1591		 "%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n",
   1592		 __func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue);
   1593
   1594spin_unlock:
   1595	spin_unlock_irqrestore(&atchan->lock, flags);
   1596	return ret;
   1597}
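
/*
 * Illustrative client-side sketch (not part of this driver): a slave driver
 * would reach at_xdmac_tx_status() through the generic dmaengine helpers to
 * learn how many bytes are still to be transferred. "chan" and "cookie" are
 * assumed to come from the client's own setup code.
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = dmaengine_tx_status(chan, cookie, &state);
 *	if (status != DMA_COMPLETE)
 *		pr_debug("still %u bytes to transfer\n", state.residue);
 */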
   1598
   1599static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
   1600{
   1601	struct at_xdmac_desc	*desc;
   1602
    1603	/*
    1604	 * If the channel is enabled, do nothing: advance_work will be
    1605	 * triggered after the interrupt.
    1606	 */
   1607	if (at_xdmac_chan_is_enabled(atchan) || list_empty(&atchan->xfers_list))
   1608		return;
   1609
   1610	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc,
   1611				xfer_node);
   1612	dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
   1613	if (!desc->active_xfer)
   1614		at_xdmac_start_xfer(atchan, desc);
   1615}
   1616
   1617static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
   1618{
   1619	struct at_xdmac_desc		*desc;
   1620	struct dma_async_tx_descriptor	*txd;
   1621
   1622	spin_lock_irq(&atchan->lock);
   1623	dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
   1624		__func__, atchan->irq_status);
   1625	if (list_empty(&atchan->xfers_list)) {
   1626		spin_unlock_irq(&atchan->lock);
   1627		return;
   1628	}
   1629	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc,
   1630				xfer_node);
   1631	spin_unlock_irq(&atchan->lock);
   1632	txd = &desc->tx_dma_desc;
   1633	if (txd->flags & DMA_PREP_INTERRUPT)
   1634		dmaengine_desc_get_callback_invoke(txd, NULL);
   1635}
   1636
   1637/* Called with atchan->lock held. */
   1638static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
   1639{
   1640	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
   1641	struct at_xdmac_desc	*bad_desc;
   1642
   1643	/*
   1644	 * The descriptor currently at the head of the active list is
   1645	 * broken. Since we don't have any way to report errors, we'll
   1646	 * just have to scream loudly and try to continue with other
   1647	 * descriptors queued (if any).
   1648	 */
   1649	if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
   1650		dev_err(chan2dev(&atchan->chan), "read bus error!!!");
   1651	if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
   1652		dev_err(chan2dev(&atchan->chan), "write bus error!!!");
   1653	if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
   1654		dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
   1655
   1656	/* Channel must be disabled first as it's not done automatically */
   1657	at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
   1658	while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
   1659		cpu_relax();
   1660
   1661	bad_desc = list_first_entry(&atchan->xfers_list,
   1662				    struct at_xdmac_desc,
   1663				    xfer_node);
   1664
   1665	/* Print bad descriptor's details if needed */
   1666	dev_dbg(chan2dev(&atchan->chan),
   1667		"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
   1668		__func__, &bad_desc->lld.mbr_sa, &bad_desc->lld.mbr_da,
   1669		bad_desc->lld.mbr_ubc);
   1670
   1671	/* Then continue with usual descriptor management */
   1672}
   1673
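/*
 * Per-channel tasklet, scheduled from the interrupt handler: for a cyclic
 * channel it only invokes the client callback; otherwise it handles any
 * error condition, completes the cookie of the active descriptor, invokes
 * the callback, moves the used descriptors back to the free list and starts
 * the next queued transfer via at_xdmac_advance_work().
 */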
   1674static void at_xdmac_tasklet(struct tasklet_struct *t)
   1675{
   1676	struct at_xdmac_chan	*atchan = from_tasklet(atchan, t, tasklet);
   1677	struct at_xdmac_desc	*desc;
   1678	struct dma_async_tx_descriptor *txd;
   1679	u32			error_mask;
   1680
   1681	if (at_xdmac_chan_is_cyclic(atchan))
   1682		return at_xdmac_handle_cyclic(atchan);
   1683
   1684	error_mask = AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS |
   1685		AT_XDMAC_CIS_ROIS;
   1686
   1687	spin_lock_irq(&atchan->lock);
   1688
   1689	dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
   1690		__func__, atchan->irq_status);
   1691
   1692	if (!(atchan->irq_status & AT_XDMAC_CIS_LIS) &&
   1693	    !(atchan->irq_status & error_mask)) {
   1694		spin_unlock_irq(&atchan->lock);
   1695		return;
   1696	}
   1697
   1698	if (atchan->irq_status & error_mask)
   1699		at_xdmac_handle_error(atchan);
   1700
   1701	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc,
   1702				xfer_node);
   1703	dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
   1704	if (!desc->active_xfer) {
   1705		dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting");
   1706		spin_unlock_irq(&atchan->lock);
   1707		return;
   1708	}
   1709
   1710	txd = &desc->tx_dma_desc;
   1711	dma_cookie_complete(txd);
   1712	/* Remove the transfer from the transfer list. */
   1713	list_del(&desc->xfer_node);
   1714	spin_unlock_irq(&atchan->lock);
   1715
   1716	if (txd->flags & DMA_PREP_INTERRUPT)
   1717		dmaengine_desc_get_callback_invoke(txd, NULL);
   1718
   1719	dma_run_dependencies(txd);
   1720
   1721	spin_lock_irq(&atchan->lock);
   1722	/* Move the xfer descriptors into the free descriptors list. */
   1723	list_splice_tail_init(&desc->descs_list, &atchan->free_descs_list);
   1724	at_xdmac_advance_work(atchan);
   1725	spin_unlock_irq(&atchan->lock);
   1726}
   1727
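/*
 * Top-level interrupt handler: reads the global interrupt status, masks it
 * with the global interrupt mask and, for every channel with a pending
 * interrupt, latches the channel status into atchan->irq_status and
 * schedules the channel tasklet. On a read or write bus error the channel
 * is disabled immediately. The loop repeats until nothing is pending.
 */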
   1728static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
   1729{
   1730	struct at_xdmac		*atxdmac = (struct at_xdmac *)dev_id;
   1731	struct at_xdmac_chan	*atchan;
   1732	u32			imr, status, pending;
   1733	u32			chan_imr, chan_status;
   1734	int			i, ret = IRQ_NONE;
   1735
   1736	do {
   1737		imr = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
   1738		status = at_xdmac_read(atxdmac, AT_XDMAC_GIS);
   1739		pending = status & imr;
   1740
   1741		dev_vdbg(atxdmac->dma.dev,
   1742			 "%s: status=0x%08x, imr=0x%08x, pending=0x%08x\n",
   1743			 __func__, status, imr, pending);
   1744
   1745		if (!pending)
   1746			break;
   1747
   1748		/* We have to find which channel has generated the interrupt. */
   1749		for (i = 0; i < atxdmac->dma.chancnt; i++) {
   1750			if (!((1 << i) & pending))
   1751				continue;
   1752
   1753			atchan = &atxdmac->chan[i];
   1754			chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
   1755			chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS);
   1756			atchan->irq_status = chan_status & chan_imr;
   1757			dev_vdbg(atxdmac->dma.dev,
   1758				 "%s: chan%d: imr=0x%x, status=0x%x\n",
   1759				 __func__, i, chan_imr, chan_status);
   1760			dev_vdbg(chan2dev(&atchan->chan),
   1761				 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
   1762				 __func__,
   1763				 at_xdmac_chan_read(atchan, AT_XDMAC_CC),
   1764				 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
   1765				 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
   1766				 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
   1767				 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
   1768				 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
   1769
   1770			if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
   1771				at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
   1772
   1773			tasklet_schedule(&atchan->tasklet);
   1774			ret = IRQ_HANDLED;
   1775		}
   1776
   1777	} while (pending);
   1778
   1779	return ret;
   1780}
   1781
   1782static void at_xdmac_issue_pending(struct dma_chan *chan)
   1783{
   1784	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
   1785	unsigned long flags;
   1786
   1787	dev_dbg(chan2dev(&atchan->chan), "%s\n", __func__);
   1788
   1789	spin_lock_irqsave(&atchan->lock, flags);
   1790	at_xdmac_advance_work(atchan);
   1791	spin_unlock_irqrestore(&atchan->lock, flags);
   1792
   1793	return;
   1794}
   1795
   1796static int at_xdmac_device_config(struct dma_chan *chan,
   1797				  struct dma_slave_config *config)
   1798{
   1799	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
   1800	int ret;
   1801	unsigned long		flags;
   1802
   1803	dev_dbg(chan2dev(chan), "%s\n", __func__);
   1804
   1805	spin_lock_irqsave(&atchan->lock, flags);
   1806	ret = at_xdmac_set_slave_config(chan, config);
   1807	spin_unlock_irqrestore(&atchan->lock, flags);
   1808
   1809	return ret;
   1810}
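
/*
 * Illustrative client-side sketch (not part of this driver): a slave driver
 * reaches at_xdmac_device_config() through dmaengine_slave_config(). The
 * "periph_rx_fifo_phys" address and the widths below are placeholders, not
 * real values.
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_DEV_TO_MEM,
 *		.src_addr	= periph_rx_fifo_phys,
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
 *		.src_maxburst	= 1,
 *	};
 *
 *	if (dmaengine_slave_config(chan, &cfg))
 *		dev_err(dev, "failed to configure DMA channel\n");
 */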
   1811
   1812static int at_xdmac_device_pause(struct dma_chan *chan)
   1813{
   1814	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
   1815	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
   1816	unsigned long		flags;
   1817
   1818	dev_dbg(chan2dev(chan), "%s\n", __func__);
   1819
   1820	if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status))
   1821		return 0;
   1822
   1823	spin_lock_irqsave(&atchan->lock, flags);
   1824	at_xdmac_write(atxdmac, atxdmac->layout->grws, atchan->mask);
   1825	while (at_xdmac_chan_read(atchan, AT_XDMAC_CC)
   1826	       & (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP))
   1827		cpu_relax();
   1828	spin_unlock_irqrestore(&atchan->lock, flags);
   1829
   1830	return 0;
   1831}
   1832
   1833static int at_xdmac_device_resume(struct dma_chan *chan)
   1834{
   1835	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
   1836	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
   1837	unsigned long		flags;
   1838
   1839	dev_dbg(chan2dev(chan), "%s\n", __func__);
   1840
   1841	spin_lock_irqsave(&atchan->lock, flags);
   1842	if (!at_xdmac_chan_is_paused(atchan)) {
   1843		spin_unlock_irqrestore(&atchan->lock, flags);
   1844		return 0;
   1845	}
   1846
   1847	at_xdmac_write(atxdmac, atxdmac->layout->grwr, atchan->mask);
   1848	clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
   1849	spin_unlock_irqrestore(&atchan->lock, flags);
   1850
   1851	return 0;
   1852}
   1853
   1854static int at_xdmac_device_terminate_all(struct dma_chan *chan)
   1855{
   1856	struct at_xdmac_desc	*desc, *_desc;
   1857	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
   1858	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
   1859	unsigned long		flags;
   1860
   1861	dev_dbg(chan2dev(chan), "%s\n", __func__);
   1862
   1863	spin_lock_irqsave(&atchan->lock, flags);
   1864	at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
   1865	while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
   1866		cpu_relax();
   1867
   1868	/* Cancel all pending transfers. */
   1869	list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) {
   1870		list_del(&desc->xfer_node);
   1871		list_splice_tail_init(&desc->descs_list,
   1872				      &atchan->free_descs_list);
   1873	}
   1874
   1875	clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
   1876	clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
   1877	spin_unlock_irqrestore(&atchan->lock, flags);
   1878
   1879	return 0;
   1880}
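
/*
 * Illustrative client-side sketch (not part of this driver): the pause,
 * resume and terminate callbacks above are normally reached through the
 * generic dmaengine wrappers. Terminating drops every queued transfer.
 *
 *	dmaengine_pause(chan);
 *	...
 *	dmaengine_resume(chan);
 *
 *	dmaengine_terminate_sync(chan);
 */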
   1881
   1882static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
   1883{
   1884	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
   1885	struct at_xdmac_desc	*desc;
   1886	int			i;
   1887
   1888	if (at_xdmac_chan_is_enabled(atchan)) {
   1889		dev_err(chan2dev(chan),
   1890			"can't allocate channel resources (channel enabled)\n");
   1891		return -EIO;
   1892	}
   1893
   1894	if (!list_empty(&atchan->free_descs_list)) {
   1895		dev_err(chan2dev(chan),
   1896			"can't allocate channel resources (channel not free from a previous use)\n");
   1897		return -EIO;
   1898	}
   1899
   1900	for (i = 0; i < init_nr_desc_per_channel; i++) {
   1901		desc = at_xdmac_alloc_desc(chan, GFP_KERNEL);
   1902		if (!desc) {
   1903			if (i == 0) {
   1904				dev_warn(chan2dev(chan),
   1905					 "can't allocate any descriptors\n");
   1906				return -EIO;
   1907			}
   1908			dev_warn(chan2dev(chan),
   1909				"only %d descriptors have been allocated\n", i);
   1910			break;
   1911		}
   1912		list_add_tail(&desc->desc_node, &atchan->free_descs_list);
   1913	}
   1914
   1915	dma_cookie_init(chan);
   1916
   1917	dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
   1918
   1919	return i;
   1920}
   1921
   1922static void at_xdmac_free_chan_resources(struct dma_chan *chan)
   1923{
   1924	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
   1925	struct at_xdmac		*atxdmac = to_at_xdmac(chan->device);
   1926	struct at_xdmac_desc	*desc, *_desc;
   1927
   1928	list_for_each_entry_safe(desc, _desc, &atchan->free_descs_list, desc_node) {
   1929		dev_dbg(chan2dev(chan), "%s: freeing descriptor %p\n", __func__, desc);
   1930		list_del(&desc->desc_node);
   1931		dma_pool_free(atxdmac->at_xdmac_desc_pool, desc, desc->tx_dma_desc.phys);
   1932	}
   1933
   1934	return;
   1935}
   1936
   1937static void at_xdmac_axi_config(struct platform_device *pdev)
   1938{
   1939	struct at_xdmac	*atxdmac = (struct at_xdmac *)platform_get_drvdata(pdev);
   1940	bool dev_m2m = false;
   1941	u32 dma_requests;
   1942
   1943	if (!atxdmac->layout->axi_config)
   1944		return; /* Not supported */
   1945
   1946	if (!of_property_read_u32(pdev->dev.of_node, "dma-requests",
   1947				  &dma_requests)) {
   1948		dev_info(&pdev->dev, "controller in mem2mem mode.\n");
   1949		dev_m2m = true;
   1950	}
   1951
   1952	if (dev_m2m) {
   1953		at_xdmac_write(atxdmac, AT_XDMAC_GCFG, AT_XDMAC_GCFG_M2M);
   1954		at_xdmac_write(atxdmac, AT_XDMAC_GWAC, AT_XDMAC_GWAC_M2M);
   1955	} else {
   1956		at_xdmac_write(atxdmac, AT_XDMAC_GCFG, AT_XDMAC_GCFG_P2M);
   1957		at_xdmac_write(atxdmac, AT_XDMAC_GWAC, AT_XDMAC_GWAC_P2M);
   1958	}
   1959}
   1960
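/*
 * System PM: prepare refuses to suspend (-EAGAIN) while a non-cyclic
 * transfer is still running. suspend pauses cyclic channels, saves the
 * per-channel CC/CIM/CNDA/CNDC registers and the global interrupt mask,
 * then switches the controller and its clock off. resume re-enables the
 * clock, restores the saved state and restarts the cyclic channels.
 */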
   1961static int __maybe_unused atmel_xdmac_prepare(struct device *dev)
   1962{
   1963	struct at_xdmac		*atxdmac = dev_get_drvdata(dev);
   1964	struct dma_chan		*chan, *_chan;
   1965
   1966	list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
   1967		struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
   1968
    1969		/* Wait for transfer completion, except in the cyclic case. */
   1970		if (at_xdmac_chan_is_enabled(atchan) && !at_xdmac_chan_is_cyclic(atchan))
   1971			return -EAGAIN;
   1972	}
   1973	return 0;
   1974}
   1975
   1976static int __maybe_unused atmel_xdmac_suspend(struct device *dev)
   1977{
   1978	struct at_xdmac		*atxdmac = dev_get_drvdata(dev);
   1979	struct dma_chan		*chan, *_chan;
   1980
   1981	list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
   1982		struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
   1983
   1984		atchan->save_cc = at_xdmac_chan_read(atchan, AT_XDMAC_CC);
   1985		if (at_xdmac_chan_is_cyclic(atchan)) {
   1986			if (!at_xdmac_chan_is_paused(atchan))
   1987				at_xdmac_device_pause(chan);
   1988			atchan->save_cim = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
   1989			atchan->save_cnda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA);
   1990			atchan->save_cndc = at_xdmac_chan_read(atchan, AT_XDMAC_CNDC);
   1991		}
   1992	}
   1993	atxdmac->save_gim = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
   1994
   1995	at_xdmac_off(atxdmac);
   1996	clk_disable_unprepare(atxdmac->clk);
   1997	return 0;
   1998}
   1999
   2000static int __maybe_unused atmel_xdmac_resume(struct device *dev)
   2001{
   2002	struct at_xdmac		*atxdmac = dev_get_drvdata(dev);
   2003	struct at_xdmac_chan	*atchan;
   2004	struct dma_chan		*chan, *_chan;
   2005	struct platform_device	*pdev = container_of(dev, struct platform_device, dev);
   2006	int			i;
   2007	int ret;
   2008
   2009	ret = clk_prepare_enable(atxdmac->clk);
   2010	if (ret)
   2011		return ret;
   2012
   2013	at_xdmac_axi_config(pdev);
   2014
   2015	/* Clear pending interrupts. */
   2016	for (i = 0; i < atxdmac->dma.chancnt; i++) {
   2017		atchan = &atxdmac->chan[i];
   2018		while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
   2019			cpu_relax();
   2020	}
   2021
   2022	at_xdmac_write(atxdmac, AT_XDMAC_GIE, atxdmac->save_gim);
   2023	list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
   2024		atchan = to_at_xdmac_chan(chan);
   2025		at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc);
   2026		if (at_xdmac_chan_is_cyclic(atchan)) {
   2027			if (at_xdmac_chan_is_paused(atchan))
   2028				at_xdmac_device_resume(chan);
   2029			at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda);
   2030			at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
   2031			at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim);
   2032			wmb();
   2033			at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
   2034		}
   2035	}
   2036	return 0;
   2037}
   2038
   2039static int at_xdmac_probe(struct platform_device *pdev)
   2040{
   2041	struct at_xdmac	*atxdmac;
   2042	int		irq, nr_channels, i, ret;
   2043	void __iomem	*base;
   2044	u32		reg;
   2045
   2046	irq = platform_get_irq(pdev, 0);
   2047	if (irq < 0)
   2048		return irq;
   2049
   2050	base = devm_platform_ioremap_resource(pdev, 0);
   2051	if (IS_ERR(base))
   2052		return PTR_ERR(base);
   2053
    2054	/*
    2055	 * Read the number of xdmac channels. The read helper can't be used
    2056	 * here since atxdmac is not allocated yet, and we need to know the
    2057	 * number of channels to size that allocation.
    2058	 */
   2059	reg = readl_relaxed(base + AT_XDMAC_GTYPE);
   2060	nr_channels = AT_XDMAC_NB_CH(reg);
   2061	if (nr_channels > AT_XDMAC_MAX_CHAN) {
   2062		dev_err(&pdev->dev, "invalid number of channels (%u)\n",
   2063			nr_channels);
   2064		return -EINVAL;
   2065	}
   2066
   2067	atxdmac = devm_kzalloc(&pdev->dev,
   2068			       struct_size(atxdmac, chan, nr_channels),
   2069			       GFP_KERNEL);
   2070	if (!atxdmac) {
   2071		dev_err(&pdev->dev, "can't allocate at_xdmac structure\n");
   2072		return -ENOMEM;
   2073	}
   2074
   2075	atxdmac->regs = base;
   2076	atxdmac->irq = irq;
   2077
   2078	atxdmac->layout = of_device_get_match_data(&pdev->dev);
   2079	if (!atxdmac->layout)
   2080		return -ENODEV;
   2081
   2082	atxdmac->clk = devm_clk_get(&pdev->dev, "dma_clk");
   2083	if (IS_ERR(atxdmac->clk)) {
   2084		dev_err(&pdev->dev, "can't get dma_clk\n");
   2085		return PTR_ERR(atxdmac->clk);
   2086	}
   2087
    2088	/* Do not use devres for the IRQ, to prevent races with the tasklet */
   2089	ret = request_irq(atxdmac->irq, at_xdmac_interrupt, 0, "at_xdmac", atxdmac);
   2090	if (ret) {
   2091		dev_err(&pdev->dev, "can't request irq\n");
   2092		return ret;
   2093	}
   2094
   2095	ret = clk_prepare_enable(atxdmac->clk);
   2096	if (ret) {
   2097		dev_err(&pdev->dev, "can't prepare or enable clock\n");
   2098		goto err_free_irq;
   2099	}
   2100
   2101	atxdmac->at_xdmac_desc_pool =
   2102		dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
   2103				sizeof(struct at_xdmac_desc), 4, 0);
   2104	if (!atxdmac->at_xdmac_desc_pool) {
   2105		dev_err(&pdev->dev, "no memory for descriptors dma pool\n");
   2106		ret = -ENOMEM;
   2107		goto err_clk_disable;
   2108	}
   2109
   2110	dma_cap_set(DMA_CYCLIC, atxdmac->dma.cap_mask);
   2111	dma_cap_set(DMA_INTERLEAVE, atxdmac->dma.cap_mask);
   2112	dma_cap_set(DMA_MEMCPY, atxdmac->dma.cap_mask);
   2113	dma_cap_set(DMA_MEMSET, atxdmac->dma.cap_mask);
   2114	dma_cap_set(DMA_MEMSET_SG, atxdmac->dma.cap_mask);
   2115	dma_cap_set(DMA_SLAVE, atxdmac->dma.cap_mask);
    2116	/*
    2117	 * Without DMA_PRIVATE the driver is not able to allocate more than
    2118	 * one channel: the second allocation fails in private_candidate.
    2119	 */
   2120	dma_cap_set(DMA_PRIVATE, atxdmac->dma.cap_mask);
   2121	atxdmac->dma.dev				= &pdev->dev;
   2122	atxdmac->dma.device_alloc_chan_resources	= at_xdmac_alloc_chan_resources;
   2123	atxdmac->dma.device_free_chan_resources		= at_xdmac_free_chan_resources;
   2124	atxdmac->dma.device_tx_status			= at_xdmac_tx_status;
   2125	atxdmac->dma.device_issue_pending		= at_xdmac_issue_pending;
   2126	atxdmac->dma.device_prep_dma_cyclic		= at_xdmac_prep_dma_cyclic;
   2127	atxdmac->dma.device_prep_interleaved_dma	= at_xdmac_prep_interleaved;
   2128	atxdmac->dma.device_prep_dma_memcpy		= at_xdmac_prep_dma_memcpy;
   2129	atxdmac->dma.device_prep_dma_memset		= at_xdmac_prep_dma_memset;
   2130	atxdmac->dma.device_prep_dma_memset_sg		= at_xdmac_prep_dma_memset_sg;
   2131	atxdmac->dma.device_prep_slave_sg		= at_xdmac_prep_slave_sg;
   2132	atxdmac->dma.device_config			= at_xdmac_device_config;
   2133	atxdmac->dma.device_pause			= at_xdmac_device_pause;
   2134	atxdmac->dma.device_resume			= at_xdmac_device_resume;
   2135	atxdmac->dma.device_terminate_all		= at_xdmac_device_terminate_all;
   2136	atxdmac->dma.src_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
   2137	atxdmac->dma.dst_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
   2138	atxdmac->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
   2139	atxdmac->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
   2140
   2141	/* Disable all chans and interrupts. */
   2142	at_xdmac_off(atxdmac);
   2143
   2144	/* Init channels. */
   2145	INIT_LIST_HEAD(&atxdmac->dma.channels);
   2146	for (i = 0; i < nr_channels; i++) {
   2147		struct at_xdmac_chan *atchan = &atxdmac->chan[i];
   2148
   2149		atchan->chan.device = &atxdmac->dma;
   2150		list_add_tail(&atchan->chan.device_node,
   2151			      &atxdmac->dma.channels);
   2152
   2153		atchan->ch_regs = at_xdmac_chan_reg_base(atxdmac, i);
   2154		atchan->mask = 1 << i;
   2155
   2156		spin_lock_init(&atchan->lock);
   2157		INIT_LIST_HEAD(&atchan->xfers_list);
   2158		INIT_LIST_HEAD(&atchan->free_descs_list);
   2159		tasklet_setup(&atchan->tasklet, at_xdmac_tasklet);
   2160
   2161		/* Clear pending interrupts. */
   2162		while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
   2163			cpu_relax();
   2164	}
   2165	platform_set_drvdata(pdev, atxdmac);
   2166
   2167	ret = dma_async_device_register(&atxdmac->dma);
   2168	if (ret) {
    2169		dev_err(&pdev->dev, "failed to register DMA engine device\n");
   2170		goto err_clk_disable;
   2171	}
   2172
   2173	ret = of_dma_controller_register(pdev->dev.of_node,
   2174					 at_xdmac_xlate, atxdmac);
   2175	if (ret) {
    2176		dev_err(&pdev->dev, "could not register OF DMA controller\n");
   2177		goto err_dma_unregister;
   2178	}
   2179
   2180	dev_info(&pdev->dev, "%d channels, mapped at 0x%p\n",
   2181		 nr_channels, atxdmac->regs);
   2182
   2183	at_xdmac_axi_config(pdev);
   2184
   2185	return 0;
   2186
   2187err_dma_unregister:
   2188	dma_async_device_unregister(&atxdmac->dma);
   2189err_clk_disable:
   2190	clk_disable_unprepare(atxdmac->clk);
   2191err_free_irq:
   2192	free_irq(atxdmac->irq, atxdmac);
   2193	return ret;
   2194}
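
/*
 * Illustrative consumer sketch (not part of this driver): once the
 * controller is registered with of_dma_controller_register() above, a
 * client device with "dmas"/"dma-names" properties in its DT node can
 * request a channel by name; "tx" here is just an example name.
 *
 *	struct dma_chan *chan;
 *
 *	chan = dma_request_chan(&client_pdev->dev, "tx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 */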
   2195
   2196static int at_xdmac_remove(struct platform_device *pdev)
   2197{
   2198	struct at_xdmac	*atxdmac = (struct at_xdmac *)platform_get_drvdata(pdev);
   2199	int		i;
   2200
   2201	at_xdmac_off(atxdmac);
   2202	of_dma_controller_free(pdev->dev.of_node);
   2203	dma_async_device_unregister(&atxdmac->dma);
   2204	clk_disable_unprepare(atxdmac->clk);
   2205
   2206	free_irq(atxdmac->irq, atxdmac);
   2207
   2208	for (i = 0; i < atxdmac->dma.chancnt; i++) {
   2209		struct at_xdmac_chan *atchan = &atxdmac->chan[i];
   2210
   2211		tasklet_kill(&atchan->tasklet);
   2212		at_xdmac_free_chan_resources(&atchan->chan);
   2213	}
   2214
   2215	return 0;
   2216}
   2217
   2218static const struct dev_pm_ops __maybe_unused atmel_xdmac_dev_pm_ops = {
   2219	.prepare	= atmel_xdmac_prepare,
   2220	SET_LATE_SYSTEM_SLEEP_PM_OPS(atmel_xdmac_suspend, atmel_xdmac_resume)
   2221};
   2222
   2223static const struct of_device_id atmel_xdmac_dt_ids[] = {
   2224	{
   2225		.compatible = "atmel,sama5d4-dma",
   2226		.data = &at_xdmac_sama5d4_layout,
   2227	}, {
   2228		.compatible = "microchip,sama7g5-dma",
   2229		.data = &at_xdmac_sama7g5_layout,
   2230	}, {
   2231		/* sentinel */
   2232	}
   2233};
   2234MODULE_DEVICE_TABLE(of, atmel_xdmac_dt_ids);
   2235
   2236static struct platform_driver at_xdmac_driver = {
   2237	.probe		= at_xdmac_probe,
   2238	.remove		= at_xdmac_remove,
   2239	.driver = {
   2240		.name		= "at_xdmac",
   2241		.of_match_table	= of_match_ptr(atmel_xdmac_dt_ids),
   2242		.pm		= pm_ptr(&atmel_xdmac_dev_pm_ops),
   2243	}
   2244};
   2245
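/*
 * Registered at subsys_initcall() time rather than with
 * module_platform_driver(): when built in, subsys_initcall() runs before
 * device_initcall(), so the DMA controller is available before most client
 * drivers start probing.
 */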
   2246static int __init at_xdmac_init(void)
   2247{
   2248	return platform_driver_register(&at_xdmac_driver);
   2249}
   2250subsys_initcall(at_xdmac_init);
   2251
   2252static void __exit at_xdmac_exit(void)
   2253{
   2254	platform_driver_unregister(&at_xdmac_driver);
   2255}
   2256module_exit(at_xdmac_exit);
   2257
   2258MODULE_DESCRIPTION("Atmel Extended DMA Controller driver");
   2259MODULE_AUTHOR("Ludovic Desroches <ludovic.desroches@atmel.com>");
   2260MODULE_LICENSE("GPL");