cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

zynqmp_dma.c (32526B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/*
      3 * DMA driver for Xilinx ZynqMP DMA Engine
      4 *
      5 * Copyright (C) 2016 Xilinx, Inc. All rights reserved.
      6 */
      7
      8#include <linux/bitops.h>
      9#include <linux/dma-mapping.h>
     10#include <linux/init.h>
     11#include <linux/interrupt.h>
     12#include <linux/io.h>
     13#include <linux/module.h>
     14#include <linux/of_dma.h>
     15#include <linux/of_platform.h>
     16#include <linux/slab.h>
     17#include <linux/clk.h>
     18#include <linux/io-64-nonatomic-lo-hi.h>
     19#include <linux/pm_runtime.h>
     20
     21#include "../dmaengine.h"
     22
     23/* Register Offsets */
     24#define ZYNQMP_DMA_ISR			0x100
     25#define ZYNQMP_DMA_IMR			0x104
     26#define ZYNQMP_DMA_IER			0x108
     27#define ZYNQMP_DMA_IDS			0x10C
     28#define ZYNQMP_DMA_CTRL0		0x110
     29#define ZYNQMP_DMA_CTRL1		0x114
     30#define ZYNQMP_DMA_DATA_ATTR		0x120
     31#define ZYNQMP_DMA_DSCR_ATTR		0x124
     32#define ZYNQMP_DMA_SRC_DSCR_WRD0	0x128
     33#define ZYNQMP_DMA_SRC_DSCR_WRD1	0x12C
     34#define ZYNQMP_DMA_SRC_DSCR_WRD2	0x130
     35#define ZYNQMP_DMA_SRC_DSCR_WRD3	0x134
     36#define ZYNQMP_DMA_DST_DSCR_WRD0	0x138
     37#define ZYNQMP_DMA_DST_DSCR_WRD1	0x13C
     38#define ZYNQMP_DMA_DST_DSCR_WRD2	0x140
     39#define ZYNQMP_DMA_DST_DSCR_WRD3	0x144
     40#define ZYNQMP_DMA_SRC_START_LSB	0x158
     41#define ZYNQMP_DMA_SRC_START_MSB	0x15C
     42#define ZYNQMP_DMA_DST_START_LSB	0x160
     43#define ZYNQMP_DMA_DST_START_MSB	0x164
     44#define ZYNQMP_DMA_TOTAL_BYTE		0x188
     45#define ZYNQMP_DMA_RATE_CTRL		0x18C
     46#define ZYNQMP_DMA_IRQ_SRC_ACCT		0x190
     47#define ZYNQMP_DMA_IRQ_DST_ACCT		0x194
     48#define ZYNQMP_DMA_CTRL2		0x200
     49
     50/* Interrupt registers bit field definitions */
     51#define ZYNQMP_DMA_DONE			BIT(10)
     52#define ZYNQMP_DMA_AXI_WR_DATA		BIT(9)
     53#define ZYNQMP_DMA_AXI_RD_DATA		BIT(8)
     54#define ZYNQMP_DMA_AXI_RD_DST_DSCR	BIT(7)
     55#define ZYNQMP_DMA_AXI_RD_SRC_DSCR	BIT(6)
     56#define ZYNQMP_DMA_IRQ_DST_ACCT_ERR	BIT(5)
     57#define ZYNQMP_DMA_IRQ_SRC_ACCT_ERR	BIT(4)
     58#define ZYNQMP_DMA_BYTE_CNT_OVRFL	BIT(3)
     59#define ZYNQMP_DMA_DST_DSCR_DONE	BIT(2)
     60#define ZYNQMP_DMA_INV_APB		BIT(0)
     61
     62/* Control 0 register bit field definitions */
     63#define ZYNQMP_DMA_OVR_FETCH		BIT(7)
     64#define ZYNQMP_DMA_POINT_TYPE_SG	BIT(6)
     65#define ZYNQMP_DMA_RATE_CTRL_EN		BIT(3)
     66
     67/* Control 1 register bit field definitions */
     68#define ZYNQMP_DMA_SRC_ISSUE		GENMASK(4, 0)
     69
     70/* Data Attribute register bit field definitions */
     71#define ZYNQMP_DMA_ARBURST		GENMASK(27, 26)
     72#define ZYNQMP_DMA_ARCACHE		GENMASK(25, 22)
     73#define ZYNQMP_DMA_ARCACHE_OFST		22
     74#define ZYNQMP_DMA_ARQOS		GENMASK(21, 18)
     75#define ZYNQMP_DMA_ARQOS_OFST		18
     76#define ZYNQMP_DMA_ARLEN		GENMASK(17, 14)
     77#define ZYNQMP_DMA_ARLEN_OFST		14
     78#define ZYNQMP_DMA_AWBURST		GENMASK(13, 12)
     79#define ZYNQMP_DMA_AWCACHE		GENMASK(11, 8)
     80#define ZYNQMP_DMA_AWCACHE_OFST		8
     81#define ZYNQMP_DMA_AWQOS		GENMASK(7, 4)
     82#define ZYNQMP_DMA_AWQOS_OFST		4
     83#define ZYNQMP_DMA_AWLEN		GENMASK(3, 0)
     84#define ZYNQMP_DMA_AWLEN_OFST		0
     85
     86/* Descriptor Attribute register bit field definitions */
     87#define ZYNQMP_DMA_AXCOHRNT		BIT(8)
     88#define ZYNQMP_DMA_AXCACHE		GENMASK(7, 4)
     89#define ZYNQMP_DMA_AXCACHE_OFST		4
     90#define ZYNQMP_DMA_AXQOS		GENMASK(3, 0)
     91#define ZYNQMP_DMA_AXQOS_OFST		0
     92
     93/* Control register 2 bit field definitions */
     94#define ZYNQMP_DMA_ENABLE		BIT(0)
     95
     96/* Buffer Descriptor definitions */
     97#define ZYNQMP_DMA_DESC_CTRL_STOP	0x10
     98#define ZYNQMP_DMA_DESC_CTRL_COMP_INT	0x4
     99#define ZYNQMP_DMA_DESC_CTRL_SIZE_256	0x2
    100#define ZYNQMP_DMA_DESC_CTRL_COHRNT	0x1
    101
    102/* Interrupt Mask specific definitions */
    103#define ZYNQMP_DMA_INT_ERR	(ZYNQMP_DMA_AXI_RD_DATA | \
    104				ZYNQMP_DMA_AXI_WR_DATA | \
    105				ZYNQMP_DMA_AXI_RD_DST_DSCR | \
    106				ZYNQMP_DMA_AXI_RD_SRC_DSCR | \
    107				ZYNQMP_DMA_INV_APB)
    108#define ZYNQMP_DMA_INT_OVRFL	(ZYNQMP_DMA_BYTE_CNT_OVRFL | \
    109				ZYNQMP_DMA_IRQ_SRC_ACCT_ERR | \
    110				ZYNQMP_DMA_IRQ_DST_ACCT_ERR)
    111#define ZYNQMP_DMA_INT_DONE	(ZYNQMP_DMA_DONE | ZYNQMP_DMA_DST_DSCR_DONE)
    112#define ZYNQMP_DMA_INT_EN_DEFAULT_MASK	(ZYNQMP_DMA_INT_DONE | \
    113					ZYNQMP_DMA_INT_ERR | \
    114					ZYNQMP_DMA_INT_OVRFL | \
    115					ZYNQMP_DMA_DST_DSCR_DONE)
    116
    117/* Max number of descriptors per channel */
    118#define ZYNQMP_DMA_NUM_DESCS	32
    119
    120/* Max transfer size per descriptor */
    121#define ZYNQMP_DMA_MAX_TRANS_LEN	0x40000000
    122
    123/* Max burst lengths */
    124#define ZYNQMP_DMA_MAX_DST_BURST_LEN    32768U
    125#define ZYNQMP_DMA_MAX_SRC_BURST_LEN    32768U
    126
    127/* Reset values for data attributes */
    128#define ZYNQMP_DMA_AXCACHE_VAL		0xF
    129
    130#define ZYNQMP_DMA_SRC_ISSUE_RST_VAL	0x1F
    131
    132#define ZYNQMP_DMA_IDS_DEFAULT_MASK	0xFFF
    133
    134/* Bus width in bits */
    135#define ZYNQMP_DMA_BUS_WIDTH_64		64
    136#define ZYNQMP_DMA_BUS_WIDTH_128	128
    137
    138#define ZDMA_PM_TIMEOUT			100
    139
    140#define ZYNQMP_DMA_DESC_SIZE(chan)	(chan->desc_size)
    141
    142#define to_chan(chan)		container_of(chan, struct zynqmp_dma_chan, \
    143					     common)
    144#define tx_to_desc(tx)		container_of(tx, struct zynqmp_dma_desc_sw, \
    145					     async_tx)
    146
/**
 * struct zynqmp_dma_desc_ll - Hw linked list descriptor
 * @addr: Buffer address
 * @size: Size of the buffer
 * @ctrl: Control word
 * @nxtdscraddr: Next descriptor base address
 * @rsvd: Reserved field and for Hw internal use.
 *
 * This layout is consumed directly by the DMA engine; do not reorder or
 * resize the members.
 */
struct zynqmp_dma_desc_ll {
	u64 addr;
	u32 size;
	u32 ctrl;
	u64 nxtdscraddr;
	u64 rsvd;
};
    162
/**
 * struct zynqmp_dma_desc_sw - Per Transaction structure
 * @src: Source address for simple mode dma
 * @dst: Destination address for simple mode dma
 * @len: Transfer length for simple mode dma
 * @node: Node in the channel descriptor list
 * @tx_list: List head for the current transfer
 * @async_tx: Async transaction descriptor
 * @src_v: Virtual address of the src descriptor
 * @src_p: Physical address of the src descriptor
 * @dst_v: Virtual address of the dst descriptor
 * @dst_p: Physical address of the dst descriptor
 *
 * The src and dst hw descriptors of a transaction are allocated
 * back-to-back in the coherent pool: the dst descriptor immediately
 * follows the src descriptor (see zynqmp_dma_alloc_chan_resources()).
 */
struct zynqmp_dma_desc_sw {
	u64 src;
	u64 dst;
	u32 len;
	struct list_head node;
	struct list_head tx_list;
	struct dma_async_tx_descriptor async_tx;
	struct zynqmp_dma_desc_ll *src_v;
	dma_addr_t src_p;
	struct zynqmp_dma_desc_ll *dst_v;
	dma_addr_t dst_p;
};
    188
/**
 * struct zynqmp_dma_chan - Driver specific DMA channel structure
 * @zdev: Driver specific device structure
 * @regs: Control registers offset
 * @lock: Descriptor operation lock
 * @pending_list: Descriptors waiting
 * @free_list: Descriptors free
 * @active_list: Descriptors active
 * @sw_desc_pool: SW descriptor pool
 * @done_list: Complete descriptors
 * @common: DMA common channel
 * @desc_pool_v: Statically allocated descriptor base
 * @desc_pool_p: Physical allocated descriptor base
 * @desc_free_cnt: Descriptor available count
 * @dev: The dma device
 * @irq: Channel IRQ
 * @is_dmacoherent: Tells whether dma operations are coherent or not
 * @tasklet: Cleanup work after irq
 * @idle: Channel idle state (true when no transfer is in flight)
 * @desc_size: Size of the low level descriptor
 * @err: Channel has errors
 * @bus_width: Bus width
 * @src_burst_len: Source burst length
 * @dst_burst_len: Dest burst length
 */
struct zynqmp_dma_chan {
	struct zynqmp_dma_device *zdev;
	void __iomem *regs;
	spinlock_t lock;
	struct list_head pending_list;
	struct list_head free_list;
	struct list_head active_list;
	struct zynqmp_dma_desc_sw *sw_desc_pool;
	struct list_head done_list;
	struct dma_chan common;
	void *desc_pool_v;
	dma_addr_t desc_pool_p;
	u32 desc_free_cnt;
	struct device *dev;
	int irq;
	bool is_dmacoherent;
	struct tasklet_struct tasklet;
	bool idle;
	size_t desc_size;
	bool err;
	u32 bus_width;
	u32 src_burst_len;
	u32 dst_burst_len;
};
    238
/**
 * struct zynqmp_dma_device - DMA device structure
 * @dev: Device Structure
 * @common: DMA device structure
 * @chan: Driver specific DMA channel (this controller exposes a single
 *        channel; see of_zynqmp_dma_xlate())
 * @clk_main: Pointer to main clock
 * @clk_apb: Pointer to apb clock
 */
struct zynqmp_dma_device {
	struct device *dev;
	struct dma_device common;
	struct zynqmp_dma_chan *chan;
	struct clk *clk_main;
	struct clk *clk_apb;
};
    254
    255static inline void zynqmp_dma_writeq(struct zynqmp_dma_chan *chan, u32 reg,
    256				     u64 value)
    257{
    258	lo_hi_writeq(value, chan->regs + reg);
    259}
    260
    261/**
    262 * zynqmp_dma_update_desc_to_ctrlr - Updates descriptor to the controller
    263 * @chan: ZynqMP DMA DMA channel pointer
    264 * @desc: Transaction descriptor pointer
    265 */
    266static void zynqmp_dma_update_desc_to_ctrlr(struct zynqmp_dma_chan *chan,
    267				      struct zynqmp_dma_desc_sw *desc)
    268{
    269	dma_addr_t addr;
    270
    271	addr = desc->src_p;
    272	zynqmp_dma_writeq(chan, ZYNQMP_DMA_SRC_START_LSB, addr);
    273	addr = desc->dst_p;
    274	zynqmp_dma_writeq(chan, ZYNQMP_DMA_DST_START_LSB, addr);
    275}
    276
    277/**
    278 * zynqmp_dma_desc_config_eod - Mark the descriptor as end descriptor
    279 * @chan: ZynqMP DMA channel pointer
    280 * @desc: Hw descriptor pointer
    281 */
    282static void zynqmp_dma_desc_config_eod(struct zynqmp_dma_chan *chan,
    283				       void *desc)
    284{
    285	struct zynqmp_dma_desc_ll *hw = (struct zynqmp_dma_desc_ll *)desc;
    286
    287	hw->ctrl |= ZYNQMP_DMA_DESC_CTRL_STOP;
    288	hw++;
    289	hw->ctrl |= ZYNQMP_DMA_DESC_CTRL_COMP_INT | ZYNQMP_DMA_DESC_CTRL_STOP;
    290}
    291
    292/**
    293 * zynqmp_dma_config_sg_ll_desc - Configure the linked list descriptor
    294 * @chan: ZynqMP DMA channel pointer
    295 * @sdesc: Hw descriptor pointer
    296 * @src: Source buffer address
    297 * @dst: Destination buffer address
    298 * @len: Transfer length
    299 * @prev: Previous hw descriptor pointer
    300 */
    301static void zynqmp_dma_config_sg_ll_desc(struct zynqmp_dma_chan *chan,
    302				   struct zynqmp_dma_desc_ll *sdesc,
    303				   dma_addr_t src, dma_addr_t dst, size_t len,
    304				   struct zynqmp_dma_desc_ll *prev)
    305{
    306	struct zynqmp_dma_desc_ll *ddesc = sdesc + 1;
    307
    308	sdesc->size = ddesc->size = len;
    309	sdesc->addr = src;
    310	ddesc->addr = dst;
    311
    312	sdesc->ctrl = ddesc->ctrl = ZYNQMP_DMA_DESC_CTRL_SIZE_256;
    313	if (chan->is_dmacoherent) {
    314		sdesc->ctrl |= ZYNQMP_DMA_DESC_CTRL_COHRNT;
    315		ddesc->ctrl |= ZYNQMP_DMA_DESC_CTRL_COHRNT;
    316	}
    317
    318	if (prev) {
    319		dma_addr_t addr = chan->desc_pool_p +
    320			    ((uintptr_t)sdesc - (uintptr_t)chan->desc_pool_v);
    321		ddesc = prev + 1;
    322		prev->nxtdscraddr = addr;
    323		ddesc->nxtdscraddr = addr + ZYNQMP_DMA_DESC_SIZE(chan);
    324	}
    325}
    326
/**
 * zynqmp_dma_init - Initialize the channel
 * @chan: ZynqMP DMA channel pointer
 *
 * Masks all interrupt sources, acknowledges stale status, programs the
 * descriptor/data attribute registers for coherent operation when
 * requested, clears the interrupt accounting counters and marks the
 * channel idle.
 */
static void zynqmp_dma_init(struct zynqmp_dma_chan *chan)
{
	u32 val;

	/* Disable all interrupt sources, then ack whatever is pending */
	writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS);
	val = readl(chan->regs + ZYNQMP_DMA_ISR);
	writel(val, chan->regs + ZYNQMP_DMA_ISR);

	if (chan->is_dmacoherent) {
		/* Coherent descriptor fetches: AxCOHRNT plus AxCACHE value */
		val = ZYNQMP_DMA_AXCOHRNT;
		val = (val & ~ZYNQMP_DMA_AXCACHE) |
			(ZYNQMP_DMA_AXCACHE_VAL << ZYNQMP_DMA_AXCACHE_OFST);
		writel(val, chan->regs + ZYNQMP_DMA_DSCR_ATTR);
	}

	val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR);
	if (chan->is_dmacoherent) {
		/* Coherent data transfers: update ARCACHE/AWCACHE fields */
		val = (val & ~ZYNQMP_DMA_ARCACHE) |
			(ZYNQMP_DMA_AXCACHE_VAL << ZYNQMP_DMA_ARCACHE_OFST);
		val = (val & ~ZYNQMP_DMA_AWCACHE) |
			(ZYNQMP_DMA_AXCACHE_VAL << ZYNQMP_DMA_AWCACHE_OFST);
	}
	writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR);

	/* Clearing the interrupt account registers (cleared by reading) */
	val = readl(chan->regs + ZYNQMP_DMA_IRQ_SRC_ACCT);
	val = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT);

	chan->idle = true;
}
    361
/**
 * zynqmp_dma_tx_submit - Submit DMA transaction
 * @tx: Async transaction descriptor pointer
 *
 * Assigns a cookie and appends the transaction to the pending list.
 * If transactions are already pending, the hw descriptors at the tail of
 * the existing chain are re-linked to the new transaction so the engine
 * can flow into it without stopping.
 *
 * Return: cookie value
 */
static dma_cookie_t zynqmp_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct zynqmp_dma_chan *chan = to_chan(tx->chan);
	struct zynqmp_dma_desc_sw *desc, *new;
	dma_cookie_t cookie;
	unsigned long irqflags;

	new = tx_to_desc(tx);
	spin_lock_irqsave(&chan->lock, irqflags);
	cookie = dma_cookie_assign(tx);

	if (!list_empty(&chan->pending_list)) {
		/* Find the last sw descriptor of the last pending transaction */
		desc = list_last_entry(&chan->pending_list,
				     struct zynqmp_dma_desc_sw, node);
		if (!list_empty(&desc->tx_list))
			desc = list_last_entry(&desc->tx_list,
					       struct zynqmp_dma_desc_sw, node);
		/* Chain its hw descriptors to the new transaction and
		 * clear their STOP bits so the engine keeps fetching.
		 */
		desc->src_v->nxtdscraddr = new->src_p;
		desc->src_v->ctrl &= ~ZYNQMP_DMA_DESC_CTRL_STOP;
		desc->dst_v->nxtdscraddr = new->dst_p;
		desc->dst_v->ctrl &= ~ZYNQMP_DMA_DESC_CTRL_STOP;
	}

	list_add_tail(&new->node, &chan->pending_list);
	spin_unlock_irqrestore(&chan->lock, irqflags);

	return cookie;
}
    396
    397/**
    398 * zynqmp_dma_get_descriptor - Get the sw descriptor from the pool
    399 * @chan: ZynqMP DMA channel pointer
    400 *
    401 * Return: The sw descriptor
    402 */
    403static struct zynqmp_dma_desc_sw *
    404zynqmp_dma_get_descriptor(struct zynqmp_dma_chan *chan)
    405{
    406	struct zynqmp_dma_desc_sw *desc;
    407	unsigned long irqflags;
    408
    409	spin_lock_irqsave(&chan->lock, irqflags);
    410	desc = list_first_entry(&chan->free_list,
    411				struct zynqmp_dma_desc_sw, node);
    412	list_del(&desc->node);
    413	spin_unlock_irqrestore(&chan->lock, irqflags);
    414
    415	INIT_LIST_HEAD(&desc->tx_list);
    416	/* Clear the src and dst descriptor memory */
    417	memset((void *)desc->src_v, 0, ZYNQMP_DMA_DESC_SIZE(chan));
    418	memset((void *)desc->dst_v, 0, ZYNQMP_DMA_DESC_SIZE(chan));
    419
    420	return desc;
    421}
    422
/**
 * zynqmp_dma_free_descriptor - Return a transaction to the free pool
 * @chan: ZynqMP DMA channel pointer
 * @sdesc: Transaction descriptor pointer
 *
 * Moves @sdesc and every child descriptor on its tx_list back to the
 * channel's free list, updating the free count for each.  Callers hold
 * chan->lock.
 */
static void zynqmp_dma_free_descriptor(struct zynqmp_dma_chan *chan,
				 struct zynqmp_dma_desc_sw *sdesc)
{
	struct zynqmp_dma_desc_sw *child, *next;

	chan->desc_free_cnt++;
	list_move_tail(&sdesc->node, &chan->free_list);
	list_for_each_entry_safe(child, next, &sdesc->tx_list, node) {
		chan->desc_free_cnt++;
		list_move_tail(&child->node, &chan->free_list);
	}
}
    440
    441/**
    442 * zynqmp_dma_free_desc_list - Free descriptors list
    443 * @chan: ZynqMP DMA channel pointer
    444 * @list: List to parse and delete the descriptor
    445 */
    446static void zynqmp_dma_free_desc_list(struct zynqmp_dma_chan *chan,
    447				      struct list_head *list)
    448{
    449	struct zynqmp_dma_desc_sw *desc, *next;
    450
    451	list_for_each_entry_safe(desc, next, list, node)
    452		zynqmp_dma_free_descriptor(chan, desc);
    453}
    454
    455/**
    456 * zynqmp_dma_alloc_chan_resources - Allocate channel resources
    457 * @dchan: DMA channel
    458 *
    459 * Return: Number of descriptors on success and failure value on error
    460 */
    461static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
    462{
    463	struct zynqmp_dma_chan *chan = to_chan(dchan);
    464	struct zynqmp_dma_desc_sw *desc;
    465	int i, ret;
    466
    467	ret = pm_runtime_resume_and_get(chan->dev);
    468	if (ret < 0)
    469		return ret;
    470
    471	chan->sw_desc_pool = kcalloc(ZYNQMP_DMA_NUM_DESCS, sizeof(*desc),
    472				     GFP_KERNEL);
    473	if (!chan->sw_desc_pool)
    474		return -ENOMEM;
    475
    476	chan->idle = true;
    477	chan->desc_free_cnt = ZYNQMP_DMA_NUM_DESCS;
    478
    479	INIT_LIST_HEAD(&chan->free_list);
    480
    481	for (i = 0; i < ZYNQMP_DMA_NUM_DESCS; i++) {
    482		desc = chan->sw_desc_pool + i;
    483		dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
    484		desc->async_tx.tx_submit = zynqmp_dma_tx_submit;
    485		list_add_tail(&desc->node, &chan->free_list);
    486	}
    487
    488	chan->desc_pool_v = dma_alloc_coherent(chan->dev,
    489					       (2 * ZYNQMP_DMA_DESC_SIZE(chan) *
    490					       ZYNQMP_DMA_NUM_DESCS),
    491					       &chan->desc_pool_p, GFP_KERNEL);
    492	if (!chan->desc_pool_v)
    493		return -ENOMEM;
    494
    495	for (i = 0; i < ZYNQMP_DMA_NUM_DESCS; i++) {
    496		desc = chan->sw_desc_pool + i;
    497		desc->src_v = (struct zynqmp_dma_desc_ll *) (chan->desc_pool_v +
    498					(i * ZYNQMP_DMA_DESC_SIZE(chan) * 2));
    499		desc->dst_v = (struct zynqmp_dma_desc_ll *) (desc->src_v + 1);
    500		desc->src_p = chan->desc_pool_p +
    501				(i * ZYNQMP_DMA_DESC_SIZE(chan) * 2);
    502		desc->dst_p = desc->src_p + ZYNQMP_DMA_DESC_SIZE(chan);
    503	}
    504
    505	return ZYNQMP_DMA_NUM_DESCS;
    506}
    507
/**
 * zynqmp_dma_start - Start DMA channel
 * @chan: ZynqMP DMA channel pointer
 *
 * Unmasks the default interrupt set, zeroes the transferred-byte counter
 * and enables the engine.  chan->idle is cleared before the enable write.
 */
static void zynqmp_dma_start(struct zynqmp_dma_chan *chan)
{
	writel(ZYNQMP_DMA_INT_EN_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IER);
	writel(0, chan->regs + ZYNQMP_DMA_TOTAL_BYTE);
	chan->idle = false;
	writel(ZYNQMP_DMA_ENABLE, chan->regs + ZYNQMP_DMA_CTRL2);
}
    519
/**
 * zynqmp_dma_handle_ovfl_int - Process the overflow interrupt
 * @chan: ZynqMP DMA channel pointer
 * @status: Interrupt status value
 *
 * Resets the byte counter on overflow and clears the per-direction
 * interrupt accounting counters by reading them (see the clearing reads
 * in zynqmp_dma_init()).
 */
static void zynqmp_dma_handle_ovfl_int(struct zynqmp_dma_chan *chan, u32 status)
{
	if (status & ZYNQMP_DMA_BYTE_CNT_OVRFL)
		writel(0, chan->regs + ZYNQMP_DMA_TOTAL_BYTE);
	if (status & ZYNQMP_DMA_IRQ_DST_ACCT_ERR)
		readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT);
	if (status & ZYNQMP_DMA_IRQ_SRC_ACCT_ERR)
		readl(chan->regs + ZYNQMP_DMA_IRQ_SRC_ACCT);
}
    534
    535static void zynqmp_dma_config(struct zynqmp_dma_chan *chan)
    536{
    537	u32 val, burst_val;
    538
    539	val = readl(chan->regs + ZYNQMP_DMA_CTRL0);
    540	val |= ZYNQMP_DMA_POINT_TYPE_SG;
    541	writel(val, chan->regs + ZYNQMP_DMA_CTRL0);
    542
    543	val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR);
    544	burst_val = __ilog2_u32(chan->src_burst_len);
    545	val = (val & ~ZYNQMP_DMA_ARLEN) |
    546		((burst_val << ZYNQMP_DMA_ARLEN_OFST) & ZYNQMP_DMA_ARLEN);
    547	burst_val = __ilog2_u32(chan->dst_burst_len);
    548	val = (val & ~ZYNQMP_DMA_AWLEN) |
    549		((burst_val << ZYNQMP_DMA_AWLEN_OFST) & ZYNQMP_DMA_AWLEN);
    550	writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR);
    551}
    552
    553/**
    554 * zynqmp_dma_device_config - Zynqmp dma device configuration
    555 * @dchan: DMA channel
    556 * @config: DMA device config
    557 *
    558 * Return: 0 always
    559 */
    560static int zynqmp_dma_device_config(struct dma_chan *dchan,
    561				    struct dma_slave_config *config)
    562{
    563	struct zynqmp_dma_chan *chan = to_chan(dchan);
    564
    565	chan->src_burst_len = clamp(config->src_maxburst, 1U,
    566		ZYNQMP_DMA_MAX_SRC_BURST_LEN);
    567	chan->dst_burst_len = clamp(config->dst_maxburst, 1U,
    568		ZYNQMP_DMA_MAX_DST_BURST_LEN);
    569
    570	return 0;
    571}
    572
/**
 * zynqmp_dma_start_transfer - Initiate the new transfer
 * @chan: ZynqMP DMA channel pointer
 *
 * Moves everything on the pending list to the active list and programs
 * the controller with the head transaction.  Does nothing while the
 * engine is busy or when nothing is pending.  Callers hold chan->lock
 * (see zynqmp_dma_issue_pending() and zynqmp_dma_do_tasklet()).
 */
static void zynqmp_dma_start_transfer(struct zynqmp_dma_chan *chan)
{
	struct zynqmp_dma_desc_sw *desc;

	/* Hardware is still working on the previous chain */
	if (!chan->idle)
		return;

	zynqmp_dma_config(chan);

	desc = list_first_entry_or_null(&chan->pending_list,
					struct zynqmp_dma_desc_sw, node);
	if (!desc)
		return;

	/* Activate everything queued so far and point hw at the head */
	list_splice_tail_init(&chan->pending_list, &chan->active_list);
	zynqmp_dma_update_desc_to_ctrlr(chan, desc);
	zynqmp_dma_start(chan);
}
    595
    596
/**
 * zynqmp_dma_chan_desc_cleanup - Cleanup the completed descriptors
 * @chan: ZynqMP DMA channel
 *
 * Invokes the completion callback for every descriptor on the done list
 * and returns the descriptors to the free pool.
 */
static void zynqmp_dma_chan_desc_cleanup(struct zynqmp_dma_chan *chan)
{
	struct zynqmp_dma_desc_sw *desc, *next;
	unsigned long irqflags;

	spin_lock_irqsave(&chan->lock, irqflags);

	list_for_each_entry_safe(desc, next, &chan->done_list, node) {
		struct dmaengine_desc_callback cb;

		dmaengine_desc_get_callback(&desc->async_tx, &cb);
		if (dmaengine_desc_callback_valid(&cb)) {
			/*
			 * Client callbacks must not run under the channel
			 * lock, so drop it for the duration of the call.
			 * NOTE(review): done_list can change while the lock
			 * is dropped, which would leave @next stale inside
			 * this _safe iteration — confirm other producers/
			 * consumers of done_list are excluded here.
			 */
			spin_unlock_irqrestore(&chan->lock, irqflags);
			dmaengine_desc_callback_invoke(&cb, NULL);
			spin_lock_irqsave(&chan->lock, irqflags);
		}

		/* Run any dependencies, then free the descriptor */
		zynqmp_dma_free_descriptor(chan, desc);
	}

	spin_unlock_irqrestore(&chan->lock, irqflags);
}
    624
    625/**
    626 * zynqmp_dma_complete_descriptor - Mark the active descriptor as complete
    627 * @chan: ZynqMP DMA channel pointer
    628 */
    629static void zynqmp_dma_complete_descriptor(struct zynqmp_dma_chan *chan)
    630{
    631	struct zynqmp_dma_desc_sw *desc;
    632
    633	desc = list_first_entry_or_null(&chan->active_list,
    634					struct zynqmp_dma_desc_sw, node);
    635	if (!desc)
    636		return;
    637	list_del(&desc->node);
    638	dma_cookie_complete(&desc->async_tx);
    639	list_add_tail(&desc->node, &chan->done_list);
    640}
    641
    642/**
    643 * zynqmp_dma_issue_pending - Issue pending transactions
    644 * @dchan: DMA channel pointer
    645 */
    646static void zynqmp_dma_issue_pending(struct dma_chan *dchan)
    647{
    648	struct zynqmp_dma_chan *chan = to_chan(dchan);
    649	unsigned long irqflags;
    650
    651	spin_lock_irqsave(&chan->lock, irqflags);
    652	zynqmp_dma_start_transfer(chan);
    653	spin_unlock_irqrestore(&chan->lock, irqflags);
    654}
    655
    656/**
    657 * zynqmp_dma_free_descriptors - Free channel descriptors
    658 * @chan: ZynqMP DMA channel pointer
    659 */
    660static void zynqmp_dma_free_descriptors(struct zynqmp_dma_chan *chan)
    661{
    662	unsigned long irqflags;
    663
    664	spin_lock_irqsave(&chan->lock, irqflags);
    665	zynqmp_dma_free_desc_list(chan, &chan->active_list);
    666	zynqmp_dma_free_desc_list(chan, &chan->pending_list);
    667	zynqmp_dma_free_desc_list(chan, &chan->done_list);
    668	spin_unlock_irqrestore(&chan->lock, irqflags);
    669}
    670
    671/**
    672 * zynqmp_dma_free_chan_resources - Free channel resources
    673 * @dchan: DMA channel pointer
    674 */
    675static void zynqmp_dma_free_chan_resources(struct dma_chan *dchan)
    676{
    677	struct zynqmp_dma_chan *chan = to_chan(dchan);
    678
    679	zynqmp_dma_free_descriptors(chan);
    680	dma_free_coherent(chan->dev,
    681		(2 * ZYNQMP_DMA_DESC_SIZE(chan) * ZYNQMP_DMA_NUM_DESCS),
    682		chan->desc_pool_v, chan->desc_pool_p);
    683	kfree(chan->sw_desc_pool);
    684	pm_runtime_mark_last_busy(chan->dev);
    685	pm_runtime_put_autosuspend(chan->dev);
    686}
    687
/**
 * zynqmp_dma_reset - Reset the channel
 * @chan: ZynqMP DMA channel pointer
 *
 * Masks all interrupts, retires the in-flight descriptor so its callback
 * still runs, returns every descriptor to the free pool and re-initializes
 * the channel hardware.
 */
static void zynqmp_dma_reset(struct zynqmp_dma_chan *chan)
{
	unsigned long irqflags;

	/* Mask all interrupt sources before tearing down state */
	writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS);

	spin_lock_irqsave(&chan->lock, irqflags);
	zynqmp_dma_complete_descriptor(chan);
	spin_unlock_irqrestore(&chan->lock, irqflags);
	zynqmp_dma_chan_desc_cleanup(chan);
	zynqmp_dma_free_descriptors(chan);

	zynqmp_dma_init(chan);
}
    706
/**
 * zynqmp_dma_irq_handler - ZynqMP DMA Interrupt handler
 * @irq: IRQ number
 * @data: Pointer to the ZynqMP DMA channel structure
 *
 * Acknowledges pending interrupt sources and defers completion and error
 * handling to the tasklet.
 *
 * Return: IRQ_HANDLED/IRQ_NONE
 */
static irqreturn_t zynqmp_dma_irq_handler(int irq, void *data)
{
	struct zynqmp_dma_chan *chan = (struct zynqmp_dma_chan *)data;
	u32 isr, imr, status;
	irqreturn_t ret = IRQ_NONE;

	/* Only act on sources that are currently unmasked */
	isr = readl(chan->regs + ZYNQMP_DMA_ISR);
	imr = readl(chan->regs + ZYNQMP_DMA_IMR);
	status = isr & ~imr;

	/* Ack everything that was raised, including masked sources */
	writel(isr, chan->regs + ZYNQMP_DMA_ISR);
	if (status & ZYNQMP_DMA_INT_DONE) {
		tasklet_schedule(&chan->tasklet);
		ret = IRQ_HANDLED;
	}

	if (status & ZYNQMP_DMA_DONE)
		chan->idle = true;

	if (status & ZYNQMP_DMA_INT_ERR) {
		/* Tasklet performs a full channel reset when err is set */
		chan->err = true;
		tasklet_schedule(&chan->tasklet);
		dev_err(chan->dev, "Channel %p has errors\n", chan);
		ret = IRQ_HANDLED;
	}

	if (status & ZYNQMP_DMA_INT_OVRFL) {
		zynqmp_dma_handle_ovfl_int(chan, status);
		dev_dbg(chan->dev, "Channel %p overflow interrupt\n", chan);
		ret = IRQ_HANDLED;
	}

	return ret;
}
    748
/**
 * zynqmp_dma_do_tasklet - Schedule completion tasklet
 * @t: Pointer to the ZynqMP DMA channel structure
 *
 * Bottom half: on error performs a full channel reset; otherwise retires
 * as many active descriptors as the destination accounting register
 * reports, runs their callbacks and restarts the engine if it has gone
 * idle and more work is pending.
 */
static void zynqmp_dma_do_tasklet(struct tasklet_struct *t)
{
	struct zynqmp_dma_chan *chan = from_tasklet(chan, t, tasklet);
	u32 count;
	unsigned long irqflags;

	/* Hardware state is untrustworthy after an error: reset everything */
	if (chan->err) {
		zynqmp_dma_reset(chan);
		chan->err = false;
		return;
	}

	spin_lock_irqsave(&chan->lock, irqflags);
	/*
	 * Number of completed destination interrupts; reading also clears
	 * the counter (see the clearing reads in zynqmp_dma_init()).
	 */
	count = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT);
	while (count) {
		zynqmp_dma_complete_descriptor(chan);
		count--;
	}
	spin_unlock_irqrestore(&chan->lock, irqflags);

	zynqmp_dma_chan_desc_cleanup(chan);

	/* Start the next batch if the engine is idle again */
	if (chan->idle) {
		spin_lock_irqsave(&chan->lock, irqflags);
		zynqmp_dma_start_transfer(chan);
		spin_unlock_irqrestore(&chan->lock, irqflags);
	}
}
    781
    782/**
    783 * zynqmp_dma_device_terminate_all - Aborts all transfers on a channel
    784 * @dchan: DMA channel pointer
    785 *
    786 * Return: Always '0'
    787 */
    788static int zynqmp_dma_device_terminate_all(struct dma_chan *dchan)
    789{
    790	struct zynqmp_dma_chan *chan = to_chan(dchan);
    791
    792	writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS);
    793	zynqmp_dma_free_descriptors(chan);
    794
    795	return 0;
    796}
    797
/**
 * zynqmp_dma_prep_memcpy - prepare descriptors for memcpy transaction
 * @dchan: DMA channel
 * @dma_dst: Destination buffer address
 * @dma_src: Source buffer address
 * @len: Transfer length
 * @flags: transfer ack flags
 *
 * Splits the copy into ZYNQMP_DMA_MAX_TRANS_LEN sized chunks, reserving
 * one sw descriptor per chunk up front, and chains the hw descriptors
 * into a single transaction.
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *zynqmp_dma_prep_memcpy(
				struct dma_chan *dchan, dma_addr_t dma_dst,
				dma_addr_t dma_src, size_t len, ulong flags)
{
	struct zynqmp_dma_chan *chan;
	struct zynqmp_dma_desc_sw *new, *first = NULL;
	void *desc = NULL, *prev = NULL;
	size_t copy;
	u32 desc_cnt;
	unsigned long irqflags;

	chan = to_chan(dchan);

	/*
	 * One sw descriptor per max-sized chunk.
	 * NOTE(review): len == 0 gives desc_cnt == 0 but the do-while below
	 * still consumes one descriptor — presumably callers never pass a
	 * zero length; verify against the dmaengine core.
	 */
	desc_cnt = DIV_ROUND_UP(len, ZYNQMP_DMA_MAX_TRANS_LEN);

	/* Reserve all needed descriptors atomically, or bail out */
	spin_lock_irqsave(&chan->lock, irqflags);
	if (desc_cnt > chan->desc_free_cnt) {
		spin_unlock_irqrestore(&chan->lock, irqflags);
		dev_dbg(chan->dev, "chan %p descs are not available\n", chan);
		return NULL;
	}
	chan->desc_free_cnt = chan->desc_free_cnt - desc_cnt;
	spin_unlock_irqrestore(&chan->lock, irqflags);

	do {
		/* Allocate and populate the descriptor */
		new = zynqmp_dma_get_descriptor(chan);

		copy = min_t(size_t, len, ZYNQMP_DMA_MAX_TRANS_LEN);
		desc = (struct zynqmp_dma_desc_ll *)new->src_v;
		zynqmp_dma_config_sg_ll_desc(chan, desc, dma_src,
					     dma_dst, copy, prev);
		prev = desc;
		len -= copy;
		dma_src += copy;
		dma_dst += copy;
		/* First chunk owns the transaction; the rest join its tx_list */
		if (!first)
			first = new;
		else
			list_add_tail(&new->node, &first->tx_list);
	} while (len);

	/* Terminate the chain and request a completion interrupt */
	zynqmp_dma_desc_config_eod(chan, desc);
	async_tx_ack(&first->async_tx);
	first->async_tx.flags = flags;
	return &first->async_tx;
}
    855
/**
 * zynqmp_dma_chan_remove - Channel remove function
 * @chan: ZynqMP DMA channel pointer
 *
 * Releases the channel IRQ, stops the completion tasklet and unlinks the
 * channel from the dmaengine device.  Safe to call with a NULL @chan.
 */
static void zynqmp_dma_chan_remove(struct zynqmp_dma_chan *chan)
{
	if (!chan)
		return;

	if (chan->irq)
		devm_free_irq(chan->zdev->dev, chan->irq, chan);
	tasklet_kill(&chan->tasklet);
	list_del(&chan->common.device_node);
}
    870
    871/**
    872 * zynqmp_dma_chan_probe - Per Channel Probing
    873 * @zdev: Driver specific device structure
    874 * @pdev: Pointer to the platform_device structure
    875 *
    876 * Return: '0' on success and failure value on error
    877 */
    878static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev,
    879			   struct platform_device *pdev)
    880{
    881	struct zynqmp_dma_chan *chan;
    882	struct resource *res;
    883	struct device_node *node = pdev->dev.of_node;
    884	int err;
    885
    886	chan = devm_kzalloc(zdev->dev, sizeof(*chan), GFP_KERNEL);
    887	if (!chan)
    888		return -ENOMEM;
    889	chan->dev = zdev->dev;
    890	chan->zdev = zdev;
    891
    892	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    893	chan->regs = devm_ioremap_resource(&pdev->dev, res);
    894	if (IS_ERR(chan->regs))
    895		return PTR_ERR(chan->regs);
    896
    897	chan->bus_width = ZYNQMP_DMA_BUS_WIDTH_64;
    898	chan->dst_burst_len = ZYNQMP_DMA_MAX_DST_BURST_LEN;
    899	chan->src_burst_len = ZYNQMP_DMA_MAX_SRC_BURST_LEN;
    900	err = of_property_read_u32(node, "xlnx,bus-width", &chan->bus_width);
    901	if (err < 0) {
    902		dev_err(&pdev->dev, "missing xlnx,bus-width property\n");
    903		return err;
    904	}
    905
    906	if (chan->bus_width != ZYNQMP_DMA_BUS_WIDTH_64 &&
    907	    chan->bus_width != ZYNQMP_DMA_BUS_WIDTH_128) {
    908		dev_err(zdev->dev, "invalid bus-width value");
    909		return -EINVAL;
    910	}
    911
    912	chan->is_dmacoherent =  of_property_read_bool(node, "dma-coherent");
    913	zdev->chan = chan;
    914	tasklet_setup(&chan->tasklet, zynqmp_dma_do_tasklet);
    915	spin_lock_init(&chan->lock);
    916	INIT_LIST_HEAD(&chan->active_list);
    917	INIT_LIST_HEAD(&chan->pending_list);
    918	INIT_LIST_HEAD(&chan->done_list);
    919	INIT_LIST_HEAD(&chan->free_list);
    920
    921	dma_cookie_init(&chan->common);
    922	chan->common.device = &zdev->common;
    923	list_add_tail(&chan->common.device_node, &zdev->common.channels);
    924
    925	zynqmp_dma_init(chan);
    926	chan->irq = platform_get_irq(pdev, 0);
    927	if (chan->irq < 0)
    928		return -ENXIO;
    929	err = devm_request_irq(&pdev->dev, chan->irq, zynqmp_dma_irq_handler, 0,
    930			       "zynqmp-dma", chan);
    931	if (err)
    932		return err;
    933
    934	chan->desc_size = sizeof(struct zynqmp_dma_desc_ll);
    935	chan->idle = true;
    936	return 0;
    937}
    938
    939/**
    940 * of_zynqmp_dma_xlate - Translation function
    941 * @dma_spec: Pointer to DMA specifier as found in the device tree
    942 * @ofdma: Pointer to DMA controller data
    943 *
    944 * Return: DMA channel pointer on success and NULL on error
    945 */
    946static struct dma_chan *of_zynqmp_dma_xlate(struct of_phandle_args *dma_spec,
    947					    struct of_dma *ofdma)
    948{
    949	struct zynqmp_dma_device *zdev = ofdma->of_dma_data;
    950
    951	return dma_get_slave_channel(&zdev->chan->common);
    952}
    953
    954/**
    955 * zynqmp_dma_suspend - Suspend method for the driver
    956 * @dev:	Address of the device structure
    957 *
    958 * Put the driver into low power mode.
    959 * Return: 0 on success and failure value on error
    960 */
    961static int __maybe_unused zynqmp_dma_suspend(struct device *dev)
    962{
    963	if (!device_may_wakeup(dev))
    964		return pm_runtime_force_suspend(dev);
    965
    966	return 0;
    967}
    968
    969/**
    970 * zynqmp_dma_resume - Resume from suspend
    971 * @dev:	Address of the device structure
    972 *
    973 * Resume operation after suspend.
    974 * Return: 0 on success and failure value on error
    975 */
    976static int __maybe_unused zynqmp_dma_resume(struct device *dev)
    977{
    978	if (!device_may_wakeup(dev))
    979		return pm_runtime_force_resume(dev);
    980
    981	return 0;
    982}
    983
    984/**
    985 * zynqmp_dma_runtime_suspend - Runtime suspend method for the driver
    986 * @dev:	Address of the device structure
    987 *
    988 * Put the driver into low power mode.
    989 * Return: 0 always
    990 */
    991static int __maybe_unused zynqmp_dma_runtime_suspend(struct device *dev)
    992{
    993	struct zynqmp_dma_device *zdev = dev_get_drvdata(dev);
    994
    995	clk_disable_unprepare(zdev->clk_main);
    996	clk_disable_unprepare(zdev->clk_apb);
    997
    998	return 0;
    999}
   1000
   1001/**
   1002 * zynqmp_dma_runtime_resume - Runtime suspend method for the driver
   1003 * @dev:	Address of the device structure
   1004 *
   1005 * Put the driver into low power mode.
   1006 * Return: 0 always
   1007 */
   1008static int __maybe_unused zynqmp_dma_runtime_resume(struct device *dev)
   1009{
   1010	struct zynqmp_dma_device *zdev = dev_get_drvdata(dev);
   1011	int err;
   1012
   1013	err = clk_prepare_enable(zdev->clk_main);
   1014	if (err) {
   1015		dev_err(dev, "Unable to enable main clock.\n");
   1016		return err;
   1017	}
   1018
   1019	err = clk_prepare_enable(zdev->clk_apb);
   1020	if (err) {
   1021		dev_err(dev, "Unable to enable apb clock.\n");
   1022		clk_disable_unprepare(zdev->clk_main);
   1023		return err;
   1024	}
   1025
   1026	return 0;
   1027}
   1028
/*
 * PM callbacks: system sleep is handled by zynqmp_dma_suspend/resume,
 * runtime PM gates the clocks via zynqmp_dma_runtime_suspend/resume.
 */
static const struct dev_pm_ops zynqmp_dma_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(zynqmp_dma_suspend, zynqmp_dma_resume)
	SET_RUNTIME_PM_OPS(zynqmp_dma_runtime_suspend,
			   zynqmp_dma_runtime_resume, NULL)
};
   1034
   1035/**
   1036 * zynqmp_dma_probe - Driver probe function
   1037 * @pdev: Pointer to the platform_device structure
   1038 *
   1039 * Return: '0' on success and failure value on error
   1040 */
   1041static int zynqmp_dma_probe(struct platform_device *pdev)
   1042{
   1043	struct zynqmp_dma_device *zdev;
   1044	struct dma_device *p;
   1045	int ret;
   1046
   1047	zdev = devm_kzalloc(&pdev->dev, sizeof(*zdev), GFP_KERNEL);
   1048	if (!zdev)
   1049		return -ENOMEM;
   1050
   1051	zdev->dev = &pdev->dev;
   1052	INIT_LIST_HEAD(&zdev->common.channels);
   1053
   1054	dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
   1055	dma_cap_set(DMA_MEMCPY, zdev->common.cap_mask);
   1056
   1057	p = &zdev->common;
   1058	p->device_prep_dma_memcpy = zynqmp_dma_prep_memcpy;
   1059	p->device_terminate_all = zynqmp_dma_device_terminate_all;
   1060	p->device_issue_pending = zynqmp_dma_issue_pending;
   1061	p->device_alloc_chan_resources = zynqmp_dma_alloc_chan_resources;
   1062	p->device_free_chan_resources = zynqmp_dma_free_chan_resources;
   1063	p->device_tx_status = dma_cookie_status;
   1064	p->device_config = zynqmp_dma_device_config;
   1065	p->dev = &pdev->dev;
   1066
   1067	zdev->clk_main = devm_clk_get(&pdev->dev, "clk_main");
   1068	if (IS_ERR(zdev->clk_main))
   1069		return dev_err_probe(&pdev->dev, PTR_ERR(zdev->clk_main),
   1070				     "main clock not found.\n");
   1071
   1072	zdev->clk_apb = devm_clk_get(&pdev->dev, "clk_apb");
   1073	if (IS_ERR(zdev->clk_apb))
   1074		return dev_err_probe(&pdev->dev, PTR_ERR(zdev->clk_apb),
   1075				     "apb clock not found.\n");
   1076
   1077	platform_set_drvdata(pdev, zdev);
   1078	pm_runtime_set_autosuspend_delay(zdev->dev, ZDMA_PM_TIMEOUT);
   1079	pm_runtime_use_autosuspend(zdev->dev);
   1080	pm_runtime_enable(zdev->dev);
   1081	ret = pm_runtime_resume_and_get(zdev->dev);
   1082	if (ret < 0) {
   1083		dev_err(&pdev->dev, "device wakeup failed.\n");
   1084		pm_runtime_disable(zdev->dev);
   1085	}
   1086	if (!pm_runtime_enabled(zdev->dev)) {
   1087		ret = zynqmp_dma_runtime_resume(zdev->dev);
   1088		if (ret)
   1089			return ret;
   1090	}
   1091
   1092	ret = zynqmp_dma_chan_probe(zdev, pdev);
   1093	if (ret) {
   1094		dev_err_probe(&pdev->dev, ret, "Probing channel failed\n");
   1095		goto err_disable_pm;
   1096	}
   1097
   1098	p->dst_addr_widths = BIT(zdev->chan->bus_width / 8);
   1099	p->src_addr_widths = BIT(zdev->chan->bus_width / 8);
   1100
   1101	ret = dma_async_device_register(&zdev->common);
   1102	if (ret) {
   1103		dev_err(zdev->dev, "failed to register the dma device\n");
   1104		goto free_chan_resources;
   1105	}
   1106
   1107	ret = of_dma_controller_register(pdev->dev.of_node,
   1108					 of_zynqmp_dma_xlate, zdev);
   1109	if (ret) {
   1110		dev_err_probe(&pdev->dev, ret, "Unable to register DMA to DT\n");
   1111		dma_async_device_unregister(&zdev->common);
   1112		goto free_chan_resources;
   1113	}
   1114
   1115	pm_runtime_mark_last_busy(zdev->dev);
   1116	pm_runtime_put_sync_autosuspend(zdev->dev);
   1117
   1118	return 0;
   1119
   1120free_chan_resources:
   1121	zynqmp_dma_chan_remove(zdev->chan);
   1122err_disable_pm:
   1123	if (!pm_runtime_enabled(zdev->dev))
   1124		zynqmp_dma_runtime_suspend(zdev->dev);
   1125	pm_runtime_disable(zdev->dev);
   1126	return ret;
   1127}
   1128
   1129/**
   1130 * zynqmp_dma_remove - Driver remove function
   1131 * @pdev: Pointer to the platform_device structure
   1132 *
   1133 * Return: Always '0'
   1134 */
   1135static int zynqmp_dma_remove(struct platform_device *pdev)
   1136{
   1137	struct zynqmp_dma_device *zdev = platform_get_drvdata(pdev);
   1138
   1139	of_dma_controller_free(pdev->dev.of_node);
   1140	dma_async_device_unregister(&zdev->common);
   1141
   1142	zynqmp_dma_chan_remove(zdev->chan);
   1143	pm_runtime_disable(zdev->dev);
   1144	if (!pm_runtime_enabled(zdev->dev))
   1145		zynqmp_dma_runtime_suspend(zdev->dev);
   1146
   1147	return 0;
   1148}
   1149
/* Device tree match table: binds to "xlnx,zynqmp-dma-1.0" nodes */
static const struct of_device_id zynqmp_dma_of_match[] = {
	{ .compatible = "xlnx,zynqmp-dma-1.0", },
	{}
};
MODULE_DEVICE_TABLE(of, zynqmp_dma_of_match);
   1155
/* Platform driver glue: probe/remove hooks, DT matching and PM ops */
static struct platform_driver zynqmp_dma_driver = {
	.driver = {
		.name = "xilinx-zynqmp-dma",
		.of_match_table = zynqmp_dma_of_match,
		.pm = &zynqmp_dma_dev_pm_ops,
	},
	.probe = zynqmp_dma_probe,
	.remove = zynqmp_dma_remove,
};

module_platform_driver(zynqmp_dma_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx ZynqMP DMA driver");