cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

altera-msgdma.c (26166B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMA driver for Altera mSGDMA IP core
 *
 * Copyright (C) 2017 Stefan Roese <sr@denx.de>
 *
 * Based on drivers/dma/xilinx/zynqmp_dma.c, which is:
 * Copyright (C) 2016 Xilinx, Inc. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of_dma.h>

#include "dmaengine.h"

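/*
 * Maximum transfer length per descriptor (the "len" field is 32 bits wide)
 * and the number of software descriptors pre-allocated per channel.
 */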
#define MSGDMA_MAX_TRANS_LEN		U32_MAX
#define MSGDMA_DESC_NUM			1024

/**
 * struct msgdma_extended_desc - implements an extended descriptor
 * @read_addr_lo: data buffer source address low bits
 * @write_addr_lo: data buffer destination address low bits
 * @len: the number of bytes to transfer per descriptor
 * @burst_seq_num: bit 31:24 write burst
 *		   bit 23:16 read burst
 *		   bit 15:00 sequence number
 * @stride: bit 31:16 write stride
 *	    bit 15:00 read stride
 * @read_addr_hi: data buffer source address high bits
 * @write_addr_hi: data buffer destination address high bits
 * @control: characteristics of the transfer
 */
struct msgdma_extended_desc {
	u32 read_addr_lo;
	u32 write_addr_lo;
	u32 len;
	u32 burst_seq_num;
	u32 stride;
	u32 read_addr_hi;
	u32 write_addr_hi;
	u32 control;
};

/* mSGDMA descriptor control field bit definitions */
#define MSGDMA_DESC_CTL_SET_CH(x)	((x) & 0xff)
#define MSGDMA_DESC_CTL_GEN_SOP		BIT(8)
#define MSGDMA_DESC_CTL_GEN_EOP		BIT(9)
#define MSGDMA_DESC_CTL_PARK_READS	BIT(10)
#define MSGDMA_DESC_CTL_PARK_WRITES	BIT(11)
#define MSGDMA_DESC_CTL_END_ON_EOP	BIT(12)
#define MSGDMA_DESC_CTL_END_ON_LEN	BIT(13)
#define MSGDMA_DESC_CTL_TR_COMP_IRQ	BIT(14)
#define MSGDMA_DESC_CTL_EARLY_IRQ	BIT(15)
#define MSGDMA_DESC_CTL_TR_ERR_IRQ	GENMASK(23, 16)
#define MSGDMA_DESC_CTL_EARLY_DONE	BIT(24)

/*
 * Writing "1" to the "go" bit commits the entire descriptor into the
 * descriptor FIFO(s)
 */
#define MSGDMA_DESC_CTL_GO		BIT(31)

/* Tx buffer control flags */
#define MSGDMA_DESC_CTL_TX_FIRST	(MSGDMA_DESC_CTL_GEN_SOP |	\
					 MSGDMA_DESC_CTL_TR_ERR_IRQ |	\
					 MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_TX_MIDDLE	(MSGDMA_DESC_CTL_TR_ERR_IRQ |	\
					 MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_TX_LAST		(MSGDMA_DESC_CTL_GEN_EOP |	\
					 MSGDMA_DESC_CTL_TR_COMP_IRQ |	\
					 MSGDMA_DESC_CTL_TR_ERR_IRQ |	\
					 MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_TX_SINGLE	(MSGDMA_DESC_CTL_GEN_SOP |	\
					 MSGDMA_DESC_CTL_GEN_EOP |	\
					 MSGDMA_DESC_CTL_TR_COMP_IRQ |	\
					 MSGDMA_DESC_CTL_TR_ERR_IRQ |	\
					 MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_RX_SINGLE	(MSGDMA_DESC_CTL_END_ON_EOP |	\
					 MSGDMA_DESC_CTL_END_ON_LEN |	\
					 MSGDMA_DESC_CTL_TR_COMP_IRQ |	\
					 MSGDMA_DESC_CTL_EARLY_IRQ |	\
					 MSGDMA_DESC_CTL_TR_ERR_IRQ |	\
					 MSGDMA_DESC_CTL_GO)

/* mSGDMA extended descriptor stride definitions */
#define MSGDMA_DESC_STRIDE_RD		0x00000001
#define MSGDMA_DESC_STRIDE_WR		0x00010000
#define MSGDMA_DESC_STRIDE_RW		0x00010001

/* mSGDMA dispatcher control and status register map */
#define MSGDMA_CSR_STATUS		0x00	/* Read / Clear */
#define MSGDMA_CSR_CONTROL		0x04	/* Read / Write */
#define MSGDMA_CSR_RW_FILL_LEVEL	0x08	/* 31:16 - write fill level */
						/* 15:00 - read fill level */
#define MSGDMA_CSR_RESP_FILL_LEVEL	0x0c	/* response FIFO fill level */
#define MSGDMA_CSR_RW_SEQ_NUM		0x10	/* 31:16 - write seq number */
						/* 15:00 - read seq number */

/* mSGDMA CSR status register bit definitions */
#define MSGDMA_CSR_STAT_BUSY			BIT(0)
#define MSGDMA_CSR_STAT_DESC_BUF_EMPTY		BIT(1)
#define MSGDMA_CSR_STAT_DESC_BUF_FULL		BIT(2)
#define MSGDMA_CSR_STAT_RESP_BUF_EMPTY		BIT(3)
#define MSGDMA_CSR_STAT_RESP_BUF_FULL		BIT(4)
#define MSGDMA_CSR_STAT_STOPPED			BIT(5)
#define MSGDMA_CSR_STAT_RESETTING		BIT(6)
#define MSGDMA_CSR_STAT_STOPPED_ON_ERR		BIT(7)
#define MSGDMA_CSR_STAT_STOPPED_ON_EARLY	BIT(8)
#define MSGDMA_CSR_STAT_IRQ			BIT(9)
#define MSGDMA_CSR_STAT_MASK			GENMASK(9, 0)
#define MSGDMA_CSR_STAT_MASK_WITHOUT_IRQ	GENMASK(8, 0)

#define DESC_EMPTY	(MSGDMA_CSR_STAT_DESC_BUF_EMPTY | \
			 MSGDMA_CSR_STAT_RESP_BUF_EMPTY)

/* mSGDMA CSR control register bit definitions */
#define MSGDMA_CSR_CTL_STOP			BIT(0)
#define MSGDMA_CSR_CTL_RESET			BIT(1)
#define MSGDMA_CSR_CTL_STOP_ON_ERR		BIT(2)
#define MSGDMA_CSR_CTL_STOP_ON_EARLY		BIT(3)
#define MSGDMA_CSR_CTL_GLOBAL_INTR		BIT(4)
#define MSGDMA_CSR_CTL_STOP_DESCS		BIT(5)

/* mSGDMA CSR fill level bits */
#define MSGDMA_CSR_WR_FILL_LEVEL_GET(v)		(((v) & 0xffff0000) >> 16)
#define MSGDMA_CSR_RD_FILL_LEVEL_GET(v)		((v) & 0x0000ffff)
#define MSGDMA_CSR_RESP_FILL_LEVEL_GET(v)	((v) & 0x0000ffff)

#define MSGDMA_CSR_SEQ_NUM_GET(v)		(((v) & 0xffff0000) >> 16)

/* mSGDMA response register map */
#define MSGDMA_RESP_BYTES_TRANSFERRED	0x00
#define MSGDMA_RESP_STATUS		0x04

/* mSGDMA response register bit definitions */
#define MSGDMA_RESP_EARLY_TERM	BIT(8)
#define MSGDMA_RESP_ERR_MASK	0xff

/**
 * struct msgdma_sw_desc - implements a sw descriptor
 * @async_tx: support for the async_tx api
 * @hw_desc: associated HW descriptor
 * @node: node to move from the free list to the tx list
 * @tx_list: transmit list node
 */
struct msgdma_sw_desc {
	struct dma_async_tx_descriptor async_tx;
	struct msgdma_extended_desc hw_desc;
	struct list_head node;
	struct list_head tx_list;
};

/*
 * struct msgdma_device - DMA device structure
 */
struct msgdma_device {
	spinlock_t lock;
	struct device *dev;
	struct tasklet_struct irq_tasklet;
	struct list_head pending_list;
	struct list_head free_list;
	struct list_head active_list;
	struct list_head done_list;
	u32 desc_free_cnt;
	bool idle;

	struct dma_device dmadev;
	struct dma_chan	dmachan;
	dma_addr_t hw_desq;
	struct msgdma_sw_desc *sw_desq;
	unsigned int npendings;

	struct dma_slave_config slave_cfg;

	int irq;

	/* mSGDMA controller */
	void __iomem *csr;

	/* mSGDMA descriptors */
	void __iomem *desc;

	/* mSGDMA response */
	void __iomem *resp;
};

#define to_mdev(chan)	container_of(chan, struct msgdma_device, dmachan)
#define tx_to_desc(tx)	container_of(tx, struct msgdma_sw_desc, async_tx)

/**
 * msgdma_get_descriptor - Get the sw descriptor from the pool
 * @mdev: Pointer to the Altera mSGDMA device structure
 *
 * Return: The sw descriptor
 */
static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev)
{
	struct msgdma_sw_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&mdev->lock, flags);
	desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node);
	list_del(&desc->node);
	spin_unlock_irqrestore(&mdev->lock, flags);

	INIT_LIST_HEAD(&desc->tx_list);

	return desc;
}

/**
 * msgdma_free_descriptor - Return a descriptor and its children to the free list
 * @mdev: Pointer to the Altera mSGDMA device structure
 * @desc: Transaction descriptor pointer
 */
static void msgdma_free_descriptor(struct msgdma_device *mdev,
				   struct msgdma_sw_desc *desc)
{
	struct msgdma_sw_desc *child, *next;

	mdev->desc_free_cnt++;
	list_add_tail(&desc->node, &mdev->free_list);
	list_for_each_entry_safe(child, next, &desc->tx_list, node) {
		mdev->desc_free_cnt++;
		list_move_tail(&child->node, &mdev->free_list);
	}
}

/**
 * msgdma_free_desc_list - Free descriptors list
 * @mdev: Pointer to the Altera mSGDMA device structure
 * @list: List to parse and delete the descriptor
 */
static void msgdma_free_desc_list(struct msgdma_device *mdev,
				  struct list_head *list)
{
	struct msgdma_sw_desc *desc, *next;

	list_for_each_entry_safe(desc, next, list, node)
		msgdma_free_descriptor(mdev, desc);
}

/**
 * msgdma_desc_config - Configure the descriptor
 * @desc: Hw descriptor pointer
 * @dst: Destination buffer address
 * @src: Source buffer address
 * @len: Transfer length
 * @stride: Read/write stride value to set
 */
static void msgdma_desc_config(struct msgdma_extended_desc *desc,
			       dma_addr_t dst, dma_addr_t src, size_t len,
			       u32 stride)
{
	/* Set lower 32bits of src & dst addresses in the descriptor */
	desc->read_addr_lo = lower_32_bits(src);
	desc->write_addr_lo = lower_32_bits(dst);

	/* Set upper 32bits of src & dst addresses in the descriptor */
	desc->read_addr_hi = upper_32_bits(src);
	desc->write_addr_hi = upper_32_bits(dst);

	desc->len = len;
	desc->stride = stride;
	desc->burst_seq_num = 0;	/* 0 will result in max burst length */

	/*
	 * Don't set interrupt on xfer end yet, this will be done later
	 * for the "last" descriptor
	 */
	desc->control = MSGDMA_DESC_CTL_TR_ERR_IRQ | MSGDMA_DESC_CTL_GO |
		MSGDMA_DESC_CTL_END_ON_LEN;
}

/**
 * msgdma_desc_config_eod - Mark the descriptor as end descriptor
 * @desc: Hw descriptor pointer
 */
static void msgdma_desc_config_eod(struct msgdma_extended_desc *desc)
{
	desc->control |= MSGDMA_DESC_CTL_TR_COMP_IRQ;
}

/**
 * msgdma_tx_submit - Submit DMA transaction
 * @tx: Async transaction descriptor pointer
 *
 * Return: cookie value
 */
static dma_cookie_t msgdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct msgdma_device *mdev = to_mdev(tx->chan);
	struct msgdma_sw_desc *new;
	dma_cookie_t cookie;
	unsigned long flags;

	new = tx_to_desc(tx);
	spin_lock_irqsave(&mdev->lock, flags);
	cookie = dma_cookie_assign(tx);

	list_add_tail(&new->node, &mdev->pending_list);
	spin_unlock_irqrestore(&mdev->lock, flags);

	return cookie;
}

/**
 * msgdma_prep_memcpy - prepare descriptors for memcpy transaction
 * @dchan: DMA channel
 * @dma_dst: Destination buffer address
 * @dma_src: Source buffer address
 * @len: Transfer length
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
msgdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
		   dma_addr_t dma_src, size_t len, ulong flags)
{
	struct msgdma_device *mdev = to_mdev(dchan);
	struct msgdma_sw_desc *new, *first = NULL;
	struct msgdma_extended_desc *desc;
	size_t copy;
	u32 desc_cnt;
	unsigned long irqflags;

	desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN);

	spin_lock_irqsave(&mdev->lock, irqflags);
	if (desc_cnt > mdev->desc_free_cnt) {
		spin_unlock_irqrestore(&mdev->lock, irqflags);
		dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
		return NULL;
	}
	mdev->desc_free_cnt -= desc_cnt;
	spin_unlock_irqrestore(&mdev->lock, irqflags);

	do {
		/* Allocate and populate the descriptor */
		new = msgdma_get_descriptor(mdev);

		copy = min_t(size_t, len, MSGDMA_MAX_TRANS_LEN);
		desc = &new->hw_desc;
		msgdma_desc_config(desc, dma_dst, dma_src, copy,
				   MSGDMA_DESC_STRIDE_RW);
		len -= copy;
		dma_src += copy;
		dma_dst += copy;
		if (!first)
			first = new;
		else
			list_add_tail(&new->node, &first->tx_list);
	} while (len);

	msgdma_desc_config_eod(desc);
	async_tx_ack(&first->async_tx);
	first->async_tx.flags = flags;

	return &first->async_tx;
}

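/*
 * Sketch of typical dmaengine client usage of the memcpy path above (not
 * part of this driver): a channel is requested for the DMA_MEMCPY
 * capability, a transfer is prepared with dmaengine_prep_dma_memcpy(),
 * queued with dmaengine_submit() and started with
 * dma_async_issue_pending(), which ends up in msgdma_issue_pending().
 */
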
/**
 * msgdma_prep_slave_sg - prepare descriptors for a slave sg transaction
 *
 * @dchan: DMA channel
 * @sgl: Destination scatter list
 * @sg_len: Number of entries in destination scatter list
 * @dir: DMA transfer direction
 * @flags: transfer ack flags
 * @context: transfer context (unused)
 */
static struct dma_async_tx_descriptor *
msgdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
		     unsigned int sg_len, enum dma_transfer_direction dir,
		     unsigned long flags, void *context)
{
	struct msgdma_device *mdev = to_mdev(dchan);
	struct dma_slave_config *cfg = &mdev->slave_cfg;
	struct msgdma_sw_desc *new, *first = NULL;
	void *desc = NULL;
	size_t len, avail;
	dma_addr_t dma_dst, dma_src;
	u32 desc_cnt = 0, i;
	struct scatterlist *sg;
	u32 stride;
	unsigned long irqflags;

	for_each_sg(sgl, sg, sg_len, i)
		desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN);

	spin_lock_irqsave(&mdev->lock, irqflags);
	if (desc_cnt > mdev->desc_free_cnt) {
		spin_unlock_irqrestore(&mdev->lock, irqflags);
		dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
		return NULL;
	}
	mdev->desc_free_cnt -= desc_cnt;
	spin_unlock_irqrestore(&mdev->lock, irqflags);

	avail = sg_dma_len(sgl);

	/* Run until we are out of scatterlist entries */
	while (true) {
		/* Allocate and populate the descriptor */
		new = msgdma_get_descriptor(mdev);

		desc = &new->hw_desc;
		len = min_t(size_t, avail, MSGDMA_MAX_TRANS_LEN);

		if (dir == DMA_MEM_TO_DEV) {
			dma_src = sg_dma_address(sgl) + sg_dma_len(sgl) - avail;
			dma_dst = cfg->dst_addr;
			stride = MSGDMA_DESC_STRIDE_RD;
		} else {
			dma_src = cfg->src_addr;
			dma_dst = sg_dma_address(sgl) + sg_dma_len(sgl) - avail;
			stride = MSGDMA_DESC_STRIDE_WR;
		}
		msgdma_desc_config(desc, dma_dst, dma_src, len, stride);
		avail -= len;

		if (!first)
			first = new;
		else
			list_add_tail(&new->node, &first->tx_list);

		/* Fetch the next scatterlist entry */
		if (avail == 0) {
			if (sg_len == 0)
				break;
			sgl = sg_next(sgl);
			if (sgl == NULL)
				break;
			sg_len--;
			avail = sg_dma_len(sgl);
		}
	}

	msgdma_desc_config_eod(desc);
	first->async_tx.flags = flags;

	return &first->async_tx;
}

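/**
 * msgdma_dma_config - Cache the DMA slave configuration
 * @dchan: DMA channel
 * @config: DMA slave configuration to store for subsequent transfers
 *
 * Return: Always '0'
 */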
static int msgdma_dma_config(struct dma_chan *dchan,
			     struct dma_slave_config *config)
{
	struct msgdma_device *mdev = to_mdev(dchan);

	memcpy(&mdev->slave_cfg, config, sizeof(*config));

	return 0;
}

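/**
 * msgdma_reset - Reset the mSGDMA dispatcher and re-enable it
 * @mdev: Pointer to the Altera mSGDMA device structure
 *
 * Issues a reset, polls until the RESETTING status bit clears, then
 * re-enables the controller with global interrupts and stop-on-error
 * behaviour.
 */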
static void msgdma_reset(struct msgdma_device *mdev)
{
	u32 val;
	int ret;

	/* Reset mSGDMA */
	iowrite32(MSGDMA_CSR_STAT_MASK, mdev->csr + MSGDMA_CSR_STATUS);
	iowrite32(MSGDMA_CSR_CTL_RESET, mdev->csr + MSGDMA_CSR_CONTROL);

	ret = readl_poll_timeout(mdev->csr + MSGDMA_CSR_STATUS, val,
				 (val & MSGDMA_CSR_STAT_RESETTING) == 0,
				 1, 10000);
	if (ret)
		dev_err(mdev->dev, "DMA channel did not reset\n");

	/* Clear all status bits */
	iowrite32(MSGDMA_CSR_STAT_MASK, mdev->csr + MSGDMA_CSR_STATUS);

	/* Enable the DMA controller including interrupts */
	iowrite32(MSGDMA_CSR_CTL_STOP_ON_ERR | MSGDMA_CSR_CTL_STOP_ON_EARLY |
		  MSGDMA_CSR_CTL_GLOBAL_INTR, mdev->csr + MSGDMA_CSR_CONTROL);

	mdev->idle = true;
}

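/**
 * msgdma_copy_one - Copy one hw descriptor into the controller's FIFO
 * @mdev: Pointer to the Altera mSGDMA device structure
 * @desc: Transaction descriptor pointer
 */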
static void msgdma_copy_one(struct msgdma_device *mdev,
			    struct msgdma_sw_desc *desc)
{
	void __iomem *hw_desc = mdev->desc;

	/*
	 * Check that the DESC FIFO is not full. If it is full, we need to
	 * wait for at least one entry to become free again.
	 */
	while (ioread32(mdev->csr + MSGDMA_CSR_STATUS) &
	       MSGDMA_CSR_STAT_DESC_BUF_FULL)
		mdelay(1);

	/*
	 * The descriptor needs to get copied into the descriptor FIFO
	 * of the DMA controller. The descriptor will get flushed to the
	 * FIFO once the last word (the control word) is written. Since we
	 * are not 100% sure that memcpy() writes all words in the "correct"
	 * order (addresses from low to high) on all architectures, we make
	 * sure this control word is written last, coding it separately and
	 * adding write barriers here.
	 */
	memcpy((void __force *)hw_desc, &desc->hw_desc,
	       sizeof(desc->hw_desc) - sizeof(u32));

	/* Write control word last to flush this descriptor into the FIFO */
	mdev->idle = false;
	wmb();
	iowrite32(desc->hw_desc.control, hw_desc +
		  offsetof(struct msgdma_extended_desc, control));
	wmb();
}

/**
 * msgdma_copy_desc_to_fifo - copy descriptor(s) into controller FIFO
 * @mdev: Pointer to the Altera mSGDMA device structure
 * @desc: Transaction descriptor pointer
 */
static void msgdma_copy_desc_to_fifo(struct msgdma_device *mdev,
				     struct msgdma_sw_desc *desc)
{
	struct msgdma_sw_desc *sdesc, *next;

	msgdma_copy_one(mdev, desc);

	list_for_each_entry_safe(sdesc, next, &desc->tx_list, node)
		msgdma_copy_one(mdev, sdesc);
}

/**
 * msgdma_start_transfer - Initiate the new transfer
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_start_transfer(struct msgdma_device *mdev)
{
	struct msgdma_sw_desc *desc;

	if (!mdev->idle)
		return;

	desc = list_first_entry_or_null(&mdev->pending_list,
					struct msgdma_sw_desc, node);
	if (!desc)
		return;

	list_splice_tail_init(&mdev->pending_list, &mdev->active_list);
	msgdma_copy_desc_to_fifo(mdev, desc);
}

/**
 * msgdma_issue_pending - Issue pending transactions
 * @chan: DMA channel pointer
 */
static void msgdma_issue_pending(struct dma_chan *chan)
{
	struct msgdma_device *mdev = to_mdev(chan);
	unsigned long flags;

	spin_lock_irqsave(&mdev->lock, flags);
	msgdma_start_transfer(mdev);
	spin_unlock_irqrestore(&mdev->lock, flags);
}

/**
 * msgdma_chan_desc_cleanup - Cleanup the completed descriptors
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_chan_desc_cleanup(struct msgdma_device *mdev)
{
	struct msgdma_sw_desc *desc, *next;

	list_for_each_entry_safe(desc, next, &mdev->done_list, node) {
		struct dmaengine_desc_callback cb;

		list_del(&desc->node);

		dmaengine_desc_get_callback(&desc->async_tx, &cb);
		if (dmaengine_desc_callback_valid(&cb)) {
			spin_unlock(&mdev->lock);
			dmaengine_desc_callback_invoke(&cb, NULL);
			spin_lock(&mdev->lock);
		}

		/* Run any dependencies, then free the descriptor */
		msgdma_free_descriptor(mdev, desc);
	}
}

/**
 * msgdma_complete_descriptor - Mark the active descriptor as complete
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_complete_descriptor(struct msgdma_device *mdev)
{
	struct msgdma_sw_desc *desc;

	desc = list_first_entry_or_null(&mdev->active_list,
					struct msgdma_sw_desc, node);
	if (!desc)
		return;
	list_del(&desc->node);
	dma_cookie_complete(&desc->async_tx);
	list_add_tail(&desc->node, &mdev->done_list);
}

/**
 * msgdma_free_descriptors - Free channel descriptors
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_free_descriptors(struct msgdma_device *mdev)
{
	msgdma_free_desc_list(mdev, &mdev->active_list);
	msgdma_free_desc_list(mdev, &mdev->pending_list);
	msgdma_free_desc_list(mdev, &mdev->done_list);
}

/**
 * msgdma_free_chan_resources - Free channel resources
 * @dchan: DMA channel pointer
 */
static void msgdma_free_chan_resources(struct dma_chan *dchan)
{
	struct msgdma_device *mdev = to_mdev(dchan);
	unsigned long flags;

	spin_lock_irqsave(&mdev->lock, flags);
	msgdma_free_descriptors(mdev);
	spin_unlock_irqrestore(&mdev->lock, flags);
	kfree(mdev->sw_desq);
}

/**
 * msgdma_alloc_chan_resources - Allocate channel resources
 * @dchan: DMA channel
 *
 * Return: Number of descriptors on success and failure value on error
 */
static int msgdma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct msgdma_device *mdev = to_mdev(dchan);
	struct msgdma_sw_desc *desc;
	int i;

	mdev->sw_desq = kcalloc(MSGDMA_DESC_NUM, sizeof(*desc), GFP_NOWAIT);
	if (!mdev->sw_desq)
		return -ENOMEM;

	mdev->idle = true;
	mdev->desc_free_cnt = MSGDMA_DESC_NUM;

	INIT_LIST_HEAD(&mdev->free_list);

	for (i = 0; i < MSGDMA_DESC_NUM; i++) {
		desc = mdev->sw_desq + i;
		dma_async_tx_descriptor_init(&desc->async_tx, &mdev->dmachan);
		desc->async_tx.tx_submit = msgdma_tx_submit;
		list_add_tail(&desc->node, &mdev->free_list);
	}

	return MSGDMA_DESC_NUM;
}

/**
 * msgdma_tasklet - Handle descriptor completions
 * @t: Pointer to the tasklet embedded in the Altera mSGDMA device structure
 */
static void msgdma_tasklet(struct tasklet_struct *t)
{
	struct msgdma_device *mdev = from_tasklet(mdev, t, irq_tasklet);
	u32 count;
	u32 __maybe_unused size;
	u32 __maybe_unused status;
	unsigned long flags;

	spin_lock_irqsave(&mdev->lock, flags);

	if (mdev->resp) {
		/* Read number of responses that are available */
		count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL);
		dev_dbg(mdev->dev, "%s (%d): response count=%d\n",
			__func__, __LINE__, count);
	} else {
		count = 1;
	}

	while (count--) {
		/*
		 * Read both longwords to purge this response from the FIFO.
		 * On Avalon-MM implementations, size and status do not
		 * have any real values, like transferred bytes or error
		 * bits, so we just drop these values.
		 */
		if (mdev->resp) {
			size = ioread32(mdev->resp +
					MSGDMA_RESP_BYTES_TRANSFERRED);
			status = ioread32(mdev->resp +
					MSGDMA_RESP_STATUS);
		}

		msgdma_complete_descriptor(mdev);
		msgdma_chan_desc_cleanup(mdev);
	}

	spin_unlock_irqrestore(&mdev->lock, flags);
}

/**
 * msgdma_irq_handler - Altera mSGDMA Interrupt handler
 * @irq: IRQ number
 * @data: Pointer to the Altera mSGDMA device structure
 *
 * Return: IRQ_HANDLED/IRQ_NONE
 */
static irqreturn_t msgdma_irq_handler(int irq, void *data)
{
	struct msgdma_device *mdev = data;
	u32 status;

	status = ioread32(mdev->csr + MSGDMA_CSR_STATUS);
	if ((status & MSGDMA_CSR_STAT_BUSY) == 0) {
		/* Start next transfer if the DMA controller is idle */
		spin_lock(&mdev->lock);
		mdev->idle = true;
		msgdma_start_transfer(mdev);
		spin_unlock(&mdev->lock);
	}

	tasklet_schedule(&mdev->irq_tasklet);

	/* Clear interrupt in mSGDMA controller */
	iowrite32(MSGDMA_CSR_STAT_IRQ, mdev->csr + MSGDMA_CSR_STATUS);

	return IRQ_HANDLED;
}

/**
 * msgdma_dev_remove - Device remove function
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_dev_remove(struct msgdma_device *mdev)
{
	if (!mdev)
		return;

	devm_free_irq(mdev->dev, mdev->irq, mdev);
	tasklet_kill(&mdev->irq_tasklet);
	list_del(&mdev->dmachan.device_node);
}

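/**
 * request_and_map - Request and ioremap a named platform MMIO resource
 * @pdev: Pointer to the platform_device structure
 * @name: Name of the memory resource
 * @res: Output pointer for the located resource
 * @ptr: Output pointer for the mapped region (NULL if optional and absent)
 * @optional: When true, a missing resource is not treated as an error
 *
 * Return: '0' on success and failure value on error
 */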
static int request_and_map(struct platform_device *pdev, const char *name,
			   struct resource **res, void __iomem **ptr,
			   bool optional)
{
	struct resource *region;
	struct device *device = &pdev->dev;

	*res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	if (*res == NULL) {
		if (optional) {
			*ptr = NULL;
			dev_info(device, "optional resource %s not defined\n",
				 name);
			return 0;
		}
		dev_err(device, "mandatory resource %s not defined\n", name);
		return -ENODEV;
	}

	region = devm_request_mem_region(device, (*res)->start,
					 resource_size(*res), dev_name(device));
	if (region == NULL) {
		dev_err(device, "unable to request %s\n", name);
		return -EBUSY;
	}

	*ptr = devm_ioremap(device, region->start,
			    resource_size(region));
	if (*ptr == NULL) {
		dev_err(device, "ioremap of %s failed!\n", name);
		return -ENOMEM;
	}

	return 0;
}

/**
 * msgdma_probe - Driver probe function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: '0' on success and failure value on error
 */
static int msgdma_probe(struct platform_device *pdev)
{
	struct msgdma_device *mdev;
	struct dma_device *dma_dev;
	struct resource *dma_res;
	int ret;

	mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_NOWAIT);
	if (!mdev)
		return -ENOMEM;

	mdev->dev = &pdev->dev;

	/* Map CSR space */
	ret = request_and_map(pdev, "csr", &dma_res, &mdev->csr, false);
	if (ret)
		return ret;

	/* Map (extended) descriptor space */
	ret = request_and_map(pdev, "desc", &dma_res, &mdev->desc, false);
	if (ret)
		return ret;

	/* Map response space */
	ret = request_and_map(pdev, "resp", &dma_res, &mdev->resp, true);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, mdev);

	/* Get interrupt nr from platform data */
	mdev->irq = platform_get_irq(pdev, 0);
	if (mdev->irq < 0)
		return -ENXIO;

	ret = devm_request_irq(&pdev->dev, mdev->irq, msgdma_irq_handler,
			       0, dev_name(&pdev->dev), mdev);
	if (ret)
		return ret;

	tasklet_setup(&mdev->irq_tasklet, msgdma_tasklet);

	dma_cookie_init(&mdev->dmachan);

	spin_lock_init(&mdev->lock);

	INIT_LIST_HEAD(&mdev->active_list);
	INIT_LIST_HEAD(&mdev->pending_list);
	INIT_LIST_HEAD(&mdev->done_list);
	INIT_LIST_HEAD(&mdev->free_list);

	dma_dev = &mdev->dmadev;

	/* Set DMA capabilities */
	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);

	dma_dev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma_dev->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM) |
		BIT(DMA_MEM_TO_MEM);
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	/* Init DMA link list */
	INIT_LIST_HEAD(&dma_dev->channels);

	/* Set base routines */
	dma_dev->device_tx_status = dma_cookie_status;
	dma_dev->device_issue_pending = msgdma_issue_pending;
	dma_dev->dev = &pdev->dev;

	dma_dev->copy_align = DMAENGINE_ALIGN_4_BYTES;
	dma_dev->device_prep_dma_memcpy = msgdma_prep_memcpy;
	dma_dev->device_prep_slave_sg = msgdma_prep_slave_sg;
	dma_dev->device_config = msgdma_dma_config;

	dma_dev->device_alloc_chan_resources = msgdma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = msgdma_free_chan_resources;

	mdev->dmachan.device = dma_dev;
	list_add_tail(&mdev->dmachan.device_node, &dma_dev->channels);

	/* Set DMA mask to 64 bits */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_warn(&pdev->dev, "unable to set coherent mask to 64\n");
		goto fail;
	}

	msgdma_reset(mdev);

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto fail;

	ret = of_dma_controller_register(pdev->dev.of_node,
					 of_dma_xlate_by_chan_id, dma_dev);
	if (ret == -EINVAL)
		dev_warn(&pdev->dev, "device was not probed from DT\n");
	else if (ret && ret != -ENODEV)
		goto fail;

	dev_notice(&pdev->dev, "Altera mSGDMA driver probe success\n");

	return 0;

fail:
	msgdma_dev_remove(mdev);

	return ret;
}

/**
 * msgdma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: Always '0'
 */
static int msgdma_remove(struct platform_device *pdev)
{
	struct msgdma_device *mdev = platform_get_drvdata(pdev);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&mdev->dmadev);
	msgdma_dev_remove(mdev);

	dev_notice(&pdev->dev, "Altera mSGDMA driver removed\n");

	return 0;
}

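/*
 * Illustrative device-tree node for this driver (node name, addresses and
 * interrupt specifier are examples only); the "csr", "desc" and "resp"
 * reg-names match the resources requested in msgdma_probe() above:
 *
 *	msgdma0: dma-controller@ff200000 {
 *		compatible = "altr,socfpga-msgdma";
 *		reg = <0xff200000 0x20>, <0xff200020 0x20>, <0xff200040 0x8>;
 *		reg-names = "csr", "desc", "resp";
 *		interrupts = <0 41 4>;
 *		#dma-cells = <1>;
 *	};
 */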
#ifdef CONFIG_OF
static const struct of_device_id msgdma_match[] = {
	{ .compatible = "altr,socfpga-msgdma", },
	{ }
};

MODULE_DEVICE_TABLE(of, msgdma_match);
#endif

static struct platform_driver msgdma_driver = {
	.driver = {
		.name = "altera-msgdma",
		.of_match_table = of_match_ptr(msgdma_match),
	},
	.probe = msgdma_probe,
	.remove = msgdma_remove,
};

module_platform_driver(msgdma_driver);

MODULE_ALIAS("platform:altera-msgdma");
MODULE_DESCRIPTION("Altera mSGDMA driver");
MODULE_AUTHOR("Stefan Roese <sr@denx.de>");
MODULE_LICENSE("GPL");