cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

dma-axi-dmac.c (27834B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for the Analog Devices AXI-DMAC core
 *
 * Copyright 2013-2019 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/fpga/adi-axi-common.h>

#include <dt-bindings/dma/axi-dmac.h>

#include "dmaengine.h"
#include "virt-dma.h"

/*
 * The AXI-DMAC is a soft IP core that is used in FPGA designs. The core has
 * various instantiation parameters which decide the exact feature set
 * supported by the core.
 *
 * Each channel of the core has a source interface and a destination interface.
 * The number of channels and the type of the channel interfaces are selected
 * at configuration time. An interface can either be connected to a central
 * memory interconnect, which allows access to system memory, or it can be
 * connected to a dedicated bus which is directly connected to a data port on a
 * peripheral. Given that those are configuration options of the core that are
 * selected when it is instantiated, they cannot be changed by software at
 * runtime. By extension this means that each channel is uni-directional. It
 * can either be device to memory or memory to device, but not both. Also,
 * since the device side is a dedicated data bus only connected to a single
 * peripheral, there is no address that can or needs to be configured for the
 * device side.
 */
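
/*
 * Illustrative only (not part of the original source): a hypothetical
 * devicetree fragment for a single device-to-memory channel, using the
 * properties parsed by axi_dmac_parse_dt()/axi_dmac_parse_chan_dt() below.
 * All addresses and cell values here are made up; consult the
 * adi,axi-dmac-1.00.a binding for the authoritative format.
 *
 *	dma: dma@7c420000 {
 *		compatible = "adi,axi-dmac-1.00.a";
 *		reg = <0x7c420000 0x10000>;
 *		#dma-cells = <1>;
 *		interrupts = <0 57 0>;
 *		clocks = <&clkc 16>;
 *
 *		adi,channels {
 *			#size-cells = <0>;
 *			#address-cells = <1>;
 *
 *			dma-channel@0 {
 *				reg = <0>;
 *				adi,source-bus-width = <64>;
 *				adi,source-bus-type = <AXI_DMAC_BUS_TYPE_AXI_STREAM>;
 *				adi,destination-bus-width = <64>;
 *				adi,destination-bus-type = <AXI_DMAC_BUS_TYPE_AXI_MM>;
 *			};
 *		};
 *	};
 */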

#define AXI_DMAC_REG_INTERFACE_DESC	0x10
#define   AXI_DMAC_DMA_SRC_TYPE_MSK	GENMASK(13, 12)
#define   AXI_DMAC_DMA_SRC_TYPE_GET(x)	FIELD_GET(AXI_DMAC_DMA_SRC_TYPE_MSK, x)
#define   AXI_DMAC_DMA_SRC_WIDTH_MSK	GENMASK(11, 8)
#define   AXI_DMAC_DMA_SRC_WIDTH_GET(x)	FIELD_GET(AXI_DMAC_DMA_SRC_WIDTH_MSK, x)
#define   AXI_DMAC_DMA_DST_TYPE_MSK	GENMASK(5, 4)
#define   AXI_DMAC_DMA_DST_TYPE_GET(x)	FIELD_GET(AXI_DMAC_DMA_DST_TYPE_MSK, x)
#define   AXI_DMAC_DMA_DST_WIDTH_MSK	GENMASK(3, 0)
#define   AXI_DMAC_DMA_DST_WIDTH_GET(x)	FIELD_GET(AXI_DMAC_DMA_DST_WIDTH_MSK, x)

#define AXI_DMAC_REG_IRQ_MASK		0x80
#define AXI_DMAC_REG_IRQ_PENDING	0x84
#define AXI_DMAC_REG_IRQ_SOURCE		0x88

#define AXI_DMAC_REG_CTRL		0x400
#define AXI_DMAC_REG_TRANSFER_ID	0x404
#define AXI_DMAC_REG_START_TRANSFER	0x408
#define AXI_DMAC_REG_FLAGS		0x40c
#define AXI_DMAC_REG_DEST_ADDRESS	0x410
#define AXI_DMAC_REG_SRC_ADDRESS	0x414
#define AXI_DMAC_REG_X_LENGTH		0x418
#define AXI_DMAC_REG_Y_LENGTH		0x41c
#define AXI_DMAC_REG_DEST_STRIDE	0x420
#define AXI_DMAC_REG_SRC_STRIDE		0x424
#define AXI_DMAC_REG_TRANSFER_DONE	0x428
#define AXI_DMAC_REG_ACTIVE_TRANSFER_ID 0x42c
#define AXI_DMAC_REG_STATUS		0x430
#define AXI_DMAC_REG_CURRENT_SRC_ADDR	0x434
#define AXI_DMAC_REG_CURRENT_DEST_ADDR	0x438
#define AXI_DMAC_REG_PARTIAL_XFER_LEN	0x44c
#define AXI_DMAC_REG_PARTIAL_XFER_ID	0x450

#define AXI_DMAC_CTRL_ENABLE		BIT(0)
#define AXI_DMAC_CTRL_PAUSE		BIT(1)

#define AXI_DMAC_IRQ_SOT		BIT(0)
#define AXI_DMAC_IRQ_EOT		BIT(1)

#define AXI_DMAC_FLAG_CYCLIC		BIT(0)
#define AXI_DMAC_FLAG_LAST		BIT(1)
#define AXI_DMAC_FLAG_PARTIAL_REPORT	BIT(2)

#define AXI_DMAC_FLAG_PARTIAL_XFER_DONE BIT(31)

/* The maximum ID allocated by the hardware is 31 */
#define AXI_DMAC_SG_UNUSED 32U

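/*
 * Describes one hardware transfer segment. 'id' holds the transfer ID
 * assigned by the hardware when the segment is queued (at most 31), or
 * AXI_DMAC_SG_UNUSED while the segment is not in flight. 'partial_len' is
 * set when the hardware reports a partial (short) transfer for the segment.
 */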
struct axi_dmac_sg {
	dma_addr_t src_addr;
	dma_addr_t dest_addr;
	unsigned int x_len;
	unsigned int y_len;
	unsigned int dest_stride;
	unsigned int src_stride;
	unsigned int id;
	unsigned int partial_len;
	bool schedule_when_free;
};

struct axi_dmac_desc {
	struct virt_dma_desc vdesc;
	bool cyclic;
	bool have_partial_xfer;

	unsigned int num_submitted;
	unsigned int num_completed;
	unsigned int num_sgs;
	struct axi_dmac_sg sg[];
};

struct axi_dmac_chan {
	struct virt_dma_chan vchan;

	struct axi_dmac_desc *next_desc;
	struct list_head active_descs;
	enum dma_transfer_direction direction;

	unsigned int src_width;
	unsigned int dest_width;
	unsigned int src_type;
	unsigned int dest_type;

	unsigned int max_length;
	unsigned int address_align_mask;
	unsigned int length_align_mask;

	bool hw_partial_xfer;
	bool hw_cyclic;
	bool hw_2d;
};

struct axi_dmac {
	void __iomem *base;
	int irq;

	struct clk *clk;

	struct dma_device dma_dev;
	struct axi_dmac_chan chan;
};

static struct axi_dmac *chan_to_axi_dmac(struct axi_dmac_chan *chan)
{
	return container_of(chan->vchan.chan.device, struct axi_dmac,
		dma_dev);
}

static struct axi_dmac_chan *to_axi_dmac_chan(struct dma_chan *c)
{
	return container_of(c, struct axi_dmac_chan, vchan.chan);
}

static struct axi_dmac_desc *to_axi_dmac_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct axi_dmac_desc, vdesc);
}

static void axi_dmac_write(struct axi_dmac *axi_dmac, unsigned int reg,
	unsigned int val)
{
	writel(val, axi_dmac->base + reg);
}

static int axi_dmac_read(struct axi_dmac *axi_dmac, unsigned int reg)
{
	return readl(axi_dmac->base + reg);
}

static int axi_dmac_src_is_mem(struct axi_dmac_chan *chan)
{
	return chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}

static int axi_dmac_dest_is_mem(struct axi_dmac_chan *chan)
{
	return chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}

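/*
 * Parameter validation helpers: a length or address is usable only if the
 * bits covered by the channel's alignment mask are zero. For example, with a
 * 64-bit (8 byte) wide memory-mapped interface, address_align_mask is 7, so
 * an address is rejected unless its low three bits are zero.
 */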
static bool axi_dmac_check_len(struct axi_dmac_chan *chan, unsigned int len)
{
	if (len == 0)
		return false;
	if ((len & chan->length_align_mask) != 0) /* Not aligned */
		return false;
	return true;
}

static bool axi_dmac_check_addr(struct axi_dmac_chan *chan, dma_addr_t addr)
{
	if ((addr & chan->address_align_mask) != 0) /* Not aligned */
		return false;
	return true;
}

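/*
 * Submit the next segment to the hardware. Called with the vchan lock held,
 * both from axi_dmac_issue_pending() and from the interrupt handler. If the
 * hardware queue is full (START_TRANSFER reads back non-zero), submission is
 * retried on the next start-of-transfer interrupt.
 */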
static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
{
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	struct virt_dma_desc *vdesc;
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *sg;
	unsigned int flags = 0;
	unsigned int val;

	val = axi_dmac_read(dmac, AXI_DMAC_REG_START_TRANSFER);
	if (val) /* Queue is full, wait for the next SOT IRQ */
		return;

	desc = chan->next_desc;

	if (!desc) {
		vdesc = vchan_next_desc(&chan->vchan);
		if (!vdesc)
			return;
		list_move_tail(&vdesc->node, &chan->active_descs);
		desc = to_axi_dmac_desc(vdesc);
	}
	sg = &desc->sg[desc->num_submitted];

	/* Already queued in cyclic mode. Wait for it to finish */
	if (sg->id != AXI_DMAC_SG_UNUSED) {
		sg->schedule_when_free = true;
		return;
	}

	desc->num_submitted++;
	if (desc->num_submitted == desc->num_sgs ||
	    desc->have_partial_xfer) {
		if (desc->cyclic)
			desc->num_submitted = 0; /* Start again */
		else
			chan->next_desc = NULL;
		flags |= AXI_DMAC_FLAG_LAST;
	} else {
		chan->next_desc = desc;
	}

	sg->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);

	if (axi_dmac_dest_is_mem(chan)) {
		axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, sg->dest_addr);
		axi_dmac_write(dmac, AXI_DMAC_REG_DEST_STRIDE, sg->dest_stride);
	}

	if (axi_dmac_src_is_mem(chan)) {
		axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, sg->src_addr);
		axi_dmac_write(dmac, AXI_DMAC_REG_SRC_STRIDE, sg->src_stride);
	}

	/*
	 * If the hardware supports cyclic transfers and there is no callback to
	 * call and only a single segment, enable hw cyclic mode to avoid
	 * unnecessary interrupts.
	 */
	if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback &&
		desc->num_sgs == 1)
		flags |= AXI_DMAC_FLAG_CYCLIC;

	if (chan->hw_partial_xfer)
		flags |= AXI_DMAC_FLAG_PARTIAL_REPORT;

	axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1);
	axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->y_len - 1);
	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, flags);
	axi_dmac_write(dmac, AXI_DMAC_REG_START_TRANSFER, 1);
}

static struct axi_dmac_desc *axi_dmac_active_desc(struct axi_dmac_chan *chan)
{
	return list_first_entry_or_null(&chan->active_descs,
		struct axi_dmac_desc, vdesc.node);
}

static inline unsigned int axi_dmac_total_sg_bytes(struct axi_dmac_chan *chan,
	struct axi_dmac_sg *sg)
{
	if (chan->hw_2d)
		return sg->x_len * sg->y_len;
	else
		return sg->x_len;
}

static void axi_dmac_dequeue_partial_xfers(struct axi_dmac_chan *chan)
{
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *sg;
	u32 xfer_done, len, id, i;
	bool found_sg;

	do {
		len = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_LEN);
		id  = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_ID);

		found_sg = false;
		list_for_each_entry(desc, &chan->active_descs, vdesc.node) {
			for (i = 0; i < desc->num_sgs; i++) {
				sg = &desc->sg[i];
				if (sg->id == AXI_DMAC_SG_UNUSED)
					continue;
				if (sg->id == id) {
					desc->have_partial_xfer = true;
					sg->partial_len = len;
					found_sg = true;
					break;
				}
			}
			if (found_sg)
				break;
		}

		if (found_sg) {
			dev_dbg(dmac->dma_dev.dev,
				"Found partial segment id=%u, len=%u\n",
				id, len);
		} else {
			dev_warn(dmac->dma_dev.dev,
				 "Partial segment not found: id=%u, len=%u\n",
				 id, len);
		}

		/* Check if we have any more partial transfers */
		xfer_done = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
		xfer_done = !(xfer_done & AXI_DMAC_FLAG_PARTIAL_XFER_DONE);

	} while (!xfer_done);
}

static void axi_dmac_compute_residue(struct axi_dmac_chan *chan,
	struct axi_dmac_desc *active)
{
	struct dmaengine_result *rslt = &active->vdesc.tx_result;
	unsigned int start = active->num_completed - 1;
	struct axi_dmac_sg *sg;
	unsigned int i, total;

	rslt->result = DMA_TRANS_NOERROR;
	rslt->residue = 0;

	/*
	 * We get here if the last completed segment is partial, which
	 * means we can compute the residue from that segment onwards
	 */
	for (i = start; i < active->num_sgs; i++) {
		sg = &active->sg[i];
		total = axi_dmac_total_sg_bytes(chan, sg);
		rslt->residue += (total - sg->partial_len);
	}
}

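/*
 * Complete finished segments. completed_transfers holds the TRANSFER_DONE
 * register value, in which BIT(id) is set for each completed transfer ID
 * (and BIT(31) flags pending partial-transfer reports). Returns true if a
 * completed segment had a deferred submission waiting on it, i.e. the
 * caller should start the next transfer.
 */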
static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
	unsigned int completed_transfers)
{
	struct axi_dmac_desc *active;
	struct axi_dmac_sg *sg;
	bool start_next = false;

	active = axi_dmac_active_desc(chan);
	if (!active)
		return false;

	if (chan->hw_partial_xfer &&
	    (completed_transfers & AXI_DMAC_FLAG_PARTIAL_XFER_DONE))
		axi_dmac_dequeue_partial_xfers(chan);

	do {
		sg = &active->sg[active->num_completed];
		if (sg->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */
			break;
		if (!(BIT(sg->id) & completed_transfers))
			break;
		active->num_completed++;
		sg->id = AXI_DMAC_SG_UNUSED;
		if (sg->schedule_when_free) {
			sg->schedule_when_free = false;
			start_next = true;
		}

		if (sg->partial_len)
			axi_dmac_compute_residue(chan, active);

		if (active->cyclic)
			vchan_cyclic_callback(&active->vdesc);

		if (active->num_completed == active->num_sgs ||
		    sg->partial_len) {
			if (active->cyclic) {
				active->num_completed = 0; /* wrap around */
			} else {
				list_del(&active->vdesc.node);
				vchan_cookie_complete(&active->vdesc);
				active = axi_dmac_active_desc(chan);
			}
		}
	} while (active);

	return start_next;
}

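/*
 * Interrupt handler: acknowledges all pending interrupt sources, completes
 * finished segments on an end-of-transfer interrupt, and submits the next
 * segment when a queue slot has become available, either on a
 * start-of-transfer interrupt or when axi_dmac_transfer_done() requests it.
 */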
static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid)
{
	struct axi_dmac *dmac = devid;
	unsigned int pending;
	bool start_next = false;

	pending = axi_dmac_read(dmac, AXI_DMAC_REG_IRQ_PENDING);
	if (!pending)
		return IRQ_NONE;

	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_PENDING, pending);

	spin_lock(&dmac->chan.vchan.lock);
	/* One or more transfers have finished */
	if (pending & AXI_DMAC_IRQ_EOT) {
		unsigned int completed;

		completed = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
		start_next = axi_dmac_transfer_done(&dmac->chan, completed);
	}
	/* Space has become available in the descriptor queue */
	if ((pending & AXI_DMAC_IRQ_SOT) || start_next)
		axi_dmac_start_transfer(&dmac->chan);
	spin_unlock(&dmac->chan.vchan.lock);

	return IRQ_HANDLED;
}

static int axi_dmac_terminate_all(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, 0);
	chan->next_desc = NULL;
	vchan_get_all_descriptors(&chan->vchan, &head);
	list_splice_tail_init(&chan->active_descs, &head);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&chan->vchan, &head);

	return 0;
}

static void axi_dmac_synchronize(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);

	vchan_synchronize(&chan->vchan);
}

static void axi_dmac_issue_pending(struct dma_chan *c)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
	unsigned long flags;

	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, AXI_DMAC_CTRL_ENABLE);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (vchan_issue_pending(&chan->vchan))
		axi_dmac_start_transfer(chan);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
{
	struct axi_dmac_desc *desc;
	unsigned int i;

	desc = kzalloc(struct_size(desc, sg, num_sgs), GFP_NOWAIT);
	if (!desc)
		return NULL;

	for (i = 0; i < num_sgs; i++)
		desc->sg[i].id = AXI_DMAC_SG_UNUSED;

	desc->num_sgs = num_sgs;

	return desc;
}

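/*
 * Worked example of the segment splitting below (illustrative numbers only):
 * with period_len = 10000, max_length = 4096 and length_align_mask = 3, the
 * period is split into num_segments = 3 segments of segment_size =
 * DIV_ROUND_UP(10000, 3) = 3334 bytes, rounded up to the 4-byte alignment to
 * 3336. The loop then emits segments of 3336, 3336 and a final 3328 bytes,
 * which together cover the full period.
 */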
static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
	enum dma_transfer_direction direction, dma_addr_t addr,
	unsigned int num_periods, unsigned int period_len,
	struct axi_dmac_sg *sg)
{
	unsigned int num_segments, i;
	unsigned int segment_size;
	unsigned int len;

	/* Split into multiple equally sized segments if necessary */
	num_segments = DIV_ROUND_UP(period_len, chan->max_length);
	segment_size = DIV_ROUND_UP(period_len, num_segments);
	/* Take care of alignment */
	segment_size = ((segment_size - 1) | chan->length_align_mask) + 1;

	for (i = 0; i < num_periods; i++) {
		len = period_len;

		while (len > segment_size) {
			if (direction == DMA_DEV_TO_MEM)
				sg->dest_addr = addr;
			else
				sg->src_addr = addr;
			sg->x_len = segment_size;
			sg->y_len = 1;
			sg++;
			addr += segment_size;
			len -= segment_size;
		}

		if (direction == DMA_DEV_TO_MEM)
			sg->dest_addr = addr;
		else
			sg->src_addr = addr;
		sg->x_len = len;
		sg->y_len = 1;
		sg++;
		addr += len;
	}

	return sg;
}

static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
	struct dma_chan *c, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	struct axi_dmac_sg *dsg;
	struct scatterlist *sg;
	unsigned int num_sgs;
	unsigned int i;

	if (direction != chan->direction)
		return NULL;

	num_sgs = 0;
	for_each_sg(sgl, sg, sg_len, i)
		num_sgs += DIV_ROUND_UP(sg_dma_len(sg), chan->max_length);

	desc = axi_dmac_alloc_desc(num_sgs);
	if (!desc)
		return NULL;

	dsg = desc->sg;

	for_each_sg(sgl, sg, sg_len, i) {
		if (!axi_dmac_check_addr(chan, sg_dma_address(sg)) ||
		    !axi_dmac_check_len(chan, sg_dma_len(sg))) {
			kfree(desc);
			return NULL;
		}

		dsg = axi_dmac_fill_linear_sg(chan, direction, sg_dma_address(sg), 1,
			sg_dma_len(sg), dsg);
	}

	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	unsigned int num_periods, num_segments;

	if (direction != chan->direction)
		return NULL;

	if (!axi_dmac_check_len(chan, buf_len) ||
	    !axi_dmac_check_addr(chan, buf_addr))
		return NULL;

	if (period_len == 0 || buf_len % period_len)
		return NULL;

	num_periods = buf_len / period_len;
	num_segments = DIV_ROUND_UP(period_len, chan->max_length);

	desc = axi_dmac_alloc_desc(num_periods * num_segments);
	if (!desc)
		return NULL;

	axi_dmac_fill_linear_sg(chan, direction, buf_addr, num_periods,
		period_len, desc->sg);

	desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved(
	struct dma_chan *c, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
	struct axi_dmac_desc *desc;
	size_t dst_icg, src_icg;

	if (xt->frame_size != 1)
		return NULL;

	if (xt->dir != chan->direction)
		return NULL;

	if (axi_dmac_src_is_mem(chan)) {
		if (!xt->src_inc || !axi_dmac_check_addr(chan, xt->src_start))
			return NULL;
	}

	if (axi_dmac_dest_is_mem(chan)) {
		if (!xt->dst_inc || !axi_dmac_check_addr(chan, xt->dst_start))
			return NULL;
	}

	dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
	src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);

	if (chan->hw_2d) {
		if (!axi_dmac_check_len(chan, xt->sgl[0].size) ||
		    xt->numf == 0)
			return NULL;
		if (xt->sgl[0].size + dst_icg > chan->max_length ||
		    xt->sgl[0].size + src_icg > chan->max_length)
			return NULL;
	} else {
		if (dst_icg != 0 || src_icg != 0)
			return NULL;
		if (chan->max_length / xt->sgl[0].size < xt->numf)
			return NULL;
		if (!axi_dmac_check_len(chan, xt->sgl[0].size * xt->numf))
			return NULL;
	}

	desc = axi_dmac_alloc_desc(1);
	if (!desc)
		return NULL;

	if (axi_dmac_src_is_mem(chan)) {
		desc->sg[0].src_addr = xt->src_start;
		desc->sg[0].src_stride = xt->sgl[0].size + src_icg;
	}

	if (axi_dmac_dest_is_mem(chan)) {
		desc->sg[0].dest_addr = xt->dst_start;
		desc->sg[0].dest_stride = xt->sgl[0].size + dst_icg;
	}

	if (chan->hw_2d) {
		desc->sg[0].x_len = xt->sgl[0].size;
		desc->sg[0].y_len = xt->numf;
	} else {
		desc->sg[0].x_len = xt->sgl[0].size * xt->numf;
		desc->sg[0].y_len = 1;
	}

	if (flags & DMA_CYCLIC)
		desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static void axi_dmac_free_chan_resources(struct dma_chan *c)
{
	vchan_free_chan_resources(to_virt_chan(c));
}

static void axi_dmac_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct axi_dmac_desc, vdesc));
}

static bool axi_dmac_regmap_rdwr(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case AXI_DMAC_REG_IRQ_MASK:
	case AXI_DMAC_REG_IRQ_SOURCE:
	case AXI_DMAC_REG_IRQ_PENDING:
	case AXI_DMAC_REG_CTRL:
	case AXI_DMAC_REG_TRANSFER_ID:
	case AXI_DMAC_REG_START_TRANSFER:
	case AXI_DMAC_REG_FLAGS:
	case AXI_DMAC_REG_DEST_ADDRESS:
	case AXI_DMAC_REG_SRC_ADDRESS:
	case AXI_DMAC_REG_X_LENGTH:
	case AXI_DMAC_REG_Y_LENGTH:
	case AXI_DMAC_REG_DEST_STRIDE:
	case AXI_DMAC_REG_SRC_STRIDE:
	case AXI_DMAC_REG_TRANSFER_DONE:
	case AXI_DMAC_REG_ACTIVE_TRANSFER_ID:
	case AXI_DMAC_REG_STATUS:
	case AXI_DMAC_REG_CURRENT_SRC_ADDR:
	case AXI_DMAC_REG_CURRENT_DEST_ADDR:
	case AXI_DMAC_REG_PARTIAL_XFER_LEN:
	case AXI_DMAC_REG_PARTIAL_XFER_ID:
		return true;
	default:
		return false;
	}
}

static const struct regmap_config axi_dmac_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = AXI_DMAC_REG_PARTIAL_XFER_ID,
	.readable_reg = axi_dmac_regmap_rdwr,
	.writeable_reg = axi_dmac_regmap_rdwr,
};

static void axi_dmac_adjust_chan_params(struct axi_dmac_chan *chan)
{
	chan->address_align_mask = max(chan->dest_width, chan->src_width) - 1;

	if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
		chan->direction = DMA_MEM_TO_MEM;
	else if (!axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
		chan->direction = DMA_MEM_TO_DEV;
	else if (axi_dmac_dest_is_mem(chan) && !axi_dmac_src_is_mem(chan))
		chan->direction = DMA_DEV_TO_MEM;
	else
		chan->direction = DMA_DEV_TO_DEV;
}

/*
 * The configuration stored in the devicetree matches the configuration
 * parameters of the peripheral instance and allows the driver to know which
 * features are implemented and how it should behave.
 */
static int axi_dmac_parse_chan_dt(struct device_node *of_chan,
	struct axi_dmac_chan *chan)
{
	u32 val;
	int ret;

	ret = of_property_read_u32(of_chan, "reg", &val);
	if (ret)
		return ret;

	/* We only support 1 channel for now */
	if (val != 0)
		return -EINVAL;

	ret = of_property_read_u32(of_chan, "adi,source-bus-type", &val);
	if (ret)
		return ret;
	if (val > AXI_DMAC_BUS_TYPE_FIFO)
		return -EINVAL;
	chan->src_type = val;

	ret = of_property_read_u32(of_chan, "adi,destination-bus-type", &val);
	if (ret)
		return ret;
	if (val > AXI_DMAC_BUS_TYPE_FIFO)
		return -EINVAL;
	chan->dest_type = val;

	ret = of_property_read_u32(of_chan, "adi,source-bus-width", &val);
	if (ret)
		return ret;
	chan->src_width = val / 8;

	ret = of_property_read_u32(of_chan, "adi,destination-bus-width", &val);
	if (ret)
		return ret;
	chan->dest_width = val / 8;

	axi_dmac_adjust_chan_params(chan);

	return 0;
}

static int axi_dmac_parse_dt(struct device *dev, struct axi_dmac *dmac)
{
	struct device_node *of_channels, *of_chan;
	int ret;

	of_channels = of_get_child_by_name(dev->of_node, "adi,channels");
	if (of_channels == NULL)
		return -ENODEV;

	for_each_child_of_node(of_channels, of_chan) {
		ret = axi_dmac_parse_chan_dt(of_chan, &dmac->chan);
		if (ret) {
			of_node_put(of_chan);
			of_node_put(of_channels);
			return -EINVAL;
		}
	}
	of_node_put(of_channels);

	return 0;
}

static int axi_dmac_read_chan_config(struct device *dev, struct axi_dmac *dmac)
{
	struct axi_dmac_chan *chan = &dmac->chan;
	unsigned int val, desc;

	desc = axi_dmac_read(dmac, AXI_DMAC_REG_INTERFACE_DESC);
	if (desc == 0) {
		dev_err(dev, "DMA interface register reads zero\n");
		return -EFAULT;
	}

	val = AXI_DMAC_DMA_SRC_TYPE_GET(desc);
	if (val > AXI_DMAC_BUS_TYPE_FIFO) {
		dev_err(dev, "Invalid source bus type read: %d\n", val);
		return -EINVAL;
	}
	chan->src_type = val;

	val = AXI_DMAC_DMA_DST_TYPE_GET(desc);
	if (val > AXI_DMAC_BUS_TYPE_FIFO) {
		dev_err(dev, "Invalid destination bus type read: %d\n", val);
		return -EINVAL;
	}
	chan->dest_type = val;

	val = AXI_DMAC_DMA_SRC_WIDTH_GET(desc);
	if (val == 0) {
		dev_err(dev, "Source bus width is zero\n");
		return -EINVAL;
	}
	/* widths are stored in log2 */
	chan->src_width = 1 << val;

	val = AXI_DMAC_DMA_DST_WIDTH_GET(desc);
	if (val == 0) {
		dev_err(dev, "Destination bus width is zero\n");
		return -EINVAL;
	}
	chan->dest_width = 1 << val;

	axi_dmac_adjust_chan_params(chan);

	return 0;
}

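/*
 * Probe the instantiation-time features of the core by writing to registers
 * and reading back: optional register bits and fields read back as zero when
 * the corresponding feature is not implemented. For example, writing
 * AXI_DMAC_FLAG_CYCLIC to the FLAGS register and reading the same value back
 * indicates hardware cyclic support, and writing 0xffffffff to X_LENGTH
 * reads back the maximum supported transfer length.
 */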
static int axi_dmac_detect_caps(struct axi_dmac *dmac, unsigned int version)
{
	struct axi_dmac_chan *chan = &dmac->chan;

	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, AXI_DMAC_FLAG_CYCLIC);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_FLAGS) == AXI_DMAC_FLAG_CYCLIC)
		chan->hw_cyclic = true;

	axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, 1);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_Y_LENGTH) == 1)
		chan->hw_2d = true;

	axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0xffffffff);
	chan->max_length = axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
	if (chan->max_length != UINT_MAX)
		chan->max_length++;

	axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, 0xffffffff);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_DEST_ADDRESS) == 0 &&
	    chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM) {
		dev_err(dmac->dma_dev.dev,
			"Destination memory-mapped interface not supported.");
		return -ENODEV;
	}

	axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, 0xffffffff);
	if (axi_dmac_read(dmac, AXI_DMAC_REG_SRC_ADDRESS) == 0 &&
	    chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM) {
		dev_err(dmac->dma_dev.dev,
			"Source memory-mapped interface not supported.");
		return -ENODEV;
	}

	if (version >= ADI_AXI_PCORE_VER(4, 2, 'a'))
		chan->hw_partial_xfer = true;

	if (version >= ADI_AXI_PCORE_VER(4, 1, 'a')) {
		axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0x00);
		chan->length_align_mask =
			axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
	} else {
		chan->length_align_mask = chan->address_align_mask;
	}

	return 0;
}

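/*
 * Device initialization: after the clock is enabled, the core version
 * decides where the channel configuration comes from. Cores at or above
 * version 4.3.a report their configuration through the INTERFACE_DESC
 * register, older cores rely on the devicetree description. The remaining
 * capabilities are then detected at runtime by axi_dmac_detect_caps().
 */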
static int axi_dmac_probe(struct platform_device *pdev)
{
	struct dma_device *dma_dev;
	struct axi_dmac *dmac;
	struct resource *res;
	struct regmap *regmap;
	unsigned int version;
	int ret;

	dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac)
		return -ENOMEM;

	dmac->irq = platform_get_irq(pdev, 0);
	if (dmac->irq < 0)
		return dmac->irq;
	if (dmac->irq == 0)
		return -EINVAL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dmac->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dmac->base))
		return PTR_ERR(dmac->base);

	dmac->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dmac->clk))
		return PTR_ERR(dmac->clk);

	ret = clk_prepare_enable(dmac->clk);
	if (ret < 0)
		return ret;

	version = axi_dmac_read(dmac, ADI_AXI_REG_VERSION);

	if (version >= ADI_AXI_PCORE_VER(4, 3, 'a'))
		ret = axi_dmac_read_chan_config(&pdev->dev, dmac);
	else
		ret = axi_dmac_parse_dt(&pdev->dev, dmac);

	if (ret < 0)
		goto err_clk_disable;

	INIT_LIST_HEAD(&dmac->chan.active_descs);

	dma_set_max_seg_size(&pdev->dev, UINT_MAX);

	dma_dev = &dmac->dma_dev;
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, dma_dev->cap_mask);
	dma_dev->device_free_chan_resources = axi_dmac_free_chan_resources;
	dma_dev->device_tx_status = dma_cookie_status;
	dma_dev->device_issue_pending = axi_dmac_issue_pending;
	dma_dev->device_prep_slave_sg = axi_dmac_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = axi_dmac_prep_dma_cyclic;
	dma_dev->device_prep_interleaved_dma = axi_dmac_prep_interleaved;
	dma_dev->device_terminate_all = axi_dmac_terminate_all;
	dma_dev->device_synchronize = axi_dmac_synchronize;
	dma_dev->dev = &pdev->dev;
	dma_dev->chancnt = 1;
	dma_dev->src_addr_widths = BIT(dmac->chan.src_width);
	dma_dev->dst_addr_widths = BIT(dmac->chan.dest_width);
	dma_dev->directions = BIT(dmac->chan.direction);
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	INIT_LIST_HEAD(&dma_dev->channels);

	dmac->chan.vchan.desc_free = axi_dmac_desc_free;
	vchan_init(&dmac->chan.vchan, dma_dev);

	ret = axi_dmac_detect_caps(dmac, version);
	if (ret)
		goto err_clk_disable;

	dma_dev->copy_align = (dmac->chan.address_align_mask + 1);

	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00);

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto err_clk_disable;

	ret = of_dma_controller_register(pdev->dev.of_node,
		of_dma_xlate_by_chan_id, dma_dev);
	if (ret)
		goto err_unregister_device;

	ret = request_irq(dmac->irq, axi_dmac_interrupt_handler, IRQF_SHARED,
		dev_name(&pdev->dev), dmac);
	if (ret)
		goto err_unregister_of;

	platform_set_drvdata(pdev, dmac);

	regmap = devm_regmap_init_mmio(&pdev->dev, dmac->base,
		 &axi_dmac_regmap_config);
	if (IS_ERR(regmap)) {
		ret = PTR_ERR(regmap);
		goto err_free_irq;
	}

	return 0;

err_free_irq:
	free_irq(dmac->irq, dmac);
err_unregister_of:
	of_dma_controller_free(pdev->dev.of_node);
err_unregister_device:
	dma_async_device_unregister(&dmac->dma_dev);
err_clk_disable:
	clk_disable_unprepare(dmac->clk);

	return ret;
}

static int axi_dmac_remove(struct platform_device *pdev)
{
	struct axi_dmac *dmac = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	free_irq(dmac->irq, dmac);
	tasklet_kill(&dmac->chan.vchan.task);
	dma_async_device_unregister(&dmac->dma_dev);
	clk_disable_unprepare(dmac->clk);

	return 0;
}

static const struct of_device_id axi_dmac_of_match_table[] = {
	{ .compatible = "adi,axi-dmac-1.00.a" },
	{ },
};
MODULE_DEVICE_TABLE(of, axi_dmac_of_match_table);

static struct platform_driver axi_dmac_driver = {
	.driver = {
		.name = "dma-axi-dmac",
		.of_match_table = axi_dmac_of_match_table,
	},
	.probe = axi_dmac_probe,
	.remove = axi_dmac_remove,
};
module_platform_driver(axi_dmac_driver);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA controller driver for the AXI-DMAC controller");
MODULE_LICENSE("GPL v2");