cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

dw-edma-core.c (24318B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
 * Synopsys DesignWare eDMA core driver
 *
 * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/pm_runtime.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/dma/edma.h>
#include <linux/dma-mapping.h>

#include "dw-edma-core.h"
#include "dw-edma-v0-core.h"
#include "../dmaengine.h"
#include "../virt-dma.h"

static inline
struct device *dchan2dev(struct dma_chan *dchan)
{
	return &dchan->dev->device;
}

static inline
struct device *chan2dev(struct dw_edma_chan *chan)
{
	return &chan->vc.chan.dev->device;
}

static inline
struct dw_edma_desc *vd2dw_edma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct dw_edma_desc, vd);
}

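/* Allocate a burst node and link it into the chunk's burst list (the first
 * allocation becomes the list head).
 */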
static struct dw_edma_burst *dw_edma_alloc_burst(struct dw_edma_chunk *chunk)
{
	struct dw_edma_burst *burst;

	burst = kzalloc(sizeof(*burst), GFP_NOWAIT);
	if (unlikely(!burst))
		return NULL;

	INIT_LIST_HEAD(&burst->list);
	if (chunk->burst) {
		/* Create and add new element into the linked list */
		chunk->bursts_alloc++;
		list_add_tail(&burst->list, &chunk->burst->list);
	} else {
		/* List head */
		chunk->bursts_alloc = 0;
		chunk->burst = burst;
	}

	return burst;
}

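/* Allocate a chunk for the descriptor, bind it to the channel's linked-list
 * region and insert it into the descriptor's chunk list.
 */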
static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc)
{
	struct dw_edma_chan *chan = desc->chan;
	struct dw_edma *dw = chan->chip->dw;
	struct dw_edma_chunk *chunk;

	chunk = kzalloc(sizeof(*chunk), GFP_NOWAIT);
	if (unlikely(!chunk))
		return NULL;

	INIT_LIST_HEAD(&chunk->list);
	chunk->chan = chan;
	/* Toggle the change bit (CB) in each chunk; this mechanism informs
	 * the eDMA HW block that a new linked list is ready to be consumed.
	 *  - Odd chunks originate CB equal to 0
	 *  - Even chunks originate CB equal to 1
	 */
	chunk->cb = !(desc->chunks_alloc % 2);
	if (chan->dir == EDMA_DIR_WRITE) {
		chunk->ll_region.paddr = dw->ll_region_wr[chan->id].paddr;
		chunk->ll_region.vaddr = dw->ll_region_wr[chan->id].vaddr;
	} else {
		chunk->ll_region.paddr = dw->ll_region_rd[chan->id].paddr;
		chunk->ll_region.vaddr = dw->ll_region_rd[chan->id].vaddr;
	}

	if (desc->chunk) {
		/* Create and add new element into the linked list */
		if (!dw_edma_alloc_burst(chunk)) {
			kfree(chunk);
			return NULL;
		}
		desc->chunks_alloc++;
		list_add_tail(&chunk->list, &desc->chunk->list);
	} else {
		/* List head */
		chunk->burst = NULL;
		desc->chunks_alloc = 0;
		desc->chunk = chunk;
	}

	return chunk;
}

static struct dw_edma_desc *dw_edma_alloc_desc(struct dw_edma_chan *chan)
{
	struct dw_edma_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (unlikely(!desc))
		return NULL;

	desc->chan = chan;
	if (!dw_edma_alloc_chunk(desc)) {
		kfree(desc);
		return NULL;
	}

	return desc;
}

static void dw_edma_free_burst(struct dw_edma_chunk *chunk)
{
	struct dw_edma_burst *child, *_next;

	/* Remove all the list elements */
	list_for_each_entry_safe(child, _next, &chunk->burst->list, list) {
		list_del(&child->list);
		kfree(child);
		chunk->bursts_alloc--;
	}

	/* Remove the list head */
	kfree(child);
	chunk->burst = NULL;
}

static void dw_edma_free_chunk(struct dw_edma_desc *desc)
{
	struct dw_edma_chunk *child, *_next;

	if (!desc->chunk)
		return;

	/* Remove all the list elements */
	list_for_each_entry_safe(child, _next, &desc->chunk->list, list) {
		dw_edma_free_burst(child);
		list_del(&child->list);
		kfree(child);
		desc->chunks_alloc--;
	}

	/* Remove the list head */
	kfree(child);
	desc->chunk = NULL;
}

static void dw_edma_free_desc(struct dw_edma_desc *desc)
{
	dw_edma_free_chunk(desc);
	kfree(desc);
}

static void vchan_free_desc(struct virt_dma_desc *vdesc)
{
	dw_edma_free_desc(vd2dw_edma_desc(vdesc));
}

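/* Program the first chunk of the next pending descriptor into the eDMA core
 * and release the chunk once it has been handed to the hardware.
 */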
static void dw_edma_start_transfer(struct dw_edma_chan *chan)
{
	struct dw_edma_chunk *child;
	struct dw_edma_desc *desc;
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&chan->vc);
	if (!vd)
		return;

	desc = vd2dw_edma_desc(vd);
	if (!desc)
		return;

	child = list_first_entry_or_null(&desc->chunk->list,
					 struct dw_edma_chunk, list);
	if (!child)
		return;

	dw_edma_v0_core_start(child, !desc->xfer_sz);
	desc->xfer_sz += child->ll_region.sz;
	dw_edma_free_burst(child);
	list_del(&child->list);
	kfree(child);
	desc->chunks_alloc--;
}

static int dw_edma_device_config(struct dma_chan *dchan,
				 struct dma_slave_config *config)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);

	memcpy(&chan->config, config, sizeof(*config));
	chan->configured = true;

	return 0;
}

static int dw_edma_device_pause(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int err = 0;

	if (!chan->configured)
		err = -EPERM;
	else if (chan->status != EDMA_ST_BUSY)
		err = -EPERM;
	else if (chan->request != EDMA_REQ_NONE)
		err = -EPERM;
	else
		chan->request = EDMA_REQ_PAUSE;

	return err;
}

static int dw_edma_device_resume(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int err = 0;

	if (!chan->configured) {
		err = -EPERM;
	} else if (chan->status != EDMA_ST_PAUSE) {
		err = -EPERM;
	} else if (chan->request != EDMA_REQ_NONE) {
		err = -EPERM;
	} else {
		chan->status = EDMA_ST_BUSY;
		dw_edma_start_transfer(chan);
	}

	return err;
}

static int dw_edma_device_terminate_all(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int err = 0;

	if (!chan->configured) {
		/* Do nothing */
	} else if (chan->status == EDMA_ST_PAUSE) {
		chan->status = EDMA_ST_IDLE;
		chan->configured = false;
	} else if (chan->status == EDMA_ST_IDLE) {
		chan->configured = false;
	} else if (dw_edma_v0_core_ch_status(chan) == DMA_COMPLETE) {
		/*
		 * The channel is in a false BUSY state; it probably
		 * missed or lost an interrupt.
		 */
		chan->status = EDMA_ST_IDLE;
		chan->configured = false;
	} else if (chan->request > EDMA_REQ_PAUSE) {
		err = -EPERM;
	} else {
		chan->request = EDMA_REQ_STOP;
	}

	return err;
}

static void dw_edma_device_issue_pending(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (chan->configured && chan->request == EDMA_REQ_NONE &&
	    chan->status == EDMA_ST_IDLE && vchan_issue_pending(&chan->vc)) {
		chan->status = EDMA_ST_BUSY;
		dw_edma_start_transfer(chan);
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

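/* Report the cookie status, flagging paused channels as DMA_PAUSED and
 * computing the residue from the descriptor's allocated vs. transferred size.
 */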
static enum dma_status
dw_edma_device_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
			 struct dma_tx_state *txstate)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	struct dw_edma_desc *desc;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	u32 residue = 0;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	if (ret == DMA_IN_PROGRESS && chan->status == EDMA_ST_PAUSE)
		ret = DMA_PAUSED;

	if (!txstate)
		goto ret_residue;

	spin_lock_irqsave(&chan->vc.lock, flags);
	vd = vchan_find_desc(&chan->vc, cookie);
	if (vd) {
		desc = vd2dw_edma_desc(vd);
		if (desc)
			residue = desc->alloc_sz - desc->xfer_sz;
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);

ret_residue:
	dma_set_residue(txstate, residue);

	return ret;
}

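/* Validate the transfer direction and parameters, then build the chunk/burst
 * linked list describing the transfer and prepare a vchan descriptor for it.
 */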
static struct dma_async_tx_descriptor *
dw_edma_device_transfer(struct dw_edma_transfer *xfer)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(xfer->dchan);
	enum dma_transfer_direction dir = xfer->direction;
	phys_addr_t src_addr, dst_addr;
	struct scatterlist *sg = NULL;
	struct dw_edma_chunk *chunk;
	struct dw_edma_burst *burst;
	struct dw_edma_desc *desc;
	u32 cnt = 0;
	int i;

	if (!chan->configured)
		return NULL;

	switch (chan->config.direction) {
	case DMA_DEV_TO_MEM: /* local DMA */
		if (dir == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_READ)
			break;
		return NULL;
	case DMA_MEM_TO_DEV: /* local DMA */
		if (dir == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_WRITE)
			break;
		return NULL;
	default: /* remote DMA */
		if (dir == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_READ)
			break;
		if (dir == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_WRITE)
			break;
		return NULL;
	}

	if (xfer->type == EDMA_XFER_CYCLIC) {
		if (!xfer->xfer.cyclic.len || !xfer->xfer.cyclic.cnt)
			return NULL;
	} else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
		if (xfer->xfer.sg.len < 1)
			return NULL;
	} else if (xfer->type == EDMA_XFER_INTERLEAVED) {
		if (!xfer->xfer.il->numf)
			return NULL;
		if (xfer->xfer.il->numf > 0 && xfer->xfer.il->frame_size > 0)
			return NULL;
	} else {
		return NULL;
	}

	desc = dw_edma_alloc_desc(chan);
	if (unlikely(!desc))
		goto err_alloc;

	chunk = dw_edma_alloc_chunk(desc);
	if (unlikely(!chunk))
		goto err_alloc;

	if (xfer->type == EDMA_XFER_INTERLEAVED) {
		src_addr = xfer->xfer.il->src_start;
		dst_addr = xfer->xfer.il->dst_start;
	} else {
		src_addr = chan->config.src_addr;
		dst_addr = chan->config.dst_addr;
	}

	if (xfer->type == EDMA_XFER_CYCLIC) {
		cnt = xfer->xfer.cyclic.cnt;
	} else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
		cnt = xfer->xfer.sg.len;
		sg = xfer->xfer.sg.sgl;
	} else if (xfer->type == EDMA_XFER_INTERLEAVED) {
		if (xfer->xfer.il->numf > 0)
			cnt = xfer->xfer.il->numf;
		else
			cnt = xfer->xfer.il->frame_size;
	}

	for (i = 0; i < cnt; i++) {
		if (xfer->type == EDMA_XFER_SCATTER_GATHER && !sg)
			break;

		if (chunk->bursts_alloc == chan->ll_max) {
			chunk = dw_edma_alloc_chunk(desc);
			if (unlikely(!chunk))
				goto err_alloc;
		}

		burst = dw_edma_alloc_burst(chunk);
		if (unlikely(!burst))
			goto err_alloc;

		if (xfer->type == EDMA_XFER_CYCLIC)
			burst->sz = xfer->xfer.cyclic.len;
		else if (xfer->type == EDMA_XFER_SCATTER_GATHER)
			burst->sz = sg_dma_len(sg);
		else if (xfer->type == EDMA_XFER_INTERLEAVED)
			burst->sz = xfer->xfer.il->sgl[i].size;

		chunk->ll_region.sz += burst->sz;
		desc->alloc_sz += burst->sz;

		if (chan->dir == EDMA_DIR_WRITE) {
			burst->sar = src_addr;
			if (xfer->type == EDMA_XFER_CYCLIC) {
				burst->dar = xfer->xfer.cyclic.paddr;
			} else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
				src_addr += sg_dma_len(sg);
				burst->dar = sg_dma_address(sg);
				/* Unlike the typical assumption made by other
				 * drivers/IPs, the peripheral memory here is
				 * not a FIFO but linear memory, which is why
				 * the source and destination addresses are
				 * advanced by the same amount (the data
				 * length).
				 */
			}
		} else {
			burst->dar = dst_addr;
			if (xfer->type == EDMA_XFER_CYCLIC) {
				burst->sar = xfer->xfer.cyclic.paddr;
			} else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
				dst_addr += sg_dma_len(sg);
				burst->sar = sg_dma_address(sg);
				/* Unlike the typical assumption made by other
				 * drivers/IPs, the peripheral memory here is
				 * not a FIFO but linear memory, which is why
				 * the source and destination addresses are
				 * advanced by the same amount (the data
				 * length).
				 */
			}
		}

		if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
			sg = sg_next(sg);
		} else if (xfer->type == EDMA_XFER_INTERLEAVED &&
			   xfer->xfer.il->frame_size > 0) {
			struct dma_interleaved_template *il = xfer->xfer.il;
			struct data_chunk *dc = &il->sgl[i];

			if (il->src_sgl) {
				src_addr += burst->sz;
				src_addr += dmaengine_get_src_icg(il, dc);
			}

			if (il->dst_sgl) {
				dst_addr += burst->sz;
				dst_addr += dmaengine_get_dst_icg(il, dc);
			}
		}
	}

	return vchan_tx_prep(&chan->vc, &desc->vd, xfer->flags);

err_alloc:
	if (desc)
		dw_edma_free_desc(desc);

	return NULL;
}

static struct dma_async_tx_descriptor *
dw_edma_device_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
			     unsigned int len,
			     enum dma_transfer_direction direction,
			     unsigned long flags, void *context)
{
	struct dw_edma_transfer xfer;

	xfer.dchan = dchan;
	xfer.direction = direction;
	xfer.xfer.sg.sgl = sgl;
	xfer.xfer.sg.len = len;
	xfer.flags = flags;
	xfer.type = EDMA_XFER_SCATTER_GATHER;

	return dw_edma_device_transfer(&xfer);
}

static struct dma_async_tx_descriptor *
dw_edma_device_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t paddr,
			       size_t len, size_t count,
			       enum dma_transfer_direction direction,
			       unsigned long flags)
{
	struct dw_edma_transfer xfer;

	xfer.dchan = dchan;
	xfer.direction = direction;
	xfer.xfer.cyclic.paddr = paddr;
	xfer.xfer.cyclic.len = len;
	xfer.xfer.cyclic.cnt = count;
	xfer.flags = flags;
	xfer.type = EDMA_XFER_CYCLIC;

	return dw_edma_device_transfer(&xfer);
}

static struct dma_async_tx_descriptor *
dw_edma_device_prep_interleaved_dma(struct dma_chan *dchan,
				    struct dma_interleaved_template *ilt,
				    unsigned long flags)
{
	struct dw_edma_transfer xfer;

	xfer.dchan = dchan;
	xfer.direction = ilt->dir;
	xfer.xfer.il = ilt;
	xfer.flags = flags;
	xfer.type = EDMA_XFER_INTERLEAVED;

	return dw_edma_device_transfer(&xfer);
}

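/* Handle a "done" interrupt: depending on the pending request, either start
 * the next chunk, complete the descriptor, or honour a stop/pause request.
 */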
static void dw_edma_done_interrupt(struct dw_edma_chan *chan)
{
	struct dw_edma_desc *desc;
	struct virt_dma_desc *vd;
	unsigned long flags;

	dw_edma_v0_core_clear_done_int(chan);

	spin_lock_irqsave(&chan->vc.lock, flags);
	vd = vchan_next_desc(&chan->vc);
	if (vd) {
		switch (chan->request) {
		case EDMA_REQ_NONE:
			desc = vd2dw_edma_desc(vd);
			if (desc->chunks_alloc) {
				chan->status = EDMA_ST_BUSY;
				dw_edma_start_transfer(chan);
			} else {
				list_del(&vd->node);
				vchan_cookie_complete(vd);
				chan->status = EDMA_ST_IDLE;
			}
			break;

		case EDMA_REQ_STOP:
			list_del(&vd->node);
			vchan_cookie_complete(vd);
			chan->request = EDMA_REQ_NONE;
			chan->status = EDMA_ST_IDLE;
			break;

		case EDMA_REQ_PAUSE:
			chan->request = EDMA_REQ_NONE;
			chan->status = EDMA_ST_PAUSE;
			break;

		default:
			break;
		}
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static void dw_edma_abort_interrupt(struct dw_edma_chan *chan)
{
	struct virt_dma_desc *vd;
	unsigned long flags;

	dw_edma_v0_core_clear_abort_int(chan);

	spin_lock_irqsave(&chan->vc.lock, flags);
	vd = vchan_next_desc(&chan->vc);
	if (vd) {
		list_del(&vd->node);
		vchan_cookie_complete(vd);
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
	chan->request = EDMA_REQ_NONE;
	chan->status = EDMA_ST_IDLE;
}

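/* Common interrupt body: read the done and abort status for the write or
 * read channels, mask it against this IRQ's channels and dispatch the
 * per-channel handlers.
 */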
static irqreturn_t dw_edma_interrupt(int irq, void *data, bool write)
{
	struct dw_edma_irq *dw_irq = data;
	struct dw_edma *dw = dw_irq->dw;
	unsigned long total, pos, val;
	unsigned long off;
	u32 mask;

	if (write) {
		total = dw->wr_ch_cnt;
		off = 0;
		mask = dw_irq->wr_mask;
	} else {
		total = dw->rd_ch_cnt;
		off = dw->wr_ch_cnt;
		mask = dw_irq->rd_mask;
	}

	val = dw_edma_v0_core_status_done_int(dw, write ?
							  EDMA_DIR_WRITE :
							  EDMA_DIR_READ);
	val &= mask;
	for_each_set_bit(pos, &val, total) {
		struct dw_edma_chan *chan = &dw->chan[pos + off];

		dw_edma_done_interrupt(chan);
	}

	val = dw_edma_v0_core_status_abort_int(dw, write ?
							   EDMA_DIR_WRITE :
							   EDMA_DIR_READ);
	val &= mask;
	for_each_set_bit(pos, &val, total) {
		struct dw_edma_chan *chan = &dw->chan[pos + off];

		dw_edma_abort_interrupt(chan);
	}

	return IRQ_HANDLED;
}

static inline irqreturn_t dw_edma_interrupt_write(int irq, void *data)
{
	return dw_edma_interrupt(irq, data, true);
}

static inline irqreturn_t dw_edma_interrupt_read(int irq, void *data)
{
	return dw_edma_interrupt(irq, data, false);
}

static irqreturn_t dw_edma_interrupt_common(int irq, void *data)
{
	dw_edma_interrupt(irq, data, true);
	dw_edma_interrupt(irq, data, false);

	return IRQ_HANDLED;
}

static int dw_edma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);

	if (chan->status != EDMA_ST_IDLE)
		return -EBUSY;

	pm_runtime_get(chan->chip->dev);

	return 0;
}

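/* Keep retrying terminate_all for up to 5 seconds before dropping the
 * runtime PM reference.
 */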
static void dw_edma_free_chan_resources(struct dma_chan *dchan)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(5000);
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int ret;

	while (time_before(jiffies, timeout)) {
		ret = dw_edma_device_terminate_all(dchan);
		if (!ret)
			break;

		if (time_after_eq(jiffies, timeout))
			return;

		cpu_relax();
	}

	pm_runtime_put(chan->chip->dev);
}

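/* Initialize either the write or the read channels, assign each one to an
 * IRQ vector and register the resulting dma_device with the dmaengine core.
 */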
static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
				 u32 wr_alloc, u32 rd_alloc)
{
	struct dw_edma_region *dt_region;
	struct device *dev = chip->dev;
	struct dw_edma *dw = chip->dw;
	struct dw_edma_chan *chan;
	struct dw_edma_irq *irq;
	struct dma_device *dma;
	u32 alloc, off_alloc;
	u32 i, j, cnt;
	int err = 0;
	u32 pos;

	if (write) {
		i = 0;
		cnt = dw->wr_ch_cnt;
		dma = &dw->wr_edma;
		alloc = wr_alloc;
		off_alloc = 0;
	} else {
		i = dw->wr_ch_cnt;
		cnt = dw->rd_ch_cnt;
		dma = &dw->rd_edma;
		alloc = rd_alloc;
		off_alloc = wr_alloc;
	}

	INIT_LIST_HEAD(&dma->channels);
	for (j = 0; (alloc || dw->nr_irqs == 1) && j < cnt; j++, i++) {
		chan = &dw->chan[i];

		dt_region = devm_kzalloc(dev, sizeof(*dt_region), GFP_KERNEL);
		if (!dt_region)
			return -ENOMEM;

		chan->vc.chan.private = dt_region;

		chan->chip = chip;
		chan->id = j;
		chan->dir = write ? EDMA_DIR_WRITE : EDMA_DIR_READ;
		chan->configured = false;
		chan->request = EDMA_REQ_NONE;
		chan->status = EDMA_ST_IDLE;

		if (write)
			chan->ll_max = (dw->ll_region_wr[j].sz / EDMA_LL_SZ);
		else
			chan->ll_max = (dw->ll_region_rd[j].sz / EDMA_LL_SZ);
		chan->ll_max -= 1;

		dev_vdbg(dev, "L. List:\tChannel %s[%u] max_cnt=%u\n",
			 write ? "write" : "read", j, chan->ll_max);

		if (dw->nr_irqs == 1)
			pos = 0;
		else
			pos = off_alloc + (j % alloc);

		irq = &dw->irq[pos];

		if (write)
			irq->wr_mask |= BIT(j);
		else
			irq->rd_mask |= BIT(j);

		irq->dw = dw;
		memcpy(&chan->msi, &irq->msi, sizeof(chan->msi));

		dev_vdbg(dev, "MSI:\t\tChannel %s[%u] addr=0x%.8x%.8x, data=0x%.8x\n",
			 write ? "write" : "read", j,
			 chan->msi.address_hi, chan->msi.address_lo,
			 chan->msi.data);

		chan->vc.desc_free = vchan_free_desc;
		vchan_init(&chan->vc, dma);

		if (write) {
			dt_region->paddr = dw->dt_region_wr[j].paddr;
			dt_region->vaddr = dw->dt_region_wr[j].vaddr;
			dt_region->sz = dw->dt_region_wr[j].sz;
		} else {
			dt_region->paddr = dw->dt_region_rd[j].paddr;
			dt_region->vaddr = dw->dt_region_rd[j].vaddr;
			dt_region->sz = dw->dt_region_rd[j].sz;
		}

		dw_edma_v0_core_device_config(chan);
	}

	/* Set DMA channel capabilities */
	dma_cap_zero(dma->cap_mask);
	dma_cap_set(DMA_SLAVE, dma->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma->cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
	dma->directions = BIT(write ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV);
	dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	dma->chancnt = cnt;

	/* Set DMA channel callbacks */
	dma->dev = chip->dev;
	dma->device_alloc_chan_resources = dw_edma_alloc_chan_resources;
	dma->device_free_chan_resources = dw_edma_free_chan_resources;
	dma->device_config = dw_edma_device_config;
	dma->device_pause = dw_edma_device_pause;
	dma->device_resume = dw_edma_device_resume;
	dma->device_terminate_all = dw_edma_device_terminate_all;
	dma->device_issue_pending = dw_edma_device_issue_pending;
	dma->device_tx_status = dw_edma_device_tx_status;
	dma->device_prep_slave_sg = dw_edma_device_prep_slave_sg;
	dma->device_prep_dma_cyclic = dw_edma_device_prep_dma_cyclic;
	dma->device_prep_interleaved_dma = dw_edma_device_prep_interleaved_dma;

	dma_set_max_seg_size(dma->dev, U32_MAX);

	/* Register DMA device */
	err = dma_async_device_register(dma);

	return err;
}

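/* Helpers used by dw_edma_irq_request() to spread the available IRQ vectors
 * across the write and read channels.
 */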
static inline void dw_edma_dec_irq_alloc(int *nr_irqs, u32 *alloc, u16 cnt)
{
	if (*nr_irqs && *alloc < cnt) {
		(*alloc)++;
		(*nr_irqs)--;
	}
}

static inline void dw_edma_add_irq_mask(u32 *mask, u32 alloc, u16 cnt)
{
	while (*mask * alloc < cnt)
		(*mask)++;
}

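/* Request either a single IRQ shared by all channels or one vector per group
 * of write/read channels, caching the MSI message for each vector.
 */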
static int dw_edma_irq_request(struct dw_edma_chip *chip,
			       u32 *wr_alloc, u32 *rd_alloc)
{
	struct device *dev = chip->dev;
	struct dw_edma *dw = chip->dw;
	u32 wr_mask = 1;
	u32 rd_mask = 1;
	int i, err = 0;
	u32 ch_cnt;
	int irq;

	ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;

	if (dw->nr_irqs < 1)
		return -EINVAL;

	if (dw->nr_irqs == 1) {
		/* Common IRQ shared among all channels */
		irq = dw->ops->irq_vector(dev, 0);
		err = request_irq(irq, dw_edma_interrupt_common,
				  IRQF_SHARED, dw->name, &dw->irq[0]);
		if (err) {
			dw->nr_irqs = 0;
			return err;
		}

		if (irq_get_msi_desc(irq))
			get_cached_msi_msg(irq, &dw->irq[0].msi);
	} else {
		/* Distribute IRQs equally among all channels */
		int tmp = dw->nr_irqs;

		while (tmp && (*wr_alloc + *rd_alloc) < ch_cnt) {
			dw_edma_dec_irq_alloc(&tmp, wr_alloc, dw->wr_ch_cnt);
			dw_edma_dec_irq_alloc(&tmp, rd_alloc, dw->rd_ch_cnt);
		}

		dw_edma_add_irq_mask(&wr_mask, *wr_alloc, dw->wr_ch_cnt);
		dw_edma_add_irq_mask(&rd_mask, *rd_alloc, dw->rd_ch_cnt);

		for (i = 0; i < (*wr_alloc + *rd_alloc); i++) {
			irq = dw->ops->irq_vector(dev, i);
			err = request_irq(irq,
					  i < *wr_alloc ?
						dw_edma_interrupt_write :
						dw_edma_interrupt_read,
					  IRQF_SHARED, dw->name,
					  &dw->irq[i]);
			if (err) {
				dw->nr_irqs = i;
				return err;
			}

			if (irq_get_msi_desc(irq))
				get_cached_msi_msg(irq, &dw->irq[i].msi);
		}

		dw->nr_irqs = i;
	}

	return err;
}

int dw_edma_probe(struct dw_edma_chip *chip)
{
	struct device *dev;
	struct dw_edma *dw;
	u32 wr_alloc = 0;
	u32 rd_alloc = 0;
	int i, err;

	if (!chip)
		return -EINVAL;

	dev = chip->dev;
	if (!dev)
		return -EINVAL;

	dw = chip->dw;
	if (!dw || !dw->irq || !dw->ops || !dw->ops->irq_vector)
		return -EINVAL;

	raw_spin_lock_init(&dw->lock);

	dw->wr_ch_cnt = min_t(u16, dw->wr_ch_cnt,
			      dw_edma_v0_core_ch_count(dw, EDMA_DIR_WRITE));
	dw->wr_ch_cnt = min_t(u16, dw->wr_ch_cnt, EDMA_MAX_WR_CH);

	dw->rd_ch_cnt = min_t(u16, dw->rd_ch_cnt,
			      dw_edma_v0_core_ch_count(dw, EDMA_DIR_READ));
	dw->rd_ch_cnt = min_t(u16, dw->rd_ch_cnt, EDMA_MAX_RD_CH);

	if (!dw->wr_ch_cnt && !dw->rd_ch_cnt)
		return -EINVAL;

	dev_vdbg(dev, "Channels:\twrite=%d, read=%d\n",
		 dw->wr_ch_cnt, dw->rd_ch_cnt);

	/* Allocate channels */
	dw->chan = devm_kcalloc(dev, dw->wr_ch_cnt + dw->rd_ch_cnt,
				sizeof(*dw->chan), GFP_KERNEL);
	if (!dw->chan)
		return -ENOMEM;

	snprintf(dw->name, sizeof(dw->name), "dw-edma-core:%d", chip->id);

	/* Disable eDMA, only to establish the ideal initial conditions */
	dw_edma_v0_core_off(dw);

	/* Request IRQs */
	err = dw_edma_irq_request(chip, &wr_alloc, &rd_alloc);
	if (err)
		return err;

	/* Setup write channels */
	err = dw_edma_channel_setup(chip, true, wr_alloc, rd_alloc);
	if (err)
		goto err_irq_free;

	/* Setup read channels */
	err = dw_edma_channel_setup(chip, false, wr_alloc, rd_alloc);
	if (err)
		goto err_irq_free;

	/* Power management */
	pm_runtime_enable(dev);

	/* Turn debugfs on */
	dw_edma_v0_core_debugfs_on(chip);

	return 0;

err_irq_free:
	for (i = (dw->nr_irqs - 1); i >= 0; i--)
		free_irq(dw->ops->irq_vector(dev, i), &dw->irq[i]);

	dw->nr_irqs = 0;

	return err;
}
EXPORT_SYMBOL_GPL(dw_edma_probe);

int dw_edma_remove(struct dw_edma_chip *chip)
{
	struct dw_edma_chan *chan, *_chan;
	struct device *dev = chip->dev;
	struct dw_edma *dw = chip->dw;
	int i;

	/* Disable eDMA */
	dw_edma_v0_core_off(dw);

	/* Free irqs */
	for (i = (dw->nr_irqs - 1); i >= 0; i--)
		free_irq(dw->ops->irq_vector(dev, i), &dw->irq[i]);

	/* Power management */
	pm_runtime_disable(dev);

	/* Deregister eDMA device */
	dma_async_device_unregister(&dw->wr_edma);
	list_for_each_entry_safe(chan, _chan, &dw->wr_edma.channels,
				 vc.chan.device_node) {
		tasklet_kill(&chan->vc.task);
		list_del(&chan->vc.chan.device_node);
	}

	dma_async_device_unregister(&dw->rd_edma);
	list_for_each_entry_safe(chan, _chan, &dw->rd_edma.channels,
				 vc.chan.device_node) {
		tasklet_kill(&chan->vc.task);
		list_del(&chan->vc.chan.device_node);
	}

	/* Turn debugfs off */
	dw_edma_v0_core_debugfs_off(chip);

	return 0;
}
EXPORT_SYMBOL_GPL(dw_edma_remove);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare eDMA controller core driver");
MODULE_AUTHOR("Gustavo Pimentel <gustavo.pimentel@synopsys.com>");