cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

tegra20-apb-dma.c (45677B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * DMA driver for Nvidia's Tegra20 APB DMA controller.
      4 *
      5 * Copyright (c) 2012-2013, NVIDIA CORPORATION.  All rights reserved.
      6 */
      7
      8#include <linux/bitops.h>
      9#include <linux/clk.h>
     10#include <linux/delay.h>
     11#include <linux/dmaengine.h>
     12#include <linux/dma-mapping.h>
     13#include <linux/err.h>
     14#include <linux/init.h>
     15#include <linux/interrupt.h>
     16#include <linux/io.h>
     17#include <linux/mm.h>
     18#include <linux/module.h>
     19#include <linux/of.h>
     20#include <linux/of_device.h>
     21#include <linux/of_dma.h>
     22#include <linux/platform_device.h>
     23#include <linux/pm.h>
     24#include <linux/pm_runtime.h>
     25#include <linux/reset.h>
     26#include <linux/slab.h>
     27#include <linux/wait.h>
     28
     29#include "dmaengine.h"
     30
     31#define CREATE_TRACE_POINTS
     32#include <trace/events/tegra_apb_dma.h>
     33
     34#define TEGRA_APBDMA_GENERAL			0x0
     35#define TEGRA_APBDMA_GENERAL_ENABLE		BIT(31)
     36
     37#define TEGRA_APBDMA_CONTROL			0x010
     38#define TEGRA_APBDMA_IRQ_MASK			0x01c
     39#define TEGRA_APBDMA_IRQ_MASK_SET		0x020
     40
     41/* CSR register */
     42#define TEGRA_APBDMA_CHAN_CSR			0x00
     43#define TEGRA_APBDMA_CSR_ENB			BIT(31)
     44#define TEGRA_APBDMA_CSR_IE_EOC			BIT(30)
     45#define TEGRA_APBDMA_CSR_HOLD			BIT(29)
     46#define TEGRA_APBDMA_CSR_DIR			BIT(28)
     47#define TEGRA_APBDMA_CSR_ONCE			BIT(27)
     48#define TEGRA_APBDMA_CSR_FLOW			BIT(21)
     49#define TEGRA_APBDMA_CSR_REQ_SEL_SHIFT		16
     50#define TEGRA_APBDMA_CSR_REQ_SEL_MASK		0x1F
     51#define TEGRA_APBDMA_CSR_WCOUNT_MASK		0xFFFC
     52
     53/* STATUS register */
     54#define TEGRA_APBDMA_CHAN_STATUS		0x004
     55#define TEGRA_APBDMA_STATUS_BUSY		BIT(31)
     56#define TEGRA_APBDMA_STATUS_ISE_EOC		BIT(30)
     57#define TEGRA_APBDMA_STATUS_HALT		BIT(29)
     58#define TEGRA_APBDMA_STATUS_PING_PONG		BIT(28)
     59#define TEGRA_APBDMA_STATUS_COUNT_SHIFT		2
     60#define TEGRA_APBDMA_STATUS_COUNT_MASK		0xFFFC
     61
     62#define TEGRA_APBDMA_CHAN_CSRE			0x00C
     63#define TEGRA_APBDMA_CHAN_CSRE_PAUSE		BIT(31)
     64
     65/* AHB memory address */
     66#define TEGRA_APBDMA_CHAN_AHBPTR		0x010
     67
     68/* AHB sequence register */
     69#define TEGRA_APBDMA_CHAN_AHBSEQ		0x14
     70#define TEGRA_APBDMA_AHBSEQ_INTR_ENB		BIT(31)
     71#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_8		(0 << 28)
     72#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_16	(1 << 28)
     73#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32	(2 << 28)
     74#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_64	(3 << 28)
     75#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_128	(4 << 28)
     76#define TEGRA_APBDMA_AHBSEQ_DATA_SWAP		BIT(27)
     77#define TEGRA_APBDMA_AHBSEQ_BURST_1		(4 << 24)
     78#define TEGRA_APBDMA_AHBSEQ_BURST_4		(5 << 24)
     79#define TEGRA_APBDMA_AHBSEQ_BURST_8		(6 << 24)
     80#define TEGRA_APBDMA_AHBSEQ_DBL_BUF		BIT(19)
     81#define TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT		16
     82#define TEGRA_APBDMA_AHBSEQ_WRAP_NONE		0
     83
     84/* APB address */
     85#define TEGRA_APBDMA_CHAN_APBPTR		0x018
     86
     87/* APB sequence register */
     88#define TEGRA_APBDMA_CHAN_APBSEQ		0x01c
     89#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8		(0 << 28)
     90#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16	(1 << 28)
     91#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32	(2 << 28)
     92#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64	(3 << 28)
     93#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_128	(4 << 28)
     94#define TEGRA_APBDMA_APBSEQ_DATA_SWAP		BIT(27)
     95#define TEGRA_APBDMA_APBSEQ_WRAP_WORD_1		(1 << 16)
     96
     97/* Tegra148 specific registers */
     98#define TEGRA_APBDMA_CHAN_WCOUNT		0x20
     99
    100#define TEGRA_APBDMA_CHAN_WORD_TRANSFER		0x24
    101
    102/*
     103 * If any burst is in flight and DMA is paused then this is the time to
     104 * complete the in-flight burst and update the DMA status register.
    105 */
    106#define TEGRA_APBDMA_BURST_COMPLETE_TIME	20
    107
    108/* Channel base address offset from APBDMA base address */
    109#define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET	0x1000
    110
    111#define TEGRA_APBDMA_SLAVE_ID_INVALID	(TEGRA_APBDMA_CSR_REQ_SEL_MASK + 1)
    112
    113struct tegra_dma;
    114
    115/*
     116 * tegra_dma_chip_data: Tegra chip-specific DMA data
     117 * @nr_channels: Number of channels available in the controller.
     118 * @channel_reg_size: Channel register size/stride.
     119 * @max_dma_count: Maximum DMA transfer count supported by DMA controller.
     120 * @support_channel_pause: Support channel-wise pause of DMA.
    121 * @support_separate_wcount_reg: Support separate word count register.
    122 */
    123struct tegra_dma_chip_data {
    124	unsigned int nr_channels;
    125	unsigned int channel_reg_size;
    126	unsigned int max_dma_count;
    127	bool support_channel_pause;
    128	bool support_separate_wcount_reg;
    129};
    130
    131/* DMA channel registers */
    132struct tegra_dma_channel_regs {
    133	u32 csr;
    134	u32 ahb_ptr;
    135	u32 apb_ptr;
    136	u32 ahb_seq;
    137	u32 apb_seq;
    138	u32 wcount;
    139};
    140
    141/*
     142 * tegra_dma_sg_req: DMA request details used to configure the hardware.
     143 * This contains the details of one transfer to program into the DMA hw.
     144 * The client's request for a data transfer can be broken into multiple
     145 * sub-transfers as per the requester details and hw support.
     146 * Each sub-transfer is added to the list of transfers and points to the
     147 * Tegra DMA descriptor that manages the transfer details.
    148 */
    149struct tegra_dma_sg_req {
    150	struct tegra_dma_channel_regs	ch_regs;
    151	unsigned int			req_len;
    152	bool				configured;
    153	bool				last_sg;
    154	struct list_head		node;
    155	struct tegra_dma_desc		*dma_desc;
    156	unsigned int			words_xferred;
    157};
    158
    159/*
     160 * tegra_dma_desc: Tegra DMA descriptor which manages the client requests.
     161 * This descriptor keeps track of transfer status, callbacks, request
     162 * counts, etc.
    163 */
    164struct tegra_dma_desc {
    165	struct dma_async_tx_descriptor	txd;
    166	unsigned int			bytes_requested;
    167	unsigned int			bytes_transferred;
    168	enum dma_status			dma_status;
    169	struct list_head		node;
    170	struct list_head		tx_list;
    171	struct list_head		cb_node;
    172	unsigned int			cb_count;
    173};
    174
    175struct tegra_dma_channel;
    176
    177typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc,
    178				bool to_terminate);
    179
    180/* tegra_dma_channel: Channel specific information */
    181struct tegra_dma_channel {
    182	struct dma_chan		dma_chan;
    183	char			name[12];
    184	bool			config_init;
    185	unsigned int		id;
    186	void __iomem		*chan_addr;
    187	spinlock_t		lock;
    188	bool			busy;
    189	struct tegra_dma	*tdma;
    190	bool			cyclic;
    191
    192	/* Different lists for managing the requests */
    193	struct list_head	free_sg_req;
    194	struct list_head	pending_sg_req;
    195	struct list_head	free_dma_desc;
    196	struct list_head	cb_desc;
    197
    198	/* ISR handler and tasklet for bottom half of isr handling */
    199	dma_isr_handler		isr_handler;
    200	struct tasklet_struct	tasklet;
    201
    202	/* Channel-slave specific configuration */
    203	unsigned int slave_id;
    204	struct dma_slave_config dma_sconfig;
    205	struct tegra_dma_channel_regs channel_reg;
    206
    207	struct wait_queue_head wq;
    208};
    209
    210/* tegra_dma: Tegra DMA specific information */
    211struct tegra_dma {
    212	struct dma_device		dma_dev;
    213	struct device			*dev;
    214	struct clk			*dma_clk;
    215	struct reset_control		*rst;
    216	spinlock_t			global_lock;
    217	void __iomem			*base_addr;
    218	const struct tegra_dma_chip_data *chip_data;
    219
    220	/*
    221	 * Counter for managing global pausing of the DMA controller.
    222	 * Only applicable for devices that don't support individual
    223	 * channel pausing.
    224	 */
    225	u32				global_pause_count;
    226
    227	/* Last member of the structure */
    228	struct tegra_dma_channel channels[];
    229};
    230
    231static inline void tdma_write(struct tegra_dma *tdma, u32 reg, u32 val)
    232{
    233	writel(val, tdma->base_addr + reg);
    234}
    235
    236static inline u32 tdma_read(struct tegra_dma *tdma, u32 reg)
    237{
    238	return readl(tdma->base_addr + reg);
    239}
    240
    241static inline void tdc_write(struct tegra_dma_channel *tdc,
    242			     u32 reg, u32 val)
    243{
    244	writel(val, tdc->chan_addr + reg);
    245}
    246
    247static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg)
    248{
    249	return readl(tdc->chan_addr + reg);
    250}
    251
    252static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc)
    253{
    254	return container_of(dc, struct tegra_dma_channel, dma_chan);
    255}
    256
    257static inline struct tegra_dma_desc *
    258txd_to_tegra_dma_desc(struct dma_async_tx_descriptor *td)
    259{
    260	return container_of(td, struct tegra_dma_desc, txd);
    261}
    262
    263static inline struct device *tdc2dev(struct tegra_dma_channel *tdc)
    264{
    265	return &tdc->dma_chan.dev->device;
    266}
    267
    268static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *tx);
    269
     270/* Get a DMA desc from the free list; if none is available, allocate one. */
    271static struct tegra_dma_desc *tegra_dma_desc_get(struct tegra_dma_channel *tdc)
    272{
    273	struct tegra_dma_desc *dma_desc;
    274	unsigned long flags;
    275
    276	spin_lock_irqsave(&tdc->lock, flags);
    277
     278	/* Do not reuse descs that are still waiting for an ack or callback */
    279	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
    280		if (async_tx_test_ack(&dma_desc->txd) && !dma_desc->cb_count) {
    281			list_del(&dma_desc->node);
    282			spin_unlock_irqrestore(&tdc->lock, flags);
    283			dma_desc->txd.flags = 0;
    284			return dma_desc;
    285		}
    286	}
    287
    288	spin_unlock_irqrestore(&tdc->lock, flags);
    289
    290	/* Allocate DMA desc */
    291	dma_desc = kzalloc(sizeof(*dma_desc), GFP_NOWAIT);
    292	if (!dma_desc)
    293		return NULL;
    294
    295	dma_async_tx_descriptor_init(&dma_desc->txd, &tdc->dma_chan);
    296	dma_desc->txd.tx_submit = tegra_dma_tx_submit;
    297	dma_desc->txd.flags = 0;
    298
    299	return dma_desc;
    300}
    301
    302static void tegra_dma_desc_put(struct tegra_dma_channel *tdc,
    303			       struct tegra_dma_desc *dma_desc)
    304{
    305	unsigned long flags;
    306
    307	spin_lock_irqsave(&tdc->lock, flags);
    308	if (!list_empty(&dma_desc->tx_list))
    309		list_splice_init(&dma_desc->tx_list, &tdc->free_sg_req);
    310	list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
    311	spin_unlock_irqrestore(&tdc->lock, flags);
    312}
    313
    314static struct tegra_dma_sg_req *
    315tegra_dma_sg_req_get(struct tegra_dma_channel *tdc)
    316{
    317	struct tegra_dma_sg_req *sg_req;
    318	unsigned long flags;
    319
    320	spin_lock_irqsave(&tdc->lock, flags);
    321	if (!list_empty(&tdc->free_sg_req)) {
    322		sg_req = list_first_entry(&tdc->free_sg_req, typeof(*sg_req),
    323					  node);
    324		list_del(&sg_req->node);
    325		spin_unlock_irqrestore(&tdc->lock, flags);
    326		return sg_req;
    327	}
    328	spin_unlock_irqrestore(&tdc->lock, flags);
    329
    330	sg_req = kzalloc(sizeof(*sg_req), GFP_NOWAIT);
    331
    332	return sg_req;
    333}
    334
    335static int tegra_dma_slave_config(struct dma_chan *dc,
    336				  struct dma_slave_config *sconfig)
    337{
    338	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
    339
    340	if (!list_empty(&tdc->pending_sg_req)) {
    341		dev_err(tdc2dev(tdc), "Configuration not allowed\n");
    342		return -EBUSY;
    343	}
    344
    345	memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
    346	tdc->config_init = true;
    347
    348	return 0;
    349}
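
        /*
         * For reference, a minimal sketch of how a client driver might use a
         * channel of this controller through the generic dmaengine API. The
         * device, channel name, addresses and sizes below are illustrative
         * only and not part of this driver:
         *
         *	struct dma_slave_config cfg = {
         *		.direction	= DMA_DEV_TO_MEM,
         *		.src_addr	= fifo_phys_addr,
         *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
         *		.src_maxburst	= 8,
         *	};
         *	struct dma_chan *chan = dma_request_chan(dev, "rx");
         *	struct dma_async_tx_descriptor *desc;
         *
         *	dmaengine_slave_config(chan, &cfg);
         *	desc = dmaengine_prep_slave_single(chan, dma_buf, len,
         *					   DMA_DEV_TO_MEM,
         *					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
         *	dmaengine_submit(desc);
         *	dma_async_issue_pending(chan);
         *
         * Note that the prep callbacks below insist on DMA_PREP_INTERRUPT and
         * that the buffer address and length must be word (4 byte) aligned.
         */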
    350
    351static void tegra_dma_global_pause(struct tegra_dma_channel *tdc,
    352				   bool wait_for_burst_complete)
    353{
    354	struct tegra_dma *tdma = tdc->tdma;
    355
    356	spin_lock(&tdma->global_lock);
    357
    358	if (tdc->tdma->global_pause_count == 0) {
    359		tdma_write(tdma, TEGRA_APBDMA_GENERAL, 0);
    360		if (wait_for_burst_complete)
    361			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
    362	}
    363
    364	tdc->tdma->global_pause_count++;
    365
    366	spin_unlock(&tdma->global_lock);
    367}
    368
    369static void tegra_dma_global_resume(struct tegra_dma_channel *tdc)
    370{
    371	struct tegra_dma *tdma = tdc->tdma;
    372
    373	spin_lock(&tdma->global_lock);
    374
    375	if (WARN_ON(tdc->tdma->global_pause_count == 0))
    376		goto out;
    377
    378	if (--tdc->tdma->global_pause_count == 0)
    379		tdma_write(tdma, TEGRA_APBDMA_GENERAL,
    380			   TEGRA_APBDMA_GENERAL_ENABLE);
    381
    382out:
    383	spin_unlock(&tdma->global_lock);
    384}
    385
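        /*
         * Pause handling: chips that support per-channel pause (Tegra114 and
         * later, see tegra_dma_chip_data) use the channel CSRE register;
         * older chips can only pause the whole controller through the
         * GENERAL register, which is refcounted via tdma->global_pause_count
         * so that channels pausing concurrently do not resume the controller
         * prematurely.
         */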
    386static void tegra_dma_pause(struct tegra_dma_channel *tdc,
    387			    bool wait_for_burst_complete)
    388{
    389	struct tegra_dma *tdma = tdc->tdma;
    390
    391	if (tdma->chip_data->support_channel_pause) {
    392		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE,
    393			  TEGRA_APBDMA_CHAN_CSRE_PAUSE);
    394		if (wait_for_burst_complete)
    395			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
    396	} else {
    397		tegra_dma_global_pause(tdc, wait_for_burst_complete);
    398	}
    399}
    400
    401static void tegra_dma_resume(struct tegra_dma_channel *tdc)
    402{
    403	struct tegra_dma *tdma = tdc->tdma;
    404
    405	if (tdma->chip_data->support_channel_pause)
    406		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE, 0);
    407	else
    408		tegra_dma_global_resume(tdc);
    409}
    410
    411static void tegra_dma_stop(struct tegra_dma_channel *tdc)
    412{
    413	u32 csr, status;
    414
    415	/* Disable interrupts */
    416	csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
    417	csr &= ~TEGRA_APBDMA_CSR_IE_EOC;
    418	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);
    419
    420	/* Disable DMA */
    421	csr &= ~TEGRA_APBDMA_CSR_ENB;
    422	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);
    423
    424	/* Clear interrupt status if it is there */
    425	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
    426	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
    427		dev_dbg(tdc2dev(tdc), "%s():clearing interrupt\n", __func__);
    428		tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
    429	}
    430	tdc->busy = false;
    431}
    432
    433static void tegra_dma_start(struct tegra_dma_channel *tdc,
    434			    struct tegra_dma_sg_req *sg_req)
    435{
    436	struct tegra_dma_channel_regs *ch_regs = &sg_req->ch_regs;
    437
    438	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, ch_regs->csr);
    439	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_regs->apb_seq);
    440	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_regs->apb_ptr);
    441	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_regs->ahb_seq);
    442	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_regs->ahb_ptr);
    443	if (tdc->tdma->chip_data->support_separate_wcount_reg)
    444		tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT, ch_regs->wcount);
    445
    446	/* Start DMA */
    447	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
    448		  ch_regs->csr | TEGRA_APBDMA_CSR_ENB);
    449}
    450
    451static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
    452					 struct tegra_dma_sg_req *nsg_req)
    453{
    454	unsigned long status;
    455
    456	/*
     457	 * The DMA controller reloads the new configuration for the next
     458	 * transfer after the last burst of the current transfer completes.
     459	 * If there is no EOC status then this makes sure that the last burst
     460	 * has not completed yet. The last burst may still be in flight and
     461	 * so can complete, but because the DMA is paused it will neither
     462	 * generate an interrupt nor reload the new configuration for the
     463	 * next transfer.
     464	 * If the EOC status is already set then the interrupt handler needs
     465	 * to load the new configuration.
    466	 */
    467	tegra_dma_pause(tdc, false);
    468	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
    469
    470	/*
     471	 * If an interrupt is pending then do nothing, as the ISR will handle
     472	 * the programming of the new request.
    473	 */
    474	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
    475		dev_err(tdc2dev(tdc),
    476			"Skipping new configuration as interrupt is pending\n");
    477		tegra_dma_resume(tdc);
    478		return;
    479	}
    480
    481	/* Safe to program new configuration */
    482	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, nsg_req->ch_regs.apb_ptr);
    483	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr);
    484	if (tdc->tdma->chip_data->support_separate_wcount_reg)
    485		tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT,
    486			  nsg_req->ch_regs.wcount);
    487	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
    488		  nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB);
    489	nsg_req->configured = true;
    490	nsg_req->words_xferred = 0;
    491
    492	tegra_dma_resume(tdc);
    493}
    494
    495static void tdc_start_head_req(struct tegra_dma_channel *tdc)
    496{
    497	struct tegra_dma_sg_req *sg_req;
    498
    499	sg_req = list_first_entry(&tdc->pending_sg_req, typeof(*sg_req), node);
    500	tegra_dma_start(tdc, sg_req);
    501	sg_req->configured = true;
    502	sg_req->words_xferred = 0;
    503	tdc->busy = true;
    504}
    505
    506static void tdc_configure_next_head_desc(struct tegra_dma_channel *tdc)
    507{
    508	struct tegra_dma_sg_req *hsgreq, *hnsgreq;
    509
    510	hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
    511	if (!list_is_last(&hsgreq->node, &tdc->pending_sg_req)) {
    512		hnsgreq = list_first_entry(&hsgreq->node, typeof(*hnsgreq),
    513					   node);
    514		tegra_dma_configure_for_next(tdc, hnsgreq);
    515	}
    516}
    517
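        /*
         * Translate the hardware transfer counter (the count field of the
         * channel STATUS register, or the separate WORD_TRANSFER register on
         * chips that have one) into the number of bytes already moved for
         * this sg request. The result lags the real amount by one 4-byte
         * word; completion of the final word is signalled only through the
         * EOC status bit (see the comment in tegra_dma_sg_bytes_xferred()).
         */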
    518static inline unsigned int
    519get_current_xferred_count(struct tegra_dma_channel *tdc,
    520			  struct tegra_dma_sg_req *sg_req,
    521			  unsigned long status)
    522{
    523	return sg_req->req_len - (status & TEGRA_APBDMA_STATUS_COUNT_MASK) - 4;
    524}
    525
    526static void tegra_dma_abort_all(struct tegra_dma_channel *tdc)
    527{
    528	struct tegra_dma_desc *dma_desc;
    529	struct tegra_dma_sg_req *sgreq;
    530
    531	while (!list_empty(&tdc->pending_sg_req)) {
    532		sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq),
    533					 node);
    534		list_move_tail(&sgreq->node, &tdc->free_sg_req);
    535		if (sgreq->last_sg) {
    536			dma_desc = sgreq->dma_desc;
    537			dma_desc->dma_status = DMA_ERROR;
    538			list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
    539
     540			/* Add to the cb list if it is not already there. */
    541			if (!dma_desc->cb_count)
    542				list_add_tail(&dma_desc->cb_node,
    543					      &tdc->cb_desc);
    544			dma_desc->cb_count++;
    545		}
    546	}
    547	tdc->isr_handler = NULL;
    548}
    549
    550static bool handle_continuous_head_request(struct tegra_dma_channel *tdc,
    551					   bool to_terminate)
    552{
    553	struct tegra_dma_sg_req *hsgreq;
    554
    555	/*
     556	 * Check that the head request on the list is in flight.
     557	 * If it is not in flight then abort the transfer, as
     558	 * looping of the transfer cannot continue.
    559	 */
    560	hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
    561	if (!hsgreq->configured) {
    562		tegra_dma_stop(tdc);
    563		pm_runtime_put(tdc->tdma->dev);
    564		dev_err(tdc2dev(tdc), "DMA transfer underflow, aborting DMA\n");
    565		tegra_dma_abort_all(tdc);
    566		return false;
    567	}
    568
    569	/* Configure next request */
    570	if (!to_terminate)
    571		tdc_configure_next_head_desc(tdc);
    572
    573	return true;
    574}
    575
    576static void handle_once_dma_done(struct tegra_dma_channel *tdc,
    577				 bool to_terminate)
    578{
    579	struct tegra_dma_desc *dma_desc;
    580	struct tegra_dma_sg_req *sgreq;
    581
    582	tdc->busy = false;
    583	sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
    584	dma_desc = sgreq->dma_desc;
    585	dma_desc->bytes_transferred += sgreq->req_len;
    586
    587	list_del(&sgreq->node);
    588	if (sgreq->last_sg) {
    589		dma_desc->dma_status = DMA_COMPLETE;
    590		dma_cookie_complete(&dma_desc->txd);
    591		if (!dma_desc->cb_count)
    592			list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
    593		dma_desc->cb_count++;
    594		list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
    595	}
    596	list_add_tail(&sgreq->node, &tdc->free_sg_req);
    597
     598	/* Do not start DMA if it is going to be terminated */
    599	if (to_terminate)
    600		return;
    601
    602	if (list_empty(&tdc->pending_sg_req)) {
    603		pm_runtime_put(tdc->tdma->dev);
    604		return;
    605	}
    606
    607	tdc_start_head_req(tdc);
    608}
    609
    610static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
    611					    bool to_terminate)
    612{
    613	struct tegra_dma_desc *dma_desc;
    614	struct tegra_dma_sg_req *sgreq;
    615	bool st;
    616
    617	sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
    618	dma_desc = sgreq->dma_desc;
     619	/* If we DMA for long enough, the transfer count will wrap */
    620	dma_desc->bytes_transferred =
    621		(dma_desc->bytes_transferred + sgreq->req_len) %
    622		dma_desc->bytes_requested;
    623
     624	/* Callback needs to be called */
    625	if (!dma_desc->cb_count)
    626		list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
    627	dma_desc->cb_count++;
    628
    629	sgreq->words_xferred = 0;
    630
     631	/* If not the last req then move it to the end of the pending list */
    632	if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) {
    633		list_move_tail(&sgreq->node, &tdc->pending_sg_req);
    634		sgreq->configured = false;
    635		st = handle_continuous_head_request(tdc, to_terminate);
    636		if (!st)
    637			dma_desc->dma_status = DMA_ERROR;
    638	}
    639}
    640
    641static void tegra_dma_tasklet(struct tasklet_struct *t)
    642{
    643	struct tegra_dma_channel *tdc = from_tasklet(tdc, t, tasklet);
    644	struct dmaengine_desc_callback cb;
    645	struct tegra_dma_desc *dma_desc;
    646	unsigned int cb_count;
    647	unsigned long flags;
    648
    649	spin_lock_irqsave(&tdc->lock, flags);
    650	while (!list_empty(&tdc->cb_desc)) {
    651		dma_desc = list_first_entry(&tdc->cb_desc, typeof(*dma_desc),
    652					    cb_node);
    653		list_del(&dma_desc->cb_node);
    654		dmaengine_desc_get_callback(&dma_desc->txd, &cb);
    655		cb_count = dma_desc->cb_count;
    656		dma_desc->cb_count = 0;
    657		trace_tegra_dma_complete_cb(&tdc->dma_chan, cb_count,
    658					    cb.callback);
    659		spin_unlock_irqrestore(&tdc->lock, flags);
    660		while (cb_count--)
    661			dmaengine_desc_callback_invoke(&cb, NULL);
    662		spin_lock_irqsave(&tdc->lock, flags);
    663	}
    664	spin_unlock_irqrestore(&tdc->lock, flags);
    665}
    666
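        /*
         * Per-channel interrupt handler. On an end-of-transfer (EOC)
         * interrupt the status is acknowledged by writing it back, the
         * mode-specific isr_handler advances the request queue under the
         * channel lock, client callbacks are deferred to the tasklet and
         * any tegra_dma_synchronize() waiters are woken up.
         */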
    667static irqreturn_t tegra_dma_isr(int irq, void *dev_id)
    668{
    669	struct tegra_dma_channel *tdc = dev_id;
    670	u32 status;
    671
    672	spin_lock(&tdc->lock);
    673
    674	trace_tegra_dma_isr(&tdc->dma_chan, irq);
    675	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
    676	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
    677		tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
    678		tdc->isr_handler(tdc, false);
    679		tasklet_schedule(&tdc->tasklet);
    680		wake_up_all(&tdc->wq);
    681		spin_unlock(&tdc->lock);
    682		return IRQ_HANDLED;
    683	}
    684
    685	spin_unlock(&tdc->lock);
    686	dev_info(tdc2dev(tdc), "Interrupt already served status 0x%08x\n",
    687		 status);
    688
    689	return IRQ_NONE;
    690}
    691
    692static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *txd)
    693{
    694	struct tegra_dma_desc *dma_desc = txd_to_tegra_dma_desc(txd);
    695	struct tegra_dma_channel *tdc = to_tegra_dma_chan(txd->chan);
    696	unsigned long flags;
    697	dma_cookie_t cookie;
    698
    699	spin_lock_irqsave(&tdc->lock, flags);
    700	dma_desc->dma_status = DMA_IN_PROGRESS;
    701	cookie = dma_cookie_assign(&dma_desc->txd);
    702	list_splice_tail_init(&dma_desc->tx_list, &tdc->pending_sg_req);
    703	spin_unlock_irqrestore(&tdc->lock, flags);
    704
    705	return cookie;
    706}
    707
    708static void tegra_dma_issue_pending(struct dma_chan *dc)
    709{
    710	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
    711	unsigned long flags;
    712	int err;
    713
    714	spin_lock_irqsave(&tdc->lock, flags);
    715	if (list_empty(&tdc->pending_sg_req)) {
    716		dev_err(tdc2dev(tdc), "No DMA request\n");
    717		goto end;
    718	}
    719	if (!tdc->busy) {
    720		err = pm_runtime_resume_and_get(tdc->tdma->dev);
    721		if (err < 0) {
    722			dev_err(tdc2dev(tdc), "Failed to enable DMA\n");
    723			goto end;
    724		}
    725
    726		tdc_start_head_req(tdc);
    727
    728		/* Continuous single mode: Configure next req */
    729		if (tdc->cyclic) {
    730			/*
     731			 * Wait one burst time before configuring the DMA
     732			 * for the next transfer.
    733			 */
    734			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
    735			tdc_configure_next_head_desc(tdc);
    736		}
    737	}
    738end:
    739	spin_unlock_irqrestore(&tdc->lock, flags);
    740}
    741
    742static int tegra_dma_terminate_all(struct dma_chan *dc)
    743{
    744	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
    745	struct tegra_dma_desc *dma_desc;
    746	struct tegra_dma_sg_req *sgreq;
    747	unsigned long flags;
    748	u32 status, wcount;
    749	bool was_busy;
    750
    751	spin_lock_irqsave(&tdc->lock, flags);
    752
    753	if (!tdc->busy)
    754		goto skip_dma_stop;
    755
    756	/* Pause DMA before checking the queue status */
    757	tegra_dma_pause(tdc, true);
    758
    759	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
    760	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
    761		dev_dbg(tdc2dev(tdc), "%s():handling isr\n", __func__);
    762		tdc->isr_handler(tdc, true);
    763		status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
    764	}
    765	if (tdc->tdma->chip_data->support_separate_wcount_reg)
    766		wcount = tdc_read(tdc, TEGRA_APBDMA_CHAN_WORD_TRANSFER);
    767	else
    768		wcount = status;
    769
    770	was_busy = tdc->busy;
    771	tegra_dma_stop(tdc);
    772
    773	if (!list_empty(&tdc->pending_sg_req) && was_busy) {
    774		sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq),
    775					 node);
    776		sgreq->dma_desc->bytes_transferred +=
    777				get_current_xferred_count(tdc, sgreq, wcount);
    778	}
    779	tegra_dma_resume(tdc);
    780
    781	pm_runtime_put(tdc->tdma->dev);
    782	wake_up_all(&tdc->wq);
    783
    784skip_dma_stop:
    785	tegra_dma_abort_all(tdc);
    786
    787	while (!list_empty(&tdc->cb_desc)) {
    788		dma_desc = list_first_entry(&tdc->cb_desc, typeof(*dma_desc),
    789					    cb_node);
    790		list_del(&dma_desc->cb_node);
    791		dma_desc->cb_count = 0;
    792	}
    793	spin_unlock_irqrestore(&tdc->lock, flags);
    794
    795	return 0;
    796}
    797
    798static bool tegra_dma_eoc_interrupt_deasserted(struct tegra_dma_channel *tdc)
    799{
    800	unsigned long flags;
    801	u32 status;
    802
    803	spin_lock_irqsave(&tdc->lock, flags);
    804	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
    805	spin_unlock_irqrestore(&tdc->lock, flags);
    806
    807	return !(status & TEGRA_APBDMA_STATUS_ISE_EOC);
    808}
    809
    810static void tegra_dma_synchronize(struct dma_chan *dc)
    811{
    812	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
    813	int err;
    814
    815	err = pm_runtime_resume_and_get(tdc->tdma->dev);
    816	if (err < 0) {
    817		dev_err(tdc2dev(tdc), "Failed to synchronize DMA: %d\n", err);
    818		return;
    819	}
    820
    821	/*
     822	 * The CPU which handles the interrupt could be busy in an
     823	 * uninterruptible state; in this case a sibling CPU
     824	 * should wait until the interrupt is handled.
    825	 */
    826	wait_event(tdc->wq, tegra_dma_eoc_interrupt_deasserted(tdc));
    827
    828	tasklet_kill(&tdc->tasklet);
    829
    830	pm_runtime_put(tdc->tdma->dev);
    831}
    832
    833static unsigned int tegra_dma_sg_bytes_xferred(struct tegra_dma_channel *tdc,
    834					       struct tegra_dma_sg_req *sg_req)
    835{
    836	u32 status, wcount = 0;
    837
    838	if (!list_is_first(&sg_req->node, &tdc->pending_sg_req))
    839		return 0;
    840
    841	if (tdc->tdma->chip_data->support_separate_wcount_reg)
    842		wcount = tdc_read(tdc, TEGRA_APBDMA_CHAN_WORD_TRANSFER);
    843
    844	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
    845
    846	if (!tdc->tdma->chip_data->support_separate_wcount_reg)
    847		wcount = status;
    848
    849	if (status & TEGRA_APBDMA_STATUS_ISE_EOC)
    850		return sg_req->req_len;
    851
    852	wcount = get_current_xferred_count(tdc, sg_req, wcount);
    853
    854	if (!wcount) {
    855		/*
    856		 * If wcount wasn't ever polled for this SG before, then
    857		 * simply assume that transfer hasn't started yet.
    858		 *
    859		 * Otherwise it's the end of the transfer.
    860		 *
     861		 * The alternative would be to poll the status register
     862		 * until the EOC bit is set or wcount goes up. That is
     863		 * because the EOC bit is only set after the last burst
     864		 * completes, and the counter is less than the actual
     865		 * transfer size by 4 bytes. The counter value wraps around
     866		 * in cyclic mode before EOC is set(!), so we can't easily
     867		 * distinguish the start of a transfer from its end.
    868		 */
    869		if (sg_req->words_xferred)
    870			wcount = sg_req->req_len - 4;
    871
    872	} else if (wcount < sg_req->words_xferred) {
    873		/*
    874		 * This case will never happen for a non-cyclic transfer.
    875		 *
    876		 * For a cyclic transfer, although it is possible for the
    877		 * next transfer to have already started (resetting the word
    878		 * count), this case should still not happen because we should
    879		 * have detected that the EOC bit is set and hence the transfer
    880		 * was completed.
    881		 */
    882		WARN_ON_ONCE(1);
    883
    884		wcount = sg_req->req_len - 4;
    885	} else {
    886		sg_req->words_xferred = wcount;
    887	}
    888
    889	return wcount;
    890}
    891
    892static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
    893					   dma_cookie_t cookie,
    894					   struct dma_tx_state *txstate)
    895{
    896	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
    897	struct tegra_dma_desc *dma_desc;
    898	struct tegra_dma_sg_req *sg_req;
    899	enum dma_status ret;
    900	unsigned long flags;
    901	unsigned int residual;
    902	unsigned int bytes = 0;
    903
    904	ret = dma_cookie_status(dc, cookie, txstate);
    905	if (ret == DMA_COMPLETE)
    906		return ret;
    907
    908	spin_lock_irqsave(&tdc->lock, flags);
    909
     910	/* Check among completed descriptors waiting for an ack */
    911	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
    912		if (dma_desc->txd.cookie == cookie) {
    913			ret = dma_desc->dma_status;
    914			goto found;
    915		}
    916	}
    917
    918	/* Check in pending list */
    919	list_for_each_entry(sg_req, &tdc->pending_sg_req, node) {
    920		dma_desc = sg_req->dma_desc;
    921		if (dma_desc->txd.cookie == cookie) {
    922			bytes = tegra_dma_sg_bytes_xferred(tdc, sg_req);
    923			ret = dma_desc->dma_status;
    924			goto found;
    925		}
    926	}
    927
    928	dev_dbg(tdc2dev(tdc), "cookie %d not found\n", cookie);
    929	dma_desc = NULL;
    930
    931found:
    932	if (dma_desc && txstate) {
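        		/*
        		 * Residue is what is still outstanding out of the bytes
        		 * requested. The modulo keeps the value in range for cyclic
        		 * transfers, where bytes_transferred itself wraps around
        		 * bytes_requested (see handle_cont_sngl_cycle_dma_done()).
        		 */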
    933		residual = dma_desc->bytes_requested -
    934			   ((dma_desc->bytes_transferred + bytes) %
    935			    dma_desc->bytes_requested);
    936		dma_set_residue(txstate, residual);
    937	}
    938
    939	trace_tegra_dma_tx_status(&tdc->dma_chan, cookie, txstate);
    940	spin_unlock_irqrestore(&tdc->lock, flags);
    941
    942	return ret;
    943}
    944
    945static inline unsigned int get_bus_width(struct tegra_dma_channel *tdc,
    946					 enum dma_slave_buswidth slave_bw)
    947{
    948	switch (slave_bw) {
    949	case DMA_SLAVE_BUSWIDTH_1_BYTE:
    950		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8;
    951	case DMA_SLAVE_BUSWIDTH_2_BYTES:
    952		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16;
    953	case DMA_SLAVE_BUSWIDTH_4_BYTES:
    954		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
    955	case DMA_SLAVE_BUSWIDTH_8_BYTES:
    956		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64;
    957	default:
    958		dev_warn(tdc2dev(tdc),
    959			 "slave bw is not supported, using 32bits\n");
    960		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
    961	}
    962}
    963
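        /*
         * Map the client's maxburst (expressed in units of the slave bus
         * width) onto the AHB burst sizes supported by the controller
         * (1, 4 or 8 AHB words). For example, assuming a 4-byte slave bus
         * width and maxburst of 8, burst_byte is 32 and burst_ahb_width is
         * 8, which selects TEGRA_APBDMA_AHBSEQ_BURST_8.
         */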
    964static inline unsigned int get_burst_size(struct tegra_dma_channel *tdc,
    965					  u32 burst_size,
    966					  enum dma_slave_buswidth slave_bw,
    967					  u32 len)
    968{
    969	unsigned int burst_byte, burst_ahb_width;
    970
    971	/*
     972	 * burst_size from the client is in terms of the bus width.
     973	 * Convert it into the AHB memory width, which is 4 bytes.
    974	 */
    975	burst_byte = burst_size * slave_bw;
    976	burst_ahb_width = burst_byte / 4;
    977
    978	/* If burst size is 0 then calculate the burst size based on length */
    979	if (!burst_ahb_width) {
    980		if (len & 0xF)
    981			return TEGRA_APBDMA_AHBSEQ_BURST_1;
    982		else if ((len >> 4) & 0x1)
    983			return TEGRA_APBDMA_AHBSEQ_BURST_4;
    984		else
    985			return TEGRA_APBDMA_AHBSEQ_BURST_8;
    986	}
    987	if (burst_ahb_width < 4)
    988		return TEGRA_APBDMA_AHBSEQ_BURST_1;
    989	else if (burst_ahb_width < 8)
    990		return TEGRA_APBDMA_AHBSEQ_BURST_4;
    991	else
    992		return TEGRA_APBDMA_AHBSEQ_BURST_8;
    993}
    994
    995static int get_transfer_param(struct tegra_dma_channel *tdc,
    996			      enum dma_transfer_direction direction,
    997			      u32 *apb_addr,
    998			      u32 *apb_seq,
    999			      u32 *csr,
   1000			      unsigned int *burst_size,
   1001			      enum dma_slave_buswidth *slave_bw)
   1002{
   1003	switch (direction) {
   1004	case DMA_MEM_TO_DEV:
   1005		*apb_addr = tdc->dma_sconfig.dst_addr;
   1006		*apb_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width);
   1007		*burst_size = tdc->dma_sconfig.dst_maxburst;
   1008		*slave_bw = tdc->dma_sconfig.dst_addr_width;
   1009		*csr = TEGRA_APBDMA_CSR_DIR;
   1010		return 0;
   1011
   1012	case DMA_DEV_TO_MEM:
   1013		*apb_addr = tdc->dma_sconfig.src_addr;
   1014		*apb_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width);
   1015		*burst_size = tdc->dma_sconfig.src_maxburst;
   1016		*slave_bw = tdc->dma_sconfig.src_addr_width;
   1017		*csr = 0;
   1018		return 0;
   1019
   1020	default:
   1021		dev_err(tdc2dev(tdc), "DMA direction is not supported\n");
   1022		break;
   1023	}
   1024
   1025	return -EINVAL;
   1026}
   1027
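        /*
         * Program the transfer length: the hardware expects (len - 4) with
         * the low two bits clear, so e.g. a 64-byte request is encoded as
         * 0x3C. Chips with a separate word-count register (Tegra148) take
         * the value in TEGRA_APBDMA_CHAN_WCOUNT; older chips carry it in
         * the word-count field of the CSR (bits 15:2).
         */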
   1028static void tegra_dma_prep_wcount(struct tegra_dma_channel *tdc,
   1029				  struct tegra_dma_channel_regs *ch_regs,
   1030				  u32 len)
   1031{
   1032	u32 len_field = (len - 4) & 0xFFFC;
   1033
   1034	if (tdc->tdma->chip_data->support_separate_wcount_reg)
   1035		ch_regs->wcount = len_field;
   1036	else
   1037		ch_regs->csr |= len_field;
   1038}
   1039
   1040static struct dma_async_tx_descriptor *
   1041tegra_dma_prep_slave_sg(struct dma_chan *dc,
   1042			struct scatterlist *sgl,
   1043			unsigned int sg_len,
   1044			enum dma_transfer_direction direction,
   1045			unsigned long flags,
   1046			void *context)
   1047{
   1048	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
   1049	struct tegra_dma_sg_req *sg_req = NULL;
   1050	u32 csr, ahb_seq, apb_ptr, apb_seq;
   1051	enum dma_slave_buswidth slave_bw;
   1052	struct tegra_dma_desc *dma_desc;
   1053	struct list_head req_list;
   1054	struct scatterlist *sg;
   1055	unsigned int burst_size;
   1056	unsigned int i;
   1057
   1058	if (!tdc->config_init) {
   1059		dev_err(tdc2dev(tdc), "DMA channel is not configured\n");
   1060		return NULL;
   1061	}
   1062	if (sg_len < 1) {
   1063		dev_err(tdc2dev(tdc), "Invalid segment length %d\n", sg_len);
   1064		return NULL;
   1065	}
   1066
   1067	if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
   1068			       &burst_size, &slave_bw) < 0)
   1069		return NULL;
   1070
   1071	INIT_LIST_HEAD(&req_list);
   1072
   1073	ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
   1074	ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
   1075					TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
   1076	ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;
   1077
   1078	csr |= TEGRA_APBDMA_CSR_ONCE;
   1079
   1080	if (tdc->slave_id != TEGRA_APBDMA_SLAVE_ID_INVALID) {
   1081		csr |= TEGRA_APBDMA_CSR_FLOW;
   1082		csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
   1083	}
   1084
   1085	if (flags & DMA_PREP_INTERRUPT) {
   1086		csr |= TEGRA_APBDMA_CSR_IE_EOC;
   1087	} else {
   1088		WARN_ON_ONCE(1);
   1089		return NULL;
   1090	}
   1091
   1092	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;
   1093
   1094	dma_desc = tegra_dma_desc_get(tdc);
   1095	if (!dma_desc) {
   1096		dev_err(tdc2dev(tdc), "DMA descriptors not available\n");
   1097		return NULL;
   1098	}
   1099	INIT_LIST_HEAD(&dma_desc->tx_list);
   1100	INIT_LIST_HEAD(&dma_desc->cb_node);
   1101	dma_desc->cb_count = 0;
   1102	dma_desc->bytes_requested = 0;
   1103	dma_desc->bytes_transferred = 0;
   1104	dma_desc->dma_status = DMA_IN_PROGRESS;
   1105
   1106	/* Make transfer requests */
   1107	for_each_sg(sgl, sg, sg_len, i) {
   1108		u32 len, mem;
   1109
   1110		mem = sg_dma_address(sg);
   1111		len = sg_dma_len(sg);
   1112
   1113		if ((len & 3) || (mem & 3) ||
   1114		    len > tdc->tdma->chip_data->max_dma_count) {
   1115			dev_err(tdc2dev(tdc),
   1116				"DMA length/memory address is not supported\n");
   1117			tegra_dma_desc_put(tdc, dma_desc);
   1118			return NULL;
   1119		}
   1120
   1121		sg_req = tegra_dma_sg_req_get(tdc);
   1122		if (!sg_req) {
   1123			dev_err(tdc2dev(tdc), "DMA sg-req not available\n");
   1124			tegra_dma_desc_put(tdc, dma_desc);
   1125			return NULL;
   1126		}
   1127
   1128		ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
   1129		dma_desc->bytes_requested += len;
   1130
   1131		sg_req->ch_regs.apb_ptr = apb_ptr;
   1132		sg_req->ch_regs.ahb_ptr = mem;
   1133		sg_req->ch_regs.csr = csr;
   1134		tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
   1135		sg_req->ch_regs.apb_seq = apb_seq;
   1136		sg_req->ch_regs.ahb_seq = ahb_seq;
   1137		sg_req->configured = false;
   1138		sg_req->last_sg = false;
   1139		sg_req->dma_desc = dma_desc;
   1140		sg_req->req_len = len;
   1141
   1142		list_add_tail(&sg_req->node, &dma_desc->tx_list);
   1143	}
   1144	sg_req->last_sg = true;
   1145	if (flags & DMA_CTRL_ACK)
   1146		dma_desc->txd.flags = DMA_CTRL_ACK;
   1147
   1148	/*
    1149	 * Make sure that the mode does not conflict with the currently
    1150	 * configured mode.
   1151	 */
   1152	if (!tdc->isr_handler) {
   1153		tdc->isr_handler = handle_once_dma_done;
   1154		tdc->cyclic = false;
   1155	} else {
   1156		if (tdc->cyclic) {
   1157			dev_err(tdc2dev(tdc), "DMA configured in cyclic mode\n");
   1158			tegra_dma_desc_put(tdc, dma_desc);
   1159			return NULL;
   1160		}
   1161	}
   1162
   1163	return &dma_desc->txd;
   1164}
   1165
   1166static struct dma_async_tx_descriptor *
   1167tegra_dma_prep_dma_cyclic(struct dma_chan *dc, dma_addr_t buf_addr,
   1168			  size_t buf_len,
   1169			  size_t period_len,
   1170			  enum dma_transfer_direction direction,
   1171			  unsigned long flags)
   1172{
   1173	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
   1174	struct tegra_dma_sg_req *sg_req = NULL;
   1175	u32 csr, ahb_seq, apb_ptr, apb_seq;
   1176	enum dma_slave_buswidth slave_bw;
   1177	struct tegra_dma_desc *dma_desc;
   1178	dma_addr_t mem = buf_addr;
   1179	unsigned int burst_size;
   1180	size_t len, remain_len;
   1181
   1182	if (!buf_len || !period_len) {
   1183		dev_err(tdc2dev(tdc), "Invalid buffer/period len\n");
   1184		return NULL;
   1185	}
   1186
   1187	if (!tdc->config_init) {
   1188		dev_err(tdc2dev(tdc), "DMA slave is not configured\n");
   1189		return NULL;
   1190	}
   1191
   1192	/*
    1193	 * We allow more requests to be queued as long as the DMA has not
    1194	 * been started. The driver will loop over all requests.
    1195	 * Once the DMA is started, new requests can be queued only after
    1196	 * terminating the DMA.
   1197	 */
   1198	if (tdc->busy) {
   1199		dev_err(tdc2dev(tdc), "Request not allowed when DMA running\n");
   1200		return NULL;
   1201	}
   1202
   1203	/*
    1204	 * We only support cyclic transfers when buf_len is a multiple of
    1205	 * period_len.
   1206	 */
   1207	if (buf_len % period_len) {
   1208		dev_err(tdc2dev(tdc), "buf_len is not multiple of period_len\n");
   1209		return NULL;
   1210	}
   1211
   1212	len = period_len;
   1213	if ((len & 3) || (buf_addr & 3) ||
   1214	    len > tdc->tdma->chip_data->max_dma_count) {
   1215		dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n");
   1216		return NULL;
   1217	}
   1218
   1219	if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
   1220			       &burst_size, &slave_bw) < 0)
   1221		return NULL;
   1222
   1223	ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
   1224	ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
   1225					TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
   1226	ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;
   1227
   1228	if (tdc->slave_id != TEGRA_APBDMA_SLAVE_ID_INVALID) {
   1229		csr |= TEGRA_APBDMA_CSR_FLOW;
   1230		csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
   1231	}
   1232
   1233	if (flags & DMA_PREP_INTERRUPT) {
   1234		csr |= TEGRA_APBDMA_CSR_IE_EOC;
   1235	} else {
   1236		WARN_ON_ONCE(1);
   1237		return NULL;
   1238	}
   1239
   1240	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;
   1241
   1242	dma_desc = tegra_dma_desc_get(tdc);
   1243	if (!dma_desc) {
   1244		dev_err(tdc2dev(tdc), "not enough descriptors available\n");
   1245		return NULL;
   1246	}
   1247
   1248	INIT_LIST_HEAD(&dma_desc->tx_list);
   1249	INIT_LIST_HEAD(&dma_desc->cb_node);
   1250	dma_desc->cb_count = 0;
   1251
   1252	dma_desc->bytes_transferred = 0;
   1253	dma_desc->bytes_requested = buf_len;
   1254	remain_len = buf_len;
   1255
    1256	/* Split the transfer into chunks of period size */
   1257	while (remain_len) {
   1258		sg_req = tegra_dma_sg_req_get(tdc);
   1259		if (!sg_req) {
   1260			dev_err(tdc2dev(tdc), "DMA sg-req not available\n");
   1261			tegra_dma_desc_put(tdc, dma_desc);
   1262			return NULL;
   1263		}
   1264
   1265		ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
   1266		sg_req->ch_regs.apb_ptr = apb_ptr;
   1267		sg_req->ch_regs.ahb_ptr = mem;
   1268		sg_req->ch_regs.csr = csr;
   1269		tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
   1270		sg_req->ch_regs.apb_seq = apb_seq;
   1271		sg_req->ch_regs.ahb_seq = ahb_seq;
   1272		sg_req->configured = false;
   1273		sg_req->last_sg = false;
   1274		sg_req->dma_desc = dma_desc;
   1275		sg_req->req_len = len;
   1276
   1277		list_add_tail(&sg_req->node, &dma_desc->tx_list);
   1278		remain_len -= len;
   1279		mem += len;
   1280	}
   1281	sg_req->last_sg = true;
   1282	if (flags & DMA_CTRL_ACK)
   1283		dma_desc->txd.flags = DMA_CTRL_ACK;
   1284
   1285	/*
    1286	 * Make sure that the mode does not conflict with the currently
    1287	 * configured mode.
   1288	 */
   1289	if (!tdc->isr_handler) {
   1290		tdc->isr_handler = handle_cont_sngl_cycle_dma_done;
   1291		tdc->cyclic = true;
   1292	} else {
   1293		if (!tdc->cyclic) {
   1294			dev_err(tdc2dev(tdc), "DMA configuration conflict\n");
   1295			tegra_dma_desc_put(tdc, dma_desc);
   1296			return NULL;
   1297		}
   1298	}
   1299
   1300	return &dma_desc->txd;
   1301}
   1302
   1303static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
   1304{
   1305	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
   1306
   1307	dma_cookie_init(&tdc->dma_chan);
   1308
   1309	return 0;
   1310}
   1311
   1312static void tegra_dma_free_chan_resources(struct dma_chan *dc)
   1313{
   1314	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
   1315	struct tegra_dma_desc *dma_desc;
   1316	struct tegra_dma_sg_req *sg_req;
   1317	struct list_head dma_desc_list;
   1318	struct list_head sg_req_list;
   1319
   1320	INIT_LIST_HEAD(&dma_desc_list);
   1321	INIT_LIST_HEAD(&sg_req_list);
   1322
   1323	dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);
   1324
   1325	tegra_dma_terminate_all(dc);
   1326	tasklet_kill(&tdc->tasklet);
   1327
   1328	list_splice_init(&tdc->pending_sg_req, &sg_req_list);
   1329	list_splice_init(&tdc->free_sg_req, &sg_req_list);
   1330	list_splice_init(&tdc->free_dma_desc, &dma_desc_list);
   1331	INIT_LIST_HEAD(&tdc->cb_desc);
   1332	tdc->config_init = false;
   1333	tdc->isr_handler = NULL;
   1334
   1335	while (!list_empty(&dma_desc_list)) {
   1336		dma_desc = list_first_entry(&dma_desc_list, typeof(*dma_desc),
   1337					    node);
   1338		list_del(&dma_desc->node);
   1339		kfree(dma_desc);
   1340	}
   1341
   1342	while (!list_empty(&sg_req_list)) {
   1343		sg_req = list_first_entry(&sg_req_list, typeof(*sg_req), node);
   1344		list_del(&sg_req->node);
   1345		kfree(sg_req);
   1346	}
   1347
   1348	tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID;
   1349}
   1350
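        /*
         * Translate a "dmas" phandle specifier into a channel. The single
         * argument cell is the APB requestor (slave) ID that later gets
         * programmed into the CSR REQ_SEL field; any free channel can serve
         * any requestor, hence dma_get_any_slave_channel() is used.
         */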
   1351static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
   1352					   struct of_dma *ofdma)
   1353{
   1354	struct tegra_dma *tdma = ofdma->of_dma_data;
   1355	struct tegra_dma_channel *tdc;
   1356	struct dma_chan *chan;
   1357
   1358	if (dma_spec->args[0] > TEGRA_APBDMA_CSR_REQ_SEL_MASK) {
   1359		dev_err(tdma->dev, "Invalid slave id: %d\n", dma_spec->args[0]);
   1360		return NULL;
   1361	}
   1362
   1363	chan = dma_get_any_slave_channel(&tdma->dma_dev);
   1364	if (!chan)
   1365		return NULL;
   1366
   1367	tdc = to_tegra_dma_chan(chan);
   1368	tdc->slave_id = dma_spec->args[0];
   1369
   1370	return chan;
   1371}
   1372
   1373/* Tegra20 specific DMA controller information */
   1374static const struct tegra_dma_chip_data tegra20_dma_chip_data = {
   1375	.nr_channels		= 16,
   1376	.channel_reg_size	= 0x20,
   1377	.max_dma_count		= 1024UL * 64,
   1378	.support_channel_pause	= false,
   1379	.support_separate_wcount_reg = false,
   1380};
   1381
   1382/* Tegra30 specific DMA controller information */
   1383static const struct tegra_dma_chip_data tegra30_dma_chip_data = {
   1384	.nr_channels		= 32,
   1385	.channel_reg_size	= 0x20,
   1386	.max_dma_count		= 1024UL * 64,
   1387	.support_channel_pause	= false,
   1388	.support_separate_wcount_reg = false,
   1389};
   1390
   1391/* Tegra114 specific DMA controller information */
   1392static const struct tegra_dma_chip_data tegra114_dma_chip_data = {
   1393	.nr_channels		= 32,
   1394	.channel_reg_size	= 0x20,
   1395	.max_dma_count		= 1024UL * 64,
   1396	.support_channel_pause	= true,
   1397	.support_separate_wcount_reg = false,
   1398};
   1399
   1400/* Tegra148 specific DMA controller information */
   1401static const struct tegra_dma_chip_data tegra148_dma_chip_data = {
   1402	.nr_channels		= 32,
   1403	.channel_reg_size	= 0x40,
   1404	.max_dma_count		= 1024UL * 64,
   1405	.support_channel_pause	= true,
   1406	.support_separate_wcount_reg = true,
   1407};
   1408
   1409static int tegra_dma_init_hw(struct tegra_dma *tdma)
   1410{
   1411	int err;
   1412
   1413	err = reset_control_assert(tdma->rst);
   1414	if (err) {
   1415		dev_err(tdma->dev, "failed to assert reset: %d\n", err);
   1416		return err;
   1417	}
   1418
   1419	err = clk_enable(tdma->dma_clk);
   1420	if (err) {
   1421		dev_err(tdma->dev, "failed to enable clk: %d\n", err);
   1422		return err;
   1423	}
   1424
   1425	/* reset DMA controller */
   1426	udelay(2);
   1427	reset_control_deassert(tdma->rst);
   1428
   1429	/* enable global DMA registers */
   1430	tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
   1431	tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
   1432	tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFF);
   1433
   1434	clk_disable(tdma->dma_clk);
   1435
   1436	return 0;
   1437}
   1438
   1439static int tegra_dma_probe(struct platform_device *pdev)
   1440{
   1441	const struct tegra_dma_chip_data *cdata;
   1442	struct tegra_dma *tdma;
   1443	unsigned int i;
   1444	size_t size;
   1445	int ret;
   1446
   1447	cdata = of_device_get_match_data(&pdev->dev);
   1448	size = struct_size(tdma, channels, cdata->nr_channels);
   1449
   1450	tdma = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
   1451	if (!tdma)
   1452		return -ENOMEM;
   1453
   1454	tdma->dev = &pdev->dev;
   1455	tdma->chip_data = cdata;
   1456	platform_set_drvdata(pdev, tdma);
   1457
   1458	tdma->base_addr = devm_platform_ioremap_resource(pdev, 0);
   1459	if (IS_ERR(tdma->base_addr))
   1460		return PTR_ERR(tdma->base_addr);
   1461
   1462	tdma->dma_clk = devm_clk_get(&pdev->dev, NULL);
   1463	if (IS_ERR(tdma->dma_clk)) {
   1464		dev_err(&pdev->dev, "Error: Missing controller clock\n");
   1465		return PTR_ERR(tdma->dma_clk);
   1466	}
   1467
   1468	tdma->rst = devm_reset_control_get(&pdev->dev, "dma");
   1469	if (IS_ERR(tdma->rst)) {
   1470		dev_err(&pdev->dev, "Error: Missing reset\n");
   1471		return PTR_ERR(tdma->rst);
   1472	}
   1473
   1474	spin_lock_init(&tdma->global_lock);
   1475
   1476	ret = clk_prepare(tdma->dma_clk);
   1477	if (ret)
   1478		return ret;
   1479
   1480	ret = tegra_dma_init_hw(tdma);
   1481	if (ret)
   1482		goto err_clk_unprepare;
   1483
   1484	pm_runtime_irq_safe(&pdev->dev);
   1485	pm_runtime_enable(&pdev->dev);
   1486
   1487	INIT_LIST_HEAD(&tdma->dma_dev.channels);
   1488	for (i = 0; i < cdata->nr_channels; i++) {
   1489		struct tegra_dma_channel *tdc = &tdma->channels[i];
   1490		int irq;
   1491
   1492		tdc->chan_addr = tdma->base_addr +
   1493				 TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
   1494				 (i * cdata->channel_reg_size);
   1495
   1496		irq = platform_get_irq(pdev, i);
   1497		if (irq < 0) {
   1498			ret = irq;
   1499			goto err_pm_disable;
   1500		}
   1501
   1502		snprintf(tdc->name, sizeof(tdc->name), "apbdma.%d", i);
   1503		ret = devm_request_irq(&pdev->dev, irq, tegra_dma_isr, 0,
   1504				       tdc->name, tdc);
   1505		if (ret) {
   1506			dev_err(&pdev->dev,
   1507				"request_irq failed with err %d channel %d\n",
   1508				ret, i);
   1509			goto err_pm_disable;
   1510		}
   1511
   1512		tdc->dma_chan.device = &tdma->dma_dev;
   1513		dma_cookie_init(&tdc->dma_chan);
   1514		list_add_tail(&tdc->dma_chan.device_node,
   1515			      &tdma->dma_dev.channels);
   1516		tdc->tdma = tdma;
   1517		tdc->id = i;
   1518		tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID;
   1519
   1520		tasklet_setup(&tdc->tasklet, tegra_dma_tasklet);
   1521		spin_lock_init(&tdc->lock);
   1522		init_waitqueue_head(&tdc->wq);
   1523
   1524		INIT_LIST_HEAD(&tdc->pending_sg_req);
   1525		INIT_LIST_HEAD(&tdc->free_sg_req);
   1526		INIT_LIST_HEAD(&tdc->free_dma_desc);
   1527		INIT_LIST_HEAD(&tdc->cb_desc);
   1528	}
   1529
   1530	dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
   1531	dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
   1532	dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);
   1533
   1534	tdma->global_pause_count = 0;
   1535	tdma->dma_dev.dev = &pdev->dev;
   1536	tdma->dma_dev.device_alloc_chan_resources =
   1537					tegra_dma_alloc_chan_resources;
   1538	tdma->dma_dev.device_free_chan_resources =
   1539					tegra_dma_free_chan_resources;
   1540	tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg;
   1541	tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic;
   1542	tdma->dma_dev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
   1543		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
   1544		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
   1545		BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
   1546	tdma->dma_dev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
   1547		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
   1548		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
   1549		BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
   1550	tdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
   1551	tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
   1552	tdma->dma_dev.device_config = tegra_dma_slave_config;
   1553	tdma->dma_dev.device_terminate_all = tegra_dma_terminate_all;
   1554	tdma->dma_dev.device_synchronize = tegra_dma_synchronize;
   1555	tdma->dma_dev.device_tx_status = tegra_dma_tx_status;
   1556	tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending;
   1557
   1558	ret = dma_async_device_register(&tdma->dma_dev);
   1559	if (ret < 0) {
   1560		dev_err(&pdev->dev,
   1561			"Tegra20 APB DMA driver registration failed %d\n", ret);
   1562		goto err_pm_disable;
   1563	}
   1564
   1565	ret = of_dma_controller_register(pdev->dev.of_node,
   1566					 tegra_dma_of_xlate, tdma);
   1567	if (ret < 0) {
   1568		dev_err(&pdev->dev,
   1569			"Tegra20 APB DMA OF registration failed %d\n", ret);
   1570		goto err_unregister_dma_dev;
   1571	}
   1572
   1573	dev_info(&pdev->dev, "Tegra20 APB DMA driver registered %u channels\n",
   1574		 cdata->nr_channels);
   1575
   1576	return 0;
   1577
   1578err_unregister_dma_dev:
   1579	dma_async_device_unregister(&tdma->dma_dev);
   1580
   1581err_pm_disable:
   1582	pm_runtime_disable(&pdev->dev);
   1583
   1584err_clk_unprepare:
   1585	clk_unprepare(tdma->dma_clk);
   1586
   1587	return ret;
   1588}
   1589
   1590static int tegra_dma_remove(struct platform_device *pdev)
   1591{
   1592	struct tegra_dma *tdma = platform_get_drvdata(pdev);
   1593
   1594	of_dma_controller_free(pdev->dev.of_node);
   1595	dma_async_device_unregister(&tdma->dma_dev);
   1596	pm_runtime_disable(&pdev->dev);
   1597	clk_unprepare(tdma->dma_clk);
   1598
   1599	return 0;
   1600}
   1601
   1602static int __maybe_unused tegra_dma_runtime_suspend(struct device *dev)
   1603{
   1604	struct tegra_dma *tdma = dev_get_drvdata(dev);
   1605
   1606	clk_disable(tdma->dma_clk);
   1607
   1608	return 0;
   1609}
   1610
   1611static int __maybe_unused tegra_dma_runtime_resume(struct device *dev)
   1612{
   1613	struct tegra_dma *tdma = dev_get_drvdata(dev);
   1614
   1615	return clk_enable(tdma->dma_clk);
   1616}
   1617
   1618static int __maybe_unused tegra_dma_dev_suspend(struct device *dev)
   1619{
   1620	struct tegra_dma *tdma = dev_get_drvdata(dev);
   1621	unsigned long flags;
   1622	unsigned int i;
   1623	bool busy;
   1624
   1625	for (i = 0; i < tdma->chip_data->nr_channels; i++) {
   1626		struct tegra_dma_channel *tdc = &tdma->channels[i];
   1627
   1628		tasklet_kill(&tdc->tasklet);
   1629
   1630		spin_lock_irqsave(&tdc->lock, flags);
   1631		busy = tdc->busy;
   1632		spin_unlock_irqrestore(&tdc->lock, flags);
   1633
   1634		if (busy) {
   1635			dev_err(tdma->dev, "channel %u busy\n", i);
   1636			return -EBUSY;
   1637		}
   1638	}
   1639
   1640	return pm_runtime_force_suspend(dev);
   1641}
   1642
   1643static int __maybe_unused tegra_dma_dev_resume(struct device *dev)
   1644{
   1645	struct tegra_dma *tdma = dev_get_drvdata(dev);
   1646	int err;
   1647
   1648	err = tegra_dma_init_hw(tdma);
   1649	if (err)
   1650		return err;
   1651
   1652	return pm_runtime_force_resume(dev);
   1653}
   1654
   1655static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
   1656	SET_RUNTIME_PM_OPS(tegra_dma_runtime_suspend, tegra_dma_runtime_resume,
   1657			   NULL)
   1658	SET_SYSTEM_SLEEP_PM_OPS(tegra_dma_dev_suspend, tegra_dma_dev_resume)
   1659};
   1660
   1661static const struct of_device_id tegra_dma_of_match[] = {
   1662	{
   1663		.compatible = "nvidia,tegra148-apbdma",
   1664		.data = &tegra148_dma_chip_data,
   1665	}, {
   1666		.compatible = "nvidia,tegra114-apbdma",
   1667		.data = &tegra114_dma_chip_data,
   1668	}, {
   1669		.compatible = "nvidia,tegra30-apbdma",
   1670		.data = &tegra30_dma_chip_data,
   1671	}, {
   1672		.compatible = "nvidia,tegra20-apbdma",
   1673		.data = &tegra20_dma_chip_data,
   1674	}, {
   1675	},
   1676};
   1677MODULE_DEVICE_TABLE(of, tegra_dma_of_match);
   1678
   1679static struct platform_driver tegra_dmac_driver = {
   1680	.driver = {
   1681		.name	= "tegra-apbdma",
   1682		.pm	= &tegra_dma_dev_pm_ops,
   1683		.of_match_table = tegra_dma_of_match,
   1684	},
   1685	.probe		= tegra_dma_probe,
   1686	.remove		= tegra_dma_remove,
   1687};
   1688
   1689module_platform_driver(tegra_dmac_driver);
   1690
   1691MODULE_DESCRIPTION("NVIDIA Tegra APB DMA Controller driver");
   1692MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
   1693MODULE_LICENSE("GPL v2");