cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

dw-axi-dmac-platform.c (41108B)


      1// SPDX-License-Identifier: GPL-2.0
      2// (C) 2017-2018 Synopsys, Inc. (www.synopsys.com)
      3
      4/*
      5 * Synopsys DesignWare AXI DMA Controller driver.
      6 *
      7 * Author: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
      8 */
      9
     10#include <linux/bitops.h>
     11#include <linux/delay.h>
     12#include <linux/device.h>
     13#include <linux/dmaengine.h>
     14#include <linux/dmapool.h>
     15#include <linux/dma-mapping.h>
     16#include <linux/err.h>
     17#include <linux/interrupt.h>
     18#include <linux/io.h>
     19#include <linux/iopoll.h>
     20#include <linux/io-64-nonatomic-lo-hi.h>
     21#include <linux/kernel.h>
     22#include <linux/module.h>
     23#include <linux/of.h>
     24#include <linux/of_dma.h>
     25#include <linux/platform_device.h>
     26#include <linux/pm_runtime.h>
     27#include <linux/property.h>
     28#include <linux/slab.h>
     29#include <linux/types.h>
     30
     31#include "dw-axi-dmac.h"
     32#include "../dmaengine.h"
     33#include "../virt-dma.h"
     34
     35/*
     36 * The set of bus widths supported by the DMA controller. DW AXI DMAC supports
     37 * master data bus width up to 512 bits (for both AXI master interfaces), but
     38 * it depends on IP block configuration.
     39 */
     40#define AXI_DMA_BUSWIDTHS		  \
     41	(DMA_SLAVE_BUSWIDTH_1_BYTE	| \
     42	DMA_SLAVE_BUSWIDTH_2_BYTES	| \
     43	DMA_SLAVE_BUSWIDTH_4_BYTES	| \
     44	DMA_SLAVE_BUSWIDTH_8_BYTES	| \
     45	DMA_SLAVE_BUSWIDTH_16_BYTES	| \
     46	DMA_SLAVE_BUSWIDTH_32_BYTES	| \
     47	DMA_SLAVE_BUSWIDTH_64_BYTES)
     48
     49static inline void
     50axi_dma_iowrite32(struct axi_dma_chip *chip, u32 reg, u32 val)
     51{
     52	iowrite32(val, chip->regs + reg);
     53}
     54
     55static inline u32 axi_dma_ioread32(struct axi_dma_chip *chip, u32 reg)
     56{
     57	return ioread32(chip->regs + reg);
     58}
     59
     60static inline void
     61axi_chan_iowrite32(struct axi_dma_chan *chan, u32 reg, u32 val)
     62{
     63	iowrite32(val, chan->chan_regs + reg);
     64}
     65
     66static inline u32 axi_chan_ioread32(struct axi_dma_chan *chan, u32 reg)
     67{
     68	return ioread32(chan->chan_regs + reg);
     69}
     70
     71static inline void
     72axi_chan_iowrite64(struct axi_dma_chan *chan, u32 reg, u64 val)
     73{
     74	/*
      75	 * Split one 64-bit write into two 32-bit writes, as some hardware
      76	 * doesn't support 64-bit access.
     77	 */
     78	iowrite32(lower_32_bits(val), chan->chan_regs + reg);
     79	iowrite32(upper_32_bits(val), chan->chan_regs + reg + 4);
     80}
     81
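        /*
         * The CH_CFG register layout differs between the legacy register map
         * (up to 8 channels) and the extended map: the handshake interface
         * numbers (src_per/dst_per) move from CFG_H into CFG_L and the other
         * CFG_H fields shift position.
         */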
     82static inline void axi_chan_config_write(struct axi_dma_chan *chan,
     83					 struct axi_dma_chan_config *config)
     84{
     85	u32 cfg_lo, cfg_hi;
     86
     87	cfg_lo = (config->dst_multblk_type << CH_CFG_L_DST_MULTBLK_TYPE_POS |
     88		  config->src_multblk_type << CH_CFG_L_SRC_MULTBLK_TYPE_POS);
     89	if (chan->chip->dw->hdata->reg_map_8_channels) {
     90		cfg_hi = config->tt_fc << CH_CFG_H_TT_FC_POS |
     91			 config->hs_sel_src << CH_CFG_H_HS_SEL_SRC_POS |
     92			 config->hs_sel_dst << CH_CFG_H_HS_SEL_DST_POS |
     93			 config->src_per << CH_CFG_H_SRC_PER_POS |
     94			 config->dst_per << CH_CFG_H_DST_PER_POS |
     95			 config->prior << CH_CFG_H_PRIORITY_POS;
     96	} else {
     97		cfg_lo |= config->src_per << CH_CFG2_L_SRC_PER_POS |
     98			  config->dst_per << CH_CFG2_L_DST_PER_POS;
     99		cfg_hi = config->tt_fc << CH_CFG2_H_TT_FC_POS |
    100			 config->hs_sel_src << CH_CFG2_H_HS_SEL_SRC_POS |
    101			 config->hs_sel_dst << CH_CFG2_H_HS_SEL_DST_POS |
    102			 config->prior << CH_CFG2_H_PRIORITY_POS;
    103	}
    104	axi_chan_iowrite32(chan, CH_CFG_L, cfg_lo);
    105	axi_chan_iowrite32(chan, CH_CFG_H, cfg_hi);
    106}
    107
    108static inline void axi_dma_disable(struct axi_dma_chip *chip)
    109{
    110	u32 val;
    111
    112	val = axi_dma_ioread32(chip, DMAC_CFG);
    113	val &= ~DMAC_EN_MASK;
    114	axi_dma_iowrite32(chip, DMAC_CFG, val);
    115}
    116
    117static inline void axi_dma_enable(struct axi_dma_chip *chip)
    118{
    119	u32 val;
    120
    121	val = axi_dma_ioread32(chip, DMAC_CFG);
    122	val |= DMAC_EN_MASK;
    123	axi_dma_iowrite32(chip, DMAC_CFG, val);
    124}
    125
    126static inline void axi_dma_irq_disable(struct axi_dma_chip *chip)
    127{
    128	u32 val;
    129
    130	val = axi_dma_ioread32(chip, DMAC_CFG);
    131	val &= ~INT_EN_MASK;
    132	axi_dma_iowrite32(chip, DMAC_CFG, val);
    133}
    134
    135static inline void axi_dma_irq_enable(struct axi_dma_chip *chip)
    136{
    137	u32 val;
    138
    139	val = axi_dma_ioread32(chip, DMAC_CFG);
    140	val |= INT_EN_MASK;
    141	axi_dma_iowrite32(chip, DMAC_CFG, val);
    142}
    143
    144static inline void axi_chan_irq_disable(struct axi_dma_chan *chan, u32 irq_mask)
    145{
    146	u32 val;
    147
    148	if (likely(irq_mask == DWAXIDMAC_IRQ_ALL)) {
    149		axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, DWAXIDMAC_IRQ_NONE);
    150	} else {
    151		val = axi_chan_ioread32(chan, CH_INTSTATUS_ENA);
    152		val &= ~irq_mask;
    153		axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, val);
    154	}
    155}
    156
    157static inline void axi_chan_irq_set(struct axi_dma_chan *chan, u32 irq_mask)
    158{
    159	axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, irq_mask);
    160}
    161
    162static inline void axi_chan_irq_sig_set(struct axi_dma_chan *chan, u32 irq_mask)
    163{
    164	axi_chan_iowrite32(chan, CH_INTSIGNAL_ENA, irq_mask);
    165}
    166
    167static inline void axi_chan_irq_clear(struct axi_dma_chan *chan, u32 irq_mask)
    168{
    169	axi_chan_iowrite32(chan, CH_INTCLEAR, irq_mask);
    170}
    171
    172static inline u32 axi_chan_irq_read(struct axi_dma_chan *chan)
    173{
    174	return axi_chan_ioread32(chan, CH_INTSTATUS);
    175}
    176
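        /*
         * Each channel enable bit in DMAC_CHEN is paired with a write-enable
         * bit, so a read-modify-write here only affects the selected channel.
         * The write-enable field sits at a different offset in the two
         * register maps.
         */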
    177static inline void axi_chan_disable(struct axi_dma_chan *chan)
    178{
    179	u32 val;
    180
    181	val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
    182	val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT);
    183	if (chan->chip->dw->hdata->reg_map_8_channels)
    184		val |=   BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
    185	else
    186		val |=   BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
    187	axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
    188}
    189
    190static inline void axi_chan_enable(struct axi_dma_chan *chan)
    191{
    192	u32 val;
    193
    194	val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
    195	if (chan->chip->dw->hdata->reg_map_8_channels)
    196		val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
    197			BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
    198	else
    199		val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
    200			BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
    201	axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
    202}
    203
    204static inline bool axi_chan_is_hw_enable(struct axi_dma_chan *chan)
    205{
    206	u32 val;
    207
    208	val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
    209
    210	return !!(val & (BIT(chan->id) << DMAC_CHAN_EN_SHIFT));
    211}
    212
    213static void axi_dma_hw_init(struct axi_dma_chip *chip)
    214{
    215	int ret;
    216	u32 i;
    217
    218	for (i = 0; i < chip->dw->hdata->nr_channels; i++) {
    219		axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
    220		axi_chan_disable(&chip->dw->chan[i]);
    221	}
    222	ret = dma_set_mask_and_coherent(chip->dev, DMA_BIT_MASK(64));
    223	if (ret)
    224		dev_warn(chip->dev, "Unable to set coherent mask\n");
    225}
    226
    227static u32 axi_chan_get_xfer_width(struct axi_dma_chan *chan, dma_addr_t src,
    228				   dma_addr_t dst, size_t len)
    229{
    230	u32 max_width = chan->chip->dw->hdata->m_data_width;
    231
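        	/*
        	 * The lowest set bit across src, dst and len gives the widest
        	 * power-of-two width they are all aligned to; BIT(max_width)
        	 * caps the result at the configured master data bus width.
        	 */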
    232	return __ffs(src | dst | len | BIT(max_width));
    233}
    234
    235static inline const char *axi_chan_name(struct axi_dma_chan *chan)
    236{
    237	return dma_chan_name(&chan->vc.chan);
    238}
    239
    240static struct axi_dma_desc *axi_desc_alloc(u32 num)
    241{
    242	struct axi_dma_desc *desc;
    243
    244	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
    245	if (!desc)
    246		return NULL;
    247
    248	desc->hw_desc = kcalloc(num, sizeof(*desc->hw_desc), GFP_NOWAIT);
    249	if (!desc->hw_desc) {
    250		kfree(desc);
    251		return NULL;
    252	}
    253
    254	return desc;
    255}
    256
    257static struct axi_dma_lli *axi_desc_get(struct axi_dma_chan *chan,
    258					dma_addr_t *addr)
    259{
    260	struct axi_dma_lli *lli;
    261	dma_addr_t phys;
    262
    263	lli = dma_pool_zalloc(chan->desc_pool, GFP_NOWAIT, &phys);
    264	if (unlikely(!lli)) {
    265		dev_err(chan2dev(chan), "%s: not enough descriptors available\n",
    266			axi_chan_name(chan));
    267		return NULL;
    268	}
    269
    270	atomic_inc(&chan->descs_allocated);
    271	*addr = phys;
    272
    273	return lli;
    274}
    275
    276static void axi_desc_put(struct axi_dma_desc *desc)
    277{
    278	struct axi_dma_chan *chan = desc->chan;
    279	int count = atomic_read(&chan->descs_allocated);
    280	struct axi_dma_hw_desc *hw_desc;
    281	int descs_put;
    282
    283	for (descs_put = 0; descs_put < count; descs_put++) {
    284		hw_desc = &desc->hw_desc[descs_put];
    285		dma_pool_free(chan->desc_pool, hw_desc->lli, hw_desc->llp);
    286	}
    287
    288	kfree(desc->hw_desc);
    289	kfree(desc);
    290	atomic_sub(descs_put, &chan->descs_allocated);
    291	dev_vdbg(chan2dev(chan), "%s: %d descs put, %d still allocated\n",
    292		axi_chan_name(chan), descs_put,
    293		atomic_read(&chan->descs_allocated));
    294}
    295
    296static void vchan_desc_put(struct virt_dma_desc *vdesc)
    297{
    298	axi_desc_put(vd_to_axi_desc(vdesc));
    299}
    300
    301static enum dma_status
    302dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
    303		  struct dma_tx_state *txstate)
    304{
    305	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
    306	struct virt_dma_desc *vdesc;
    307	enum dma_status status;
    308	u32 completed_length;
    309	unsigned long flags;
    310	u32 completed_blocks;
    311	size_t bytes = 0;
    312	u32 length;
    313	u32 len;
    314
    315	status = dma_cookie_status(dchan, cookie, txstate);
    316	if (status == DMA_COMPLETE || !txstate)
    317		return status;
    318
    319	spin_lock_irqsave(&chan->vc.lock, flags);
    320
    321	vdesc = vchan_find_desc(&chan->vc, cookie);
    322	if (vdesc) {
    323		length = vd_to_axi_desc(vdesc)->length;
    324		completed_blocks = vd_to_axi_desc(vdesc)->completed_blocks;
    325		len = vd_to_axi_desc(vdesc)->hw_desc[0].len;
    326		completed_length = completed_blocks * len;
    327		bytes = length - completed_length;
    328	} else {
    329		bytes = vd_to_axi_desc(vdesc)->length;
    330	}
    331
    332	spin_unlock_irqrestore(&chan->vc.lock, flags);
    333	dma_set_residue(txstate, bytes);
    334
    335	return status;
    336}
    337
    338static void write_desc_llp(struct axi_dma_hw_desc *desc, dma_addr_t adr)
    339{
    340	desc->lli->llp = cpu_to_le64(adr);
    341}
    342
    343static void write_chan_llp(struct axi_dma_chan *chan, dma_addr_t adr)
    344{
    345	axi_chan_iowrite64(chan, CH_LLP, adr);
    346}
    347
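        /*
         * The optional APB companion registers gate byte and half-word writes
         * per channel; enable the write size that matches the slave's
         * destination register width.
         */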
    348static void dw_axi_dma_set_byte_halfword(struct axi_dma_chan *chan, bool set)
    349{
    350	u32 offset = DMAC_APB_BYTE_WR_CH_EN;
    351	u32 reg_width, val;
    352
    353	if (!chan->chip->apb_regs) {
    354		dev_dbg(chan->chip->dev, "apb_regs not initialized\n");
    355		return;
    356	}
    357
    358	reg_width = __ffs(chan->config.dst_addr_width);
    359	if (reg_width == DWAXIDMAC_TRANS_WIDTH_16)
    360		offset = DMAC_APB_HALFWORD_WR_CH_EN;
    361
    362	val = ioread32(chan->chip->apb_regs + offset);
    363
    364	if (set)
    365		val |= BIT(chan->id);
    366	else
    367		val &= ~BIT(chan->id);
    368
    369	iowrite32(val, chan->chip->apb_regs + offset);
    370}
    371/* Called in chan locked context */
    372static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
    373				      struct axi_dma_desc *first)
    374{
    375	u32 priority = chan->chip->dw->hdata->priority[chan->id];
    376	struct axi_dma_chan_config config = {};
    377	u32 irq_mask;
    378	u8 lms = 0; /* Select AXI0 master for LLI fetching */
    379
    380	if (unlikely(axi_chan_is_hw_enable(chan))) {
    381		dev_err(chan2dev(chan), "%s is non-idle!\n",
    382			axi_chan_name(chan));
    383
    384		return;
    385	}
    386
    387	axi_dma_enable(chan->chip);
    388
    389	config.dst_multblk_type = DWAXIDMAC_MBLK_TYPE_LL;
    390	config.src_multblk_type = DWAXIDMAC_MBLK_TYPE_LL;
    391	config.tt_fc = DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC;
    392	config.prior = priority;
    393	config.hs_sel_dst = DWAXIDMAC_HS_SEL_HW;
    394	config.hs_sel_src = DWAXIDMAC_HS_SEL_HW;
    395	switch (chan->direction) {
    396	case DMA_MEM_TO_DEV:
    397		dw_axi_dma_set_byte_halfword(chan, true);
    398		config.tt_fc = chan->config.device_fc ?
    399				DWAXIDMAC_TT_FC_MEM_TO_PER_DST :
    400				DWAXIDMAC_TT_FC_MEM_TO_PER_DMAC;
    401		if (chan->chip->apb_regs)
    402			config.dst_per = chan->id;
    403		else
    404			config.dst_per = chan->hw_handshake_num;
    405		break;
    406	case DMA_DEV_TO_MEM:
    407		config.tt_fc = chan->config.device_fc ?
    408				DWAXIDMAC_TT_FC_PER_TO_MEM_SRC :
    409				DWAXIDMAC_TT_FC_PER_TO_MEM_DMAC;
    410		if (chan->chip->apb_regs)
    411			config.src_per = chan->id;
    412		else
    413			config.src_per = chan->hw_handshake_num;
    414		break;
    415	default:
    416		break;
    417	}
    418	axi_chan_config_write(chan, &config);
    419
    420	write_chan_llp(chan, first->hw_desc[0].llp | lms);
    421
    422	irq_mask = DWAXIDMAC_IRQ_DMA_TRF | DWAXIDMAC_IRQ_ALL_ERR;
    423	axi_chan_irq_sig_set(chan, irq_mask);
    424
    425	/* Generate 'suspend' status but don't generate interrupt */
    426	irq_mask |= DWAXIDMAC_IRQ_SUSPENDED;
    427	axi_chan_irq_set(chan, irq_mask);
    428
    429	axi_chan_enable(chan);
    430}
    431
    432static void axi_chan_start_first_queued(struct axi_dma_chan *chan)
    433{
    434	struct axi_dma_desc *desc;
    435	struct virt_dma_desc *vd;
    436
    437	vd = vchan_next_desc(&chan->vc);
    438	if (!vd)
    439		return;
    440
    441	desc = vd_to_axi_desc(vd);
    442	dev_vdbg(chan2dev(chan), "%s: started %u\n", axi_chan_name(chan),
    443		vd->tx.cookie);
    444	axi_chan_block_xfer_start(chan, desc);
    445}
    446
    447static void dma_chan_issue_pending(struct dma_chan *dchan)
    448{
    449	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
    450	unsigned long flags;
    451
    452	spin_lock_irqsave(&chan->vc.lock, flags);
    453	if (vchan_issue_pending(&chan->vc))
    454		axi_chan_start_first_queued(chan);
    455	spin_unlock_irqrestore(&chan->vc.lock, flags);
    456}
    457
    458static void dw_axi_dma_synchronize(struct dma_chan *dchan)
    459{
    460	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
    461
    462	vchan_synchronize(&chan->vc);
    463}
    464
    465static int dma_chan_alloc_chan_resources(struct dma_chan *dchan)
    466{
    467	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
    468
    469	/* ASSERT: channel is idle */
    470	if (axi_chan_is_hw_enable(chan)) {
    471		dev_err(chan2dev(chan), "%s is non-idle!\n",
    472			axi_chan_name(chan));
    473		return -EBUSY;
    474	}
    475
    476	/* LLI address must be aligned to a 64-byte boundary */
    477	chan->desc_pool = dma_pool_create(dev_name(chan2dev(chan)),
    478					  chan->chip->dev,
    479					  sizeof(struct axi_dma_lli),
    480					  64, 0);
    481	if (!chan->desc_pool) {
    482		dev_err(chan2dev(chan), "No memory for descriptors\n");
    483		return -ENOMEM;
    484	}
    485	dev_vdbg(dchan2dev(dchan), "%s: allocating\n", axi_chan_name(chan));
    486
    487	pm_runtime_get(chan->chip->dev);
    488
    489	return 0;
    490}
    491
    492static void dma_chan_free_chan_resources(struct dma_chan *dchan)
    493{
    494	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
    495
    496	/* ASSERT: channel is idle */
    497	if (axi_chan_is_hw_enable(chan))
    498		dev_err(dchan2dev(dchan), "%s is non-idle!\n",
    499			axi_chan_name(chan));
    500
    501	axi_chan_disable(chan);
    502	axi_chan_irq_disable(chan, DWAXIDMAC_IRQ_ALL);
    503
    504	vchan_free_chan_resources(&chan->vc);
    505
    506	dma_pool_destroy(chan->desc_pool);
    507	chan->desc_pool = NULL;
    508	dev_vdbg(dchan2dev(dchan),
    509		 "%s: free resources, descriptor still allocated: %u\n",
    510		 axi_chan_name(chan), atomic_read(&chan->descs_allocated));
    511
    512	pm_runtime_put(chan->chip->dev);
    513}
    514
    515static void dw_axi_dma_set_hw_channel(struct axi_dma_chan *chan, bool set)
    516{
    517	struct axi_dma_chip *chip = chan->chip;
    518	unsigned long reg_value, val;
    519
    520	if (!chip->apb_regs) {
    521		dev_err(chip->dev, "apb_regs not initialized\n");
    522		return;
    523	}
    524
    525	/*
    526	 * An unused DMA channel has a default value of 0x3F.
     527	 * Lock the DMA channel by assigning a handshake number to the channel.
     528	 * Unlock the DMA channel by assigning 0x3F to it.
    529	 */
    530	if (set)
    531		val = chan->hw_handshake_num;
    532	else
    533		val = UNUSED_CHANNEL;
    534
    535	reg_value = lo_hi_readq(chip->apb_regs + DMAC_APB_HW_HS_SEL_0);
    536
    537	/* Channel is already allocated, set handshake as per channel ID */
     538	/* A single 64-bit write covers the handshake fields of all 8 channels */
    539
    540	reg_value &= ~(DMA_APB_HS_SEL_MASK <<
    541			(chan->id * DMA_APB_HS_SEL_BIT_SIZE));
    542	reg_value |= (val << (chan->id * DMA_APB_HS_SEL_BIT_SIZE));
    543	lo_hi_writeq(reg_value, chip->apb_regs + DMAC_APB_HW_HS_SEL_0);
    544
    545	return;
    546}
    547
    548/*
     549 * If DW_axi_dmac sees the CHx_CTL.ShadowReg_Or_LLI_Last bit of the fetched
     550 * LLI set to 1, it treats the current block as the final block of the
     551 * transfer and completes the DMA transfer operation at the end of the
     552 * current block transfer.
    553 */
    554static void set_desc_last(struct axi_dma_hw_desc *desc)
    555{
    556	u32 val;
    557
    558	val = le32_to_cpu(desc->lli->ctl_hi);
    559	val |= CH_CTL_H_LLI_LAST;
    560	desc->lli->ctl_hi = cpu_to_le32(val);
    561}
    562
    563static void write_desc_sar(struct axi_dma_hw_desc *desc, dma_addr_t adr)
    564{
    565	desc->lli->sar = cpu_to_le64(adr);
    566}
    567
    568static void write_desc_dar(struct axi_dma_hw_desc *desc, dma_addr_t adr)
    569{
    570	desc->lli->dar = cpu_to_le64(adr);
    571}
    572
    573static void set_desc_src_master(struct axi_dma_hw_desc *desc)
    574{
    575	u32 val;
    576
    577	/* Select AXI0 for source master */
    578	val = le32_to_cpu(desc->lli->ctl_lo);
    579	val &= ~CH_CTL_L_SRC_MAST;
    580	desc->lli->ctl_lo = cpu_to_le32(val);
    581}
    582
    583static void set_desc_dest_master(struct axi_dma_hw_desc *hw_desc,
    584				 struct axi_dma_desc *desc)
    585{
    586	u32 val;
    587
     588	/* Select AXI1 for destination master if available */
    589	val = le32_to_cpu(hw_desc->lli->ctl_lo);
    590	if (desc->chan->chip->dw->hdata->nr_masters > 1)
    591		val |= CH_CTL_L_DST_MAST;
    592	else
    593		val &= ~CH_CTL_L_DST_MAST;
    594
    595	hw_desc->lli->ctl_lo = cpu_to_le32(val);
    596}
    597
    598static int dw_axi_dma_set_hw_desc(struct axi_dma_chan *chan,
    599				  struct axi_dma_hw_desc *hw_desc,
    600				  dma_addr_t mem_addr, size_t len)
    601{
    602	unsigned int data_width = BIT(chan->chip->dw->hdata->m_data_width);
    603	unsigned int reg_width;
    604	unsigned int mem_width;
    605	dma_addr_t device_addr;
    606	size_t axi_block_ts;
    607	size_t block_ts;
    608	u32 ctllo, ctlhi;
    609	u32 burst_len;
    610
    611	axi_block_ts = chan->chip->dw->hdata->block_size[chan->id];
    612
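        	/*
        	 * Derive the memory-side transfer width from the buffer address
        	 * and length alignment, capped at 32 bits; buffers must be at
        	 * least 4-byte aligned.
        	 */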
    613	mem_width = __ffs(data_width | mem_addr | len);
    614	if (mem_width > DWAXIDMAC_TRANS_WIDTH_32)
    615		mem_width = DWAXIDMAC_TRANS_WIDTH_32;
    616
    617	if (!IS_ALIGNED(mem_addr, 4)) {
    618		dev_err(chan->chip->dev, "invalid buffer alignment\n");
    619		return -EINVAL;
    620	}
    621
    622	switch (chan->direction) {
    623	case DMA_MEM_TO_DEV:
    624		reg_width = __ffs(chan->config.dst_addr_width);
    625		device_addr = chan->config.dst_addr;
    626		ctllo = reg_width << CH_CTL_L_DST_WIDTH_POS |
    627			mem_width << CH_CTL_L_SRC_WIDTH_POS |
    628			DWAXIDMAC_CH_CTL_L_NOINC << CH_CTL_L_DST_INC_POS |
    629			DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS;
    630		block_ts = len >> mem_width;
    631		break;
    632	case DMA_DEV_TO_MEM:
    633		reg_width = __ffs(chan->config.src_addr_width);
    634		device_addr = chan->config.src_addr;
    635		ctllo = reg_width << CH_CTL_L_SRC_WIDTH_POS |
    636			mem_width << CH_CTL_L_DST_WIDTH_POS |
    637			DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS |
    638			DWAXIDMAC_CH_CTL_L_NOINC << CH_CTL_L_SRC_INC_POS;
    639		block_ts = len >> reg_width;
    640		break;
    641	default:
    642		return -EINVAL;
    643	}
    644
    645	if (block_ts > axi_block_ts)
    646		return -EINVAL;
    647
    648	hw_desc->lli = axi_desc_get(chan, &hw_desc->llp);
    649	if (unlikely(!hw_desc->lli))
    650		return -ENOMEM;
    651
    652	ctlhi = CH_CTL_H_LLI_VALID;
    653
    654	if (chan->chip->dw->hdata->restrict_axi_burst_len) {
    655		burst_len = chan->chip->dw->hdata->axi_rw_burst_len;
    656		ctlhi |= CH_CTL_H_ARLEN_EN | CH_CTL_H_AWLEN_EN |
    657			 burst_len << CH_CTL_H_ARLEN_POS |
    658			 burst_len << CH_CTL_H_AWLEN_POS;
    659	}
    660
    661	hw_desc->lli->ctl_hi = cpu_to_le32(ctlhi);
    662
    663	if (chan->direction == DMA_MEM_TO_DEV) {
    664		write_desc_sar(hw_desc, mem_addr);
    665		write_desc_dar(hw_desc, device_addr);
    666	} else {
    667		write_desc_sar(hw_desc, device_addr);
    668		write_desc_dar(hw_desc, mem_addr);
    669	}
    670
    671	hw_desc->lli->block_ts_lo = cpu_to_le32(block_ts - 1);
    672
    673	ctllo |= DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS |
    674		 DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS;
    675	hw_desc->lli->ctl_lo = cpu_to_le32(ctllo);
    676
    677	set_desc_src_master(hw_desc);
    678
    679	hw_desc->len = len;
    680	return 0;
    681}
    682
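        /*
         * Largest number of bytes a single hardware block can carry on this
         * channel, given the per-channel block-size limit (in data items) and
         * the transfer width implied by the direction.
         */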
    683static size_t calculate_block_len(struct axi_dma_chan *chan,
    684				  dma_addr_t dma_addr, size_t buf_len,
    685				  enum dma_transfer_direction direction)
    686{
    687	u32 data_width, reg_width, mem_width;
    688	size_t axi_block_ts, block_len;
    689
    690	axi_block_ts = chan->chip->dw->hdata->block_size[chan->id];
    691
    692	switch (direction) {
    693	case DMA_MEM_TO_DEV:
    694		data_width = BIT(chan->chip->dw->hdata->m_data_width);
    695		mem_width = __ffs(data_width | dma_addr | buf_len);
    696		if (mem_width > DWAXIDMAC_TRANS_WIDTH_32)
    697			mem_width = DWAXIDMAC_TRANS_WIDTH_32;
    698
    699		block_len = axi_block_ts << mem_width;
    700		break;
    701	case DMA_DEV_TO_MEM:
    702		reg_width = __ffs(chan->config.src_addr_width);
    703		block_len = axi_block_ts << reg_width;
    704		break;
    705	default:
    706		block_len = 0;
    707	}
    708
    709	return block_len;
    710}
    711
    712static struct dma_async_tx_descriptor *
    713dw_axi_dma_chan_prep_cyclic(struct dma_chan *dchan, dma_addr_t dma_addr,
    714			    size_t buf_len, size_t period_len,
    715			    enum dma_transfer_direction direction,
    716			    unsigned long flags)
    717{
    718	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
    719	struct axi_dma_hw_desc *hw_desc = NULL;
    720	struct axi_dma_desc *desc = NULL;
    721	dma_addr_t src_addr = dma_addr;
    722	u32 num_periods, num_segments;
    723	size_t axi_block_len;
    724	u32 total_segments;
    725	u32 segment_len;
    726	unsigned int i;
    727	int status;
    728	u64 llp = 0;
    729	u8 lms = 0; /* Select AXI0 master for LLI fetching */
    730
    731	num_periods = buf_len / period_len;
    732
    733	axi_block_len = calculate_block_len(chan, dma_addr, buf_len, direction);
    734	if (axi_block_len == 0)
    735		return NULL;
    736
    737	num_segments = DIV_ROUND_UP(period_len, axi_block_len);
    738	segment_len = DIV_ROUND_UP(period_len, num_segments);
    739
    740	total_segments = num_periods * num_segments;
    741
    742	desc = axi_desc_alloc(total_segments);
    743	if (unlikely(!desc))
    744		goto err_desc_get;
    745
    746	chan->direction = direction;
    747	desc->chan = chan;
    748	chan->cyclic = true;
    749	desc->length = 0;
    750	desc->period_len = period_len;
    751
    752	for (i = 0; i < total_segments; i++) {
    753		hw_desc = &desc->hw_desc[i];
    754
    755		status = dw_axi_dma_set_hw_desc(chan, hw_desc, src_addr,
    756						segment_len);
    757		if (status < 0)
    758			goto err_desc_get;
    759
    760		desc->length += hw_desc->len;
     761		/* Set end-of-link on each linked descriptor, so that the cyclic
     762		 * callback can be triggered from the interrupt handler.
    763		 */
    764		set_desc_last(hw_desc);
    765
    766		src_addr += segment_len;
    767	}
    768
    769	llp = desc->hw_desc[0].llp;
    770
    771	/* Managed transfer list */
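        	/*
        	 * Link the LLIs backwards so each one points to its successor;
        	 * seeding llp with the first LLI's address closes the ring for
        	 * cyclic operation.
        	 */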
    772	do {
    773		hw_desc = &desc->hw_desc[--total_segments];
    774		write_desc_llp(hw_desc, llp | lms);
    775		llp = hw_desc->llp;
    776	} while (total_segments);
    777
    778	dw_axi_dma_set_hw_channel(chan, true);
    779
    780	return vchan_tx_prep(&chan->vc, &desc->vd, flags);
    781
    782err_desc_get:
    783	if (desc)
    784		axi_desc_put(desc);
    785
    786	return NULL;
    787}
    788
    789static struct dma_async_tx_descriptor *
    790dw_axi_dma_chan_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
    791			      unsigned int sg_len,
    792			      enum dma_transfer_direction direction,
    793			      unsigned long flags, void *context)
    794{
    795	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
    796	struct axi_dma_hw_desc *hw_desc = NULL;
    797	struct axi_dma_desc *desc = NULL;
    798	u32 num_segments, segment_len;
    799	unsigned int loop = 0;
    800	struct scatterlist *sg;
    801	size_t axi_block_len;
    802	u32 len, num_sgs = 0;
    803	unsigned int i;
    804	dma_addr_t mem;
    805	int status;
    806	u64 llp = 0;
    807	u8 lms = 0; /* Select AXI0 master for LLI fetching */
    808
    809	if (unlikely(!is_slave_direction(direction) || !sg_len))
    810		return NULL;
    811
    812	mem = sg_dma_address(sgl);
    813	len = sg_dma_len(sgl);
    814
    815	axi_block_len = calculate_block_len(chan, mem, len, direction);
    816	if (axi_block_len == 0)
    817		return NULL;
    818
    819	for_each_sg(sgl, sg, sg_len, i)
    820		num_sgs += DIV_ROUND_UP(sg_dma_len(sg), axi_block_len);
    821
    822	desc = axi_desc_alloc(num_sgs);
    823	if (unlikely(!desc))
    824		goto err_desc_get;
    825
    826	desc->chan = chan;
    827	desc->length = 0;
    828	chan->direction = direction;
    829
    830	for_each_sg(sgl, sg, sg_len, i) {
    831		mem = sg_dma_address(sg);
    832		len = sg_dma_len(sg);
    833		num_segments = DIV_ROUND_UP(sg_dma_len(sg), axi_block_len);
    834		segment_len = DIV_ROUND_UP(sg_dma_len(sg), num_segments);
    835
    836		do {
    837			hw_desc = &desc->hw_desc[loop++];
    838			status = dw_axi_dma_set_hw_desc(chan, hw_desc, mem, segment_len);
    839			if (status < 0)
    840				goto err_desc_get;
    841
    842			desc->length += hw_desc->len;
    843			len -= segment_len;
    844			mem += segment_len;
    845		} while (len >= segment_len);
    846	}
    847
     848	/* Set end-of-link on the last link descriptor of the list */
    849	set_desc_last(&desc->hw_desc[num_sgs - 1]);
    850
    851	/* Managed transfer list */
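        	/*
        	 * Link the LLIs backwards so each one points to its successor;
        	 * llp starts at 0, so the final LLI's pointer stays NULL.
        	 */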
    852	do {
    853		hw_desc = &desc->hw_desc[--num_sgs];
    854		write_desc_llp(hw_desc, llp | lms);
    855		llp = hw_desc->llp;
    856	} while (num_sgs);
    857
    858	dw_axi_dma_set_hw_channel(chan, true);
    859
    860	return vchan_tx_prep(&chan->vc, &desc->vd, flags);
    861
    862err_desc_get:
    863	if (desc)
    864		axi_desc_put(desc);
    865
    866	return NULL;
    867}
    868
    869static struct dma_async_tx_descriptor *
    870dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr,
    871			 dma_addr_t src_adr, size_t len, unsigned long flags)
    872{
    873	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
    874	size_t block_ts, max_block_ts, xfer_len;
    875	struct axi_dma_hw_desc *hw_desc = NULL;
    876	struct axi_dma_desc *desc = NULL;
    877	u32 xfer_width, reg, num;
    878	u64 llp = 0;
    879	u8 lms = 0; /* Select AXI0 master for LLI fetching */
    880
    881	dev_dbg(chan2dev(chan), "%s: memcpy: src: %pad dst: %pad length: %zd flags: %#lx",
    882		axi_chan_name(chan), &src_adr, &dst_adr, len, flags);
    883
    884	max_block_ts = chan->chip->dw->hdata->block_size[chan->id];
    885	xfer_width = axi_chan_get_xfer_width(chan, src_adr, dst_adr, len);
    886	num = DIV_ROUND_UP(len, max_block_ts << xfer_width);
    887	desc = axi_desc_alloc(num);
    888	if (unlikely(!desc))
    889		goto err_desc_get;
    890
    891	desc->chan = chan;
    892	num = 0;
    893	desc->length = 0;
    894	while (len) {
    895		xfer_len = len;
    896
    897		hw_desc = &desc->hw_desc[num];
    898		/*
     899		 * Take care of the alignment.
     900		 * The source and destination widths could differ, but keep them
     901		 * the same for simplicity.
    902		 */
    903		xfer_width = axi_chan_get_xfer_width(chan, src_adr, dst_adr, xfer_len);
    904
    905		/*
     906		 * block_ts is the total number of data items of the chosen width
     907		 * to be transferred in one DMA block transfer.
     908		 * The BLOCK_TS register must be programmed with block_ts - 1.
    909		 */
    910		block_ts = xfer_len >> xfer_width;
    911		if (block_ts > max_block_ts) {
    912			block_ts = max_block_ts;
    913			xfer_len = max_block_ts << xfer_width;
    914		}
    915
    916		hw_desc->lli = axi_desc_get(chan, &hw_desc->llp);
    917		if (unlikely(!hw_desc->lli))
    918			goto err_desc_get;
    919
    920		write_desc_sar(hw_desc, src_adr);
    921		write_desc_dar(hw_desc, dst_adr);
    922		hw_desc->lli->block_ts_lo = cpu_to_le32(block_ts - 1);
    923
    924		reg = CH_CTL_H_LLI_VALID;
    925		if (chan->chip->dw->hdata->restrict_axi_burst_len) {
    926			u32 burst_len = chan->chip->dw->hdata->axi_rw_burst_len;
    927
    928			reg |= (CH_CTL_H_ARLEN_EN |
    929				burst_len << CH_CTL_H_ARLEN_POS |
    930				CH_CTL_H_AWLEN_EN |
    931				burst_len << CH_CTL_H_AWLEN_POS);
    932		}
    933		hw_desc->lli->ctl_hi = cpu_to_le32(reg);
    934
    935		reg = (DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS |
    936		       DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS |
    937		       xfer_width << CH_CTL_L_DST_WIDTH_POS |
    938		       xfer_width << CH_CTL_L_SRC_WIDTH_POS |
    939		       DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS |
    940		       DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS);
    941		hw_desc->lli->ctl_lo = cpu_to_le32(reg);
    942
    943		set_desc_src_master(hw_desc);
    944		set_desc_dest_master(hw_desc, desc);
    945
    946		hw_desc->len = xfer_len;
    947		desc->length += hw_desc->len;
    948		/* update the length and addresses for the next loop cycle */
    949		len -= xfer_len;
    950		dst_adr += xfer_len;
    951		src_adr += xfer_len;
    952		num++;
    953	}
    954
    955	/* Set end-of-link to the last link descriptor of list */
    956	set_desc_last(&desc->hw_desc[num - 1]);
    957	/* Managed transfer list */
    958	do {
    959		hw_desc = &desc->hw_desc[--num];
    960		write_desc_llp(hw_desc, llp | lms);
    961		llp = hw_desc->llp;
    962	} while (num);
    963
    964	return vchan_tx_prep(&chan->vc, &desc->vd, flags);
    965
    966err_desc_get:
    967	if (desc)
    968		axi_desc_put(desc);
    969	return NULL;
    970}
    971
    972static int dw_axi_dma_chan_slave_config(struct dma_chan *dchan,
    973					struct dma_slave_config *config)
    974{
    975	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
    976
    977	memcpy(&chan->config, config, sizeof(*config));
    978
    979	return 0;
    980}
    981
    982static void axi_chan_dump_lli(struct axi_dma_chan *chan,
    983			      struct axi_dma_hw_desc *desc)
    984{
    985	dev_err(dchan2dev(&chan->vc.chan),
    986		"SAR: 0x%llx DAR: 0x%llx LLP: 0x%llx BTS 0x%x CTL: 0x%x:%08x",
    987		le64_to_cpu(desc->lli->sar),
    988		le64_to_cpu(desc->lli->dar),
    989		le64_to_cpu(desc->lli->llp),
    990		le32_to_cpu(desc->lli->block_ts_lo),
    991		le32_to_cpu(desc->lli->ctl_hi),
    992		le32_to_cpu(desc->lli->ctl_lo));
    993}
    994
    995static void axi_chan_list_dump_lli(struct axi_dma_chan *chan,
    996				   struct axi_dma_desc *desc_head)
    997{
    998	int count = atomic_read(&chan->descs_allocated);
    999	int i;
   1000
   1001	for (i = 0; i < count; i++)
   1002		axi_chan_dump_lli(chan, &desc_head->hw_desc[i]);
   1003}
   1004
   1005static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
   1006{
   1007	struct virt_dma_desc *vd;
   1008	unsigned long flags;
   1009
   1010	spin_lock_irqsave(&chan->vc.lock, flags);
   1011
   1012	axi_chan_disable(chan);
   1013
    1014	/* The bad descriptor is currently at the head of the vc list */
   1015	vd = vchan_next_desc(&chan->vc);
   1016	/* Remove the completed descriptor from issued list */
   1017	list_del(&vd->node);
   1018
   1019	/* WARN about bad descriptor */
   1020	dev_err(chan2dev(chan),
   1021		"Bad descriptor submitted for %s, cookie: %d, irq: 0x%08x\n",
   1022		axi_chan_name(chan), vd->tx.cookie, status);
   1023	axi_chan_list_dump_lli(chan, vd_to_axi_desc(vd));
   1024
   1025	vchan_cookie_complete(vd);
   1026
   1027	/* Try to restart the controller */
   1028	axi_chan_start_first_queued(chan);
   1029
   1030	spin_unlock_irqrestore(&chan->vc.lock, flags);
   1031}
   1032
   1033static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
   1034{
   1035	int count = atomic_read(&chan->descs_allocated);
   1036	struct axi_dma_hw_desc *hw_desc;
   1037	struct axi_dma_desc *desc;
   1038	struct virt_dma_desc *vd;
   1039	unsigned long flags;
   1040	u64 llp;
   1041	int i;
   1042
   1043	spin_lock_irqsave(&chan->vc.lock, flags);
   1044	if (unlikely(axi_chan_is_hw_enable(chan))) {
   1045		dev_err(chan2dev(chan), "BUG: %s caught DWAXIDMAC_IRQ_DMA_TRF, but channel not idle!\n",
   1046			axi_chan_name(chan));
   1047		axi_chan_disable(chan);
   1048	}
   1049
    1050	/* The completed descriptor is currently at the head of the vc list */
   1051	vd = vchan_next_desc(&chan->vc);
   1052
   1053	if (chan->cyclic) {
   1054		desc = vd_to_axi_desc(vd);
   1055		if (desc) {
   1056			llp = lo_hi_readq(chan->chan_regs + CH_LLP);
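        			/*
        			 * Locate the LLI the controller is currently pointing
        			 * at (CH_LLP), re-arm its valid bit for the next pass
        			 * around the ring, and run the cyclic callback at each
        			 * period boundary.
        			 */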
   1057			for (i = 0; i < count; i++) {
   1058				hw_desc = &desc->hw_desc[i];
   1059				if (hw_desc->llp == llp) {
   1060					axi_chan_irq_clear(chan, hw_desc->lli->status_lo);
   1061					hw_desc->lli->ctl_hi |= CH_CTL_H_LLI_VALID;
   1062					desc->completed_blocks = i;
   1063
   1064					if (((hw_desc->len * (i + 1)) % desc->period_len) == 0)
   1065						vchan_cyclic_callback(vd);
   1066					break;
   1067				}
   1068			}
   1069
   1070			axi_chan_enable(chan);
   1071		}
   1072	} else {
   1073		/* Remove the completed descriptor from issued list before completing */
   1074		list_del(&vd->node);
   1075		vchan_cookie_complete(vd);
   1076
   1077		/* Submit queued descriptors after processing the completed ones */
   1078		axi_chan_start_first_queued(chan);
   1079	}
   1080
   1081	spin_unlock_irqrestore(&chan->vc.lock, flags);
   1082}
   1083
   1084static irqreturn_t dw_axi_dma_interrupt(int irq, void *dev_id)
   1085{
   1086	struct axi_dma_chip *chip = dev_id;
   1087	struct dw_axi_dma *dw = chip->dw;
   1088	struct axi_dma_chan *chan;
   1089
   1090	u32 status, i;
   1091
   1092	/* Disable DMAC interrupts. We'll enable them after processing channels */
   1093	axi_dma_irq_disable(chip);
   1094
   1095	/* Poll, clear and process every channel interrupt status */
   1096	for (i = 0; i < dw->hdata->nr_channels; i++) {
   1097		chan = &dw->chan[i];
   1098		status = axi_chan_irq_read(chan);
   1099		axi_chan_irq_clear(chan, status);
   1100
   1101		dev_vdbg(chip->dev, "%s %u IRQ status: 0x%08x\n",
   1102			axi_chan_name(chan), i, status);
   1103
   1104		if (status & DWAXIDMAC_IRQ_ALL_ERR)
   1105			axi_chan_handle_err(chan, status);
   1106		else if (status & DWAXIDMAC_IRQ_DMA_TRF)
   1107			axi_chan_block_xfer_complete(chan);
   1108	}
   1109
   1110	/* Re-enable interrupts */
   1111	axi_dma_irq_enable(chip);
   1112
   1113	return IRQ_HANDLED;
   1114}
   1115
   1116static int dma_chan_terminate_all(struct dma_chan *dchan)
   1117{
   1118	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
   1119	u32 chan_active = BIT(chan->id) << DMAC_CHAN_EN_SHIFT;
   1120	unsigned long flags;
   1121	u32 val;
   1122	int ret;
   1123	LIST_HEAD(head);
   1124
   1125	axi_chan_disable(chan);
   1126
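        	/* Wait for the hardware to clear the channel enable bit */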
   1127	ret = readl_poll_timeout_atomic(chan->chip->regs + DMAC_CHEN, val,
   1128					!(val & chan_active), 1000, 10000);
   1129	if (ret == -ETIMEDOUT)
   1130		dev_warn(dchan2dev(dchan),
   1131			 "%s failed to stop\n", axi_chan_name(chan));
   1132
   1133	if (chan->direction != DMA_MEM_TO_MEM)
   1134		dw_axi_dma_set_hw_channel(chan, false);
   1135	if (chan->direction == DMA_MEM_TO_DEV)
   1136		dw_axi_dma_set_byte_halfword(chan, false);
   1137
   1138	spin_lock_irqsave(&chan->vc.lock, flags);
   1139
   1140	vchan_get_all_descriptors(&chan->vc, &head);
   1141
   1142	chan->cyclic = false;
   1143	spin_unlock_irqrestore(&chan->vc.lock, flags);
   1144
   1145	vchan_dma_desc_free_list(&chan->vc, &head);
   1146
   1147	dev_vdbg(dchan2dev(dchan), "terminated: %s\n", axi_chan_name(chan));
   1148
   1149	return 0;
   1150}
   1151
   1152static int dma_chan_pause(struct dma_chan *dchan)
   1153{
   1154	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
   1155	unsigned long flags;
   1156	unsigned int timeout = 20; /* timeout iterations */
   1157	u32 val;
   1158
   1159	spin_lock_irqsave(&chan->vc.lock, flags);
   1160
   1161	if (chan->chip->dw->hdata->reg_map_8_channels) {
   1162		val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
   1163		val |= BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT |
   1164			BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT;
   1165		axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
   1166	} else {
   1167		val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG);
   1168		val |= BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT |
   1169			BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT;
   1170		axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, val);
   1171	}
   1172
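        	/* Wait for the controller to raise the 'suspended' status */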
   1173	do  {
   1174		if (axi_chan_irq_read(chan) & DWAXIDMAC_IRQ_SUSPENDED)
   1175			break;
   1176
   1177		udelay(2);
   1178	} while (--timeout);
   1179
   1180	axi_chan_irq_clear(chan, DWAXIDMAC_IRQ_SUSPENDED);
   1181
   1182	chan->is_paused = true;
   1183
   1184	spin_unlock_irqrestore(&chan->vc.lock, flags);
   1185
   1186	return timeout ? 0 : -EAGAIN;
   1187}
   1188
   1189/* Called in chan locked context */
   1190static inline void axi_chan_resume(struct axi_dma_chan *chan)
   1191{
   1192	u32 val;
   1193
   1194	if (chan->chip->dw->hdata->reg_map_8_channels) {
   1195		val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
   1196		val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT);
   1197		val |=  (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT);
   1198		axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
   1199	} else {
   1200		val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG);
   1201		val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT);
   1202		val |=  (BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT);
   1203		axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, val);
   1204	}
   1205
   1206	chan->is_paused = false;
   1207}
   1208
   1209static int dma_chan_resume(struct dma_chan *dchan)
   1210{
   1211	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
   1212	unsigned long flags;
   1213
   1214	spin_lock_irqsave(&chan->vc.lock, flags);
   1215
   1216	if (chan->is_paused)
   1217		axi_chan_resume(chan);
   1218
   1219	spin_unlock_irqrestore(&chan->vc.lock, flags);
   1220
   1221	return 0;
   1222}
   1223
   1224static int axi_dma_suspend(struct axi_dma_chip *chip)
   1225{
   1226	axi_dma_irq_disable(chip);
   1227	axi_dma_disable(chip);
   1228
   1229	clk_disable_unprepare(chip->core_clk);
   1230	clk_disable_unprepare(chip->cfgr_clk);
   1231
   1232	return 0;
   1233}
   1234
   1235static int axi_dma_resume(struct axi_dma_chip *chip)
   1236{
   1237	int ret;
   1238
   1239	ret = clk_prepare_enable(chip->cfgr_clk);
   1240	if (ret < 0)
   1241		return ret;
   1242
   1243	ret = clk_prepare_enable(chip->core_clk);
   1244	if (ret < 0)
   1245		return ret;
   1246
   1247	axi_dma_enable(chip);
   1248	axi_dma_irq_enable(chip);
   1249
   1250	return 0;
   1251}
   1252
   1253static int __maybe_unused axi_dma_runtime_suspend(struct device *dev)
   1254{
   1255	struct axi_dma_chip *chip = dev_get_drvdata(dev);
   1256
   1257	return axi_dma_suspend(chip);
   1258}
   1259
   1260static int __maybe_unused axi_dma_runtime_resume(struct device *dev)
   1261{
   1262	struct axi_dma_chip *chip = dev_get_drvdata(dev);
   1263
   1264	return axi_dma_resume(chip);
   1265}
   1266
   1267static struct dma_chan *dw_axi_dma_of_xlate(struct of_phandle_args *dma_spec,
   1268					    struct of_dma *ofdma)
   1269{
   1270	struct dw_axi_dma *dw = ofdma->of_dma_data;
   1271	struct axi_dma_chan *chan;
   1272	struct dma_chan *dchan;
   1273
   1274	dchan = dma_get_any_slave_channel(&dw->dma);
   1275	if (!dchan)
   1276		return NULL;
   1277
   1278	chan = dchan_to_axi_dma_chan(dchan);
   1279	chan->hw_handshake_num = dma_spec->args[0];
   1280	return dchan;
   1281}
   1282
   1283static int parse_device_properties(struct axi_dma_chip *chip)
   1284{
   1285	struct device *dev = chip->dev;
   1286	u32 tmp, carr[DMAC_MAX_CHANNELS];
   1287	int ret;
   1288
   1289	ret = device_property_read_u32(dev, "dma-channels", &tmp);
   1290	if (ret)
   1291		return ret;
   1292	if (tmp == 0 || tmp > DMAC_MAX_CHANNELS)
   1293		return -EINVAL;
   1294
   1295	chip->dw->hdata->nr_channels = tmp;
   1296	if (tmp <= DMA_REG_MAP_CH_REF)
   1297		chip->dw->hdata->reg_map_8_channels = true;
   1298
   1299	ret = device_property_read_u32(dev, "snps,dma-masters", &tmp);
   1300	if (ret)
   1301		return ret;
   1302	if (tmp == 0 || tmp > DMAC_MAX_MASTERS)
   1303		return -EINVAL;
   1304
   1305	chip->dw->hdata->nr_masters = tmp;
   1306
   1307	ret = device_property_read_u32(dev, "snps,data-width", &tmp);
   1308	if (ret)
   1309		return ret;
   1310	if (tmp > DWAXIDMAC_TRANS_WIDTH_MAX)
   1311		return -EINVAL;
   1312
   1313	chip->dw->hdata->m_data_width = tmp;
   1314
   1315	ret = device_property_read_u32_array(dev, "snps,block-size", carr,
   1316					     chip->dw->hdata->nr_channels);
   1317	if (ret)
   1318		return ret;
   1319	for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) {
   1320		if (carr[tmp] == 0 || carr[tmp] > DMAC_MAX_BLK_SIZE)
   1321			return -EINVAL;
   1322
   1323		chip->dw->hdata->block_size[tmp] = carr[tmp];
   1324	}
   1325
   1326	ret = device_property_read_u32_array(dev, "snps,priority", carr,
   1327					     chip->dw->hdata->nr_channels);
   1328	if (ret)
   1329		return ret;
   1330	/* Priority value must be programmed within [0:nr_channels-1] range */
   1331	for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) {
   1332		if (carr[tmp] >= chip->dw->hdata->nr_channels)
   1333			return -EINVAL;
   1334
   1335		chip->dw->hdata->priority[tmp] = carr[tmp];
   1336	}
   1337
    1338	/* axi-max-burst-len is an optional property */
   1339	ret = device_property_read_u32(dev, "snps,axi-max-burst-len", &tmp);
   1340	if (!ret) {
   1341		if (tmp > DWAXIDMAC_ARWLEN_MAX + 1)
   1342			return -EINVAL;
   1343		if (tmp < DWAXIDMAC_ARWLEN_MIN + 1)
   1344			return -EINVAL;
   1345
   1346		chip->dw->hdata->restrict_axi_burst_len = true;
   1347		chip->dw->hdata->axi_rw_burst_len = tmp;
   1348	}
   1349
   1350	return 0;
   1351}
   1352
   1353static int dw_probe(struct platform_device *pdev)
   1354{
   1355	struct device_node *node = pdev->dev.of_node;
   1356	struct axi_dma_chip *chip;
   1357	struct resource *mem;
   1358	struct dw_axi_dma *dw;
   1359	struct dw_axi_dma_hcfg *hdata;
   1360	u32 i;
   1361	int ret;
   1362
   1363	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
   1364	if (!chip)
   1365		return -ENOMEM;
   1366
   1367	dw = devm_kzalloc(&pdev->dev, sizeof(*dw), GFP_KERNEL);
   1368	if (!dw)
   1369		return -ENOMEM;
   1370
   1371	hdata = devm_kzalloc(&pdev->dev, sizeof(*hdata), GFP_KERNEL);
   1372	if (!hdata)
   1373		return -ENOMEM;
   1374
   1375	chip->dw = dw;
   1376	chip->dev = &pdev->dev;
   1377	chip->dw->hdata = hdata;
   1378
   1379	chip->irq = platform_get_irq(pdev, 0);
   1380	if (chip->irq < 0)
   1381		return chip->irq;
   1382
   1383	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
   1384	chip->regs = devm_ioremap_resource(chip->dev, mem);
   1385	if (IS_ERR(chip->regs))
   1386		return PTR_ERR(chip->regs);
   1387
   1388	if (of_device_is_compatible(node, "intel,kmb-axi-dma")) {
   1389		chip->apb_regs = devm_platform_ioremap_resource(pdev, 1);
   1390		if (IS_ERR(chip->apb_regs))
   1391			return PTR_ERR(chip->apb_regs);
   1392	}
   1393
   1394	chip->core_clk = devm_clk_get(chip->dev, "core-clk");
   1395	if (IS_ERR(chip->core_clk))
   1396		return PTR_ERR(chip->core_clk);
   1397
   1398	chip->cfgr_clk = devm_clk_get(chip->dev, "cfgr-clk");
   1399	if (IS_ERR(chip->cfgr_clk))
   1400		return PTR_ERR(chip->cfgr_clk);
   1401
   1402	ret = parse_device_properties(chip);
   1403	if (ret)
   1404		return ret;
   1405
   1406	dw->chan = devm_kcalloc(chip->dev, hdata->nr_channels,
   1407				sizeof(*dw->chan), GFP_KERNEL);
   1408	if (!dw->chan)
   1409		return -ENOMEM;
   1410
   1411	ret = devm_request_irq(chip->dev, chip->irq, dw_axi_dma_interrupt,
   1412			       IRQF_SHARED, KBUILD_MODNAME, chip);
   1413	if (ret)
   1414		return ret;
   1415
   1416	INIT_LIST_HEAD(&dw->dma.channels);
   1417	for (i = 0; i < hdata->nr_channels; i++) {
   1418		struct axi_dma_chan *chan = &dw->chan[i];
   1419
   1420		chan->chip = chip;
   1421		chan->id = i;
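        		/* Per-channel register windows follow the common register block */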
   1422		chan->chan_regs = chip->regs + COMMON_REG_LEN + i * CHAN_REG_LEN;
   1423		atomic_set(&chan->descs_allocated, 0);
   1424
   1425		chan->vc.desc_free = vchan_desc_put;
   1426		vchan_init(&chan->vc, &dw->dma);
   1427	}
   1428
   1429	/* Set capabilities */
   1430	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
   1431	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
   1432	dma_cap_set(DMA_CYCLIC, dw->dma.cap_mask);
   1433
   1434	/* DMA capabilities */
   1435	dw->dma.chancnt = hdata->nr_channels;
   1436	dw->dma.max_burst = hdata->axi_rw_burst_len;
   1437	dw->dma.src_addr_widths = AXI_DMA_BUSWIDTHS;
   1438	dw->dma.dst_addr_widths = AXI_DMA_BUSWIDTHS;
   1439	dw->dma.directions = BIT(DMA_MEM_TO_MEM);
   1440	dw->dma.directions |= BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
   1441	dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
   1442
   1443	dw->dma.dev = chip->dev;
   1444	dw->dma.device_tx_status = dma_chan_tx_status;
   1445	dw->dma.device_issue_pending = dma_chan_issue_pending;
   1446	dw->dma.device_terminate_all = dma_chan_terminate_all;
   1447	dw->dma.device_pause = dma_chan_pause;
   1448	dw->dma.device_resume = dma_chan_resume;
   1449
   1450	dw->dma.device_alloc_chan_resources = dma_chan_alloc_chan_resources;
   1451	dw->dma.device_free_chan_resources = dma_chan_free_chan_resources;
   1452
   1453	dw->dma.device_prep_dma_memcpy = dma_chan_prep_dma_memcpy;
   1454	dw->dma.device_synchronize = dw_axi_dma_synchronize;
   1455	dw->dma.device_config = dw_axi_dma_chan_slave_config;
   1456	dw->dma.device_prep_slave_sg = dw_axi_dma_chan_prep_slave_sg;
   1457	dw->dma.device_prep_dma_cyclic = dw_axi_dma_chan_prep_cyclic;
   1458
   1459	/*
    1460	 * The Synopsys DesignWare AXI DMA databook states that the maximum
    1461	 * supported block size is 1024. The device register width is 4 bytes,
    1462	 * so set the constraint to 1024 * 4.
   1463	 */
   1464	dw->dma.dev->dma_parms = &dw->dma_parms;
   1465	dma_set_max_seg_size(&pdev->dev, MAX_BLOCK_SIZE);
   1466	platform_set_drvdata(pdev, chip);
   1467
   1468	pm_runtime_enable(chip->dev);
   1469
   1470	/*
   1471	 * We can't just call pm_runtime_get here instead of
    1472	 * pm_runtime_get_noresume + axi_dma_resume because we need the
    1473	 * driver to work even when Runtime PM is disabled.
   1474	 */
   1475	pm_runtime_get_noresume(chip->dev);
   1476	ret = axi_dma_resume(chip);
   1477	if (ret < 0)
   1478		goto err_pm_disable;
   1479
   1480	axi_dma_hw_init(chip);
   1481
   1482	pm_runtime_put(chip->dev);
   1483
   1484	ret = dmaenginem_async_device_register(&dw->dma);
   1485	if (ret)
   1486		goto err_pm_disable;
   1487
   1488	/* Register with OF helpers for DMA lookups */
   1489	ret = of_dma_controller_register(pdev->dev.of_node,
   1490					 dw_axi_dma_of_xlate, dw);
   1491	if (ret < 0)
   1492		dev_warn(&pdev->dev,
   1493			 "Failed to register OF DMA controller, fallback to MEM_TO_MEM mode\n");
   1494
   1495	dev_info(chip->dev, "DesignWare AXI DMA Controller, %d channels\n",
   1496		 dw->hdata->nr_channels);
   1497
   1498	return 0;
   1499
   1500err_pm_disable:
   1501	pm_runtime_disable(chip->dev);
   1502
   1503	return ret;
   1504}
   1505
   1506static int dw_remove(struct platform_device *pdev)
   1507{
   1508	struct axi_dma_chip *chip = platform_get_drvdata(pdev);
   1509	struct dw_axi_dma *dw = chip->dw;
   1510	struct axi_dma_chan *chan, *_chan;
   1511	u32 i;
   1512
    1513	/* Enable clocks before accessing the registers */
   1514	clk_prepare_enable(chip->cfgr_clk);
   1515	clk_prepare_enable(chip->core_clk);
   1516	axi_dma_irq_disable(chip);
   1517	for (i = 0; i < dw->hdata->nr_channels; i++) {
   1518		axi_chan_disable(&chip->dw->chan[i]);
   1519		axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
   1520	}
   1521	axi_dma_disable(chip);
   1522
   1523	pm_runtime_disable(chip->dev);
   1524	axi_dma_suspend(chip);
   1525
   1526	devm_free_irq(chip->dev, chip->irq, chip);
   1527
   1528	of_dma_controller_free(chip->dev->of_node);
   1529
   1530	list_for_each_entry_safe(chan, _chan, &dw->dma.channels,
   1531			vc.chan.device_node) {
   1532		list_del(&chan->vc.chan.device_node);
   1533		tasklet_kill(&chan->vc.task);
   1534	}
   1535
   1536	return 0;
   1537}
   1538
   1539static const struct dev_pm_ops dw_axi_dma_pm_ops = {
   1540	SET_RUNTIME_PM_OPS(axi_dma_runtime_suspend, axi_dma_runtime_resume, NULL)
   1541};
   1542
   1543static const struct of_device_id dw_dma_of_id_table[] = {
   1544	{ .compatible = "snps,axi-dma-1.01a" },
   1545	{ .compatible = "intel,kmb-axi-dma" },
   1546	{}
   1547};
   1548MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
   1549
   1550static struct platform_driver dw_driver = {
   1551	.probe		= dw_probe,
   1552	.remove		= dw_remove,
   1553	.driver = {
   1554		.name	= KBUILD_MODNAME,
   1555		.of_match_table = dw_dma_of_id_table,
   1556		.pm = &dw_axi_dma_pm_ops,
   1557	},
   1558};
   1559module_platform_driver(dw_driver);
   1560
   1561MODULE_LICENSE("GPL v2");
   1562MODULE_DESCRIPTION("Synopsys DesignWare AXI DMA Controller platform driver");
   1563MODULE_AUTHOR("Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>");