cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

shdmac.c (24321B)


      1// SPDX-License-Identifier: GPL-2.0+
      2/*
      3 * Renesas SuperH DMA Engine support
      4 *
      5 * base is drivers/dma/fsldma.c
      6 *
      7 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
      8 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
      9 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
     10 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
     11 *
     12 * - The SuperH DMAC has no hardware DMA chain mode.
     13 * - The maximum DMA transfer size is 16 MB.
     14 *
     15 */
     16
     17#include <linux/delay.h>
     18#include <linux/dmaengine.h>
     19#include <linux/err.h>
     20#include <linux/init.h>
     21#include <linux/interrupt.h>
     22#include <linux/kdebug.h>
     23#include <linux/module.h>
     24#include <linux/notifier.h>
     25#include <linux/of.h>
     26#include <linux/of_device.h>
     27#include <linux/platform_device.h>
     28#include <linux/pm_runtime.h>
     29#include <linux/rculist.h>
     30#include <linux/sh_dma.h>
     31#include <linux/slab.h>
     32#include <linux/spinlock.h>
     33
     34#include "../dmaengine.h"
     35#include "shdma.h"
     36
     37/* DMA registers */
     38#define SAR	0x00	/* Source Address Register */
     39#define DAR	0x04	/* Destination Address Register */
     40#define TCR	0x08	/* Transfer Count Register */
     41#define CHCR	0x0C	/* Channel Control Register */
     42#define DMAOR	0x40	/* DMA Operation Register */
     43
     44#define TEND	0x18 /* USB-DMAC */
     45
     46#define SH_DMAE_DRV_NAME "sh-dma-engine"
     47
     48/* Default MEMCPY transfer size = 2^2 = 4 bytes */
     49#define LOG2_DEFAULT_XFER_SIZE	2
     50#define SH_DMA_SLAVE_NUMBER 256
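       /* Caps a single transfer at 16 MiB, matching the limit noted in the header comment */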
     51#define SH_DMA_TCR_MAX (16 * 1024 * 1024 - 1)
     52
     53/*
     54 * Used for write-side mutual exclusion for the global device list,
     55 * read-side synchronization by way of RCU, and per-controller data.
     56 */
     57static DEFINE_SPINLOCK(sh_dmae_lock);
     58static LIST_HEAD(sh_dmae_devices);
     59
     60/*
     61 * Different DMAC implementations provide different ways to clear DMA channels:
     62 * (1) none - no CHCLR registers are available
     63 * (2) one CHCLR register per channel - 0 has to be written to it to clear
     64 *     channel buffers
     65 * (3) one CHCLR register per several channels - 1 has to be written to the
     66 *     bit corresponding to the specific channel to reset it
     67 */
     68static void channel_clear(struct sh_dmae_chan *sh_dc)
     69{
     70	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
     71	const struct sh_dmae_channel *chan_pdata = shdev->pdata->channel +
     72		sh_dc->shdma_chan.id;
     73	u32 val = shdev->pdata->chclr_bitwise ? 1 << chan_pdata->chclr_bit : 0;
     74
     75	__raw_writel(val, shdev->chan_reg + chan_pdata->chclr_offset);
     76}
     77
     78static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
     79{
     80	__raw_writel(data, sh_dc->base + reg);
     81}
     82
     83static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
     84{
     85	return __raw_readl(sh_dc->base + reg);
     86}
     87
     88static u16 dmaor_read(struct sh_dmae_device *shdev)
     89{
     90	void __iomem *addr = shdev->chan_reg + DMAOR;
     91
     92	if (shdev->pdata->dmaor_is_32bit)
     93		return __raw_readl(addr);
     94	else
     95		return __raw_readw(addr);
     96}
     97
     98static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
     99{
    100	void __iomem *addr = shdev->chan_reg + DMAOR;
    101
    102	if (shdev->pdata->dmaor_is_32bit)
    103		__raw_writel(data, addr);
    104	else
    105		__raw_writew(data, addr);
    106}
    107
    108static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
    109{
    110	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
    111
    112	__raw_writel(data, sh_dc->base + shdev->chcr_offset);
    113}
    114
    115static u32 chcr_read(struct sh_dmae_chan *sh_dc)
    116{
    117	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
    118
    119	return __raw_readl(sh_dc->base + shdev->chcr_offset);
    120}
    121
    122/*
    123 * Reset DMA controller
    124 *
    125 * SH7780 has two DMAOR registers
    126 */
    127static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
    128{
    129	unsigned short dmaor;
    130	unsigned long flags;
    131
    132	spin_lock_irqsave(&sh_dmae_lock, flags);
    133
    134	dmaor = dmaor_read(shdev);
    135	dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));
    136
    137	spin_unlock_irqrestore(&sh_dmae_lock, flags);
    138}
    139
    140static int sh_dmae_rst(struct sh_dmae_device *shdev)
    141{
    142	unsigned short dmaor;
    143	unsigned long flags;
    144
    145	spin_lock_irqsave(&sh_dmae_lock, flags);
    146
    147	dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);
    148
    149	if (shdev->pdata->chclr_present) {
    150		int i;
    151		for (i = 0; i < shdev->pdata->channel_num; i++) {
    152			struct sh_dmae_chan *sh_chan = shdev->chan[i];
    153			if (sh_chan)
    154				channel_clear(sh_chan);
    155		}
    156	}
    157
    158	dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);
    159
    160	dmaor = dmaor_read(shdev);
    161
    162	spin_unlock_irqrestore(&sh_dmae_lock, flags);
    163
    164	if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
    165		dev_warn(shdev->shdma_dev.dma_dev.dev, "Can't initialize DMAOR.\n");
    166		return -EIO;
    167	}
    168	if (shdev->pdata->dmaor_init & ~dmaor)
    169		dev_warn(shdev->shdma_dev.dma_dev.dev,
    170			 "DMAOR=0x%x hasn't latched the initial value 0x%x.\n",
    171			 dmaor, shdev->pdata->dmaor_init);
    172	return 0;
    173}
    174
    175static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
    176{
    177	u32 chcr = chcr_read(sh_chan);
    178
    179	if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
    180		return true; /* working */
    181
    182	return false; /* waiting */
    183}
    184
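       /*
        * The CHCR transfer-size (TS) field is split across two bit groups,
        * described by ts_low_mask/shift and ts_high_mask/shift in the
        * platform data. The combined index selects an entry of ts_shift[],
        * which holds log2(transfer size in bytes) for the channel.
        */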
    185static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
    186{
    187	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
    188	const struct sh_dmae_pdata *pdata = shdev->pdata;
    189	int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
    190		((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);
    191
    192	if (cnt >= pdata->ts_shift_num)
    193		cnt = 0;
    194
    195	return pdata->ts_shift[cnt];
    196}
    197
    198static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
    199{
    200	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
    201	const struct sh_dmae_pdata *pdata = shdev->pdata;
    202	int i;
    203
    204	for (i = 0; i < pdata->ts_shift_num; i++)
    205		if (pdata->ts_shift[i] == l2size)
    206			break;
    207
    208	if (i == pdata->ts_shift_num)
    209		i = 0;
    210
    211	return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
    212		((i << pdata->ts_high_shift) & pdata->ts_high_mask);
    213}
    214
    215static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
    216{
    217	sh_dmae_writel(sh_chan, hw->sar, SAR);
    218	sh_dmae_writel(sh_chan, hw->dar, DAR);
    219	sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
    220}
    221
    222static void dmae_start(struct sh_dmae_chan *sh_chan)
    223{
    224	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
    225	u32 chcr = chcr_read(sh_chan);
    226
    227	if (shdev->pdata->needs_tend_set)
    228		sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND);
    229
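       	/* Enable the channel and its interrupt, and clear any stale transfer-end flag */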
    230	chcr |= CHCR_DE | shdev->chcr_ie_bit;
    231	chcr_write(sh_chan, chcr & ~CHCR_TE);
    232}
    233
    234static void dmae_init(struct sh_dmae_chan *sh_chan)
    235{
    236	/*
    237	 * Default configuration for dual address memory-memory transfer.
    238	 */
    239	u32 chcr = DM_INC | SM_INC | RS_AUTO | log2size_to_chcr(sh_chan,
    240						   LOG2_DEFAULT_XFER_SIZE);
    241	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
    242	chcr_write(sh_chan, chcr);
    243}
    244
    245static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
    246{
    247	/* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
    248	if (dmae_is_busy(sh_chan))
    249		return -EBUSY;
    250
    251	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
    252	chcr_write(sh_chan, val);
    253
    254	return 0;
    255}
    256
    257static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
    258{
    259	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
    260	const struct sh_dmae_pdata *pdata = shdev->pdata;
    261	const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->shdma_chan.id];
    262	void __iomem *addr = shdev->dmars;
    263	unsigned int shift = chan_pdata->dmars_bit;
    264
    265	if (dmae_is_busy(sh_chan))
    266		return -EBUSY;
    267
    268	if (pdata->no_dmars)
    269		return 0;
    270
    271	/* in the case of a missing DMARS resource, use the first memory window */
    272	if (!addr)
    273		addr = shdev->chan_reg;
    274	addr += chan_pdata->dmars;
    275
    276	__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
    277		     addr);
    278
    279	return 0;
    280}
    281
    282static void sh_dmae_start_xfer(struct shdma_chan *schan,
    283			       struct shdma_desc *sdesc)
    284{
    285	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
    286						    shdma_chan);
    287	struct sh_dmae_desc *sh_desc = container_of(sdesc,
    288					struct sh_dmae_desc, shdma_desc);
    289	dev_dbg(sh_chan->shdma_chan.dev, "Queue #%d to %d: %u@%x -> %x\n",
    290		sdesc->async_tx.cookie, sh_chan->shdma_chan.id,
    291		sh_desc->hw.tcr, sh_desc->hw.sar, sh_desc->hw.dar);
    292	/* Program the hardware registers from the descriptor and start the transfer */
    293	dmae_set_reg(sh_chan, &sh_desc->hw);
    294	dmae_start(sh_chan);
    295}
    296
    297static bool sh_dmae_channel_busy(struct shdma_chan *schan)
    298{
    299	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
    300						    shdma_chan);
    301	return dmae_is_busy(sh_chan);
    302}
    303
    304static void sh_dmae_setup_xfer(struct shdma_chan *schan,
    305			       int slave_id)
    306{
    307	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
    308						    shdma_chan);
    309
    310	if (slave_id >= 0) {
    311		const struct sh_dmae_slave_config *cfg =
    312			sh_chan->config;
    313
    314		dmae_set_dmars(sh_chan, cfg->mid_rid);
    315		dmae_set_chcr(sh_chan, cfg->chcr);
    316	} else {
    317		dmae_init(sh_chan);
    318	}
    319}
    320
    321/*
    322 * Find a slave channel configuration from the controller list by either a slave
    323 * ID in the non-DT case, or by a MID/RID value in the DT case
    324 */
    325static const struct sh_dmae_slave_config *dmae_find_slave(
    326	struct sh_dmae_chan *sh_chan, int match)
    327{
    328	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
    329	const struct sh_dmae_pdata *pdata = shdev->pdata;
    330	const struct sh_dmae_slave_config *cfg;
    331	int i;
    332
    333	if (!sh_chan->shdma_chan.dev->of_node) {
    334		if (match >= SH_DMA_SLAVE_NUMBER)
    335			return NULL;
    336
    337		for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
    338			if (cfg->slave_id == match)
    339				return cfg;
    340	} else {
    341		for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
    342			if (cfg->mid_rid == match) {
    343				sh_chan->shdma_chan.slave_id = i;
    344				return cfg;
    345			}
    346	}
    347
    348	return NULL;
    349}
    350
    351static int sh_dmae_set_slave(struct shdma_chan *schan,
    352			     int slave_id, dma_addr_t slave_addr, bool try)
    353{
    354	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
    355						    shdma_chan);
    356	const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan, slave_id);
    357	if (!cfg)
    358		return -ENXIO;
    359
    360	if (!try) {
    361		sh_chan->config = cfg;
    362		sh_chan->slave_addr = slave_addr ? : cfg->addr;
    363	}
    364
    365	return 0;
    366}
    367
    368static void dmae_halt(struct sh_dmae_chan *sh_chan)
    369{
    370	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
    371	u32 chcr = chcr_read(sh_chan);
    372
    373	chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
    374	chcr_write(sh_chan, chcr);
    375}
    376
    377static int sh_dmae_desc_setup(struct shdma_chan *schan,
    378			      struct shdma_desc *sdesc,
    379			      dma_addr_t src, dma_addr_t dst, size_t *len)
    380{
    381	struct sh_dmae_desc *sh_desc = container_of(sdesc,
    382					struct sh_dmae_desc, shdma_desc);
    383
    384	if (*len > schan->max_xfer_len)
    385		*len = schan->max_xfer_len;
    386
    387	sh_desc->hw.sar = src;
    388	sh_desc->hw.dar = dst;
    389	sh_desc->hw.tcr = *len;
    390
    391	return 0;
    392}
    393
    394static void sh_dmae_halt(struct shdma_chan *schan)
    395{
    396	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
    397						    shdma_chan);
    398	dmae_halt(sh_chan);
    399}
    400
    401static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq)
    402{
    403	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
    404						    shdma_chan);
    405
    406	if (!(chcr_read(sh_chan) & CHCR_TE))
    407		return false;
    408
    409	/* DMA stop */
    410	dmae_halt(sh_chan);
    411
    412	return true;
    413}
    414
    415static size_t sh_dmae_get_partial(struct shdma_chan *schan,
    416				  struct shdma_desc *sdesc)
    417{
    418	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
    419						    shdma_chan);
    420	struct sh_dmae_desc *sh_desc = container_of(sdesc,
    421					struct sh_dmae_desc, shdma_desc);
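       	/*
       	 * TCR holds the remaining count in units of the configured transfer
       	 * size; convert it back to bytes and subtract from the descriptor's
       	 * original length to get the number of bytes already transferred.
       	 */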
    422	return sh_desc->hw.tcr -
    423		(sh_dmae_readl(sh_chan, TCR) << sh_chan->xmit_shift);
    424}
    425
    426/* Called from error IRQ or NMI */
    427static bool sh_dmae_reset(struct sh_dmae_device *shdev)
    428{
    429	bool ret;
    430
    431	/* halt the dma controller */
    432	sh_dmae_ctl_stop(shdev);
    433
    434	/* We cannot detect which channel caused the error, so we have to reset all of them */
    435	ret = shdma_reset(&shdev->shdma_dev);
    436
    437	sh_dmae_rst(shdev);
    438
    439	return ret;
    440}
    441
    442static irqreturn_t sh_dmae_err(int irq, void *data)
    443{
    444	struct sh_dmae_device *shdev = data;
    445
    446	if (!(dmaor_read(shdev) & DMAOR_AE))
    447		return IRQ_NONE;
    448
    449	sh_dmae_reset(shdev);
    450	return IRQ_HANDLED;
    451}
    452
    453static bool sh_dmae_desc_completed(struct shdma_chan *schan,
    454				   struct shdma_desc *sdesc)
    455{
    456	struct sh_dmae_chan *sh_chan = container_of(schan,
    457					struct sh_dmae_chan, shdma_chan);
    458	struct sh_dmae_desc *sh_desc = container_of(sdesc,
    459					struct sh_dmae_desc, shdma_desc);
    460	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
    461	u32 dar_buf = sh_dmae_readl(sh_chan, DAR);
    462
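       	/*
       	 * A descriptor has completed once the hardware address pointer has
       	 * advanced to the end of its buffer: check DAR for DEV_TO_MEM
       	 * transfers and SAR for all others.
       	 */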
    463	return	(sdesc->direction == DMA_DEV_TO_MEM &&
    464		 (sh_desc->hw.dar + sh_desc->hw.tcr) == dar_buf) ||
    465		(sdesc->direction != DMA_DEV_TO_MEM &&
    466		 (sh_desc->hw.sar + sh_desc->hw.tcr) == sar_buf);
    467}
    468
    469static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
    470{
    471	/* Fast path out if NMIF is not asserted for this controller */
    472	if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
    473		return false;
    474
    475	return sh_dmae_reset(shdev);
    476}
    477
    478static int sh_dmae_nmi_handler(struct notifier_block *self,
    479			       unsigned long cmd, void *data)
    480{
    481	struct sh_dmae_device *shdev;
    482	int ret = NOTIFY_DONE;
    483	bool triggered;
    484
    485	/*
    486	 * Only concern ourselves with NMI events.
    487	 *
    488	 * Normally we would check the die chain value, but as this needs
    489	 * to be architecture independent, check for NMI context instead.
    490	 */
    491	if (!in_nmi())
    492		return NOTIFY_DONE;
    493
    494	rcu_read_lock();
    495	list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
    496		/*
    497		 * Only stop if one of the controllers has NMIF asserted,
    498		 * we do not want to interfere with regular address error
    499		 * handling or NMI events that don't concern the DMACs.
    500		 */
    501		triggered = sh_dmae_nmi_notify(shdev);
    502		if (triggered)
    503			ret = NOTIFY_OK;
    504	}
    505	rcu_read_unlock();
    506
    507	return ret;
    508}
    509
    510static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
    511	.notifier_call	= sh_dmae_nmi_handler,
    512
    513	/* Run before NMI debug handler and KGDB */
    514	.priority	= 1,
    515};
    516
    517static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
    518					int irq, unsigned long flags)
    519{
    520	const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
    521	struct shdma_dev *sdev = &shdev->shdma_dev;
    522	struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev);
    523	struct sh_dmae_chan *sh_chan;
    524	struct shdma_chan *schan;
    525	int err;
    526
    527	sh_chan = devm_kzalloc(sdev->dma_dev.dev, sizeof(struct sh_dmae_chan),
    528			       GFP_KERNEL);
    529	if (!sh_chan)
    530		return -ENOMEM;
    531
    532	schan = &sh_chan->shdma_chan;
    533	schan->max_xfer_len = SH_DMA_TCR_MAX + 1;
    534
    535	shdma_chan_probe(sdev, schan, id);
    536
    537	sh_chan->base = shdev->chan_reg + chan_pdata->offset;
    538
    539	/* set up channel irq */
    540	if (pdev->id >= 0)
    541		snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
    542			 "sh-dmae%d.%d", pdev->id, id);
    543	else
    544		snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
    545			 "sh-dma%d", id);
    546
    547	err = shdma_request_irq(schan, irq, flags, sh_chan->dev_id);
    548	if (err) {
    549		dev_err(sdev->dma_dev.dev,
    550			"DMA channel %d request_irq error %d\n",
    551			id, err);
    552		goto err_no_irq;
    553	}
    554
    555	shdev->chan[id] = sh_chan;
    556	return 0;
    557
    558err_no_irq:
    559	/* remove from dmaengine device node */
    560	shdma_chan_remove(schan);
    561	return err;
    562}
    563
    564static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
    565{
    566	struct shdma_chan *schan;
    567	int i;
    568
    569	shdma_for_each_chan(schan, &shdev->shdma_dev, i) {
    570		BUG_ON(!schan);
    571
    572		shdma_chan_remove(schan);
    573	}
    574}
    575
    576#ifdef CONFIG_PM
    577static int sh_dmae_runtime_suspend(struct device *dev)
    578{
    579	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
    580
    581	sh_dmae_ctl_stop(shdev);
    582	return 0;
    583}
    584
    585static int sh_dmae_runtime_resume(struct device *dev)
    586{
    587	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
    588
    589	return sh_dmae_rst(shdev);
    590}
    591#endif
    592
    593#ifdef CONFIG_PM_SLEEP
    594static int sh_dmae_suspend(struct device *dev)
    595{
    596	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
    597
    598	sh_dmae_ctl_stop(shdev);
    599	return 0;
    600}
    601
    602static int sh_dmae_resume(struct device *dev)
    603{
    604	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
    605	int i, ret;
    606
    607	ret = sh_dmae_rst(shdev);
    608	if (ret < 0)
    609		dev_err(dev, "Failed to reset!\n");
    610
    611	for (i = 0; i < shdev->pdata->channel_num; i++) {
    612		struct sh_dmae_chan *sh_chan = shdev->chan[i];
    613
    614		if (!sh_chan->shdma_chan.desc_num)
    615			continue;
    616
    617		if (sh_chan->shdma_chan.slave_id >= 0) {
    618			const struct sh_dmae_slave_config *cfg = sh_chan->config;
    619			dmae_set_dmars(sh_chan, cfg->mid_rid);
    620			dmae_set_chcr(sh_chan, cfg->chcr);
    621		} else {
    622			dmae_init(sh_chan);
    623		}
    624	}
    625
    626	return 0;
    627}
    628#endif
    629
    630static const struct dev_pm_ops sh_dmae_pm = {
    631	SET_SYSTEM_SLEEP_PM_OPS(sh_dmae_suspend, sh_dmae_resume)
    632	SET_RUNTIME_PM_OPS(sh_dmae_runtime_suspend, sh_dmae_runtime_resume,
    633			   NULL)
    634};
    635
    636static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan)
    637{
    638	struct sh_dmae_chan *sh_chan = container_of(schan,
    639					struct sh_dmae_chan, shdma_chan);
    640
    641	/*
    642	 * Implicit BUG_ON(!sh_chan->config)
    643 * This is an exclusive slave DMA operation; it may only be called after a
    644	 * successful slave configuration.
    645	 */
    646	return sh_chan->slave_addr;
    647}
    648
    649static struct shdma_desc *sh_dmae_embedded_desc(void *buf, int i)
    650{
    651	return &((struct sh_dmae_desc *)buf)[i].shdma_desc;
    652}
    653
    654static const struct shdma_ops sh_dmae_shdma_ops = {
    655	.desc_completed = sh_dmae_desc_completed,
    656	.halt_channel = sh_dmae_halt,
    657	.channel_busy = sh_dmae_channel_busy,
    658	.slave_addr = sh_dmae_slave_addr,
    659	.desc_setup = sh_dmae_desc_setup,
    660	.set_slave = sh_dmae_set_slave,
    661	.setup_xfer = sh_dmae_setup_xfer,
    662	.start_xfer = sh_dmae_start_xfer,
    663	.embedded_desc = sh_dmae_embedded_desc,
    664	.chan_irq = sh_dmae_chan_irq,
    665	.get_partial = sh_dmae_get_partial,
    666};
    667
    668static int sh_dmae_probe(struct platform_device *pdev)
    669{
    670	const enum dma_slave_buswidth widths =
    671		DMA_SLAVE_BUSWIDTH_1_BYTE   | DMA_SLAVE_BUSWIDTH_2_BYTES |
    672		DMA_SLAVE_BUSWIDTH_4_BYTES  | DMA_SLAVE_BUSWIDTH_8_BYTES |
    673		DMA_SLAVE_BUSWIDTH_16_BYTES | DMA_SLAVE_BUSWIDTH_32_BYTES;
    674	const struct sh_dmae_pdata *pdata;
    675	unsigned long chan_flag[SH_DMAE_MAX_CHANNELS] = {};
    676	int chan_irq[SH_DMAE_MAX_CHANNELS];
    677	unsigned long irqflags = 0;
    678	int err, errirq, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
    679	struct sh_dmae_device *shdev;
    680	struct dma_device *dma_dev;
    681	struct resource *chan, *dmars, *errirq_res, *chanirq_res;
    682
    683	if (pdev->dev.of_node)
    684		pdata = of_device_get_match_data(&pdev->dev);
    685	else
    686		pdata = dev_get_platdata(&pdev->dev);
    687
    688	/* get platform data */
    689	if (!pdata || !pdata->channel_num)
    690		return -ENODEV;
    691
    692	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    693	/* DMARS area is optional */
    694	dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
    695	/*
    696	 * IRQ resources:
    697 * 1. there must always be at least one IRQ IO-resource. On SH4 it is
    698	 *    the error IRQ, in which case it is the only IRQ in this resource:
    699	 *    start == end. If it is the only IRQ resource, all channels also
    700	 *    use the same IRQ.
    701	 * 2. DMA channel IRQ resources can be specified one per resource or in
    702	 *    ranges (start != end)
    703	 * 3. iff all events (channels and, optionally, error) on this
    704	 *    controller use the same IRQ, only one IRQ resource can be
    705	 *    specified, otherwise there must be one IRQ per channel, even if
    706	 *    some of them are equal
    707	 * 4. if all IRQs on this controller are equal or if some specific IRQs
    708	 *    specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
    709	 *    requested with the IRQF_SHARED flag
    710	 */
    711	errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
    712	if (!chan || !errirq_res)
    713		return -ENODEV;
    714
    715	shdev = devm_kzalloc(&pdev->dev, sizeof(struct sh_dmae_device),
    716			     GFP_KERNEL);
    717	if (!shdev)
    718		return -ENOMEM;
    719
    720	dma_dev = &shdev->shdma_dev.dma_dev;
    721
    722	shdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
    723	if (IS_ERR(shdev->chan_reg))
    724		return PTR_ERR(shdev->chan_reg);
    725	if (dmars) {
    726		shdev->dmars = devm_ioremap_resource(&pdev->dev, dmars);
    727		if (IS_ERR(shdev->dmars))
    728			return PTR_ERR(shdev->dmars);
    729	}
    730
    731	dma_dev->src_addr_widths = widths;
    732	dma_dev->dst_addr_widths = widths;
    733	dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
    734	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
    735
    736	if (!pdata->slave_only)
    737		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
    738	if (pdata->slave && pdata->slave_num)
    739		dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
    740
    741	/* Default transfer size of 32 bytes requires 32-byte alignment */
    742	dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE;
    743
    744	shdev->shdma_dev.ops = &sh_dmae_shdma_ops;
    745	shdev->shdma_dev.desc_size = sizeof(struct sh_dmae_desc);
    746	err = shdma_init(&pdev->dev, &shdev->shdma_dev,
    747			      pdata->channel_num);
    748	if (err < 0)
    749		goto eshdma;
    750
    751	/* platform data */
    752	shdev->pdata = pdata;
    753
    754	if (pdata->chcr_offset)
    755		shdev->chcr_offset = pdata->chcr_offset;
    756	else
    757		shdev->chcr_offset = CHCR;
    758
    759	if (pdata->chcr_ie_bit)
    760		shdev->chcr_ie_bit = pdata->chcr_ie_bit;
    761	else
    762		shdev->chcr_ie_bit = CHCR_IE;
    763
    764	platform_set_drvdata(pdev, shdev);
    765
    766	pm_runtime_enable(&pdev->dev);
    767	err = pm_runtime_get_sync(&pdev->dev);
    768	if (err < 0)
    769		dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err);
    770
    771	spin_lock_irq(&sh_dmae_lock);
    772	list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
    773	spin_unlock_irq(&sh_dmae_lock);
    774
    775	/* reset dma controller - only needed as a test */
    776	err = sh_dmae_rst(shdev);
    777	if (err)
    778		goto rst_err;
    779
    780	if (IS_ENABLED(CONFIG_CPU_SH4) || IS_ENABLED(CONFIG_ARCH_RENESAS)) {
    781		chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
    782
    783		if (!chanirq_res)
    784			chanirq_res = errirq_res;
    785		else
    786			irqres++;
    787
    788		if (chanirq_res == errirq_res ||
    789		    (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
    790			irqflags = IRQF_SHARED;
    791
    792		errirq = errirq_res->start;
    793
    794		err = devm_request_irq(&pdev->dev, errirq, sh_dmae_err,
    795				       irqflags, "DMAC Address Error", shdev);
    796		if (err) {
    797			dev_err(&pdev->dev,
    798				"DMA failed requesting irq #%d, error %d\n",
    799				errirq, err);
    800			goto eirq_err;
    801		}
    802	} else {
    803		chanirq_res = errirq_res;
    804	}
    805
    806	if (chanirq_res->start == chanirq_res->end &&
    807	    !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
    808		/* Special case - all multiplexed */
    809		for (; irq_cnt < pdata->channel_num; irq_cnt++) {
    810			if (irq_cnt < SH_DMAE_MAX_CHANNELS) {
    811				chan_irq[irq_cnt] = chanirq_res->start;
    812				chan_flag[irq_cnt] = IRQF_SHARED;
    813			} else {
    814				irq_cap = 1;
    815				break;
    816			}
    817		}
    818	} else {
    819		do {
    820			for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
    821				if (irq_cnt >= SH_DMAE_MAX_CHANNELS) {
    822					irq_cap = 1;
    823					break;
    824				}
    825
    826				if ((errirq_res->flags & IORESOURCE_BITS) ==
    827				    IORESOURCE_IRQ_SHAREABLE)
    828					chan_flag[irq_cnt] = IRQF_SHARED;
    829				else
    830					chan_flag[irq_cnt] = 0;
    831				dev_dbg(&pdev->dev,
    832					"Found IRQ %d for channel %d\n",
    833					i, irq_cnt);
    834				chan_irq[irq_cnt++] = i;
    835			}
    836
    837			if (irq_cnt >= SH_DMAE_MAX_CHANNELS)
    838				break;
    839
    840			chanirq_res = platform_get_resource(pdev,
    841						IORESOURCE_IRQ, ++irqres);
    842		} while (irq_cnt < pdata->channel_num && chanirq_res);
    843	}
    844
    845	/* Create DMA Channel */
    846	for (i = 0; i < irq_cnt; i++) {
    847		err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
    848		if (err)
    849			goto chan_probe_err;
    850	}
    851
    852	if (irq_cap)
    853		dev_notice(&pdev->dev, "Attempting to register %d DMA "
    854			   "channels when a maximum of %d are supported.\n",
    855			   pdata->channel_num, SH_DMAE_MAX_CHANNELS);
    856
    857	pm_runtime_put(&pdev->dev);
    858
    859	err = dma_async_device_register(&shdev->shdma_dev.dma_dev);
    860	if (err < 0)
    861		goto edmadevreg;
    862
    863	return err;
    864
    865edmadevreg:
    866	pm_runtime_get(&pdev->dev);
    867
    868chan_probe_err:
    869	sh_dmae_chan_remove(shdev);
    870
    871eirq_err:
    872rst_err:
    873	spin_lock_irq(&sh_dmae_lock);
    874	list_del_rcu(&shdev->node);
    875	spin_unlock_irq(&sh_dmae_lock);
    876
    877	pm_runtime_put(&pdev->dev);
    878	pm_runtime_disable(&pdev->dev);
    879
    880	shdma_cleanup(&shdev->shdma_dev);
    881eshdma:
    882	synchronize_rcu();
    883
    884	return err;
    885}
    886
    887static int sh_dmae_remove(struct platform_device *pdev)
    888{
    889	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
    890	struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
    891
    892	dma_async_device_unregister(dma_dev);
    893
    894	spin_lock_irq(&sh_dmae_lock);
    895	list_del_rcu(&shdev->node);
    896	spin_unlock_irq(&sh_dmae_lock);
    897
    898	pm_runtime_disable(&pdev->dev);
    899
    900	sh_dmae_chan_remove(shdev);
    901	shdma_cleanup(&shdev->shdma_dev);
    902
    903	synchronize_rcu();
    904
    905	return 0;
    906}
    907
    908static struct platform_driver sh_dmae_driver = {
    909	.driver		= {
    910		.pm	= &sh_dmae_pm,
    911		.name	= SH_DMAE_DRV_NAME,
    912	},
    913	.remove		= sh_dmae_remove,
    914};
    915
    916static int __init sh_dmae_init(void)
    917{
    918	/* Wire up NMI handling */
    919	int err = register_die_notifier(&sh_dmae_nmi_notifier);
    920	if (err)
    921		return err;
    922
    923	return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
    924}
    925module_init(sh_dmae_init);
    926
    927static void __exit sh_dmae_exit(void)
    928{
    929	platform_driver_unregister(&sh_dmae_driver);
    930
    931	unregister_die_notifier(&sh_dmae_nmi_notifier);
    932}
    933module_exit(sh_dmae_exit);
    934
    935MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
    936MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
    937MODULE_LICENSE("GPL");
    938MODULE_ALIAS("platform:" SH_DMAE_DRV_NAME);