cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

spi-pic32.c (22771B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * Microchip PIC32 SPI controller driver.
      4 *
      5 * Purna Chandra Mandal <purna.mandal@microchip.com>
      6 * Copyright (c) 2016, Microchip Technology Inc.
      7 */
      8
      9#include <linux/clk.h>
     10#include <linux/clkdev.h>
     11#include <linux/delay.h>
     12#include <linux/dmaengine.h>
     13#include <linux/dma-mapping.h>
     14#include <linux/highmem.h>
     15#include <linux/module.h>
     16#include <linux/io.h>
     17#include <linux/interrupt.h>
     18#include <linux/of.h>
     19#include <linux/of_irq.h>
     20#include <linux/of_gpio.h>
     21#include <linux/of_address.h>
     22#include <linux/platform_device.h>
     23#include <linux/spi/spi.h>
     24
     25/* SPI controller registers */
     26struct pic32_spi_regs {
     27	u32 ctrl;
     28	u32 ctrl_clr;
     29	u32 ctrl_set;
     30	u32 ctrl_inv;
     31	u32 status;
     32	u32 status_clr;
     33	u32 status_set;
     34	u32 status_inv;
     35	u32 buf;
     36	u32 dontuse[3];
     37	u32 baud;
     38	u32 dontuse2[3];
     39	u32 ctrl2;
     40	u32 ctrl2_clr;
     41	u32 ctrl2_set;
     42	u32 ctrl2_inv;
     43};
     44
     45/* Bit fields of SPI Control Register */
     46#define CTRL_RX_INT_SHIFT	0  /* Rx interrupt generation */
     47#define  RX_FIFO_EMPTY		0
     48#define  RX_FIFO_NOT_EMPTY	1 /* not empty */
     49#define  RX_FIFO_HALF_FULL	2 /* full by half or more */
     50#define  RX_FIFO_FULL		3 /* completely full */
     51
     52#define CTRL_TX_INT_SHIFT	2  /* TX interrupt generation */
     53#define  TX_FIFO_ALL_EMPTY	0 /* completely empty */
     54#define  TX_FIFO_EMPTY		1 /* empty */
     55#define  TX_FIFO_HALF_EMPTY	2 /* empty by half or more */
     56#define  TX_FIFO_NOT_FULL	3 /* at least one empty */
     57
     58#define CTRL_MSTEN	BIT(5) /* enable master mode */
     59#define CTRL_CKP	BIT(6) /* active low */
     60#define CTRL_CKE	BIT(8) /* Tx on falling edge */
     61#define CTRL_SMP	BIT(9) /* Rx at middle or end of tx */
     62#define CTRL_BPW_MASK	0x03   /* bits per word/sample */
     63#define CTRL_BPW_SHIFT	10
     64#define  PIC32_BPW_8	0
     65#define  PIC32_BPW_16	1
     66#define  PIC32_BPW_32	2
     67#define CTRL_SIDL	BIT(13) /* sleep when idle */
     68#define CTRL_ON		BIT(15) /* enable macro */
     69#define CTRL_ENHBUF	BIT(16) /* enable enhanced buffering */
     70#define CTRL_MCLKSEL	BIT(23) /* select clock source */
     71#define CTRL_MSSEN	BIT(28) /* macro driven /SS */
     72#define CTRL_FRMEN	BIT(31) /* enable framing mode */
     73
     74/* Bit fields of SPI Status Register */
     75#define STAT_RF_EMPTY	BIT(5) /* RX Fifo empty */
     76#define STAT_RX_OV	BIT(6) /* err, s/w needs to clear */
     77#define STAT_TX_UR	BIT(8) /* UR in Framed SPI modes */
     78#define STAT_FRM_ERR	BIT(12) /* Multiple Frame Sync pulse */
     79#define STAT_TF_LVL_MASK	0x1F
     80#define STAT_TF_LVL_SHIFT	16
     81#define STAT_RF_LVL_MASK	0x1F
     82#define STAT_RF_LVL_SHIFT	24
     83
     84/* Bit fields of SPI Baud Register */
     85#define BAUD_MASK		0x1ff
     86
     87/* Bit fields of SPI Control2 Register */
     88#define CTRL2_TX_UR_EN		BIT(10) /* Enable int on Tx under-run */
     89#define CTRL2_RX_OV_EN		BIT(11) /* Enable int on Rx over-run */
     90#define CTRL2_FRM_ERR_EN	BIT(12) /* Enable frame err int */
     91
     92/* Minimum DMA transfer size */
     93#define PIC32_DMA_LEN_MIN	64
     94
     95struct pic32_spi {
     96	dma_addr_t		dma_base;
     97	struct pic32_spi_regs __iomem *regs;
     98	int			fault_irq;
     99	int			rx_irq;
    100	int			tx_irq;
    101	u32			fifo_n_byte; /* FIFO depth in bytes */
    102	struct clk		*clk;
    103	struct spi_master	*master;
    104	/* Current controller setting */
    105	u32			speed_hz; /* spi-clk rate */
    106	u32			mode;
    107	u32			bits_per_word;
    108	u32			fifo_n_elm; /* FIFO depth in words */
    109#define PIC32F_DMA_PREP		0 /* DMA chnls configured */
    110	unsigned long		flags;
    111	/* Current transfer state */
    112	struct completion	xfer_done;
    113	/* PIO transfer specific */
    114	const void		*tx;
    115	const void		*tx_end;
    116	const void		*rx;
    117	const void		*rx_end;
    118	int			len;
    119	void (*rx_fifo)(struct pic32_spi *);
    120	void (*tx_fifo)(struct pic32_spi *);
    121};
    122
    123static inline void pic32_spi_enable(struct pic32_spi *pic32s)
    124{
    125	writel(CTRL_ON | CTRL_SIDL, &pic32s->regs->ctrl_set);
    126}
    127
    128static inline void pic32_spi_disable(struct pic32_spi *pic32s)
    129{
    130	writel(CTRL_ON | CTRL_SIDL, &pic32s->regs->ctrl_clr);
    131
    132	/* avoid SPI register reads/writes on the immediately following CPU clock */
    133	ndelay(20);
    134}
    135
    136static void pic32_spi_set_clk_rate(struct pic32_spi *pic32s, u32 spi_ck)
    137{
    138	u32 div;
    139
    140	/* div = (clk_in / (2 * spi_ck)) - 1 */
    141	div = DIV_ROUND_CLOSEST(clk_get_rate(pic32s->clk), 2 * spi_ck) - 1;
    142
    143	writel(div & BAUD_MASK, &pic32s->regs->baud);
    144}
    145
    146static inline u32 pic32_rx_fifo_level(struct pic32_spi *pic32s)
    147{
    148	u32 sr = readl(&pic32s->regs->status);
    149
    150	return (sr >> STAT_RF_LVL_SHIFT) & STAT_RF_LVL_MASK;
    151}
    152
    153static inline u32 pic32_tx_fifo_level(struct pic32_spi *pic32s)
    154{
    155	u32 sr = readl(&pic32s->regs->status);
    156
    157	return (sr >> STAT_TF_LVL_SHIFT) & STAT_TF_LVL_MASK;
    158}
    159
    160/* Return the max entries we can fill into tx fifo */
    161static u32 pic32_tx_max(struct pic32_spi *pic32s, int n_bytes)
    162{
    163	u32 tx_left, tx_room, rxtx_gap;
    164
    165	tx_left = (pic32s->tx_end - pic32s->tx) / n_bytes;
    166	tx_room = pic32s->fifo_n_elm - pic32_tx_fifo_level(pic32s);
    167
    168	/*
    169	 * Another concern is the tx/rx mismatch: we thought of
    170	 * using (pic32s->fifo_n_byte - rxfl - txfl) as the
    171	 * maximum value for tx, but it doesn't cover the data
    172	 * that has already left the tx/rx fifos and sits in the
    173	 * shift registers. So the limit is enforced from the
    174	 * software side instead.
    175	 */
    176	rxtx_gap = ((pic32s->rx_end - pic32s->rx) -
    177		    (pic32s->tx_end - pic32s->tx)) / n_bytes;
    178	return min3(tx_left, tx_room, (u32)(pic32s->fifo_n_elm - rxtx_gap));
    179}
    180
    181/* Return the max entries we should read out of rx fifo */
    182static u32 pic32_rx_max(struct pic32_spi *pic32s, int n_bytes)
    183{
    184	u32 rx_left = (pic32s->rx_end - pic32s->rx) / n_bytes;
    185
    186	return min_t(u32, rx_left, pic32_rx_fifo_level(pic32s));
    187}
    188
    189#define BUILD_SPI_FIFO_RW(__name, __type, __bwl)		\
    190static void pic32_spi_rx_##__name(struct pic32_spi *pic32s)	\
    191{								\
    192	__type v;						\
    193	u32 mx = pic32_rx_max(pic32s, sizeof(__type));		\
    194	for (; mx; mx--) {					\
    195		v = read##__bwl(&pic32s->regs->buf);		\
    196		if (pic32s->rx_end - pic32s->len)		\
    197			*(__type *)(pic32s->rx) = v;		\
    198		pic32s->rx += sizeof(__type);			\
    199	}							\
    200}								\
    201								\
    202static void pic32_spi_tx_##__name(struct pic32_spi *pic32s)	\
    203{								\
    204	__type v;						\
    205	u32 mx = pic32_tx_max(pic32s, sizeof(__type));		\
    206	for (; mx ; mx--) {					\
    207		v = (__type)~0U;				\
    208		if (pic32s->tx_end - pic32s->len)		\
    209			v = *(__type *)(pic32s->tx);		\
    210		write##__bwl(v, &pic32s->regs->buf);		\
    211		pic32s->tx += sizeof(__type);			\
    212	}							\
    213}
    214
    215BUILD_SPI_FIFO_RW(byte, u8, b);
    216BUILD_SPI_FIFO_RW(word, u16, w);
    217BUILD_SPI_FIFO_RW(dword, u32, l);
    218
    219static void pic32_err_stop(struct pic32_spi *pic32s, const char *msg)
    220{
    221	/* disable all interrupts */
    222	disable_irq_nosync(pic32s->fault_irq);
    223	disable_irq_nosync(pic32s->rx_irq);
    224	disable_irq_nosync(pic32s->tx_irq);
    225
    226	/* Show err message and abort xfer with err */
    227	dev_err(&pic32s->master->dev, "%s\n", msg);
    228	if (pic32s->master->cur_msg)
    229		pic32s->master->cur_msg->status = -EIO;
    230	complete(&pic32s->xfer_done);
    231}
    232
    233static irqreturn_t pic32_spi_fault_irq(int irq, void *dev_id)
    234{
    235	struct pic32_spi *pic32s = dev_id;
    236	u32 status;
    237
    238	status = readl(&pic32s->regs->status);
    239
    240	/* Error handling */
    241	if (status & (STAT_RX_OV | STAT_TX_UR)) {
    242		writel(STAT_RX_OV, &pic32s->regs->status_clr);
    243		writel(STAT_TX_UR, &pic32s->regs->status_clr);
    244		pic32_err_stop(pic32s, "err_irq: fifo ov/ur-run");
    245		return IRQ_HANDLED;
    246	}
    247
    248	if (status & STAT_FRM_ERR) {
    249		pic32_err_stop(pic32s, "err_irq: frame error");
    250		return IRQ_HANDLED;
    251	}
    252
    253	if (!pic32s->master->cur_msg) {
    254		pic32_err_stop(pic32s, "err_irq: no mesg");
    255		return IRQ_NONE;
    256	}
    257
    258	return IRQ_NONE;
    259}
    260
    261static irqreturn_t pic32_spi_rx_irq(int irq, void *dev_id)
    262{
    263	struct pic32_spi *pic32s = dev_id;
    264
    265	pic32s->rx_fifo(pic32s);
    266
    267	/* rx complete ? */
    268	if (pic32s->rx_end == pic32s->rx) {
    269		/* disable all interrupts */
    270		disable_irq_nosync(pic32s->fault_irq);
    271		disable_irq_nosync(pic32s->rx_irq);
    272
    273		/* complete current xfer */
    274		complete(&pic32s->xfer_done);
    275	}
    276
    277	return IRQ_HANDLED;
    278}
    279
    280static irqreturn_t pic32_spi_tx_irq(int irq, void *dev_id)
    281{
    282	struct pic32_spi *pic32s = dev_id;
    283
    284	pic32s->tx_fifo(pic32s);
    285
    286	/* tx complete? disable tx interrupt */
    287	if (pic32s->tx_end == pic32s->tx)
    288		disable_irq_nosync(pic32s->tx_irq);
    289
    290	return IRQ_HANDLED;
    291}
    292
    293static void pic32_spi_dma_rx_notify(void *data)
    294{
    295	struct pic32_spi *pic32s = data;
    296
    297	complete(&pic32s->xfer_done);
    298}
    299
    300static int pic32_spi_dma_transfer(struct pic32_spi *pic32s,
    301				  struct spi_transfer *xfer)
    302{
    303	struct spi_master *master = pic32s->master;
    304	struct dma_async_tx_descriptor *desc_rx;
    305	struct dma_async_tx_descriptor *desc_tx;
    306	dma_cookie_t cookie;
    307	int ret;
    308
    309	if (!master->dma_rx || !master->dma_tx)
    310		return -ENODEV;
    311
    312	desc_rx = dmaengine_prep_slave_sg(master->dma_rx,
    313					  xfer->rx_sg.sgl,
    314					  xfer->rx_sg.nents,
    315					  DMA_DEV_TO_MEM,
    316					  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
    317	if (!desc_rx) {
    318		ret = -EINVAL;
    319		goto err_dma;
    320	}
    321
    322	desc_tx = dmaengine_prep_slave_sg(master->dma_tx,
    323					  xfer->tx_sg.sgl,
    324					  xfer->tx_sg.nents,
    325					  DMA_MEM_TO_DEV,
    326					  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
    327	if (!desc_tx) {
    328		ret = -EINVAL;
    329		goto err_dma;
    330	}
    331
    332	/* Put the callback on the RX transfer, which should finish last */
    333	desc_rx->callback = pic32_spi_dma_rx_notify;
    334	desc_rx->callback_param = pic32s;
    335
    336	cookie = dmaengine_submit(desc_rx);
    337	ret = dma_submit_error(cookie);
    338	if (ret)
    339		goto err_dma;
    340
    341	cookie = dmaengine_submit(desc_tx);
    342	ret = dma_submit_error(cookie);
    343	if (ret)
    344		goto err_dma_tx;
    345
    346	dma_async_issue_pending(master->dma_rx);
    347	dma_async_issue_pending(master->dma_tx);
    348
    349	return 0;
    350
    351err_dma_tx:
    352	dmaengine_terminate_all(master->dma_rx);
    353err_dma:
    354	return ret;
    355}
    356
    357static int pic32_spi_dma_config(struct pic32_spi *pic32s, u32 dma_width)
    358{
    359	int buf_offset = offsetof(struct pic32_spi_regs, buf);
    360	struct spi_master *master = pic32s->master;
    361	struct dma_slave_config cfg;
    362	int ret;
    363
    364	memset(&cfg, 0, sizeof(cfg));
    365	cfg.device_fc = true;
    366	cfg.src_addr = pic32s->dma_base + buf_offset;
    367	cfg.dst_addr = pic32s->dma_base + buf_offset;
    368	cfg.src_maxburst = pic32s->fifo_n_elm / 2; /* fill one-half */
    369	cfg.dst_maxburst = pic32s->fifo_n_elm / 2; /* drain one-half */
    370	cfg.src_addr_width = dma_width;
    371	cfg.dst_addr_width = dma_width;
    372	/* tx channel */
    373	cfg.direction = DMA_MEM_TO_DEV;
    374	ret = dmaengine_slave_config(master->dma_tx, &cfg);
    375	if (ret) {
    376		dev_err(&master->dev, "tx channel setup failed\n");
    377		return ret;
    378	}
    379	/* rx channel */
    380	cfg.direction = DMA_DEV_TO_MEM;
    381	ret = dmaengine_slave_config(master->dma_rx, &cfg);
    382	if (ret)
    383		dev_err(&master->dev, "rx channel setup failed\n");
    384
    385	return ret;
    386}
    387
    388static int pic32_spi_set_word_size(struct pic32_spi *pic32s, u8 bits_per_word)
    389{
    390	enum dma_slave_buswidth dmawidth;
    391	u32 buswidth, v;
    392
    393	switch (bits_per_word) {
    394	case 8:
    395		pic32s->rx_fifo = pic32_spi_rx_byte;
    396		pic32s->tx_fifo = pic32_spi_tx_byte;
    397		buswidth = PIC32_BPW_8;
    398		dmawidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
    399		break;
    400	case 16:
    401		pic32s->rx_fifo = pic32_spi_rx_word;
    402		pic32s->tx_fifo = pic32_spi_tx_word;
    403		buswidth = PIC32_BPW_16;
    404		dmawidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
    405		break;
    406	case 32:
    407		pic32s->rx_fifo = pic32_spi_rx_dword;
    408		pic32s->tx_fifo = pic32_spi_tx_dword;
    409		buswidth = PIC32_BPW_32;
    410		dmawidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
    411		break;
    412	default:
    413		/* not supported */
    414		return -EINVAL;
    415	}
    416
    417	/* calculate maximum number of words fifos can hold */
    418	pic32s->fifo_n_elm = DIV_ROUND_UP(pic32s->fifo_n_byte,
    419					  bits_per_word / 8);
    420	/* set word size */
    421	v = readl(&pic32s->regs->ctrl);
    422	v &= ~(CTRL_BPW_MASK << CTRL_BPW_SHIFT);
    423	v |= buswidth << CTRL_BPW_SHIFT;
    424	writel(v, &pic32s->regs->ctrl);
    425
    426	/* re-configure dma width, if required */
    427	if (test_bit(PIC32F_DMA_PREP, &pic32s->flags))
    428		pic32_spi_dma_config(pic32s, dmawidth);
    429
    430	return 0;
    431}
    432
    433static int pic32_spi_prepare_hardware(struct spi_master *master)
    434{
    435	struct pic32_spi *pic32s = spi_master_get_devdata(master);
    436
    437	pic32_spi_enable(pic32s);
    438
    439	return 0;
    440}
    441
    442static int pic32_spi_prepare_message(struct spi_master *master,
    443				     struct spi_message *msg)
    444{
    445	struct pic32_spi *pic32s = spi_master_get_devdata(master);
    446	struct spi_device *spi = msg->spi;
    447	u32 val;
    448
    449	/* set device specific bits_per_word */
    450	if (pic32s->bits_per_word != spi->bits_per_word) {
    451		pic32_spi_set_word_size(pic32s, spi->bits_per_word);
    452		pic32s->bits_per_word = spi->bits_per_word;
    453	}
    454
    455	/* device specific speed change */
    456	if (pic32s->speed_hz != spi->max_speed_hz) {
    457		pic32_spi_set_clk_rate(pic32s, spi->max_speed_hz);
    458		pic32s->speed_hz = spi->max_speed_hz;
    459	}
    460
    461	/* device specific mode change */
    462	if (pic32s->mode != spi->mode) {
    463		val = readl(&pic32s->regs->ctrl);
    464		/* active low */
    465		if (spi->mode & SPI_CPOL)
    466			val |= CTRL_CKP;
    467		else
    468			val &= ~CTRL_CKP;
    469		/* tx on rising edge */
    470		if (spi->mode & SPI_CPHA)
    471			val &= ~CTRL_CKE;
    472		else
    473			val |= CTRL_CKE;
    474
    475		/* rx at end of tx */
    476		val |= CTRL_SMP;
    477		writel(val, &pic32s->regs->ctrl);
    478		pic32s->mode = spi->mode;
    479	}
    480
    481	return 0;
    482}
    483
    484static bool pic32_spi_can_dma(struct spi_master *master,
    485			      struct spi_device *spi,
    486			      struct spi_transfer *xfer)
    487{
    488	struct pic32_spi *pic32s = spi_master_get_devdata(master);
    489
    490	/* skip DMA for small transfers to avoid the setup overhead */
    491	return (xfer->len >= PIC32_DMA_LEN_MIN) &&
    492	       test_bit(PIC32F_DMA_PREP, &pic32s->flags);
    493}
    494
    495static int pic32_spi_one_transfer(struct spi_master *master,
    496				  struct spi_device *spi,
    497				  struct spi_transfer *transfer)
    498{
    499	struct pic32_spi *pic32s;
    500	bool dma_issued = false;
    501	unsigned long timeout;
    502	int ret;
    503
    504	pic32s = spi_master_get_devdata(master);
    505
    506	/* handle transfer specific word size change */
    507	if (transfer->bits_per_word &&
    508	    (transfer->bits_per_word != pic32s->bits_per_word)) {
    509		ret = pic32_spi_set_word_size(pic32s, transfer->bits_per_word);
    510		if (ret)
    511			return ret;
    512		pic32s->bits_per_word = transfer->bits_per_word;
    513	}
    514
    515	/* handle transfer specific speed change */
    516	if (transfer->speed_hz && (transfer->speed_hz != pic32s->speed_hz)) {
    517		pic32_spi_set_clk_rate(pic32s, transfer->speed_hz);
    518		pic32s->speed_hz = transfer->speed_hz;
    519	}
    520
    521	reinit_completion(&pic32s->xfer_done);
    522
    523	/* transact in DMA mode */
    524	if (transfer->rx_sg.nents && transfer->tx_sg.nents) {
    525		ret = pic32_spi_dma_transfer(pic32s, transfer);
    526		if (ret) {
    527			dev_err(&spi->dev, "dma submit error\n");
    528			return ret;
    529		}
    530
    531		/* DMA issued */
    532		dma_issued = true;
    533	} else {
    534		/* set current transfer information */
    535		pic32s->tx = (const void *)transfer->tx_buf;
    536		pic32s->rx = (const void *)transfer->rx_buf;
    537		pic32s->tx_end = pic32s->tx + transfer->len;
    538		pic32s->rx_end = pic32s->rx + transfer->len;
    539		pic32s->len = transfer->len;
    540
    541		/* transact by interrupt-driven PIO */
    542		enable_irq(pic32s->fault_irq);
    543		enable_irq(pic32s->rx_irq);
    544		enable_irq(pic32s->tx_irq);
    545	}
    546
    547	/* wait for completion */
    548	timeout = wait_for_completion_timeout(&pic32s->xfer_done, 2 * HZ);
    549	if (timeout == 0) {
    550		dev_err(&spi->dev, "wait error/timed out\n");
    551		if (dma_issued) {
    552			dmaengine_terminate_all(master->dma_rx);
    553			dmaengine_terminate_all(master->dma_tx);
    554		}
    555		ret = -ETIMEDOUT;
    556	} else {
    557		ret = 0;
    558	}
    559
    560	return ret;
    561}
    562
    563static int pic32_spi_unprepare_message(struct spi_master *master,
    564				       struct spi_message *msg)
    565{
    566	/* nothing to do */
    567	return 0;
    568}
    569
    570static int pic32_spi_unprepare_hardware(struct spi_master *master)
    571{
    572	struct pic32_spi *pic32s = spi_master_get_devdata(master);
    573
    574	pic32_spi_disable(pic32s);
    575
    576	return 0;
    577}
    578
    579/* This may be called multiple times by the same spi device */
    580static int pic32_spi_setup(struct spi_device *spi)
    581{
    582	if (!spi->max_speed_hz) {
    583		dev_err(&spi->dev, "No max speed HZ parameter\n");
    584		return -EINVAL;
    585	}
    586
    587	/* The PIC32 SPI controller can drive /CS during a transfer based
    588	 * on the tx fifo fill-level: /CS stays asserted as long as the TX
    589	 * fifo is non-empty and is deasserted once it drains, indicating
    590	 * completion of the ongoing transfer. This can result in
    591	 * unreliable/erroneous SPI transactions.
    592	 * To avoid that, /CS is always handled by toggling a GPIO.
    593	 */
    594	if (!spi->cs_gpiod)
    595		return -EINVAL;
    596
    597	return 0;
    598}
    599
    600static void pic32_spi_cleanup(struct spi_device *spi)
    601{
    602	/* de-activate cs-gpio, gpiolib will handle inversion */
    603	gpiod_direction_output(spi->cs_gpiod, 0);
    604}
    605
    606static int pic32_spi_dma_prep(struct pic32_spi *pic32s, struct device *dev)
    607{
    608	struct spi_master *master = pic32s->master;
    609	int ret = 0;
    610
    611	master->dma_rx = dma_request_chan(dev, "spi-rx");
    612	if (IS_ERR(master->dma_rx)) {
    613		if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER)
    614			ret = -EPROBE_DEFER;
    615		else
    616			dev_warn(dev, "RX channel not found.\n");
    617
    618		master->dma_rx = NULL;
    619		goto out_err;
    620	}
    621
    622	master->dma_tx = dma_request_chan(dev, "spi-tx");
    623	if (IS_ERR(master->dma_tx)) {
    624		if (PTR_ERR(master->dma_tx) == -EPROBE_DEFER)
    625			ret = -EPROBE_DEFER;
    626		else
    627			dev_warn(dev, "TX channel not found.\n");
    628
    629		master->dma_tx = NULL;
    630		goto out_err;
    631	}
    632
    633	if (pic32_spi_dma_config(pic32s, DMA_SLAVE_BUSWIDTH_1_BYTE))
    634		goto out_err;
    635
    636	/* DMA chnls allocated and prepared */
    637	set_bit(PIC32F_DMA_PREP, &pic32s->flags);
    638
    639	return 0;
    640
    641out_err:
    642	if (master->dma_rx) {
    643		dma_release_channel(master->dma_rx);
    644		master->dma_rx = NULL;
    645	}
    646
    647	if (master->dma_tx) {
    648		dma_release_channel(master->dma_tx);
    649		master->dma_tx = NULL;
    650	}
    651
    652	return ret;
    653}
    654
    655static void pic32_spi_dma_unprep(struct pic32_spi *pic32s)
    656{
    657	if (!test_bit(PIC32F_DMA_PREP, &pic32s->flags))
    658		return;
    659
    660	clear_bit(PIC32F_DMA_PREP, &pic32s->flags);
    661	if (pic32s->master->dma_rx)
    662		dma_release_channel(pic32s->master->dma_rx);
    663
    664	if (pic32s->master->dma_tx)
    665		dma_release_channel(pic32s->master->dma_tx);
    666}
    667
    668static void pic32_spi_hw_init(struct pic32_spi *pic32s)
    669{
    670	u32 ctrl;
    671
    672	/* disable hardware */
    673	pic32_spi_disable(pic32s);
    674
    675	ctrl = readl(&pic32s->regs->ctrl);
    676	/* enable the enhanced 128-bit (16-byte) deep fifo */
    677	ctrl |= CTRL_ENHBUF;
    678	pic32s->fifo_n_byte = 16;
    679
    680	/* disable framing mode */
    681	ctrl &= ~CTRL_FRMEN;
    682
    683	/* enable master mode while disabled */
    684	ctrl |= CTRL_MSTEN;
    685
    686	/* set tx fifo threshold interrupt */
    687	ctrl &= ~(0x3 << CTRL_TX_INT_SHIFT);
    688	ctrl |= (TX_FIFO_HALF_EMPTY << CTRL_TX_INT_SHIFT);
    689
    690	/* set rx fifo threshold interrupt */
    691	ctrl &= ~(0x3 << CTRL_RX_INT_SHIFT);
    692	ctrl |= (RX_FIFO_NOT_EMPTY << CTRL_RX_INT_SHIFT);
    693
    694	/* select clk source */
    695	ctrl &= ~CTRL_MCLKSEL;
    696
    697	/* set manual /CS mode */
    698	ctrl &= ~CTRL_MSSEN;
    699
    700	writel(ctrl, &pic32s->regs->ctrl);
    701
    702	/* enable error reporting */
    703	ctrl = CTRL2_TX_UR_EN | CTRL2_RX_OV_EN | CTRL2_FRM_ERR_EN;
    704	writel(ctrl, &pic32s->regs->ctrl2_set);
    705}
    706
    707static int pic32_spi_hw_probe(struct platform_device *pdev,
    708			      struct pic32_spi *pic32s)
    709{
    710	struct resource *mem;
    711	int ret;
    712
    713	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    714	pic32s->regs = devm_ioremap_resource(&pdev->dev, mem);
    715	if (IS_ERR(pic32s->regs))
    716		return PTR_ERR(pic32s->regs);
    717
    718	pic32s->dma_base = mem->start;
    719
    720	/* get irq resources: err-irq, rx-irq, tx-irq */
    721	pic32s->fault_irq = platform_get_irq_byname(pdev, "fault");
    722	if (pic32s->fault_irq < 0)
    723		return pic32s->fault_irq;
    724
    725	pic32s->rx_irq = platform_get_irq_byname(pdev, "rx");
    726	if (pic32s->rx_irq < 0)
    727		return pic32s->rx_irq;
    728
    729	pic32s->tx_irq = platform_get_irq_byname(pdev, "tx");
    730	if (pic32s->tx_irq < 0)
    731		return pic32s->tx_irq;
    732
    733	/* get clock */
    734	pic32s->clk = devm_clk_get(&pdev->dev, "mck0");
    735	if (IS_ERR(pic32s->clk)) {
    736		dev_err(&pdev->dev, "clk not found\n");
    737		ret = PTR_ERR(pic32s->clk);
    738		goto err_unmap_mem;
    739	}
    740
    741	ret = clk_prepare_enable(pic32s->clk);
    742	if (ret)
    743		goto err_unmap_mem;
    744
    745	pic32_spi_hw_init(pic32s);
    746
    747	return 0;
    748
    749err_unmap_mem:
    750	dev_err(&pdev->dev, "%s failed, err %d\n", __func__, ret);
    751	return ret;
    752}
    753
    754static int pic32_spi_probe(struct platform_device *pdev)
    755{
    756	struct spi_master *master;
    757	struct pic32_spi *pic32s;
    758	int ret;
    759
    760	master = spi_alloc_master(&pdev->dev, sizeof(*pic32s));
    761	if (!master)
    762		return -ENOMEM;
    763
    764	pic32s = spi_master_get_devdata(master);
    765	pic32s->master = master;
    766
    767	ret = pic32_spi_hw_probe(pdev, pic32s);
    768	if (ret)
    769		goto err_master;
    770
    771	master->dev.of_node	= pdev->dev.of_node;
    772	master->mode_bits	= SPI_MODE_3 | SPI_MODE_0 | SPI_CS_HIGH;
    773	master->num_chipselect	= 1; /* single chip-select */
    774	master->max_speed_hz	= clk_get_rate(pic32s->clk);
    775	master->setup		= pic32_spi_setup;
    776	master->cleanup		= pic32_spi_cleanup;
    777	master->flags		= SPI_MASTER_MUST_TX | SPI_MASTER_MUST_RX;
    778	master->bits_per_word_mask	= SPI_BPW_MASK(8) | SPI_BPW_MASK(16) |
    779					  SPI_BPW_MASK(32);
    780	master->transfer_one		= pic32_spi_one_transfer;
    781	master->prepare_message		= pic32_spi_prepare_message;
    782	master->unprepare_message	= pic32_spi_unprepare_message;
    783	master->prepare_transfer_hardware	= pic32_spi_prepare_hardware;
    784	master->unprepare_transfer_hardware	= pic32_spi_unprepare_hardware;
    785	master->use_gpio_descriptors = true;
    786
    787	/* optional DMA support */
    788	ret = pic32_spi_dma_prep(pic32s, &pdev->dev);
    789	if (ret)
    790		goto err_bailout;
    791
    792	if (test_bit(PIC32F_DMA_PREP, &pic32s->flags))
    793		master->can_dma	= pic32_spi_can_dma;
    794
    795	init_completion(&pic32s->xfer_done);
    796	pic32s->mode = -1;
    797
    798	/* install irq handlers (with irq-disabled) */
    799	irq_set_status_flags(pic32s->fault_irq, IRQ_NOAUTOEN);
    800	ret = devm_request_irq(&pdev->dev, pic32s->fault_irq,
    801			       pic32_spi_fault_irq, IRQF_NO_THREAD,
    802			       dev_name(&pdev->dev), pic32s);
    803	if (ret < 0) {
    804		dev_err(&pdev->dev, "request fault-irq %d\n", pic32s->fault_irq);
    805		goto err_bailout;
    806	}
    807
    808	/* receive interrupt handler */
    809	irq_set_status_flags(pic32s->rx_irq, IRQ_NOAUTOEN);
    810	ret = devm_request_irq(&pdev->dev, pic32s->rx_irq,
    811			       pic32_spi_rx_irq, IRQF_NO_THREAD,
    812			       dev_name(&pdev->dev), pic32s);
    813	if (ret < 0) {
    814		dev_err(&pdev->dev, "request rx-irq %d\n", pic32s->rx_irq);
    815		goto err_bailout;
    816	}
    817
    818	/* transmit interrupt handler */
    819	irq_set_status_flags(pic32s->tx_irq, IRQ_NOAUTOEN);
    820	ret = devm_request_irq(&pdev->dev, pic32s->tx_irq,
    821			       pic32_spi_tx_irq, IRQF_NO_THREAD,
    822			       dev_name(&pdev->dev), pic32s);
    823	if (ret < 0) {
    824		dev_err(&pdev->dev, "request tx-irq %d\n", pic32s->tx_irq);
    825		goto err_bailout;
    826	}
    827
    828	/* register master */
    829	ret = devm_spi_register_master(&pdev->dev, master);
    830	if (ret) {
    831		dev_err(&master->dev, "failed registering spi master\n");
    832		goto err_bailout;
    833	}
    834
    835	platform_set_drvdata(pdev, pic32s);
    836
    837	return 0;
    838
    839err_bailout:
    840	pic32_spi_dma_unprep(pic32s);
    841	clk_disable_unprepare(pic32s->clk);
    842err_master:
    843	spi_master_put(master);
    844	return ret;
    845}
    846
    847static int pic32_spi_remove(struct platform_device *pdev)
    848{
    849	struct pic32_spi *pic32s;
    850
    851	pic32s = platform_get_drvdata(pdev);
    852	pic32_spi_disable(pic32s);
    853	clk_disable_unprepare(pic32s->clk);
    854	pic32_spi_dma_unprep(pic32s);
    855
    856	return 0;
    857}
    858
    859static const struct of_device_id pic32_spi_of_match[] = {
    860	{.compatible = "microchip,pic32mzda-spi",},
    861	{},
    862};
    863MODULE_DEVICE_TABLE(of, pic32_spi_of_match);
    864
    865static struct platform_driver pic32_spi_driver = {
    866	.driver = {
    867		.name = "spi-pic32",
    868		.of_match_table = of_match_ptr(pic32_spi_of_match),
    869	},
    870	.probe = pic32_spi_probe,
    871	.remove = pic32_spi_remove,
    872};
    873
    874module_platform_driver(pic32_spi_driver);
    875
    876MODULE_AUTHOR("Purna Chandra Mandal <purna.mandal@microchip.com>");
    877MODULE_DESCRIPTION("Microchip SPI driver for PIC32 SPI controller.");
    878MODULE_LICENSE("GPL v2");