cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

spi-at91-usart.c (16979B)


// SPDX-License-Identifier: GPL-2.0
//
// Driver for AT91 USART Controllers as SPI
//
// Copyright (C) 2018 Microchip Technology Inc.
//
// Author: Radu Pirea <radu.pirea@microchip.com>

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-direction.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/gpio/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include <linux/spi/spi.h>

#define US_CR			0x00
#define US_MR			0x04
#define US_IER			0x08
#define US_IDR			0x0C
#define US_CSR			0x14
#define US_RHR			0x18
#define US_THR			0x1C
#define US_BRGR			0x20
#define US_VERSION		0xFC

#define US_CR_RSTRX		BIT(2)
#define US_CR_RSTTX		BIT(3)
#define US_CR_RXEN		BIT(4)
#define US_CR_RXDIS		BIT(5)
#define US_CR_TXEN		BIT(6)
#define US_CR_TXDIS		BIT(7)

#define US_MR_SPI_MASTER	0x0E
#define US_MR_CHRL		GENMASK(7, 6)
#define US_MR_CPHA		BIT(8)
#define US_MR_CPOL		BIT(16)
#define US_MR_CLKO		BIT(18)
#define US_MR_WRDBT		BIT(20)
#define US_MR_LOOP		BIT(15)

#define US_IR_RXRDY		BIT(0)
#define US_IR_TXRDY		BIT(1)
#define US_IR_OVRE		BIT(5)

#define US_BRGR_SIZE		BIT(16)

#define US_MIN_CLK_DIV		0x06
#define US_MAX_CLK_DIV		BIT(16)

#define US_RESET		(US_CR_RSTRX | US_CR_RSTTX)
#define US_DISABLE		(US_CR_RXDIS | US_CR_TXDIS)
#define US_ENABLE		(US_CR_RXEN | US_CR_TXEN)
#define US_OVRE_RXRDY_IRQS	(US_IR_OVRE | US_IR_RXRDY)

#define US_INIT \
	(US_MR_SPI_MASTER | US_MR_CHRL | US_MR_CLKO | US_MR_WRDBT)
#define US_DMA_MIN_BYTES       16
#define US_DMA_TIMEOUT         (msecs_to_jiffies(1000))

/* Register access macros */
#define at91_usart_spi_readl(port, reg) \
	readl_relaxed((port)->regs + US_##reg)
#define at91_usart_spi_writel(port, reg, value) \
	writel_relaxed((value), (port)->regs + US_##reg)

#define at91_usart_spi_readb(port, reg) \
	readb_relaxed((port)->regs + US_##reg)
#define at91_usart_spi_writeb(port, reg, value) \
	writeb_relaxed((value), (port)->regs + US_##reg)

struct at91_usart_spi {
	struct platform_device  *mpdev;
	struct spi_transfer	*current_transfer;
	void __iomem		*regs;
	struct device		*dev;
	struct clk		*clk;

	struct completion	xfer_completion;

	/* used in interrupt to protect data reading */
	spinlock_t		lock;

	phys_addr_t		phybase;

	int			irq;
	unsigned int		current_tx_remaining_bytes;
	unsigned int		current_rx_remaining_bytes;

	u32			spi_clk;
	u32			status;

	bool			xfer_failed;
	bool			use_dma;
};

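/*
 * Completion callback for the RX DMA channel: re-enable the RXRDY
 * interrupt for later PIO use, mark the receive side as drained and
 * wake up the thread waiting on xfer_completion.
 */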
static void dma_callback(void *data)
{
	struct spi_controller   *ctlr = data;
	struct at91_usart_spi   *aus = spi_master_get_devdata(ctlr);

	at91_usart_spi_writel(aus, IER, US_IR_RXRDY);
	aus->current_rx_remaining_bytes = 0;
	complete(&aus->xfer_completion);
}

static bool at91_usart_spi_can_dma(struct spi_controller *ctrl,
				   struct spi_device *spi,
				   struct spi_transfer *xfer)
{
	struct at91_usart_spi *aus = spi_master_get_devdata(ctrl);

	return aus->use_dma && xfer->len >= US_DMA_MIN_BYTES;
}

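/*
 * Request the "tx" and "rx" DMA channels from the parent USART device
 * and configure them for single-byte accesses to THR/RHR. On failure,
 * any channel already acquired is released and the error is returned,
 * leaving use_dma unset.
 */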
static int at91_usart_spi_configure_dma(struct spi_controller *ctlr,
					struct at91_usart_spi *aus)
{
	struct dma_slave_config slave_config;
	struct device *dev = &aus->mpdev->dev;
	phys_addr_t phybase = aus->phybase;
	dma_cap_mask_t mask;
	int err = 0;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	ctlr->dma_tx = dma_request_chan(dev, "tx");
	if (IS_ERR_OR_NULL(ctlr->dma_tx)) {
		if (IS_ERR(ctlr->dma_tx)) {
			err = PTR_ERR(ctlr->dma_tx);
			goto at91_usart_spi_error_clear;
		}

		dev_dbg(dev,
			"DMA TX channel not available, SPI unable to use DMA\n");
		err = -EBUSY;
		goto at91_usart_spi_error_clear;
	}

	ctlr->dma_rx = dma_request_chan(dev, "rx");
	if (IS_ERR_OR_NULL(ctlr->dma_rx)) {
		if (IS_ERR(ctlr->dma_rx)) {
			err = PTR_ERR(ctlr->dma_rx);
			goto at91_usart_spi_error;
		}

		dev_dbg(dev,
			"DMA RX channel not available, SPI unable to use DMA\n");
		err = -EBUSY;
		goto at91_usart_spi_error;
	}

	slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	slave_config.dst_addr = (dma_addr_t)phybase + US_THR;
	slave_config.src_addr = (dma_addr_t)phybase + US_RHR;
	slave_config.src_maxburst = 1;
	slave_config.dst_maxburst = 1;
	slave_config.device_fc = false;

	slave_config.direction = DMA_DEV_TO_MEM;
	if (dmaengine_slave_config(ctlr->dma_rx, &slave_config)) {
		dev_err(&ctlr->dev,
			"failed to configure rx dma channel\n");
		err = -EINVAL;
		goto at91_usart_spi_error;
	}

	slave_config.direction = DMA_MEM_TO_DEV;
	if (dmaengine_slave_config(ctlr->dma_tx, &slave_config)) {
		dev_err(&ctlr->dev,
			"failed to configure tx dma channel\n");
		err = -EINVAL;
		goto at91_usart_spi_error;
	}

	aus->use_dma = true;
	return 0;

at91_usart_spi_error:
	if (!IS_ERR_OR_NULL(ctlr->dma_tx))
		dma_release_channel(ctlr->dma_tx);
	if (!IS_ERR_OR_NULL(ctlr->dma_rx))
		dma_release_channel(ctlr->dma_rx);
	ctlr->dma_tx = NULL;
	ctlr->dma_rx = NULL;

at91_usart_spi_error_clear:
	return err;
}

static void at91_usart_spi_release_dma(struct spi_controller *ctlr)
{
	if (ctlr->dma_rx)
		dma_release_channel(ctlr->dma_rx);
	if (ctlr->dma_tx)
		dma_release_channel(ctlr->dma_tx);
}

static void at91_usart_spi_stop_dma(struct spi_controller *ctlr)
{
	if (ctlr->dma_rx)
		dmaengine_terminate_all(ctlr->dma_rx);
	if (ctlr->dma_tx)
		dmaengine_terminate_all(ctlr->dma_tx);
}

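/*
 * Start a full-duplex DMA transfer: RXRDY is masked while DMA owns the
 * FIFO, slave descriptors are prepared for both directions and
 * completion is signalled from the RX callback. On any setup failure
 * the RX interrupt is re-enabled so the caller can fall back to PIO.
 */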
static int at91_usart_spi_dma_transfer(struct spi_controller *ctlr,
				       struct spi_transfer *xfer)
{
	struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
	struct dma_chan	 *rxchan = ctlr->dma_rx;
	struct dma_chan *txchan = ctlr->dma_tx;
	struct dma_async_tx_descriptor *rxdesc;
	struct dma_async_tx_descriptor *txdesc;
	dma_cookie_t cookie;

	/* Disable RX interrupt */
	at91_usart_spi_writel(aus, IDR, US_IR_RXRDY);

	rxdesc = dmaengine_prep_slave_sg(rxchan,
					 xfer->rx_sg.sgl,
					 xfer->rx_sg.nents,
					 DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT |
					 DMA_CTRL_ACK);
	if (!rxdesc)
		goto at91_usart_spi_err_dma;

	txdesc = dmaengine_prep_slave_sg(txchan,
					 xfer->tx_sg.sgl,
					 xfer->tx_sg.nents,
					 DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT |
					 DMA_CTRL_ACK);
	if (!txdesc)
		goto at91_usart_spi_err_dma;

	rxdesc->callback = dma_callback;
	rxdesc->callback_param = ctlr;

	cookie = rxdesc->tx_submit(rxdesc);
	if (dma_submit_error(cookie))
		goto at91_usart_spi_err_dma;

	cookie = txdesc->tx_submit(txdesc);
	if (dma_submit_error(cookie))
		goto at91_usart_spi_err_dma;

	rxchan->device->device_issue_pending(rxchan);
	txchan->device->device_issue_pending(txchan);

	return 0;

at91_usart_spi_err_dma:
	/* Enable RX interrupt if something fails and fall back to PIO */
	at91_usart_spi_writel(aus, IER, US_IR_RXRDY);
	at91_usart_spi_stop_dma(ctlr);

	return -ENOMEM;
}

static unsigned long at91_usart_spi_dma_timeout(struct at91_usart_spi *aus)
{
	return wait_for_completion_timeout(&aus->xfer_completion,
					   US_DMA_TIMEOUT);
}

static inline u32 at91_usart_spi_tx_ready(struct at91_usart_spi *aus)
{
	return aus->status & US_IR_TXRDY;
}

static inline u32 at91_usart_spi_rx_ready(struct at91_usart_spi *aus)
{
	return aus->status & US_IR_RXRDY;
}

static inline u32 at91_usart_spi_check_overrun(struct at91_usart_spi *aus)
{
	return aus->status & US_IR_OVRE;
}

static inline u32 at91_usart_spi_read_status(struct at91_usart_spi *aus)
{
	aus->status = at91_usart_spi_readl(aus, CSR);
	return aus->status;
}

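/*
 * PIO helpers: move a single byte between the transfer buffers and the
 * THR/RHR registers; the TX side checks TXRDY itself, the RX side is
 * only called once RXRDY has been seen.
 */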
static inline void at91_usart_spi_tx(struct at91_usart_spi *aus)
{
	unsigned int len = aus->current_transfer->len;
	unsigned int remaining = aus->current_tx_remaining_bytes;
	const u8  *tx_buf = aus->current_transfer->tx_buf;

	if (!remaining)
		return;

	if (at91_usart_spi_tx_ready(aus)) {
		at91_usart_spi_writeb(aus, THR, tx_buf[len - remaining]);
		aus->current_tx_remaining_bytes--;
	}
}

static inline void at91_usart_spi_rx(struct at91_usart_spi *aus)
{
	int len = aus->current_transfer->len;
	int remaining = aus->current_rx_remaining_bytes;
	u8  *rx_buf = aus->current_transfer->rx_buf;

	if (!remaining)
		return;

	rx_buf[len - remaining] = at91_usart_spi_readb(aus, RHR);
	aus->current_rx_remaining_bytes--;
}

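/*
 * BRGR holds the peripheral-clock divider; rounding it up keeps the
 * generated SCK at or below the requested transfer speed. For example,
 * with a 132 MHz peripheral clock, a 10 MHz request programs
 * DIV_ROUND_UP(132000000, 10000000) = 14, i.e. SCK of about 9.43 MHz.
 */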
static inline void
at91_usart_spi_set_xfer_speed(struct at91_usart_spi *aus,
			      struct spi_transfer *xfer)
{
	at91_usart_spi_writel(aus, BRGR,
			      DIV_ROUND_UP(aus->spi_clk, xfer->speed_hz));
}

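/*
 * PIO interrupt handler: an overrun marks the transfer as failed and
 * masks the error/RXRDY interrupts; otherwise each RXRDY event drains
 * one byte from RHR under aus->lock.
 */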
static irqreturn_t at91_usart_spi_interrupt(int irq, void *dev_id)
{
	struct spi_controller *controller = dev_id;
	struct at91_usart_spi *aus = spi_master_get_devdata(controller);

	spin_lock(&aus->lock);
	at91_usart_spi_read_status(aus);

	if (at91_usart_spi_check_overrun(aus)) {
		aus->xfer_failed = true;
		at91_usart_spi_writel(aus, IDR, US_IR_OVRE | US_IR_RXRDY);
		spin_unlock(&aus->lock);
		return IRQ_HANDLED;
	}

	if (at91_usart_spi_rx_ready(aus)) {
		at91_usart_spi_rx(aus);
		spin_unlock(&aus->lock);
		return IRQ_HANDLED;
	}

	spin_unlock(&aus->lock);

	return IRQ_NONE;
}

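/*
 * Translate the SPI mode flags (CPOL/CPHA/LOOP) into a mode-register
 * value and cache it in controller_state; the value is written to MR
 * when the next message is prepared.
 */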
static int at91_usart_spi_setup(struct spi_device *spi)
{
	struct at91_usart_spi *aus = spi_master_get_devdata(spi->controller);
	u32 *ausd = spi->controller_state;
	unsigned int mr = at91_usart_spi_readl(aus, MR);

	if (spi->mode & SPI_CPOL)
		mr |= US_MR_CPOL;
	else
		mr &= ~US_MR_CPOL;

	if (spi->mode & SPI_CPHA)
		mr |= US_MR_CPHA;
	else
		mr &= ~US_MR_CPHA;

	if (spi->mode & SPI_LOOP)
		mr |= US_MR_LOOP;
	else
		mr &= ~US_MR_LOOP;

	if (!ausd) {
		ausd = kzalloc(sizeof(*ausd), GFP_KERNEL);
		if (!ausd)
			return -ENOMEM;

		spi->controller_state = ausd;
	}

	*ausd = mr;

	dev_dbg(&spi->dev,
		"setup: bpw %u mode 0x%x -> mr %d %08x\n",
		spi->bits_per_word, spi->mode, spi->chip_select, mr);

	return 0;
}

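/*
 * Execute one transfer: transfers that qualify for DMA (at least
 * US_DMA_MIN_BYTES with channels available) block on the completion
 * with a timeout; everything else is pushed out byte by byte in PIO
 * with the RX side handled by the interrupt handler.
 */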
static int at91_usart_spi_transfer_one(struct spi_controller *ctlr,
				       struct spi_device *spi,
				       struct spi_transfer *xfer)
{
	struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
	unsigned long dma_timeout = 0;
	int ret = 0;

	at91_usart_spi_set_xfer_speed(aus, xfer);
	aus->xfer_failed = false;
	aus->current_transfer = xfer;
	aus->current_tx_remaining_bytes = xfer->len;
	aus->current_rx_remaining_bytes = xfer->len;

	while ((aus->current_tx_remaining_bytes ||
		aus->current_rx_remaining_bytes) && !aus->xfer_failed) {
		reinit_completion(&aus->xfer_completion);
		if (at91_usart_spi_can_dma(ctlr, spi, xfer) &&
		    !ret) {
			ret = at91_usart_spi_dma_transfer(ctlr, xfer);
			if (ret)
				continue;

			dma_timeout = at91_usart_spi_dma_timeout(aus);

			if (WARN_ON(dma_timeout == 0)) {
				dev_err(&spi->dev, "DMA transfer timeout\n");
				return -EIO;
			}
			aus->current_tx_remaining_bytes = 0;
		} else {
			at91_usart_spi_read_status(aus);
			at91_usart_spi_tx(aus);
		}

		cpu_relax();
	}

	if (aus->xfer_failed) {
		dev_err(aus->dev, "Overrun!\n");
		return -EIO;
	}

	return 0;
}

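/*
 * Enable the receiver and transmitter, unmask the overrun/RXRDY
 * interrupts and apply the mode-register value cached by setup().
 */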
static int at91_usart_spi_prepare_message(struct spi_controller *ctlr,
					  struct spi_message *message)
{
	struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
	struct spi_device *spi = message->spi;
	u32 *ausd = spi->controller_state;

	at91_usart_spi_writel(aus, CR, US_ENABLE);
	at91_usart_spi_writel(aus, IER, US_OVRE_RXRDY_IRQS);
	at91_usart_spi_writel(aus, MR, *ausd);

	return 0;
}

static int at91_usart_spi_unprepare_message(struct spi_controller *ctlr,
					    struct spi_message *message)
{
	struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);

	at91_usart_spi_writel(aus, CR, US_RESET | US_DISABLE);
	at91_usart_spi_writel(aus, IDR, US_OVRE_RXRDY_IRQS);

	return 0;
}

static void at91_usart_spi_cleanup(struct spi_device *spi)
{
	u32 *ausd = spi->controller_state;

	spi->controller_state = NULL;
	kfree(ausd);
}

static void at91_usart_spi_init(struct at91_usart_spi *aus)
{
	at91_usart_spi_writel(aus, MR, US_INIT);
	at91_usart_spi_writel(aus, CR, US_RESET | US_DISABLE);
}

static int at91_usart_gpio_setup(struct platform_device *pdev)
{
	struct gpio_descs *cs_gpios;

	cs_gpios = devm_gpiod_get_array_optional(&pdev->dev, "cs", GPIOD_OUT_LOW);

	if (IS_ERR(cs_gpios))
		return PTR_ERR(cs_gpios);

	return 0;
}

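/*
 * The register window, IRQ and peripheral clock belong to the parent
 * USART device, so probe looks all three up via pdev->dev.parent
 * before setting up and registering the SPI controller.
 */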
static int at91_usart_spi_probe(struct platform_device *pdev)
{
	struct resource *regs;
	struct spi_controller *controller;
	struct at91_usart_spi *aus;
	struct clk *clk;
	int irq;
	int ret;

	regs = platform_get_resource(to_platform_device(pdev->dev.parent),
				     IORESOURCE_MEM, 0);
	if (!regs)
		return -EINVAL;

	irq = platform_get_irq(to_platform_device(pdev->dev.parent), 0);
	if (irq < 0)
		return irq;

	clk = devm_clk_get(pdev->dev.parent, "usart");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = -ENOMEM;
	controller = spi_alloc_master(&pdev->dev, sizeof(*aus));
	if (!controller)
		goto at91_usart_spi_probe_fail;

	ret = at91_usart_gpio_setup(pdev);
	if (ret)
		goto at91_usart_spi_probe_fail;

	controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_CS_HIGH;
	controller->dev.of_node = pdev->dev.parent->of_node;
	controller->bits_per_word_mask = SPI_BPW_MASK(8);
	controller->setup = at91_usart_spi_setup;
	controller->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
	controller->transfer_one = at91_usart_spi_transfer_one;
	controller->prepare_message = at91_usart_spi_prepare_message;
	controller->unprepare_message = at91_usart_spi_unprepare_message;
	controller->can_dma = at91_usart_spi_can_dma;
	controller->cleanup = at91_usart_spi_cleanup;
	controller->max_speed_hz = DIV_ROUND_UP(clk_get_rate(clk),
						US_MIN_CLK_DIV);
	controller->min_speed_hz = DIV_ROUND_UP(clk_get_rate(clk),
						US_MAX_CLK_DIV);
	platform_set_drvdata(pdev, controller);

	aus = spi_master_get_devdata(controller);

	aus->dev = &pdev->dev;
	aus->regs = devm_ioremap_resource(&pdev->dev, regs);
	if (IS_ERR(aus->regs)) {
		ret = PTR_ERR(aus->regs);
		goto at91_usart_spi_probe_fail;
	}

	aus->irq = irq;
	aus->clk = clk;

	ret = devm_request_irq(&pdev->dev, irq, at91_usart_spi_interrupt, 0,
			       dev_name(&pdev->dev), controller);
	if (ret)
		goto at91_usart_spi_probe_fail;

	ret = clk_prepare_enable(clk);
	if (ret)
		goto at91_usart_spi_probe_fail;

	aus->spi_clk = clk_get_rate(clk);
	at91_usart_spi_init(aus);

	aus->phybase = regs->start;

	aus->mpdev = to_platform_device(pdev->dev.parent);

	ret = at91_usart_spi_configure_dma(controller, aus);
	if (ret)
		goto at91_usart_fail_dma;

	spin_lock_init(&aus->lock);
	init_completion(&aus->xfer_completion);

	ret = devm_spi_register_master(&pdev->dev, controller);
	if (ret)
		goto at91_usart_fail_register_master;

	dev_info(&pdev->dev,
		 "AT91 USART SPI Controller version 0x%x at %pa (irq %d)\n",
		 at91_usart_spi_readl(aus, VERSION),
		 &regs->start, irq);

	return 0;

at91_usart_fail_register_master:
	at91_usart_spi_release_dma(controller);
at91_usart_fail_dma:
	clk_disable_unprepare(clk);
at91_usart_spi_probe_fail:
	spi_master_put(controller);
	return ret;
}

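/*
 * Power management: runtime suspend gates the peripheral clock and
 * selects the sleep pinctrl state; system suspend additionally
 * quiesces the controller, which is reinitialized on resume.
 */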
__maybe_unused static int at91_usart_spi_runtime_suspend(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);

	clk_disable_unprepare(aus->clk);
	pinctrl_pm_select_sleep_state(dev);

	return 0;
}

__maybe_unused static int at91_usart_spi_runtime_resume(struct device *dev)
{
	struct spi_controller *ctrl = dev_get_drvdata(dev);
	struct at91_usart_spi *aus = spi_master_get_devdata(ctrl);

	pinctrl_pm_select_default_state(dev);

	return clk_prepare_enable(aus->clk);
}

__maybe_unused static int at91_usart_spi_suspend(struct device *dev)
{
	struct spi_controller *ctrl = dev_get_drvdata(dev);
	int ret;

	ret = spi_controller_suspend(ctrl);
	if (ret)
		return ret;

	if (!pm_runtime_suspended(dev))
		at91_usart_spi_runtime_suspend(dev);

	return 0;
}

__maybe_unused static int at91_usart_spi_resume(struct device *dev)
{
	struct spi_controller *ctrl = dev_get_drvdata(dev);
	struct at91_usart_spi *aus = spi_master_get_devdata(ctrl);
	int ret;

	if (!pm_runtime_suspended(dev)) {
		ret = at91_usart_spi_runtime_resume(dev);
		if (ret)
			return ret;
	}

	at91_usart_spi_init(aus);

	return spi_controller_resume(ctrl);
}

static int at91_usart_spi_remove(struct platform_device *pdev)
{
	struct spi_controller *ctlr = platform_get_drvdata(pdev);
	struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);

	at91_usart_spi_release_dma(ctlr);
	clk_disable_unprepare(aus->clk);

	return 0;
}

static const struct dev_pm_ops at91_usart_spi_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(at91_usart_spi_suspend, at91_usart_spi_resume)
	SET_RUNTIME_PM_OPS(at91_usart_spi_runtime_suspend,
			   at91_usart_spi_runtime_resume, NULL)
};

static struct platform_driver at91_usart_spi_driver = {
	.driver = {
		.name = "at91_usart_spi",
		.pm = &at91_usart_spi_pm_ops,
	},
	.probe = at91_usart_spi_probe,
	.remove = at91_usart_spi_remove,
};

module_platform_driver(at91_usart_spi_driver);

MODULE_DESCRIPTION("Microchip AT91 USART SPI Controller driver");
MODULE_AUTHOR("Radu Pirea <radu.pirea@microchip.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:at91_usart_spi");