cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

spi-lantiq-ssc.c (28051B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * Copyright (C) 2011-2015 Daniel Schwierzeck <daniel.schwierzeck@gmail.com>
      4 * Copyright (C) 2016 Hauke Mehrtens <hauke@hauke-m.de>
      5 */
      6
      7#include <linux/kernel.h>
      8#include <linux/module.h>
      9#include <linux/of_device.h>
     10#include <linux/clk.h>
     11#include <linux/io.h>
     12#include <linux/delay.h>
     13#include <linux/interrupt.h>
     14#include <linux/sched.h>
     15#include <linux/completion.h>
     16#include <linux/spinlock.h>
     17#include <linux/err.h>
     18#include <linux/pm_runtime.h>
     19#include <linux/spi/spi.h>
     20
     21#ifdef CONFIG_LANTIQ
     22#include <lantiq_soc.h>
     23#endif
     24
     25#define LTQ_SPI_RX_IRQ_NAME	"spi_rx"
     26#define LTQ_SPI_TX_IRQ_NAME	"spi_tx"
     27#define LTQ_SPI_ERR_IRQ_NAME	"spi_err"
     28#define LTQ_SPI_FRM_IRQ_NAME	"spi_frm"
     29
     30#define LTQ_SPI_CLC		0x00
     31#define LTQ_SPI_PISEL		0x04
     32#define LTQ_SPI_ID		0x08
     33#define LTQ_SPI_CON		0x10
     34#define LTQ_SPI_STAT		0x14
     35#define LTQ_SPI_WHBSTATE	0x18
     36#define LTQ_SPI_TB		0x20
     37#define LTQ_SPI_RB		0x24
     38#define LTQ_SPI_RXFCON		0x30
     39#define LTQ_SPI_TXFCON		0x34
     40#define LTQ_SPI_FSTAT		0x38
     41#define LTQ_SPI_BRT		0x40
     42#define LTQ_SPI_BRSTAT		0x44
     43#define LTQ_SPI_SFCON		0x60
     44#define LTQ_SPI_SFSTAT		0x64
     45#define LTQ_SPI_GPOCON		0x70
     46#define LTQ_SPI_GPOSTAT		0x74
     47#define LTQ_SPI_FPGO		0x78
     48#define LTQ_SPI_RXREQ		0x80
     49#define LTQ_SPI_RXCNT		0x84
     50#define LTQ_SPI_DMACON		0xec
     51#define LTQ_SPI_IRNEN		0xf4
     52
     53#define LTQ_SPI_CLC_SMC_S	16	/* Clock divider for sleep mode */
     54#define LTQ_SPI_CLC_SMC_M	(0xFF << LTQ_SPI_CLC_SMC_S)
     55#define LTQ_SPI_CLC_RMC_S	8	/* Clock divider for normal run mode */
     56#define LTQ_SPI_CLC_RMC_M	(0xFF << LTQ_SPI_CLC_RMC_S)
     57#define LTQ_SPI_CLC_DISS	BIT(1)	/* Disable status bit */
     58#define LTQ_SPI_CLC_DISR	BIT(0)	/* Disable request bit */
     59
     60#define LTQ_SPI_ID_TXFS_S	24	/* Implemented TX FIFO size */
     61#define LTQ_SPI_ID_RXFS_S	16	/* Implemented RX FIFO size */
     62#define LTQ_SPI_ID_MOD_S	8	/* Module ID */
     63#define LTQ_SPI_ID_MOD_M	(0xff << LTQ_SPI_ID_MOD_S)
     64#define LTQ_SPI_ID_CFG_S	5	/* DMA interface support */
     65#define LTQ_SPI_ID_CFG_M	(1 << LTQ_SPI_ID_CFG_S)
     66#define LTQ_SPI_ID_REV_M	0x1F	/* Hardware revision number */
     67
     68#define LTQ_SPI_CON_BM_S	16	/* Data width selection */
     69#define LTQ_SPI_CON_BM_M	(0x1F << LTQ_SPI_CON_BM_S)
     70#define LTQ_SPI_CON_EM		BIT(24)	/* Echo mode */
     71#define LTQ_SPI_CON_IDLE	BIT(23)	/* Idle bit value */
     72#define LTQ_SPI_CON_ENBV	BIT(22)	/* Enable byte valid control */
     73#define LTQ_SPI_CON_RUEN	BIT(12)	/* Receive underflow error enable */
     74#define LTQ_SPI_CON_TUEN	BIT(11)	/* Transmit underflow error enable */
     75#define LTQ_SPI_CON_AEN		BIT(10)	/* Abort error enable */
     76#define LTQ_SPI_CON_REN		BIT(9)	/* Receive overflow error enable */
     77#define LTQ_SPI_CON_TEN		BIT(8)	/* Transmit overflow error enable */
     78#define LTQ_SPI_CON_LB		BIT(7)	/* Loopback control */
     79#define LTQ_SPI_CON_PO		BIT(6)	/* Clock polarity control */
     80#define LTQ_SPI_CON_PH		BIT(5)	/* Clock phase control */
     81#define LTQ_SPI_CON_HB		BIT(4)	/* Heading control */
     82#define LTQ_SPI_CON_RXOFF	BIT(1)	/* Switch receiver off */
     83#define LTQ_SPI_CON_TXOFF	BIT(0)	/* Switch transmitter off */
     84
     85#define LTQ_SPI_STAT_RXBV_S	28
     86#define LTQ_SPI_STAT_RXBV_M	(0x7 << LTQ_SPI_STAT_RXBV_S)
     87#define LTQ_SPI_STAT_BSY	BIT(13)	/* Busy flag */
     88#define LTQ_SPI_STAT_RUE	BIT(12)	/* Receive underflow error flag */
     89#define LTQ_SPI_STAT_TUE	BIT(11)	/* Transmit underflow error flag */
     90#define LTQ_SPI_STAT_AE		BIT(10)	/* Abort error flag */
     91#define LTQ_SPI_STAT_RE		BIT(9)	/* Receive error flag */
     92#define LTQ_SPI_STAT_TE		BIT(8)	/* Transmit error flag */
     93#define LTQ_SPI_STAT_ME		BIT(7)	/* Mode error flag */
     94#define LTQ_SPI_STAT_MS		BIT(1)	/* Master/slave select bit */
     95#define LTQ_SPI_STAT_EN		BIT(0)	/* Enable bit */
     96#define LTQ_SPI_STAT_ERRORS	(LTQ_SPI_STAT_ME | LTQ_SPI_STAT_TE | \
     97				 LTQ_SPI_STAT_RE | LTQ_SPI_STAT_AE | \
     98				 LTQ_SPI_STAT_TUE | LTQ_SPI_STAT_RUE)
     99
    100#define LTQ_SPI_WHBSTATE_SETTUE	BIT(15)	/* Set transmit underflow error flag */
    101#define LTQ_SPI_WHBSTATE_SETAE	BIT(14)	/* Set abort error flag */
    102#define LTQ_SPI_WHBSTATE_SETRE	BIT(13)	/* Set receive error flag */
    103#define LTQ_SPI_WHBSTATE_SETTE	BIT(12)	/* Set transmit error flag */
    104#define LTQ_SPI_WHBSTATE_CLRTUE	BIT(11)	/* Clear transmit underflow error flag */
    105#define LTQ_SPI_WHBSTATE_CLRAE	BIT(10)	/* Clear abort error flag */
    106#define LTQ_SPI_WHBSTATE_CLRRE	BIT(9)	/* Clear receive error flag */
    107#define LTQ_SPI_WHBSTATE_CLRTE	BIT(8)	/* Clear transmit error flag */
    108#define LTQ_SPI_WHBSTATE_SETME	BIT(7)	/* Set mode error flag */
    109#define LTQ_SPI_WHBSTATE_CLRME	BIT(6)	/* Clear mode error flag */
    110#define LTQ_SPI_WHBSTATE_SETRUE	BIT(5)	/* Set receive underflow error flag */
    111#define LTQ_SPI_WHBSTATE_CLRRUE	BIT(4)	/* Clear receive underflow error flag */
    112#define LTQ_SPI_WHBSTATE_SETMS	BIT(3)	/* Set master select bit */
    113#define LTQ_SPI_WHBSTATE_CLRMS	BIT(2)	/* Clear master select bit */
    114#define LTQ_SPI_WHBSTATE_SETEN	BIT(1)	/* Set enable bit (operational mode) */
    115#define LTQ_SPI_WHBSTATE_CLREN	BIT(0)	/* Clear enable bit (config mode) */
    116#define LTQ_SPI_WHBSTATE_CLR_ERRORS	(LTQ_SPI_WHBSTATE_CLRRUE | \
    117					 LTQ_SPI_WHBSTATE_CLRME | \
    118					 LTQ_SPI_WHBSTATE_CLRTE | \
    119					 LTQ_SPI_WHBSTATE_CLRRE | \
    120					 LTQ_SPI_WHBSTATE_CLRAE | \
    121					 LTQ_SPI_WHBSTATE_CLRTUE)
    122
    123#define LTQ_SPI_RXFCON_RXFITL_S	8	/* FIFO interrupt trigger level */
    124#define LTQ_SPI_RXFCON_RXFLU	BIT(1)	/* FIFO flush */
    125#define LTQ_SPI_RXFCON_RXFEN	BIT(0)	/* FIFO enable */
    126
    127#define LTQ_SPI_TXFCON_TXFITL_S	8	/* FIFO interrupt trigger level */
    128#define LTQ_SPI_TXFCON_TXFLU	BIT(1)	/* FIFO flush */
    129#define LTQ_SPI_TXFCON_TXFEN	BIT(0)	/* FIFO enable */
    130
    131#define LTQ_SPI_FSTAT_RXFFL_S	0
    132#define LTQ_SPI_FSTAT_TXFFL_S	8
    133
    134#define LTQ_SPI_GPOCON_ISCSBN_S	8
    135#define LTQ_SPI_GPOCON_INVOUTN_S	0
    136
    137#define LTQ_SPI_FGPO_SETOUTN_S	8
    138#define LTQ_SPI_FGPO_CLROUTN_S	0
    139
    140#define LTQ_SPI_RXREQ_RXCNT_M	0xFFFF	/* Receive count value */
    141#define LTQ_SPI_RXCNT_TODO_M	0xFFFF	/* Receive to-do value */
    142
    143#define LTQ_SPI_IRNEN_TFI	BIT(4)	/* TX finished interrupt */
    144#define LTQ_SPI_IRNEN_F		BIT(3)	/* Frame end interrupt request */
    145#define LTQ_SPI_IRNEN_E		BIT(2)	/* Error end interrupt request */
    146#define LTQ_SPI_IRNEN_T_XWAY	BIT(1)	/* Transmit end interrupt request */
    147#define LTQ_SPI_IRNEN_R_XWAY	BIT(0)	/* Receive end interrupt request */
    148#define LTQ_SPI_IRNEN_R_XRX	BIT(1)	/* Receive end interrupt request */
    149#define LTQ_SPI_IRNEN_T_XRX	BIT(0)	/* Transmit end interrupt request */
    150#define LTQ_SPI_IRNEN_ALL	0x1F
    151
    152struct lantiq_ssc_spi;
    153
    154struct lantiq_ssc_hwcfg {
    155	int (*cfg_irq)(struct platform_device *pdev, struct lantiq_ssc_spi *spi);
    156	unsigned int	irnen_r;
    157	unsigned int	irnen_t;
    158	unsigned int	irncr;
    159	unsigned int	irnicr;
    160	bool		irq_ack;
    161	u32		fifo_size_mask;
    162};
    163
    164struct lantiq_ssc_spi {
    165	struct spi_master		*master;
    166	struct device			*dev;
    167	void __iomem			*regbase;
    168	struct clk			*spi_clk;
    169	struct clk			*fpi_clk;
    170	const struct lantiq_ssc_hwcfg	*hwcfg;
    171
    172	spinlock_t			lock;
    173	struct workqueue_struct		*wq;
    174	struct work_struct		work;
    175
    176	const u8			*tx;
    177	u8				*rx;
    178	unsigned int			tx_todo;
    179	unsigned int			rx_todo;
    180	unsigned int			bits_per_word;
    181	unsigned int			speed_hz;
    182	unsigned int			tx_fifo_size;
    183	unsigned int			rx_fifo_size;
    184	unsigned int			base_cs;
    185	unsigned int			fdx_tx_level;
    186};
    187
    188static u32 lantiq_ssc_readl(const struct lantiq_ssc_spi *spi, u32 reg)
    189{
    190	return __raw_readl(spi->regbase + reg);
    191}
    192
    193static void lantiq_ssc_writel(const struct lantiq_ssc_spi *spi, u32 val,
    194			      u32 reg)
    195{
    196	__raw_writel(val, spi->regbase + reg);
    197}
    198
    199static void lantiq_ssc_maskl(const struct lantiq_ssc_spi *spi, u32 clr,
    200			     u32 set, u32 reg)
    201{
    202	u32 val = __raw_readl(spi->regbase + reg);
    203
    204	val &= ~clr;
    205	val |= set;
    206	__raw_writel(val, spi->regbase + reg);
    207}
    208
    209static unsigned int tx_fifo_level(const struct lantiq_ssc_spi *spi)
    210{
    211	const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
    212	u32 fstat = lantiq_ssc_readl(spi, LTQ_SPI_FSTAT);
    213
    214	return (fstat >> LTQ_SPI_FSTAT_TXFFL_S) & hwcfg->fifo_size_mask;
    215}
    216
    217static unsigned int rx_fifo_level(const struct lantiq_ssc_spi *spi)
    218{
    219	const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
    220	u32 fstat = lantiq_ssc_readl(spi, LTQ_SPI_FSTAT);
    221
    222	return (fstat >> LTQ_SPI_FSTAT_RXFFL_S) & hwcfg->fifo_size_mask;
    223}
    224
    225static unsigned int tx_fifo_free(const struct lantiq_ssc_spi *spi)
    226{
    227	return spi->tx_fifo_size - tx_fifo_level(spi);
    228}
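
/*
 * Illustrative note (not part of the original driver): FSTAT packs both fill
 * levels into one register, RXFFL at bit 0 and TXFFL at bit 8. Assuming the
 * XWAY/XRX fifo_size_mask of 0x3F, a hypothetical FSTAT value of 0x0402
 * decodes to TXFFL = 4 and RXFFL = 2, so tx_fifo_free() above would return
 * tx_fifo_size - 4.
 */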
    229
    230static void rx_fifo_reset(const struct lantiq_ssc_spi *spi)
    231{
    232	u32 val = spi->rx_fifo_size << LTQ_SPI_RXFCON_RXFITL_S;
    233
    234	val |= LTQ_SPI_RXFCON_RXFEN | LTQ_SPI_RXFCON_RXFLU;
    235	lantiq_ssc_writel(spi, val, LTQ_SPI_RXFCON);
    236}
    237
    238static void tx_fifo_reset(const struct lantiq_ssc_spi *spi)
    239{
    240	u32 val = 1 << LTQ_SPI_TXFCON_TXFITL_S;
    241
    242	val |= LTQ_SPI_TXFCON_TXFEN | LTQ_SPI_TXFCON_TXFLU;
    243	lantiq_ssc_writel(spi, val, LTQ_SPI_TXFCON);
    244}
    245
    246static void rx_fifo_flush(const struct lantiq_ssc_spi *spi)
    247{
    248	lantiq_ssc_maskl(spi, 0, LTQ_SPI_RXFCON_RXFLU, LTQ_SPI_RXFCON);
    249}
    250
    251static void tx_fifo_flush(const struct lantiq_ssc_spi *spi)
    252{
    253	lantiq_ssc_maskl(spi, 0, LTQ_SPI_TXFCON_TXFLU, LTQ_SPI_TXFCON);
    254}
    255
    256static void hw_enter_config_mode(const struct lantiq_ssc_spi *spi)
    257{
    258	lantiq_ssc_writel(spi, LTQ_SPI_WHBSTATE_CLREN, LTQ_SPI_WHBSTATE);
    259}
    260
    261static void hw_enter_active_mode(const struct lantiq_ssc_spi *spi)
    262{
    263	lantiq_ssc_writel(spi, LTQ_SPI_WHBSTATE_SETEN, LTQ_SPI_WHBSTATE);
    264}
    265
    266static void hw_setup_speed_hz(const struct lantiq_ssc_spi *spi,
    267			      unsigned int max_speed_hz)
    268{
    269	u32 spi_clk, brt;
    270
    271	/*
    272	 * SPI module clock is derived from FPI bus clock, dependent on the
    273	 * divider value in CLC.RMC which is always set to 1.
    274	 *
    275	 *                 f_SPI
    276	 * baudrate = --------------
    277	 *             2 * (BR + 1)
    278	 */
    279	spi_clk = clk_get_rate(spi->fpi_clk) / 2;
    280
    281	if (max_speed_hz > spi_clk)
    282		brt = 0;
    283	else
    284		brt = spi_clk / max_speed_hz - 1;
    285
    286	if (brt > 0xFFFF)
    287		brt = 0xFFFF;
    288
    289	dev_dbg(spi->dev, "spi_clk %u, max_speed_hz %u, brt %u\n",
    290		spi_clk, max_speed_hz, brt);
    291
    292	lantiq_ssc_writel(spi, brt, LTQ_SPI_BRT);
    293}
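
/*
 * Worked example for the divider written above, assuming a hypothetical
 * 100 MHz FPI clock (the real rate comes from clk_get_rate(spi->fpi_clk)):
 * spi_clk = 100 MHz / 2 = 50 MHz, and max_speed_hz = 2 MHz gives
 * brt = 50000000 / 2000000 - 1 = 24, i.e. an effective bit clock of
 * 50 MHz / (24 + 1) = 2 MHz. The helper below is only a sketch of the
 * inverse mapping and is not used anywhere in this driver.
 */
static inline u32 lantiq_ssc_brt_to_hz(u32 fpi_hz, u32 brt)
{
	/* Inverse of hw_setup_speed_hz(): effective rate for a given BR */
	return (fpi_hz / 2) / (brt + 1);
}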
    294
    295static void hw_setup_bits_per_word(const struct lantiq_ssc_spi *spi,
    296				   unsigned int bits_per_word)
    297{
    298	u32 bm;
    299
    300	/* CON.BM value = bits_per_word - 1 */
    301	bm = (bits_per_word - 1) << LTQ_SPI_CON_BM_S;
    302
    303	lantiq_ssc_maskl(spi, LTQ_SPI_CON_BM_M, bm, LTQ_SPI_CON);
    304}
    305
    306static void hw_setup_clock_mode(const struct lantiq_ssc_spi *spi,
    307				unsigned int mode)
    308{
    309	u32 con_set = 0, con_clr = 0;
    310
    311	/*
    312	 * SPI mode mapping in CON register:
    313	 * Mode CPOL CPHA CON.PO CON.PH
    314	 *  0    0    0      0      1
    315	 *  1    0    1      0      0
    316	 *  2    1    0      1      1
    317	 *  3    1    1      1      0
    318	 */
    319	if (mode & SPI_CPHA)
    320		con_clr |= LTQ_SPI_CON_PH;
    321	else
    322		con_set |= LTQ_SPI_CON_PH;
    323
    324	if (mode & SPI_CPOL)
    325		con_set |= LTQ_SPI_CON_PO | LTQ_SPI_CON_IDLE;
    326	else
    327		con_clr |= LTQ_SPI_CON_PO | LTQ_SPI_CON_IDLE;
    328
    329	/* Set heading control */
    330	if (mode & SPI_LSB_FIRST)
    331		con_clr |= LTQ_SPI_CON_HB;
    332	else
    333		con_set |= LTQ_SPI_CON_HB;
    334
    335	/* Set loopback mode */
    336	if (mode & SPI_LOOP)
    337		con_set |= LTQ_SPI_CON_LB;
    338	else
    339		con_clr |= LTQ_SPI_CON_LB;
    340
    341	lantiq_ssc_maskl(spi, con_clr, con_set, LTQ_SPI_CON);
    342}
    343
    344static void lantiq_ssc_hw_init(const struct lantiq_ssc_spi *spi)
    345{
    346	const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
    347
    348	/*
    349	 * Set clock divider for run mode to 1 to
    350	 * run at same frequency as FPI bus
    351	 */
    352	lantiq_ssc_writel(spi, 1 << LTQ_SPI_CLC_RMC_S, LTQ_SPI_CLC);
    353
    354	/* Put controller into config mode */
    355	hw_enter_config_mode(spi);
    356
    357	/* Clear error flags */
    358	lantiq_ssc_maskl(spi, 0, LTQ_SPI_WHBSTATE_CLR_ERRORS, LTQ_SPI_WHBSTATE);
    359
    360	/* Enable error checking, disable TX/RX */
    361	lantiq_ssc_writel(spi, LTQ_SPI_CON_RUEN | LTQ_SPI_CON_AEN |
    362		LTQ_SPI_CON_TEN | LTQ_SPI_CON_REN | LTQ_SPI_CON_TXOFF |
    363		LTQ_SPI_CON_RXOFF, LTQ_SPI_CON);
    364
    365	/* Setup default SPI mode */
    366	hw_setup_bits_per_word(spi, spi->bits_per_word);
    367	hw_setup_clock_mode(spi, SPI_MODE_0);
    368
    369	/* Enable master mode and clear error flags */
    370	lantiq_ssc_writel(spi, LTQ_SPI_WHBSTATE_SETMS |
    371			       LTQ_SPI_WHBSTATE_CLR_ERRORS,
    372			       LTQ_SPI_WHBSTATE);
    373
    374	/* Reset GPIO/CS registers */
    375	lantiq_ssc_writel(spi, 0, LTQ_SPI_GPOCON);
    376	lantiq_ssc_writel(spi, 0xFF00, LTQ_SPI_FPGO);
    377
    378	/* Enable and flush FIFOs */
    379	rx_fifo_reset(spi);
    380	tx_fifo_reset(spi);
    381
    382	/* Enable interrupts */
    383	lantiq_ssc_writel(spi, hwcfg->irnen_t | hwcfg->irnen_r |
    384			  LTQ_SPI_IRNEN_E, LTQ_SPI_IRNEN);
    385}
    386
    387static int lantiq_ssc_setup(struct spi_device *spidev)
    388{
    389	struct spi_master *master = spidev->master;
    390	struct lantiq_ssc_spi *spi = spi_master_get_devdata(master);
    391	unsigned int cs = spidev->chip_select;
    392	u32 gpocon;
    393
    394	/* GPIOs are used for CS */
    395	if (spidev->cs_gpiod)
    396		return 0;
    397
    398	dev_dbg(spi->dev, "using internal chipselect %u\n", cs);
    399
    400	if (cs < spi->base_cs) {
    401		dev_err(spi->dev,
    402			"chipselect %i too small (min %i)\n", cs, spi->base_cs);
    403		return -EINVAL;
    404	}
    405
    406	/* set GPO pin to CS mode */
    407	gpocon = 1 << ((cs - spi->base_cs) + LTQ_SPI_GPOCON_ISCSBN_S);
    408
    409	/* invert GPO pin */
    410	if (spidev->mode & SPI_CS_HIGH)
    411		gpocon |= 1 << (cs - spi->base_cs);
    412
    413	lantiq_ssc_maskl(spi, 0, gpocon, LTQ_SPI_GPOCON);
    414
    415	return 0;
    416}
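
/*
 * Illustrative example for the chipselect setup above (values are
 * hypothetical): with base_cs = 1 and an internal chipselect of cs = 2,
 * gpocon gets bit (2 - 1) + 8 = 9 set (ISCSB mode for that pin) and, if the
 * device uses SPI_CS_HIGH, additionally bit (2 - 1) = 1 to invert the output.
 */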
    417
    418static int lantiq_ssc_prepare_message(struct spi_master *master,
    419				      struct spi_message *message)
    420{
    421	struct lantiq_ssc_spi *spi = spi_master_get_devdata(master);
    422
    423	hw_enter_config_mode(spi);
    424	hw_setup_clock_mode(spi, message->spi->mode);
    425	hw_enter_active_mode(spi);
    426
    427	return 0;
    428}
    429
    430static void hw_setup_transfer(struct lantiq_ssc_spi *spi,
    431			      struct spi_device *spidev, struct spi_transfer *t)
    432{
    433	unsigned int speed_hz = t->speed_hz;
    434	unsigned int bits_per_word = t->bits_per_word;
    435	u32 con;
    436
    437	if (bits_per_word != spi->bits_per_word ||
    438		speed_hz != spi->speed_hz) {
    439		hw_enter_config_mode(spi);
    440		hw_setup_speed_hz(spi, speed_hz);
    441		hw_setup_bits_per_word(spi, bits_per_word);
    442		hw_enter_active_mode(spi);
    443
    444		spi->speed_hz = speed_hz;
    445		spi->bits_per_word = bits_per_word;
    446	}
    447
    448	/* Configure transmitter and receiver */
    449	con = lantiq_ssc_readl(spi, LTQ_SPI_CON);
    450	if (t->tx_buf)
    451		con &= ~LTQ_SPI_CON_TXOFF;
    452	else
    453		con |= LTQ_SPI_CON_TXOFF;
    454
    455	if (t->rx_buf)
    456		con &= ~LTQ_SPI_CON_RXOFF;
    457	else
    458		con |= LTQ_SPI_CON_RXOFF;
    459
    460	lantiq_ssc_writel(spi, con, LTQ_SPI_CON);
    461}
    462
    463static int lantiq_ssc_unprepare_message(struct spi_master *master,
    464					struct spi_message *message)
    465{
    466	struct lantiq_ssc_spi *spi = spi_master_get_devdata(master);
    467
    468	flush_workqueue(spi->wq);
    469
    470	/* Disable transmitter and receiver while idle */
    471	lantiq_ssc_maskl(spi, 0, LTQ_SPI_CON_TXOFF | LTQ_SPI_CON_RXOFF,
    472			 LTQ_SPI_CON);
    473
    474	return 0;
    475}
    476
    477static void tx_fifo_write(struct lantiq_ssc_spi *spi)
    478{
    479	const u8 *tx8;
    480	const u16 *tx16;
    481	const u32 *tx32;
    482	u32 data;
    483	unsigned int tx_free = tx_fifo_free(spi);
    484
    485	spi->fdx_tx_level = 0;
    486	while (spi->tx_todo && tx_free) {
    487		switch (spi->bits_per_word) {
    488		case 2 ... 8:
    489			tx8 = spi->tx;
    490			data = *tx8;
    491			spi->tx_todo--;
    492			spi->tx++;
    493			break;
    494		case 16:
    495			tx16 = (u16 *) spi->tx;
    496			data = *tx16;
    497			spi->tx_todo -= 2;
    498			spi->tx += 2;
    499			break;
    500		case 32:
    501			tx32 = (u32 *) spi->tx;
    502			data = *tx32;
    503			spi->tx_todo -= 4;
    504			spi->tx += 4;
    505			break;
    506		default:
    507			WARN_ON(1);
    508			data = 0;
    509			break;
    510		}
    511
    512		lantiq_ssc_writel(spi, data, LTQ_SPI_TB);
    513		tx_free--;
    514		spi->fdx_tx_level++;
    515	}
    516}
    517
    518static void rx_fifo_read_full_duplex(struct lantiq_ssc_spi *spi)
    519{
    520	u8 *rx8;
    521	u16 *rx16;
    522	u32 *rx32;
    523	u32 data;
    524	unsigned int rx_fill = rx_fifo_level(spi);
    525
    526	/*
    527	 * Wait until all expected data has been shifted in,
    528	 * otherwise an RX overrun may occur.
    529	 */
    530	while (rx_fill != spi->fdx_tx_level)
    531		rx_fill = rx_fifo_level(spi);
    532
    533	while (rx_fill) {
    534		data = lantiq_ssc_readl(spi, LTQ_SPI_RB);
    535
    536		switch (spi->bits_per_word) {
    537		case 2 ... 8:
    538			rx8 = spi->rx;
    539			*rx8 = data;
    540			spi->rx_todo--;
    541			spi->rx++;
    542			break;
    543		case 16:
    544			rx16 = (u16 *) spi->rx;
    545			*rx16 = data;
    546			spi->rx_todo -= 2;
    547			spi->rx += 2;
    548			break;
    549		case 32:
    550			rx32 = (u32 *) spi->rx;
    551			*rx32 = data;
    552			spi->rx_todo -= 4;
    553			spi->rx += 4;
    554			break;
    555		default:
    556			WARN_ON(1);
    557			break;
    558		}
    559
    560		rx_fill--;
    561	}
    562}
    563
    564static void rx_fifo_read_half_duplex(struct lantiq_ssc_spi *spi)
    565{
    566	u32 data, *rx32;
    567	u8 *rx8;
    568	unsigned int rxbv, shift;
    569	unsigned int rx_fill = rx_fifo_level(spi);
    570
    571	/*
    572	 * In RX-only mode the bits-per-word value is ignored by the hardware;
    573	 * a value of 32 is used instead, so all 4 bytes of each FIFO entry
    574	 * must be read. If fewer than 4 RX bytes remain, the FIFO must be
    575	 * read differently. The number of received and valid bytes is
    576	 * indicated by the STAT.RXBV register field.
    577	 */
    578	while (rx_fill) {
    579		if (spi->rx_todo < 4)  {
    580			rxbv = (lantiq_ssc_readl(spi, LTQ_SPI_STAT) &
    581				LTQ_SPI_STAT_RXBV_M) >> LTQ_SPI_STAT_RXBV_S;
    582			data = lantiq_ssc_readl(spi, LTQ_SPI_RB);
    583
    584			shift = (rxbv - 1) * 8;
    585			rx8 = spi->rx;
    586
    587			while (rxbv) {
    588				*rx8++ = (data >> shift) & 0xFF;
    589				rxbv--;
    590				shift -= 8;
    591				spi->rx_todo--;
    592				spi->rx++;
    593			}
    594		} else {
    595			data = lantiq_ssc_readl(spi, LTQ_SPI_RB);
    596			rx32 = (u32 *) spi->rx;
    597
    598			*rx32++ = data;
    599			spi->rx_todo -= 4;
    600			spi->rx += 4;
    601		}
    602		rx_fill--;
    603	}
    604}
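
/*
 * Illustrative example of the tail read above (the data value is
 * hypothetical): with rx_todo = 3 and STAT.RXBV = 3, a FIFO word of
 * 0x00AABBCC is unpacked most significant byte first; shift starts at
 * (3 - 1) * 8 = 16, so the bytes 0xAA, 0xBB and 0xCC are stored in that
 * order and rx_todo drops to 0.
 */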
    605
    606static void rx_request(struct lantiq_ssc_spi *spi)
    607{
    608	unsigned int rxreq, rxreq_max;
    609
    610	/*
    611	 * To avoid receive overflows at high clock rates it is better to
    612	 * request only as many bytes as fit into the RX FIFO. This value
    613	 * depends on the FIFO size implemented in hardware.
    614	 */
    615	rxreq = spi->rx_todo;
    616	rxreq_max = spi->rx_fifo_size * 4;
    617	if (rxreq > rxreq_max)
    618		rxreq = rxreq_max;
    619
    620	lantiq_ssc_writel(spi, rxreq, LTQ_SPI_RXREQ);
    621}
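
/*
 * Illustrative example (the FIFO size is hypothetical): with an RX FIFO of
 * 8 entries, rxreq_max = 8 * 4 = 32 bytes. For rx_todo = 100 only 32 bytes
 * are requested here; the interrupt handler calls rx_request() again until
 * all remaining bytes have been received.
 */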
    622
    623static irqreturn_t lantiq_ssc_xmit_interrupt(int irq, void *data)
    624{
    625	struct lantiq_ssc_spi *spi = data;
    626	const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
    627	u32 val = lantiq_ssc_readl(spi, hwcfg->irncr);
    628
    629	spin_lock(&spi->lock);
    630	if (hwcfg->irq_ack)
    631		lantiq_ssc_writel(spi, val, hwcfg->irncr);
    632
    633	if (spi->tx) {
    634		if (spi->rx && spi->rx_todo)
    635			rx_fifo_read_full_duplex(spi);
    636
    637		if (spi->tx_todo)
    638			tx_fifo_write(spi);
    639		else if (!tx_fifo_level(spi))
    640			goto completed;
    641	} else if (spi->rx) {
    642		if (spi->rx_todo) {
    643			rx_fifo_read_half_duplex(spi);
    644
    645			if (spi->rx_todo)
    646				rx_request(spi);
    647			else
    648				goto completed;
    649		} else {
    650			goto completed;
    651		}
    652	}
    653
    654	spin_unlock(&spi->lock);
    655	return IRQ_HANDLED;
    656
    657completed:
    658	queue_work(spi->wq, &spi->work);
    659	spin_unlock(&spi->lock);
    660
    661	return IRQ_HANDLED;
    662}
    663
    664static irqreturn_t lantiq_ssc_err_interrupt(int irq, void *data)
    665{
    666	struct lantiq_ssc_spi *spi = data;
    667	const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
    668	u32 stat = lantiq_ssc_readl(spi, LTQ_SPI_STAT);
    669	u32 val = lantiq_ssc_readl(spi, hwcfg->irncr);
    670
    671	if (!(stat & LTQ_SPI_STAT_ERRORS))
    672		return IRQ_NONE;
    673
    674	spin_lock(&spi->lock);
    675	if (hwcfg->irq_ack)
    676		lantiq_ssc_writel(spi, val, hwcfg->irncr);
    677
    678	if (stat & LTQ_SPI_STAT_RUE)
    679		dev_err(spi->dev, "receive underflow error\n");
    680	if (stat & LTQ_SPI_STAT_TUE)
    681		dev_err(spi->dev, "transmit underflow error\n");
    682	if (stat & LTQ_SPI_STAT_AE)
    683		dev_err(spi->dev, "abort error\n");
    684	if (stat & LTQ_SPI_STAT_RE)
    685		dev_err(spi->dev, "receive overflow error\n");
    686	if (stat & LTQ_SPI_STAT_TE)
    687		dev_err(spi->dev, "transmit overflow error\n");
    688	if (stat & LTQ_SPI_STAT_ME)
    689		dev_err(spi->dev, "mode error\n");
    690
    691	/* Clear error flags */
    692	lantiq_ssc_maskl(spi, 0, LTQ_SPI_WHBSTATE_CLR_ERRORS, LTQ_SPI_WHBSTATE);
    693
    694	/* set bad status so it can be retried */
    695	if (spi->master->cur_msg)
    696		spi->master->cur_msg->status = -EIO;
    697	queue_work(spi->wq, &spi->work);
    698	spin_unlock(&spi->lock);
    699
    700	return IRQ_HANDLED;
    701}
    702
    703static irqreturn_t intel_lgm_ssc_isr(int irq, void *data)
    704{
    705	struct lantiq_ssc_spi *spi = data;
    706	const struct lantiq_ssc_hwcfg *hwcfg = spi->hwcfg;
    707	u32 val = lantiq_ssc_readl(spi, hwcfg->irncr);
    708
    709	if (!(val & LTQ_SPI_IRNEN_ALL))
    710		return IRQ_NONE;
    711
    712	if (val & LTQ_SPI_IRNEN_E)
    713		return lantiq_ssc_err_interrupt(irq, data);
    714
    715	if ((val & hwcfg->irnen_t) || (val & hwcfg->irnen_r))
    716		return lantiq_ssc_xmit_interrupt(irq, data);
    717
    718	return IRQ_HANDLED;
    719}
    720
    721static int transfer_start(struct lantiq_ssc_spi *spi, struct spi_device *spidev,
    722			  struct spi_transfer *t)
    723{
    724	unsigned long flags;
    725
    726	spin_lock_irqsave(&spi->lock, flags);
    727
    728	spi->tx = t->tx_buf;
    729	spi->rx = t->rx_buf;
    730
    731	if (t->tx_buf) {
    732		spi->tx_todo = t->len;
    733
    734		/* initially fill TX FIFO */
    735		tx_fifo_write(spi);
    736	}
    737
    738	if (spi->rx) {
    739		spi->rx_todo = t->len;
    740
    741		/* start shift clock in RX-only mode */
    742		if (!spi->tx)
    743			rx_request(spi);
    744	}
    745
    746	spin_unlock_irqrestore(&spi->lock, flags);
    747
    748	return t->len;
    749}
    750
    751/*
    752 * The driver only gets an interrupt when the FIFO is empty, but there
    753 * is an additional shift register from which the data is written to
    754 * the wire. We get the last interrupt when the controller starts to
    755 * write the last word to the wire, not when it is finished. Do busy
    756 * waiting till it finishes.
    757 */
    758static void lantiq_ssc_bussy_work(struct work_struct *work)
    759{
    760	struct lantiq_ssc_spi *spi;
    761	unsigned long long timeout = 8LL * 1000LL;
    762	unsigned long end;
    763
    764	spi = container_of(work, typeof(*spi), work);
    765
    766	do_div(timeout, spi->speed_hz);
    767	timeout += timeout + 100; /* some tolerance */
    768
    769	end = jiffies + msecs_to_jiffies(timeout);
    770	do {
    771		u32 stat = lantiq_ssc_readl(spi, LTQ_SPI_STAT);
    772
    773		if (!(stat & LTQ_SPI_STAT_BSY)) {
    774			spi_finalize_current_transfer(spi->master);
    775			return;
    776		}
    777
    778		cond_resched();
    779	} while (!time_after_eq(jiffies, end));
    780
    781	if (spi->master->cur_msg)
    782		spi->master->cur_msg->status = -EIO;
    783	spi_finalize_current_transfer(spi->master);
    784}
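
/*
 * Illustrative timeout calculation for the loop above (the speeds are
 * hypothetical): timeout = 8 * 1000 / speed_hz is the time in ms needed to
 * shift one 8-bit word onto the wire. At 100 Hz this is 80 ms, which doubled
 * plus 100 ms of tolerance gives 260 ms; at MHz rates the integer division
 * truncates to 0 and the wait is bounded by the 100 ms tolerance alone.
 */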
    785
    786static void lantiq_ssc_handle_err(struct spi_master *master,
    787				  struct spi_message *message)
    788{
    789	struct lantiq_ssc_spi *spi = spi_master_get_devdata(master);
    790
    791	/* flush FIFOs on timeout */
    792	rx_fifo_flush(spi);
    793	tx_fifo_flush(spi);
    794}
    795
    796static void lantiq_ssc_set_cs(struct spi_device *spidev, bool enable)
    797{
    798	struct lantiq_ssc_spi *spi = spi_master_get_devdata(spidev->master);
    799	unsigned int cs = spidev->chip_select;
    800	u32 fgpo;
    801
    802	if (!!(spidev->mode & SPI_CS_HIGH) == enable)
    803		fgpo = (1 << (cs - spi->base_cs));
    804	else
    805		fgpo = (1 << (cs - spi->base_cs + LTQ_SPI_FGPO_SETOUTN_S));
    806
    807	lantiq_ssc_writel(spi, fgpo, LTQ_SPI_FPGO);
    808}
    809
    810static int lantiq_ssc_transfer_one(struct spi_master *master,
    811				   struct spi_device *spidev,
    812				   struct spi_transfer *t)
    813{
    814	struct lantiq_ssc_spi *spi = spi_master_get_devdata(master);
    815
    816	hw_setup_transfer(spi, spidev, t);
    817
    818	return transfer_start(spi, spidev, t);
    819}
    820
    821static int intel_lgm_cfg_irq(struct platform_device *pdev, struct lantiq_ssc_spi *spi)
    822{
    823	int irq;
    824
    825	irq = platform_get_irq(pdev, 0);
    826	if (irq < 0)
    827		return irq;
    828
    829	return devm_request_irq(&pdev->dev, irq, intel_lgm_ssc_isr, 0, "spi", spi);
    830}
    831
    832static int lantiq_cfg_irq(struct platform_device *pdev, struct lantiq_ssc_spi *spi)
    833{
    834	int irq, err;
    835
    836	irq = platform_get_irq_byname(pdev, LTQ_SPI_RX_IRQ_NAME);
    837	if (irq < 0)
    838		return irq;
    839
    840	err = devm_request_irq(&pdev->dev, irq, lantiq_ssc_xmit_interrupt,
    841			       0, LTQ_SPI_RX_IRQ_NAME, spi);
    842	if (err)
    843		return err;
    844
    845	irq = platform_get_irq_byname(pdev, LTQ_SPI_TX_IRQ_NAME);
    846	if (irq < 0)
    847		return irq;
    848
    849	err = devm_request_irq(&pdev->dev, irq, lantiq_ssc_xmit_interrupt,
    850			       0, LTQ_SPI_TX_IRQ_NAME, spi);
    851
    852	if (err)
    853		return err;
    854
    855	irq = platform_get_irq_byname(pdev, LTQ_SPI_ERR_IRQ_NAME);
    856	if (irq < 0)
    857		return irq;
    858
    859	err = devm_request_irq(&pdev->dev, irq, lantiq_ssc_err_interrupt,
    860			       0, LTQ_SPI_ERR_IRQ_NAME, spi);
    861	return err;
    862}
    863
    864static const struct lantiq_ssc_hwcfg lantiq_ssc_xway = {
    865	.cfg_irq	= lantiq_cfg_irq,
    866	.irnen_r	= LTQ_SPI_IRNEN_R_XWAY,
    867	.irnen_t	= LTQ_SPI_IRNEN_T_XWAY,
    868	.irnicr		= 0xF8,
    869	.irncr		= 0xFC,
    870	.fifo_size_mask	= GENMASK(5, 0),
    871	.irq_ack	= false,
    872};
    873
    874static const struct lantiq_ssc_hwcfg lantiq_ssc_xrx = {
    875	.cfg_irq	= lantiq_cfg_irq,
    876	.irnen_r	= LTQ_SPI_IRNEN_R_XRX,
    877	.irnen_t	= LTQ_SPI_IRNEN_T_XRX,
    878	.irnicr		= 0xF8,
    879	.irncr		= 0xFC,
    880	.fifo_size_mask	= GENMASK(5, 0),
    881	.irq_ack	= false,
    882};
    883
    884static const struct lantiq_ssc_hwcfg intel_ssc_lgm = {
    885	.cfg_irq	= intel_lgm_cfg_irq,
    886	.irnen_r	= LTQ_SPI_IRNEN_R_XRX,
    887	.irnen_t	= LTQ_SPI_IRNEN_T_XRX,
    888	.irnicr		= 0xFC,
    889	.irncr		= 0xF8,
    890	.fifo_size_mask	= GENMASK(7, 0),
    891	.irq_ack	= true,
    892};
    893
    894static const struct of_device_id lantiq_ssc_match[] = {
    895	{ .compatible = "lantiq,ase-spi", .data = &lantiq_ssc_xway, },
    896	{ .compatible = "lantiq,falcon-spi", .data = &lantiq_ssc_xrx, },
    897	{ .compatible = "lantiq,xrx100-spi", .data = &lantiq_ssc_xrx, },
    898	{ .compatible = "intel,lgm-spi", .data = &intel_ssc_lgm, },
    899	{},
    900};
    901MODULE_DEVICE_TABLE(of, lantiq_ssc_match);
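
/*
 * Hedged sketch of a matching device tree node (register address, interrupt
 * numbers and clock phandles are made up for illustration; only the
 * compatible string, the "spi_rx"/"spi_tx"/"spi_err" interrupt names, the
 * "gate"/"freq" clock names and the "num-cs"/"base-cs" properties come from
 * this driver):
 *
 *	spi@e100800 {
 *		compatible = "lantiq,xrx100-spi";
 *		reg = <0xe100800 0x100>;
 *		interrupt-parent = <&icu0>;
 *		interrupts = <22 23 24>;
 *		interrupt-names = "spi_rx", "spi_tx", "spi_err";
 *		clocks = <&gate_clk>, <&fpi_clk>;
 *		clock-names = "gate", "freq";
 *		#address-cells = <1>;
 *		#size-cells = <0>;
 *		num-cs = <6>;
 *		base-cs = <1>;
 *	};
 */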
    902
    903static int lantiq_ssc_probe(struct platform_device *pdev)
    904{
    905	struct device *dev = &pdev->dev;
    906	struct spi_master *master;
    907	struct lantiq_ssc_spi *spi;
    908	const struct lantiq_ssc_hwcfg *hwcfg;
    909	u32 id, supports_dma, revision;
    910	unsigned int num_cs;
    911	int err;
    912
    913	hwcfg = of_device_get_match_data(dev);
    914
    915	master = spi_alloc_master(dev, sizeof(struct lantiq_ssc_spi));
    916	if (!master)
    917		return -ENOMEM;
    918
    919	spi = spi_master_get_devdata(master);
    920	spi->master = master;
    921	spi->dev = dev;
    922	spi->hwcfg = hwcfg;
    923	platform_set_drvdata(pdev, spi);
    924	spi->regbase = devm_platform_ioremap_resource(pdev, 0);
    925	if (IS_ERR(spi->regbase)) {
    926		err = PTR_ERR(spi->regbase);
    927		goto err_master_put;
    928	}
    929
    930	err = hwcfg->cfg_irq(pdev, spi);
    931	if (err)
    932		goto err_master_put;
    933
    934	spi->spi_clk = devm_clk_get(dev, "gate");
    935	if (IS_ERR(spi->spi_clk)) {
    936		err = PTR_ERR(spi->spi_clk);
    937		goto err_master_put;
    938	}
    939	err = clk_prepare_enable(spi->spi_clk);
    940	if (err)
    941		goto err_master_put;
    942
    943	/*
    944	 * Use the old clk_get_fpi() function on the Lantiq platform until it
    945	 * supports the common clk framework.
    946	 */
    947#if defined(CONFIG_LANTIQ) && !defined(CONFIG_COMMON_CLK)
    948	spi->fpi_clk = clk_get_fpi();
    949#else
    950	spi->fpi_clk = clk_get(dev, "freq");
    951#endif
    952	if (IS_ERR(spi->fpi_clk)) {
    953		err = PTR_ERR(spi->fpi_clk);
    954		goto err_clk_disable;
    955	}
    956
    957	num_cs = 8;
    958	of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs);
    959
    960	spi->base_cs = 1;
    961	of_property_read_u32(pdev->dev.of_node, "base-cs", &spi->base_cs);
    962
    963	spin_lock_init(&spi->lock);
    964	spi->bits_per_word = 8;
    965	spi->speed_hz = 0;
    966
    967	master->dev.of_node = pdev->dev.of_node;
    968	master->num_chipselect = num_cs;
    969	master->use_gpio_descriptors = true;
    970	master->setup = lantiq_ssc_setup;
    971	master->set_cs = lantiq_ssc_set_cs;
    972	master->handle_err = lantiq_ssc_handle_err;
    973	master->prepare_message = lantiq_ssc_prepare_message;
    974	master->unprepare_message = lantiq_ssc_unprepare_message;
    975	master->transfer_one = lantiq_ssc_transfer_one;
    976	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_CS_HIGH |
    977				SPI_LOOP;
    978	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 8) |
    979				     SPI_BPW_MASK(16) | SPI_BPW_MASK(32);
    980
    981	spi->wq = alloc_ordered_workqueue(dev_name(dev), WQ_MEM_RECLAIM);
    982	if (!spi->wq) {
    983		err = -ENOMEM;
    984		goto err_clk_put;
    985	}
    986	INIT_WORK(&spi->work, lantiq_ssc_bussy_work);
    987
    988	id = lantiq_ssc_readl(spi, LTQ_SPI_ID);
    989	spi->tx_fifo_size = (id >> LTQ_SPI_ID_TXFS_S) & hwcfg->fifo_size_mask;
    990	spi->rx_fifo_size = (id >> LTQ_SPI_ID_RXFS_S) & hwcfg->fifo_size_mask;
    991	supports_dma = (id & LTQ_SPI_ID_CFG_M) >> LTQ_SPI_ID_CFG_S;
    992	revision = id & LTQ_SPI_ID_REV_M;
    993
    994	lantiq_ssc_hw_init(spi);
    995
    996	dev_info(dev,
    997		"Lantiq SSC SPI controller (Rev %i, TXFS %u, RXFS %u, DMA %u)\n",
    998		revision, spi->tx_fifo_size, spi->rx_fifo_size, supports_dma);
    999
   1000	err = devm_spi_register_master(dev, master);
   1001	if (err) {
   1002		dev_err(dev, "failed to register spi_master\n");
   1003		goto err_wq_destroy;
   1004	}
   1005
   1006	return 0;
   1007
   1008err_wq_destroy:
   1009	destroy_workqueue(spi->wq);
   1010err_clk_put:
   1011	clk_put(spi->fpi_clk);
   1012err_clk_disable:
   1013	clk_disable_unprepare(spi->spi_clk);
   1014err_master_put:
   1015	spi_master_put(master);
   1016
   1017	return err;
   1018}
   1019
   1020static int lantiq_ssc_remove(struct platform_device *pdev)
   1021{
   1022	struct lantiq_ssc_spi *spi = platform_get_drvdata(pdev);
   1023
   1024	lantiq_ssc_writel(spi, 0, LTQ_SPI_IRNEN);
   1025	lantiq_ssc_writel(spi, 0, LTQ_SPI_CLC);
   1026	rx_fifo_flush(spi);
   1027	tx_fifo_flush(spi);
   1028	hw_enter_config_mode(spi);
   1029
   1030	destroy_workqueue(spi->wq);
   1031	clk_disable_unprepare(spi->spi_clk);
   1032	clk_put(spi->fpi_clk);
   1033
   1034	return 0;
   1035}
   1036
   1037static struct platform_driver lantiq_ssc_driver = {
   1038	.probe = lantiq_ssc_probe,
   1039	.remove = lantiq_ssc_remove,
   1040	.driver = {
   1041		.name = "spi-lantiq-ssc",
   1042		.of_match_table = lantiq_ssc_match,
   1043	},
   1044};
   1045module_platform_driver(lantiq_ssc_driver);
   1046
   1047MODULE_DESCRIPTION("Lantiq SSC SPI controller driver");
   1048MODULE_AUTHOR("Daniel Schwierzeck <daniel.schwierzeck@gmail.com>");
   1049MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>");
   1050MODULE_LICENSE("GPL");
   1051MODULE_ALIAS("platform:spi-lantiq-ssc");