cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

spi-mt65xx.c (37736B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015 MediaTek Inc.
 * Author: Leilk Liu <leilk.liu@mediatek.com>
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <linux/platform_data/spi-mt65xx.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/dma-mapping.h>

#define SPI_CFG0_REG			0x0000
#define SPI_CFG1_REG			0x0004
#define SPI_TX_SRC_REG			0x0008
#define SPI_RX_DST_REG			0x000c
#define SPI_TX_DATA_REG			0x0010
#define SPI_RX_DATA_REG			0x0014
#define SPI_CMD_REG			0x0018
#define SPI_STATUS0_REG			0x001c
#define SPI_PAD_SEL_REG			0x0024
#define SPI_CFG2_REG			0x0028
#define SPI_TX_SRC_REG_64		0x002c
#define SPI_RX_DST_REG_64		0x0030
#define SPI_CFG3_IPM_REG		0x0040

#define SPI_CFG0_SCK_HIGH_OFFSET	0
#define SPI_CFG0_SCK_LOW_OFFSET		8
#define SPI_CFG0_CS_HOLD_OFFSET		16
#define SPI_CFG0_CS_SETUP_OFFSET	24
#define SPI_ADJUST_CFG0_CS_HOLD_OFFSET	0
#define SPI_ADJUST_CFG0_CS_SETUP_OFFSET	16

#define SPI_CFG1_CS_IDLE_OFFSET		0
#define SPI_CFG1_PACKET_LOOP_OFFSET	8
#define SPI_CFG1_PACKET_LENGTH_OFFSET	16
#define SPI_CFG1_GET_TICK_DLY_OFFSET	29
#define SPI_CFG1_GET_TICK_DLY_OFFSET_V1	30

#define SPI_CFG1_GET_TICK_DLY_MASK	0xe0000000
#define SPI_CFG1_GET_TICK_DLY_MASK_V1	0xc0000000

#define SPI_CFG1_CS_IDLE_MASK		0xff
#define SPI_CFG1_PACKET_LOOP_MASK	0xff00
#define SPI_CFG1_PACKET_LENGTH_MASK	0x3ff0000
#define SPI_CFG1_IPM_PACKET_LENGTH_MASK	GENMASK(31, 16)
#define SPI_CFG2_SCK_HIGH_OFFSET	0
#define SPI_CFG2_SCK_LOW_OFFSET		16

#define SPI_CMD_ACT			BIT(0)
#define SPI_CMD_RESUME			BIT(1)
#define SPI_CMD_RST			BIT(2)
#define SPI_CMD_PAUSE_EN		BIT(4)
#define SPI_CMD_DEASSERT		BIT(5)
#define SPI_CMD_SAMPLE_SEL		BIT(6)
#define SPI_CMD_CS_POL			BIT(7)
#define SPI_CMD_CPHA			BIT(8)
#define SPI_CMD_CPOL			BIT(9)
#define SPI_CMD_RX_DMA			BIT(10)
#define SPI_CMD_TX_DMA			BIT(11)
#define SPI_CMD_TXMSBF			BIT(12)
#define SPI_CMD_RXMSBF			BIT(13)
#define SPI_CMD_RX_ENDIAN		BIT(14)
#define SPI_CMD_TX_ENDIAN		BIT(15)
#define SPI_CMD_FINISH_IE		BIT(16)
#define SPI_CMD_PAUSE_IE		BIT(17)
#define SPI_CMD_IPM_NONIDLE_MODE	BIT(19)
#define SPI_CMD_IPM_SPIM_LOOP		BIT(21)
#define SPI_CMD_IPM_GET_TICKDLY_OFFSET	22

#define SPI_CMD_IPM_GET_TICKDLY_MASK	GENMASK(24, 22)

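/* Map a bus width of 1/2/4 data lines to the 2-bit pin-mode field (0/1/2). */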
#define PIN_MODE_CFG(x)	((x) / 2)

#define SPI_CFG3_IPM_HALF_DUPLEX_DIR	BIT(2)
#define SPI_CFG3_IPM_HALF_DUPLEX_EN	BIT(3)
#define SPI_CFG3_IPM_XMODE_EN		BIT(4)
#define SPI_CFG3_IPM_NODATA_FLAG	BIT(5)
#define SPI_CFG3_IPM_CMD_BYTELEN_OFFSET	8
#define SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET 12

#define SPI_CFG3_IPM_CMD_PIN_MODE_MASK	GENMASK(1, 0)
#define SPI_CFG3_IPM_CMD_BYTELEN_MASK	GENMASK(11, 8)
#define SPI_CFG3_IPM_ADDR_BYTELEN_MASK	GENMASK(15, 12)

#define MT8173_SPI_MAX_PAD_SEL		3

#define MTK_SPI_PAUSE_INT_STATUS	0x2

#define MTK_SPI_MAX_FIFO_SIZE		32U
#define MTK_SPI_PACKET_SIZE		1024
#define MTK_SPI_IPM_PACKET_SIZE		SZ_64K
#define MTK_SPI_IPM_PACKET_LOOP		SZ_256

#define MTK_SPI_IDLE			0
#define MTK_SPI_PAUSED			1

#define MTK_SPI_32BITS_MASK		(0xffffffff)

#define DMA_ADDR_EXT_BITS		(36)
#define DMA_ADDR_DEF_BITS		(32)

/**
 * struct mtk_spi_compatible - device data structure
 * @need_pad_sel:	Enable pad (pins) selection in SPI controller
 * @must_tx:		Must explicitly send dummy TX bytes to do RX only transfer
 * @enhance_timing:	Enable adjusting cfg register to enhance time accuracy
 * @dma_ext:		DMA address extension supported
 * @no_need_unprepare:	Don't unprepare the SPI clk during runtime
 * @ipm_design:		Adjust/extend registers to support IPM design IP features
 */
struct mtk_spi_compatible {
	bool need_pad_sel;
	bool must_tx;
	bool enhance_timing;
	bool dma_ext;
	bool no_need_unprepare;
	bool ipm_design;
};

/**
 * struct mtk_spi - SPI driver instance
 * @base:		Start address of the SPI controller registers
 * @state:		SPI controller state
 * @pad_num:		Number of pad_sel entries
 * @pad_sel:		Groups of pins to select
 * @parent_clk:		Parent of sel_clk
 * @sel_clk:		SPI master mux clock
 * @spi_clk:		Peripheral clock
 * @spi_hclk:		AHB bus clock
 * @cur_transfer:	Currently processed SPI transfer
 * @xfer_len:		Number of bytes to transfer
 * @num_xfered:		Number of transferred bytes
 * @tx_sgl:		TX transfer scatterlist
 * @rx_sgl:		RX transfer scatterlist
 * @tx_sgl_len:		Size of TX DMA transfer
 * @rx_sgl_len:		Size of RX DMA transfer
 * @dev_comp:		Device data structure
 * @spi_clk_hz:		Current SPI clock in Hz
 * @spimem_done:	SPI-MEM operation completion
 * @use_spimem:		Enables SPI-MEM
 * @dev:		Device pointer
 * @tx_dma:		DMA start for SPI-MEM TX
 * @rx_dma:		DMA start for SPI-MEM RX
 */
struct mtk_spi {
	void __iomem *base;
	u32 state;
	int pad_num;
	u32 *pad_sel;
	struct clk *parent_clk, *sel_clk, *spi_clk, *spi_hclk;
	struct spi_transfer *cur_transfer;
	u32 xfer_len;
	u32 num_xfered;
	struct scatterlist *tx_sgl, *rx_sgl;
	u32 tx_sgl_len, rx_sgl_len;
	const struct mtk_spi_compatible *dev_comp;
	u32 spi_clk_hz;
	struct completion spimem_done;
	bool use_spimem;
	struct device *dev;
	dma_addr_t tx_dma;
	dma_addr_t rx_dma;
};

static const struct mtk_spi_compatible mtk_common_compat;

static const struct mtk_spi_compatible mt2712_compat = {
	.must_tx = true,
};

static const struct mtk_spi_compatible mtk_ipm_compat = {
	.enhance_timing = true,
	.dma_ext = true,
	.ipm_design = true,
};

static const struct mtk_spi_compatible mt6765_compat = {
	.need_pad_sel = true,
	.must_tx = true,
	.enhance_timing = true,
	.dma_ext = true,
};

static const struct mtk_spi_compatible mt7622_compat = {
	.must_tx = true,
	.enhance_timing = true,
};

static const struct mtk_spi_compatible mt8173_compat = {
	.need_pad_sel = true,
	.must_tx = true,
};

static const struct mtk_spi_compatible mt8183_compat = {
	.need_pad_sel = true,
	.must_tx = true,
	.enhance_timing = true,
};

static const struct mtk_spi_compatible mt6893_compat = {
	.need_pad_sel = true,
	.must_tx = true,
	.enhance_timing = true,
	.dma_ext = true,
	.no_need_unprepare = true,
};

/*
 * Default chip configuration, used unless the platform
 * supplies its own.
 */
static const struct mtk_chip_config mtk_default_chip_info = {
	.sample_sel = 0,
	.tick_delay = 0,
};

static const struct of_device_id mtk_spi_of_match[] = {
	{ .compatible = "mediatek,spi-ipm",
		.data = (void *)&mtk_ipm_compat,
	},
	{ .compatible = "mediatek,mt2701-spi",
		.data = (void *)&mtk_common_compat,
	},
	{ .compatible = "mediatek,mt2712-spi",
		.data = (void *)&mt2712_compat,
	},
	{ .compatible = "mediatek,mt6589-spi",
		.data = (void *)&mtk_common_compat,
	},
	{ .compatible = "mediatek,mt6765-spi",
		.data = (void *)&mt6765_compat,
	},
	{ .compatible = "mediatek,mt7622-spi",
		.data = (void *)&mt7622_compat,
	},
	{ .compatible = "mediatek,mt7629-spi",
		.data = (void *)&mt7622_compat,
	},
	{ .compatible = "mediatek,mt8135-spi",
		.data = (void *)&mtk_common_compat,
	},
	{ .compatible = "mediatek,mt8173-spi",
		.data = (void *)&mt8173_compat,
	},
	{ .compatible = "mediatek,mt8183-spi",
		.data = (void *)&mt8183_compat,
	},
	{ .compatible = "mediatek,mt8192-spi",
		.data = (void *)&mt6765_compat,
	},
	{ .compatible = "mediatek,mt6893-spi",
		.data = (void *)&mt6893_compat,
	},
	{}
};
MODULE_DEVICE_TABLE(of, mtk_spi_of_match);

static void mtk_spi_reset(struct mtk_spi *mdata)
{
	u32 reg_val;

	/* set the software reset bit in SPI_CMD_REG. */
	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val |= SPI_CMD_RST;
	writel(reg_val, mdata->base + SPI_CMD_REG);

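	/* then clear the reset bit again to release the controller */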
	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val &= ~SPI_CMD_RST;
	writel(reg_val, mdata->base + SPI_CMD_REG);
}

static int mtk_spi_set_hw_cs_timing(struct spi_device *spi)
{
	struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
	struct spi_delay *cs_setup = &spi->cs_setup;
	struct spi_delay *cs_hold = &spi->cs_hold;
	struct spi_delay *cs_inactive = &spi->cs_inactive;
	u32 setup, hold, inactive;
	u32 reg_val;
	int delay;

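	/*
	 * Convert each delay from nanoseconds to SPI clock cycles:
	 * cycles = ns * ceil(spi_clk_hz / 1 MHz) / 1000.
	 */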
	delay = spi_delay_to_ns(cs_setup, NULL);
	if (delay < 0)
		return delay;
	setup = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;

	delay = spi_delay_to_ns(cs_hold, NULL);
	if (delay < 0)
		return delay;
	hold = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;

	delay = spi_delay_to_ns(cs_inactive, NULL);
	if (delay < 0)
		return delay;
	inactive = (delay * DIV_ROUND_UP(mdata->spi_clk_hz, 1000000)) / 1000;

	if (hold || setup) {
		reg_val = readl(mdata->base + SPI_CFG0_REG);
		if (mdata->dev_comp->enhance_timing) {
			if (hold) {
				hold = min_t(u32, hold, 0x10000);
				reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
				reg_val |= (((hold - 1) & 0xffff)
					<< SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
			}
			if (setup) {
				setup = min_t(u32, setup, 0x10000);
				reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
				reg_val |= (((setup - 1) & 0xffff)
					<< SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
			}
		} else {
			if (hold) {
				hold = min_t(u32, hold, 0x100);
				reg_val &= ~(0xff << SPI_CFG0_CS_HOLD_OFFSET);
				reg_val |= (((hold - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);
			}
			if (setup) {
				setup = min_t(u32, setup, 0x100);
				reg_val &= ~(0xff << SPI_CFG0_CS_SETUP_OFFSET);
				reg_val |= (((setup - 1) & 0xff)
					<< SPI_CFG0_CS_SETUP_OFFSET);
			}
		}
		writel(reg_val, mdata->base + SPI_CFG0_REG);
	}

	if (inactive) {
		inactive = min_t(u32, inactive, 0x100);
		reg_val = readl(mdata->base + SPI_CFG1_REG);
		reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
		reg_val |= (((inactive - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET);
		writel(reg_val, mdata->base + SPI_CFG1_REG);
	}

	return 0;
}

static int mtk_spi_hw_init(struct spi_master *master,
			   struct spi_device *spi)
{
	u16 cpha, cpol;
	u32 reg_val;
	struct mtk_chip_config *chip_config = spi->controller_data;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	cpha = spi->mode & SPI_CPHA ? 1 : 0;
	cpol = spi->mode & SPI_CPOL ? 1 : 0;

	reg_val = readl(mdata->base + SPI_CMD_REG);
	if (mdata->dev_comp->ipm_design) {
		/* SPI transfer without idle time until packet length done */
		reg_val |= SPI_CMD_IPM_NONIDLE_MODE;
		if (spi->mode & SPI_LOOP)
			reg_val |= SPI_CMD_IPM_SPIM_LOOP;
		else
			reg_val &= ~SPI_CMD_IPM_SPIM_LOOP;
	}

	if (cpha)
		reg_val |= SPI_CMD_CPHA;
	else
		reg_val &= ~SPI_CMD_CPHA;
	if (cpol)
		reg_val |= SPI_CMD_CPOL;
	else
		reg_val &= ~SPI_CMD_CPOL;

	/* set TX/RX bit order: MSB-first unless SPI_LSB_FIRST is requested */
	if (spi->mode & SPI_LSB_FIRST) {
		reg_val &= ~SPI_CMD_TXMSBF;
		reg_val &= ~SPI_CMD_RXMSBF;
	} else {
		reg_val |= SPI_CMD_TXMSBF;
		reg_val |= SPI_CMD_RXMSBF;
	}

	/* set the tx/rx endian */
#ifdef __LITTLE_ENDIAN
	reg_val &= ~SPI_CMD_TX_ENDIAN;
	reg_val &= ~SPI_CMD_RX_ENDIAN;
#else
	reg_val |= SPI_CMD_TX_ENDIAN;
	reg_val |= SPI_CMD_RX_ENDIAN;
#endif

	if (mdata->dev_comp->enhance_timing) {
		/* set CS polarity */
		if (spi->mode & SPI_CS_HIGH)
			reg_val |= SPI_CMD_CS_POL;
		else
			reg_val &= ~SPI_CMD_CS_POL;

		if (chip_config->sample_sel)
			reg_val |= SPI_CMD_SAMPLE_SEL;
		else
			reg_val &= ~SPI_CMD_SAMPLE_SEL;
	}

	/* keep the finish and pause interrupts always enabled */
	reg_val |= SPI_CMD_FINISH_IE | SPI_CMD_PAUSE_IE;

	/* disable dma mode */
	reg_val &= ~(SPI_CMD_TX_DMA | SPI_CMD_RX_DMA);

	/* disable deassert mode */
	reg_val &= ~SPI_CMD_DEASSERT;

	writel(reg_val, mdata->base + SPI_CMD_REG);

	/* pad select */
	if (mdata->dev_comp->need_pad_sel)
		writel(mdata->pad_sel[spi->chip_select],
		       mdata->base + SPI_PAD_SEL_REG);

	/* tick delay */
	if (mdata->dev_comp->enhance_timing) {
		if (mdata->dev_comp->ipm_design) {
			reg_val = readl(mdata->base + SPI_CMD_REG);
			reg_val &= ~SPI_CMD_IPM_GET_TICKDLY_MASK;
			reg_val |= ((chip_config->tick_delay & 0x7)
				    << SPI_CMD_IPM_GET_TICKDLY_OFFSET);
			writel(reg_val, mdata->base + SPI_CMD_REG);
		} else {
			reg_val = readl(mdata->base + SPI_CFG1_REG);
			reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK;
			reg_val |= ((chip_config->tick_delay & 0x7)
				    << SPI_CFG1_GET_TICK_DLY_OFFSET);
			writel(reg_val, mdata->base + SPI_CFG1_REG);
		}
	} else {
		reg_val = readl(mdata->base + SPI_CFG1_REG);
		reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK_V1;
		reg_val |= ((chip_config->tick_delay & 0x3)
			    << SPI_CFG1_GET_TICK_DLY_OFFSET_V1);
		writel(reg_val, mdata->base + SPI_CFG1_REG);
	}

	/* set hw cs timing */
	mtk_spi_set_hw_cs_timing(spi);
	return 0;
}

static int mtk_spi_prepare_message(struct spi_master *master,
				   struct spi_message *msg)
{
	return mtk_spi_hw_init(master, msg->spi);
}

static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
{
	u32 reg_val;
	struct mtk_spi *mdata = spi_master_get_devdata(spi->master);

	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	reg_val = readl(mdata->base + SPI_CMD_REG);
	if (!enable) {
		reg_val |= SPI_CMD_PAUSE_EN;
		writel(reg_val, mdata->base + SPI_CMD_REG);
	} else {
		reg_val &= ~SPI_CMD_PAUSE_EN;
		writel(reg_val, mdata->base + SPI_CMD_REG);
		mdata->state = MTK_SPI_IDLE;
		mtk_spi_reset(mdata);
	}
}

static void mtk_spi_prepare_transfer(struct spi_master *master,
				     u32 speed_hz)
{
	u32 div, sck_time, reg_val;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	if (speed_hz < mdata->spi_clk_hz / 2)
		div = DIV_ROUND_UP(mdata->spi_clk_hz, speed_hz);
	else
		div = 1;

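	/*
	 * SCK high and low each last sck_time core-clock cycles, so the
	 * resulting SCK period is roughly div cycles of the SPI clock.
	 */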
	sck_time = (div + 1) / 2;

	if (mdata->dev_comp->enhance_timing) {
		reg_val = readl(mdata->base + SPI_CFG2_REG);
		reg_val &= ~(0xffff << SPI_CFG2_SCK_HIGH_OFFSET);
		reg_val |= (((sck_time - 1) & 0xffff)
			   << SPI_CFG2_SCK_HIGH_OFFSET);
		reg_val &= ~(0xffff << SPI_CFG2_SCK_LOW_OFFSET);
		reg_val |= (((sck_time - 1) & 0xffff)
			   << SPI_CFG2_SCK_LOW_OFFSET);
		writel(reg_val, mdata->base + SPI_CFG2_REG);
	} else {
		reg_val = readl(mdata->base + SPI_CFG0_REG);
		reg_val &= ~(0xff << SPI_CFG0_SCK_HIGH_OFFSET);
		reg_val |= (((sck_time - 1) & 0xff)
			   << SPI_CFG0_SCK_HIGH_OFFSET);
		reg_val &= ~(0xff << SPI_CFG0_SCK_LOW_OFFSET);
		reg_val |= (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET);
		writel(reg_val, mdata->base + SPI_CFG0_REG);
	}
}

static void mtk_spi_setup_packet(struct spi_master *master)
{
	u32 packet_size, packet_loop, reg_val;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

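	/*
	 * The hardware moves packet_size bytes per packet and repeats it
	 * packet_loop times; both fields are programmed minus one.
	 */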
	if (mdata->dev_comp->ipm_design)
		packet_size = min_t(u32,
				    mdata->xfer_len,
				    MTK_SPI_IPM_PACKET_SIZE);
	else
		packet_size = min_t(u32,
				    mdata->xfer_len,
				    MTK_SPI_PACKET_SIZE);

	packet_loop = mdata->xfer_len / packet_size;

	reg_val = readl(mdata->base + SPI_CFG1_REG);
	if (mdata->dev_comp->ipm_design)
		reg_val &= ~SPI_CFG1_IPM_PACKET_LENGTH_MASK;
	else
		reg_val &= ~SPI_CFG1_PACKET_LENGTH_MASK;
	reg_val |= (packet_size - 1) << SPI_CFG1_PACKET_LENGTH_OFFSET;
	reg_val &= ~SPI_CFG1_PACKET_LOOP_MASK;
	reg_val |= (packet_loop - 1) << SPI_CFG1_PACKET_LOOP_OFFSET;
	writel(reg_val, mdata->base + SPI_CFG1_REG);
}

static void mtk_spi_enable_transfer(struct spi_master *master)
{
	u32 cmd;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	cmd = readl(mdata->base + SPI_CMD_REG);
	if (mdata->state == MTK_SPI_IDLE)
		cmd |= SPI_CMD_ACT;
	else
		cmd |= SPI_CMD_RESUME;
	writel(cmd, mdata->base + SPI_CMD_REG);
}

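/*
 * A DMA chunk must cover a whole number of packets; return the remainder
 * that does not fit, so it can be carried over into the next chunk.
 */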
static int mtk_spi_get_mult_delta(u32 xfer_len)
{
	u32 mult_delta;

	if (xfer_len > MTK_SPI_PACKET_SIZE)
		mult_delta = xfer_len % MTK_SPI_PACKET_SIZE;
	else
		mult_delta = 0;

	return mult_delta;
}

static void mtk_spi_update_mdata_len(struct spi_master *master)
{
	int mult_delta;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	if (mdata->tx_sgl_len && mdata->rx_sgl_len) {
		if (mdata->tx_sgl_len > mdata->rx_sgl_len) {
			mult_delta = mtk_spi_get_mult_delta(mdata->rx_sgl_len);
			mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
			mdata->rx_sgl_len = mult_delta;
			mdata->tx_sgl_len -= mdata->xfer_len;
		} else {
			mult_delta = mtk_spi_get_mult_delta(mdata->tx_sgl_len);
			mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
			mdata->tx_sgl_len = mult_delta;
			mdata->rx_sgl_len -= mdata->xfer_len;
		}
	} else if (mdata->tx_sgl_len) {
		mult_delta = mtk_spi_get_mult_delta(mdata->tx_sgl_len);
		mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
		mdata->tx_sgl_len = mult_delta;
	} else if (mdata->rx_sgl_len) {
		mult_delta = mtk_spi_get_mult_delta(mdata->rx_sgl_len);
		mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
		mdata->rx_sgl_len = mult_delta;
	}
}

static void mtk_spi_setup_dma_addr(struct spi_master *master,
				   struct spi_transfer *xfer)
{
	struct mtk_spi *mdata = spi_master_get_devdata(master);

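	/*
	 * Program the low 32 bits of the DMA addresses; controllers with
	 * dma_ext take the upper bits (36-bit addressing) in *_REG_64.
	 */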
	if (mdata->tx_sgl) {
		writel((u32)(xfer->tx_dma & MTK_SPI_32BITS_MASK),
		       mdata->base + SPI_TX_SRC_REG);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (mdata->dev_comp->dma_ext)
			writel((u32)(xfer->tx_dma >> 32),
			       mdata->base + SPI_TX_SRC_REG_64);
#endif
	}

	if (mdata->rx_sgl) {
		writel((u32)(xfer->rx_dma & MTK_SPI_32BITS_MASK),
		       mdata->base + SPI_RX_DST_REG);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (mdata->dev_comp->dma_ext)
			writel((u32)(xfer->rx_dma >> 32),
			       mdata->base + SPI_RX_DST_REG_64);
#endif
	}
}

static int mtk_spi_fifo_transfer(struct spi_master *master,
				 struct spi_device *spi,
				 struct spi_transfer *xfer)
{
	int cnt, remainder;
	u32 reg_val;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	mdata->cur_transfer = xfer;
	mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, xfer->len);
	mdata->num_xfered = 0;
	mtk_spi_prepare_transfer(master, xfer->speed_hz);
	mtk_spi_setup_packet(master);

	if (xfer->tx_buf) {
		cnt = xfer->len / 4;
		iowrite32_rep(mdata->base + SPI_TX_DATA_REG, xfer->tx_buf, cnt);
		remainder = xfer->len % 4;
		if (remainder > 0) {
			reg_val = 0;
			memcpy(&reg_val, xfer->tx_buf + (cnt * 4), remainder);
			writel(reg_val, mdata->base + SPI_TX_DATA_REG);
		}
	}

	mtk_spi_enable_transfer(master);

	return 1;
}

static int mtk_spi_dma_transfer(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	int cmd;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	mdata->tx_sgl = NULL;
	mdata->rx_sgl = NULL;
	mdata->tx_sgl_len = 0;
	mdata->rx_sgl_len = 0;
	mdata->cur_transfer = xfer;
	mdata->num_xfered = 0;

	mtk_spi_prepare_transfer(master, xfer->speed_hz);

	cmd = readl(mdata->base + SPI_CMD_REG);
	if (xfer->tx_buf)
		cmd |= SPI_CMD_TX_DMA;
	if (xfer->rx_buf)
		cmd |= SPI_CMD_RX_DMA;
	writel(cmd, mdata->base + SPI_CMD_REG);

	if (xfer->tx_buf)
		mdata->tx_sgl = xfer->tx_sg.sgl;
	if (xfer->rx_buf)
		mdata->rx_sgl = xfer->rx_sg.sgl;

	if (mdata->tx_sgl) {
		xfer->tx_dma = sg_dma_address(mdata->tx_sgl);
		mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
	}
	if (mdata->rx_sgl) {
		xfer->rx_dma = sg_dma_address(mdata->rx_sgl);
		mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
	}

	mtk_spi_update_mdata_len(master);
	mtk_spi_setup_packet(master);
	mtk_spi_setup_dma_addr(master, xfer);
	mtk_spi_enable_transfer(master);

	return 1;
}

static int mtk_spi_transfer_one(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
	u32 reg_val = 0;

	/* prepare xfer direction and duplex mode */
	if (mdata->dev_comp->ipm_design) {
		if (!xfer->tx_buf || !xfer->rx_buf) {
			reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN;
			if (xfer->rx_buf)
				reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_DIR;
		}
		writel(reg_val, mdata->base + SPI_CFG3_IPM_REG);
	}

	if (master->can_dma(master, spi, xfer))
		return mtk_spi_dma_transfer(master, spi, xfer);
	else
		return mtk_spi_fifo_transfer(master, spi, xfer);
}

static bool mtk_spi_can_dma(struct spi_master *master,
			    struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	/* Buffers for DMA transactions must be 4-byte aligned */
	return (xfer->len > MTK_SPI_MAX_FIFO_SIZE &&
		(unsigned long)xfer->tx_buf % 4 == 0 &&
		(unsigned long)xfer->rx_buf % 4 == 0);
}

static int mtk_spi_setup(struct spi_device *spi)
{
	struct mtk_spi *mdata = spi_master_get_devdata(spi->master);

	if (!spi->controller_data)
		spi->controller_data = (void *)&mtk_default_chip_info;

	if (mdata->dev_comp->need_pad_sel && spi->cs_gpiod)
		/* CS de-asserted, gpiolib will handle inversion */
		gpiod_direction_output(spi->cs_gpiod, 0);

	return 0;
}

static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
{
	u32 cmd, reg_val, cnt, remainder, len;
	struct spi_master *master = dev_id;
	struct mtk_spi *mdata = spi_master_get_devdata(master);
	struct spi_transfer *trans = mdata->cur_transfer;

	reg_val = readl(mdata->base + SPI_STATUS0_REG);
	if (reg_val & MTK_SPI_PAUSE_INT_STATUS)
		mdata->state = MTK_SPI_PAUSED;
	else
		mdata->state = MTK_SPI_IDLE;

	/* SPI-MEM ops */
	if (mdata->use_spimem) {
		complete(&mdata->spimem_done);
		return IRQ_HANDLED;
	}

	if (!master->can_dma(master, NULL, trans)) {
		if (trans->rx_buf) {
			cnt = mdata->xfer_len / 4;
			ioread32_rep(mdata->base + SPI_RX_DATA_REG,
				     trans->rx_buf + mdata->num_xfered, cnt);
			remainder = mdata->xfer_len % 4;
			if (remainder > 0) {
				reg_val = readl(mdata->base + SPI_RX_DATA_REG);
				memcpy(trans->rx_buf +
					mdata->num_xfered +
					(cnt * 4),
					&reg_val,
					remainder);
			}
		}

		mdata->num_xfered += mdata->xfer_len;
		if (mdata->num_xfered == trans->len) {
			spi_finalize_current_transfer(master);
			return IRQ_HANDLED;
		}

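		/* refill the FIFO with the next chunk and resume the transfer */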
		len = trans->len - mdata->num_xfered;
		mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, len);
		mtk_spi_setup_packet(master);

		cnt = mdata->xfer_len / 4;
		iowrite32_rep(mdata->base + SPI_TX_DATA_REG,
				trans->tx_buf + mdata->num_xfered, cnt);

		remainder = mdata->xfer_len % 4;
		if (remainder > 0) {
			reg_val = 0;
			memcpy(&reg_val,
				trans->tx_buf + (cnt * 4) + mdata->num_xfered,
				remainder);
			writel(reg_val, mdata->base + SPI_TX_DATA_REG);
		}

		mtk_spi_enable_transfer(master);

		return IRQ_HANDLED;
	}

	if (mdata->tx_sgl)
		trans->tx_dma += mdata->xfer_len;
	if (mdata->rx_sgl)
		trans->rx_dma += mdata->xfer_len;

	if (mdata->tx_sgl && (mdata->tx_sgl_len == 0)) {
		mdata->tx_sgl = sg_next(mdata->tx_sgl);
		if (mdata->tx_sgl) {
			trans->tx_dma = sg_dma_address(mdata->tx_sgl);
			mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
		}
	}
	if (mdata->rx_sgl && (mdata->rx_sgl_len == 0)) {
		mdata->rx_sgl = sg_next(mdata->rx_sgl);
		if (mdata->rx_sgl) {
			trans->rx_dma = sg_dma_address(mdata->rx_sgl);
			mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
		}
	}

	if (!mdata->tx_sgl && !mdata->rx_sgl) {
		/* spi disable dma */
		cmd = readl(mdata->base + SPI_CMD_REG);
		cmd &= ~SPI_CMD_TX_DMA;
		cmd &= ~SPI_CMD_RX_DMA;
		writel(cmd, mdata->base + SPI_CMD_REG);

		spi_finalize_current_transfer(master);
		return IRQ_HANDLED;
	}

	mtk_spi_update_mdata_len(master);
	mtk_spi_setup_packet(master);
	mtk_spi_setup_dma_addr(master, trans);
	mtk_spi_enable_transfer(master);

	return IRQ_HANDLED;
}

static int mtk_spi_mem_adjust_op_size(struct spi_mem *mem,
				      struct spi_mem_op *op)
{
	int opcode_len;

	if (op->data.dir != SPI_MEM_NO_DATA) {
		opcode_len = 1 + op->addr.nbytes + op->dummy.nbytes;
		if (opcode_len + op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
			op->data.nbytes = MTK_SPI_IPM_PACKET_SIZE - opcode_len;
			/* force data buffer dma-aligned. */
			op->data.nbytes -= op->data.nbytes % 4;
		}
	}

	return 0;
}

static bool mtk_spi_mem_supports_op(struct spi_mem *mem,
				    const struct spi_mem_op *op)
{
	if (!spi_mem_default_supports_op(mem, op))
		return false;

	if (op->addr.nbytes && op->dummy.nbytes &&
	    op->addr.buswidth != op->dummy.buswidth)
		return false;

	if (op->addr.nbytes + op->dummy.nbytes > 16)
		return false;

	if (op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
		if (op->data.nbytes / MTK_SPI_IPM_PACKET_SIZE >
		    MTK_SPI_IPM_PACKET_LOOP ||
		    op->data.nbytes % MTK_SPI_IPM_PACKET_SIZE != 0)
			return false;
	}

	return true;
}

static void mtk_spi_mem_setup_dma_xfer(struct spi_master *master,
				       const struct spi_mem_op *op)
{
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	writel((u32)(mdata->tx_dma & MTK_SPI_32BITS_MASK),
	       mdata->base + SPI_TX_SRC_REG);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (mdata->dev_comp->dma_ext)
		writel((u32)(mdata->tx_dma >> 32),
		       mdata->base + SPI_TX_SRC_REG_64);
#endif

	if (op->data.dir == SPI_MEM_DATA_IN) {
		writel((u32)(mdata->rx_dma & MTK_SPI_32BITS_MASK),
		       mdata->base + SPI_RX_DST_REG);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (mdata->dev_comp->dma_ext)
			writel((u32)(mdata->rx_dma >> 32),
			       mdata->base + SPI_RX_DST_REG_64);
#endif
	}
}

static int mtk_spi_transfer_wait(struct spi_mem *mem,
				 const struct spi_mem_op *op)
{
	struct mtk_spi *mdata = spi_master_get_devdata(mem->spi->master);
	/*
	 * Each byte takes 8 cycles of the SPI clock. The speed is given
	 * in Hz and the timeout is wanted in milliseconds, hence the
	 * factor of 8 * 1000.
	 */
	u64 ms = 8000LL;

	if (op->data.dir == SPI_MEM_NO_DATA)
		ms *= 32; /* avoid a zero timeout for short transfers */
	else
		ms *= op->data.nbytes;
	ms = div_u64(ms, mem->spi->max_speed_hz);
	ms += ms + 1000; /* double the timeout and add 1s of tolerance */

	if (ms > UINT_MAX)
		ms = UINT_MAX;

	if (!wait_for_completion_timeout(&mdata->spimem_done,
					 msecs_to_jiffies(ms))) {
		dev_err(mdata->dev, "spi-mem transfer timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int mtk_spi_mem_exec_op(struct spi_mem *mem,
			       const struct spi_mem_op *op)
{
	struct mtk_spi *mdata = spi_master_get_devdata(mem->spi->master);
	u32 reg_val, nio, tx_size;
	char *tx_tmp_buf, *rx_tmp_buf;
	int ret = 0;

	mdata->use_spimem = true;
	reinit_completion(&mdata->spimem_done);

	mtk_spi_reset(mdata);
	mtk_spi_hw_init(mem->spi->master, mem->spi);
	mtk_spi_prepare_transfer(mem->spi->master, mem->spi->max_speed_hz);

	reg_val = readl(mdata->base + SPI_CFG3_IPM_REG);
	/* opcode byte len */
	reg_val &= ~SPI_CFG3_IPM_CMD_BYTELEN_MASK;
	reg_val |= 1 << SPI_CFG3_IPM_CMD_BYTELEN_OFFSET;

	/* addr & dummy byte len */
	reg_val &= ~SPI_CFG3_IPM_ADDR_BYTELEN_MASK;
	if (op->addr.nbytes || op->dummy.nbytes)
		reg_val |= (op->addr.nbytes + op->dummy.nbytes) <<
			    SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET;

	/* data byte len */
	if (op->data.dir == SPI_MEM_NO_DATA) {
		reg_val |= SPI_CFG3_IPM_NODATA_FLAG;
		writel(0, mdata->base + SPI_CFG1_REG);
	} else {
		reg_val &= ~SPI_CFG3_IPM_NODATA_FLAG;
		mdata->xfer_len = op->data.nbytes;
		mtk_spi_setup_packet(mem->spi->master);
	}

	if (op->addr.nbytes || op->dummy.nbytes) {
		if (op->addr.buswidth == 1 || op->dummy.buswidth == 1)
			reg_val |= SPI_CFG3_IPM_XMODE_EN;
		else
			reg_val &= ~SPI_CFG3_IPM_XMODE_EN;
	}

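	/* pick single/dual/quad pin mode from the widest phase of the op */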
	if (op->addr.buswidth == 2 ||
	    op->dummy.buswidth == 2 ||
	    op->data.buswidth == 2)
		nio = 2;
	else if (op->addr.buswidth == 4 ||
		 op->dummy.buswidth == 4 ||
		 op->data.buswidth == 4)
		nio = 4;
	else
		nio = 1;

	reg_val &= ~SPI_CFG3_IPM_CMD_PIN_MODE_MASK;
	reg_val |= PIN_MODE_CFG(nio);

	reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN;
	if (op->data.dir == SPI_MEM_DATA_IN)
		reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_DIR;
	else
		reg_val &= ~SPI_CFG3_IPM_HALF_DUPLEX_DIR;
	writel(reg_val, mdata->base + SPI_CFG3_IPM_REG);

	tx_size = 1 + op->addr.nbytes + op->dummy.nbytes;
	if (op->data.dir == SPI_MEM_DATA_OUT)
		tx_size += op->data.nbytes;

	tx_size = max_t(u32, tx_size, 32);

	tx_tmp_buf = kzalloc(tx_size, GFP_KERNEL | GFP_DMA);
	if (!tx_tmp_buf) {
		mdata->use_spimem = false;
		return -ENOMEM;
	}

	tx_tmp_buf[0] = op->cmd.opcode;

	if (op->addr.nbytes) {
		int i;

		for (i = 0; i < op->addr.nbytes; i++)
			tx_tmp_buf[i + 1] = op->addr.val >>
					(8 * (op->addr.nbytes - i - 1));
	}

	if (op->dummy.nbytes)
		memset(tx_tmp_buf + op->addr.nbytes + 1,
		       0xff,
		       op->dummy.nbytes);

	if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
		memcpy(tx_tmp_buf + op->dummy.nbytes + op->addr.nbytes + 1,
		       op->data.buf.out,
		       op->data.nbytes);

	mdata->tx_dma = dma_map_single(mdata->dev, tx_tmp_buf,
				       tx_size, DMA_TO_DEVICE);
	if (dma_mapping_error(mdata->dev, mdata->tx_dma)) {
		ret = -ENOMEM;
		goto err_exit;
	}

	if (op->data.dir == SPI_MEM_DATA_IN) {
		if (!IS_ALIGNED((size_t)op->data.buf.in, 4)) {
			rx_tmp_buf = kzalloc(op->data.nbytes,
					     GFP_KERNEL | GFP_DMA);
			if (!rx_tmp_buf) {
				ret = -ENOMEM;
				goto unmap_tx_dma;
			}
		} else {
			rx_tmp_buf = op->data.buf.in;
		}

		mdata->rx_dma = dma_map_single(mdata->dev,
					       rx_tmp_buf,
					       op->data.nbytes,
					       DMA_FROM_DEVICE);
		if (dma_mapping_error(mdata->dev, mdata->rx_dma)) {
			ret = -ENOMEM;
			goto kfree_rx_tmp_buf;
		}
	}

	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val |= SPI_CMD_TX_DMA;
	if (op->data.dir == SPI_MEM_DATA_IN)
		reg_val |= SPI_CMD_RX_DMA;
	writel(reg_val, mdata->base + SPI_CMD_REG);

	mtk_spi_mem_setup_dma_xfer(mem->spi->master, op);

	mtk_spi_enable_transfer(mem->spi->master);

	/* Wait for the interrupt. */
	ret = mtk_spi_transfer_wait(mem, op);
	if (ret)
		goto unmap_rx_dma;

	/* spi disable dma */
	reg_val = readl(mdata->base + SPI_CMD_REG);
	reg_val &= ~SPI_CMD_TX_DMA;
	if (op->data.dir == SPI_MEM_DATA_IN)
		reg_val &= ~SPI_CMD_RX_DMA;
	writel(reg_val, mdata->base + SPI_CMD_REG);

unmap_rx_dma:
	if (op->data.dir == SPI_MEM_DATA_IN) {
		dma_unmap_single(mdata->dev, mdata->rx_dma,
				 op->data.nbytes, DMA_FROM_DEVICE);
		if (!IS_ALIGNED((size_t)op->data.buf.in, 4))
			memcpy(op->data.buf.in, rx_tmp_buf, op->data.nbytes);
	}
kfree_rx_tmp_buf:
	if (op->data.dir == SPI_MEM_DATA_IN &&
	    !IS_ALIGNED((size_t)op->data.buf.in, 4))
		kfree(rx_tmp_buf);
unmap_tx_dma:
	dma_unmap_single(mdata->dev, mdata->tx_dma,
			 tx_size, DMA_TO_DEVICE);
err_exit:
	kfree(tx_tmp_buf);
	mdata->use_spimem = false;

	return ret;
}

static const struct spi_controller_mem_ops mtk_spi_mem_ops = {
	.adjust_op_size = mtk_spi_mem_adjust_op_size,
	.supports_op = mtk_spi_mem_supports_op,
	.exec_op = mtk_spi_mem_exec_op,
};

static int mtk_spi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct spi_master *master;
	struct mtk_spi *mdata;
	int i, irq, ret, addr_bits;

	master = devm_spi_alloc_master(dev, sizeof(*mdata));
	if (!master)
		return dev_err_probe(dev, -ENOMEM, "failed to alloc spi master\n");

	master->auto_runtime_pm = true;
	master->dev.of_node = dev->of_node;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;

	master->set_cs = mtk_spi_set_cs;
	master->prepare_message = mtk_spi_prepare_message;
	master->transfer_one = mtk_spi_transfer_one;
	master->can_dma = mtk_spi_can_dma;
	master->setup = mtk_spi_setup;
	master->set_cs_timing = mtk_spi_set_hw_cs_timing;
	master->use_gpio_descriptors = true;

	mdata = spi_master_get_devdata(master);
	mdata->dev_comp = device_get_match_data(dev);

	if (mdata->dev_comp->enhance_timing)
		master->mode_bits |= SPI_CS_HIGH;

	if (mdata->dev_comp->must_tx)
		master->flags = SPI_MASTER_MUST_TX;
	if (mdata->dev_comp->ipm_design)
		master->mode_bits |= SPI_LOOP;

	if (mdata->dev_comp->ipm_design) {
		mdata->dev = dev;
		master->mem_ops = &mtk_spi_mem_ops;
		init_completion(&mdata->spimem_done);
	}

	if (mdata->dev_comp->need_pad_sel) {
		mdata->pad_num = of_property_count_u32_elems(dev->of_node,
			"mediatek,pad-select");
		if (mdata->pad_num < 0)
			return dev_err_probe(dev, -EINVAL,
				"No 'mediatek,pad-select' property\n");

		mdata->pad_sel = devm_kmalloc_array(dev, mdata->pad_num,
						    sizeof(u32), GFP_KERNEL);
		if (!mdata->pad_sel)
			return -ENOMEM;

		for (i = 0; i < mdata->pad_num; i++) {
			of_property_read_u32_index(dev->of_node,
						   "mediatek,pad-select",
						   i, &mdata->pad_sel[i]);
			if (mdata->pad_sel[i] > MT8173_SPI_MAX_PAD_SEL)
				return dev_err_probe(dev, -EINVAL,
						     "wrong pad-sel[%d]: %u\n",
						     i, mdata->pad_sel[i]);
		}
	}

	platform_set_drvdata(pdev, master);
	mdata->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mdata->base))
		return PTR_ERR(mdata->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	if (!dev->dma_mask)
		dev->dma_mask = &dev->coherent_dma_mask;

	ret = devm_request_irq(dev, irq, mtk_spi_interrupt,
			       IRQF_TRIGGER_NONE, dev_name(dev), master);
	if (ret)
		return dev_err_probe(dev, ret, "failed to register irq\n");

	mdata->parent_clk = devm_clk_get(dev, "parent-clk");
	if (IS_ERR(mdata->parent_clk))
		return dev_err_probe(dev, PTR_ERR(mdata->parent_clk),
				     "failed to get parent-clk\n");

	mdata->sel_clk = devm_clk_get(dev, "sel-clk");
	if (IS_ERR(mdata->sel_clk))
		return dev_err_probe(dev, PTR_ERR(mdata->sel_clk), "failed to get sel-clk\n");

	mdata->spi_clk = devm_clk_get(dev, "spi-clk");
	if (IS_ERR(mdata->spi_clk))
		return dev_err_probe(dev, PTR_ERR(mdata->spi_clk), "failed to get spi-clk\n");

	mdata->spi_hclk = devm_clk_get_optional(dev, "hclk");
	if (IS_ERR(mdata->spi_hclk))
		return dev_err_probe(dev, PTR_ERR(mdata->spi_hclk), "failed to get hclk\n");

	ret = clk_set_parent(mdata->sel_clk, mdata->parent_clk);
	if (ret < 0)
		return dev_err_probe(dev, ret, "failed to clk_set_parent\n");

	ret = clk_prepare_enable(mdata->spi_hclk);
	if (ret < 0)
		return dev_err_probe(dev, ret, "failed to enable hclk\n");

	ret = clk_prepare_enable(mdata->spi_clk);
	if (ret < 0) {
		clk_disable_unprepare(mdata->spi_hclk);
		return dev_err_probe(dev, ret, "failed to enable spi_clk\n");
	}

	mdata->spi_clk_hz = clk_get_rate(mdata->spi_clk);

	if (mdata->dev_comp->no_need_unprepare) {
		clk_disable(mdata->spi_clk);
		clk_disable(mdata->spi_hclk);
	} else {
		clk_disable_unprepare(mdata->spi_clk);
		clk_disable_unprepare(mdata->spi_hclk);
	}

	if (mdata->dev_comp->need_pad_sel) {
		if (mdata->pad_num != master->num_chipselect)
			return dev_err_probe(dev, -EINVAL,
				"pad_num does not match num_chipselect(%d != %d)\n",
				mdata->pad_num, master->num_chipselect);

		if (!master->cs_gpiods && master->num_chipselect > 1)
			return dev_err_probe(dev, -EINVAL,
				"cs_gpios not specified and num_chipselect > 1\n");
	}

	if (mdata->dev_comp->dma_ext)
		addr_bits = DMA_ADDR_EXT_BITS;
	else
		addr_bits = DMA_ADDR_DEF_BITS;
	ret = dma_set_mask(dev, DMA_BIT_MASK(addr_bits));
	if (ret)
		dev_notice(dev, "SPI dma_set_mask(%d) failed, ret:%d\n",
			   addr_bits, ret);

	pm_runtime_enable(dev);

	ret = devm_spi_register_master(dev, master);
	if (ret) {
		pm_runtime_disable(dev);
		return dev_err_probe(dev, ret, "failed to register master\n");
	}

	return 0;
}

static int mtk_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	pm_runtime_disable(&pdev->dev);

	mtk_spi_reset(mdata);

	if (mdata->dev_comp->no_need_unprepare) {
		clk_unprepare(mdata->spi_clk);
		clk_unprepare(mdata->spi_hclk);
	}

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int mtk_spi_suspend(struct device *dev)
{
	int ret;
	struct spi_master *master = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	ret = spi_master_suspend(master);
	if (ret)
		return ret;

	if (!pm_runtime_suspended(dev)) {
		clk_disable_unprepare(mdata->spi_clk);
		clk_disable_unprepare(mdata->spi_hclk);
	}

	return ret;
}

static int mtk_spi_resume(struct device *dev)
{
	int ret;
	struct spi_master *master = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	if (!pm_runtime_suspended(dev)) {
		ret = clk_prepare_enable(mdata->spi_clk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
			return ret;
		}

		ret = clk_prepare_enable(mdata->spi_hclk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_hclk (%d)\n", ret);
			clk_disable_unprepare(mdata->spi_clk);
			return ret;
		}
	}

	ret = spi_master_resume(master);
	if (ret < 0) {
		clk_disable_unprepare(mdata->spi_clk);
		clk_disable_unprepare(mdata->spi_hclk);
	}

	return ret;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM
static int mtk_spi_runtime_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	if (mdata->dev_comp->no_need_unprepare) {
		clk_disable(mdata->spi_clk);
		clk_disable(mdata->spi_hclk);
	} else {
		clk_disable_unprepare(mdata->spi_clk);
		clk_disable_unprepare(mdata->spi_hclk);
	}

	return 0;
}

static int mtk_spi_runtime_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct mtk_spi *mdata = spi_master_get_devdata(master);
	int ret;

	if (mdata->dev_comp->no_need_unprepare) {
		ret = clk_enable(mdata->spi_clk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
			return ret;
		}
		ret = clk_enable(mdata->spi_hclk);
		if (ret < 0) {
			dev_err(dev, "failed to enable spi_hclk (%d)\n", ret);
			clk_disable(mdata->spi_clk);
			return ret;
		}
	} else {
		ret = clk_prepare_enable(mdata->spi_clk);
		if (ret < 0) {
			dev_err(dev, "failed to prepare_enable spi_clk (%d)\n", ret);
			return ret;
		}

		ret = clk_prepare_enable(mdata->spi_hclk);
		if (ret < 0) {
			dev_err(dev, "failed to prepare_enable spi_hclk (%d)\n", ret);
			clk_disable_unprepare(mdata->spi_clk);
			return ret;
		}
	}

	return 0;
}
#endif /* CONFIG_PM */

static const struct dev_pm_ops mtk_spi_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(mtk_spi_suspend, mtk_spi_resume)
	SET_RUNTIME_PM_OPS(mtk_spi_runtime_suspend,
			   mtk_spi_runtime_resume, NULL)
};

static struct platform_driver mtk_spi_driver = {
	.driver = {
		.name = "mtk-spi",
		.pm	= &mtk_spi_pm,
		.of_match_table = mtk_spi_of_match,
	},
	.probe = mtk_spi_probe,
	.remove = mtk_spi_remove,
};

module_platform_driver(mtk_spi_driver);

MODULE_DESCRIPTION("MTK SPI Controller driver");
MODULE_AUTHOR("Leilk Liu <leilk.liu@mediatek.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:mtk-spi");