cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

spi-ingenic.c (13977B)


// SPDX-License-Identifier: GPL-2.0
/*
 * SPI bus driver for the Ingenic SoCs
 * Copyright (c) 2017-2021 Artur Rojek <contact@artur-rojek.eu>
 * Copyright (c) 2017-2021 Paul Cercueil <paul@crapouillou.net>
 * Copyright (c) 2022 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>

#define REG_SSIDR	0x0
#define REG_SSICR0	0x4
#define REG_SSICR1	0x8
#define REG_SSISR	0xc
#define REG_SSIGR	0x18

#define REG_SSICR0_TENDIAN_LSB		BIT(19)
#define REG_SSICR0_RENDIAN_LSB		BIT(17)
#define REG_SSICR0_SSIE			BIT(15)
#define REG_SSICR0_LOOP			BIT(10)
#define REG_SSICR0_EACLRUN		BIT(7)
#define REG_SSICR0_FSEL			BIT(6)
#define REG_SSICR0_TFLUSH		BIT(2)
#define REG_SSICR0_RFLUSH		BIT(1)

#define REG_SSICR1_FRMHL_MASK		(BIT(31) | BIT(30))
#define REG_SSICR1_FRMHL		BIT(30)
#define REG_SSICR1_LFST			BIT(25)
#define REG_SSICR1_UNFIN		BIT(23)
#define REG_SSICR1_PHA			BIT(1)
#define REG_SSICR1_POL			BIT(0)

#define REG_SSISR_END			BIT(7)
#define REG_SSISR_BUSY			BIT(6)
#define REG_SSISR_TFF			BIT(5)
#define REG_SSISR_RFE			BIT(4)
#define REG_SSISR_RFHF			BIT(2)
#define REG_SSISR_UNDR			BIT(1)
#define REG_SSISR_OVER			BIT(0)

#define SPI_INGENIC_FIFO_SIZE		128u

struct jz_soc_info {
	u32 bits_per_word_mask;
	struct reg_field flen_field;
	bool has_trendian;

	unsigned int max_speed_hz;
	unsigned int max_native_cs;
};

struct ingenic_spi {
	const struct jz_soc_info *soc_info;
	struct clk *clk;
	struct resource *mem_res;

	struct regmap *map;
	struct regmap_field *flen_field;
};

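/*
 * Poll the status register until the bits in @mask match @condition,
 * checking every 100 us and giving up after 10 ms.
 */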
static int spi_ingenic_wait(struct ingenic_spi *priv,
			    unsigned long mask,
			    bool condition)
{
	unsigned int val;

	return regmap_read_poll_timeout(priv->map, REG_SSISR, val,
					!!(val & mask) == condition,
					100, 10000);
}

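/*
 * Chip-select handling: when @disable is set, clear UNFIN so the
 * controller can finish the current frame, clear the underrun/overrun
 * flags and wait for the END status bit; otherwise set UNFIN to keep
 * the frame open.  Both FIFOs are flushed in either case.
 */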
static void spi_ingenic_set_cs(struct spi_device *spi, bool disable)
{
	struct ingenic_spi *priv = spi_controller_get_devdata(spi->controller);

	if (disable) {
		regmap_clear_bits(priv->map, REG_SSICR1, REG_SSICR1_UNFIN);
		regmap_clear_bits(priv->map, REG_SSISR,
				  REG_SSISR_UNDR | REG_SSISR_OVER);

		spi_ingenic_wait(priv, REG_SSISR_END, true);
	} else {
		regmap_set_bits(priv->map, REG_SSICR1, REG_SSICR1_UNFIN);
	}

	regmap_set_bits(priv->map, REG_SSICR0,
			REG_SSICR0_RFLUSH | REG_SSICR0_TFLUSH);
}

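/*
 * Program the clock divider and word length for the next transfer.
 * The bit clock is clk_hz / (2 * (cdiv + 1)), with cdiv clamped to the
 * 8-bit SSIGR field, and the FLEN register field is written with the
 * word length minus 2.
 */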
static void spi_ingenic_prepare_transfer(struct ingenic_spi *priv,
					 struct spi_device *spi,
					 struct spi_transfer *xfer)
{
	unsigned long clk_hz = clk_get_rate(priv->clk);
	u32 cdiv, speed_hz = xfer->speed_hz ?: spi->max_speed_hz,
	    bits_per_word = xfer->bits_per_word ?: spi->bits_per_word;

	cdiv = clk_hz / (speed_hz * 2);
	cdiv = clamp(cdiv, 1u, 0x100u) - 1;

	regmap_write(priv->map, REG_SSIGR, cdiv);

	regmap_field_write(priv->flen_field, bits_per_word - 2);
}

static void spi_ingenic_finalize_transfer(void *controller)
{
	spi_finalize_current_transfer(controller);
}

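/*
 * Set up and submit a slave DMA descriptor for one direction of a
 * transfer.  Register access width and burst size follow the word size
 * (8/16/32 bits).  Only the RX descriptor gets a completion callback,
 * presumably because reception finishes last and can safely finalize
 * the SPI transfer.
 */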
static struct dma_async_tx_descriptor *
spi_ingenic_prepare_dma(struct spi_controller *ctlr, struct dma_chan *chan,
			struct sg_table *sg, enum dma_transfer_direction dir,
			unsigned int bits)
{
	struct ingenic_spi *priv = spi_controller_get_devdata(ctlr);
	struct dma_slave_config cfg = {
		.direction = dir,
		.src_addr = priv->mem_res->start + REG_SSIDR,
		.dst_addr = priv->mem_res->start + REG_SSIDR,
	};
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;
	int ret;

	if (bits > 16) {
		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		cfg.src_maxburst = cfg.dst_maxburst = 4;
	} else if (bits > 8) {
		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		cfg.src_maxburst = cfg.dst_maxburst = 2;
	} else {
		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		cfg.src_maxburst = cfg.dst_maxburst = 1;
	}

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ERR_PTR(ret);

	desc = dmaengine_prep_slave_sg(chan, sg->sgl, sg->nents, dir,
				       DMA_PREP_INTERRUPT);
	if (!desc)
		return ERR_PTR(-ENOMEM);

	if (dir == DMA_DEV_TO_MEM) {
		desc->callback = spi_ingenic_finalize_transfer;
		desc->callback_param = ctlr;
	}

	cookie = dmaengine_submit(desc);

	ret = dma_submit_error(cookie);
	if (ret) {
		dmaengine_desc_free(desc);
		return ERR_PTR(ret);
	}

	return desc;
}

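/*
 * Full-duplex DMA transfer: the RX channel is prepared and issued
 * before TX, so the read side is armed before any words are pushed
 * out.  Returning 1 tells the SPI core that the transfer completes
 * asynchronously (via the RX DMA callback).
 */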
static int spi_ingenic_dma_tx(struct spi_controller *ctlr,
			      struct spi_transfer *xfer, unsigned int bits)
{
	struct dma_async_tx_descriptor *rx_desc, *tx_desc;

	rx_desc = spi_ingenic_prepare_dma(ctlr, ctlr->dma_rx,
					  &xfer->rx_sg, DMA_DEV_TO_MEM, bits);
	if (IS_ERR(rx_desc))
		return PTR_ERR(rx_desc);

	tx_desc = spi_ingenic_prepare_dma(ctlr, ctlr->dma_tx,
					  &xfer->tx_sg, DMA_MEM_TO_DEV, bits);
	if (IS_ERR(tx_desc)) {
		dmaengine_terminate_async(ctlr->dma_rx);
		dmaengine_desc_free(rx_desc);
		return PTR_ERR(tx_desc);
	}

	dma_async_issue_pending(ctlr->dma_rx);
	dma_async_issue_pending(ctlr->dma_tx);

	return 1;
}

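/*
 * PIO transfer helpers for 8-, 16- and 32-bit words, generated by the
 * macro below.  The TX FIFO is first filled with up to
 * SPI_INGENIC_FIFO_SIZE words; the loop then reads back each received
 * word and, while data remains, writes one more word so the FIFO stays
 * full for the duration of the transfer.
 */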
#define SPI_INGENIC_TX(x)							\
static int spi_ingenic_tx##x(struct ingenic_spi *priv,				\
			     struct spi_transfer *xfer)				\
{										\
	unsigned int count = xfer->len / (x / 8);				\
	unsigned int prefill = min(count, SPI_INGENIC_FIFO_SIZE);		\
	const u##x *tx_buf = xfer->tx_buf;					\
	u##x *rx_buf = xfer->rx_buf;						\
	unsigned int i, val;							\
	int err;								\
										\
	/* Fill up the TX fifo */						\
	for (i = 0; i < prefill; i++) {						\
		val = tx_buf ? tx_buf[i] : 0;					\
										\
		regmap_write(priv->map, REG_SSIDR, val);			\
	}									\
										\
	for (i = 0; i < count; i++) {						\
		err = spi_ingenic_wait(priv, REG_SSISR_RFE, false);		\
		if (err)							\
			return err;						\
										\
		regmap_read(priv->map, REG_SSIDR, &val);			\
		if (rx_buf)							\
			rx_buf[i] = val;					\
										\
		if (i < count - prefill) {					\
			val = tx_buf ? tx_buf[i + prefill] : 0;			\
										\
			regmap_write(priv->map, REG_SSIDR, val);		\
		}								\
	}									\
										\
	return 0;								\
}
SPI_INGENIC_TX(8)
SPI_INGENIC_TX(16)
SPI_INGENIC_TX(32)
#undef SPI_INGENIC_TX

static int spi_ingenic_transfer_one(struct spi_controller *ctlr,
				    struct spi_device *spi,
				    struct spi_transfer *xfer)
{
	struct ingenic_spi *priv = spi_controller_get_devdata(ctlr);
	unsigned int bits = xfer->bits_per_word ?: spi->bits_per_word;
	bool can_dma = ctlr->can_dma && ctlr->can_dma(ctlr, spi, xfer);

	spi_ingenic_prepare_transfer(priv, spi, xfer);

	if (ctlr->cur_msg_mapped && can_dma)
		return spi_ingenic_dma_tx(ctlr, xfer, bits);

	if (bits > 16)
		return spi_ingenic_tx32(priv, xfer);

	if (bits > 8)
		return spi_ingenic_tx16(priv, xfer);

	return spi_ingenic_tx8(priv, xfer);
}

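/*
 * Translate the SPI mode flags of the message's device into controller
 * register bits: SPI_LSB_FIRST maps to the TENDIAN/RENDIAN bits (or to
 * LFST on SoCs without them), SPI_LOOP to the loopback bit, the native
 * chip-select index to FSEL, SPI_CS_HIGH to the per-CS FRMHL bit, and
 * CPHA/CPOL to the PHA/POL bits.
 */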
static int spi_ingenic_prepare_message(struct spi_controller *ctlr,
				       struct spi_message *message)
{
	struct ingenic_spi *priv = spi_controller_get_devdata(ctlr);
	struct spi_device *spi = message->spi;
	unsigned int cs = REG_SSICR1_FRMHL << spi->chip_select;
	unsigned int ssicr0_mask = REG_SSICR0_LOOP | REG_SSICR0_FSEL;
	unsigned int ssicr1_mask = REG_SSICR1_PHA | REG_SSICR1_POL | cs;
	unsigned int ssicr0 = 0, ssicr1 = 0;

	if (priv->soc_info->has_trendian) {
		ssicr0_mask |= REG_SSICR0_RENDIAN_LSB | REG_SSICR0_TENDIAN_LSB;

		if (spi->mode & SPI_LSB_FIRST)
			ssicr0 |= REG_SSICR0_RENDIAN_LSB | REG_SSICR0_TENDIAN_LSB;
	} else {
		ssicr1_mask |= REG_SSICR1_LFST;

		if (spi->mode & SPI_LSB_FIRST)
			ssicr1 |= REG_SSICR1_LFST;
	}

	if (spi->mode & SPI_LOOP)
		ssicr0 |= REG_SSICR0_LOOP;
	if (spi->chip_select)
		ssicr0 |= REG_SSICR0_FSEL;

	if (spi->mode & SPI_CPHA)
		ssicr1 |= REG_SSICR1_PHA;
	if (spi->mode & SPI_CPOL)
		ssicr1 |= REG_SSICR1_POL;
	if (spi->mode & SPI_CS_HIGH)
		ssicr1 |= cs;

	regmap_update_bits(priv->map, REG_SSICR0, ssicr0_mask, ssicr0);
	regmap_update_bits(priv->map, REG_SSICR1, ssicr1_mask, ssicr1);

	return 0;
}

static int spi_ingenic_prepare_hardware(struct spi_controller *ctlr)
{
	struct ingenic_spi *priv = spi_controller_get_devdata(ctlr);
	int ret;

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	regmap_write(priv->map, REG_SSICR0, REG_SSICR0_EACLRUN);
	regmap_write(priv->map, REG_SSICR1, 0);
	regmap_write(priv->map, REG_SSISR, 0);
	regmap_set_bits(priv->map, REG_SSICR0, REG_SSICR0_SSIE);

	return 0;
}

static int spi_ingenic_unprepare_hardware(struct spi_controller *ctlr)
{
	struct ingenic_spi *priv = spi_controller_get_devdata(ctlr);

	regmap_clear_bits(priv->map, REG_SSICR0, REG_SSICR0_SSIE);

	clk_disable_unprepare(priv->clk);

	return 0;
}

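/*
 * Use DMA only if the DMA controller reports no scatter-gather burst
 * limit, or if the transfer fits within max_sg_burst FIFO-sized
 * chunks; otherwise fall back to PIO.
 */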
static bool spi_ingenic_can_dma(struct spi_controller *ctlr,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	struct dma_slave_caps caps;
	int ret;

	ret = dma_get_slave_caps(ctlr->dma_tx, &caps);
	if (ret) {
		dev_err(&spi->dev, "Unable to get slave caps: %d\n", ret);
		return false;
	}

	return !caps.max_sg_burst ||
		xfer->len <= caps.max_sg_burst * SPI_INGENIC_FIFO_SIZE;
}

static int spi_ingenic_request_dma(struct spi_controller *ctlr,
				   struct device *dev)
{
	ctlr->dma_tx = dma_request_slave_channel(dev, "tx");
	if (!ctlr->dma_tx)
		return -ENODEV;

	ctlr->dma_rx = dma_request_slave_channel(dev, "rx");

	if (!ctlr->dma_rx)
		return -ENODEV;

	ctlr->can_dma = spi_ingenic_can_dma;

	return 0;
}

static void spi_ingenic_release_dma(void *data)
{
	struct spi_controller *ctlr = data;

	if (ctlr->dma_tx)
		dma_release_channel(ctlr->dma_tx);
	if (ctlr->dma_rx)
		dma_release_channel(ctlr->dma_rx);
}

static const struct regmap_config spi_ingenic_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = REG_SSIGR,
};

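/*
 * Probe: fetch the per-SoC parameters from the OF match data, map the
 * registers behind a regmap, grab the functional clock, request the
 * "tx"/"rx" DMA channels (falling back to PIO with a warning if they
 * are unavailable) and register the SPI controller.
 */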
static int spi_ingenic_probe(struct platform_device *pdev)
{
	const struct jz_soc_info *pdata;
	struct device *dev = &pdev->dev;
	struct spi_controller *ctlr;
	struct ingenic_spi *priv;
	void __iomem *base;
	int num_cs, ret;

	pdata = of_device_get_match_data(dev);
	if (!pdata) {
		dev_err(dev, "Missing platform data.\n");
		return -EINVAL;
	}

	ctlr = devm_spi_alloc_master(dev, sizeof(*priv));
	if (!ctlr) {
		dev_err(dev, "Unable to allocate SPI controller.\n");
		return -ENOMEM;
	}

	priv = spi_controller_get_devdata(ctlr);
	priv->soc_info = pdata;

	priv->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(priv->clk)) {
		return dev_err_probe(dev, PTR_ERR(priv->clk),
				     "Unable to get clock.\n");
	}

	base = devm_platform_get_and_ioremap_resource(pdev, 0, &priv->mem_res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	priv->map = devm_regmap_init_mmio(dev, base, &spi_ingenic_regmap_config);
	if (IS_ERR(priv->map))
		return PTR_ERR(priv->map);

	priv->flen_field = devm_regmap_field_alloc(dev, priv->map,
						   pdata->flen_field);
	if (IS_ERR(priv->flen_field))
		return PTR_ERR(priv->flen_field);

	if (device_property_read_u32(dev, "num-cs", &num_cs))
		num_cs = pdata->max_native_cs;

	platform_set_drvdata(pdev, ctlr);

	ctlr->prepare_transfer_hardware = spi_ingenic_prepare_hardware;
	ctlr->unprepare_transfer_hardware = spi_ingenic_unprepare_hardware;
	ctlr->prepare_message = spi_ingenic_prepare_message;
	ctlr->set_cs = spi_ingenic_set_cs;
	ctlr->transfer_one = spi_ingenic_transfer_one;
	ctlr->mode_bits = SPI_MODE_3 | SPI_LSB_FIRST | SPI_LOOP | SPI_CS_HIGH;
	ctlr->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
	ctlr->max_dma_len = SPI_INGENIC_FIFO_SIZE;
	ctlr->bits_per_word_mask = pdata->bits_per_word_mask;
	ctlr->min_speed_hz = 7200;
	ctlr->max_speed_hz = pdata->max_speed_hz;
	ctlr->use_gpio_descriptors = true;
	ctlr->max_native_cs = pdata->max_native_cs;
	ctlr->num_chipselect = num_cs;
	ctlr->dev.of_node = pdev->dev.of_node;

	if (spi_ingenic_request_dma(ctlr, dev))
		dev_warn(dev, "DMA not available.\n");

	ret = devm_add_action_or_reset(dev, spi_ingenic_release_dma, ctlr);
	if (ret) {
		dev_err(dev, "Unable to add action.\n");
		return ret;
	}

	ret = devm_spi_register_controller(dev, ctlr);
	if (ret)
		dev_err(dev, "Unable to register SPI controller.\n");

	return ret;
}

static const struct jz_soc_info jz4750_soc_info = {
	.bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 17),
	.flen_field = REG_FIELD(REG_SSICR1, 4, 7),
	.has_trendian = false,

	.max_speed_hz = 54000000,
	.max_native_cs = 2,
};

static const struct jz_soc_info jz4780_soc_info = {
	.bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 32),
	.flen_field = REG_FIELD(REG_SSICR1, 3, 7),
	.has_trendian = true,

	.max_speed_hz = 54000000,
	.max_native_cs = 2,
};

static const struct jz_soc_info x1000_soc_info = {
	.bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 32),
	.flen_field = REG_FIELD(REG_SSICR1, 3, 7),
	.has_trendian = true,

	.max_speed_hz = 50000000,
	.max_native_cs = 2,
};

static const struct jz_soc_info x2000_soc_info = {
	.bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 32),
	.flen_field = REG_FIELD(REG_SSICR1, 3, 7),
	.has_trendian = true,

	.max_speed_hz = 50000000,
	.max_native_cs = 1,
};

static const struct of_device_id spi_ingenic_of_match[] = {
	{ .compatible = "ingenic,jz4750-spi", .data = &jz4750_soc_info },
	{ .compatible = "ingenic,jz4775-spi", .data = &jz4780_soc_info },
	{ .compatible = "ingenic,jz4780-spi", .data = &jz4780_soc_info },
	{ .compatible = "ingenic,x1000-spi", .data = &x1000_soc_info },
	{ .compatible = "ingenic,x2000-spi", .data = &x2000_soc_info },
	{}
};
MODULE_DEVICE_TABLE(of, spi_ingenic_of_match);

static struct platform_driver spi_ingenic_driver = {
	.driver = {
		.name = "spi-ingenic",
		.of_match_table = spi_ingenic_of_match,
	},
	.probe = spi_ingenic_probe,
};

module_platform_driver(spi_ingenic_driver);
MODULE_DESCRIPTION("SPI bus driver for the Ingenic SoCs");
MODULE_AUTHOR("Artur Rojek <contact@artur-rojek.eu>");
MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>");
MODULE_AUTHOR("周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>");
MODULE_LICENSE("GPL");