cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

sl3516-ce-core.c (13480B)


// SPDX-License-Identifier: GPL-2.0
/*
 * sl3516-ce-core.c - hardware cryptographic offloader for Storlink SL3516 SoC
 *
 * Copyright (C) 2021 Corentin Labbe <clabbe@baylibre.com>
 *
 * Core file which registers crypto algorithms supported by the CryptoEngine
 */
#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/debugfs.h>
#include <linux/dev_printk.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <crypto/internal/rng.h>
#include <crypto/internal/skcipher.h>

#include "sl3516-ce.h"

static int sl3516_ce_desc_init(struct sl3516_ce_dev *ce)
{
	const size_t sz = sizeof(struct descriptor) * MAXDESC;
	int i;

	ce->tx = dma_alloc_coherent(ce->dev, sz, &ce->dtx, GFP_KERNEL);
	if (!ce->tx)
		return -ENOMEM;
	ce->rx = dma_alloc_coherent(ce->dev, sz, &ce->drx, GFP_KERNEL);
	if (!ce->rx)
		goto err_rx;

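	/*
	 * Chain the descriptors into two circular rings: each descriptor
	 * holds the DMA address of the next one, and the last wraps back
	 * to the first. Every slot starts out owned by the CPU (CE_CPU).
	 */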
	for (i = 0; i < MAXDESC; i++) {
		ce->tx[i].frame_ctrl.bits.own = CE_CPU;
		ce->tx[i].next_desc.next_descriptor = ce->dtx + (i + 1) * sizeof(struct descriptor);
	}
	ce->tx[MAXDESC - 1].next_desc.next_descriptor = ce->dtx;

	for (i = 0; i < MAXDESC; i++) {
		ce->rx[i].frame_ctrl.bits.own = CE_CPU;
		ce->rx[i].next_desc.next_descriptor = ce->drx + (i + 1) * sizeof(struct descriptor);
	}
	ce->rx[MAXDESC - 1].next_desc.next_descriptor = ce->drx;

	ce->pctrl = dma_alloc_coherent(ce->dev, sizeof(struct pkt_control_ecb),
				       &ce->dctrl, GFP_KERNEL);
	if (!ce->pctrl)
		goto err_pctrl;

	return 0;
err_pctrl:
	dma_free_coherent(ce->dev, sz, ce->rx, ce->drx);
err_rx:
	dma_free_coherent(ce->dev, sz, ce->tx, ce->dtx);
	return -ENOMEM;
}

static void sl3516_ce_free_descs(struct sl3516_ce_dev *ce)
{
	const size_t sz = sizeof(struct descriptor) * MAXDESC;

	dma_free_coherent(ce->dev, sz, ce->tx, ce->dtx);
	dma_free_coherent(ce->dev, sz, ce->rx, ce->drx);
	dma_free_coherent(ce->dev, sizeof(struct pkt_control_ecb), ce->pctrl,
			  ce->dctrl);
}

static void start_dma_tx(struct sl3516_ce_dev *ce)
{
	u32 v;

	v = TXDMA_CTRL_START | TXDMA_CTRL_CHAIN_MODE | TXDMA_CTRL_CONTINUE |
		TXDMA_CTRL_INT_FAIL | TXDMA_CTRL_INT_PERR | TXDMA_CTRL_BURST_UNK;

	writel(v, ce->base + IPSEC_TXDMA_CTRL);
}

static void start_dma_rx(struct sl3516_ce_dev *ce)
{
	u32 v;

	v = RXDMA_CTRL_START | RXDMA_CTRL_CHAIN_MODE | RXDMA_CTRL_CONTINUE |
		RXDMA_CTRL_BURST_UNK | RXDMA_CTRL_INT_FINISH |
		RXDMA_CTRL_INT_FAIL | RXDMA_CTRL_INT_PERR |
		RXDMA_CTRL_INT_EOD | RXDMA_CTRL_INT_EOF;

	writel(v, ce->base + IPSEC_RXDMA_CTRL);
}

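/*
 * The rings are consumed round-robin: ce->ctx and ce->crx index the next
 * free TX/RX slot and wrap at MAXDESC.
 */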
static struct descriptor *get_desc_tx(struct sl3516_ce_dev *ce)
{
	struct descriptor *dd;

	dd = &ce->tx[ce->ctx];
	ce->ctx++;
	if (ce->ctx >= MAXDESC)
		ce->ctx = 0;
	return dd;
}

static struct descriptor *get_desc_rx(struct sl3516_ce_dev *ce)
{
	struct descriptor *rdd;

	rdd = &ce->rx[ce->crx];
	ce->crx++;
	if (ce->crx >= MAXDESC)
		ce->crx = 0;
	return rdd;
}

int sl3516_ce_run_task(struct sl3516_ce_dev *ce, struct sl3516_ce_cipher_req_ctx *rctx,
		       const char *name)
{
	struct descriptor *dd, *rdd = NULL;
	u32 v;
	int i, err = 0;

	ce->stat_req++;

	reinit_completion(&ce->complete);
	ce->status = 0;

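	/*
	 * Queue one RX descriptor per destination SG entry; the engine
	 * writes its output there. Only the last RX descriptor gets the
	 * end-of-frame interrupt enable (eofie) bit, set just after the
	 * loop.
	 */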
	for (i = 0; i < rctx->nr_sgd; i++) {
		dev_dbg(ce->dev, "%s handle DST SG %d/%d len=%d\n", __func__,
			i, rctx->nr_sgd, rctx->t_dst[i].len);
		rdd = get_desc_rx(ce);
		rdd->buf_adr = rctx->t_dst[i].addr;
		rdd->frame_ctrl.bits.buffer_size = rctx->t_dst[i].len;
		rdd->frame_ctrl.bits.own = CE_DMA;
	}
	rdd->next_desc.bits.eofie = 1;

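	/*
	 * For each source SG entry, queue two TX descriptors: the first
	 * carries the control packet at ce->dctrl (mode, key, tqflag), the
	 * second carries the payload itself. Ownership is handed to the
	 * engine (CE_DMA) last, so a descriptor only becomes visible once
	 * it is fully built.
	 */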
	for (i = 0; i < rctx->nr_sgs; i++) {
		dev_dbg(ce->dev, "%s handle SRC SG %d/%d len=%d\n", __func__,
			i, rctx->nr_sgs, rctx->t_src[i].len);
		rctx->h->algorithm_len = rctx->t_src[i].len;

		dd = get_desc_tx(ce);
		dd->frame_ctrl.raw = 0;
		dd->flag_status.raw = 0;
		dd->frame_ctrl.bits.buffer_size = rctx->pctrllen;
		dd->buf_adr = ce->dctrl;
		dd->flag_status.tx_flag.tqflag = rctx->tqflag;
		dd->next_desc.bits.eofie = 0;
		dd->next_desc.bits.dec = 0;
		dd->next_desc.bits.sof_eof = DESC_FIRST | DESC_LAST;
		dd->frame_ctrl.bits.own = CE_DMA;

		dd = get_desc_tx(ce);
		dd->frame_ctrl.raw = 0;
		dd->flag_status.raw = 0;
		dd->frame_ctrl.bits.buffer_size = rctx->t_src[i].len;
		dd->buf_adr = rctx->t_src[i].addr;
		dd->flag_status.tx_flag.tqflag = 0;
		dd->next_desc.bits.eofie = 0;
		dd->next_desc.bits.dec = 0;
		dd->next_desc.bits.sof_eof = DESC_FIRST | DESC_LAST;
		dd->frame_ctrl.bits.own = CE_DMA;
		start_dma_tx(ce);
		start_dma_rx(ce);
	}
	wait_for_completion_interruptible_timeout(&ce->complete,
						  msecs_to_jiffies(5000));
	if (ce->status == 0) {
		dev_err(ce->dev, "DMA timeout for %s\n", name);
		err = -EFAULT;
	}
	v = readl(ce->base + IPSEC_STATUS_REG);
	if (v & 0xFFF) {
		dev_err(ce->dev, "IPSEC_STATUS_REG %x\n", v);
		err = -EFAULT;
	}

	return err;
}

static irqreturn_t ce_irq_handler(int irq, void *data)
{
	struct sl3516_ce_dev *ce = (struct sl3516_ce_dev *)data;
	u32 v;

	ce->stat_irq++;

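	/* Reading then writing back the status register acks all pending bits. */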
	v = readl(ce->base + IPSEC_DMA_STATUS);
	writel(v, ce->base + IPSEC_DMA_STATUS);

	if (v & DMA_STATUS_TS_DERR)
		dev_err(ce->dev, "AHB bus Error While Tx !!!\n");
	if (v & DMA_STATUS_TS_PERR)
		dev_err(ce->dev, "Tx Descriptor Protocol Error !!!\n");
	if (v & DMA_STATUS_RS_DERR)
		dev_err(ce->dev, "AHB bus Error While Rx !!!\n");
	if (v & DMA_STATUS_RS_PERR)
		dev_err(ce->dev, "Rx Descriptor Protocol Error !!!\n");

	if (v & DMA_STATUS_TS_EOFI)
		ce->stat_irq_tx++;
	if (v & DMA_STATUS_RS_EOFI) {
		ce->status = 1;
		complete(&ce->complete);
		ce->stat_irq_rx++;
		return IRQ_HANDLED;
	}

	return IRQ_HANDLED;
}

static struct sl3516_ce_alg_template ce_algs[] = {
{
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.mode = ECB_AES,
	.alg.skcipher = {
		.base = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "ecb-aes-sl3516",
			.cra_priority = 400,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
			.cra_ctxsize = sizeof(struct sl3516_ce_cipher_tfm_ctx),
			.cra_module = THIS_MODULE,
			.cra_alignmask = 0xf,
			.cra_init = sl3516_ce_cipher_init,
			.cra_exit = sl3516_ce_cipher_exit,
		},
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= sl3516_ce_aes_setkey,
		.encrypt	= sl3516_ce_skencrypt,
		.decrypt	= sl3516_ce_skdecrypt,
	}
},
};
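
/*
 * Usage sketch (not part of this driver; key, src_sg, dst_sg, len and the
 * my_done_cb callback are hypothetical): once "ecb(aes)" is registered, a
 * kernel consumer reaches the engine through the generic skcipher API, and
 * the crypto core selects this implementation when its priority wins:
 *
 *	struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
 *	struct skcipher_request *req;
 *
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	skcipher_request_set_callback(req, 0, my_done_cb, &done_data);
 *	skcipher_request_set_crypt(req, src_sg, dst_sg, len, NULL);
 *	err = crypto_skcipher_encrypt(req);
 *
 * Because the alg is CRYPTO_ALG_ASYNC, encrypt may return -EINPROGRESS and
 * finish later through the callback.
 */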

#ifdef CONFIG_CRYPTO_DEV_SL3516_DEBUG
static int sl3516_ce_debugfs_show(struct seq_file *seq, void *v)
{
	struct sl3516_ce_dev *ce = seq->private;
	unsigned int i;

	seq_printf(seq, "HWRNG %lu %lu\n",
		   ce->hwrng_stat_req, ce->hwrng_stat_bytes);
	seq_printf(seq, "IRQ %lu\n", ce->stat_irq);
	seq_printf(seq, "IRQ TX %lu\n", ce->stat_irq_tx);
	seq_printf(seq, "IRQ RX %lu\n", ce->stat_irq_rx);
	seq_printf(seq, "nreq %lu\n", ce->stat_req);
	seq_printf(seq, "fallback SG count TX %lu\n", ce->fallback_sg_count_tx);
	seq_printf(seq, "fallback SG count RX %lu\n", ce->fallback_sg_count_rx);
	seq_printf(seq, "fallback modulo16 %lu\n", ce->fallback_mod16);
	seq_printf(seq, "fallback align16 %lu\n", ce->fallback_align16);
	seq_printf(seq, "fallback not same len %lu\n", ce->fallback_not_same_len);

	for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
		if (!ce_algs[i].ce)
			continue;
		switch (ce_algs[i].type) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
				   ce_algs[i].alg.skcipher.base.cra_driver_name,
				   ce_algs[i].alg.skcipher.base.cra_name,
				   ce_algs[i].stat_req, ce_algs[i].stat_fb);
			break;
		}
	}
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(sl3516_ce_debugfs);
#endif

static int sl3516_ce_register_algs(struct sl3516_ce_dev *ce)
{
	int err;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
		ce_algs[i].ce = ce;
		switch (ce_algs[i].type) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			dev_info(ce->dev, "DEBUG: Register %s\n",
				 ce_algs[i].alg.skcipher.base.cra_name);
			err = crypto_register_skcipher(&ce_algs[i].alg.skcipher);
			if (err) {
				dev_err(ce->dev, "Fail to register %s\n",
					ce_algs[i].alg.skcipher.base.cra_name);
				ce_algs[i].ce = NULL;
				return err;
			}
			break;
		default:
			ce_algs[i].ce = NULL;
			dev_err(ce->dev, "ERROR: tried to register an unknown algo\n");
		}
	}
	return 0;
}

static void sl3516_ce_unregister_algs(struct sl3516_ce_dev *ce)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
		if (!ce_algs[i].ce)
			continue;
		switch (ce_algs[i].type) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			dev_info(ce->dev, "Unregister %d %s\n", i,
				 ce_algs[i].alg.skcipher.base.cra_name);
			crypto_unregister_skcipher(&ce_algs[i].alg.skcipher);
			break;
		}
	}
}

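/*
 * Program the descriptor ring base addresses into the engine and clear any
 * stale DMA status; called on every runtime resume.
 */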
static void sl3516_ce_start(struct sl3516_ce_dev *ce)
{
	ce->ctx = 0;
	ce->crx = 0;
	writel(ce->dtx, ce->base + IPSEC_TXDMA_CURR_DESC);
	writel(ce->drx, ce->base + IPSEC_RXDMA_CURR_DESC);
	writel(0, ce->base + IPSEC_DMA_STATUS);
}

/*
 * Power management strategy: The device is suspended unless a TFM exists for
 * one of the algorithms proposed by this driver.
 */
static int sl3516_ce_pm_suspend(struct device *dev)
{
	struct sl3516_ce_dev *ce = dev_get_drvdata(dev);

	reset_control_assert(ce->reset);
	clk_disable_unprepare(ce->clks);
	return 0;
}

static int sl3516_ce_pm_resume(struct device *dev)
{
	struct sl3516_ce_dev *ce = dev_get_drvdata(dev);
	int err;

	err = clk_prepare_enable(ce->clks);
	if (err) {
		dev_err(ce->dev, "Cannot prepare_enable\n");
		goto error;
	}
	err = reset_control_deassert(ce->reset);
	if (err) {
		dev_err(ce->dev, "Cannot deassert reset control\n");
		goto error;
	}

	sl3516_ce_start(ce);

	return 0;
error:
	sl3516_ce_pm_suspend(dev);
	return err;
}

static const struct dev_pm_ops sl3516_ce_pm_ops = {
	SET_RUNTIME_PM_OPS(sl3516_ce_pm_suspend, sl3516_ce_pm_resume, NULL)
};

static int sl3516_ce_pm_init(struct sl3516_ce_dev *ce)
{
	int err;

	pm_runtime_use_autosuspend(ce->dev);
	pm_runtime_set_autosuspend_delay(ce->dev, 2000);

	err = pm_runtime_set_suspended(ce->dev);
	if (err)
		return err;
	pm_runtime_enable(ce->dev);
	return err;
}

static void sl3516_ce_pm_exit(struct sl3516_ce_dev *ce)
{
	pm_runtime_disable(ce->dev);
}

static int sl3516_ce_probe(struct platform_device *pdev)
{
	struct sl3516_ce_dev *ce;
	int err, irq;
	u32 v;

	ce = devm_kzalloc(&pdev->dev, sizeof(*ce), GFP_KERNEL);
	if (!ce)
		return -ENOMEM;

	ce->dev = &pdev->dev;
	platform_set_drvdata(pdev, ce);

	ce->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(ce->base))
		return PTR_ERR(ce->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	err = devm_request_irq(&pdev->dev, irq, ce_irq_handler, 0, "crypto", ce);
	if (err) {
		dev_err(ce->dev, "Cannot request Crypto Engine IRQ (err=%d)\n", err);
		return err;
	}

	ce->reset = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(ce->reset))
		return dev_err_probe(&pdev->dev, PTR_ERR(ce->reset),
				     "No reset control found\n");
	ce->clks = devm_clk_get(ce->dev, NULL);
	if (IS_ERR(ce->clks)) {
		err = PTR_ERR(ce->clks);
		dev_err(ce->dev, "Cannot get clock err=%d\n", err);
		return err;
	}

	err = sl3516_ce_desc_init(ce);
	if (err)
		return err;

	err = sl3516_ce_pm_init(ce);
	if (err)
		goto error_pm;

	init_completion(&ce->complete);

	ce->engine = crypto_engine_alloc_init(ce->dev, true);
	if (!ce->engine) {
		dev_err(ce->dev, "Cannot allocate engine\n");
		err = -ENOMEM;
		goto error_engine;
	}

	err = crypto_engine_start(ce->engine);
	if (err) {
		dev_err(ce->dev, "Cannot start engine\n");
		goto error_engine;
	}

	err = sl3516_ce_register_algs(ce);
	if (err)
		goto error_alg;

	err = sl3516_ce_rng_register(ce);
	if (err)
		goto error_rng;

	err = pm_runtime_resume_and_get(ce->dev);
	if (err < 0)
		goto error_pmuse;

	v = readl(ce->base + IPSEC_ID);
	dev_info(ce->dev, "SL3516 dev %lx rev %lx\n",
		 v & GENMASK(31, 4),
		 v & GENMASK(3, 0));
	v = readl(ce->base + IPSEC_DMA_DEVICE_ID);
	dev_info(ce->dev, "SL3516 DMA dev %lx rev %lx\n",
		 v & GENMASK(15, 4),
		 v & GENMASK(3, 0));

	pm_runtime_put_sync(ce->dev);

#ifdef CONFIG_CRYPTO_DEV_SL3516_DEBUG
	/* Ignore error of debugfs */
	ce->dbgfs_dir = debugfs_create_dir("sl3516", NULL);
	ce->dbgfs_stats = debugfs_create_file("stats", 0444,
					      ce->dbgfs_dir, ce,
					      &sl3516_ce_debugfs_fops);
#endif

	return 0;
error_pmuse:
	sl3516_ce_rng_unregister(ce);
error_rng:
	sl3516_ce_unregister_algs(ce);
error_alg:
	crypto_engine_exit(ce->engine);
error_engine:
	sl3516_ce_pm_exit(ce);
error_pm:
	sl3516_ce_free_descs(ce);
	return err;
}

static int sl3516_ce_remove(struct platform_device *pdev)
{
	struct sl3516_ce_dev *ce = platform_get_drvdata(pdev);

	sl3516_ce_rng_unregister(ce);
	sl3516_ce_unregister_algs(ce);
	crypto_engine_exit(ce->engine);
	sl3516_ce_pm_exit(ce);
	sl3516_ce_free_descs(ce);

#ifdef CONFIG_CRYPTO_DEV_SL3516_DEBUG
	debugfs_remove_recursive(ce->dbgfs_dir);
#endif

	return 0;
}

static const struct of_device_id sl3516_ce_crypto_of_match_table[] = {
	{ .compatible = "cortina,sl3516-crypto"},
	{}
};
MODULE_DEVICE_TABLE(of, sl3516_ce_crypto_of_match_table);

static struct platform_driver sl3516_ce_driver = {
	.probe		 = sl3516_ce_probe,
	.remove		 = sl3516_ce_remove,
	.driver		 = {
		.name		= "sl3516-crypto",
		.pm		= &sl3516_ce_pm_ops,
		.of_match_table	= sl3516_ce_crypto_of_match_table,
	},
};

module_platform_driver(sl3516_ce_driver);

MODULE_DESCRIPTION("SL3516 cryptographic offloader");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corentin Labbe <clabbe@baylibre.com>");