cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

sl3516-ce-cipher.c (10605B)


// SPDX-License-Identifier: GPL-2.0
/*
 * sl3516-ce-cipher.c - hardware cryptographic offloader for Storlink SL3516 SoC
 *
 * Copyright (C) 2021 Corentin LABBE <clabbe@baylibre.com>
 *
 * This file adds support for the AES cipher with 128/192/256-bit key sizes
 * in ECB mode.
 */
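/*
 * Rough usage sketch (illustration only, not part of this driver): requests
 * reach this file through the generic skcipher API once the "ecb(aes)"
 * algorithm registered by this driver is selected:
 *
 *	tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
 *	crypto_skcipher_setkey(tfm, key, 16);	// lands in sl3516_ce_aes_setkey()
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	skcipher_request_set_crypt(req, src, dst, len, NULL);
 *	crypto_skcipher_encrypt(req);		// lands in sl3516_ce_skencrypt()
 */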

#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/skcipher.h>
#include "sl3516-ce.h"

/* sl3516_ce_need_fallback - check if a request can be handled by the CE */
static bool sl3516_ce_need_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sl3516_ce_dev *ce = op->ce;
	struct scatterlist *in_sg;
	struct scatterlist *out_sg;
	struct scatterlist *sg;

	if (areq->cryptlen == 0 || areq->cryptlen % 16) {
		ce->fallback_mod16++;
		return true;
	}

	/*
	 * check if we have enough descriptors for TX
	 * Note: TX needs one control descriptor for each SG, so only
	 * MAXDESC / 2 data descriptors are usable
	 */
	if (sg_nents(areq->src) > MAXDESC / 2) {
		ce->fallback_sg_count_tx++;
		return true;
	}
	/* check if we have enough descriptors for RX */
	if (sg_nents(areq->dst) > MAXDESC) {
		ce->fallback_sg_count_rx++;
		return true;
	}

	sg = areq->src;
	while (sg) {
		if ((sg->length % 16) != 0) {
			ce->fallback_mod16++;
			return true;
		}
		if ((sg_dma_len(sg) % 16) != 0) {
			ce->fallback_mod16++;
			return true;
		}
		if (!IS_ALIGNED(sg->offset, 16)) {
			ce->fallback_align16++;
			return true;
		}
		sg = sg_next(sg);
	}
	sg = areq->dst;
	while (sg) {
		if ((sg->length % 16) != 0) {
			ce->fallback_mod16++;
			return true;
		}
		if ((sg_dma_len(sg) % 16) != 0) {
			ce->fallback_mod16++;
			return true;
		}
		if (!IS_ALIGNED(sg->offset, 16)) {
			ce->fallback_align16++;
			return true;
		}
		sg = sg_next(sg);
	}

	/* need the same number of SGs (with the same lengths) for source and destination */
	in_sg = areq->src;
	out_sg = areq->dst;
	while (in_sg && out_sg) {
		if (in_sg->length != out_sg->length) {
			ce->fallback_not_same_len++;
			return true;
		}
		in_sg = sg_next(in_sg);
		out_sg = sg_next(out_sg);
	}
	if (in_sg || out_sg)
		return true;

	return false;
}

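/*
 * sl3516_ce_cipher_fallback - hand the request to the software fallback
 * tfm allocated in sl3516_ce_cipher_init(), for requests the CE cannot
 * process directly.
 */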
static int sl3516_ce_cipher_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sl3516_ce_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sl3516_ce_alg_template *algt;
	int err;

	algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher);
	algt->stat_fb++;

	skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
	skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
				      areq->base.complete, areq->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);
	if (rctx->op_dir == CE_DECRYPTION)
		err = crypto_skcipher_decrypt(&rctx->fallback_req);
	else
		err = crypto_skcipher_encrypt(&rctx->fallback_req);
	return err;
}

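/*
 * sl3516_ce_cipher - map the request for DMA, build the control packet
 * and run it on the CE. Called from the crypto engine worker via
 * sl3516_ce_handle_cipher_request().
 */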
static int sl3516_ce_cipher(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sl3516_ce_dev *ce = op->ce;
	struct sl3516_ce_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sl3516_ce_alg_template *algt;
	struct scatterlist *sg;
	unsigned int todo, len;
	struct pkt_control_ecb *ecb;
	int nr_sgs = 0;
	int nr_sgd = 0;
	int err = 0;
	int i;

	algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher);

	dev_dbg(ce->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__,
		crypto_tfm_alg_name(areq->base.tfm),
		areq->cryptlen,
		rctx->op_dir, areq->iv, crypto_skcipher_ivsize(tfm),
		op->keylen);

	algt->stat_req++;

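	/*
	 * For in-place requests (src == dst) the scatterlist must be mapped
	 * once, bidirectionally; mapping the same buffers twice would
	 * violate the DMA API.
	 */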
	if (areq->src == areq->dst) {
		nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src),
				    DMA_BIDIRECTIONAL);
		if (nr_sgs <= 0 || nr_sgs > MAXDESC / 2) {
			dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
			err = -EINVAL;
			goto theend;
		}
		nr_sgd = nr_sgs;
	} else {
		nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src),
				    DMA_TO_DEVICE);
		if (nr_sgs <= 0 || nr_sgs > MAXDESC / 2) {
			dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
			err = -EINVAL;
			goto theend;
		}
		nr_sgd = dma_map_sg(ce->dev, areq->dst, sg_nents(areq->dst),
				    DMA_FROM_DEVICE);
		if (nr_sgd <= 0 || nr_sgd > MAXDESC) {
			dev_err(ce->dev, "Invalid sg number %d\n", nr_sgd);
			err = -EINVAL;
			goto theend_sgs;
		}
	}

	len = areq->cryptlen;
	i = 0;
	sg = areq->src;
	while (i < nr_sgs && sg && len) {
		if (sg_dma_len(sg) == 0)
			goto sgs_next;
		rctx->t_src[i].addr = sg_dma_address(sg);
		todo = min(len, sg_dma_len(sg));
		rctx->t_src[i].len = todo;
		dev_dbg(ce->dev, "%s total=%u SGS(%d %u off=%d) todo=%u\n", __func__,
			areq->cryptlen, i, rctx->t_src[i].len, sg->offset, todo);
		len -= todo;
		i++;
sgs_next:
		sg = sg_next(sg);
	}
	if (len > 0) {
		dev_err(ce->dev, "remaining len %u/%u nr_sgs=%d\n", len, areq->cryptlen, nr_sgs);
		err = -EINVAL;
		goto theend_sgs;
	}

	len = areq->cryptlen;
	i = 0;
	sg = areq->dst;
	while (i < nr_sgd && sg && len) {
		if (sg_dma_len(sg) == 0)
			goto sgd_next;
		rctx->t_dst[i].addr = sg_dma_address(sg);
		todo = min(len, sg_dma_len(sg));
		rctx->t_dst[i].len = todo;
		dev_dbg(ce->dev, "%s total=%u SGD(%d %u off=%d) todo=%u\n", __func__,
			areq->cryptlen, i, rctx->t_dst[i].len, sg->offset, todo);
		len -= todo;
		i++;

sgd_next:
		sg = sg_next(sg);
	}
	if (len > 0) {
		dev_err(ce->dev, "remaining len %u\n", len);
		err = -EINVAL;
		goto theend_sgs;
	}

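	/*
	 * Build the control packet for the engine. The TQ flags appear to
	 * select which parts of the packet (control word, cipher header,
	 * key words) the engine fetches as control descriptors; the key is
	 * converted to big-endian words before being handed over.
	 */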
	switch (algt->mode) {
	case ECB_AES:
		rctx->pctrllen = sizeof(struct pkt_control_ecb);
		ecb = (struct pkt_control_ecb *)ce->pctrl;

		rctx->tqflag = TQ0_TYPE_CTRL;
		rctx->tqflag |= TQ1_CIPHER;
		ecb->control.op_mode = rctx->op_dir;
		ecb->control.cipher_algorithm = ECB_AES;
		ecb->cipher.header_len = 0;
		ecb->cipher.algorithm_len = areq->cryptlen;
		cpu_to_be32_array((__be32 *)ecb->key, (u32 *)op->key, op->keylen / 4);
		rctx->h = &ecb->cipher;

		rctx->tqflag |= TQ4_KEY0;
		rctx->tqflag |= TQ5_KEY4;
		rctx->tqflag |= TQ6_KEY6;
		ecb->control.aesnk = op->keylen / 4;
		break;
	}

	rctx->nr_sgs = nr_sgs;
	rctx->nr_sgd = nr_sgd;
	err = sl3516_ce_run_task(ce, rctx, crypto_tfm_alg_name(areq->base.tfm));

theend_sgs:
	if (areq->src == areq->dst) {
		dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src),
			     DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src),
			     DMA_TO_DEVICE);
		dma_unmap_sg(ce->dev, areq->dst, sg_nents(areq->dst),
			     DMA_FROM_DEVICE);
	}

theend:

	return err;
}

static int sl3516_ce_handle_cipher_request(struct crypto_engine *engine, void *areq)
{
	int err;
	struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);

	err = sl3516_ce_cipher(breq);
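	/*
	 * Completion callbacks expect to run with BHs disabled (as they
	 * would from softirq context), so disable them around the
	 * finalization.
	 */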
	local_bh_disable();
	crypto_finalize_skcipher_request(engine, breq, err);
	local_bh_enable();

	return 0;
}

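/*
 * sl3516_ce_skdecrypt()/sl3516_ce_skencrypt() - skcipher entry points:
 * set the request direction, use the software fallback when the CE
 * cannot handle the request, otherwise queue it on the crypto engine.
 */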
int sl3516_ce_skdecrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sl3516_ce_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;

	memset(rctx, 0, sizeof(struct sl3516_ce_cipher_req_ctx));
	rctx->op_dir = CE_DECRYPTION;

	if (sl3516_ce_need_fallback(areq))
		return sl3516_ce_cipher_fallback(areq);

	engine = op->ce->engine;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

int sl3516_ce_skencrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sl3516_ce_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;

	memset(rctx, 0, sizeof(struct sl3516_ce_cipher_req_ctx));
	rctx->op_dir = CE_ENCRYPTION;

	if (sl3516_ce_need_fallback(areq))
		return sl3516_ce_cipher_fallback(areq);

	engine = op->ce->engine;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}

int sl3516_ce_cipher_init(struct crypto_tfm *tfm)
{
	struct sl3516_ce_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct sl3516_ce_alg_template *algt;
	const char *name = crypto_tfm_alg_name(tfm);
	struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(sktfm);
	int err;

	memset(op, 0, sizeof(struct sl3516_ce_cipher_tfm_ctx));

	algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher);
	op->ce = algt->ce;

	op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(op->ce->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
			name, PTR_ERR(op->fallback_tfm));
		return PTR_ERR(op->fallback_tfm);
	}

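	/*
	 * Reserve room in the request context for the fallback request,
	 * so sl3516_ce_cipher_fallback() can use rctx->fallback_req
	 * without a separate allocation.
	 */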
	sktfm->reqsize = sizeof(struct sl3516_ce_cipher_req_ctx) +
			 crypto_skcipher_reqsize(op->fallback_tfm);

	dev_info(op->ce->dev, "Fallback for %s is %s\n",
		 crypto_tfm_alg_driver_name(&sktfm->base),
		 crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)));

	op->enginectx.op.do_one_request = sl3516_ce_handle_cipher_request;
	op->enginectx.op.prepare_request = NULL;
	op->enginectx.op.unprepare_request = NULL;

	err = pm_runtime_get_sync(op->ce->dev);
	if (err < 0)
		goto error_pm;

	return 0;
error_pm:
	pm_runtime_put_noidle(op->ce->dev);
	crypto_free_skcipher(op->fallback_tfm);
	return err;
}

void sl3516_ce_cipher_exit(struct crypto_tfm *tfm)
{
	struct sl3516_ce_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);

	kfree_sensitive(op->key);
	crypto_free_skcipher(op->fallback_tfm);
	pm_runtime_put_sync_suspend(op->ce->dev);
}

int sl3516_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			 unsigned int keylen)
{
	struct sl3516_ce_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sl3516_ce_dev *ce = op->ce;

	switch (keylen) {
	case 128 / 8:
		break;
	case 192 / 8:
		break;
	case 256 / 8:
		break;
	default:
		dev_dbg(ce->dev, "ERROR: Invalid keylen %u\n", keylen);
		return -EINVAL;
	}
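	/*
	 * Keep our own copy of the key; it is copied into the DMAed
	 * control packet when a request is built. kfree_sensitive()
	 * below clears any previously stored key.
	 */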
	kfree_sensitive(op->key);
	op->keylen = keylen;
	op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
	if (!op->key)
		return -ENOMEM;

	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}