cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

sun8i-ce-hash.c (13376B)


// SPDX-License-Identifier: GPL-2.0
/*
 * sun8i-ce-hash.c - hardware cryptographic offloader for
 * Allwinner H3/A64/H5/H2+/H6/R40 SoC
 *
 * Copyright (C) 2015-2020 Corentin Labbe <clabbe@baylibre.com>
 *
 * This file adds support for MD5 and SHA1/SHA224/SHA256/SHA384/SHA512.
 *
 * You can find the datasheet in Documentation/arm/sunxi.rst
 */
#include <linux/bottom_half.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <crypto/internal/hash.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/md5.h>
#include "sun8i-ce.h"

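/*
 * TFM-level init: bind the context to its CE device, register the
 * crypto_engine callback, allocate the software fallback and size the
 * request context to hold both our state and the fallback's request.
 */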
int sun8i_ce_hash_crainit(struct crypto_tfm *tfm)
{
	struct sun8i_ce_hash_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
	struct sun8i_ce_alg_template *algt;
	int err;

	memset(op, 0, sizeof(struct sun8i_ce_hash_tfm_ctx));

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
	op->ce = algt->ce;

	op->enginectx.op.do_one_request = sun8i_ce_hash_run;
	op->enginectx.op.prepare_request = NULL;
	op->enginectx.op.unprepare_request = NULL;

	/* FALLBACK */
	op->fallback_tfm = crypto_alloc_ahash(crypto_tfm_alg_name(tfm), 0,
					      CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(algt->ce->dev, "Fallback driver could not be loaded\n");
		return PTR_ERR(op->fallback_tfm);
	}

	if (algt->alg.hash.halg.statesize < crypto_ahash_statesize(op->fallback_tfm))
		algt->alg.hash.halg.statesize = crypto_ahash_statesize(op->fallback_tfm);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct sun8i_ce_hash_reqctx) +
				 crypto_ahash_reqsize(op->fallback_tfm));

	memcpy(algt->fbname, crypto_tfm_alg_driver_name(&op->fallback_tfm->base),
	       CRYPTO_MAX_ALG_NAME);

	err = pm_runtime_get_sync(op->ce->dev);
	if (err < 0)
		goto error_pm;
	return 0;
error_pm:
	pm_runtime_put_noidle(op->ce->dev);
	crypto_free_ahash(op->fallback_tfm);
	return err;
}

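/* TFM-level exit: free the fallback transform and drop the PM runtime reference. */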
void sun8i_ce_hash_craexit(struct crypto_tfm *tfm)
{
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_tfm_ctx(tfm);

	crypto_free_ahash(tfmctx->fallback_tfm);
	pm_runtime_put_sync_suspend(tfmctx->ce->dev);
}

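/*
 * Initialize the request context and the underlying fallback request;
 * whether hardware is used is only decided at digest() time.
 */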
int sun8i_ce_hash_init(struct ahash_request *areq)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	memset(rctx, 0, sizeof(struct sun8i_ce_hash_reqctx));

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}

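/*
 * Export/import proxy the fallback's partial state, since all incremental
 * hashing is done by the fallback TFM.
 */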
int sun8i_ce_hash_export(struct ahash_request *areq, void *out)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_export(&rctx->fallback_req, out);
}

int sun8i_ce_hash_import(struct ahash_request *areq, const void *in)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_import(&rctx->fallback_req, in);
}

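/*
 * final(), update() and finup() always run on the fallback: only one-shot
 * digest() requests are ever offloaded to the hardware. Under
 * CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG these paths bump the fallback counter.
 */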
int sun8i_ce_hash_final(struct ahash_request *areq)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ce_alg_template *algt;
#endif

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.result = areq->result;

#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
	algt->stat_fb++;
#endif

	return crypto_ahash_final(&rctx->fallback_req);
}

int sun8i_ce_hash_update(struct ahash_request *areq)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = areq->nbytes;
	rctx->fallback_req.src = areq->src;

	return crypto_ahash_update(&rctx->fallback_req);
}

int sun8i_ce_hash_finup(struct ahash_request *areq)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ce_alg_template *algt;
#endif

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	rctx->fallback_req.nbytes = areq->nbytes;
	rctx->fallback_req.src = areq->src;
	rctx->fallback_req.result = areq->result;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
	algt->stat_fb++;
#endif

	return crypto_ahash_finup(&rctx->fallback_req);
}

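/* Software path for digest(): forward the whole request to the fallback. */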
static int sun8i_ce_hash_digest_fb(struct ahash_request *areq)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ce_alg_template *algt;
#endif

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	rctx->fallback_req.nbytes = areq->nbytes;
	rctx->fallback_req.src = areq->src;
	rctx->fallback_req.result = areq->result;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
	algt->stat_fb++;
#endif

	return crypto_ahash_digest(&rctx->fallback_req);
}

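/*
 * The hardware cannot handle every request; fall back to software when:
 * - the request is empty,
 * - there are more than MAX_SG - 1 SG entries (one slot is reserved for
 *   the padding),
 * - any SG entry has a length that is not a multiple of 4, or an offset
 *   that is not 32-bit aligned.
 * Each reason has its own fallback statistic counter.
 */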
static bool sun8i_ce_hash_need_fallback(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ce_alg_template *algt;
	struct scatterlist *sg;

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);

	if (areq->nbytes == 0) {
		algt->stat_fb_len0++;
		return true;
	}
	/* we need to reserve one SG for the padding */
	if (sg_nents_for_len(areq->src, areq->nbytes) > MAX_SG - 1) {
		algt->stat_fb_maxsg++;
		return true;
	}
	sg = areq->src;
	while (sg) {
		if (sg->length % 4) {
			algt->stat_fb_srclen++;
			return true;
		}
		if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
			algt->stat_fb_srcali++;
			return true;
		}
		sg = sg_next(sg);
	}
	return false;
}

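/*
 * One-shot digest entry point: requests that do not fit the hardware
 * constraints go to the software fallback, everything else is handed to a
 * crypto_engine flow, which will invoke sun8i_ce_hash_run().
 */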
int sun8i_ce_hash_digest(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct sun8i_ce_alg_template *algt;
	struct sun8i_ce_dev *ce;
	struct crypto_engine *engine;
	struct scatterlist *sg;
	int nr_sgs, e, i;

	if (sun8i_ce_hash_need_fallback(areq))
		return sun8i_ce_hash_digest_fb(areq);

	nr_sgs = sg_nents_for_len(areq->src, areq->nbytes);
	if (nr_sgs > MAX_SG - 1)
		return sun8i_ce_hash_digest_fb(areq);

	for_each_sg(areq->src, sg, nr_sgs, i) {
		if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
			return sun8i_ce_hash_digest_fb(areq);
	}

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
	ce = algt->ce;

	e = sun8i_ce_get_engine_number(ce);
	rctx->flow = e;
	engine = ce->chanlist[e].engine;

	return crypto_transfer_hash_request_to_engine(engine, areq);
}

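/*
 * Write the MD5/SHA trailing padding into buf, starting at 32-bit word
 * index padi: a 0x80 byte, zeroes up to the length field, then the message
 * length in bits (64-bit little-endian for MD5, 64-bit big-endian for
 * SHA-1/SHA-224/SHA-256, 128-bit big-endian for SHA-384/SHA-512).
 * Returns the total number of 32-bit words used in the buffer, or 0 if
 * the padding would overflow bufsize.
 */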
static u64 hash_pad(__le32 *buf, unsigned int bufsize, u64 padi, u64 byte_count, bool le, int bs)
{
	u64 fill, min_fill, j, k;
	__be64 *bebits;
	__le64 *lebits;

	j = padi;
	buf[j++] = cpu_to_le32(0x80);

	if (bs == 64) {
		fill = 64 - (byte_count % 64);
		min_fill = 2 * sizeof(u32) + sizeof(u32);
	} else {
		fill = 128 - (byte_count % 128);
		min_fill = 4 * sizeof(u32) + sizeof(u32);
	}

	if (fill < min_fill)
		fill += bs;

	k = j;
	j += (fill - min_fill) / sizeof(u32);
	if (j * 4 > bufsize) {
		pr_err("%s OVERFLOW %llu\n", __func__, j);
		return 0;
	}
	for (; k < j; k++)
		buf[k] = 0;

	if (le) {
		/* MD5 */
		lebits = (__le64 *)&buf[j];
		*lebits = cpu_to_le64(byte_count << 3);
		j += 2;
	} else {
		if (bs == 64) {
			/* sha1 sha224 sha256 */
			bebits = (__be64 *)&buf[j];
			*bebits = cpu_to_be64(byte_count << 3);
			j += 2;
		} else {
			/* sha384 sha512 */
			bebits = (__be64 *)&buf[j];
			*bebits = cpu_to_be64(byte_count >> 61);
			j += 2;
			bebits = (__be64 *)&buf[j];
			*bebits = cpu_to_be64(byte_count << 3);
			j += 2;
		}
	}
	if (j * 4 > bufsize) {
		pr_err("%s OVERFLOW %llu\n", __func__, j);
		return 0;
	}

	return j;
}

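/*
 * crypto_engine callback doing the actual hardware run: build a single
 * ce_task with the source scatterlist plus a padding descriptor, DMA-map
 * everything, start the task, then unmap and copy the digest back.
 */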
int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
{
	struct ahash_request *areq = container_of(breq, struct ahash_request, base);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct sun8i_ce_alg_template *algt;
	struct sun8i_ce_dev *ce;
	struct sun8i_ce_flow *chan;
	struct ce_task *cet;
	struct scatterlist *sg;
	int nr_sgs, flow, err;
	unsigned int len;
	u32 common;
	u64 byte_count;
	__le32 *bf;
	void *buf = NULL;
	int j, i, todo;
	void *result = NULL;
	u64 bs;
	int digestsize;
	dma_addr_t addr_res, addr_pad;
	int ns = sg_nents_for_len(areq->src, areq->nbytes);

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
	ce = algt->ce;

	bs = algt->alg.hash.halg.base.cra_blocksize;
	digestsize = algt->alg.hash.halg.digestsize;
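	/*
	 * SHA-224 and SHA-384 are truncated variants: the engine writes out
	 * the full SHA-256/SHA-512-sized result, so size the result buffer
	 * for the full digest; only halg.digestsize bytes are copied out at
	 * the end.
	 */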
	if (digestsize == SHA224_DIGEST_SIZE)
		digestsize = SHA256_DIGEST_SIZE;
	if (digestsize == SHA384_DIGEST_SIZE)
		digestsize = SHA512_DIGEST_SIZE;

	/* the padding can be up to two blocks */
	buf = kzalloc(bs * 2, GFP_KERNEL | GFP_DMA);
	if (!buf) {
		err = -ENOMEM;
		goto theend;
	}
	bf = (__le32 *)buf;

	result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA);
	if (!result) {
		err = -ENOMEM;
		goto theend;
	}

	flow = rctx->flow;
	chan = &ce->chanlist[flow];

#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	algt->stat_req++;
#endif
	dev_dbg(ce->dev, "%s %s len=%d\n", __func__, crypto_tfm_alg_name(areq->base.tfm), areq->nbytes);

	cet = chan->tl;
	memset(cet, 0, sizeof(struct ce_task));

	cet->t_id = cpu_to_le32(flow);
	common = ce->variant->alg_hash[algt->ce_algo_id];
	common |= CE_COMM_INT;
	cet->t_common_ctl = cpu_to_le32(common);

	cet->t_sym_ctl = 0;
	cet->t_asym_ctl = 0;

	nr_sgs = dma_map_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
	if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
		dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
		err = -EINVAL;
		goto theend;
	}

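	/*
	 * Descriptor lengths are in 32-bit words; need_fallback() already
	 * guaranteed that every SG length is a multiple of 4.
	 */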
	len = areq->nbytes;
	for_each_sg(areq->src, sg, nr_sgs, i) {
		cet->t_src[i].addr = cpu_to_le32(sg_dma_address(sg));
		todo = min(len, sg_dma_len(sg));
		cet->t_src[i].len = cpu_to_le32(todo / 4);
		len -= todo;
	}
	if (len > 0) {
		dev_err(ce->dev, "remaining len %d\n", len);
		err = -EINVAL;
		goto theend;
	}
	addr_res = dma_map_single(ce->dev, result, digestsize, DMA_FROM_DEVICE);
	cet->t_dst[0].addr = cpu_to_le32(addr_res);
	cet->t_dst[0].len = cpu_to_le32(digestsize / 4);
	if (dma_mapping_error(ce->dev, addr_res)) {
		dev_err(ce->dev, "DMA map dest\n");
		err = -EINVAL;
		goto theend;
	}

	byte_count = areq->nbytes;
	j = 0;

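	/*
	 * Build the trailing padding in the bounce buffer; after the loop
	 * above, i indexes the first free source slot, which is the one
	 * reserved for the padding descriptor.
	 */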
	switch (algt->ce_algo_id) {
	case CE_ID_HASH_MD5:
		j = hash_pad(bf, 2 * bs, j, byte_count, true, bs);
		break;
	case CE_ID_HASH_SHA1:
	case CE_ID_HASH_SHA224:
	case CE_ID_HASH_SHA256:
		j = hash_pad(bf, 2 * bs, j, byte_count, false, bs);
		break;
	case CE_ID_HASH_SHA384:
	case CE_ID_HASH_SHA512:
		j = hash_pad(bf, 2 * bs, j, byte_count, false, bs);
		break;
	}
	if (!j) {
		err = -EINVAL;
		goto theend;
	}

	addr_pad = dma_map_single(ce->dev, buf, j * 4, DMA_TO_DEVICE);
	cet->t_src[i].addr = cpu_to_le32(addr_pad);
	cet->t_src[i].len = cpu_to_le32(j);
	if (dma_mapping_error(ce->dev, addr_pad)) {
		dev_err(ce->dev, "DMA error on padding SG\n");
		err = -EINVAL;
		goto theend;
	}

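	/*
	 * Depending on the variant, the total length is expressed either in
	 * bits or in 32-bit words (data plus padding).
	 */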
	if (ce->variant->hash_t_dlen_in_bits)
		cet->t_dlen = cpu_to_le32((areq->nbytes + j * 4) * 8);
	else
		cet->t_dlen = cpu_to_le32(areq->nbytes / 4 + j);

	chan->timeout = areq->nbytes;

	err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(areq->base.tfm));

	dma_unmap_single(ce->dev, addr_pad, j * 4, DMA_TO_DEVICE);
	dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
	dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE);

	memcpy(areq->result, result, algt->alg.hash.halg.digestsize);
theend:
	kfree(buf);
	kfree(result);
	local_bh_disable();
	crypto_finalize_hash_request(engine, breq, err);
	local_bh_enable();
	return 0;
}