cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

rk3288_crypto_ahash.c (11176B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Crypto acceleration support for Rockchip RK3288
 *
 * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
 *
 * Author: Zain Wang <zain.wang@rock-chips.com>
 *
 * Some ideas are from the marvell/cesa.c and s5p-sss.c drivers.
 */
#include <linux/device.h>
#include "rk3288_crypto.h"

/*
 * The hardware cannot hash a zero-length message, so return the
 * precomputed hash of the empty message instead.
 */

static int zero_message_process(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	int rk_digest_size = crypto_ahash_digestsize(tfm);

	switch (rk_digest_size) {
	case SHA1_DIGEST_SIZE:
		memcpy(req->result, sha1_zero_message_hash, rk_digest_size);
		break;
	case SHA256_DIGEST_SIZE:
		memcpy(req->result, sha256_zero_message_hash, rk_digest_size);
		break;
	case MD5_DIGEST_SIZE:
		memcpy(req->result, md5_zero_message_hash, rk_digest_size);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void rk_ahash_crypto_complete(struct crypto_async_request *base, int err)
{
	if (base->complete)
		base->complete(base, err);
}

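/*
 * Program the engine for a fresh hash: flush the hash unit, zero the
 * 32-byte digest output window, clear and re-enable the receive-DMA
 * interrupts, select the hash mode and output byte order, and latch the
 * total message length.  The upper half-word of CRYPTO_CTRL appears to
 * act as a write-enable mask for the lower half, hence the
 * _SBF(0xffff, 16) accompanying each update.
 */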
static void rk_ahash_reg_init(struct rk_crypto_info *dev)
{
	struct ahash_request *req = ahash_request_cast(dev->async_req);
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	int reg_status;

	reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL) |
		     RK_CRYPTO_HASH_FLUSH | _SBF(0xffff, 16);
	CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, reg_status);

	reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL);
	reg_status &= (~RK_CRYPTO_HASH_FLUSH);
	reg_status |= _SBF(0xffff, 16);
	CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, reg_status);

	memset_io(dev->reg + RK_CRYPTO_HASH_DOUT_0, 0, 32);

	CRYPTO_WRITE(dev, RK_CRYPTO_INTENA, RK_CRYPTO_HRDMA_ERR_ENA |
					    RK_CRYPTO_HRDMA_DONE_ENA);

	CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, RK_CRYPTO_HRDMA_ERR_INT |
					    RK_CRYPTO_HRDMA_DONE_INT);

	CRYPTO_WRITE(dev, RK_CRYPTO_HASH_CTRL, rctx->mode |
					       RK_CRYPTO_HASH_SWAP_DO);

	CRYPTO_WRITE(dev, RK_CRYPTO_CONF, RK_CRYPTO_BYTESWAP_HRFIFO |
					  RK_CRYPTO_BYTESWAP_BRFIFO |
					  RK_CRYPTO_BYTESWAP_BTFIFO);

	CRYPTO_WRITE(dev, RK_CRYPTO_HASH_MSG_LEN, dev->total);
}

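/*
 * Everything below -- init/update/final/finup/import/export -- simply
 * delegates to the software fallback tfm, presumably because the engine
 * has no exportable intermediate state; only digest() (further down)
 * drives the hardware, and only for one-shot requests.
 */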
static int rk_ahash_init(struct ahash_request *req)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}

static int rk_ahash_update(struct ahash_request *req)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;

	return crypto_ahash_update(&rctx->fallback_req);
}

static int rk_ahash_final(struct ahash_request *req)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_final(&rctx->fallback_req);
}

static int rk_ahash_finup(struct ahash_request *req)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_finup(&rctx->fallback_req);
}

static int rk_ahash_import(struct ahash_request *req, const void *in)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_import(&rctx->fallback_req, in);
}

static int rk_ahash_export(struct ahash_request *req, void *out)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_export(&rctx->fallback_req, out);
}

static int rk_ahash_digest(struct ahash_request *req)
{
	struct rk_ahash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct rk_crypto_info *dev = tctx->dev;

	if (!req->nbytes)
		return zero_message_process(req);
	else
		return dev->enqueue(dev, &req->base);
}

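/*
 * Start receive DMA for the current scatterlist entry.  HRDMAL takes the
 * length in 32-bit words, so the byte count is rounded up; HASH_START is
 * written together with its mirror in the upper half-word, matching the
 * write-enable scheme noted above.
 */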
static void crypto_ahash_dma_start(struct rk_crypto_info *dev)
{
	CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAS, dev->addr_in);
	CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAL, (dev->count + 3) / 4);
	CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_HASH_START |
					  (RK_CRYPTO_HASH_START << 16));
}

static int rk_ahash_set_data_start(struct rk_crypto_info *dev)
{
	int err;

	err = dev->load_data(dev, dev->sg_src, NULL);
	if (!err)
		crypto_ahash_dma_start(dev);
	return err;
}

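/*
 * dev->start callback: record the request geometry, derive the hardware
 * hash mode from the digest size, program the engine and kick off DMA
 * for the first chunk.
 */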
static int rk_ahash_start(struct rk_crypto_info *dev)
{
	struct ahash_request *req = ahash_request_cast(dev->async_req);
	struct crypto_ahash *tfm;
	struct rk_ahash_rctx *rctx;

	dev->total = req->nbytes;
	dev->left_bytes = req->nbytes;
	dev->aligned = 0;
	dev->align_size = 4;
	dev->sg_dst = NULL;
	dev->sg_src = req->src;
	dev->first = req->src;
	dev->src_nents = sg_nents(req->src);
	rctx = ahash_request_ctx(req);
	rctx->mode = 0;

	tfm = crypto_ahash_reqtfm(req);
	switch (crypto_ahash_digestsize(tfm)) {
	case SHA1_DIGEST_SIZE:
		rctx->mode = RK_CRYPTO_HASH_SHA1;
		break;
	case SHA256_DIGEST_SIZE:
		rctx->mode = RK_CRYPTO_HASH_SHA256;
		break;
	case MD5_DIGEST_SIZE:
		rctx->mode = RK_CRYPTO_HASH_MD5;
		break;
	default:
		return -EINVAL;
	}

	rk_ahash_reg_init(dev);
	return rk_ahash_set_data_start(dev);
}

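/*
 * dev->update callback, run after each DMA completion: feed the next
 * scatterlist entry to the engine while bytes remain; once the whole
 * message has been pushed, poll for the engine to finish, copy the
 * digest out of the MMIO window and complete the request.
 */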
static int rk_ahash_crypto_rx(struct rk_crypto_info *dev)
{
	int err = 0;
	struct ahash_request *req = ahash_request_cast(dev->async_req);
	struct crypto_ahash *tfm;

	dev->unload_data(dev);
	if (dev->left_bytes) {
		if (dev->aligned) {
			if (sg_is_last(dev->sg_src)) {
				dev_warn(dev->dev, "[%s:%d], Lack of data\n",
					 __func__, __LINE__);
				err = -ENOMEM;
				goto out_rx;
			}
			dev->sg_src = sg_next(dev->sg_src);
		}
		err = rk_ahash_set_data_start(dev);
	} else {
		/*
		 * The engine needs some time to process the data after the
		 * last DMA transfer.  The wait depends on the length of that
		 * final chunk, so no fixed delay fits; polling every 10us
		 * avoids hammering the register while still responding
		 * quickly once the hash is done.
		 */
		while (!CRYPTO_READ(dev, RK_CRYPTO_HASH_STS))
			udelay(10);

		tfm = crypto_ahash_reqtfm(req);
		memcpy_fromio(req->result, dev->reg + RK_CRYPTO_HASH_DOUT_0,
			      crypto_ahash_digestsize(tfm));
		dev->complete(dev->async_req, 0);
		tasklet_schedule(&dev->queue_task);
	}

out_rx:
	return err;
}

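/*
 * Per-tfm setup: grab a scratch page (addr_vir, used by the DMA bounce
 * path for unaligned data), allocate the software fallback, and wire the
 * dev callbacks to the routines above.
 */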
static int rk_cra_hash_init(struct crypto_tfm *tfm)
{
	struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm);
	struct rk_crypto_tmp *algt;
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);

	const char *alg_name = crypto_tfm_alg_name(tfm);

	algt = container_of(alg, struct rk_crypto_tmp, alg.hash);

	tctx->dev = algt->dev;
	tctx->dev->addr_vir = (void *)__get_free_page(GFP_KERNEL);
	if (!tctx->dev->addr_vir) {
		dev_err(tctx->dev->dev, "failed to allocate page for addr_vir\n");
		return -ENOMEM;
	}
	tctx->dev->start = rk_ahash_start;
	tctx->dev->update = rk_ahash_crypto_rx;
	tctx->dev->complete = rk_ahash_crypto_complete;

	/* for fallback */
	tctx->fallback_tfm = crypto_alloc_ahash(alg_name, 0,
					       CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(tctx->fallback_tfm)) {
		dev_err(tctx->dev->dev, "Could not load fallback driver.\n");
		return PTR_ERR(tctx->fallback_tfm);
	}
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct rk_ahash_rctx) +
				 crypto_ahash_reqsize(tctx->fallback_tfm));

	return tctx->dev->enable_clk(tctx->dev);
}

static void rk_cra_hash_exit(struct crypto_tfm *tfm)
{
	struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm);

	free_page((unsigned long)tctx->dev->addr_vir);
	return tctx->dev->disable_clk(tctx->dev);
}

struct rk_crypto_tmp rk_ahash_sha1 = {
	.type = ALG_TYPE_HASH,
	.alg.hash = {
		.init = rk_ahash_init,
		.update = rk_ahash_update,
		.final = rk_ahash_final,
		.finup = rk_ahash_finup,
		.export = rk_ahash_export,
		.import = rk_ahash_import,
		.digest = rk_ahash_digest,
		.halg = {
			 .digestsize = SHA1_DIGEST_SIZE,
			 .statesize = sizeof(struct sha1_state),
			 .base = {
				  .cra_name = "sha1",
				  .cra_driver_name = "rk-sha1",
				  .cra_priority = 300,
				  .cra_flags = CRYPTO_ALG_ASYNC |
					       CRYPTO_ALG_NEED_FALLBACK,
				  .cra_blocksize = SHA1_BLOCK_SIZE,
				  .cra_ctxsize = sizeof(struct rk_ahash_ctx),
				  .cra_alignmask = 3,
				  .cra_init = rk_cra_hash_init,
				  .cra_exit = rk_cra_hash_exit,
				  .cra_module = THIS_MODULE,
				  }
			 }
	}
};

struct rk_crypto_tmp rk_ahash_sha256 = {
	.type = ALG_TYPE_HASH,
	.alg.hash = {
		.init = rk_ahash_init,
		.update = rk_ahash_update,
		.final = rk_ahash_final,
		.finup = rk_ahash_finup,
		.export = rk_ahash_export,
		.import = rk_ahash_import,
		.digest = rk_ahash_digest,
		.halg = {
			 .digestsize = SHA256_DIGEST_SIZE,
			 .statesize = sizeof(struct sha256_state),
			 .base = {
				  .cra_name = "sha256",
				  .cra_driver_name = "rk-sha256",
				  .cra_priority = 300,
				  .cra_flags = CRYPTO_ALG_ASYNC |
					       CRYPTO_ALG_NEED_FALLBACK,
				  .cra_blocksize = SHA256_BLOCK_SIZE,
				  .cra_ctxsize = sizeof(struct rk_ahash_ctx),
				  .cra_alignmask = 3,
				  .cra_init = rk_cra_hash_init,
				  .cra_exit = rk_cra_hash_exit,
				  .cra_module = THIS_MODULE,
				  }
			 }
	}
};

struct rk_crypto_tmp rk_ahash_md5 = {
	.type = ALG_TYPE_HASH,
	.alg.hash = {
		.init = rk_ahash_init,
		.update = rk_ahash_update,
		.final = rk_ahash_final,
		.finup = rk_ahash_finup,
		.export = rk_ahash_export,
		.import = rk_ahash_import,
		.digest = rk_ahash_digest,
		.halg = {
			 .digestsize = MD5_DIGEST_SIZE,
			 .statesize = sizeof(struct md5_state),
			 .base = {
				  .cra_name = "md5",
				  .cra_driver_name = "rk-md5",
				  .cra_priority = 300,
				  .cra_flags = CRYPTO_ALG_ASYNC |
					       CRYPTO_ALG_NEED_FALLBACK,
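				  /* MD5's block size is 64 bytes, the same as SHA-1's */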
				  .cra_blocksize = SHA1_BLOCK_SIZE,
				  .cra_ctxsize = sizeof(struct rk_ahash_ctx),
				  .cra_alignmask = 3,
				  .cra_init = rk_cra_hash_init,
				  .cra_exit = rk_cra_hash_exit,
				  .cra_module = THIS_MODULE,
				  }
			 }
	}
};
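
For reference, below is a minimal, hypothetical sketch (not part of this file or repository) of how a kernel module would drive these algorithms through the generic ahash API. The helper name sketch_sha256 is invented; the crypto API calls themselves are the standard ones. Because the algorithms are registered with CRYPTO_ALG_ASYNC, the request completes via a callback, handled here with the stock crypto_wait_req()/crypto_req_done() helpers. "sha256" should resolve to "rk-sha256" when this driver is loaded, since its priority of 300 beats the generic implementation.

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int sketch_sha256(const void *buf, unsigned int len, u8 *digest)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_tfm;
	}

	/* buf must be linear, DMA-able memory (e.g. kmalloc'd), not stack. */
	sg_init_one(&sg, buf, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, digest, len);

	/* Async completion is delivered through the wait helper. */
	err = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
out_tfm:
	crypto_free_ahash(tfm);
	return err;
}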