cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ccp-crypto-aes.c (9043B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) AES crypto API support
 *
 * Copyright (C) 2013-2019 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/scatterwalk.h>

#include "ccp-crypto.h"

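/*
 * Completion callback: on success, copy the updated IV produced by the
 * CCP back into the request for all IV-based modes.
 */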
static int ccp_aes_complete(struct crypto_async_request *async_req, int ret)
{
	struct skcipher_request *req = skcipher_request_cast(async_req);
	struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);

	if (ret)
		return ret;

	if (ctx->u.aes.mode != CCP_AES_MODE_ECB)
		memcpy(req->iv, rctx->iv, AES_BLOCK_SIZE);

	return 0;
}

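/*
 * Validate the key length and cache the key in the transform context;
 * the key is handed to the CCP when each command is submitted.
 */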
static int ccp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			  unsigned int key_len)
{
	struct ccp_crypto_skcipher_alg *alg = ccp_crypto_skcipher_alg(tfm);
	struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);

	switch (key_len) {
	case AES_KEYSIZE_128:
		ctx->u.aes.type = CCP_AES_TYPE_128;
		break;
	case AES_KEYSIZE_192:
		ctx->u.aes.type = CCP_AES_TYPE_192;
		break;
	case AES_KEYSIZE_256:
		ctx->u.aes.type = CCP_AES_TYPE_256;
		break;
	default:
		return -EINVAL;
	}
	ctx->u.aes.mode = alg->mode;
	ctx->u.aes.key_len = key_len;

	memcpy(ctx->u.aes.key, key, key_len);
	sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);

	return 0;
}

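/*
 * Build a CCP AES command from the skcipher request and enqueue it for
 * asynchronous processing by the coprocessor.
 */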
static int ccp_aes_crypt(struct skcipher_request *req, bool encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	struct scatterlist *iv_sg = NULL;
	unsigned int iv_len = 0;

	if (!ctx->u.aes.key_len)
		return -EINVAL;

	if (((ctx->u.aes.mode == CCP_AES_MODE_ECB) ||
	     (ctx->u.aes.mode == CCP_AES_MODE_CBC)) &&
	    (req->cryptlen & (AES_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (ctx->u.aes.mode != CCP_AES_MODE_ECB) {
		if (!req->iv)
			return -EINVAL;

		memcpy(rctx->iv, req->iv, AES_BLOCK_SIZE);
		iv_sg = &rctx->iv_sg;
		iv_len = AES_BLOCK_SIZE;
		sg_init_one(iv_sg, rctx->iv, iv_len);
	}

	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
	INIT_LIST_HEAD(&rctx->cmd.entry);
	rctx->cmd.engine = CCP_ENGINE_AES;
	rctx->cmd.u.aes.type = ctx->u.aes.type;
	rctx->cmd.u.aes.mode = ctx->u.aes.mode;
	rctx->cmd.u.aes.action =
		(encrypt) ? CCP_AES_ACTION_ENCRYPT : CCP_AES_ACTION_DECRYPT;
	rctx->cmd.u.aes.key = &ctx->u.aes.key_sg;
	rctx->cmd.u.aes.key_len = ctx->u.aes.key_len;
	rctx->cmd.u.aes.iv = iv_sg;
	rctx->cmd.u.aes.iv_len = iv_len;
	rctx->cmd.u.aes.src = req->src;
	rctx->cmd.u.aes.src_len = req->cryptlen;
	rctx->cmd.u.aes.dst = req->dst;

	return ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
}

static int ccp_aes_encrypt(struct skcipher_request *req)
{
	return ccp_aes_crypt(req, true);
}

static int ccp_aes_decrypt(struct skcipher_request *req)
{
	return ccp_aes_crypt(req, false);
}

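/* One-time transform setup: select the completion handler and request size. */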
static int ccp_aes_init_tfm(struct crypto_skcipher *tfm)
{
	struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->complete = ccp_aes_complete;
	ctx->u.aes.key_len = 0;

	crypto_skcipher_set_reqsize(tfm, sizeof(struct ccp_aes_req_ctx));

	return 0;
}

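/*
 * RFC 3686 completion: put back the caller's IV pointer that was swapped
 * out in ccp_aes_rfc3686_crypt(), then run the common completion handler.
 */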
static int ccp_aes_rfc3686_complete(struct crypto_async_request *async_req,
				    int ret)
{
	struct skcipher_request *req = skcipher_request_cast(async_req);
	struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);

	/* Restore the original pointer */
	req->iv = rctx->rfc3686_info;

	return ccp_aes_complete(async_req, ret);
}

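/*
 * Per RFC 3686, the last four key bytes are a nonce: strip them off,
 * stash them for counter-block construction, and set the AES key proper.
 */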
static int ccp_aes_rfc3686_setkey(struct crypto_skcipher *tfm, const u8 *key,
				  unsigned int key_len)
{
	struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (key_len < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;

	key_len -= CTR_RFC3686_NONCE_SIZE;
	memcpy(ctx->u.aes.nonce, key + key_len, CTR_RFC3686_NONCE_SIZE);

	return ccp_aes_setkey(tfm, key, key_len);
}

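/*
 * Assemble the RFC 3686 counter block (nonce || IV || counter, with the
 * counter starting at 1) and hand the request to the common crypt path.
 */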
static int ccp_aes_rfc3686_crypt(struct skcipher_request *req, bool encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	u8 *iv;

	/* Initialize the CTR block */
	iv = rctx->rfc3686_iv;
	memcpy(iv, ctx->u.aes.nonce, CTR_RFC3686_NONCE_SIZE);

	iv += CTR_RFC3686_NONCE_SIZE;
	memcpy(iv, req->iv, CTR_RFC3686_IV_SIZE);

	iv += CTR_RFC3686_IV_SIZE;
	*(__be32 *)iv = cpu_to_be32(1);

	/* Save the caller's IV and point the request at the full CTR block */
	rctx->rfc3686_info = req->iv;
	req->iv = rctx->rfc3686_iv;

	return ccp_aes_crypt(req, encrypt);
}

static int ccp_aes_rfc3686_encrypt(struct skcipher_request *req)
{
	return ccp_aes_rfc3686_crypt(req, true);
}

static int ccp_aes_rfc3686_decrypt(struct skcipher_request *req)
{
	return ccp_aes_rfc3686_crypt(req, false);
}

static int ccp_aes_rfc3686_init_tfm(struct crypto_skcipher *tfm)
{
	struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->complete = ccp_aes_rfc3686_complete;
	ctx->u.aes.key_len = 0;

	crypto_skcipher_set_reqsize(tfm, sizeof(struct ccp_aes_req_ctx));

	return 0;
}

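/*
 * Template skcipher definitions; the per-mode name, block size and IV
 * size are filled in from ccp_aes_def at registration time.
 */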
static const struct skcipher_alg ccp_aes_defaults = {
	.setkey			= ccp_aes_setkey,
	.encrypt		= ccp_aes_encrypt,
	.decrypt		= ccp_aes_decrypt,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.init			= ccp_aes_init_tfm,

	.base.cra_flags		= CRYPTO_ALG_ASYNC |
				  CRYPTO_ALG_ALLOCATES_MEMORY |
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct ccp_ctx),
	.base.cra_priority	= CCP_CRA_PRIORITY,
	.base.cra_module	= THIS_MODULE,
};

static const struct skcipher_alg ccp_aes_rfc3686_defaults = {
	.setkey			= ccp_aes_rfc3686_setkey,
	.encrypt		= ccp_aes_rfc3686_encrypt,
	.decrypt		= ccp_aes_rfc3686_decrypt,
	.min_keysize		= AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
	.init			= ccp_aes_rfc3686_init_tfm,

	.base.cra_flags		= CRYPTO_ALG_ASYNC |
				  CRYPTO_ALG_ALLOCATES_MEMORY |
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	= CTR_RFC3686_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct ccp_ctx),
	.base.cra_priority	= CCP_CRA_PRIORITY,
	.base.cra_module	= THIS_MODULE,
};

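/* Per-mode registration data, including the minimum CCP version required. */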
struct ccp_aes_def {
	enum ccp_aes_mode mode;
	unsigned int version;
	const char *name;
	const char *driver_name;
	unsigned int blocksize;
	unsigned int ivsize;
	const struct skcipher_alg *alg_defaults;
};

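/* The AES modes exposed by this driver, all available from CCP version 3.0. */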
static struct ccp_aes_def aes_algs[] = {
	{
		.mode		= CCP_AES_MODE_ECB,
		.version	= CCP_VERSION(3, 0),
		.name		= "ecb(aes)",
		.driver_name	= "ecb-aes-ccp",
		.blocksize	= AES_BLOCK_SIZE,
		.ivsize		= 0,
		.alg_defaults	= &ccp_aes_defaults,
	},
	{
		.mode		= CCP_AES_MODE_CBC,
		.version	= CCP_VERSION(3, 0),
		.name		= "cbc(aes)",
		.driver_name	= "cbc-aes-ccp",
		.blocksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.alg_defaults	= &ccp_aes_defaults,
	},
	{
		.mode		= CCP_AES_MODE_CFB,
		.version	= CCP_VERSION(3, 0),
		.name		= "cfb(aes)",
		.driver_name	= "cfb-aes-ccp",
		.blocksize	= 1,
		.ivsize		= AES_BLOCK_SIZE,
		.alg_defaults	= &ccp_aes_defaults,
	},
	{
		.mode		= CCP_AES_MODE_OFB,
		.version	= CCP_VERSION(3, 0),
		.name		= "ofb(aes)",
		.driver_name	= "ofb-aes-ccp",
		.blocksize	= 1,
		.ivsize		= AES_BLOCK_SIZE,
		.alg_defaults	= &ccp_aes_defaults,
	},
	{
		.mode		= CCP_AES_MODE_CTR,
		.version	= CCP_VERSION(3, 0),
		.name		= "ctr(aes)",
		.driver_name	= "ctr-aes-ccp",
		.blocksize	= 1,
		.ivsize		= AES_BLOCK_SIZE,
		.alg_defaults	= &ccp_aes_defaults,
	},
	{
		.mode		= CCP_AES_MODE_CTR,
		.version	= CCP_VERSION(3, 0),
		.name		= "rfc3686(ctr(aes))",
		.driver_name	= "rfc3686-ctr-aes-ccp",
		.blocksize	= 1,
		.ivsize		= CTR_RFC3686_IV_SIZE,
		.alg_defaults	= &ccp_aes_rfc3686_defaults,
	},
};

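/*
 * Instantiate one algorithm from its defaults, register it with the
 * crypto API and track it on the caller's list.
 */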
static int ccp_register_aes_alg(struct list_head *head,
				const struct ccp_aes_def *def)
{
	struct ccp_crypto_skcipher_alg *ccp_alg;
	struct skcipher_alg *alg;
	int ret;

	ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
	if (!ccp_alg)
		return -ENOMEM;

	INIT_LIST_HEAD(&ccp_alg->entry);

	ccp_alg->mode = def->mode;

	/* Copy the defaults and override as necessary */
	alg = &ccp_alg->alg;
	*alg = *def->alg_defaults;
	snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->driver_name);
	alg->base.cra_blocksize = def->blocksize;
	alg->ivsize = def->ivsize;

	ret = crypto_register_skcipher(alg);
	if (ret) {
		pr_err("%s skcipher algorithm registration error (%d)\n",
		       alg->base.cra_name, ret);
		kfree(ccp_alg);
		return ret;
	}

	list_add(&ccp_alg->entry, head);

	return 0;
}

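/* Register every AES mode supported by the CCP version detected on this system. */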
int ccp_register_aes_algs(struct list_head *head)
{
	int i, ret;
	unsigned int ccpversion = ccp_version();

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		if (aes_algs[i].version > ccpversion)
			continue;
		ret = ccp_register_aes_alg(head, &aes_algs[i]);
		if (ret)
			return ret;
	}

	return 0;
}
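
For context, nothing calls the functions above directly: users go through the kernel's skcipher API, and the crypto core dispatches to the highest-priority provider of a given algorithm name (CCP_CRA_PRIORITY makes "cbc-aes-ccp" a candidate for "cbc(aes)", and so on). The sketch below is a minimal, hypothetical in-kernel caller, not part of this driver; the function name ccp_aes_cbc_demo and the all-zero key, IV, and plaintext are placeholders for illustration.

#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int ccp_aes_cbc_demo(void)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist sg;
	u8 key[AES_KEYSIZE_128] = { 0 };	/* placeholder key */
	u8 *buf = NULL, *iv = NULL;
	int ret;

	/* The crypto core picks the highest-priority "cbc(aes)"
	 * implementation, which may be cbc-aes-ccp on CCP hardware. */
	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* Use heap buffers so the data is safe to map for DMA. */
	buf = kzalloc(AES_BLOCK_SIZE, GFP_KERNEL);
	iv = kzalloc(AES_BLOCK_SIZE, GFP_KERNEL);
	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!buf || !iv || !req) {
		ret = -ENOMEM;
		goto out;
	}

	ret = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (ret)
		goto out;

	/* Encrypt one block in place. */
	sg_init_one(&sg, buf, AES_BLOCK_SIZE);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, AES_BLOCK_SIZE, iv);

	/* The CCP completes asynchronously (CRYPTO_ALG_ASYNC);
	 * crypto_wait_req() sleeps until ccp_aes_complete() has run. */
	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

out:
	skcipher_request_free(req);
	crypto_free_skcipher(tfm);
	kfree(iv);
	kfree(buf);
	return ret;
}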