cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ghash-clmulni-intel_glue.c (9013B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Accelerated GHASH implementation with Intel PCLMULQDQ-NI
 * instructions. This file contains glue code.
 *
 * Copyright (c) 2009 Intel Corp.
 *   Author: Huang Ying <ying.huang@intel.com>
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/cryptd.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <asm/cpu_device_id.h>
#include <asm/simd.h>

#define GHASH_BLOCK_SIZE	16
#define GHASH_DIGEST_SIZE	16

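/*
 * Both helpers below are implemented with PCLMULQDQ in the companion
 * assembly file (ghash-clmulni-intel_asm.S). They clobber XMM state, so
 * every call site in this file brackets them with kernel_fpu_begin() and
 * kernel_fpu_end().
 */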
void clmul_ghash_mul(char *dst, const u128 *shash);

void clmul_ghash_update(char *dst, const char *src, unsigned int srclen,
			const u128 *shash);

struct ghash_async_ctx {
	struct cryptd_ahash *cryptd_tfm;
};

struct ghash_ctx {
	u128 shash;
};

struct ghash_desc_ctx {
	u8 buffer[GHASH_BLOCK_SIZE];
	u32 bytes;
};

static int ghash_init(struct shash_desc *desc)
{
	struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);

	memset(dctx, 0, sizeof(*dctx));

	return 0;
}

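/*
 * The hash key H is stored premultiplied by x in GF(2^128): the two 64-bit
 * halves are shifted left by one bit (swapping places in the process, which
 * matches the word order the assembly consumes), and a bit carried out of
 * the top is reduced back in via 0xc2 << 56, a shifted, bit-reflected
 * encoding of the GHASH polynomial x^128 + x^7 + x^2 + x + 1. Precomputing
 * H*x spares the per-block multiply a one-bit shift of its product, a
 * standard PCLMULQDQ/GCM trick.
 */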
static int ghash_setkey(struct crypto_shash *tfm,
			const u8 *key, unsigned int keylen)
{
	struct ghash_ctx *ctx = crypto_shash_ctx(tfm);
	be128 *x = (be128 *)key;
	u64 a, b;

	if (keylen != GHASH_BLOCK_SIZE)
		return -EINVAL;

	/* perform multiplication by 'x' in GF(2^128) */
	a = be64_to_cpu(x->a);
	b = be64_to_cpu(x->b);

	ctx->shash.a = (b << 1) | (a >> 63);
	ctx->shash.b = (a << 1) | (b >> 63);

	if (a >> 63)
		ctx->shash.b ^= ((u64)0xc2) << 56;

	return 0;
}

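/*
 * dctx->buffer doubles as the running 16-byte GHASH state; dctx->bytes
 * counts how many bytes are still missing from a buffered partial block
 * (0 means no partial block is pending). Any pending block is topped up
 * and folded in first, the assembly then consumes all complete blocks,
 * and leftover tail bytes are XORed into the buffer for later. Example:
 * 20 fresh bytes on an empty buffer put one full block through
 * clmul_ghash_update(); the remaining 4 bytes land in the buffer and
 * dctx->bytes becomes 12.
 */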
static int ghash_update(struct shash_desc *desc,
			 const u8 *src, unsigned int srclen)
{
	struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
	struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
	u8 *dst = dctx->buffer;

	kernel_fpu_begin();
	if (dctx->bytes) {
		int n = min(srclen, dctx->bytes);
		u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes);

		dctx->bytes -= n;
		srclen -= n;

		while (n--)
			*pos++ ^= *src++;

		if (!dctx->bytes)
			clmul_ghash_mul(dst, &ctx->shash);
	}

	clmul_ghash_update(dst, src, srclen, &ctx->shash);
	kernel_fpu_end();

	if (srclen & 0xf) {
		src += srclen - (srclen & 0xf);
		srclen &= 0xf;
		dctx->bytes = GHASH_BLOCK_SIZE - srclen;
		while (srclen--)
			*dst++ ^= *src++;
	}

	return 0;
}

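/*
 * Fold in a trailing partial block. Since the new bytes were already XORed
 * into the running state and zero-padding XORs in nothing (the "^= 0" loop
 * is purely symbolic), the flush reduces to one final multiplication by the
 * key.
 */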
static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
{
	u8 *dst = dctx->buffer;

	if (dctx->bytes) {
		u8 *tmp = dst + (GHASH_BLOCK_SIZE - dctx->bytes);

		while (dctx->bytes--)
			*tmp++ ^= 0;

		kernel_fpu_begin();
		clmul_ghash_mul(dst, &ctx->shash);
		kernel_fpu_end();
	}

	dctx->bytes = 0;
}

static int ghash_final(struct shash_desc *desc, u8 *dst)
{
	struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
	struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
	u8 *buf = dctx->buffer;

	ghash_flush(ctx, dctx);
	memcpy(dst, buf, GHASH_BLOCK_SIZE);

	return 0;
}

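/*
 * The synchronous shash requires FPU context and is therefore marked
 * CRYPTO_ALG_INTERNAL: it is never handed out directly, only through the
 * cryptd-backed async wrapper registered below.
 */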
static struct shash_alg ghash_alg = {
	.digestsize	= GHASH_DIGEST_SIZE,
	.init		= ghash_init,
	.update		= ghash_update,
	.final		= ghash_final,
	.setkey		= ghash_setkey,
	.descsize	= sizeof(struct ghash_desc_ctx),
	.base		= {
		.cra_name		= "__ghash",
		.cra_driver_name	= "__ghash-pclmulqdqni",
		.cra_priority		= 0,
		.cra_flags		= CRYPTO_ALG_INTERNAL,
		.cra_blocksize		= GHASH_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct ghash_ctx),
		.cra_module		= THIS_MODULE,
	},
};

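/*
 * The async "ghash" entry points users actually see. Each request is run
 * synchronously on the inner shash when SIMD is usable; otherwise (or,
 * presumably to preserve ordering, when called from atomic context while
 * cryptd already has requests queued) it is bounced to a cryptd worker.
 */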
static int ghash_async_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ahash_request *cryptd_req = ahash_request_ctx(req);
	struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
	struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
	struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);

	desc->tfm = child;
	return crypto_shash_init(desc);
}

static int ghash_async_update(struct ahash_request *req)
{
	struct ahash_request *cryptd_req = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
	struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;

	if (!crypto_simd_usable() ||
	    (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
		memcpy(cryptd_req, req, sizeof(*req));
		ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
		return crypto_ahash_update(cryptd_req);
	} else {
		struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
		return shash_ahash_update(req, desc);
	}
}

static int ghash_async_final(struct ahash_request *req)
{
	struct ahash_request *cryptd_req = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
	struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;

	if (!crypto_simd_usable() ||
	    (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
		memcpy(cryptd_req, req, sizeof(*req));
		ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
		return crypto_ahash_final(cryptd_req);
	} else {
		struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
		return crypto_shash_final(desc, req->result);
	}
}

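/*
 * export/import serialize only the inner shash state (the partial-block
 * buffer and byte count), letting callers snapshot and later resume an
 * in-progress hash; import reinitializes the descriptor through
 * ghash_async_init() before copying the saved state back in.
 */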
static int ghash_async_import(struct ahash_request *req, const void *in)
{
	struct ahash_request *cryptd_req = ahash_request_ctx(req);
	struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
	struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);

	ghash_async_init(req);
	memcpy(dctx, in, sizeof(*dctx));
	return 0;
}

static int ghash_async_export(struct ahash_request *req, void *out)
{
	struct ahash_request *cryptd_req = ahash_request_ctx(req);
	struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
	struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);

	memcpy(out, dctx, sizeof(*dctx));
	return 0;
}

static int ghash_async_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ahash_request *cryptd_req = ahash_request_ctx(req);
	struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;

	if (!crypto_simd_usable() ||
	    (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
		memcpy(cryptd_req, req, sizeof(*req));
		ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
		return crypto_ahash_digest(cryptd_req);
	} else {
		struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
		struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);

		desc->tfm = child;
		return shash_ahash_digest(req, desc);
	}
}

static int ghash_async_setkey(struct crypto_ahash *tfm, const u8 *key,
			      unsigned int keylen)
{
	struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
	struct crypto_ahash *child = &ctx->cryptd_tfm->base;

	crypto_ahash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_ahash_set_flags(child, crypto_ahash_get_flags(tfm)
			       & CRYPTO_TFM_REQ_MASK);
	return crypto_ahash_setkey(child, key, keylen);
}

static int ghash_async_init_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_ahash *cryptd_tfm;
	struct ghash_async_ctx *ctx = crypto_tfm_ctx(tfm);

	cryptd_tfm = cryptd_alloc_ahash("__ghash-pclmulqdqni",
					CRYPTO_ALG_INTERNAL,
					CRYPTO_ALG_INTERNAL);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);
	ctx->cryptd_tfm = cryptd_tfm;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct ahash_request) +
				 crypto_ahash_reqsize(&cryptd_tfm->base));

	return 0;
}

static void ghash_async_exit_tfm(struct crypto_tfm *tfm)
{
	struct ghash_async_ctx *ctx = crypto_tfm_ctx(tfm);

	cryptd_free_ahash(ctx->cryptd_tfm);
}

static struct ahash_alg ghash_async_alg = {
	.init		= ghash_async_init,
	.update		= ghash_async_update,
	.final		= ghash_async_final,
	.setkey		= ghash_async_setkey,
	.digest		= ghash_async_digest,
	.export		= ghash_async_export,
	.import		= ghash_async_import,
	.halg = {
		.digestsize	= GHASH_DIGEST_SIZE,
		.statesize	= sizeof(struct ghash_desc_ctx),
		.base = {
			.cra_name		= "ghash",
			.cra_driver_name	= "ghash-clmulni",
			.cra_priority		= 400,
			.cra_ctxsize		= sizeof(struct ghash_async_ctx),
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= GHASH_BLOCK_SIZE,
			.cra_module		= THIS_MODULE,
			.cra_init		= ghash_async_init_tfm,
			.cra_exit		= ghash_async_exit_tfm,
		},
	},
};

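/*
 * Usage sketch (hypothetical caller, not part of this module): driving the
 * async "ghash" registered above. key/data/len are assumed to exist, error
 * handling is elided, and crypto_ahash_digest() may return -EINPROGRESS
 * when the request is deferred to cryptd.
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("ghash", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	struct scatterlist sg;
 *	u8 digest[GHASH_DIGEST_SIZE];
 *
 *	crypto_ahash_setkey(tfm, key, GHASH_BLOCK_SIZE);
 *	sg_init_one(&sg, data, len);
 *	ahash_request_set_callback(req, 0, NULL, NULL);
 *	ahash_request_set_crypt(req, &sg, digest, len);
 *	crypto_ahash_digest(req);
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */
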
static const struct x86_cpu_id pcmul_cpu_id[] = {
	X86_MATCH_FEATURE(X86_FEATURE_PCLMULQDQ, NULL), /* Pickle-Mickle-Duck */
	{}
};
MODULE_DEVICE_TABLE(x86cpu, pcmul_cpu_id);

static int __init ghash_pclmulqdqni_mod_init(void)
{
	int err;

	if (!x86_match_cpu(pcmul_cpu_id))
		return -ENODEV;

	err = crypto_register_shash(&ghash_alg);
	if (err)
		goto err_out;
	err = crypto_register_ahash(&ghash_async_alg);
	if (err)
		goto err_shash;

	return 0;

err_shash:
	crypto_unregister_shash(&ghash_alg);
err_out:
	return err;
}

static void __exit ghash_pclmulqdqni_mod_exit(void)
{
	crypto_unregister_ahash(&ghash_async_alg);
	crypto_unregister_shash(&ghash_alg);
}

module_init(ghash_pclmulqdqni_mod_init);
module_exit(ghash_pclmulqdqni_mod_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("GHASH hash function, accelerated by PCLMULQDQ-NI");
MODULE_ALIAS_CRYPTO("ghash");