cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ahash.c (16040B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include "internal.h"

static const struct crypto_type crypto_ahash_type;

struct ahash_request_priv {
	crypto_completion_t complete;
	void *data;
	u8 *result;
	u32 flags;
	void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
	return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
			    halg);
}

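/*
 * Map the walk's current page and return how many bytes may be hashed
 * from it.  If the offset within the page is not aligned to the
 * algorithm's alignmask, the chunk is capped at the next alignment
 * boundary so that crypto_hash_walk_done() can realign.
 */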
static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int offset = walk->offset;
	unsigned int nbytes = min(walk->entrylen,
				  ((unsigned int)(PAGE_SIZE)) - offset);

	walk->data = kmap_atomic(walk->pg);
	walk->data += offset;

	if (offset & alignmask) {
		unsigned int unaligned = alignmask + 1 - (offset & alignmask);

		if (nbytes > unaligned)
			nbytes = unaligned;
	}

	walk->entrylen -= nbytes;
	return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->offset = sg->offset;
	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
	walk->offset = offset_in_page(walk->offset);
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}

int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	unsigned int alignmask = walk->alignmask;

	walk->data -= walk->offset;

	if (walk->entrylen && (walk->offset & alignmask) && !err) {
		unsigned int nbytes;

		walk->offset = ALIGN(walk->offset, alignmask + 1);
		nbytes = min(walk->entrylen,
			     (unsigned int)(PAGE_SIZE - walk->offset));
		if (nbytes) {
			walk->entrylen -= nbytes;
			walk->data += walk->offset;
			return nbytes;
		}
	}

	kunmap_atomic(walk->data);
	crypto_yield(walk->flags);

	if (err)
		return err;

	if (walk->entrylen) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	walk->sg = sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

int crypto_hash_walk_first(struct ahash_request *req,
			   struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags;

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
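
/*
 * A minimal sketch (not part of this file) of how an ahash
 * implementation typically consumes its input with the walker above,
 * mirroring in-tree users such as shash_ahash_update().  process() is
 * a hypothetical stand-in for the driver's block function:
 *
 *	struct crypto_hash_walk walk;
 *	int err = 0, nbytes;
 *
 *	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
 *	     nbytes = crypto_hash_walk_done(&walk, err))
 *		err = process(walk.data, nbytes);
 */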

static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
				unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = tfm->setkey(tfm, alignbuffer, keylen);
	kfree_sensitive(buffer);
	return ret;
}

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}

static void ahash_set_needkey(struct crypto_ahash *tfm)
{
	const struct hash_alg_common *alg = crypto_hash_alg_common(tfm);

	if (tfm->setkey != ahash_nosetkey &&
	    !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
		crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int err;

	if ((unsigned long)key & alignmask)
		err = ahash_setkey_unaligned(tfm, key, keylen);
	else
		err = tfm->setkey(tfm, key, keylen);

	if (unlikely(err)) {
		ahash_set_needkey(tfm);
		return err;
	}

	crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

static inline unsigned int ahash_align_buffer_size(unsigned len,
						   unsigned long mask)
{
	return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}

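/*
 * Swap the caller's result buffer and completion callback for internal,
 * aligned replacements.  The ORIGINAL values are stashed in req->priv so
 * that ahash_restore_req() can put them back once the operation ends.
 */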
static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request_priv *priv;

	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC);
	if (!priv)
		return -ENOMEM;

	/*
	 * WARNING: Voodoo programming below!
	 *
	 * The code below is obscure and hard to understand, thus explanation
	 * is necessary. See include/crypto/hash.h and include/linux/crypto.h
	 * to understand the layout of structures used here!
	 *
	 * The code here will replace portions of the ORIGINAL request with
	 * pointers to new code and buffers so the hashing operation can store
	 * the result in an aligned buffer. We will call the modified request
	 * an ADJUSTED request.
	 *
	 * The newly mangled request will look as follows:
	 *
	 * req {
	 *   .result        = ADJUSTED[new aligned buffer]
	 *   .base.complete = ADJUSTED[pointer to completion function]
	 *   .base.data     = ADJUSTED[*req (pointer to self)]
	 *   .priv          = ADJUSTED[new priv] {
	 *           .result   = ORIGINAL(result)
	 *           .complete = ORIGINAL(base.complete)
	 *           .data     = ORIGINAL(base.data)
	 *   }
	 */

	priv->result = req->result;
	priv->complete = req->base.complete;
	priv->data = req->base.data;
	priv->flags = req->base.flags;

	/*
	 * WARNING: We do not back up req->priv here! The req->priv
	 *          is for internal use of the Crypto API and the
	 *          user must _NOT_ _EVER_ depend on its content!
	 */

	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
	req->base.complete = cplt;
	req->base.data = req;
	req->priv = priv;

	return 0;
}

static void ahash_restore_req(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	/* Restore the original crypto request. */
	req->result = priv->result;

	ahash_request_set_callback(req, priv->flags,
				   priv->complete, priv->data);
	req->priv = NULL;

	/* Free the priv allocated for the ADJUSTED request. */
	kfree_sensitive(priv);
}

static void ahash_notify_einprogress(struct ahash_request *req)
{
	struct ahash_request_priv *priv = req->priv;
	struct crypto_async_request oreq;

	oreq.data = priv->data;

	priv->complete(&oreq, -EINPROGRESS);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS) {
		ahash_notify_einprogress(areq);
		return;
	}

	/*
	 * Restore the original request, see ahash_op_unaligned() for what
	 * goes where.
	 *
	 * The "struct ahash_request *req" here is in fact the "req.base"
	 * from the ADJUSTED request from ahash_op_unaligned(), thus as it
	 * is a pointer to self, it is also the ADJUSTED "req".
	 */

	/* First copy req->result into priv->result. */
	ahash_restore_req(areq, err);

	/* Complete the ORIGINAL request. */
	areq->base.complete(&areq->base, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
			      int (*op)(struct ahash_request *))
{
	int err;

	err = ahash_save_req(req, ahash_op_unaligned_done);
	if (err)
		return err;

	err = op(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	ahash_restore_req(req, err);

	return err;
}

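/*
 * Run @op directly when the caller's result buffer already satisfies the
 * algorithm's alignmask; otherwise bounce the result through an aligned
 * buffer via ahash_op_unaligned().
 */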
static int crypto_ahash_op(struct ahash_request *req,
			   int (*op)(struct ahash_request *))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)req->result & alignmask)
		return ahash_op_unaligned(req, op);

	return op(req);
}

int crypto_ahash_final(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int nbytes = req->nbytes;
	int ret;

	crypto_stats_get(alg);
	ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
	crypto_stats_ahash_final(nbytes, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int nbytes = req->nbytes;
	int ret;

	crypto_stats_get(alg);
	ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
	crypto_stats_ahash_final(nbytes, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int nbytes = req->nbytes;
	int ret;

	crypto_stats_get(alg);
	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		ret = -ENOKEY;
	else
		ret = crypto_ahash_op(req, tfm->digest);
	crypto_stats_ahash_final(nbytes, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);

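/*
 * Hedged usage sketch, not part of this file: one-shot hashing of a
 * linear buffer through the asynchronous interface, waiting for
 * completion synchronously.  The "sha256" name and the helper below
 * are illustrative only.
 */
#if 0
static int example_one_shot_digest(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, out, len);

	/* crypto_wait_req() converts -EINPROGRESS/-EBUSY into a sleep. */
	err = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
out_free_tfm:
	crypto_free_ahash(tfm);
	return err;
}
#endif
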
static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS)
		return;

	ahash_restore_req(areq, err);

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
	if (err)
		goto out;

	req->base.complete = ahash_def_finup_done2;

	err = crypto_ahash_reqtfm(req)->final(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

out:
	ahash_restore_req(req, err);
	return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS) {
		ahash_notify_einprogress(areq);
		return;
	}

	areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	err = ahash_def_finup_finish1(areq, err);
	if (areq->priv)
		return;

	areq->base.complete(&areq->base, err);
}

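/*
 * Default ->finup for algorithms that provide none: run ->update and
 * then ->final as two chained steps, with ahash_def_finup_done1/done2
 * above picking up each step when it completes asynchronously.
 */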
static int ahash_def_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	int err;

	err = ahash_save_req(req, ahash_def_finup_done1);
	if (err)
		return err;

	err = tfm->update(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	return ahash_def_finup_finish1(req, err);
}

static void crypto_ahash_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	alg->exit_tfm(hash);
}

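/*
 * Instantiate an ahash transform.  Algorithms of a different cra_type
 * (i.e. synchronous shash implementations) are wrapped via
 * crypto_init_shash_ops_async(); native ahash algorithms have their
 * entry points copied into the tfm, with ahash_def_finup() filling in
 * a missing ->finup.
 */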
static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	hash->setkey = ahash_nosetkey;

	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
		return crypto_init_shash_ops_async(tfm);

	hash->init = alg->init;
	hash->update = alg->update;
	hash->final = alg->final;
	hash->finup = alg->finup ?: ahash_def_finup;
	hash->digest = alg->digest;
	hash->export = alg->export;
	hash->import = alg->import;

	if (alg->setkey) {
		hash->setkey = alg->setkey;
		ahash_set_needkey(hash);
	}

	if (alg->exit_tfm)
		tfm->exit = crypto_ahash_exit_tfm;

	return alg->init_tfm ? alg->init_tfm(hash) : 0;
}

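/*
 * A tfm wrapping a synchronous shash only needs room for a pointer to
 * the underlying shash transform; native ahash algorithms get the
 * context size declared by the algorithm itself.
 */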
static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type != &crypto_ahash_type)
		return sizeof(struct crypto_shash *);

	return crypto_alg_extsize(alg);
}

static void crypto_ahash_free_instance(struct crypto_instance *inst)
{
	struct ahash_instance *ahash = ahash_instance(inst);

	ahash->free(ahash);
}

#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;

	memset(&rhash, 0, sizeof(rhash));

	strscpy(rhash.type, "ahash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash);
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : ahash\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}

static const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
	.free = crypto_ahash_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
	.report = crypto_ahash_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};

int crypto_grab_ahash(struct crypto_ahash_spawn *spawn,
		      struct crypto_instance *inst,
		      const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_ahash_type;
	return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_ahash);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);

static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;

	if (alg->halg.digestsize > HASH_MAX_DIGESTSIZE ||
	    alg->halg.statesize > HASH_MAX_STATESIZE ||
	    alg->halg.statesize == 0)
		return -EINVAL;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	err = ahash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);
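
/*
 * Hedged registration sketch, not part of this file: a driver declares
 * a struct ahash_alg and registers it.  All "foo" names are
 * hypothetical.
 */
#if 0
static struct ahash_alg foo_alg = {
	.init	= foo_init,
	.update	= foo_update,
	.final	= foo_final,
	.digest	= foo_digest,
	.halg	= {
		.digestsize = FOO_DIGEST_SIZE,
		.statesize  = sizeof(struct foo_export_state),
		.base	= {
			.cra_name	 = "foo",
			.cra_driver_name = "foo-generic",
			.cra_priority	 = 100,
			.cra_flags	 = CRYPTO_ALG_ASYNC,
			.cra_blocksize	 = FOO_BLOCK_SIZE,
			.cra_ctxsize	 = sizeof(struct foo_ctx),
			.cra_module	 = THIS_MODULE,
		},
	},
};

static int __init foo_mod_init(void)
{
	return crypto_register_ahash(&foo_alg);
}

static void __exit foo_mod_exit(void)
{
	crypto_unregister_ahash(&foo_alg);
}
#endif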

void crypto_unregister_ahash(struct ahash_alg *alg)
{
	crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int crypto_register_ahashes(struct ahash_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_ahash(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_ahashes);

void crypto_unregister_ahashes(struct ahash_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahashes);

int ahash_register_instance(struct crypto_template *tmpl,
			    struct ahash_instance *inst)
{
	int err;

	if (WARN_ON(!inst->free))
		return -EINVAL;

	err = ahash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
{
	struct crypto_alg *alg = &halg->base;

	if (alg->cra_type != &crypto_ahash_type)
		return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));

	return __crypto_ahash_alg(alg)->setkey != NULL;
}
EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");