cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

cryptd.c (29171B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 */

#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/cryptd.h>
#include <linux/refcount.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static unsigned int cryptd_max_cpu_qlen = 1000;
module_param(cryptd_max_cpu_qlen, uint, 0);
MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth");

static struct workqueue_struct *cryptd_wq;

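/*
 * Requests are queued per CPU: each CPU owns a crypto_queue and a
 * work item, and a request submitted on a given CPU is later
 * processed on that same CPU by cryptd_queue_worker() via the
 * cryptd workqueue.
 */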
struct cryptd_cpu_queue {
	struct crypto_queue queue;
	struct work_struct work;
};

struct cryptd_queue {
	/*
	 * Protected by disabling BH, which allows enqueueing from
	 * softirq context and dequeueing from the kworker
	 * (cryptd_queue_worker()).
	 */
	struct cryptd_cpu_queue __percpu *cpu_queue;
};

struct cryptd_instance_ctx {
	struct crypto_spawn spawn;
	struct cryptd_queue *queue;
};

struct skcipherd_instance_ctx {
	struct crypto_skcipher_spawn spawn;
	struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct cryptd_queue *queue;
};

struct aead_instance_ctx {
	struct crypto_aead_spawn aead_spawn;
	struct cryptd_queue *queue;
};

struct cryptd_skcipher_ctx {
	refcount_t refcnt;
	struct crypto_sync_skcipher *child;
};

struct cryptd_skcipher_request_ctx {
	crypto_completion_t complete;
};

struct cryptd_hash_ctx {
	refcount_t refcnt;
	struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
	crypto_completion_t complete;
	struct shash_desc desc;
};

struct cryptd_aead_ctx {
	refcount_t refcnt;
	struct crypto_aead *child;
};

struct cryptd_aead_request_ctx {
	crypto_completion_t complete;
};

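/*
 * Note that each per-transform context above starts with a
 * refcount_t; cryptd_enqueue_request() relies on this layout when it
 * treats the tfm context pointer as a pointer to that refcount.
 */
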
static void cryptd_queue_worker(struct work_struct *work);

static int cryptd_init_queue(struct cryptd_queue *queue,
			     unsigned int max_cpu_qlen)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
	}
	pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
	return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

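/*
 * Enqueue a request on the submitting CPU's queue and kick the
 * worker. Unless the queue is full (-ENOSPC), the transform's
 * refcount is bumped, if it is already live, so the tfm cannot go
 * away while the request is in flight.
 */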
static int cryptd_enqueue_request(struct cryptd_queue *queue,
				  struct crypto_async_request *request)
{
	int err;
	struct cryptd_cpu_queue *cpu_queue;
	refcount_t *refcnt;

	local_bh_disable();
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	err = crypto_enqueue_request(&cpu_queue->queue, request);

	refcnt = crypto_tfm_ctx(request->tfm);

	if (err == -ENOSPC)
		goto out;

	queue_work_on(smp_processor_id(), cryptd_wq, &cpu_queue->work);

	if (!refcount_read(refcnt))
		goto out;

	refcount_inc(refcnt);

out:
	local_bh_enable();

	return err;
}

/*
 * Called in workqueue context: do one unit of real crypto work (via
 * req->complete) and reschedule itself if there is more work to do.
 */
static void cryptd_queue_worker(struct work_struct *work)
{
	struct cryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;

	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
	/*
	 * Only handle one request at a time to avoid hogging the crypto
	 * workqueue.
	 */
	local_bh_disable();
	backlog = crypto_get_backlog(&cpu_queue->queue);
	req = crypto_dequeue_request(&cpu_queue->queue);
	local_bh_enable();

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	req->complete(req, 0);

	if (cpu_queue->queue.qlen)
		queue_work(cryptd_wq, &cpu_queue->work);
}

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);

	return ictx->queue;
}

static void cryptd_type_and_mask(struct crypto_attr_type *algt,
				 u32 *type, u32 *mask)
{
	/*
	 * cryptd is allowed to wrap internal algorithms, but in that case the
	 * resulting cryptd instance will be marked as internal as well.
	 */
	*type = algt->type & CRYPTO_ALG_INTERNAL;
	*mask = algt->mask & CRYPTO_ALG_INTERNAL;

	/* No point in cryptd wrapping an algorithm that's already async. */
	*mask |= CRYPTO_ALG_ASYNC;

	*mask |= crypto_algt_inherited_mask(algt);
}

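/*
 * Derive the instance's name and basic parameters from the wrapped
 * algorithm. The +50 priority boost makes the cryptd wrapper win
 * lookups over the underlying implementation of the same cra_name.
 */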
static int cryptd_init_instance(struct crypto_instance *inst,
				struct crypto_alg *alg)
{
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)",
		     alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		return -ENAMETOOLONG;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

	return 0;
}

static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
				  const u8 *key, unsigned int keylen)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
	struct crypto_sync_skcipher *child = ctx->child;

	crypto_sync_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(child,
				       crypto_skcipher_get_flags(parent) &
				       CRYPTO_TFM_REQ_MASK);
	return crypto_sync_skcipher_setkey(child, key, keylen);
}

static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	int refcnt = refcount_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(tfm);
}

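/*
 * Worker-side handlers: called from cryptd_queue_worker() with err == 0
 * to do the real work, or with err == -EINPROGRESS as a backlog
 * notification, in which case the work is skipped and the status is
 * simply propagated to the original completion callback.
 */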
static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
				    int err)
{
	struct skcipher_request *req = skcipher_request_cast(base);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_sync_skcipher *child = ctx->child;
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	skcipher_request_set_sync_tfm(subreq, child);
	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				   req->iv);

	err = crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);

	req->base.complete = rctx->complete;

out:
	cryptd_skcipher_complete(req, err);
}

static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
				    int err)
{
	struct skcipher_request *req = skcipher_request_cast(base);
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_sync_skcipher *child = ctx->child;
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	skcipher_request_set_sync_tfm(subreq, child);
	skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				   req->iv);

	err = crypto_skcipher_decrypt(subreq);
	skcipher_request_zero(subreq);

	req->base.complete = rctx->complete;

out:
	cryptd_skcipher_complete(req, err);
}

static int cryptd_skcipher_enqueue(struct skcipher_request *req,
				   crypto_completion_t compl)
{
	struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
}

static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
{
	return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
}

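/*
 * The spawn is grabbed with CRYPTO_ALG_ASYNC set in the mask, so the
 * child transform is guaranteed to be synchronous and the cast to
 * crypto_sync_skcipher below is safe.
 */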
static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
	struct crypto_skcipher_spawn *spawn = &ictx->spawn;
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *cipher;

	cipher = crypto_spawn_skcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = (struct crypto_sync_skcipher *)cipher;
	crypto_skcipher_set_reqsize(
		tfm, sizeof(struct cryptd_skcipher_request_ctx));
	return 0;
}

static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_sync_skcipher(ctx->child);
}

static void cryptd_skcipher_free(struct skcipher_instance *inst)
{
	struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);

	crypto_drop_skcipher(&ctx->spawn);
	kfree(inst);
}

static int cryptd_create_skcipher(struct crypto_template *tmpl,
				  struct rtattr **tb,
				  struct crypto_attr_type *algt,
				  struct cryptd_queue *queue)
{
	struct skcipherd_instance_ctx *ctx;
	struct skcipher_instance *inst;
	struct skcipher_alg *alg;
	u32 type;
	u32 mask;
	int err;

	cryptd_type_and_mask(algt, &type, &mask);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = skcipher_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
				   crypto_attr_alg_name(tb[1]), type, mask);
	if (err)
		goto err_free_inst;

	alg = crypto_spawn_skcipher_alg(&ctx->spawn);
	err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
	if (err)
		goto err_free_inst;

	inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
		(alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
	inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
	inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);

	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);

	inst->alg.init = cryptd_skcipher_init_tfm;
	inst->alg.exit = cryptd_skcipher_exit_tfm;

	inst->alg.setkey = cryptd_skcipher_setkey;
	inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
	inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;

	inst->free = cryptd_skcipher_free;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		cryptd_skcipher_free(inst);
	}
	return err;
}

static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct cryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
				      CRYPTO_TFM_REQ_MASK);
	return crypto_shash_setkey(child, key, keylen);
}

static int cryptd_hash_enqueue(struct ahash_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_queue *queue =
		cryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;

	return cryptd_enqueue_request(queue, &req->base);
}

static void cryptd_hash_complete(struct ahash_request *req, int err)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	int refcnt = refcount_read(&ctx->refcnt);

	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(tfm);
}

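/*
 * Each ahash operation below comes in two halves: an _enqueue() entry
 * point that defers the request, and a worker-side handler that drives
 * the synchronous shash child and then completes the original request.
 */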
static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;

	err = crypto_shash_init(desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_update(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = crypto_shash_final(&rctx->desc, req->result);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_finup(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;

	err = shash_ahash_digest(req, desc);

	req->base.complete = rctx->complete;

out:
	cryptd_hash_complete(req, err);
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static int cryptd_hash_export(struct ahash_request *req, void *out)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_export(&rctx->desc, out);
}

static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct shash_desc *desc = cryptd_shash_desc(req);

	desc->tfm = ctx->child;

	return crypto_shash_import(desc, in);
}

static void cryptd_hash_free(struct ahash_instance *inst)
{
	struct hashd_instance_ctx *ctx = ahash_instance_ctx(inst);

	crypto_drop_shash(&ctx->spawn);
	kfree(inst);
}

static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			      struct crypto_attr_type *algt,
			      struct cryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *alg;
	u32 type;
	u32 mask;
	int err;

	cryptd_type_and_mask(algt, &type, &mask);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_grab_shash(&ctx->spawn, ahash_crypto_instance(inst),
				crypto_attr_alg_name(tb[1]), type, mask);
	if (err)
		goto err_free_inst;
	alg = crypto_spawn_shash_alg(&ctx->spawn);

	err = cryptd_init_instance(ahash_crypto_instance(inst), &alg->base);
	if (err)
		goto err_free_inst;

	inst->alg.halg.base.cra_flags |= CRYPTO_ALG_ASYNC |
		(alg->base.cra_flags & (CRYPTO_ALG_INTERNAL |
					CRYPTO_ALG_OPTIONAL_KEY));
	inst->alg.halg.digestsize = alg->digestsize;
	inst->alg.halg.statesize = alg->statesize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

	inst->alg.init   = cryptd_hash_init_enqueue;
	inst->alg.update = cryptd_hash_update_enqueue;
	inst->alg.final  = cryptd_hash_final_enqueue;
	inst->alg.finup  = cryptd_hash_finup_enqueue;
	inst->alg.export = cryptd_hash_export;
	inst->alg.import = cryptd_hash_import;
	if (crypto_shash_alg_has_setkey(alg))
		inst->alg.setkey = cryptd_hash_setkey;
	inst->alg.digest = cryptd_hash_digest_enqueue;

	inst->free = cryptd_hash_free;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		cryptd_hash_free(inst);
	}
	return err;
}

static int cryptd_aead_setkey(struct crypto_aead *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setkey(child, key, keylen);
}

static int cryptd_aead_setauthsize(struct crypto_aead *parent,
				   unsigned int authsize)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
	struct crypto_aead *child = ctx->child;

	return crypto_aead_setauthsize(child, authsize);
}

static void cryptd_aead_crypt(struct aead_request *req,
			      struct crypto_aead *child, int err,
			      int (*crypt)(struct aead_request *req))
{
	struct cryptd_aead_request_ctx *rctx;
	struct cryptd_aead_ctx *ctx;
	crypto_completion_t compl;
	struct crypto_aead *tfm;
	int refcnt;

	rctx = aead_request_ctx(req);
	compl = rctx->complete;

	tfm = crypto_aead_reqtfm(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;
	aead_request_set_tfm(req, child);
	err = crypt(req);

out:
	ctx = crypto_aead_ctx(tfm);
	refcnt = refcount_read(&ctx->refcnt);

	local_bh_disable();
	compl(&req->base, err);
	local_bh_enable();

	if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
		crypto_free_aead(tfm);
}

static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt);
}

static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
{
	struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
	struct crypto_aead *child = ctx->child;
	struct aead_request *req;

	req = container_of(areq, struct aead_request, base);
	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt);
}

static int cryptd_aead_enqueue(struct aead_request *req,
			       crypto_completion_t compl)
{
	struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = compl;
	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
}

static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
{
	return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
}

static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
{
	struct aead_instance *inst = aead_alg_instance(tfm);
	struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
	struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *cipher;

	cipher = crypto_spawn_aead(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_aead_set_reqsize(
		tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx),
			 crypto_aead_reqsize(cipher)));
	return 0;
}

static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_aead(ctx->child);
}

static void cryptd_aead_free(struct aead_instance *inst)
{
	struct aead_instance_ctx *ctx = aead_instance_ctx(inst);

	crypto_drop_aead(&ctx->aead_spawn);
	kfree(inst);
}

static int cryptd_create_aead(struct crypto_template *tmpl,
			      struct rtattr **tb,
			      struct crypto_attr_type *algt,
			      struct cryptd_queue *queue)
{
	struct aead_instance_ctx *ctx;
	struct aead_instance *inst;
	struct aead_alg *alg;
	u32 type;
	u32 mask;
	int err;

	cryptd_type_and_mask(algt, &type, &mask);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = aead_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_grab_aead(&ctx->aead_spawn, aead_crypto_instance(inst),
			       crypto_attr_alg_name(tb[1]), type, mask);
	if (err)
		goto err_free_inst;

	alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
	err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
	if (err)
		goto err_free_inst;

	inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
		(alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
	inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);

	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

	inst->alg.init = cryptd_aead_init_tfm;
	inst->alg.exit = cryptd_aead_exit_tfm;
	inst->alg.setkey = cryptd_aead_setkey;
	inst->alg.setauthsize = cryptd_aead_setauthsize;
	inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
	inst->alg.decrypt = cryptd_aead_decrypt_enqueue;

	inst->free = cryptd_aead_free;

	err = aead_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		cryptd_aead_free(inst);
	}
	return err;
}

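/*
 * A single global cryptd_queue backs every instance created through
 * the "cryptd" template; cryptd_create() dispatches on the wrapped
 * algorithm's type to the matching constructor above.
 */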
static struct cryptd_queue queue;

static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_SKCIPHER:
		return cryptd_create_skcipher(tmpl, tb, algt, &queue);
	case CRYPTO_ALG_TYPE_HASH:
		return cryptd_create_hash(tmpl, tb, algt, &queue);
	case CRYPTO_ALG_TYPE_AEAD:
		return cryptd_create_aead(tmpl, tb, algt, &queue);
	}

	return -EINVAL;
}

static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.create = cryptd_create,
	.module = THIS_MODULE,
};

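/*
 * The functions below form the exported cryptd API. A minimal usage
 * sketch (illustrative only, error handling elided):
 *
 *	struct cryptd_ahash *chash;
 *
 *	chash = cryptd_alloc_ahash("sha256", 0, 0);
 *	if (IS_ERR(chash))
 *		return PTR_ERR(chash);
 *	... submit ahash requests against &chash->base ...
 *	cryptd_free_ahash(chash);
 *
 * Callers receive the "cryptd(...)" wrapper and can reach the
 * synchronous child transform via cryptd_ahash_child().
 */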
struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_skcipher_ctx *ctx;
	struct crypto_skcipher *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);

	tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);

	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_skcipher_ctx(tfm);
	refcount_set(&ctx->refcnt, 1);

	return container_of(tfm, struct cryptd_skcipher, base);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);

struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return &ctx->child->base;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_child);

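/*
 * The refcount starts at 1 on allocation, so a value above 1 means
 * requests are still queued; the _queued() helpers report exactly
 * that, and the _free() helpers only release the transform once the
 * last in-flight request has completed.
 */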
bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);

void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
{
	struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_skcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_skcipher);

struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
					u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_hash_ctx *ctx;
	struct crypto_ahash *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_ahash_ctx(tfm);
	refcount_set(&ctx->refcnt, 1);

	return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);

bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_queued);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);

struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
				      u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct cryptd_aead_ctx *ctx;
	struct crypto_aead *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_aead(tfm);
		return ERR_PTR(-EINVAL);
	}

	ctx = crypto_aead_ctx(tfm);
	refcount_set(&ctx->refcnt, 1);

	return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);

struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

bool cryptd_aead_queued(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_aead_queued);

void cryptd_free_aead(struct cryptd_aead *tfm)
{
	struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

	if (refcount_dec_and_test(&ctx->refcnt))
		crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);

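/*
 * The cryptd workqueue is created with max_active == 1 and
 * WQ_MEM_RECLAIM so that cryptd keeps making forward progress even
 * under memory pressure.
 */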
static int __init cryptd_init(void)
{
	int err;

	cryptd_wq = alloc_workqueue("cryptd", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE,
				    1);
	if (!cryptd_wq)
		return -ENOMEM;

	err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
	if (err)
		goto err_destroy_wq;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		goto err_fini_queue;

	return 0;

err_fini_queue:
	cryptd_fini_queue(&queue);
err_destroy_wq:
	destroy_workqueue(cryptd_wq);
	return err;
}

static void __exit cryptd_exit(void)
{
	destroy_workqueue(cryptd_wq);
	cryptd_fini_queue(&queue);
	crypto_unregister_template(&cryptd_tmpl);
}

subsys_initcall(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");
MODULE_ALIAS_CRYPTO("cryptd");