cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

crypto_engine.c (16596B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handle async block request by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 */

#include <linux/err.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <crypto/engine.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10
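
/*
 * For orientation, a minimal sketch of the per-transform context that the
 * engine code below dereferences via crypto_tfm_ctx(req->tfm); this assumes
 * the declarations carried in <crypto/engine.h> for this kernel generation:
 *
 *	struct crypto_engine_op {
 *		int (*prepare_request)(struct crypto_engine *engine, void *areq);
 *		int (*unprepare_request)(struct crypto_engine *engine, void *areq);
 *		int (*do_one_request)(struct crypto_engine *engine, void *areq);
 *	};
 *
 *	struct crypto_engine_ctx {
 *		struct crypto_engine_op op;
 *	};
 *
 * do_one_request() is mandatory; prepare_request() and unprepare_request()
 * are optional hooks run around it.
 */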

/**
 * crypto_finalize_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
static void crypto_finalize_request(struct crypto_engine *engine,
				    struct crypto_async_request *req, int err)
{
	unsigned long flags;
	bool finalize_req = false;
	int ret;
	struct crypto_engine_ctx *enginectx;

	/*
	 * If the hardware cannot enqueue more requests
	 * and the retry mechanism is not supported,
	 * make sure we are completing the current request
	 */
	if (!engine->retry_support) {
		spin_lock_irqsave(&engine->queue_lock, flags);
		if (engine->cur_req == req) {
			finalize_req = true;
			engine->cur_req = NULL;
		}
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	if (finalize_req || engine->retry_support) {
		enginectx = crypto_tfm_ctx(req->tfm);
		if (enginectx->op.prepare_request &&
		    enginectx->op.unprepare_request) {
			ret = enginectx->op.unprepare_request(engine, req);
			if (ret)
				dev_err(engine->dev, "failed to unprepare request\n");
		}
	}
	lockdep_assert_in_softirq();
	req->complete(req, err);

	kthread_queue_work(engine->kworker, &engine->pump_requests);
}

/**
 * crypto_pump_requests - dequeue one request from the engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and, if so, calls out to the driver to initialize the
 * hardware and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
				 bool in_kthread)
{
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	bool was_busy = false;
	int ret;
	struct crypto_engine_ctx *enginectx;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/* Make sure we are not already running a request */
	if (!engine->retry_support && engine->cur_req)
		goto out;

	/* If another context is idling then defer */
	if (engine->idling) {
		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	/* Check if the engine queue is idle */
	if (!crypto_queue_len(&engine->queue) || !engine->running) {
		if (!engine->busy)
			goto out;

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(engine->kworker,
					   &engine->pump_requests);
			goto out;
		}

		engine->busy = false;
		engine->idling = true;
		spin_unlock_irqrestore(&engine->queue_lock, flags);

		if (engine->unprepare_crypt_hardware &&
		    engine->unprepare_crypt_hardware(engine))
			dev_err(engine->dev, "failed to unprepare crypt hardware\n");

		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->idling = false;
		goto out;
	}

start_request:
	/* Get the first request from the engine queue to handle */
	backlog = crypto_get_backlog(&engine->queue);
	async_req = crypto_dequeue_request(&engine->queue);
	if (!async_req)
		goto out;

	/*
	 * If the hardware doesn't support the retry mechanism,
	 * keep track of the request we are processing now.
	 * We'll need it on completion (crypto_finalize_request).
	 */
	if (!engine->retry_support)
		engine->cur_req = async_req;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	if (engine->busy)
		was_busy = true;
	else
		engine->busy = true;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/* At this point we have successfully dequeued a request to process */
	if (!was_busy && engine->prepare_crypt_hardware) {
		ret = engine->prepare_crypt_hardware(engine);
		if (ret) {
			dev_err(engine->dev, "failed to prepare crypt hardware\n");
			goto req_err_2;
		}
	}

	enginectx = crypto_tfm_ctx(async_req->tfm);

	if (enginectx->op.prepare_request) {
		ret = enginectx->op.prepare_request(engine, async_req);
		if (ret) {
			dev_err(engine->dev, "failed to prepare request: %d\n",
				ret);
			goto req_err_2;
		}
	}
	if (!enginectx->op.do_one_request) {
		dev_err(engine->dev, "failed to do request\n");
		ret = -EINVAL;
		goto req_err_1;
	}

	ret = enginectx->op.do_one_request(engine, async_req);

	/* The request was not executed successfully by the hardware */
	if (ret < 0) {
		/*
		 * If the hardware queue is full (-ENOSPC), requeue the
		 * request regardless of the backlog flag.
		 * Otherwise, unprepare and complete the request.
		 */
		if (!engine->retry_support ||
		    (ret != -ENOSPC)) {
			dev_err(engine->dev,
				"Failed to do one request from queue: %d\n",
				ret);
			goto req_err_1;
		}
		/*
		 * If the retry mechanism is supported,
		 * unprepare the current request and
		 * enqueue it back into the crypto-engine queue.
		 */
		if (enginectx->op.unprepare_request) {
			ret = enginectx->op.unprepare_request(engine,
							      async_req);
			if (ret)
				dev_err(engine->dev,
					"failed to unprepare request\n");
		}
		spin_lock_irqsave(&engine->queue_lock, flags);
		/*
		 * If the hardware was unable to execute the request, enqueue
		 * it back at the front of the crypto-engine queue to keep
		 * the order of requests.
		 */
		crypto_enqueue_request_head(&engine->queue, async_req);

		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	goto retry;

req_err_1:
	if (enginectx->op.unprepare_request) {
		ret = enginectx->op.unprepare_request(engine, async_req);
		if (ret)
			dev_err(engine->dev, "failed to unprepare request\n");
	}

req_err_2:
	async_req->complete(async_req, ret);

retry:
	/* If the retry mechanism is supported, send new requests to the engine */
	if (engine->retry_support) {
		spin_lock_irqsave(&engine->queue_lock, flags);
		goto start_request;
	}
	return;

out:
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/*
	 * Batching requests is possible only if
	 * the hardware can enqueue multiple requests
	 */
	if (engine->do_batch_requests) {
		ret = engine->do_batch_requests(engine);
		if (ret)
			dev_err(engine->dev, "failed to do batch requests: %d\n",
				ret);
	}

	return;
}
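
/*
 * A minimal sketch (hypothetical driver code, not taken from this file) of
 * the ->do_one_request() callback that crypto_pump_requests() drives above;
 * names such as myhw_* are made up. A driver registered with retry_support
 * may return -ENOSPC when its hardware queue is full, which makes the engine
 * push the request back to the head of its queue instead of failing it:
 *
 *	static int myhw_do_one_request(struct crypto_engine *engine, void *areq)
 *	{
 *		struct skcipher_request *req =
 *			container_of(areq, struct skcipher_request, base);
 *		struct myhw_dev *hw = engine->priv_data;
 *
 *		if (myhw_queue_full(hw))
 *			return -ENOSPC;	// requeued by crypto_pump_requests()
 *
 *		return myhw_submit(hw, req);	// 0 on successful submission
 *	}
 */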

static void crypto_pump_work(struct kthread_work *work)
{
	struct crypto_engine *engine =
		container_of(work, struct crypto_engine, pump_requests);

	crypto_pump_requests(engine, true);
}

/**
 * crypto_transfer_request - transfer the new request into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be added to the engine queue
 * @need_pump: indicates whether the request pump should be queued to the kworker
 */
static int crypto_transfer_request(struct crypto_engine *engine,
				   struct crypto_async_request *req,
				   bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

	ret = crypto_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		kthread_queue_work(engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}

/**
 * crypto_transfer_request_to_engine - transfer one request into
 * the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be added to the engine queue
 */
static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
					     struct crypto_async_request *req)
{
	return crypto_transfer_request(engine, req, true);
}

/**
 * crypto_transfer_aead_request_to_engine - transfer one aead_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be added to the engine queue
 */
int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
					   struct aead_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine);

/**
 * crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be added to the engine queue
 */
int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
					       struct akcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine);

/**
 * crypto_transfer_hash_request_to_engine - transfer one ahash_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be added to the engine queue
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
					   struct ahash_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);

/**
 * crypto_transfer_kpp_request_to_engine - transfer one kpp_request into
 * the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be added to the engine queue
 */
int crypto_transfer_kpp_request_to_engine(struct crypto_engine *engine,
					  struct kpp_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_kpp_request_to_engine);

/**
 * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be added to the engine queue
 */
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
					       struct skcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);
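
/*
 * A minimal sketch (hypothetical driver code, not part of this file) of how
 * the transfer helpers above are typically used: the algorithm entry point
 * does no work itself and simply hands the request to the engine, which
 * later invokes the driver's ->do_one_request() callback from its kworker:
 *
 *	static int myhw_skcipher_encrypt(struct skcipher_request *req)
 *	{
 *		struct myhw_dev *hw = myhw_dev_from_tfm(req->base.tfm);
 *
 *		return crypto_transfer_skcipher_request_to_engine(hw->engine, req);
 *	}
 *
 * The return value is what crypto_enqueue_request() reported, typically
 * -EINPROGRESS (queued) or -EBUSY (queued on the backlog).
 */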

/**
 * crypto_finalize_aead_request - finalize one aead_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_aead_request(struct crypto_engine *engine,
				  struct aead_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_aead_request);

/**
 * crypto_finalize_akcipher_request - finalize one akcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_akcipher_request(struct crypto_engine *engine,
				      struct akcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request);

/**
 * crypto_finalize_hash_request - finalize one ahash_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
				  struct ahash_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);

/**
 * crypto_finalize_kpp_request - finalize one kpp_request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_kpp_request(struct crypto_engine *engine,
				 struct kpp_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_kpp_request);

/**
 * crypto_finalize_skcipher_request - finalize one skcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
				      struct skcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request);
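
/*
 * A minimal sketch (hypothetical driver code) of the completion side: once
 * the hardware signals that a request is done, e.g. from the driver's
 * interrupt handler or completion tasklet, the driver hands the result back
 * through the matching finalize helper, which runs the caller's completion
 * callback and kicks the request pump again:
 *
 *	static void myhw_request_done(struct myhw_dev *hw, int err)
 *	{
 *		struct skcipher_request *req = hw->cur_req;	// tracked by the driver
 *
 *		crypto_finalize_skcipher_request(hw->engine, req, err);
 *	}
 */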

/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine that needs to be started
 *
 * Return 0 on success, else an error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (engine->running || engine->busy) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -EBUSY;
	}

	engine->running = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	kthread_queue_work(engine->kworker, &engine->pump_requests);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);

/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine that needs to be stopped
 *
 * Return 0 on success, else an error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
	unsigned long flags;
	unsigned int limit = 500;
	int ret = 0;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/*
	 * If the engine queue is not empty or the engine is busy, we need to
	 * wait for a while for the queued requests to be pumped.
	 */
	while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		msleep(20);
		spin_lock_irqsave(&engine->queue_lock, flags);
	}

	if (crypto_queue_len(&engine->queue) || engine->busy)
		ret = -EBUSY;
	else
		engine->running = false;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (ret)
		dev_warn(engine->dev, "could not stop engine\n");

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);

/**
 * crypto_engine_alloc_init_and_set - allocate crypto hardware engine structure
 * and initialize it by setting the maximum number of entries in the software
 * crypto-engine queue.
 * @dev: the device to which the hardware engine is attached
 * @retry_support: whether the hardware supports the retry mechanism
 * @cbk_do_batch: pointer to a callback function to be invoked when executing
 *                a batch of requests.
 *                This has the form:
 *                callback(struct crypto_engine *engine)
 *                where:
 *                @engine: the crypto engine structure.
 * @rt: whether this queue is set to run as a realtime task
 * @qlen: maximum size of the crypto-engine queue
 *
 * This must be called from a context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
						       bool retry_support,
						       int (*cbk_do_batch)(struct crypto_engine *engine),
						       bool rt, int qlen)
{
	struct crypto_engine *engine;

	if (!dev)
		return NULL;

	engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return NULL;

	engine->dev = dev;
	engine->rt = rt;
	engine->running = false;
	engine->busy = false;
	engine->idling = false;
	engine->retry_support = retry_support;
	engine->priv_data = dev;
	/*
	 * Batching requests is possible only if
	 * the hardware supports the retry mechanism.
	 */
	engine->do_batch_requests = retry_support ? cbk_do_batch : NULL;

	snprintf(engine->name, sizeof(engine->name),
		 "%s-engine", dev_name(dev));

	crypto_init_queue(&engine->queue, qlen);
	spin_lock_init(&engine->queue_lock);

	engine->kworker = kthread_create_worker(0, "%s", engine->name);
	if (IS_ERR(engine->kworker)) {
		dev_err(dev, "failed to create crypto request pump task\n");
		return NULL;
	}
	kthread_init_work(&engine->pump_requests, crypto_pump_work);

	if (engine->rt) {
		dev_info(dev, "will run requests pump with realtime priority\n");
		sched_set_fifo(engine->kworker->task);
	}

	return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init_and_set);

/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device to which the hardware engine is attached
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from a context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
	return crypto_engine_alloc_init_and_set(dev, false, NULL, rt,
						CRYPTO_ENGINE_MAX_QLEN);
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
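
/*
 * A minimal probe-time sketch (hypothetical driver code) tying the helpers
 * above together: allocate the engine (or use crypto_engine_alloc_init_and_set()
 * for retry/batch support), start it, and only then register the algorithms
 * that will feed requests into it:
 *
 *	hw->engine = crypto_engine_alloc_init(dev, false);
 *	if (!hw->engine)
 *		return -ENOMEM;
 *
 *	ret = crypto_engine_start(hw->engine);
 *	if (ret) {
 *		crypto_engine_exit(hw->engine);
 *		return ret;
 *	}
 */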

/**
 * crypto_engine_exit - free the resources of the hardware engine on exit
 * @engine: the hardware engine that needs to be freed
 *
 * Return 0 on success, else an error code.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
	int ret;

	ret = crypto_engine_stop(engine);
	if (ret)
		return ret;

	kthread_destroy_worker(engine->kworker);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);
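
/*
 * The matching teardown in a driver's remove path (hypothetical sketch):
 * unregister the algorithms first so no new requests arrive, then call
 * crypto_engine_exit(), which stops the engine and destroys its kworker:
 *
 *	crypto_unregister_skcipher(&myhw_alg);
 *	crypto_engine_exit(hw->engine);
 */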

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");