cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

testmgr.c (151694B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/*
      3 * Algorithm testing framework and tests.
      4 *
      5 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
      6 * Copyright (c) 2002 Jean-Francois Dive <jef@linuxbe.org>
      7 * Copyright (c) 2007 Nokia Siemens Networks
      8 * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
      9 * Copyright (c) 2019 Google LLC
     10 *
     11 * Updated RFC4106 AES-GCM testing.
     12 *    Authors: Aidan O'Mahony (aidan.o.mahony@intel.com)
     13 *             Adrian Hoban <adrian.hoban@intel.com>
     14 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
     15 *             Tadeusz Struk (tadeusz.struk@intel.com)
     16 *    Copyright (c) 2010, Intel Corporation.
     17 */
     18
     19#include <crypto/aead.h>
     20#include <crypto/hash.h>
     21#include <crypto/skcipher.h>
     22#include <linux/err.h>
     23#include <linux/fips.h>
     24#include <linux/module.h>
     25#include <linux/once.h>
     26#include <linux/random.h>
     27#include <linux/scatterlist.h>
     28#include <linux/slab.h>
     29#include <linux/string.h>
     30#include <linux/uio.h>
     31#include <crypto/rng.h>
     32#include <crypto/drbg.h>
     33#include <crypto/akcipher.h>
     34#include <crypto/kpp.h>
     35#include <crypto/acompress.h>
     36#include <crypto/internal/cipher.h>
     37#include <crypto/internal/simd.h>
     38
     39#include "internal.h"
     40
     41MODULE_IMPORT_NS(CRYPTO_INTERNAL);
     42
     43static bool notests;
     44module_param(notests, bool, 0644);
     45MODULE_PARM_DESC(notests, "disable crypto self-tests");
     46
     47static bool panic_on_fail;
     48module_param(panic_on_fail, bool, 0444);
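/*
 * Usage sketch (parameter prefix assumed, since it depends on how this
 * file is built into the crypto manager): the self-tests can typically
 * be controlled from the kernel command line, e.g.
 *
 *	cryptomgr.notests=1        skip the self-tests entirely
 *	cryptomgr.panic_on_fail=1  panic instead of warning when a test fails
 */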
     49
     50#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
     51static bool noextratests;
     52module_param(noextratests, bool, 0644);
     53MODULE_PARM_DESC(noextratests, "disable expensive crypto self-tests");
     54
     55static unsigned int fuzz_iterations = 100;
     56module_param(fuzz_iterations, uint, 0644);
     57MODULE_PARM_DESC(fuzz_iterations, "number of fuzz test iterations");
     58#endif
     59
     60#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
     61
     62/* a perfect nop */
     63int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
     64{
     65	return 0;
     66}
     67
     68#else
     69
     70#include "testmgr.h"
     71
     72/*
     73 * Number of scratch buffers to allocate for testing (each buffer spans one or more pages).
     74 */
     75#define XBUFSIZE	8
     76
     77/*
     78 * Used by test_cipher()
     79 */
     80#define ENCRYPT 1
     81#define DECRYPT 0
     82
     83struct aead_test_suite {
     84	const struct aead_testvec *vecs;
     85	unsigned int count;
     86
     87	/*
     88	 * Set if trying to decrypt an inauthentic ciphertext with this
     89	 * algorithm might result in EINVAL rather than EBADMSG, due to other
     90	 * validation the algorithm does on the inputs such as length checks.
     91	 */
     92	unsigned int einval_allowed : 1;
     93
     94	/*
     95	 * Set if this algorithm requires that the IV be located at the end of
     96	 * the AAD buffer, in addition to being given in the normal way.  The
     97	 * behavior when the two IV copies differ is implementation-defined.
     98	 */
     99	unsigned int aad_iv : 1;
    100};
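/*
 * Illustrative suite definition (hypothetical test vector array) for an
 * algorithm that expects the IV to be duplicated at the end of the AAD:
 *
 *	static const struct aead_test_suite example_aead_suite = {
 *		.vecs = example_aead_tv_template,
 *		.count = ARRAY_SIZE(example_aead_tv_template),
 *		.einval_allowed = 1,
 *		.aad_iv = 1,
 *	};
 */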
    101
    102struct cipher_test_suite {
    103	const struct cipher_testvec *vecs;
    104	unsigned int count;
    105};
    106
    107struct comp_test_suite {
    108	struct {
    109		const struct comp_testvec *vecs;
    110		unsigned int count;
    111	} comp, decomp;
    112};
    113
    114struct hash_test_suite {
    115	const struct hash_testvec *vecs;
    116	unsigned int count;
    117};
    118
    119struct cprng_test_suite {
    120	const struct cprng_testvec *vecs;
    121	unsigned int count;
    122};
    123
    124struct drbg_test_suite {
    125	const struct drbg_testvec *vecs;
    126	unsigned int count;
    127};
    128
    129struct akcipher_test_suite {
    130	const struct akcipher_testvec *vecs;
    131	unsigned int count;
    132};
    133
    134struct kpp_test_suite {
    135	const struct kpp_testvec *vecs;
    136	unsigned int count;
    137};
    138
    139struct alg_test_desc {
    140	const char *alg;
    141	const char *generic_driver;
    142	int (*test)(const struct alg_test_desc *desc, const char *driver,
    143		    u32 type, u32 mask);
    144	int fips_allowed;	/* set if alg is allowed in fips mode */
    145
    146	union {
    147		struct aead_test_suite aead;
    148		struct cipher_test_suite cipher;
    149		struct comp_test_suite comp;
    150		struct hash_test_suite hash;
    151		struct cprng_test_suite cprng;
    152		struct drbg_test_suite drbg;
    153		struct akcipher_test_suite akcipher;
    154		struct kpp_test_suite kpp;
    155	} suite;
    156};
    157
    158static void hexdump(unsigned char *buf, unsigned int len)
    159{
    160	print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
    161			16, 1,
    162			buf, len, false);
    163}
    164
    165static int __testmgr_alloc_buf(char *buf[XBUFSIZE], int order)
    166{
    167	int i;
    168
    169	for (i = 0; i < XBUFSIZE; i++) {
    170		buf[i] = (char *)__get_free_pages(GFP_KERNEL, order);
    171		if (!buf[i])
    172			goto err_free_buf;
    173	}
    174
    175	return 0;
    176
    177err_free_buf:
    178	while (i-- > 0)
    179		free_pages((unsigned long)buf[i], order);
    180
    181	return -ENOMEM;
    182}
    183
    184static int testmgr_alloc_buf(char *buf[XBUFSIZE])
    185{
    186	return __testmgr_alloc_buf(buf, 0);
    187}
    188
    189static void __testmgr_free_buf(char *buf[XBUFSIZE], int order)
    190{
    191	int i;
    192
    193	for (i = 0; i < XBUFSIZE; i++)
    194		free_pages((unsigned long)buf[i], order);
    195}
    196
    197static void testmgr_free_buf(char *buf[XBUFSIZE])
    198{
    199	__testmgr_free_buf(buf, 0);
    200}
    201
    202#define TESTMGR_POISON_BYTE	0xfe
    203#define TESTMGR_POISON_LEN	16
    204
    205static inline void testmgr_poison(void *addr, size_t len)
    206{
    207	memset(addr, TESTMGR_POISON_BYTE, len);
    208}
    209
    210/* Is the memory region still fully poisoned? */
    211static inline bool testmgr_is_poison(const void *addr, size_t len)
    212{
    213	return memchr_inv(addr, TESTMGR_POISON_BYTE, len) == NULL;
    214}
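/*
 * Typical pattern (sketch): poison the bytes just past a result buffer
 * before an operation, then check that they are still intact afterwards;
 * if they are not, the operation overran the buffer:
 *
 *	testmgr_poison(result + digestsize, TESTMGR_POISON_LEN);
 *	... run the hash or cipher into result ...
 *	if (!testmgr_is_poison(result + digestsize, TESTMGR_POISON_LEN))
 *		return -EOVERFLOW;
 */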
    215
    216/* flush type for hash algorithms */
    217enum flush_type {
    218	/* merge with update of previous buffer(s) */
    219	FLUSH_TYPE_NONE = 0,
    220
    221	/* update with previous buffer(s) before doing this one */
    222	FLUSH_TYPE_FLUSH,
    223
    224	/* likewise, but also export and re-import the intermediate state */
    225	FLUSH_TYPE_REIMPORT,
    226};
    227
    228/* finalization function for hash algorithms */
    229enum finalization_type {
    230	FINALIZATION_TYPE_FINAL,	/* use final() */
    231	FINALIZATION_TYPE_FINUP,	/* use finup() */
    232	FINALIZATION_TYPE_DIGEST,	/* use digest() */
    233};
    234
    235/*
    236 * Whether the crypto operation will occur in-place, and if so whether the
    237 * source and destination scatterlist pointers will coincide (req->src ==
    238 * req->dst), or whether they'll merely point to two separate scatterlists
    239 * (req->src != req->dst) that reference the same underlying memory.
    240 *
    241 * This is only relevant for algorithm types that support in-place operation.
    242 */
    243enum inplace_mode {
    244	OUT_OF_PLACE,
    245	INPLACE_ONE_SGLIST,
    246	INPLACE_TWO_SGLISTS,
    247};
    248
    249#define TEST_SG_TOTAL	10000
    250
    251/**
    252 * struct test_sg_division - description of a scatterlist entry
    253 *
    254 * This struct describes one entry of a scatterlist being constructed to check a
    255 * crypto test vector.
    256 *
    257 * @proportion_of_total: length of this chunk relative to the total length,
    258 *			 given as a proportion out of TEST_SG_TOTAL so that it
    259 *			 scales to fit any test vector
    260 * @offset: byte offset into a 2-page buffer at which this chunk will start
    261 * @offset_relative_to_alignmask: if true, add the algorithm's alignmask to the
    262 *				  @offset
    263 * @flush_type: for hashes, whether an update() should be done now vs.
    264 *		continuing to accumulate data
    265 * @nosimd: if doing the pending update(), do it with SIMD disabled?
    266 */
    267struct test_sg_division {
    268	unsigned int proportion_of_total;
    269	unsigned int offset;
    270	bool offset_relative_to_alignmask;
    271	enum flush_type flush_type;
    272	bool nosimd;
    273};
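/*
 * Worked example: with TEST_SG_TOTAL == 10000, a division with
 * .proportion_of_total = 2500 covers 25% of the data, so for a 400-byte
 * test vector it resolves to (400 * 2500 + 10000/2) / 10000 = 100 bytes
 * (this is the rounding used by build_test_sglist() below).
 */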
    274
    275/**
    276 * struct testvec_config - configuration for testing a crypto test vector
    277 *
    278 * This struct describes the data layout and other parameters with which each
    279 * crypto test vector can be tested.
    280 *
    281 * @name: name of this config, logged for debugging purposes if a test fails
    282 * @inplace_mode: whether and how to operate on the data in-place, if applicable
    283 * @req_flags: extra request_flags, e.g. CRYPTO_TFM_REQ_MAY_SLEEP
    284 * @src_divs: description of how to arrange the source scatterlist
    285 * @dst_divs: description of how to arrange the dst scatterlist, if applicable
    286 *	      for the algorithm type.  Defaults to @src_divs if unset.
    287 * @iv_offset: misalignment of the IV in the range [0..MAX_ALGAPI_ALIGNMASK+1],
    288 *	       where 0 is aligned to a 2*(MAX_ALGAPI_ALIGNMASK+1) byte boundary
    289 * @iv_offset_relative_to_alignmask: if true, add the algorithm's alignmask to
    290 *				     the @iv_offset
    291 * @key_offset: misalignment of the key, where 0 is default alignment
    292 * @key_offset_relative_to_alignmask: if true, add the algorithm's alignmask to
    293 *				      the @key_offset
    294 * @finalization_type: what finalization function to use for hashes
    295 * @nosimd: execute with SIMD disabled?  Requires !CRYPTO_TFM_REQ_MAY_SLEEP.
    296 */
    297struct testvec_config {
    298	const char *name;
    299	enum inplace_mode inplace_mode;
    300	u32 req_flags;
    301	struct test_sg_division src_divs[XBUFSIZE];
    302	struct test_sg_division dst_divs[XBUFSIZE];
    303	unsigned int iv_offset;
    304	unsigned int key_offset;
    305	bool iv_offset_relative_to_alignmask;
    306	bool key_offset_relative_to_alignmask;
    307	enum finalization_type finalization_type;
    308	bool nosimd;
    309};
    310
    311#define TESTVEC_CONFIG_NAMELEN	192
    312
    313/*
    314 * The following are the lists of testvec_configs to test for each algorithm
    315 * type when the basic crypto self-tests are enabled, i.e. when
    316 * CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is unset.  They aim to provide good test
    317 * coverage, while keeping the test time much shorter than the full fuzz tests
    318 * so that the basic tests can be enabled in a wider range of circumstances.
    319 */
    320
    321/* Configs for skciphers and aeads */
    322static const struct testvec_config default_cipher_testvec_configs[] = {
    323	{
    324		.name = "in-place (one sglist)",
    325		.inplace_mode = INPLACE_ONE_SGLIST,
    326		.src_divs = { { .proportion_of_total = 10000 } },
    327	}, {
    328		.name = "in-place (two sglists)",
    329		.inplace_mode = INPLACE_TWO_SGLISTS,
    330		.src_divs = { { .proportion_of_total = 10000 } },
    331	}, {
    332		.name = "out-of-place",
    333		.inplace_mode = OUT_OF_PLACE,
    334		.src_divs = { { .proportion_of_total = 10000 } },
    335	}, {
    336		.name = "unaligned buffer, offset=1",
    337		.src_divs = { { .proportion_of_total = 10000, .offset = 1 } },
    338		.iv_offset = 1,
    339		.key_offset = 1,
    340	}, {
    341		.name = "buffer aligned only to alignmask",
    342		.src_divs = {
    343			{
    344				.proportion_of_total = 10000,
    345				.offset = 1,
    346				.offset_relative_to_alignmask = true,
    347			},
    348		},
    349		.iv_offset = 1,
    350		.iv_offset_relative_to_alignmask = true,
    351		.key_offset = 1,
    352		.key_offset_relative_to_alignmask = true,
    353	}, {
    354		.name = "two even aligned splits",
    355		.src_divs = {
    356			{ .proportion_of_total = 5000 },
    357			{ .proportion_of_total = 5000 },
    358		},
    359	}, {
    360		.name = "uneven misaligned splits, may sleep",
    361		.req_flags = CRYPTO_TFM_REQ_MAY_SLEEP,
    362		.src_divs = {
    363			{ .proportion_of_total = 1900, .offset = 33 },
    364			{ .proportion_of_total = 3300, .offset = 7  },
    365			{ .proportion_of_total = 4800, .offset = 18 },
    366		},
    367		.iv_offset = 3,
    368		.key_offset = 3,
    369	}, {
    370		.name = "misaligned splits crossing pages, inplace",
    371		.inplace_mode = INPLACE_ONE_SGLIST,
    372		.src_divs = {
    373			{
    374				.proportion_of_total = 7500,
    375				.offset = PAGE_SIZE - 32
    376			}, {
    377				.proportion_of_total = 2500,
    378				.offset = PAGE_SIZE - 7
    379			},
    380		},
    381	}
    382};
    383
    384static const struct testvec_config default_hash_testvec_configs[] = {
    385	{
    386		.name = "init+update+final aligned buffer",
    387		.src_divs = { { .proportion_of_total = 10000 } },
    388		.finalization_type = FINALIZATION_TYPE_FINAL,
    389	}, {
    390		.name = "init+finup aligned buffer",
    391		.src_divs = { { .proportion_of_total = 10000 } },
    392		.finalization_type = FINALIZATION_TYPE_FINUP,
    393	}, {
    394		.name = "digest aligned buffer",
    395		.src_divs = { { .proportion_of_total = 10000 } },
    396		.finalization_type = FINALIZATION_TYPE_DIGEST,
    397	}, {
    398		.name = "init+update+final misaligned buffer",
    399		.src_divs = { { .proportion_of_total = 10000, .offset = 1 } },
    400		.finalization_type = FINALIZATION_TYPE_FINAL,
    401		.key_offset = 1,
    402	}, {
    403		.name = "digest buffer aligned only to alignmask",
    404		.src_divs = {
    405			{
    406				.proportion_of_total = 10000,
    407				.offset = 1,
    408				.offset_relative_to_alignmask = true,
    409			},
    410		},
    411		.finalization_type = FINALIZATION_TYPE_DIGEST,
    412		.key_offset = 1,
    413		.key_offset_relative_to_alignmask = true,
    414	}, {
    415		.name = "init+update+update+final two even splits",
    416		.src_divs = {
    417			{ .proportion_of_total = 5000 },
    418			{
    419				.proportion_of_total = 5000,
    420				.flush_type = FLUSH_TYPE_FLUSH,
    421			},
    422		},
    423		.finalization_type = FINALIZATION_TYPE_FINAL,
    424	}, {
    425		.name = "digest uneven misaligned splits, may sleep",
    426		.req_flags = CRYPTO_TFM_REQ_MAY_SLEEP,
    427		.src_divs = {
    428			{ .proportion_of_total = 1900, .offset = 33 },
    429			{ .proportion_of_total = 3300, .offset = 7  },
    430			{ .proportion_of_total = 4800, .offset = 18 },
    431		},
    432		.finalization_type = FINALIZATION_TYPE_DIGEST,
    433	}, {
    434		.name = "digest misaligned splits crossing pages",
    435		.src_divs = {
    436			{
    437				.proportion_of_total = 7500,
    438				.offset = PAGE_SIZE - 32,
    439			}, {
    440				.proportion_of_total = 2500,
    441				.offset = PAGE_SIZE - 7,
    442			},
    443		},
    444		.finalization_type = FINALIZATION_TYPE_DIGEST,
    445	}, {
    446		.name = "import/export",
    447		.src_divs = {
    448			{
    449				.proportion_of_total = 6500,
    450				.flush_type = FLUSH_TYPE_REIMPORT,
    451			}, {
    452				.proportion_of_total = 3500,
    453				.flush_type = FLUSH_TYPE_REIMPORT,
    454			},
    455		},
    456		.finalization_type = FINALIZATION_TYPE_FINAL,
    457	}
    458};
    459
    460static unsigned int count_test_sg_divisions(const struct test_sg_division *divs)
    461{
    462	unsigned int remaining = TEST_SG_TOTAL;
    463	unsigned int ndivs = 0;
    464
    465	do {
    466		remaining -= divs[ndivs++].proportion_of_total;
    467	} while (remaining);
    468
    469	return ndivs;
    470}
    471
    472#define SGDIVS_HAVE_FLUSHES	BIT(0)
    473#define SGDIVS_HAVE_NOSIMD	BIT(1)
    474
    475static bool valid_sg_divisions(const struct test_sg_division *divs,
    476			       unsigned int count, int *flags_ret)
    477{
    478	unsigned int total = 0;
    479	unsigned int i;
    480
    481	for (i = 0; i < count && total != TEST_SG_TOTAL; i++) {
    482		if (divs[i].proportion_of_total <= 0 ||
    483		    divs[i].proportion_of_total > TEST_SG_TOTAL - total)
    484			return false;
    485		total += divs[i].proportion_of_total;
    486		if (divs[i].flush_type != FLUSH_TYPE_NONE)
    487			*flags_ret |= SGDIVS_HAVE_FLUSHES;
    488		if (divs[i].nosimd)
    489			*flags_ret |= SGDIVS_HAVE_NOSIMD;
    490	}
    491	return total == TEST_SG_TOTAL &&
    492		memchr_inv(&divs[i], 0, (count - i) * sizeof(divs[0])) == NULL;
    493}
    494
    495/*
    496 * Check whether the given testvec_config is valid.  This isn't strictly needed
    497 * since every testvec_config should be valid, but check anyway so that people
    498 * don't unknowingly add broken configs that don't do what they wanted.
    499 */
    500static bool valid_testvec_config(const struct testvec_config *cfg)
    501{
    502	int flags = 0;
    503
    504	if (cfg->name == NULL)
    505		return false;
    506
    507	if (!valid_sg_divisions(cfg->src_divs, ARRAY_SIZE(cfg->src_divs),
    508				&flags))
    509		return false;
    510
    511	if (cfg->dst_divs[0].proportion_of_total) {
    512		if (!valid_sg_divisions(cfg->dst_divs,
    513					ARRAY_SIZE(cfg->dst_divs), &flags))
    514			return false;
    515	} else {
    516		if (memchr_inv(cfg->dst_divs, 0, sizeof(cfg->dst_divs)))
    517			return false;
    518		/* defaults to dst_divs=src_divs */
    519	}
    520
    521	if (cfg->iv_offset +
    522	    (cfg->iv_offset_relative_to_alignmask ? MAX_ALGAPI_ALIGNMASK : 0) >
    523	    MAX_ALGAPI_ALIGNMASK + 1)
    524		return false;
    525
    526	if ((flags & (SGDIVS_HAVE_FLUSHES | SGDIVS_HAVE_NOSIMD)) &&
    527	    cfg->finalization_type == FINALIZATION_TYPE_DIGEST)
    528		return false;
    529
    530	if ((cfg->nosimd || (flags & SGDIVS_HAVE_NOSIMD)) &&
    531	    (cfg->req_flags & CRYPTO_TFM_REQ_MAY_SLEEP))
    532		return false;
    533
    534	return true;
    535}
    536
    537struct test_sglist {
    538	char *bufs[XBUFSIZE];
    539	struct scatterlist sgl[XBUFSIZE];
    540	struct scatterlist sgl_saved[XBUFSIZE];
    541	struct scatterlist *sgl_ptr;
    542	unsigned int nents;
    543};
    544
    545static int init_test_sglist(struct test_sglist *tsgl)
    546{
    547	return __testmgr_alloc_buf(tsgl->bufs, 1 /* two pages per buffer */);
    548}
    549
    550static void destroy_test_sglist(struct test_sglist *tsgl)
    551{
    552	return __testmgr_free_buf(tsgl->bufs, 1 /* two pages per buffer */);
    553}
    554
    555/**
    556 * build_test_sglist() - build a scatterlist for a crypto test
    557 *
    558 * @tsgl: the scatterlist to build.  @tsgl->bufs[] contains an array of 2-page
    559 *	  buffers which the scatterlist @tsgl->sgl[] will be made to point into.
    560 * @divs: the layout specification on which the scatterlist will be based
    561 * @alignmask: the algorithm's alignmask
    562 * @total_len: the total length of the scatterlist to build in bytes
    563 * @data: if non-NULL, the buffers will be filled with this data until it ends.
    564 *	  Otherwise the buffers will be poisoned.  In both cases, some bytes
    565 *	  past the end of each buffer will be poisoned to help detect overruns.
    566 * @out_divs: if non-NULL, the test_sg_division to which each scatterlist entry
    567 *	      corresponds will be returned here.  This will match @divs except
    568 *	      that divisions resolving to a length of 0 are omitted as they are
    569 *	      not included in the scatterlist.
    570 *
    571 * Return: 0 or a -errno value
    572 */
    573static int build_test_sglist(struct test_sglist *tsgl,
    574			     const struct test_sg_division *divs,
    575			     const unsigned int alignmask,
    576			     const unsigned int total_len,
    577			     struct iov_iter *data,
    578			     const struct test_sg_division *out_divs[XBUFSIZE])
    579{
    580	struct {
    581		const struct test_sg_division *div;
    582		size_t length;
    583	} partitions[XBUFSIZE];
    584	const unsigned int ndivs = count_test_sg_divisions(divs);
    585	unsigned int len_remaining = total_len;
    586	unsigned int i;
    587
    588	BUILD_BUG_ON(ARRAY_SIZE(partitions) != ARRAY_SIZE(tsgl->sgl));
    589	if (WARN_ON(ndivs > ARRAY_SIZE(partitions)))
    590		return -EINVAL;
    591
    592	/* Calculate the (div, length) pairs */
    593	tsgl->nents = 0;
    594	for (i = 0; i < ndivs; i++) {
    595		unsigned int len_this_sg =
    596			min(len_remaining,
    597			    (total_len * divs[i].proportion_of_total +
    598			     TEST_SG_TOTAL / 2) / TEST_SG_TOTAL);
    599
    600		if (len_this_sg != 0) {
    601			partitions[tsgl->nents].div = &divs[i];
    602			partitions[tsgl->nents].length = len_this_sg;
    603			tsgl->nents++;
    604			len_remaining -= len_this_sg;
    605		}
    606	}
    607	if (tsgl->nents == 0) {
    608		partitions[tsgl->nents].div = &divs[0];
    609		partitions[tsgl->nents].length = 0;
    610		tsgl->nents++;
    611	}
    612	partitions[tsgl->nents - 1].length += len_remaining;
    613
    614	/* Set up the sgl entries and fill the data or poison */
    615	sg_init_table(tsgl->sgl, tsgl->nents);
    616	for (i = 0; i < tsgl->nents; i++) {
    617		unsigned int offset = partitions[i].div->offset;
    618		void *addr;
    619
    620		if (partitions[i].div->offset_relative_to_alignmask)
    621			offset += alignmask;
    622
    623		while (offset + partitions[i].length + TESTMGR_POISON_LEN >
    624		       2 * PAGE_SIZE) {
    625			if (WARN_ON(offset <= 0))
    626				return -EINVAL;
    627			offset /= 2;
    628		}
    629
    630		addr = &tsgl->bufs[i][offset];
    631		sg_set_buf(&tsgl->sgl[i], addr, partitions[i].length);
    632
    633		if (out_divs)
    634			out_divs[i] = partitions[i].div;
    635
    636		if (data) {
    637			size_t copy_len, copied;
    638
    639			copy_len = min(partitions[i].length, data->count);
    640			copied = copy_from_iter(addr, copy_len, data);
    641			if (WARN_ON(copied != copy_len))
    642				return -EINVAL;
    643			testmgr_poison(addr + copy_len, partitions[i].length +
    644				       TESTMGR_POISON_LEN - copy_len);
    645		} else {
    646			testmgr_poison(addr, partitions[i].length +
    647				       TESTMGR_POISON_LEN);
    648		}
    649	}
    650
    651	sg_mark_end(&tsgl->sgl[tsgl->nents - 1]);
    652	tsgl->sgl_ptr = tsgl->sgl;
    653	memcpy(tsgl->sgl_saved, tsgl->sgl, tsgl->nents * sizeof(tsgl->sgl[0]));
    654	return 0;
    655}
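/*
 * Worked example of the partitioning above: for total_len = 256 and the
 * "uneven misaligned splits" proportions 1900/3300/4800, the rounded
 * per-entry lengths come out to 49, 84 and 123 bytes, which already sum
 * to 256, so the last entry absorbs no extra len_remaining.
 */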
    656
    657/*
    658 * Verify that a scatterlist crypto operation produced the correct output.
    659 *
    660 * @tsgl: scatterlist containing the actual output
    661 * @expected_output: buffer containing the expected output
    662 * @len_to_check: length of @expected_output in bytes
    663 * @unchecked_prefix_len: number of ignored bytes in @tsgl prior to real result
    664 * @check_poison: verify that the poison bytes after each chunk are intact?
    665 *
    666 * Return: 0 if correct, -EINVAL if incorrect, -EOVERFLOW if buffer overrun.
    667 */
    668static int verify_correct_output(const struct test_sglist *tsgl,
    669				 const char *expected_output,
    670				 unsigned int len_to_check,
    671				 unsigned int unchecked_prefix_len,
    672				 bool check_poison)
    673{
    674	unsigned int i;
    675
    676	for (i = 0; i < tsgl->nents; i++) {
    677		struct scatterlist *sg = &tsgl->sgl_ptr[i];
    678		unsigned int len = sg->length;
    679		unsigned int offset = sg->offset;
    680		const char *actual_output;
    681
    682		if (unchecked_prefix_len) {
    683			if (unchecked_prefix_len >= len) {
    684				unchecked_prefix_len -= len;
    685				continue;
    686			}
    687			offset += unchecked_prefix_len;
    688			len -= unchecked_prefix_len;
    689			unchecked_prefix_len = 0;
    690		}
    691		len = min(len, len_to_check);
    692		actual_output = page_address(sg_page(sg)) + offset;
    693		if (memcmp(expected_output, actual_output, len) != 0)
    694			return -EINVAL;
    695		if (check_poison &&
    696		    !testmgr_is_poison(actual_output + len, TESTMGR_POISON_LEN))
    697			return -EOVERFLOW;
    698		len_to_check -= len;
    699		expected_output += len;
    700	}
    701	if (WARN_ON(len_to_check != 0))
    702		return -EINVAL;
    703	return 0;
    704}
    705
    706static bool is_test_sglist_corrupted(const struct test_sglist *tsgl)
    707{
    708	unsigned int i;
    709
    710	for (i = 0; i < tsgl->nents; i++) {
    711		if (tsgl->sgl[i].page_link != tsgl->sgl_saved[i].page_link)
    712			return true;
    713		if (tsgl->sgl[i].offset != tsgl->sgl_saved[i].offset)
    714			return true;
    715		if (tsgl->sgl[i].length != tsgl->sgl_saved[i].length)
    716			return true;
    717	}
    718	return false;
    719}
    720
    721struct cipher_test_sglists {
    722	struct test_sglist src;
    723	struct test_sglist dst;
    724};
    725
    726static struct cipher_test_sglists *alloc_cipher_test_sglists(void)
    727{
    728	struct cipher_test_sglists *tsgls;
    729
    730	tsgls = kmalloc(sizeof(*tsgls), GFP_KERNEL);
    731	if (!tsgls)
    732		return NULL;
    733
    734	if (init_test_sglist(&tsgls->src) != 0)
    735		goto fail_kfree;
    736	if (init_test_sglist(&tsgls->dst) != 0)
    737		goto fail_destroy_src;
    738
    739	return tsgls;
    740
    741fail_destroy_src:
    742	destroy_test_sglist(&tsgls->src);
    743fail_kfree:
    744	kfree(tsgls);
    745	return NULL;
    746}
    747
    748static void free_cipher_test_sglists(struct cipher_test_sglists *tsgls)
    749{
    750	if (tsgls) {
    751		destroy_test_sglist(&tsgls->src);
    752		destroy_test_sglist(&tsgls->dst);
    753		kfree(tsgls);
    754	}
    755}
    756
    757/* Build the src and dst scatterlists for an skcipher or AEAD test */
    758static int build_cipher_test_sglists(struct cipher_test_sglists *tsgls,
    759				     const struct testvec_config *cfg,
    760				     unsigned int alignmask,
    761				     unsigned int src_total_len,
    762				     unsigned int dst_total_len,
    763				     const struct kvec *inputs,
    764				     unsigned int nr_inputs)
    765{
    766	struct iov_iter input;
    767	int err;
    768
    769	iov_iter_kvec(&input, WRITE, inputs, nr_inputs, src_total_len);
    770	err = build_test_sglist(&tsgls->src, cfg->src_divs, alignmask,
    771				cfg->inplace_mode != OUT_OF_PLACE ?
    772					max(dst_total_len, src_total_len) :
    773					src_total_len,
    774				&input, NULL);
    775	if (err)
    776		return err;
    777
    778	/*
    779	 * In-place crypto operations can use the same scatterlist for both the
    780	 * source and destination (req->src == req->dst), or can use separate
    781	 * scatterlists (req->src != req->dst) which point to the same
    782	 * underlying memory.  Make sure to test both cases.
    783	 */
    784	if (cfg->inplace_mode == INPLACE_ONE_SGLIST) {
    785		tsgls->dst.sgl_ptr = tsgls->src.sgl;
    786		tsgls->dst.nents = tsgls->src.nents;
    787		return 0;
    788	}
    789	if (cfg->inplace_mode == INPLACE_TWO_SGLISTS) {
    790		/*
    791		 * For now we keep it simple and only test the case where the
    792		 * two scatterlists have identical entries, rather than
    793		 * different entries that split up the same memory differently.
    794		 */
    795		memcpy(tsgls->dst.sgl, tsgls->src.sgl,
    796		       tsgls->src.nents * sizeof(tsgls->src.sgl[0]));
    797		memcpy(tsgls->dst.sgl_saved, tsgls->src.sgl,
    798		       tsgls->src.nents * sizeof(tsgls->src.sgl[0]));
    799		tsgls->dst.sgl_ptr = tsgls->dst.sgl;
    800		tsgls->dst.nents = tsgls->src.nents;
    801		return 0;
    802	}
    803	/* Out of place */
    804	return build_test_sglist(&tsgls->dst,
    805				 cfg->dst_divs[0].proportion_of_total ?
    806					cfg->dst_divs : cfg->src_divs,
    807				 alignmask, dst_total_len, NULL, NULL);
    808}
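/*
 * Sketch of a typical call (variable names illustrative): the skcipher
 * tests later in this file feed the input as a single kvec and size both
 * sides to the test vector length, roughly:
 *
 *	struct kvec input = {
 *		.iov_base = (void *)vec->ptext,
 *		.iov_len = vec->len,
 *	};
 *	err = build_cipher_test_sglists(tsgls, cfg, alignmask,
 *					vec->len, vec->len, &input, 1);
 */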
    809
    810/*
    811 * Support for testing passing a misaligned key to setkey():
    812 *
    813 * If cfg->key_offset is set, copy the key into a new buffer at that offset,
    814 * optionally adding alignmask.  Else, just use the key directly.
    815 */
    816static int prepare_keybuf(const u8 *key, unsigned int ksize,
    817			  const struct testvec_config *cfg,
    818			  unsigned int alignmask,
    819			  const u8 **keybuf_ret, const u8 **keyptr_ret)
    820{
    821	unsigned int key_offset = cfg->key_offset;
    822	u8 *keybuf = NULL, *keyptr = (u8 *)key;
    823
    824	if (key_offset != 0) {
    825		if (cfg->key_offset_relative_to_alignmask)
    826			key_offset += alignmask;
    827		keybuf = kmalloc(key_offset + ksize, GFP_KERNEL);
    828		if (!keybuf)
    829			return -ENOMEM;
    830		keyptr = keybuf + key_offset;
    831		memcpy(keyptr, key, ksize);
    832	}
    833	*keybuf_ret = keybuf;
    834	*keyptr_ret = keyptr;
    835	return 0;
    836}
    837
    838/* Like setkey_f(tfm, key, ksize), but sometimes misalign the key */
    839#define do_setkey(setkey_f, tfm, key, ksize, cfg, alignmask)		\
    840({									\
    841	const u8 *keybuf, *keyptr;					\
    842	int err;							\
    843									\
    844	err = prepare_keybuf((key), (ksize), (cfg), (alignmask),	\
    845			     &keybuf, &keyptr);				\
    846	if (err == 0) {							\
    847		err = setkey_f((tfm), keyptr, (ksize));			\
    848		kfree(keybuf);						\
    849	}								\
    850	err;								\
    851})
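/*
 * Example use (as in test_shash_vec_cfg() below):
 *
 *	err = do_setkey(crypto_shash_setkey, tfm, vec->key, vec->ksize,
 *			cfg, alignmask);
 *
 * Configs with a nonzero key_offset thereby exercise setkey() with a
 * deliberately misaligned key buffer.
 */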
    852
    853#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
    854
    855/* Generate a random length in range [0, max_len], but prefer smaller values */
    856static unsigned int generate_random_length(unsigned int max_len)
    857{
    858	unsigned int len = prandom_u32() % (max_len + 1);
    859
    860	switch (prandom_u32() % 4) {
    861	case 0:
    862		return len % 64;
    863	case 1:
    864		return len % 256;
    865	case 2:
    866		return len % 1024;
    867	default:
    868		return len;
    869	}
    870}
    871
    872/* Flip a random bit in the given nonempty data buffer */
    873static void flip_random_bit(u8 *buf, size_t size)
    874{
    875	size_t bitpos;
    876
    877	bitpos = prandom_u32() % (size * 8);
    878	buf[bitpos / 8] ^= 1 << (bitpos % 8);
    879}
    880
    881/* Flip a random byte in the given nonempty data buffer */
    882static void flip_random_byte(u8 *buf, size_t size)
    883{
    884	buf[prandom_u32() % size] ^= 0xff;
    885}
    886
    887/* Sometimes make some random changes to the given nonempty data buffer */
    888static void mutate_buffer(u8 *buf, size_t size)
    889{
    890	size_t num_flips;
    891	size_t i;
    892
    893	/* Sometimes flip some bits */
    894	if (prandom_u32() % 4 == 0) {
    895		num_flips = min_t(size_t, 1 << (prandom_u32() % 8), size * 8);
    896		for (i = 0; i < num_flips; i++)
    897			flip_random_bit(buf, size);
    898	}
    899
    900	/* Sometimes flip some bytes */
    901	if (prandom_u32() % 4 == 0) {
    902		num_flips = min_t(size_t, 1 << (prandom_u32() % 8), size);
    903		for (i = 0; i < num_flips; i++)
    904			flip_random_byte(buf, size);
    905	}
    906}
    907
    908/* Randomly generate 'count' bytes, but sometimes make them "interesting" */
    909static void generate_random_bytes(u8 *buf, size_t count)
    910{
    911	u8 b;
    912	u8 increment;
    913	size_t i;
    914
    915	if (count == 0)
    916		return;
    917
    918	switch (prandom_u32() % 8) { /* Choose a generation strategy */
    919	case 0:
    920	case 1:
    921		/* All the same byte, plus optional mutations */
    922		switch (prandom_u32() % 4) {
    923		case 0:
    924			b = 0x00;
    925			break;
    926		case 1:
    927			b = 0xff;
    928			break;
    929		default:
    930			b = (u8)prandom_u32();
    931			break;
    932		}
    933		memset(buf, b, count);
    934		mutate_buffer(buf, count);
    935		break;
    936	case 2:
    937		/* Ascending or descending bytes, plus optional mutations */
    938		increment = (u8)prandom_u32();
    939		b = (u8)prandom_u32();
    940		for (i = 0; i < count; i++, b += increment)
    941			buf[i] = b;
    942		mutate_buffer(buf, count);
    943		break;
    944	default:
    945		/* Fully random bytes */
    946		for (i = 0; i < count; i++)
    947			buf[i] = (u8)prandom_u32();
    948	}
    949}
    950
    951static char *generate_random_sgl_divisions(struct test_sg_division *divs,
    952					   size_t max_divs, char *p, char *end,
    953					   bool gen_flushes, u32 req_flags)
    954{
    955	struct test_sg_division *div = divs;
    956	unsigned int remaining = TEST_SG_TOTAL;
    957
    958	do {
    959		unsigned int this_len;
    960		const char *flushtype_str;
    961
    962		if (div == &divs[max_divs - 1] || prandom_u32() % 2 == 0)
    963			this_len = remaining;
    964		else
    965			this_len = 1 + (prandom_u32() % remaining);
    966		div->proportion_of_total = this_len;
    967
    968		if (prandom_u32() % 4 == 0)
    969			div->offset = (PAGE_SIZE - 128) + (prandom_u32() % 128);
    970		else if (prandom_u32() % 2 == 0)
    971			div->offset = prandom_u32() % 32;
    972		else
    973			div->offset = prandom_u32() % PAGE_SIZE;
    974		if (prandom_u32() % 8 == 0)
    975			div->offset_relative_to_alignmask = true;
    976
    977		div->flush_type = FLUSH_TYPE_NONE;
    978		if (gen_flushes) {
    979			switch (prandom_u32() % 4) {
    980			case 0:
    981				div->flush_type = FLUSH_TYPE_REIMPORT;
    982				break;
    983			case 1:
    984				div->flush_type = FLUSH_TYPE_FLUSH;
    985				break;
    986			}
    987		}
    988
    989		if (div->flush_type != FLUSH_TYPE_NONE &&
    990		    !(req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) &&
    991		    prandom_u32() % 2 == 0)
    992			div->nosimd = true;
    993
    994		switch (div->flush_type) {
    995		case FLUSH_TYPE_FLUSH:
    996			if (div->nosimd)
    997				flushtype_str = "<flush,nosimd>";
    998			else
    999				flushtype_str = "<flush>";
   1000			break;
   1001		case FLUSH_TYPE_REIMPORT:
   1002			if (div->nosimd)
   1003				flushtype_str = "<reimport,nosimd>";
   1004			else
   1005				flushtype_str = "<reimport>";
   1006			break;
   1007		default:
   1008			flushtype_str = "";
   1009			break;
   1010		}
   1011
   1012		BUILD_BUG_ON(TEST_SG_TOTAL != 10000); /* for "%u.%u%%" */
   1013		p += scnprintf(p, end - p, "%s%u.%u%%@%s+%u%s", flushtype_str,
   1014			       this_len / 100, this_len % 100,
   1015			       div->offset_relative_to_alignmask ?
   1016					"alignmask" : "",
   1017			       div->offset, this_len == remaining ? "" : ", ");
   1018		remaining -= this_len;
   1019		div++;
   1020	} while (remaining);
   1021
   1022	return p;
   1023}
   1024
   1025/* Generate a random testvec_config for fuzz testing */
   1026static void generate_random_testvec_config(struct testvec_config *cfg,
   1027					   char *name, size_t max_namelen)
   1028{
   1029	char *p = name;
   1030	char * const end = name + max_namelen;
   1031
   1032	memset(cfg, 0, sizeof(*cfg));
   1033
   1034	cfg->name = name;
   1035
   1036	p += scnprintf(p, end - p, "random:");
   1037
   1038	switch (prandom_u32() % 4) {
   1039	case 0:
   1040	case 1:
   1041		cfg->inplace_mode = OUT_OF_PLACE;
   1042		break;
   1043	case 2:
   1044		cfg->inplace_mode = INPLACE_ONE_SGLIST;
   1045		p += scnprintf(p, end - p, " inplace_one_sglist");
   1046		break;
   1047	default:
   1048		cfg->inplace_mode = INPLACE_TWO_SGLISTS;
   1049		p += scnprintf(p, end - p, " inplace_two_sglists");
   1050		break;
   1051	}
   1052
   1053	if (prandom_u32() % 2 == 0) {
   1054		cfg->req_flags |= CRYPTO_TFM_REQ_MAY_SLEEP;
   1055		p += scnprintf(p, end - p, " may_sleep");
   1056	}
   1057
   1058	switch (prandom_u32() % 4) {
   1059	case 0:
   1060		cfg->finalization_type = FINALIZATION_TYPE_FINAL;
   1061		p += scnprintf(p, end - p, " use_final");
   1062		break;
   1063	case 1:
   1064		cfg->finalization_type = FINALIZATION_TYPE_FINUP;
   1065		p += scnprintf(p, end - p, " use_finup");
   1066		break;
   1067	default:
   1068		cfg->finalization_type = FINALIZATION_TYPE_DIGEST;
   1069		p += scnprintf(p, end - p, " use_digest");
   1070		break;
   1071	}
   1072
   1073	if (!(cfg->req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) &&
   1074	    prandom_u32() % 2 == 0) {
   1075		cfg->nosimd = true;
   1076		p += scnprintf(p, end - p, " nosimd");
   1077	}
   1078
   1079	p += scnprintf(p, end - p, " src_divs=[");
   1080	p = generate_random_sgl_divisions(cfg->src_divs,
   1081					  ARRAY_SIZE(cfg->src_divs), p, end,
   1082					  (cfg->finalization_type !=
   1083					   FINALIZATION_TYPE_DIGEST),
   1084					  cfg->req_flags);
   1085	p += scnprintf(p, end - p, "]");
   1086
   1087	if (cfg->inplace_mode == OUT_OF_PLACE && prandom_u32() % 2 == 0) {
   1088		p += scnprintf(p, end - p, " dst_divs=[");
   1089		p = generate_random_sgl_divisions(cfg->dst_divs,
   1090						  ARRAY_SIZE(cfg->dst_divs),
   1091						  p, end, false,
   1092						  cfg->req_flags);
   1093		p += scnprintf(p, end - p, "]");
   1094	}
   1095
   1096	if (prandom_u32() % 2 == 0) {
   1097		cfg->iv_offset = 1 + (prandom_u32() % MAX_ALGAPI_ALIGNMASK);
   1098		p += scnprintf(p, end - p, " iv_offset=%u", cfg->iv_offset);
   1099	}
   1100
   1101	if (prandom_u32() % 2 == 0) {
   1102		cfg->key_offset = 1 + (prandom_u32() % MAX_ALGAPI_ALIGNMASK);
   1103		p += scnprintf(p, end - p, " key_offset=%u", cfg->key_offset);
   1104	}
   1105
   1106	WARN_ON_ONCE(!valid_testvec_config(cfg));
   1107}
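/*
 * A generated config description might look something like (illustrative):
 *
 *	"random: may_sleep use_final src_divs=[<flush>55.0%@+4010, 45.0%@alignmask+17] key_offset=37"
 *
 * The name thus records every randomized choice and is printed verbatim
 * when a fuzzed configuration makes a test fail.
 */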
   1108
   1109static void crypto_disable_simd_for_test(void)
   1110{
   1111	migrate_disable();
   1112	__this_cpu_write(crypto_simd_disabled_for_test, true);
   1113}
   1114
   1115static void crypto_reenable_simd_for_test(void)
   1116{
   1117	__this_cpu_write(crypto_simd_disabled_for_test, false);
   1118	migrate_enable();
   1119}
   1120
   1121/*
   1122 * Given an algorithm name, build the name of the generic implementation of that
   1123 * algorithm, assuming the usual naming convention.  Specifically, this appends
   1124 * "-generic" to every part of the name that is not a template name.  Examples:
   1125 *
   1126 *	aes => aes-generic
   1127 *	cbc(aes) => cbc(aes-generic)
   1128 *	cts(cbc(aes)) => cts(cbc(aes-generic))
   1129 *	rfc7539(chacha20,poly1305) => rfc7539(chacha20-generic,poly1305-generic)
   1130 *
   1131 * Return: 0 on success, or -ENAMETOOLONG if the generic name would be too long
   1132 */
   1133static int build_generic_driver_name(const char *algname,
   1134				     char driver_name[CRYPTO_MAX_ALG_NAME])
   1135{
   1136	const char *in = algname;
   1137	char *out = driver_name;
   1138	size_t len = strlen(algname);
   1139
   1140	if (len >= CRYPTO_MAX_ALG_NAME)
   1141		goto too_long;
   1142	do {
   1143		const char *in_saved = in;
   1144
   1145		while (*in && *in != '(' && *in != ')' && *in != ',')
   1146			*out++ = *in++;
   1147		if (*in != '(' && in > in_saved) {
   1148			len += 8;
   1149			if (len >= CRYPTO_MAX_ALG_NAME)
   1150				goto too_long;
   1151			memcpy(out, "-generic", 8);
   1152			out += 8;
   1153		}
   1154	} while ((*out++ = *in++) != '\0');
   1155	return 0;
   1156
   1157too_long:
   1158	pr_err("alg: generic driver name for \"%s\" would be too long\n",
   1159	       algname);
   1160	return -ENAMETOOLONG;
   1161}
   1162#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
   1163static void crypto_disable_simd_for_test(void)
   1164{
   1165}
   1166
   1167static void crypto_reenable_simd_for_test(void)
   1168{
   1169}
   1170#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
   1171
   1172static int build_hash_sglist(struct test_sglist *tsgl,
   1173			     const struct hash_testvec *vec,
   1174			     const struct testvec_config *cfg,
   1175			     unsigned int alignmask,
   1176			     const struct test_sg_division *divs[XBUFSIZE])
   1177{
   1178	struct kvec kv;
   1179	struct iov_iter input;
   1180
   1181	kv.iov_base = (void *)vec->plaintext;
   1182	kv.iov_len = vec->psize;
   1183	iov_iter_kvec(&input, WRITE, &kv, 1, vec->psize);
   1184	return build_test_sglist(tsgl, cfg->src_divs, alignmask, vec->psize,
   1185				 &input, divs);
   1186}
   1187
   1188static int check_hash_result(const char *type,
   1189			     const u8 *result, unsigned int digestsize,
   1190			     const struct hash_testvec *vec,
   1191			     const char *vec_name,
   1192			     const char *driver,
   1193			     const struct testvec_config *cfg)
   1194{
   1195	if (memcmp(result, vec->digest, digestsize) != 0) {
   1196		pr_err("alg: %s: %s test failed (wrong result) on test vector %s, cfg=\"%s\"\n",
   1197		       type, driver, vec_name, cfg->name);
   1198		return -EINVAL;
   1199	}
   1200	if (!testmgr_is_poison(&result[digestsize], TESTMGR_POISON_LEN)) {
   1201		pr_err("alg: %s: %s overran result buffer on test vector %s, cfg=\"%s\"\n",
   1202		       type, driver, vec_name, cfg->name);
   1203		return -EOVERFLOW;
   1204	}
   1205	return 0;
   1206}
   1207
   1208static inline int check_shash_op(const char *op, int err,
   1209				 const char *driver, const char *vec_name,
   1210				 const struct testvec_config *cfg)
   1211{
   1212	if (err)
   1213		pr_err("alg: shash: %s %s() failed with err %d on test vector %s, cfg=\"%s\"\n",
   1214		       driver, op, err, vec_name, cfg->name);
   1215	return err;
   1216}
   1217
   1218/* Test one hash test vector in one configuration, using the shash API */
   1219static int test_shash_vec_cfg(const struct hash_testvec *vec,
   1220			      const char *vec_name,
   1221			      const struct testvec_config *cfg,
   1222			      struct shash_desc *desc,
   1223			      struct test_sglist *tsgl,
   1224			      u8 *hashstate)
   1225{
   1226	struct crypto_shash *tfm = desc->tfm;
   1227	const unsigned int alignmask = crypto_shash_alignmask(tfm);
   1228	const unsigned int digestsize = crypto_shash_digestsize(tfm);
   1229	const unsigned int statesize = crypto_shash_statesize(tfm);
   1230	const char *driver = crypto_shash_driver_name(tfm);
   1231	const struct test_sg_division *divs[XBUFSIZE];
   1232	unsigned int i;
   1233	u8 result[HASH_MAX_DIGESTSIZE + TESTMGR_POISON_LEN];
   1234	int err;
   1235
   1236	/* Set the key, if specified */
   1237	if (vec->ksize) {
   1238		err = do_setkey(crypto_shash_setkey, tfm, vec->key, vec->ksize,
   1239				cfg, alignmask);
   1240		if (err) {
   1241			if (err == vec->setkey_error)
   1242				return 0;
   1243			pr_err("alg: shash: %s setkey failed on test vector %s; expected_error=%d, actual_error=%d, flags=%#x\n",
   1244			       driver, vec_name, vec->setkey_error, err,
   1245			       crypto_shash_get_flags(tfm));
   1246			return err;
   1247		}
   1248		if (vec->setkey_error) {
   1249			pr_err("alg: shash: %s setkey unexpectedly succeeded on test vector %s; expected_error=%d\n",
   1250			       driver, vec_name, vec->setkey_error);
   1251			return -EINVAL;
   1252		}
   1253	}
   1254
   1255	/* Build the scatterlist for the source data */
   1256	err = build_hash_sglist(tsgl, vec, cfg, alignmask, divs);
   1257	if (err) {
   1258		pr_err("alg: shash: %s: error preparing scatterlist for test vector %s, cfg=\"%s\"\n",
   1259		       driver, vec_name, cfg->name);
   1260		return err;
   1261	}
   1262
   1263	/* Do the actual hashing */
   1264
   1265	testmgr_poison(desc->__ctx, crypto_shash_descsize(tfm));
   1266	testmgr_poison(result, digestsize + TESTMGR_POISON_LEN);
   1267
   1268	if (cfg->finalization_type == FINALIZATION_TYPE_DIGEST ||
   1269	    vec->digest_error) {
   1270		/* Just using digest() */
   1271		if (tsgl->nents != 1)
   1272			return 0;
   1273		if (cfg->nosimd)
   1274			crypto_disable_simd_for_test();
   1275		err = crypto_shash_digest(desc, sg_virt(&tsgl->sgl[0]),
   1276					  tsgl->sgl[0].length, result);
   1277		if (cfg->nosimd)
   1278			crypto_reenable_simd_for_test();
   1279		if (err) {
   1280			if (err == vec->digest_error)
   1281				return 0;
   1282			pr_err("alg: shash: %s digest() failed on test vector %s; expected_error=%d, actual_error=%d, cfg=\"%s\"\n",
   1283			       driver, vec_name, vec->digest_error, err,
   1284			       cfg->name);
   1285			return err;
   1286		}
   1287		if (vec->digest_error) {
   1288			pr_err("alg: shash: %s digest() unexpectedly succeeded on test vector %s; expected_error=%d, cfg=\"%s\"\n",
   1289			       driver, vec_name, vec->digest_error, cfg->name);
   1290			return -EINVAL;
   1291		}
   1292		goto result_ready;
   1293	}
   1294
   1295	/* Using init(), zero or more update(), then final() or finup() */
   1296
   1297	if (cfg->nosimd)
   1298		crypto_disable_simd_for_test();
   1299	err = crypto_shash_init(desc);
   1300	if (cfg->nosimd)
   1301		crypto_reenable_simd_for_test();
   1302	err = check_shash_op("init", err, driver, vec_name, cfg);
   1303	if (err)
   1304		return err;
   1305
   1306	for (i = 0; i < tsgl->nents; i++) {
   1307		if (i + 1 == tsgl->nents &&
   1308		    cfg->finalization_type == FINALIZATION_TYPE_FINUP) {
   1309			if (divs[i]->nosimd)
   1310				crypto_disable_simd_for_test();
   1311			err = crypto_shash_finup(desc, sg_virt(&tsgl->sgl[i]),
   1312						 tsgl->sgl[i].length, result);
   1313			if (divs[i]->nosimd)
   1314				crypto_reenable_simd_for_test();
   1315			err = check_shash_op("finup", err, driver, vec_name,
   1316					     cfg);
   1317			if (err)
   1318				return err;
   1319			goto result_ready;
   1320		}
   1321		if (divs[i]->nosimd)
   1322			crypto_disable_simd_for_test();
   1323		err = crypto_shash_update(desc, sg_virt(&tsgl->sgl[i]),
   1324					  tsgl->sgl[i].length);
   1325		if (divs[i]->nosimd)
   1326			crypto_reenable_simd_for_test();
   1327		err = check_shash_op("update", err, driver, vec_name, cfg);
   1328		if (err)
   1329			return err;
   1330		if (divs[i]->flush_type == FLUSH_TYPE_REIMPORT) {
   1331			/* Test ->export() and ->import() */
   1332			testmgr_poison(hashstate + statesize,
   1333				       TESTMGR_POISON_LEN);
   1334			err = crypto_shash_export(desc, hashstate);
   1335			err = check_shash_op("export", err, driver, vec_name,
   1336					     cfg);
   1337			if (err)
   1338				return err;
   1339			if (!testmgr_is_poison(hashstate + statesize,
   1340					       TESTMGR_POISON_LEN)) {
   1341				pr_err("alg: shash: %s export() overran state buffer on test vector %s, cfg=\"%s\"\n",
   1342				       driver, vec_name, cfg->name);
   1343				return -EOVERFLOW;
   1344			}
   1345			testmgr_poison(desc->__ctx, crypto_shash_descsize(tfm));
   1346			err = crypto_shash_import(desc, hashstate);
   1347			err = check_shash_op("import", err, driver, vec_name,
   1348					     cfg);
   1349			if (err)
   1350				return err;
   1351		}
   1352	}
   1353
   1354	if (cfg->nosimd)
   1355		crypto_disable_simd_for_test();
   1356	err = crypto_shash_final(desc, result);
   1357	if (cfg->nosimd)
   1358		crypto_reenable_simd_for_test();
   1359	err = check_shash_op("final", err, driver, vec_name, cfg);
   1360	if (err)
   1361		return err;
   1362result_ready:
   1363	return check_hash_result("shash", result, digestsize, vec, vec_name,
   1364				 driver, cfg);
   1365}
   1366
   1367static int do_ahash_op(int (*op)(struct ahash_request *req),
   1368		       struct ahash_request *req,
   1369		       struct crypto_wait *wait, bool nosimd)
   1370{
   1371	int err;
   1372
   1373	if (nosimd)
   1374		crypto_disable_simd_for_test();
   1375
   1376	err = op(req);
   1377
   1378	if (nosimd)
   1379		crypto_reenable_simd_for_test();
   1380
   1381	return crypto_wait_req(err, wait);
   1382}
   1383
   1384static int check_nonfinal_ahash_op(const char *op, int err,
   1385				   u8 *result, unsigned int digestsize,
   1386				   const char *driver, const char *vec_name,
   1387				   const struct testvec_config *cfg)
   1388{
   1389	if (err) {
   1390		pr_err("alg: ahash: %s %s() failed with err %d on test vector %s, cfg=\"%s\"\n",
   1391		       driver, op, err, vec_name, cfg->name);
   1392		return err;
   1393	}
   1394	if (!testmgr_is_poison(result, digestsize)) {
   1395		pr_err("alg: ahash: %s %s() used result buffer on test vector %s, cfg=\"%s\"\n",
   1396		       driver, op, vec_name, cfg->name);
   1397		return -EINVAL;
   1398	}
   1399	return 0;
   1400}
   1401
   1402/* Test one hash test vector in one configuration, using the ahash API */
   1403static int test_ahash_vec_cfg(const struct hash_testvec *vec,
   1404			      const char *vec_name,
   1405			      const struct testvec_config *cfg,
   1406			      struct ahash_request *req,
   1407			      struct test_sglist *tsgl,
   1408			      u8 *hashstate)
   1409{
   1410	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
   1411	const unsigned int alignmask = crypto_ahash_alignmask(tfm);
   1412	const unsigned int digestsize = crypto_ahash_digestsize(tfm);
   1413	const unsigned int statesize = crypto_ahash_statesize(tfm);
   1414	const char *driver = crypto_ahash_driver_name(tfm);
   1415	const u32 req_flags = CRYPTO_TFM_REQ_MAY_BACKLOG | cfg->req_flags;
   1416	const struct test_sg_division *divs[XBUFSIZE];
   1417	DECLARE_CRYPTO_WAIT(wait);
   1418	unsigned int i;
   1419	struct scatterlist *pending_sgl;
   1420	unsigned int pending_len;
   1421	u8 result[HASH_MAX_DIGESTSIZE + TESTMGR_POISON_LEN];
   1422	int err;
   1423
   1424	/* Set the key, if specified */
   1425	if (vec->ksize) {
   1426		err = do_setkey(crypto_ahash_setkey, tfm, vec->key, vec->ksize,
   1427				cfg, alignmask);
   1428		if (err) {
   1429			if (err == vec->setkey_error)
   1430				return 0;
   1431			pr_err("alg: ahash: %s setkey failed on test vector %s; expected_error=%d, actual_error=%d, flags=%#x\n",
   1432			       driver, vec_name, vec->setkey_error, err,
   1433			       crypto_ahash_get_flags(tfm));
   1434			return err;
   1435		}
   1436		if (vec->setkey_error) {
   1437			pr_err("alg: ahash: %s setkey unexpectedly succeeded on test vector %s; expected_error=%d\n",
   1438			       driver, vec_name, vec->setkey_error);
   1439			return -EINVAL;
   1440		}
   1441	}
   1442
   1443	/* Build the scatterlist for the source data */
   1444	err = build_hash_sglist(tsgl, vec, cfg, alignmask, divs);
   1445	if (err) {
   1446		pr_err("alg: ahash: %s: error preparing scatterlist for test vector %s, cfg=\"%s\"\n",
   1447		       driver, vec_name, cfg->name);
   1448		return err;
   1449	}
   1450
   1451	/* Do the actual hashing */
   1452
   1453	testmgr_poison(req->__ctx, crypto_ahash_reqsize(tfm));
   1454	testmgr_poison(result, digestsize + TESTMGR_POISON_LEN);
   1455
   1456	if (cfg->finalization_type == FINALIZATION_TYPE_DIGEST ||
   1457	    vec->digest_error) {
   1458		/* Just using digest() */
   1459		ahash_request_set_callback(req, req_flags, crypto_req_done,
   1460					   &wait);
   1461		ahash_request_set_crypt(req, tsgl->sgl, result, vec->psize);
   1462		err = do_ahash_op(crypto_ahash_digest, req, &wait, cfg->nosimd);
   1463		if (err) {
   1464			if (err == vec->digest_error)
   1465				return 0;
   1466			pr_err("alg: ahash: %s digest() failed on test vector %s; expected_error=%d, actual_error=%d, cfg=\"%s\"\n",
   1467			       driver, vec_name, vec->digest_error, err,
   1468			       cfg->name);
   1469			return err;
   1470		}
   1471		if (vec->digest_error) {
   1472			pr_err("alg: ahash: %s digest() unexpectedly succeeded on test vector %s; expected_error=%d, cfg=\"%s\"\n",
   1473			       driver, vec_name, vec->digest_error, cfg->name);
   1474			return -EINVAL;
   1475		}
   1476		goto result_ready;
   1477	}
   1478
   1479	/* Using init(), zero or more update(), then final() or finup() */
   1480
   1481	ahash_request_set_callback(req, req_flags, crypto_req_done, &wait);
   1482	ahash_request_set_crypt(req, NULL, result, 0);
   1483	err = do_ahash_op(crypto_ahash_init, req, &wait, cfg->nosimd);
   1484	err = check_nonfinal_ahash_op("init", err, result, digestsize,
   1485				      driver, vec_name, cfg);
   1486	if (err)
   1487		return err;
   1488
   1489	pending_sgl = NULL;
   1490	pending_len = 0;
   1491	for (i = 0; i < tsgl->nents; i++) {
   1492		if (divs[i]->flush_type != FLUSH_TYPE_NONE &&
   1493		    pending_sgl != NULL) {
   1494			/* update() with the pending data */
   1495			ahash_request_set_callback(req, req_flags,
   1496						   crypto_req_done, &wait);
   1497			ahash_request_set_crypt(req, pending_sgl, result,
   1498						pending_len);
   1499			err = do_ahash_op(crypto_ahash_update, req, &wait,
   1500					  divs[i]->nosimd);
   1501			err = check_nonfinal_ahash_op("update", err,
   1502						      result, digestsize,
   1503						      driver, vec_name, cfg);
   1504			if (err)
   1505				return err;
   1506			pending_sgl = NULL;
   1507			pending_len = 0;
   1508		}
   1509		if (divs[i]->flush_type == FLUSH_TYPE_REIMPORT) {
   1510			/* Test ->export() and ->import() */
   1511			testmgr_poison(hashstate + statesize,
   1512				       TESTMGR_POISON_LEN);
   1513			err = crypto_ahash_export(req, hashstate);
   1514			err = check_nonfinal_ahash_op("export", err,
   1515						      result, digestsize,
   1516						      driver, vec_name, cfg);
   1517			if (err)
   1518				return err;
   1519			if (!testmgr_is_poison(hashstate + statesize,
   1520					       TESTMGR_POISON_LEN)) {
   1521				pr_err("alg: ahash: %s export() overran state buffer on test vector %s, cfg=\"%s\"\n",
   1522				       driver, vec_name, cfg->name);
   1523				return -EOVERFLOW;
   1524			}
   1525
   1526			testmgr_poison(req->__ctx, crypto_ahash_reqsize(tfm));
   1527			err = crypto_ahash_import(req, hashstate);
   1528			err = check_nonfinal_ahash_op("import", err,
   1529						      result, digestsize,
   1530						      driver, vec_name, cfg);
   1531			if (err)
   1532				return err;
   1533		}
   1534		if (pending_sgl == NULL)
   1535			pending_sgl = &tsgl->sgl[i];
   1536		pending_len += tsgl->sgl[i].length;
   1537	}
   1538
   1539	ahash_request_set_callback(req, req_flags, crypto_req_done, &wait);
   1540	ahash_request_set_crypt(req, pending_sgl, result, pending_len);
   1541	if (cfg->finalization_type == FINALIZATION_TYPE_FINAL) {
   1542		/* finish with update() and final() */
   1543		err = do_ahash_op(crypto_ahash_update, req, &wait, cfg->nosimd);
   1544		err = check_nonfinal_ahash_op("update", err, result, digestsize,
   1545					      driver, vec_name, cfg);
   1546		if (err)
   1547			return err;
   1548		err = do_ahash_op(crypto_ahash_final, req, &wait, cfg->nosimd);
   1549		if (err) {
   1550			pr_err("alg: ahash: %s final() failed with err %d on test vector %s, cfg=\"%s\"\n",
   1551			       driver, err, vec_name, cfg->name);
   1552			return err;
   1553		}
   1554	} else {
   1555		/* finish with finup() */
   1556		err = do_ahash_op(crypto_ahash_finup, req, &wait, cfg->nosimd);
   1557		if (err) {
   1558			pr_err("alg: ahash: %s finup() failed with err %d on test vector %s, cfg=\"%s\"\n",
   1559			       driver, err, vec_name, cfg->name);
   1560			return err;
   1561		}
   1562	}
   1563
   1564result_ready:
   1565	return check_hash_result("ahash", result, digestsize, vec, vec_name,
   1566				 driver, cfg);
   1567}
   1568
   1569static int test_hash_vec_cfg(const struct hash_testvec *vec,
   1570			     const char *vec_name,
   1571			     const struct testvec_config *cfg,
   1572			     struct ahash_request *req,
   1573			     struct shash_desc *desc,
   1574			     struct test_sglist *tsgl,
   1575			     u8 *hashstate)
   1576{
   1577	int err;
   1578
   1579	/*
   1580	 * For algorithms implemented as "shash", most bugs will be detected by
   1581	 * both the shash and ahash tests.  Test the shash API first so that the
    1582	 * failures involve less indirection and are therefore easier to debug.
   1583	 */
   1584
   1585	if (desc) {
   1586		err = test_shash_vec_cfg(vec, vec_name, cfg, desc, tsgl,
   1587					 hashstate);
   1588		if (err)
   1589			return err;
   1590	}
   1591
   1592	return test_ahash_vec_cfg(vec, vec_name, cfg, req, tsgl, hashstate);
   1593}
   1594
   1595static int test_hash_vec(const struct hash_testvec *vec, unsigned int vec_num,
   1596			 struct ahash_request *req, struct shash_desc *desc,
   1597			 struct test_sglist *tsgl, u8 *hashstate)
   1598{
   1599	char vec_name[16];
   1600	unsigned int i;
   1601	int err;
   1602
   1603	sprintf(vec_name, "%u", vec_num);
   1604
   1605	for (i = 0; i < ARRAY_SIZE(default_hash_testvec_configs); i++) {
   1606		err = test_hash_vec_cfg(vec, vec_name,
   1607					&default_hash_testvec_configs[i],
   1608					req, desc, tsgl, hashstate);
   1609		if (err)
   1610			return err;
   1611	}
   1612
   1613#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
   1614	if (!noextratests) {
   1615		struct testvec_config cfg;
   1616		char cfgname[TESTVEC_CONFIG_NAMELEN];
   1617
   1618		for (i = 0; i < fuzz_iterations; i++) {
   1619			generate_random_testvec_config(&cfg, cfgname,
   1620						       sizeof(cfgname));
   1621			err = test_hash_vec_cfg(vec, vec_name, &cfg,
   1622						req, desc, tsgl, hashstate);
   1623			if (err)
   1624				return err;
   1625			cond_resched();
   1626		}
   1627	}
   1628#endif
   1629	return 0;
   1630}
   1631
   1632#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
   1633/*
   1634 * Generate a hash test vector from the given implementation.
   1635 * Assumes the buffers in 'vec' were already allocated.
   1636 */
   1637static void generate_random_hash_testvec(struct shash_desc *desc,
   1638					 struct hash_testvec *vec,
   1639					 unsigned int maxkeysize,
   1640					 unsigned int maxdatasize,
   1641					 char *name, size_t max_namelen)
   1642{
   1643	/* Data */
   1644	vec->psize = generate_random_length(maxdatasize);
   1645	generate_random_bytes((u8 *)vec->plaintext, vec->psize);
   1646
   1647	/*
   1648	 * Key: length in range [1, maxkeysize], but usually choose maxkeysize.
    1649	 * If the algorithm is unkeyed, maxkeysize == 0 and ksize stays 0.
   1650	 */
   1651	vec->setkey_error = 0;
   1652	vec->ksize = 0;
   1653	if (maxkeysize) {
   1654		vec->ksize = maxkeysize;
   1655		if (prandom_u32() % 4 == 0)
   1656			vec->ksize = 1 + (prandom_u32() % maxkeysize);
   1657		generate_random_bytes((u8 *)vec->key, vec->ksize);
   1658
   1659		vec->setkey_error = crypto_shash_setkey(desc->tfm, vec->key,
   1660							vec->ksize);
   1661		/* If the key couldn't be set, no need to continue to digest. */
   1662		if (vec->setkey_error)
   1663			goto done;
   1664	}
   1665
   1666	/* Digest */
   1667	vec->digest_error = crypto_shash_digest(desc, vec->plaintext,
   1668						vec->psize, (u8 *)vec->digest);
   1669done:
   1670	snprintf(name, max_namelen, "\"random: psize=%u ksize=%u\"",
   1671		 vec->psize, vec->ksize);
   1672}
   1673
   1674/*
   1675 * Test the hash algorithm represented by @req against the corresponding generic
   1676 * implementation, if one is available.
   1677 */
   1678static int test_hash_vs_generic_impl(const char *generic_driver,
   1679				     unsigned int maxkeysize,
   1680				     struct ahash_request *req,
   1681				     struct shash_desc *desc,
   1682				     struct test_sglist *tsgl,
   1683				     u8 *hashstate)
   1684{
   1685	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
   1686	const unsigned int digestsize = crypto_ahash_digestsize(tfm);
   1687	const unsigned int blocksize = crypto_ahash_blocksize(tfm);
   1688	const unsigned int maxdatasize = (2 * PAGE_SIZE) - TESTMGR_POISON_LEN;
   1689	const char *algname = crypto_hash_alg_common(tfm)->base.cra_name;
   1690	const char *driver = crypto_ahash_driver_name(tfm);
   1691	char _generic_driver[CRYPTO_MAX_ALG_NAME];
   1692	struct crypto_shash *generic_tfm = NULL;
   1693	struct shash_desc *generic_desc = NULL;
   1694	unsigned int i;
   1695	struct hash_testvec vec = { 0 };
   1696	char vec_name[64];
   1697	struct testvec_config *cfg;
   1698	char cfgname[TESTVEC_CONFIG_NAMELEN];
   1699	int err;
   1700
   1701	if (noextratests)
   1702		return 0;
   1703
   1704	if (!generic_driver) { /* Use default naming convention? */
   1705		err = build_generic_driver_name(algname, _generic_driver);
   1706		if (err)
   1707			return err;
   1708		generic_driver = _generic_driver;
   1709	}
   1710
   1711	if (strcmp(generic_driver, driver) == 0) /* Already the generic impl? */
   1712		return 0;
   1713
   1714	generic_tfm = crypto_alloc_shash(generic_driver, 0, 0);
   1715	if (IS_ERR(generic_tfm)) {
   1716		err = PTR_ERR(generic_tfm);
   1717		if (err == -ENOENT) {
   1718			pr_warn("alg: hash: skipping comparison tests for %s because %s is unavailable\n",
   1719				driver, generic_driver);
   1720			return 0;
   1721		}
   1722		pr_err("alg: hash: error allocating %s (generic impl of %s): %d\n",
   1723		       generic_driver, algname, err);
   1724		return err;
   1725	}
   1726
   1727	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
   1728	if (!cfg) {
   1729		err = -ENOMEM;
   1730		goto out;
   1731	}
   1732
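        	/*
        	 * shash descriptors are variable-sized: a fixed header followed
        	 * by the algorithm's descsize bytes of private state.
        	 */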
   1733	generic_desc = kzalloc(sizeof(*desc) +
   1734			       crypto_shash_descsize(generic_tfm), GFP_KERNEL);
   1735	if (!generic_desc) {
   1736		err = -ENOMEM;
   1737		goto out;
   1738	}
   1739	generic_desc->tfm = generic_tfm;
   1740
   1741	/* Check the algorithm properties for consistency. */
   1742
   1743	if (digestsize != crypto_shash_digestsize(generic_tfm)) {
   1744		pr_err("alg: hash: digestsize for %s (%u) doesn't match generic impl (%u)\n",
   1745		       driver, digestsize,
   1746		       crypto_shash_digestsize(generic_tfm));
   1747		err = -EINVAL;
   1748		goto out;
   1749	}
   1750
   1751	if (blocksize != crypto_shash_blocksize(generic_tfm)) {
   1752		pr_err("alg: hash: blocksize for %s (%u) doesn't match generic impl (%u)\n",
   1753		       driver, blocksize, crypto_shash_blocksize(generic_tfm));
   1754		err = -EINVAL;
   1755		goto out;
   1756	}
   1757
   1758	/*
   1759	 * Now generate test vectors using the generic implementation, and test
   1760	 * the other implementation against them.
   1761	 */
   1762
   1763	vec.key = kmalloc(maxkeysize, GFP_KERNEL);
   1764	vec.plaintext = kmalloc(maxdatasize, GFP_KERNEL);
   1765	vec.digest = kmalloc(digestsize, GFP_KERNEL);
   1766	if (!vec.key || !vec.plaintext || !vec.digest) {
   1767		err = -ENOMEM;
   1768		goto out;
   1769	}
   1770
   1771	for (i = 0; i < fuzz_iterations * 8; i++) {
   1772		generate_random_hash_testvec(generic_desc, &vec,
   1773					     maxkeysize, maxdatasize,
   1774					     vec_name, sizeof(vec_name));
   1775		generate_random_testvec_config(cfg, cfgname, sizeof(cfgname));
   1776
   1777		err = test_hash_vec_cfg(&vec, vec_name, cfg,
   1778					req, desc, tsgl, hashstate);
   1779		if (err)
   1780			goto out;
   1781		cond_resched();
   1782	}
   1783	err = 0;
   1784out:
   1785	kfree(cfg);
   1786	kfree(vec.key);
   1787	kfree(vec.plaintext);
   1788	kfree(vec.digest);
   1789	crypto_free_shash(generic_tfm);
   1790	kfree_sensitive(generic_desc);
   1791	return err;
   1792}
   1793#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
   1794static int test_hash_vs_generic_impl(const char *generic_driver,
   1795				     unsigned int maxkeysize,
   1796				     struct ahash_request *req,
   1797				     struct shash_desc *desc,
   1798				     struct test_sglist *tsgl,
   1799				     u8 *hashstate)
   1800{
   1801	return 0;
   1802}
   1803#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
   1804
   1805static int alloc_shash(const char *driver, u32 type, u32 mask,
   1806		       struct crypto_shash **tfm_ret,
   1807		       struct shash_desc **desc_ret)
   1808{
   1809	struct crypto_shash *tfm;
   1810	struct shash_desc *desc;
   1811
   1812	tfm = crypto_alloc_shash(driver, type, mask);
   1813	if (IS_ERR(tfm)) {
   1814		if (PTR_ERR(tfm) == -ENOENT) {
   1815			/*
   1816			 * This algorithm is only available through the ahash
   1817			 * API, not the shash API, so skip the shash tests.
   1818			 */
   1819			return 0;
   1820		}
   1821		pr_err("alg: hash: failed to allocate shash transform for %s: %ld\n",
   1822		       driver, PTR_ERR(tfm));
   1823		return PTR_ERR(tfm);
   1824	}
   1825
   1826	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
   1827	if (!desc) {
   1828		crypto_free_shash(tfm);
   1829		return -ENOMEM;
   1830	}
   1831	desc->tfm = tfm;
   1832
   1833	*tfm_ret = tfm;
   1834	*desc_ret = desc;
   1835	return 0;
   1836}
   1837
   1838static int __alg_test_hash(const struct hash_testvec *vecs,
   1839			   unsigned int num_vecs, const char *driver,
   1840			   u32 type, u32 mask,
   1841			   const char *generic_driver, unsigned int maxkeysize)
   1842{
   1843	struct crypto_ahash *atfm = NULL;
   1844	struct ahash_request *req = NULL;
   1845	struct crypto_shash *stfm = NULL;
   1846	struct shash_desc *desc = NULL;
   1847	struct test_sglist *tsgl = NULL;
   1848	u8 *hashstate = NULL;
   1849	unsigned int statesize;
   1850	unsigned int i;
   1851	int err;
   1852
   1853	/*
   1854	 * Always test the ahash API.  This works regardless of whether the
   1855	 * algorithm is implemented as ahash or shash.
   1856	 */
   1857
   1858	atfm = crypto_alloc_ahash(driver, type, mask);
   1859	if (IS_ERR(atfm)) {
   1860		pr_err("alg: hash: failed to allocate transform for %s: %ld\n",
   1861		       driver, PTR_ERR(atfm));
   1862		return PTR_ERR(atfm);
   1863	}
   1864	driver = crypto_ahash_driver_name(atfm);
   1865
   1866	req = ahash_request_alloc(atfm, GFP_KERNEL);
   1867	if (!req) {
   1868		pr_err("alg: hash: failed to allocate request for %s\n",
   1869		       driver);
   1870		err = -ENOMEM;
   1871		goto out;
   1872	}
   1873
   1874	/*
    1875	 * If available, also test the shash API, to cover corner cases that may
   1876	 * be missed by testing the ahash API only.
   1877	 */
   1878	err = alloc_shash(driver, type, mask, &stfm, &desc);
   1879	if (err)
   1880		goto out;
   1881
   1882	tsgl = kmalloc(sizeof(*tsgl), GFP_KERNEL);
   1883	if (!tsgl || init_test_sglist(tsgl) != 0) {
   1884		pr_err("alg: hash: failed to allocate test buffers for %s\n",
   1885		       driver);
   1886		kfree(tsgl);
   1887		tsgl = NULL;
   1888		err = -ENOMEM;
   1889		goto out;
   1890	}
   1891
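        	/*
        	 * The export/import state buffer is shared by the ahash and
        	 * shash tests, so size it for whichever needs more; the extra
        	 * poison bytes at the end let export() overruns be detected.
        	 */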
   1892	statesize = crypto_ahash_statesize(atfm);
   1893	if (stfm)
   1894		statesize = max(statesize, crypto_shash_statesize(stfm));
   1895	hashstate = kmalloc(statesize + TESTMGR_POISON_LEN, GFP_KERNEL);
   1896	if (!hashstate) {
   1897		pr_err("alg: hash: failed to allocate hash state buffer for %s\n",
   1898		       driver);
   1899		err = -ENOMEM;
   1900		goto out;
   1901	}
   1902
   1903	for (i = 0; i < num_vecs; i++) {
   1904		if (fips_enabled && vecs[i].fips_skip)
   1905			continue;
   1906
   1907		err = test_hash_vec(&vecs[i], i, req, desc, tsgl, hashstate);
   1908		if (err)
   1909			goto out;
   1910		cond_resched();
   1911	}
   1912	err = test_hash_vs_generic_impl(generic_driver, maxkeysize, req,
   1913					desc, tsgl, hashstate);
   1914out:
   1915	kfree(hashstate);
   1916	if (tsgl) {
   1917		destroy_test_sglist(tsgl);
   1918		kfree(tsgl);
   1919	}
   1920	kfree(desc);
   1921	crypto_free_shash(stfm);
   1922	ahash_request_free(req);
   1923	crypto_free_ahash(atfm);
   1924	return err;
   1925}
   1926
   1927static int alg_test_hash(const struct alg_test_desc *desc, const char *driver,
   1928			 u32 type, u32 mask)
   1929{
   1930	const struct hash_testvec *template = desc->suite.hash.vecs;
   1931	unsigned int tcount = desc->suite.hash.count;
   1932	unsigned int nr_unkeyed, nr_keyed;
   1933	unsigned int maxkeysize = 0;
   1934	int err;
   1935
   1936	/*
   1937	 * For OPTIONAL_KEY algorithms, we have to do all the unkeyed tests
   1938	 * first, before setting a key on the tfm.  To make this easier, we
   1939	 * require that the unkeyed test vectors (if any) are listed first.
   1940	 */
   1941
   1942	for (nr_unkeyed = 0; nr_unkeyed < tcount; nr_unkeyed++) {
   1943		if (template[nr_unkeyed].ksize)
   1944			break;
   1945	}
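        	/*
        	 * The remaining vectors must all be keyed.  Also record the
        	 * largest key length; the extra (fuzz) tests use it as an upper
        	 * bound when generating random keys.
        	 */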
   1946	for (nr_keyed = 0; nr_unkeyed + nr_keyed < tcount; nr_keyed++) {
   1947		if (!template[nr_unkeyed + nr_keyed].ksize) {
    1948			pr_err("alg: hash: test vectors for %s out of order, unkeyed ones must come first\n",
    1949			       desc->alg);
   1950			return -EINVAL;
   1951		}
   1952		maxkeysize = max_t(unsigned int, maxkeysize,
   1953				   template[nr_unkeyed + nr_keyed].ksize);
   1954	}
   1955
   1956	err = 0;
   1957	if (nr_unkeyed) {
   1958		err = __alg_test_hash(template, nr_unkeyed, driver, type, mask,
   1959				      desc->generic_driver, maxkeysize);
   1960		template += nr_unkeyed;
   1961	}
   1962
   1963	if (!err && nr_keyed)
   1964		err = __alg_test_hash(template, nr_keyed, driver, type, mask,
   1965				      desc->generic_driver, maxkeysize);
   1966
   1967	return err;
   1968}
   1969
   1970static int test_aead_vec_cfg(int enc, const struct aead_testvec *vec,
   1971			     const char *vec_name,
   1972			     const struct testvec_config *cfg,
   1973			     struct aead_request *req,
   1974			     struct cipher_test_sglists *tsgls)
   1975{
   1976	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
   1977	const unsigned int alignmask = crypto_aead_alignmask(tfm);
   1978	const unsigned int ivsize = crypto_aead_ivsize(tfm);
   1979	const unsigned int authsize = vec->clen - vec->plen;
   1980	const char *driver = crypto_aead_driver_name(tfm);
   1981	const u32 req_flags = CRYPTO_TFM_REQ_MAY_BACKLOG | cfg->req_flags;
   1982	const char *op = enc ? "encryption" : "decryption";
   1983	DECLARE_CRYPTO_WAIT(wait);
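        	/*
        	 * Place the IV at the offset requested by the test config, within
        	 * an oversized buffer, so that IV alignment handling is exercised.
        	 */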
   1984	u8 _iv[3 * (MAX_ALGAPI_ALIGNMASK + 1) + MAX_IVLEN];
   1985	u8 *iv = PTR_ALIGN(&_iv[0], 2 * (MAX_ALGAPI_ALIGNMASK + 1)) +
   1986		 cfg->iv_offset +
   1987		 (cfg->iv_offset_relative_to_alignmask ? alignmask : 0);
   1988	struct kvec input[2];
   1989	int err;
   1990
   1991	/* Set the key */
   1992	if (vec->wk)
   1993		crypto_aead_set_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
   1994	else
   1995		crypto_aead_clear_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
   1996
   1997	err = do_setkey(crypto_aead_setkey, tfm, vec->key, vec->klen,
   1998			cfg, alignmask);
   1999	if (err && err != vec->setkey_error) {
   2000		pr_err("alg: aead: %s setkey failed on test vector %s; expected_error=%d, actual_error=%d, flags=%#x\n",
   2001		       driver, vec_name, vec->setkey_error, err,
   2002		       crypto_aead_get_flags(tfm));
   2003		return err;
   2004	}
   2005	if (!err && vec->setkey_error) {
   2006		pr_err("alg: aead: %s setkey unexpectedly succeeded on test vector %s; expected_error=%d\n",
   2007		       driver, vec_name, vec->setkey_error);
   2008		return -EINVAL;
   2009	}
   2010
   2011	/* Set the authentication tag size */
   2012	err = crypto_aead_setauthsize(tfm, authsize);
   2013	if (err && err != vec->setauthsize_error) {
   2014		pr_err("alg: aead: %s setauthsize failed on test vector %s; expected_error=%d, actual_error=%d\n",
   2015		       driver, vec_name, vec->setauthsize_error, err);
   2016		return err;
   2017	}
   2018	if (!err && vec->setauthsize_error) {
   2019		pr_err("alg: aead: %s setauthsize unexpectedly succeeded on test vector %s; expected_error=%d\n",
   2020		       driver, vec_name, vec->setauthsize_error);
   2021		return -EINVAL;
   2022	}
   2023
   2024	if (vec->setkey_error || vec->setauthsize_error)
   2025		return 0;
   2026
   2027	/* The IV must be copied to a buffer, as the algorithm may modify it */
   2028	if (WARN_ON(ivsize > MAX_IVLEN))
   2029		return -EINVAL;
   2030	if (vec->iv)
   2031		memcpy(iv, vec->iv, ivsize);
   2032	else
   2033		memset(iv, 0, ivsize);
   2034
   2035	/* Build the src/dst scatterlists */
   2036	input[0].iov_base = (void *)vec->assoc;
   2037	input[0].iov_len = vec->alen;
   2038	input[1].iov_base = enc ? (void *)vec->ptext : (void *)vec->ctext;
   2039	input[1].iov_len = enc ? vec->plen : vec->clen;
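        	/*
        	 * src holds AAD || input text; dst needs room for AAD || output
        	 * text, where the output includes the tag when encrypting.
        	 */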
   2040	err = build_cipher_test_sglists(tsgls, cfg, alignmask,
   2041					vec->alen + (enc ? vec->plen :
   2042						     vec->clen),
   2043					vec->alen + (enc ? vec->clen :
   2044						     vec->plen),
   2045					input, 2);
   2046	if (err) {
   2047		pr_err("alg: aead: %s %s: error preparing scatterlists for test vector %s, cfg=\"%s\"\n",
   2048		       driver, op, vec_name, cfg->name);
   2049		return err;
   2050	}
   2051
   2052	/* Do the actual encryption or decryption */
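        	/*
        	 * Poisoning req->__ctx catches implementations that rely on the
        	 * request context being pre-initialized.
        	 */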
   2053	testmgr_poison(req->__ctx, crypto_aead_reqsize(tfm));
   2054	aead_request_set_callback(req, req_flags, crypto_req_done, &wait);
   2055	aead_request_set_crypt(req, tsgls->src.sgl_ptr, tsgls->dst.sgl_ptr,
   2056			       enc ? vec->plen : vec->clen, iv);
   2057	aead_request_set_ad(req, vec->alen);
   2058	if (cfg->nosimd)
   2059		crypto_disable_simd_for_test();
   2060	err = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
   2061	if (cfg->nosimd)
   2062		crypto_reenable_simd_for_test();
   2063	err = crypto_wait_req(err, &wait);
   2064
   2065	/* Check that the algorithm didn't overwrite things it shouldn't have */
   2066	if (req->cryptlen != (enc ? vec->plen : vec->clen) ||
   2067	    req->assoclen != vec->alen ||
   2068	    req->iv != iv ||
   2069	    req->src != tsgls->src.sgl_ptr ||
   2070	    req->dst != tsgls->dst.sgl_ptr ||
   2071	    crypto_aead_reqtfm(req) != tfm ||
   2072	    req->base.complete != crypto_req_done ||
   2073	    req->base.flags != req_flags ||
   2074	    req->base.data != &wait) {
   2075		pr_err("alg: aead: %s %s corrupted request struct on test vector %s, cfg=\"%s\"\n",
   2076		       driver, op, vec_name, cfg->name);
   2077		if (req->cryptlen != (enc ? vec->plen : vec->clen))
   2078			pr_err("alg: aead: changed 'req->cryptlen'\n");
   2079		if (req->assoclen != vec->alen)
   2080			pr_err("alg: aead: changed 'req->assoclen'\n");
   2081		if (req->iv != iv)
   2082			pr_err("alg: aead: changed 'req->iv'\n");
   2083		if (req->src != tsgls->src.sgl_ptr)
   2084			pr_err("alg: aead: changed 'req->src'\n");
   2085		if (req->dst != tsgls->dst.sgl_ptr)
   2086			pr_err("alg: aead: changed 'req->dst'\n");
   2087		if (crypto_aead_reqtfm(req) != tfm)
   2088			pr_err("alg: aead: changed 'req->base.tfm'\n");
   2089		if (req->base.complete != crypto_req_done)
   2090			pr_err("alg: aead: changed 'req->base.complete'\n");
   2091		if (req->base.flags != req_flags)
   2092			pr_err("alg: aead: changed 'req->base.flags'\n");
   2093		if (req->base.data != &wait)
   2094			pr_err("alg: aead: changed 'req->base.data'\n");
   2095		return -EINVAL;
   2096	}
   2097	if (is_test_sglist_corrupted(&tsgls->src)) {
   2098		pr_err("alg: aead: %s %s corrupted src sgl on test vector %s, cfg=\"%s\"\n",
   2099		       driver, op, vec_name, cfg->name);
   2100		return -EINVAL;
   2101	}
   2102	if (tsgls->dst.sgl_ptr != tsgls->src.sgl &&
   2103	    is_test_sglist_corrupted(&tsgls->dst)) {
   2104		pr_err("alg: aead: %s %s corrupted dst sgl on test vector %s, cfg=\"%s\"\n",
   2105		       driver, op, vec_name, cfg->name);
   2106		return -EINVAL;
   2107	}
   2108
   2109	/* Check for unexpected success or failure, or wrong error code */
   2110	if ((err == 0 && vec->novrfy) ||
   2111	    (err != vec->crypt_error && !(err == -EBADMSG && vec->novrfy))) {
   2112		char expected_error[32];
   2113
   2114		if (vec->novrfy &&
   2115		    vec->crypt_error != 0 && vec->crypt_error != -EBADMSG)
   2116			sprintf(expected_error, "-EBADMSG or %d",
   2117				vec->crypt_error);
   2118		else if (vec->novrfy)
   2119			sprintf(expected_error, "-EBADMSG");
   2120		else
   2121			sprintf(expected_error, "%d", vec->crypt_error);
   2122		if (err) {
   2123			pr_err("alg: aead: %s %s failed on test vector %s; expected_error=%s, actual_error=%d, cfg=\"%s\"\n",
   2124			       driver, op, vec_name, expected_error, err,
   2125			       cfg->name);
   2126			return err;
   2127		}
   2128		pr_err("alg: aead: %s %s unexpectedly succeeded on test vector %s; expected_error=%s, cfg=\"%s\"\n",
   2129		       driver, op, vec_name, expected_error, cfg->name);
   2130		return -EINVAL;
   2131	}
   2132	if (err) /* Expectedly failed. */
   2133		return 0;
   2134
   2135	/* Check for the correct output (ciphertext or plaintext) */
   2136	err = verify_correct_output(&tsgls->dst, enc ? vec->ctext : vec->ptext,
   2137				    enc ? vec->clen : vec->plen,
   2138				    vec->alen,
   2139				    enc || cfg->inplace_mode == OUT_OF_PLACE);
   2140	if (err == -EOVERFLOW) {
   2141		pr_err("alg: aead: %s %s overran dst buffer on test vector %s, cfg=\"%s\"\n",
   2142		       driver, op, vec_name, cfg->name);
   2143		return err;
   2144	}
   2145	if (err) {
   2146		pr_err("alg: aead: %s %s test failed (wrong result) on test vector %s, cfg=\"%s\"\n",
   2147		       driver, op, vec_name, cfg->name);
   2148		return err;
   2149	}
   2150
   2151	return 0;
   2152}
   2153
   2154static int test_aead_vec(int enc, const struct aead_testvec *vec,
   2155			 unsigned int vec_num, struct aead_request *req,
   2156			 struct cipher_test_sglists *tsgls)
   2157{
   2158	char vec_name[16];
   2159	unsigned int i;
   2160	int err;
   2161
   2162	if (enc && vec->novrfy)
   2163		return 0;
   2164
   2165	sprintf(vec_name, "%u", vec_num);
   2166
   2167	for (i = 0; i < ARRAY_SIZE(default_cipher_testvec_configs); i++) {
   2168		err = test_aead_vec_cfg(enc, vec, vec_name,
   2169					&default_cipher_testvec_configs[i],
   2170					req, tsgls);
   2171		if (err)
   2172			return err;
   2173	}
   2174
   2175#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
   2176	if (!noextratests) {
   2177		struct testvec_config cfg;
   2178		char cfgname[TESTVEC_CONFIG_NAMELEN];
   2179
   2180		for (i = 0; i < fuzz_iterations; i++) {
   2181			generate_random_testvec_config(&cfg, cfgname,
   2182						       sizeof(cfgname));
   2183			err = test_aead_vec_cfg(enc, vec, vec_name,
   2184						&cfg, req, tsgls);
   2185			if (err)
   2186				return err;
   2187			cond_resched();
   2188		}
   2189	}
   2190#endif
   2191	return 0;
   2192}
   2193
   2194#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
   2195
   2196struct aead_extra_tests_ctx {
   2197	struct aead_request *req;
   2198	struct crypto_aead *tfm;
   2199	const struct alg_test_desc *test_desc;
   2200	struct cipher_test_sglists *tsgls;
   2201	unsigned int maxdatasize;
   2202	unsigned int maxkeysize;
   2203
   2204	struct aead_testvec vec;
   2205	char vec_name[64];
   2206	char cfgname[TESTVEC_CONFIG_NAMELEN];
   2207	struct testvec_config cfg;
   2208};
   2209
   2210/*
   2211 * Make at least one random change to a (ciphertext, AAD) pair.  "Ciphertext"
   2212 * here means the full ciphertext including the authentication tag.  The
   2213 * authentication tag (and hence also the ciphertext) is assumed to be nonempty.
   2214 */
   2215static void mutate_aead_message(struct aead_testvec *vec, bool aad_iv,
   2216				unsigned int ivsize)
   2217{
   2218	const unsigned int aad_tail_size = aad_iv ? ivsize : 0;
   2219	const unsigned int authsize = vec->clen - vec->plen;
   2220
   2221	if (prandom_u32() % 2 == 0 && vec->alen > aad_tail_size) {
    2222		/* Mutate the AAD */
   2223		flip_random_bit((u8 *)vec->assoc, vec->alen - aad_tail_size);
   2224		if (prandom_u32() % 2 == 0)
   2225			return;
   2226	}
   2227	if (prandom_u32() % 2 == 0) {
   2228		/* Mutate auth tag (assuming it's at the end of ciphertext) */
   2229		flip_random_bit((u8 *)vec->ctext + vec->plen, authsize);
   2230	} else {
   2231		/* Mutate any part of the ciphertext */
   2232		flip_random_bit((u8 *)vec->ctext, vec->clen);
   2233	}
   2234}
   2235
   2236/*
   2237 * Minimum authentication tag size in bytes at which we assume that we can
   2238 * reliably generate inauthentic messages, i.e. not generate an authentic
   2239 * message by chance.
   2240 */
   2241#define MIN_COLLISION_FREE_AUTHSIZE 8
   2242
   2243static void generate_aead_message(struct aead_request *req,
   2244				  const struct aead_test_suite *suite,
   2245				  struct aead_testvec *vec,
   2246				  bool prefer_inauthentic)
   2247{
   2248	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
   2249	const unsigned int ivsize = crypto_aead_ivsize(tfm);
   2250	const unsigned int authsize = vec->clen - vec->plen;
   2251	const bool inauthentic = (authsize >= MIN_COLLISION_FREE_AUTHSIZE) &&
   2252				 (prefer_inauthentic || prandom_u32() % 4 == 0);
   2253
   2254	/* Generate the AAD. */
   2255	generate_random_bytes((u8 *)vec->assoc, vec->alen);
   2256	if (suite->aad_iv && vec->alen >= ivsize)
   2257		/* Avoid implementation-defined behavior. */
   2258		memcpy((u8 *)vec->assoc + vec->alen - ivsize, vec->iv, ivsize);
   2259
   2260	if (inauthentic && prandom_u32() % 2 == 0) {
   2261		/* Generate a random ciphertext. */
   2262		generate_random_bytes((u8 *)vec->ctext, vec->clen);
   2263	} else {
   2264		int i = 0;
   2265		struct scatterlist src[2], dst;
   2266		u8 iv[MAX_IVLEN];
   2267		DECLARE_CRYPTO_WAIT(wait);
   2268
   2269		/* Generate a random plaintext and encrypt it. */
   2270		sg_init_table(src, 2);
   2271		if (vec->alen)
   2272			sg_set_buf(&src[i++], vec->assoc, vec->alen);
   2273		if (vec->plen) {
   2274			generate_random_bytes((u8 *)vec->ptext, vec->plen);
   2275			sg_set_buf(&src[i++], vec->ptext, vec->plen);
   2276		}
   2277		sg_init_one(&dst, vec->ctext, vec->alen + vec->clen);
   2278		memcpy(iv, vec->iv, ivsize);
   2279		aead_request_set_callback(req, 0, crypto_req_done, &wait);
   2280		aead_request_set_crypt(req, src, &dst, vec->plen, iv);
   2281		aead_request_set_ad(req, vec->alen);
   2282		vec->crypt_error = crypto_wait_req(crypto_aead_encrypt(req),
   2283						   &wait);
   2284		/* If encryption failed, we're done. */
   2285		if (vec->crypt_error != 0)
   2286			return;
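        		/*
        		 * The dst buffer now holds a copy of the AAD followed by
        		 * the ciphertext (including the tag); keep just the
        		 * ciphertext, which is all the test vector stores.
        		 */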
   2287		memmove((u8 *)vec->ctext, vec->ctext + vec->alen, vec->clen);
   2288		if (!inauthentic)
   2289			return;
   2290		/*
   2291		 * Mutate the authentic (ciphertext, AAD) pair to get an
   2292		 * inauthentic one.
   2293		 */
   2294		mutate_aead_message(vec, suite->aad_iv, ivsize);
   2295	}
   2296	vec->novrfy = 1;
   2297	if (suite->einval_allowed)
   2298		vec->crypt_error = -EINVAL;
   2299}
   2300
   2301/*
   2302 * Generate an AEAD test vector 'vec' using the implementation specified by
   2303 * 'req'.  The buffers in 'vec' must already be allocated.
   2304 *
   2305 * If 'prefer_inauthentic' is true, then this function will generate inauthentic
   2306 * test vectors (i.e. vectors with 'vec->novrfy=1') more often.
   2307 */
   2308static void generate_random_aead_testvec(struct aead_request *req,
   2309					 struct aead_testvec *vec,
   2310					 const struct aead_test_suite *suite,
   2311					 unsigned int maxkeysize,
   2312					 unsigned int maxdatasize,
   2313					 char *name, size_t max_namelen,
   2314					 bool prefer_inauthentic)
   2315{
   2316	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
   2317	const unsigned int ivsize = crypto_aead_ivsize(tfm);
   2318	const unsigned int maxauthsize = crypto_aead_maxauthsize(tfm);
   2319	unsigned int authsize;
   2320	unsigned int total_len;
   2321
   2322	/* Key: length in [0, maxkeysize], but usually choose maxkeysize */
   2323	vec->klen = maxkeysize;
   2324	if (prandom_u32() % 4 == 0)
   2325		vec->klen = prandom_u32() % (maxkeysize + 1);
   2326	generate_random_bytes((u8 *)vec->key, vec->klen);
   2327	vec->setkey_error = crypto_aead_setkey(tfm, vec->key, vec->klen);
   2328
   2329	/* IV */
   2330	generate_random_bytes((u8 *)vec->iv, ivsize);
   2331
   2332	/* Tag length: in [0, maxauthsize], but usually choose maxauthsize */
   2333	authsize = maxauthsize;
   2334	if (prandom_u32() % 4 == 0)
   2335		authsize = prandom_u32() % (maxauthsize + 1);
   2336	if (prefer_inauthentic && authsize < MIN_COLLISION_FREE_AUTHSIZE)
   2337		authsize = MIN_COLLISION_FREE_AUTHSIZE;
   2338	if (WARN_ON(authsize > maxdatasize))
   2339		authsize = maxdatasize;
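        	/* Reserve room for the authentication tag in the data budget. */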
   2340	maxdatasize -= authsize;
   2341	vec->setauthsize_error = crypto_aead_setauthsize(tfm, authsize);
   2342
   2343	/* AAD, plaintext, and ciphertext lengths */
   2344	total_len = generate_random_length(maxdatasize);
   2345	if (prandom_u32() % 4 == 0)
   2346		vec->alen = 0;
   2347	else
   2348		vec->alen = generate_random_length(total_len);
   2349	vec->plen = total_len - vec->alen;
   2350	vec->clen = vec->plen + authsize;
   2351
   2352	/*
   2353	 * Generate the AAD, plaintext, and ciphertext.  Not applicable if the
   2354	 * key or the authentication tag size couldn't be set.
   2355	 */
   2356	vec->novrfy = 0;
   2357	vec->crypt_error = 0;
   2358	if (vec->setkey_error == 0 && vec->setauthsize_error == 0)
   2359		generate_aead_message(req, suite, vec, prefer_inauthentic);
   2360	snprintf(name, max_namelen,
   2361		 "\"random: alen=%u plen=%u authsize=%u klen=%u novrfy=%d\"",
   2362		 vec->alen, vec->plen, authsize, vec->klen, vec->novrfy);
   2363}
   2364
   2365static void try_to_generate_inauthentic_testvec(
   2366					struct aead_extra_tests_ctx *ctx)
   2367{
   2368	int i;
   2369
   2370	for (i = 0; i < 10; i++) {
   2371		generate_random_aead_testvec(ctx->req, &ctx->vec,
   2372					     &ctx->test_desc->suite.aead,
   2373					     ctx->maxkeysize, ctx->maxdatasize,
   2374					     ctx->vec_name,
   2375					     sizeof(ctx->vec_name), true);
   2376		if (ctx->vec.novrfy)
   2377			return;
   2378	}
   2379}
   2380
   2381/*
    2382 * Generate inauthentic test vectors (i.e. (ciphertext, AAD) pairs that aren't
    2383 * the result of an encryption with the key) and verify that decryption fails.
   2384 */
   2385static int test_aead_inauthentic_inputs(struct aead_extra_tests_ctx *ctx)
   2386{
   2387	unsigned int i;
   2388	int err;
   2389
   2390	for (i = 0; i < fuzz_iterations * 8; i++) {
   2391		/*
   2392		 * Since this part of the tests isn't comparing the
   2393		 * implementation to another, there's no point in testing any
   2394		 * test vectors other than inauthentic ones (vec.novrfy=1) here.
   2395		 *
   2396		 * If we're having trouble generating such a test vector, e.g.
   2397		 * if the algorithm keeps rejecting the generated keys, don't
   2398		 * retry forever; just continue on.
   2399		 */
   2400		try_to_generate_inauthentic_testvec(ctx);
   2401		if (ctx->vec.novrfy) {
   2402			generate_random_testvec_config(&ctx->cfg, ctx->cfgname,
   2403						       sizeof(ctx->cfgname));
   2404			err = test_aead_vec_cfg(DECRYPT, &ctx->vec,
   2405						ctx->vec_name, &ctx->cfg,
   2406						ctx->req, ctx->tsgls);
   2407			if (err)
   2408				return err;
   2409		}
   2410		cond_resched();
   2411	}
   2412	return 0;
   2413}
   2414
   2415/*
   2416 * Test the AEAD algorithm against the corresponding generic implementation, if
   2417 * one is available.
   2418 */
   2419static int test_aead_vs_generic_impl(struct aead_extra_tests_ctx *ctx)
   2420{
   2421	struct crypto_aead *tfm = ctx->tfm;
   2422	const char *algname = crypto_aead_alg(tfm)->base.cra_name;
   2423	const char *driver = crypto_aead_driver_name(tfm);
   2424	const char *generic_driver = ctx->test_desc->generic_driver;
   2425	char _generic_driver[CRYPTO_MAX_ALG_NAME];
   2426	struct crypto_aead *generic_tfm = NULL;
   2427	struct aead_request *generic_req = NULL;
   2428	unsigned int i;
   2429	int err;
   2430
   2431	if (!generic_driver) { /* Use default naming convention? */
   2432		err = build_generic_driver_name(algname, _generic_driver);
   2433		if (err)
   2434			return err;
   2435		generic_driver = _generic_driver;
   2436	}
   2437
   2438	if (strcmp(generic_driver, driver) == 0) /* Already the generic impl? */
   2439		return 0;
   2440
   2441	generic_tfm = crypto_alloc_aead(generic_driver, 0, 0);
   2442	if (IS_ERR(generic_tfm)) {
   2443		err = PTR_ERR(generic_tfm);
   2444		if (err == -ENOENT) {
   2445			pr_warn("alg: aead: skipping comparison tests for %s because %s is unavailable\n",
   2446				driver, generic_driver);
   2447			return 0;
   2448		}
   2449		pr_err("alg: aead: error allocating %s (generic impl of %s): %d\n",
   2450		       generic_driver, algname, err);
   2451		return err;
   2452	}
   2453
   2454	generic_req = aead_request_alloc(generic_tfm, GFP_KERNEL);
   2455	if (!generic_req) {
   2456		err = -ENOMEM;
   2457		goto out;
   2458	}
   2459
   2460	/* Check the algorithm properties for consistency. */
   2461
   2462	if (crypto_aead_maxauthsize(tfm) !=
   2463	    crypto_aead_maxauthsize(generic_tfm)) {
   2464		pr_err("alg: aead: maxauthsize for %s (%u) doesn't match generic impl (%u)\n",
   2465		       driver, crypto_aead_maxauthsize(tfm),
   2466		       crypto_aead_maxauthsize(generic_tfm));
   2467		err = -EINVAL;
   2468		goto out;
   2469	}
   2470
   2471	if (crypto_aead_ivsize(tfm) != crypto_aead_ivsize(generic_tfm)) {
   2472		pr_err("alg: aead: ivsize for %s (%u) doesn't match generic impl (%u)\n",
   2473		       driver, crypto_aead_ivsize(tfm),
   2474		       crypto_aead_ivsize(generic_tfm));
   2475		err = -EINVAL;
   2476		goto out;
   2477	}
   2478
   2479	if (crypto_aead_blocksize(tfm) != crypto_aead_blocksize(generic_tfm)) {
   2480		pr_err("alg: aead: blocksize for %s (%u) doesn't match generic impl (%u)\n",
   2481		       driver, crypto_aead_blocksize(tfm),
   2482		       crypto_aead_blocksize(generic_tfm));
   2483		err = -EINVAL;
   2484		goto out;
   2485	}
   2486
   2487	/*
   2488	 * Now generate test vectors using the generic implementation, and test
   2489	 * the other implementation against them.
   2490	 */
   2491	for (i = 0; i < fuzz_iterations * 8; i++) {
   2492		generate_random_aead_testvec(generic_req, &ctx->vec,
   2493					     &ctx->test_desc->suite.aead,
   2494					     ctx->maxkeysize, ctx->maxdatasize,
   2495					     ctx->vec_name,
   2496					     sizeof(ctx->vec_name), false);
   2497		generate_random_testvec_config(&ctx->cfg, ctx->cfgname,
   2498					       sizeof(ctx->cfgname));
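        		/*
        		 * Only authentic vectors are meaningful to encrypt.
        		 * Decrypt whenever the generic encryption succeeded or the
        		 * vector is intentionally inauthentic.
        		 */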
   2499		if (!ctx->vec.novrfy) {
   2500			err = test_aead_vec_cfg(ENCRYPT, &ctx->vec,
   2501						ctx->vec_name, &ctx->cfg,
   2502						ctx->req, ctx->tsgls);
   2503			if (err)
   2504				goto out;
   2505		}
   2506		if (ctx->vec.crypt_error == 0 || ctx->vec.novrfy) {
   2507			err = test_aead_vec_cfg(DECRYPT, &ctx->vec,
   2508						ctx->vec_name, &ctx->cfg,
   2509						ctx->req, ctx->tsgls);
   2510			if (err)
   2511				goto out;
   2512		}
   2513		cond_resched();
   2514	}
   2515	err = 0;
   2516out:
   2517	crypto_free_aead(generic_tfm);
   2518	aead_request_free(generic_req);
   2519	return err;
   2520}
   2521
   2522static int test_aead_extra(const struct alg_test_desc *test_desc,
   2523			   struct aead_request *req,
   2524			   struct cipher_test_sglists *tsgls)
   2525{
   2526	struct aead_extra_tests_ctx *ctx;
   2527	unsigned int i;
   2528	int err;
   2529
   2530	if (noextratests)
   2531		return 0;
   2532
   2533	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
   2534	if (!ctx)
   2535		return -ENOMEM;
   2536	ctx->req = req;
   2537	ctx->tfm = crypto_aead_reqtfm(req);
   2538	ctx->test_desc = test_desc;
   2539	ctx->tsgls = tsgls;
   2540	ctx->maxdatasize = (2 * PAGE_SIZE) - TESTMGR_POISON_LEN;
   2541	ctx->maxkeysize = 0;
   2542	for (i = 0; i < test_desc->suite.aead.count; i++)
   2543		ctx->maxkeysize = max_t(unsigned int, ctx->maxkeysize,
   2544					test_desc->suite.aead.vecs[i].klen);
   2545
   2546	ctx->vec.key = kmalloc(ctx->maxkeysize, GFP_KERNEL);
   2547	ctx->vec.iv = kmalloc(crypto_aead_ivsize(ctx->tfm), GFP_KERNEL);
   2548	ctx->vec.assoc = kmalloc(ctx->maxdatasize, GFP_KERNEL);
   2549	ctx->vec.ptext = kmalloc(ctx->maxdatasize, GFP_KERNEL);
   2550	ctx->vec.ctext = kmalloc(ctx->maxdatasize, GFP_KERNEL);
   2551	if (!ctx->vec.key || !ctx->vec.iv || !ctx->vec.assoc ||
   2552	    !ctx->vec.ptext || !ctx->vec.ctext) {
   2553		err = -ENOMEM;
   2554		goto out;
   2555	}
   2556
   2557	err = test_aead_vs_generic_impl(ctx);
   2558	if (err)
   2559		goto out;
   2560
   2561	err = test_aead_inauthentic_inputs(ctx);
   2562out:
   2563	kfree(ctx->vec.key);
   2564	kfree(ctx->vec.iv);
   2565	kfree(ctx->vec.assoc);
   2566	kfree(ctx->vec.ptext);
   2567	kfree(ctx->vec.ctext);
   2568	kfree(ctx);
   2569	return err;
   2570}
   2571#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
   2572static int test_aead_extra(const struct alg_test_desc *test_desc,
   2573			   struct aead_request *req,
   2574			   struct cipher_test_sglists *tsgls)
   2575{
   2576	return 0;
   2577}
   2578#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
   2579
   2580static int test_aead(int enc, const struct aead_test_suite *suite,
   2581		     struct aead_request *req,
   2582		     struct cipher_test_sglists *tsgls)
   2583{
   2584	unsigned int i;
   2585	int err;
   2586
   2587	for (i = 0; i < suite->count; i++) {
   2588		err = test_aead_vec(enc, &suite->vecs[i], i, req, tsgls);
   2589		if (err)
   2590			return err;
   2591		cond_resched();
   2592	}
   2593	return 0;
   2594}
   2595
   2596static int alg_test_aead(const struct alg_test_desc *desc, const char *driver,
   2597			 u32 type, u32 mask)
   2598{
   2599	const struct aead_test_suite *suite = &desc->suite.aead;
   2600	struct crypto_aead *tfm;
   2601	struct aead_request *req = NULL;
   2602	struct cipher_test_sglists *tsgls = NULL;
   2603	int err;
   2604
   2605	if (suite->count <= 0) {
   2606		pr_err("alg: aead: empty test suite for %s\n", driver);
   2607		return -EINVAL;
   2608	}
   2609
   2610	tfm = crypto_alloc_aead(driver, type, mask);
   2611	if (IS_ERR(tfm)) {
   2612		pr_err("alg: aead: failed to allocate transform for %s: %ld\n",
   2613		       driver, PTR_ERR(tfm));
   2614		return PTR_ERR(tfm);
   2615	}
   2616	driver = crypto_aead_driver_name(tfm);
   2617
   2618	req = aead_request_alloc(tfm, GFP_KERNEL);
   2619	if (!req) {
   2620		pr_err("alg: aead: failed to allocate request for %s\n",
   2621		       driver);
   2622		err = -ENOMEM;
   2623		goto out;
   2624	}
   2625
   2626	tsgls = alloc_cipher_test_sglists();
   2627	if (!tsgls) {
   2628		pr_err("alg: aead: failed to allocate test buffers for %s\n",
   2629		       driver);
   2630		err = -ENOMEM;
   2631		goto out;
   2632	}
   2633
   2634	err = test_aead(ENCRYPT, suite, req, tsgls);
   2635	if (err)
   2636		goto out;
   2637
   2638	err = test_aead(DECRYPT, suite, req, tsgls);
   2639	if (err)
   2640		goto out;
   2641
   2642	err = test_aead_extra(desc, req, tsgls);
   2643out:
   2644	free_cipher_test_sglists(tsgls);
   2645	aead_request_free(req);
   2646	crypto_free_aead(tfm);
   2647	return err;
   2648}
   2649
   2650static int test_cipher(struct crypto_cipher *tfm, int enc,
   2651		       const struct cipher_testvec *template,
   2652		       unsigned int tcount)
   2653{
   2654	const char *algo = crypto_tfm_alg_driver_name(crypto_cipher_tfm(tfm));
   2655	unsigned int i, j, k;
   2656	char *q;
   2657	const char *e;
   2658	const char *input, *result;
   2659	void *data;
   2660	char *xbuf[XBUFSIZE];
   2661	int ret = -ENOMEM;
   2662
   2663	if (testmgr_alloc_buf(xbuf))
   2664		goto out_nobuf;
   2665
   2666	if (enc == ENCRYPT)
    2667			e = "encryption";
   2668	else
   2669		e = "decryption";
   2670
   2671	j = 0;
   2672	for (i = 0; i < tcount; i++) {
   2673
   2674		if (fips_enabled && template[i].fips_skip)
   2675			continue;
   2676
   2677		input  = enc ? template[i].ptext : template[i].ctext;
   2678		result = enc ? template[i].ctext : template[i].ptext;
   2679		j++;
   2680
   2681		ret = -EINVAL;
   2682		if (WARN_ON(template[i].len > PAGE_SIZE))
   2683			goto out;
   2684
   2685		data = xbuf[0];
   2686		memcpy(data, input, template[i].len);
   2687
   2688		crypto_cipher_clear_flags(tfm, ~0);
   2689		if (template[i].wk)
   2690			crypto_cipher_set_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
   2691
   2692		ret = crypto_cipher_setkey(tfm, template[i].key,
   2693					   template[i].klen);
   2694		if (ret) {
   2695			if (ret == template[i].setkey_error)
   2696				continue;
   2697			pr_err("alg: cipher: %s setkey failed on test vector %u; expected_error=%d, actual_error=%d, flags=%#x\n",
   2698			       algo, j, template[i].setkey_error, ret,
   2699			       crypto_cipher_get_flags(tfm));
   2700			goto out;
   2701		}
   2702		if (template[i].setkey_error) {
   2703			pr_err("alg: cipher: %s setkey unexpectedly succeeded on test vector %u; expected_error=%d\n",
   2704			       algo, j, template[i].setkey_error);
   2705			ret = -EINVAL;
   2706			goto out;
   2707		}
   2708
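        		/*
        		 * The single-block cipher API processes one block at a
        		 * time; each block is transformed in place here.
        		 */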
   2709		for (k = 0; k < template[i].len;
   2710		     k += crypto_cipher_blocksize(tfm)) {
   2711			if (enc)
   2712				crypto_cipher_encrypt_one(tfm, data + k,
   2713							  data + k);
   2714			else
   2715				crypto_cipher_decrypt_one(tfm, data + k,
   2716							  data + k);
   2717		}
   2718
   2719		q = data;
   2720		if (memcmp(q, result, template[i].len)) {
    2721			pr_err("alg: cipher: Test %d failed on %s for %s\n",
    2722			       j, e, algo);
   2723			hexdump(q, template[i].len);
   2724			ret = -EINVAL;
   2725			goto out;
   2726		}
   2727	}
   2728
   2729	ret = 0;
   2730
   2731out:
   2732	testmgr_free_buf(xbuf);
   2733out_nobuf:
   2734	return ret;
   2735}
   2736
   2737static int test_skcipher_vec_cfg(int enc, const struct cipher_testvec *vec,
   2738				 const char *vec_name,
   2739				 const struct testvec_config *cfg,
   2740				 struct skcipher_request *req,
   2741				 struct cipher_test_sglists *tsgls)
   2742{
   2743	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
   2744	const unsigned int alignmask = crypto_skcipher_alignmask(tfm);
   2745	const unsigned int ivsize = crypto_skcipher_ivsize(tfm);
   2746	const char *driver = crypto_skcipher_driver_name(tfm);
   2747	const u32 req_flags = CRYPTO_TFM_REQ_MAY_BACKLOG | cfg->req_flags;
   2748	const char *op = enc ? "encryption" : "decryption";
   2749	DECLARE_CRYPTO_WAIT(wait);
   2750	u8 _iv[3 * (MAX_ALGAPI_ALIGNMASK + 1) + MAX_IVLEN];
   2751	u8 *iv = PTR_ALIGN(&_iv[0], 2 * (MAX_ALGAPI_ALIGNMASK + 1)) +
   2752		 cfg->iv_offset +
   2753		 (cfg->iv_offset_relative_to_alignmask ? alignmask : 0);
   2754	struct kvec input;
   2755	int err;
   2756
   2757	/* Set the key */
   2758	if (vec->wk)
   2759		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
   2760	else
   2761		crypto_skcipher_clear_flags(tfm,
   2762					    CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
   2763	err = do_setkey(crypto_skcipher_setkey, tfm, vec->key, vec->klen,
   2764			cfg, alignmask);
   2765	if (err) {
   2766		if (err == vec->setkey_error)
   2767			return 0;
   2768		pr_err("alg: skcipher: %s setkey failed on test vector %s; expected_error=%d, actual_error=%d, flags=%#x\n",
   2769		       driver, vec_name, vec->setkey_error, err,
   2770		       crypto_skcipher_get_flags(tfm));
   2771		return err;
   2772	}
   2773	if (vec->setkey_error) {
   2774		pr_err("alg: skcipher: %s setkey unexpectedly succeeded on test vector %s; expected_error=%d\n",
   2775		       driver, vec_name, vec->setkey_error);
   2776		return -EINVAL;
   2777	}
   2778
   2779	/* The IV must be copied to a buffer, as the algorithm may modify it */
   2780	if (ivsize) {
   2781		if (WARN_ON(ivsize > MAX_IVLEN))
   2782			return -EINVAL;
   2783		if (vec->generates_iv && !enc)
   2784			memcpy(iv, vec->iv_out, ivsize);
   2785		else if (vec->iv)
   2786			memcpy(iv, vec->iv, ivsize);
   2787		else
   2788			memset(iv, 0, ivsize);
   2789	} else {
   2790		if (vec->generates_iv) {
   2791			pr_err("alg: skcipher: %s has ivsize=0 but test vector %s generates IV!\n",
   2792			       driver, vec_name);
   2793			return -EINVAL;
   2794		}
   2795		iv = NULL;
   2796	}
   2797
   2798	/* Build the src/dst scatterlists */
   2799	input.iov_base = enc ? (void *)vec->ptext : (void *)vec->ctext;
   2800	input.iov_len = vec->len;
   2801	err = build_cipher_test_sglists(tsgls, cfg, alignmask,
   2802					vec->len, vec->len, &input, 1);
   2803	if (err) {
   2804		pr_err("alg: skcipher: %s %s: error preparing scatterlists for test vector %s, cfg=\"%s\"\n",
   2805		       driver, op, vec_name, cfg->name);
   2806		return err;
   2807	}
   2808
   2809	/* Do the actual encryption or decryption */
   2810	testmgr_poison(req->__ctx, crypto_skcipher_reqsize(tfm));
   2811	skcipher_request_set_callback(req, req_flags, crypto_req_done, &wait);
   2812	skcipher_request_set_crypt(req, tsgls->src.sgl_ptr, tsgls->dst.sgl_ptr,
   2813				   vec->len, iv);
   2814	if (cfg->nosimd)
   2815		crypto_disable_simd_for_test();
   2816	err = enc ? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req);
   2817	if (cfg->nosimd)
   2818		crypto_reenable_simd_for_test();
   2819	err = crypto_wait_req(err, &wait);
   2820
   2821	/* Check that the algorithm didn't overwrite things it shouldn't have */
   2822	if (req->cryptlen != vec->len ||
   2823	    req->iv != iv ||
   2824	    req->src != tsgls->src.sgl_ptr ||
   2825	    req->dst != tsgls->dst.sgl_ptr ||
   2826	    crypto_skcipher_reqtfm(req) != tfm ||
   2827	    req->base.complete != crypto_req_done ||
   2828	    req->base.flags != req_flags ||
   2829	    req->base.data != &wait) {
   2830		pr_err("alg: skcipher: %s %s corrupted request struct on test vector %s, cfg=\"%s\"\n",
   2831		       driver, op, vec_name, cfg->name);
   2832		if (req->cryptlen != vec->len)
   2833			pr_err("alg: skcipher: changed 'req->cryptlen'\n");
   2834		if (req->iv != iv)
   2835			pr_err("alg: skcipher: changed 'req->iv'\n");
   2836		if (req->src != tsgls->src.sgl_ptr)
   2837			pr_err("alg: skcipher: changed 'req->src'\n");
   2838		if (req->dst != tsgls->dst.sgl_ptr)
   2839			pr_err("alg: skcipher: changed 'req->dst'\n");
   2840		if (crypto_skcipher_reqtfm(req) != tfm)
   2841			pr_err("alg: skcipher: changed 'req->base.tfm'\n");
   2842		if (req->base.complete != crypto_req_done)
   2843			pr_err("alg: skcipher: changed 'req->base.complete'\n");
   2844		if (req->base.flags != req_flags)
   2845			pr_err("alg: skcipher: changed 'req->base.flags'\n");
   2846		if (req->base.data != &wait)
   2847			pr_err("alg: skcipher: changed 'req->base.data'\n");
   2848		return -EINVAL;
   2849	}
   2850	if (is_test_sglist_corrupted(&tsgls->src)) {
   2851		pr_err("alg: skcipher: %s %s corrupted src sgl on test vector %s, cfg=\"%s\"\n",
   2852		       driver, op, vec_name, cfg->name);
   2853		return -EINVAL;
   2854	}
   2855	if (tsgls->dst.sgl_ptr != tsgls->src.sgl &&
   2856	    is_test_sglist_corrupted(&tsgls->dst)) {
   2857		pr_err("alg: skcipher: %s %s corrupted dst sgl on test vector %s, cfg=\"%s\"\n",
   2858		       driver, op, vec_name, cfg->name);
   2859		return -EINVAL;
   2860	}
   2861
   2862	/* Check for success or failure */
   2863	if (err) {
   2864		if (err == vec->crypt_error)
   2865			return 0;
   2866		pr_err("alg: skcipher: %s %s failed on test vector %s; expected_error=%d, actual_error=%d, cfg=\"%s\"\n",
   2867		       driver, op, vec_name, vec->crypt_error, err, cfg->name);
   2868		return err;
   2869	}
   2870	if (vec->crypt_error) {
   2871		pr_err("alg: skcipher: %s %s unexpectedly succeeded on test vector %s; expected_error=%d, cfg=\"%s\"\n",
   2872		       driver, op, vec_name, vec->crypt_error, cfg->name);
   2873		return -EINVAL;
   2874	}
   2875
   2876	/* Check for the correct output (ciphertext or plaintext) */
   2877	err = verify_correct_output(&tsgls->dst, enc ? vec->ctext : vec->ptext,
   2878				    vec->len, 0, true);
   2879	if (err == -EOVERFLOW) {
   2880		pr_err("alg: skcipher: %s %s overran dst buffer on test vector %s, cfg=\"%s\"\n",
   2881		       driver, op, vec_name, cfg->name);
   2882		return err;
   2883	}
   2884	if (err) {
   2885		pr_err("alg: skcipher: %s %s test failed (wrong result) on test vector %s, cfg=\"%s\"\n",
   2886		       driver, op, vec_name, cfg->name);
   2887		return err;
   2888	}
   2889
   2890	/* If applicable, check that the algorithm generated the correct IV */
   2891	if (vec->iv_out && memcmp(iv, vec->iv_out, ivsize) != 0) {
   2892		pr_err("alg: skcipher: %s %s test failed (wrong output IV) on test vector %s, cfg=\"%s\"\n",
   2893		       driver, op, vec_name, cfg->name);
   2894		hexdump(iv, ivsize);
   2895		return -EINVAL;
   2896	}
   2897
   2898	return 0;
   2899}
   2900
   2901static int test_skcipher_vec(int enc, const struct cipher_testvec *vec,
   2902			     unsigned int vec_num,
   2903			     struct skcipher_request *req,
   2904			     struct cipher_test_sglists *tsgls)
   2905{
   2906	char vec_name[16];
   2907	unsigned int i;
   2908	int err;
   2909
   2910	if (fips_enabled && vec->fips_skip)
   2911		return 0;
   2912
   2913	sprintf(vec_name, "%u", vec_num);
   2914
   2915	for (i = 0; i < ARRAY_SIZE(default_cipher_testvec_configs); i++) {
   2916		err = test_skcipher_vec_cfg(enc, vec, vec_name,
   2917					    &default_cipher_testvec_configs[i],
   2918					    req, tsgls);
   2919		if (err)
   2920			return err;
   2921	}
   2922
   2923#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
   2924	if (!noextratests) {
   2925		struct testvec_config cfg;
   2926		char cfgname[TESTVEC_CONFIG_NAMELEN];
   2927
   2928		for (i = 0; i < fuzz_iterations; i++) {
   2929			generate_random_testvec_config(&cfg, cfgname,
   2930						       sizeof(cfgname));
   2931			err = test_skcipher_vec_cfg(enc, vec, vec_name,
   2932						    &cfg, req, tsgls);
   2933			if (err)
   2934				return err;
   2935			cond_resched();
   2936		}
   2937	}
   2938#endif
   2939	return 0;
   2940}
   2941
   2942#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
   2943/*
   2944 * Generate a symmetric cipher test vector from the given implementation.
   2945 * Assumes the buffers in 'vec' were already allocated.
   2946 */
   2947static void generate_random_cipher_testvec(struct skcipher_request *req,
   2948					   struct cipher_testvec *vec,
   2949					   unsigned int maxdatasize,
   2950					   char *name, size_t max_namelen)
   2951{
   2952	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
   2953	const unsigned int maxkeysize = crypto_skcipher_max_keysize(tfm);
   2954	const unsigned int ivsize = crypto_skcipher_ivsize(tfm);
   2955	struct scatterlist src, dst;
   2956	u8 iv[MAX_IVLEN];
   2957	DECLARE_CRYPTO_WAIT(wait);
   2958
   2959	/* Key: length in [0, maxkeysize], but usually choose maxkeysize */
   2960	vec->klen = maxkeysize;
   2961	if (prandom_u32() % 4 == 0)
   2962		vec->klen = prandom_u32() % (maxkeysize + 1);
   2963	generate_random_bytes((u8 *)vec->key, vec->klen);
   2964	vec->setkey_error = crypto_skcipher_setkey(tfm, vec->key, vec->klen);
   2965
   2966	/* IV */
   2967	generate_random_bytes((u8 *)vec->iv, ivsize);
   2968
   2969	/* Plaintext */
   2970	vec->len = generate_random_length(maxdatasize);
   2971	generate_random_bytes((u8 *)vec->ptext, vec->len);
   2972
   2973	/* If the key couldn't be set, no need to continue to encrypt. */
   2974	if (vec->setkey_error)
   2975		goto done;
   2976
   2977	/* Ciphertext */
   2978	sg_init_one(&src, vec->ptext, vec->len);
   2979	sg_init_one(&dst, vec->ctext, vec->len);
   2980	memcpy(iv, vec->iv, ivsize);
   2981	skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
   2982	skcipher_request_set_crypt(req, &src, &dst, vec->len, iv);
   2983	vec->crypt_error = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
   2984	if (vec->crypt_error != 0) {
   2985		/*
   2986		 * The only acceptable error here is for an invalid length, so
   2987		 * skcipher decryption should fail with the same error too.
   2988		 * We'll test for this.  But to keep the API usage well-defined,
   2989		 * explicitly initialize the ciphertext buffer too.
   2990		 */
   2991		memset((u8 *)vec->ctext, 0, vec->len);
   2992	}
   2993done:
   2994	snprintf(name, max_namelen, "\"random: len=%u klen=%u\"",
   2995		 vec->len, vec->klen);
   2996}
   2997
   2998/*
   2999 * Test the skcipher algorithm represented by @req against the corresponding
   3000 * generic implementation, if one is available.
   3001 */
   3002static int test_skcipher_vs_generic_impl(const char *generic_driver,
   3003					 struct skcipher_request *req,
   3004					 struct cipher_test_sglists *tsgls)
   3005{
   3006	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
   3007	const unsigned int maxkeysize = crypto_skcipher_max_keysize(tfm);
   3008	const unsigned int ivsize = crypto_skcipher_ivsize(tfm);
   3009	const unsigned int blocksize = crypto_skcipher_blocksize(tfm);
   3010	const unsigned int maxdatasize = (2 * PAGE_SIZE) - TESTMGR_POISON_LEN;
   3011	const char *algname = crypto_skcipher_alg(tfm)->base.cra_name;
   3012	const char *driver = crypto_skcipher_driver_name(tfm);
   3013	char _generic_driver[CRYPTO_MAX_ALG_NAME];
   3014	struct crypto_skcipher *generic_tfm = NULL;
   3015	struct skcipher_request *generic_req = NULL;
   3016	unsigned int i;
   3017	struct cipher_testvec vec = { 0 };
   3018	char vec_name[64];
   3019	struct testvec_config *cfg;
   3020	char cfgname[TESTVEC_CONFIG_NAMELEN];
   3021	int err;
   3022
   3023	if (noextratests)
   3024		return 0;
   3025
   3026	/* Keywrap isn't supported here yet as it handles its IV differently. */
   3027	if (strncmp(algname, "kw(", 3) == 0)
   3028		return 0;
   3029
   3030	if (!generic_driver) { /* Use default naming convention? */
   3031		err = build_generic_driver_name(algname, _generic_driver);
   3032		if (err)
   3033			return err;
   3034		generic_driver = _generic_driver;
   3035	}
   3036
   3037	if (strcmp(generic_driver, driver) == 0) /* Already the generic impl? */
   3038		return 0;
   3039
   3040	generic_tfm = crypto_alloc_skcipher(generic_driver, 0, 0);
   3041	if (IS_ERR(generic_tfm)) {
   3042		err = PTR_ERR(generic_tfm);
   3043		if (err == -ENOENT) {
   3044			pr_warn("alg: skcipher: skipping comparison tests for %s because %s is unavailable\n",
   3045				driver, generic_driver);
   3046			return 0;
   3047		}
   3048		pr_err("alg: skcipher: error allocating %s (generic impl of %s): %d\n",
   3049		       generic_driver, algname, err);
   3050		return err;
   3051	}
   3052
   3053	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
   3054	if (!cfg) {
   3055		err = -ENOMEM;
   3056		goto out;
   3057	}
   3058
   3059	generic_req = skcipher_request_alloc(generic_tfm, GFP_KERNEL);
   3060	if (!generic_req) {
   3061		err = -ENOMEM;
   3062		goto out;
   3063	}
   3064
   3065	/* Check the algorithm properties for consistency. */
   3066
   3067	if (crypto_skcipher_min_keysize(tfm) !=
   3068	    crypto_skcipher_min_keysize(generic_tfm)) {
   3069		pr_err("alg: skcipher: min keysize for %s (%u) doesn't match generic impl (%u)\n",
   3070		       driver, crypto_skcipher_min_keysize(tfm),
   3071		       crypto_skcipher_min_keysize(generic_tfm));
   3072		err = -EINVAL;
   3073		goto out;
   3074	}
   3075
   3076	if (maxkeysize != crypto_skcipher_max_keysize(generic_tfm)) {
   3077		pr_err("alg: skcipher: max keysize for %s (%u) doesn't match generic impl (%u)\n",
   3078		       driver, maxkeysize,
   3079		       crypto_skcipher_max_keysize(generic_tfm));
   3080		err = -EINVAL;
   3081		goto out;
   3082	}
   3083
   3084	if (ivsize != crypto_skcipher_ivsize(generic_tfm)) {
   3085		pr_err("alg: skcipher: ivsize for %s (%u) doesn't match generic impl (%u)\n",
   3086		       driver, ivsize, crypto_skcipher_ivsize(generic_tfm));
   3087		err = -EINVAL;
   3088		goto out;
   3089	}
   3090
   3091	if (blocksize != crypto_skcipher_blocksize(generic_tfm)) {
   3092		pr_err("alg: skcipher: blocksize for %s (%u) doesn't match generic impl (%u)\n",
   3093		       driver, blocksize,
   3094		       crypto_skcipher_blocksize(generic_tfm));
   3095		err = -EINVAL;
   3096		goto out;
   3097	}
   3098
   3099	/*
   3100	 * Now generate test vectors using the generic implementation, and test
   3101	 * the other implementation against them.
   3102	 */
   3103
   3104	vec.key = kmalloc(maxkeysize, GFP_KERNEL);
   3105	vec.iv = kmalloc(ivsize, GFP_KERNEL);
   3106	vec.ptext = kmalloc(maxdatasize, GFP_KERNEL);
   3107	vec.ctext = kmalloc(maxdatasize, GFP_KERNEL);
   3108	if (!vec.key || !vec.iv || !vec.ptext || !vec.ctext) {
   3109		err = -ENOMEM;
   3110		goto out;
   3111	}
   3112
   3113	for (i = 0; i < fuzz_iterations * 8; i++) {
   3114		generate_random_cipher_testvec(generic_req, &vec, maxdatasize,
   3115					       vec_name, sizeof(vec_name));
   3116		generate_random_testvec_config(cfg, cfgname, sizeof(cfgname));
   3117
   3118		err = test_skcipher_vec_cfg(ENCRYPT, &vec, vec_name,
   3119					    cfg, req, tsgls);
   3120		if (err)
   3121			goto out;
   3122		err = test_skcipher_vec_cfg(DECRYPT, &vec, vec_name,
   3123					    cfg, req, tsgls);
   3124		if (err)
   3125			goto out;
   3126		cond_resched();
   3127	}
   3128	err = 0;
   3129out:
   3130	kfree(cfg);
   3131	kfree(vec.key);
   3132	kfree(vec.iv);
   3133	kfree(vec.ptext);
   3134	kfree(vec.ctext);
   3135	crypto_free_skcipher(generic_tfm);
   3136	skcipher_request_free(generic_req);
   3137	return err;
   3138}
   3139#else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
   3140static int test_skcipher_vs_generic_impl(const char *generic_driver,
   3141					 struct skcipher_request *req,
   3142					 struct cipher_test_sglists *tsgls)
   3143{
   3144	return 0;
   3145}
   3146#endif /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */
   3147
   3148static int test_skcipher(int enc, const struct cipher_test_suite *suite,
   3149			 struct skcipher_request *req,
   3150			 struct cipher_test_sglists *tsgls)
   3151{
   3152	unsigned int i;
   3153	int err;
   3154
   3155	for (i = 0; i < suite->count; i++) {
   3156		err = test_skcipher_vec(enc, &suite->vecs[i], i, req, tsgls);
   3157		if (err)
   3158			return err;
   3159		cond_resched();
   3160	}
   3161	return 0;
   3162}
   3163
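        /*
         * Top-level skcipher test: allocate the transform, a request and the
         * scatterlist buffers, run every vector through both encryption and
         * decryption, then run the comparison tests against the generic
         * implementation (a no-op unless CONFIG_CRYPTO_MANAGER_EXTRA_TESTS).
         */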
   3164static int alg_test_skcipher(const struct alg_test_desc *desc,
   3165			     const char *driver, u32 type, u32 mask)
   3166{
   3167	const struct cipher_test_suite *suite = &desc->suite.cipher;
   3168	struct crypto_skcipher *tfm;
   3169	struct skcipher_request *req = NULL;
   3170	struct cipher_test_sglists *tsgls = NULL;
   3171	int err;
   3172
   3173	if (suite->count <= 0) {
   3174		pr_err("alg: skcipher: empty test suite for %s\n", driver);
   3175		return -EINVAL;
   3176	}
   3177
   3178	tfm = crypto_alloc_skcipher(driver, type, mask);
   3179	if (IS_ERR(tfm)) {
   3180		pr_err("alg: skcipher: failed to allocate transform for %s: %ld\n",
   3181		       driver, PTR_ERR(tfm));
   3182		return PTR_ERR(tfm);
   3183	}
   3184	driver = crypto_skcipher_driver_name(tfm);
   3185
   3186	req = skcipher_request_alloc(tfm, GFP_KERNEL);
   3187	if (!req) {
   3188		pr_err("alg: skcipher: failed to allocate request for %s\n",
   3189		       driver);
   3190		err = -ENOMEM;
   3191		goto out;
   3192	}
   3193
   3194	tsgls = alloc_cipher_test_sglists();
   3195	if (!tsgls) {
   3196		pr_err("alg: skcipher: failed to allocate test buffers for %s\n",
   3197		       driver);
   3198		err = -ENOMEM;
   3199		goto out;
   3200	}
   3201
   3202	err = test_skcipher(ENCRYPT, suite, req, tsgls);
   3203	if (err)
   3204		goto out;
   3205
   3206	err = test_skcipher(DECRYPT, suite, req, tsgls);
   3207	if (err)
   3208		goto out;
   3209
   3210	err = test_skcipher_vs_generic_impl(desc->generic_driver, req, tsgls);
   3211out:
   3212	free_cipher_test_sglists(tsgls);
   3213	skcipher_request_free(req);
   3214	crypto_free_skcipher(tfm);
   3215	return err;
   3216}
   3217
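        /*
         * Test the legacy synchronous compression API (struct crypto_comp).
         * Each compression vector is compressed and then decompressed again;
         * the test passes only if the round trip reproduces the original
         * input exactly (the compressed bytes themselves are not compared
         * against a reference).  The decompression vectors are decompressed
         * and checked against their expected output.
         */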
   3218static int test_comp(struct crypto_comp *tfm,
   3219		     const struct comp_testvec *ctemplate,
   3220		     const struct comp_testvec *dtemplate,
   3221		     int ctcount, int dtcount)
   3222{
   3223	const char *algo = crypto_tfm_alg_driver_name(crypto_comp_tfm(tfm));
   3224	char *output, *decomp_output;
   3225	unsigned int i;
   3226	int ret;
   3227
   3228	output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
   3229	if (!output)
   3230		return -ENOMEM;
   3231
   3232	decomp_output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
   3233	if (!decomp_output) {
   3234		kfree(output);
   3235		return -ENOMEM;
   3236	}
   3237
   3238	for (i = 0; i < ctcount; i++) {
   3239		int ilen;
   3240		unsigned int dlen = COMP_BUF_SIZE;
   3241
   3242		memset(output, 0, COMP_BUF_SIZE);
   3243		memset(decomp_output, 0, COMP_BUF_SIZE);
   3244
   3245		ilen = ctemplate[i].inlen;
   3246		ret = crypto_comp_compress(tfm, ctemplate[i].input,
   3247					   ilen, output, &dlen);
   3248		if (ret) {
   3249			printk(KERN_ERR "alg: comp: compression failed "
   3250			       "on test %d for %s: ret=%d\n", i + 1, algo,
   3251			       -ret);
   3252			goto out;
   3253		}
   3254
   3255		ilen = dlen;
   3256		dlen = COMP_BUF_SIZE;
   3257		ret = crypto_comp_decompress(tfm, output,
   3258					     ilen, decomp_output, &dlen);
   3259		if (ret) {
    3260			pr_err("alg: comp: decompression of compressed output failed on test %d for %s: ret=%d\n",
    3261			       i + 1, algo, -ret);
   3262			goto out;
   3263		}
   3264
   3265		if (dlen != ctemplate[i].inlen) {
   3266			printk(KERN_ERR "alg: comp: Compression test %d "
   3267			       "failed for %s: output len = %d\n", i + 1, algo,
   3268			       dlen);
   3269			ret = -EINVAL;
   3270			goto out;
   3271		}
   3272
   3273		if (memcmp(decomp_output, ctemplate[i].input,
   3274			   ctemplate[i].inlen)) {
    3275			pr_err("alg: comp: decompressed output differs from input on test %d for %s\n",
    3276			       i + 1, algo);
   3277			hexdump(decomp_output, dlen);
   3278			ret = -EINVAL;
   3279			goto out;
   3280		}
   3281	}
   3282
   3283	for (i = 0; i < dtcount; i++) {
   3284		int ilen;
   3285		unsigned int dlen = COMP_BUF_SIZE;
   3286
   3287		memset(decomp_output, 0, COMP_BUF_SIZE);
   3288
   3289		ilen = dtemplate[i].inlen;
   3290		ret = crypto_comp_decompress(tfm, dtemplate[i].input,
   3291					     ilen, decomp_output, &dlen);
   3292		if (ret) {
   3293			printk(KERN_ERR "alg: comp: decompression failed "
   3294			       "on test %d for %s: ret=%d\n", i + 1, algo,
   3295			       -ret);
   3296			goto out;
   3297		}
   3298
   3299		if (dlen != dtemplate[i].outlen) {
   3300			printk(KERN_ERR "alg: comp: Decompression test %d "
   3301			       "failed for %s: output len = %d\n", i + 1, algo,
   3302			       dlen);
   3303			ret = -EINVAL;
   3304			goto out;
   3305		}
   3306
   3307		if (memcmp(decomp_output, dtemplate[i].output, dlen)) {
   3308			printk(KERN_ERR "alg: comp: Decompression test %d "
   3309			       "failed for %s\n", i + 1, algo);
   3310			hexdump(decomp_output, dlen);
   3311			ret = -EINVAL;
   3312			goto out;
   3313		}
   3314	}
   3315
   3316	ret = 0;
   3317
   3318out:
   3319	kfree(decomp_output);
   3320	kfree(output);
   3321	return ret;
   3322}
   3323
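        /*
         * Same checks as test_comp(), but through the acomp (scatterlist
         * based, possibly asynchronous) API.  Each operation is driven to
         * completion with crypto_wait_req(), and each input is first copied
         * with kmemdup() into a kmalloc'ed buffer before being mapped into
         * the source scatterlist.
         */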
   3324static int test_acomp(struct crypto_acomp *tfm,
   3325			      const struct comp_testvec *ctemplate,
   3326		      const struct comp_testvec *dtemplate,
   3327		      int ctcount, int dtcount)
   3328{
   3329	const char *algo = crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm));
   3330	unsigned int i;
   3331	char *output, *decomp_out;
   3332	int ret;
   3333	struct scatterlist src, dst;
   3334	struct acomp_req *req;
   3335	struct crypto_wait wait;
   3336
   3337	output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
   3338	if (!output)
   3339		return -ENOMEM;
   3340
   3341	decomp_out = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
   3342	if (!decomp_out) {
   3343		kfree(output);
   3344		return -ENOMEM;
   3345	}
   3346
   3347	for (i = 0; i < ctcount; i++) {
   3348		unsigned int dlen = COMP_BUF_SIZE;
   3349		int ilen = ctemplate[i].inlen;
   3350		void *input_vec;
   3351
   3352		input_vec = kmemdup(ctemplate[i].input, ilen, GFP_KERNEL);
   3353		if (!input_vec) {
   3354			ret = -ENOMEM;
   3355			goto out;
   3356		}
   3357
   3358		memset(output, 0, dlen);
   3359		crypto_init_wait(&wait);
   3360		sg_init_one(&src, input_vec, ilen);
   3361		sg_init_one(&dst, output, dlen);
   3362
   3363		req = acomp_request_alloc(tfm);
   3364		if (!req) {
   3365			pr_err("alg: acomp: request alloc failed for %s\n",
   3366			       algo);
   3367			kfree(input_vec);
   3368			ret = -ENOMEM;
   3369			goto out;
   3370		}
   3371
   3372		acomp_request_set_params(req, &src, &dst, ilen, dlen);
   3373		acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
   3374					   crypto_req_done, &wait);
   3375
   3376		ret = crypto_wait_req(crypto_acomp_compress(req), &wait);
   3377		if (ret) {
   3378			pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n",
   3379			       i + 1, algo, -ret);
   3380			kfree(input_vec);
   3381			acomp_request_free(req);
   3382			goto out;
   3383		}
   3384
   3385		ilen = req->dlen;
   3386		dlen = COMP_BUF_SIZE;
   3387		sg_init_one(&src, output, ilen);
   3388		sg_init_one(&dst, decomp_out, dlen);
   3389		crypto_init_wait(&wait);
   3390		acomp_request_set_params(req, &src, &dst, ilen, dlen);
   3391
   3392		ret = crypto_wait_req(crypto_acomp_decompress(req), &wait);
   3393		if (ret) {
    3394			pr_err("alg: acomp: decompression of compressed output failed on test %d for %s: ret=%d\n",
    3395			       i + 1, algo, -ret);
   3396			kfree(input_vec);
   3397			acomp_request_free(req);
   3398			goto out;
   3399		}
   3400
   3401		if (req->dlen != ctemplate[i].inlen) {
   3402			pr_err("alg: acomp: Compression test %d failed for %s: output len = %d\n",
   3403			       i + 1, algo, req->dlen);
   3404			ret = -EINVAL;
   3405			kfree(input_vec);
   3406			acomp_request_free(req);
   3407			goto out;
   3408		}
   3409
   3410		if (memcmp(input_vec, decomp_out, req->dlen)) {
   3411			pr_err("alg: acomp: Compression test %d failed for %s\n",
   3412			       i + 1, algo);
   3413			hexdump(output, req->dlen);
   3414			ret = -EINVAL;
   3415			kfree(input_vec);
   3416			acomp_request_free(req);
   3417			goto out;
   3418		}
   3419
   3420		kfree(input_vec);
   3421		acomp_request_free(req);
   3422	}
   3423
   3424	for (i = 0; i < dtcount; i++) {
   3425		unsigned int dlen = COMP_BUF_SIZE;
   3426		int ilen = dtemplate[i].inlen;
   3427		void *input_vec;
   3428
   3429		input_vec = kmemdup(dtemplate[i].input, ilen, GFP_KERNEL);
   3430		if (!input_vec) {
   3431			ret = -ENOMEM;
   3432			goto out;
   3433		}
   3434
   3435		memset(output, 0, dlen);
   3436		crypto_init_wait(&wait);
   3437		sg_init_one(&src, input_vec, ilen);
   3438		sg_init_one(&dst, output, dlen);
   3439
   3440		req = acomp_request_alloc(tfm);
   3441		if (!req) {
   3442			pr_err("alg: acomp: request alloc failed for %s\n",
   3443			       algo);
   3444			kfree(input_vec);
   3445			ret = -ENOMEM;
   3446			goto out;
   3447		}
   3448
   3449		acomp_request_set_params(req, &src, &dst, ilen, dlen);
   3450		acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
   3451					   crypto_req_done, &wait);
   3452
   3453		ret = crypto_wait_req(crypto_acomp_decompress(req), &wait);
   3454		if (ret) {
   3455			pr_err("alg: acomp: decompression failed on test %d for %s: ret=%d\n",
   3456			       i + 1, algo, -ret);
   3457			kfree(input_vec);
   3458			acomp_request_free(req);
   3459			goto out;
   3460		}
   3461
   3462		if (req->dlen != dtemplate[i].outlen) {
   3463			pr_err("alg: acomp: Decompression test %d failed for %s: output len = %d\n",
   3464			       i + 1, algo, req->dlen);
   3465			ret = -EINVAL;
   3466			kfree(input_vec);
   3467			acomp_request_free(req);
   3468			goto out;
   3469		}
   3470
   3471		if (memcmp(output, dtemplate[i].output, req->dlen)) {
   3472			pr_err("alg: acomp: Decompression test %d failed for %s\n",
   3473			       i + 1, algo);
   3474			hexdump(output, req->dlen);
   3475			ret = -EINVAL;
   3476			kfree(input_vec);
   3477			acomp_request_free(req);
   3478			goto out;
   3479		}
   3480
   3481		kfree(input_vec);
   3482		acomp_request_free(req);
   3483	}
   3484
   3485	ret = 0;
   3486
   3487out:
   3488	kfree(decomp_out);
   3489	kfree(output);
   3490	return ret;
   3491}
   3492
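        /*
         * Known-answer test for deterministic RNGs such as ansi_cprng: the
         * RNG is reseeded with V || key || DT from each vector, asked for
         * ->rlen bytes ->loops times, and the last block produced is compared
         * against the expected result.
         */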
   3493static int test_cprng(struct crypto_rng *tfm,
   3494		      const struct cprng_testvec *template,
   3495		      unsigned int tcount)
   3496{
   3497	const char *algo = crypto_tfm_alg_driver_name(crypto_rng_tfm(tfm));
   3498	int err = 0, i, j, seedsize;
   3499	u8 *seed;
   3500	char result[32];
   3501
   3502	seedsize = crypto_rng_seedsize(tfm);
   3503
   3504	seed = kmalloc(seedsize, GFP_KERNEL);
   3505	if (!seed) {
   3506		printk(KERN_ERR "alg: cprng: Failed to allocate seed space "
   3507		       "for %s\n", algo);
   3508		return -ENOMEM;
   3509	}
   3510
   3511	for (i = 0; i < tcount; i++) {
   3512		memset(result, 0, 32);
   3513
   3514		memcpy(seed, template[i].v, template[i].vlen);
   3515		memcpy(seed + template[i].vlen, template[i].key,
   3516		       template[i].klen);
   3517		memcpy(seed + template[i].vlen + template[i].klen,
   3518		       template[i].dt, template[i].dtlen);
   3519
   3520		err = crypto_rng_reset(tfm, seed, seedsize);
   3521		if (err) {
   3522			printk(KERN_ERR "alg: cprng: Failed to reset rng "
   3523			       "for %s\n", algo);
   3524			goto out;
   3525		}
   3526
   3527		for (j = 0; j < template[i].loops; j++) {
   3528			err = crypto_rng_get_bytes(tfm, result,
   3529						   template[i].rlen);
   3530			if (err < 0) {
   3531				printk(KERN_ERR "alg: cprng: Failed to obtain "
   3532				       "the correct amount of random data for "
   3533				       "%s (requested %d)\n", algo,
   3534				       template[i].rlen);
   3535				goto out;
   3536			}
   3537		}
   3538
   3539		err = memcmp(result, template[i].result,
   3540			     template[i].rlen);
   3541		if (err) {
   3542			printk(KERN_ERR "alg: cprng: Test %d failed for %s\n",
   3543			       i, algo);
   3544			hexdump(result, template[i].rlen);
   3545			err = -EINVAL;
   3546			goto out;
   3547		}
   3548	}
   3549
   3550out:
   3551	kfree(seed);
   3552	return err;
   3553}
   3554
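        /*
         * Test a single-block cipher (crypto_cipher): run the suite's vectors
         * through test_cipher() in both the encrypt and decrypt direction.
         */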
   3555static int alg_test_cipher(const struct alg_test_desc *desc,
   3556			   const char *driver, u32 type, u32 mask)
   3557{
   3558	const struct cipher_test_suite *suite = &desc->suite.cipher;
   3559	struct crypto_cipher *tfm;
   3560	int err;
   3561
   3562	tfm = crypto_alloc_cipher(driver, type, mask);
   3563	if (IS_ERR(tfm)) {
   3564		printk(KERN_ERR "alg: cipher: Failed to load transform for "
   3565		       "%s: %ld\n", driver, PTR_ERR(tfm));
   3566		return PTR_ERR(tfm);
   3567	}
   3568
   3569	err = test_cipher(tfm, ENCRYPT, suite->vecs, suite->count);
   3570	if (!err)
   3571		err = test_cipher(tfm, DECRYPT, suite->vecs, suite->count);
   3572
   3573	crypto_free_cipher(tfm);
   3574	return err;
   3575}
   3576
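        /*
         * Dispatch on the algorithm type: acomp drivers are exercised through
         * test_acomp(), legacy crypto_comp drivers through test_comp().
         */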
   3577static int alg_test_comp(const struct alg_test_desc *desc, const char *driver,
   3578			 u32 type, u32 mask)
   3579{
   3580	struct crypto_comp *comp;
   3581	struct crypto_acomp *acomp;
   3582	int err;
   3583	u32 algo_type = type & CRYPTO_ALG_TYPE_ACOMPRESS_MASK;
   3584
   3585	if (algo_type == CRYPTO_ALG_TYPE_ACOMPRESS) {
   3586		acomp = crypto_alloc_acomp(driver, type, mask);
   3587		if (IS_ERR(acomp)) {
   3588			pr_err("alg: acomp: Failed to load transform for %s: %ld\n",
   3589			       driver, PTR_ERR(acomp));
   3590			return PTR_ERR(acomp);
   3591		}
   3592		err = test_acomp(acomp, desc->suite.comp.comp.vecs,
   3593				 desc->suite.comp.decomp.vecs,
   3594				 desc->suite.comp.comp.count,
   3595				 desc->suite.comp.decomp.count);
   3596		crypto_free_acomp(acomp);
   3597	} else {
   3598		comp = crypto_alloc_comp(driver, type, mask);
   3599		if (IS_ERR(comp)) {
   3600			pr_err("alg: comp: Failed to load transform for %s: %ld\n",
   3601			       driver, PTR_ERR(comp));
   3602			return PTR_ERR(comp);
   3603		}
   3604
   3605		err = test_comp(comp, desc->suite.comp.comp.vecs,
   3606				desc->suite.comp.decomp.vecs,
   3607				desc->suite.comp.comp.count,
   3608				desc->suite.comp.decomp.count);
   3609
   3610		crypto_free_comp(comp);
   3611	}
   3612	return err;
   3613}
   3614
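        /*
         * crc32c gets the regular hash tests plus an extra shash-only check:
         * the descriptor context is seeded with a fixed CRC state and
         * crypto_shash_final() must return the bitwise complement of that
         * state as a little-endian value.  Implementations reachable only
         * through the ahash API skip the extra check.
         */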
   3615static int alg_test_crc32c(const struct alg_test_desc *desc,
   3616			   const char *driver, u32 type, u32 mask)
   3617{
   3618	struct crypto_shash *tfm;
   3619	__le32 val;
   3620	int err;
   3621
   3622	err = alg_test_hash(desc, driver, type, mask);
   3623	if (err)
   3624		return err;
   3625
   3626	tfm = crypto_alloc_shash(driver, type, mask);
   3627	if (IS_ERR(tfm)) {
   3628		if (PTR_ERR(tfm) == -ENOENT) {
   3629			/*
   3630			 * This crc32c implementation is only available through
   3631			 * ahash API, not the shash API, so the remaining part
   3632			 * of the test is not applicable to it.
   3633			 */
   3634			return 0;
   3635		}
   3636		printk(KERN_ERR "alg: crc32c: Failed to load transform for %s: "
   3637		       "%ld\n", driver, PTR_ERR(tfm));
   3638		return PTR_ERR(tfm);
   3639	}
   3640	driver = crypto_shash_driver_name(tfm);
   3641
   3642	do {
   3643		SHASH_DESC_ON_STACK(shash, tfm);
   3644		u32 *ctx = (u32 *)shash_desc_ctx(shash);
   3645
   3646		shash->tfm = tfm;
   3647
   3648		*ctx = 420553207;
   3649		err = crypto_shash_final(shash, (u8 *)&val);
   3650		if (err) {
   3651			printk(KERN_ERR "alg: crc32c: Operation failed for "
   3652			       "%s: %d\n", driver, err);
   3653			break;
   3654		}
   3655
   3656		if (val != cpu_to_le32(~420553207)) {
   3657			pr_err("alg: crc32c: Test failed for %s: %u\n",
   3658			       driver, le32_to_cpu(val));
   3659			err = -EINVAL;
   3660		}
   3661	} while (0);
   3662
   3663	crypto_free_shash(tfm);
   3664
   3665	return err;
   3666}
   3667
   3668static int alg_test_cprng(const struct alg_test_desc *desc, const char *driver,
   3669			  u32 type, u32 mask)
   3670{
   3671	struct crypto_rng *rng;
   3672	int err;
   3673
   3674	rng = crypto_alloc_rng(driver, type, mask);
   3675	if (IS_ERR(rng)) {
   3676		printk(KERN_ERR "alg: cprng: Failed to load transform for %s: "
   3677		       "%ld\n", driver, PTR_ERR(rng));
   3678		return PTR_ERR(rng);
   3679	}
   3680
   3681	err = test_cprng(rng, desc->suite.cprng.vecs, desc->suite.cprng.count);
   3682
   3683	crypto_free_rng(rng);
   3684
   3685	return err;
   3686}
   3687
   3688
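        /*
         * Run one CAVS-style DRBG known-answer test: instantiate the DRBG
         * with the vector's entropy and personalization string, generate two
         * blocks of output (each with additional input, and with fresh
         * prediction-resistance entropy when @pr is set), and compare the
         * second block against the expected output.
         */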
   3689static int drbg_cavs_test(const struct drbg_testvec *test, int pr,
   3690			  const char *driver, u32 type, u32 mask)
   3691{
   3692	int ret = -EAGAIN;
   3693	struct crypto_rng *drng;
   3694	struct drbg_test_data test_data;
   3695	struct drbg_string addtl, pers, testentropy;
   3696	unsigned char *buf = kzalloc(test->expectedlen, GFP_KERNEL);
   3697
   3698	if (!buf)
   3699		return -ENOMEM;
   3700
   3701	drng = crypto_alloc_rng(driver, type, mask);
   3702	if (IS_ERR(drng)) {
   3703		printk(KERN_ERR "alg: drbg: could not allocate DRNG handle for "
   3704		       "%s\n", driver);
   3705		kfree_sensitive(buf);
   3706		return -ENOMEM;
   3707	}
   3708
   3709	test_data.testentropy = &testentropy;
   3710	drbg_string_fill(&testentropy, test->entropy, test->entropylen);
   3711	drbg_string_fill(&pers, test->pers, test->perslen);
   3712	ret = crypto_drbg_reset_test(drng, &pers, &test_data);
   3713	if (ret) {
   3714		printk(KERN_ERR "alg: drbg: Failed to reset rng\n");
   3715		goto outbuf;
   3716	}
   3717
   3718	drbg_string_fill(&addtl, test->addtla, test->addtllen);
   3719	if (pr) {
   3720		drbg_string_fill(&testentropy, test->entpra, test->entprlen);
   3721		ret = crypto_drbg_get_bytes_addtl_test(drng,
   3722			buf, test->expectedlen, &addtl,	&test_data);
   3723	} else {
   3724		ret = crypto_drbg_get_bytes_addtl(drng,
   3725			buf, test->expectedlen, &addtl);
   3726	}
   3727	if (ret < 0) {
   3728		printk(KERN_ERR "alg: drbg: could not obtain random data for "
   3729		       "driver %s\n", driver);
   3730		goto outbuf;
   3731	}
   3732
   3733	drbg_string_fill(&addtl, test->addtlb, test->addtllen);
   3734	if (pr) {
   3735		drbg_string_fill(&testentropy, test->entprb, test->entprlen);
   3736		ret = crypto_drbg_get_bytes_addtl_test(drng,
   3737			buf, test->expectedlen, &addtl, &test_data);
   3738	} else {
   3739		ret = crypto_drbg_get_bytes_addtl(drng,
   3740			buf, test->expectedlen, &addtl);
   3741	}
   3742	if (ret < 0) {
   3743		printk(KERN_ERR "alg: drbg: could not obtain random data for "
   3744		       "driver %s\n", driver);
   3745		goto outbuf;
   3746	}
   3747
   3748	ret = memcmp(test->expected, buf, test->expectedlen);
   3749
   3750outbuf:
   3751	crypto_free_rng(drng);
   3752	kfree_sensitive(buf);
   3753	return ret;
   3754}
   3755
   3756
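        /*
         * Run every DRBG vector through drbg_cavs_test().  Prediction
         * resistance is exercised when the driver name starts with "drbg_pr_".
         */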
   3757static int alg_test_drbg(const struct alg_test_desc *desc, const char *driver,
   3758			 u32 type, u32 mask)
   3759{
   3760	int err = 0;
   3761	int pr = 0;
   3762	int i = 0;
   3763	const struct drbg_testvec *template = desc->suite.drbg.vecs;
   3764	unsigned int tcount = desc->suite.drbg.count;
   3765
    3766	if (memcmp(driver, "drbg_pr_", 8) == 0)
   3767		pr = 1;
   3768
   3769	for (i = 0; i < tcount; i++) {
   3770		err = drbg_cavs_test(&template[i], pr, driver, type, mask);
   3771		if (err) {
   3772			printk(KERN_ERR "alg: drbg: Test %d failed for %s\n",
   3773			       i, driver);
   3774			err = -EINVAL;
   3775			break;
   3776		}
   3777	}
   3778	return err;
   3779
   3780}
   3781
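        /*
         * Run one key-agreement vector as party A: set A's secret, generate
         * A's public key (saved when the vector generates keys on the fly,
         * otherwise checked against the expected value), then compute the
         * shared secret from B's public key.  For on-the-fly vectors the
         * exchange is replayed as party B using A's public key and the two
         * shared secrets must match; otherwise the result is compared against
         * the vector's expected shared secret.
         */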
   3782static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
   3783		       const char *alg)
   3784{
   3785	struct kpp_request *req;
   3786	void *input_buf = NULL;
   3787	void *output_buf = NULL;
   3788	void *a_public = NULL;
   3789	void *a_ss = NULL;
   3790	void *shared_secret = NULL;
   3791	struct crypto_wait wait;
   3792	unsigned int out_len_max;
   3793	int err = -ENOMEM;
   3794	struct scatterlist src, dst;
   3795
   3796	req = kpp_request_alloc(tfm, GFP_KERNEL);
   3797	if (!req)
   3798		return err;
   3799
   3800	crypto_init_wait(&wait);
   3801
   3802	err = crypto_kpp_set_secret(tfm, vec->secret, vec->secret_size);
   3803	if (err < 0)
   3804		goto free_req;
   3805
   3806	out_len_max = crypto_kpp_maxsize(tfm);
   3807	output_buf = kzalloc(out_len_max, GFP_KERNEL);
   3808	if (!output_buf) {
   3809		err = -ENOMEM;
   3810		goto free_req;
   3811	}
   3812
    3813	/* No input is needed: the base (generator) is implied by the parameters */
   3814	kpp_request_set_input(req, NULL, 0);
   3815	sg_init_one(&dst, output_buf, out_len_max);
   3816	kpp_request_set_output(req, &dst, out_len_max);
   3817	kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
   3818				 crypto_req_done, &wait);
   3819
   3820	/* Compute party A's public key */
   3821	err = crypto_wait_req(crypto_kpp_generate_public_key(req), &wait);
   3822	if (err) {
   3823		pr_err("alg: %s: Party A: generate public key test failed. err %d\n",
   3824		       alg, err);
   3825		goto free_output;
   3826	}
   3827
   3828	if (vec->genkey) {
   3829		/* Save party A's public key */
   3830		a_public = kmemdup(sg_virt(req->dst), out_len_max, GFP_KERNEL);
   3831		if (!a_public) {
   3832			err = -ENOMEM;
   3833			goto free_output;
   3834		}
   3835	} else {
   3836		/* Verify calculated public key */
   3837		if (memcmp(vec->expected_a_public, sg_virt(req->dst),
   3838			   vec->expected_a_public_size)) {
   3839			pr_err("alg: %s: Party A: generate public key test failed. Invalid output\n",
   3840			       alg);
   3841			err = -EINVAL;
   3842			goto free_output;
   3843		}
   3844	}
   3845
    3846	/* Calculate the shared secret using the counterpart's (party B's) public key. */
   3847	input_buf = kmemdup(vec->b_public, vec->b_public_size, GFP_KERNEL);
   3848	if (!input_buf) {
   3849		err = -ENOMEM;
   3850		goto free_output;
   3851	}
   3852
   3853	sg_init_one(&src, input_buf, vec->b_public_size);
   3854	sg_init_one(&dst, output_buf, out_len_max);
   3855	kpp_request_set_input(req, &src, vec->b_public_size);
   3856	kpp_request_set_output(req, &dst, out_len_max);
   3857	kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
   3858				 crypto_req_done, &wait);
   3859	err = crypto_wait_req(crypto_kpp_compute_shared_secret(req), &wait);
   3860	if (err) {
   3861		pr_err("alg: %s: Party A: compute shared secret test failed. err %d\n",
   3862		       alg, err);
   3863		goto free_all;
   3864	}
   3865
   3866	if (vec->genkey) {
   3867		/* Save the shared secret obtained by party A */
   3868		a_ss = kmemdup(sg_virt(req->dst), vec->expected_ss_size, GFP_KERNEL);
   3869		if (!a_ss) {
   3870			err = -ENOMEM;
   3871			goto free_all;
   3872		}
   3873
   3874		/*
   3875		 * Calculate party B's shared secret by using party A's
   3876		 * public key.
   3877		 */
   3878		err = crypto_kpp_set_secret(tfm, vec->b_secret,
   3879					    vec->b_secret_size);
   3880		if (err < 0)
   3881			goto free_all;
   3882
   3883		sg_init_one(&src, a_public, vec->expected_a_public_size);
   3884		sg_init_one(&dst, output_buf, out_len_max);
   3885		kpp_request_set_input(req, &src, vec->expected_a_public_size);
   3886		kpp_request_set_output(req, &dst, out_len_max);
   3887		kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
   3888					 crypto_req_done, &wait);
   3889		err = crypto_wait_req(crypto_kpp_compute_shared_secret(req),
   3890				      &wait);
   3891		if (err) {
   3892			pr_err("alg: %s: Party B: compute shared secret failed. err %d\n",
   3893			       alg, err);
   3894			goto free_all;
   3895		}
   3896
   3897		shared_secret = a_ss;
   3898	} else {
   3899		shared_secret = (void *)vec->expected_ss;
   3900	}
   3901
   3902	/*
    3903	 * Verify the shared secret, from which the user will derive a
    3904	 * secret key with whatever KDF or hash they have chosen.
   3905	 */
   3906	if (memcmp(shared_secret, sg_virt(req->dst),
   3907		   vec->expected_ss_size)) {
   3908		pr_err("alg: %s: compute shared secret test failed. Invalid output\n",
   3909		       alg);
   3910		err = -EINVAL;
   3911	}
   3912
   3913free_all:
   3914	kfree(a_ss);
   3915	kfree(input_buf);
   3916free_output:
   3917	kfree(a_public);
   3918	kfree(output_buf);
   3919free_req:
   3920	kpp_request_free(req);
   3921	return err;
   3922}
   3923
   3924static int test_kpp(struct crypto_kpp *tfm, const char *alg,
   3925		    const struct kpp_testvec *vecs, unsigned int tcount)
   3926{
   3927	int ret, i;
   3928
   3929	for (i = 0; i < tcount; i++) {
   3930		ret = do_test_kpp(tfm, vecs++, alg);
   3931		if (ret) {
   3932			pr_err("alg: %s: test failed on vector %d, err=%d\n",
   3933			       alg, i + 1, ret);
   3934			return ret;
   3935		}
   3936	}
   3937	return 0;
   3938}
   3939
   3940static int alg_test_kpp(const struct alg_test_desc *desc, const char *driver,
   3941			u32 type, u32 mask)
   3942{
   3943	struct crypto_kpp *tfm;
   3944	int err = 0;
   3945
   3946	tfm = crypto_alloc_kpp(driver, type, mask);
   3947	if (IS_ERR(tfm)) {
   3948		pr_err("alg: kpp: Failed to load tfm for %s: %ld\n",
   3949		       driver, PTR_ERR(tfm));
   3950		return PTR_ERR(tfm);
   3951	}
   3952	if (desc->suite.kpp.vecs)
   3953		err = test_kpp(tfm, desc->alg, desc->suite.kpp.vecs,
   3954			       desc->suite.kpp.count);
   3955
   3956	crypto_free_kpp(tfm);
   3957	return err;
   3958}
   3959
   3960static u8 *test_pack_u32(u8 *dst, u32 val)
   3961{
   3962	memcpy(dst, &val, sizeof(val));
   3963	return dst + sizeof(val);
   3964}
   3965
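        /*
         * Run one akcipher vector.  A key buffer is assembled from vecs->key
         * followed by the packed algo/param fields and loaded as either a
         * public or a private key.  The public-key operation runs first:
         * encrypt for normal vectors, or verify for signature vectors (which
         * keep the digest in ->m and the signature in ->c, hence the argument
         * swap below).  If a private key is available, the inverse operation
         * (decrypt or sign) is run as well, and its output must match the
         * expected value modulo leading zero padding.
         */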
   3966static int test_akcipher_one(struct crypto_akcipher *tfm,
   3967			     const struct akcipher_testvec *vecs)
   3968{
   3969	char *xbuf[XBUFSIZE];
   3970	struct akcipher_request *req;
   3971	void *outbuf_enc = NULL;
   3972	void *outbuf_dec = NULL;
   3973	struct crypto_wait wait;
   3974	unsigned int out_len_max, out_len = 0;
   3975	int err = -ENOMEM;
   3976	struct scatterlist src, dst, src_tab[3];
   3977	const char *m, *c;
   3978	unsigned int m_size, c_size;
   3979	const char *op;
   3980	u8 *key, *ptr;
   3981
   3982	if (testmgr_alloc_buf(xbuf))
   3983		return err;
   3984
   3985	req = akcipher_request_alloc(tfm, GFP_KERNEL);
   3986	if (!req)
   3987		goto free_xbuf;
   3988
   3989	crypto_init_wait(&wait);
   3990
   3991	key = kmalloc(vecs->key_len + sizeof(u32) * 2 + vecs->param_len,
   3992		      GFP_KERNEL);
   3993	if (!key)
   3994		goto free_req;
   3995	memcpy(key, vecs->key, vecs->key_len);
   3996	ptr = key + vecs->key_len;
   3997	ptr = test_pack_u32(ptr, vecs->algo);
   3998	ptr = test_pack_u32(ptr, vecs->param_len);
   3999	memcpy(ptr, vecs->params, vecs->param_len);
   4000
   4001	if (vecs->public_key_vec)
   4002		err = crypto_akcipher_set_pub_key(tfm, key, vecs->key_len);
   4003	else
   4004		err = crypto_akcipher_set_priv_key(tfm, key, vecs->key_len);
   4005	if (err)
   4006		goto free_key;
   4007
   4008	/*
    4009	 * First run the tests which do not require a private key, such as
   4010	 * encrypt or verify.
   4011	 */
   4012	err = -ENOMEM;
   4013	out_len_max = crypto_akcipher_maxsize(tfm);
   4014	outbuf_enc = kzalloc(out_len_max, GFP_KERNEL);
   4015	if (!outbuf_enc)
   4016		goto free_key;
   4017
   4018	if (!vecs->siggen_sigver_test) {
   4019		m = vecs->m;
   4020		m_size = vecs->m_size;
   4021		c = vecs->c;
   4022		c_size = vecs->c_size;
   4023		op = "encrypt";
   4024	} else {
    4025		/* Swap the arguments so that the plaintext (digest) stays
    4026		 * in vecs->m and the cooked signature in vecs->c.
   4027		 */
   4028		m = vecs->c; /* signature */
   4029		m_size = vecs->c_size;
   4030		c = vecs->m; /* digest */
   4031		c_size = vecs->m_size;
   4032		op = "verify";
   4033	}
   4034
   4035	err = -E2BIG;
   4036	if (WARN_ON(m_size > PAGE_SIZE))
   4037		goto free_all;
   4038	memcpy(xbuf[0], m, m_size);
   4039
   4040	sg_init_table(src_tab, 3);
   4041	sg_set_buf(&src_tab[0], xbuf[0], 8);
   4042	sg_set_buf(&src_tab[1], xbuf[0] + 8, m_size - 8);
   4043	if (vecs->siggen_sigver_test) {
   4044		if (WARN_ON(c_size > PAGE_SIZE))
   4045			goto free_all;
   4046		memcpy(xbuf[1], c, c_size);
   4047		sg_set_buf(&src_tab[2], xbuf[1], c_size);
   4048		akcipher_request_set_crypt(req, src_tab, NULL, m_size, c_size);
   4049	} else {
   4050		sg_init_one(&dst, outbuf_enc, out_len_max);
   4051		akcipher_request_set_crypt(req, src_tab, &dst, m_size,
   4052					   out_len_max);
   4053	}
   4054	akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
   4055				      crypto_req_done, &wait);
   4056
   4057	err = crypto_wait_req(vecs->siggen_sigver_test ?
   4058			      /* Run asymmetric signature verification */
   4059			      crypto_akcipher_verify(req) :
   4060			      /* Run asymmetric encrypt */
   4061			      crypto_akcipher_encrypt(req), &wait);
   4062	if (err) {
   4063		pr_err("alg: akcipher: %s test failed. err %d\n", op, err);
   4064		goto free_all;
   4065	}
   4066	if (!vecs->siggen_sigver_test && c) {
   4067		if (req->dst_len != c_size) {
   4068			pr_err("alg: akcipher: %s test failed. Invalid output len\n",
   4069			       op);
   4070			err = -EINVAL;
   4071			goto free_all;
   4072		}
   4073		/* verify that encrypted message is equal to expected */
   4074		if (memcmp(c, outbuf_enc, c_size) != 0) {
   4075			pr_err("alg: akcipher: %s test failed. Invalid output\n",
   4076			       op);
   4077			hexdump(outbuf_enc, c_size);
   4078			err = -EINVAL;
   4079			goto free_all;
   4080		}
   4081	}
   4082
   4083	/*
    4084	 * Don't invoke the (decrypt or sign) tests, which require a private
    4085	 * key, for vectors that only carry a public key.
   4086	 */
   4087	if (vecs->public_key_vec) {
   4088		err = 0;
   4089		goto free_all;
   4090	}
   4091	outbuf_dec = kzalloc(out_len_max, GFP_KERNEL);
   4092	if (!outbuf_dec) {
   4093		err = -ENOMEM;
   4094		goto free_all;
   4095	}
   4096
   4097	if (!vecs->siggen_sigver_test && !c) {
   4098		c = outbuf_enc;
   4099		c_size = req->dst_len;
   4100	}
   4101
   4102	err = -E2BIG;
   4103	op = vecs->siggen_sigver_test ? "sign" : "decrypt";
   4104	if (WARN_ON(c_size > PAGE_SIZE))
   4105		goto free_all;
   4106	memcpy(xbuf[0], c, c_size);
   4107
   4108	sg_init_one(&src, xbuf[0], c_size);
   4109	sg_init_one(&dst, outbuf_dec, out_len_max);
   4110	crypto_init_wait(&wait);
   4111	akcipher_request_set_crypt(req, &src, &dst, c_size, out_len_max);
   4112
   4113	err = crypto_wait_req(vecs->siggen_sigver_test ?
   4114			      /* Run asymmetric signature generation */
   4115			      crypto_akcipher_sign(req) :
   4116			      /* Run asymmetric decrypt */
   4117			      crypto_akcipher_decrypt(req), &wait);
   4118	if (err) {
   4119		pr_err("alg: akcipher: %s test failed. err %d\n", op, err);
   4120		goto free_all;
   4121	}
   4122	out_len = req->dst_len;
   4123	if (out_len < m_size) {
   4124		pr_err("alg: akcipher: %s test failed. Invalid output len %u\n",
   4125		       op, out_len);
   4126		err = -EINVAL;
   4127		goto free_all;
   4128	}
   4129	/* verify that decrypted message is equal to the original msg */
   4130	if (memchr_inv(outbuf_dec, 0, out_len - m_size) ||
   4131	    memcmp(m, outbuf_dec + out_len - m_size, m_size)) {
   4132		pr_err("alg: akcipher: %s test failed. Invalid output\n", op);
   4133		hexdump(outbuf_dec, out_len);
   4134		err = -EINVAL;
   4135	}
   4136free_all:
   4137	kfree(outbuf_dec);
   4138	kfree(outbuf_enc);
   4139free_key:
   4140	kfree(key);
   4141free_req:
   4142	akcipher_request_free(req);
   4143free_xbuf:
   4144	testmgr_free_buf(xbuf);
   4145	return err;
   4146}
   4147
   4148static int test_akcipher(struct crypto_akcipher *tfm, const char *alg,
   4149			 const struct akcipher_testvec *vecs,
   4150			 unsigned int tcount)
   4151{
   4152	const char *algo =
   4153		crypto_tfm_alg_driver_name(crypto_akcipher_tfm(tfm));
   4154	int ret, i;
   4155
   4156	for (i = 0; i < tcount; i++) {
   4157		ret = test_akcipher_one(tfm, vecs++);
   4158		if (!ret)
   4159			continue;
   4160
   4161		pr_err("alg: akcipher: test %d failed for %s, err=%d\n",
   4162		       i + 1, algo, ret);
   4163		return ret;
   4164	}
   4165	return 0;
   4166}
   4167
   4168static int alg_test_akcipher(const struct alg_test_desc *desc,
   4169			     const char *driver, u32 type, u32 mask)
   4170{
   4171	struct crypto_akcipher *tfm;
   4172	int err = 0;
   4173
   4174	tfm = crypto_alloc_akcipher(driver, type, mask);
   4175	if (IS_ERR(tfm)) {
   4176		pr_err("alg: akcipher: Failed to load tfm for %s: %ld\n",
   4177		       driver, PTR_ERR(tfm));
   4178		return PTR_ERR(tfm);
   4179	}
   4180	if (desc->suite.akcipher.vecs)
   4181		err = test_akcipher(tfm, desc->alg, desc->suite.akcipher.vecs,
   4182				    desc->suite.akcipher.count);
   4183
   4184	crypto_free_akcipher(tfm);
   4185	return err;
   4186}
   4187
   4188static int alg_test_null(const struct alg_test_desc *desc,
   4189			     const char *driver, u32 type, u32 mask)
   4190{
   4191	return 0;
   4192}
   4193
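        /*
         * Expand a test-vector array into the { .vecs, .count } pair expected
         * by the per-type suite structs.  ____VECS is the unbraced form, used
         * when extra suite flags (e.g. .einval_allowed) are set alongside the
         * vectors.
         */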
   4194#define ____VECS(tv)	.vecs = tv, .count = ARRAY_SIZE(tv)
   4195#define __VECS(tv)	{ ____VECS(tv) }
   4196
   4197/* Please keep this list sorted by algorithm name. */
   4198static const struct alg_test_desc alg_test_descs[] = {
   4199	{
   4200		.alg = "adiantum(xchacha12,aes)",
   4201		.generic_driver = "adiantum(xchacha12-generic,aes-generic,nhpoly1305-generic)",
   4202		.test = alg_test_skcipher,
   4203		.suite = {
   4204			.cipher = __VECS(adiantum_xchacha12_aes_tv_template)
   4205		},
   4206	}, {
   4207		.alg = "adiantum(xchacha20,aes)",
   4208		.generic_driver = "adiantum(xchacha20-generic,aes-generic,nhpoly1305-generic)",
   4209		.test = alg_test_skcipher,
   4210		.suite = {
   4211			.cipher = __VECS(adiantum_xchacha20_aes_tv_template)
   4212		},
   4213	}, {
   4214		.alg = "aegis128",
   4215		.test = alg_test_aead,
   4216		.suite = {
   4217			.aead = __VECS(aegis128_tv_template)
   4218		}
   4219	}, {
   4220		.alg = "ansi_cprng",
   4221		.test = alg_test_cprng,
   4222		.suite = {
   4223			.cprng = __VECS(ansi_cprng_aes_tv_template)
   4224		}
   4225	}, {
   4226		.alg = "authenc(hmac(md5),ecb(cipher_null))",
   4227		.test = alg_test_aead,
   4228		.suite = {
   4229			.aead = __VECS(hmac_md5_ecb_cipher_null_tv_template)
   4230		}
   4231	}, {
   4232		.alg = "authenc(hmac(sha1),cbc(aes))",
   4233		.test = alg_test_aead,
   4234		.fips_allowed = 1,
   4235		.suite = {
   4236			.aead = __VECS(hmac_sha1_aes_cbc_tv_temp)
   4237		}
   4238	}, {
   4239		.alg = "authenc(hmac(sha1),cbc(des))",
   4240		.test = alg_test_aead,
   4241		.suite = {
   4242			.aead = __VECS(hmac_sha1_des_cbc_tv_temp)
   4243		}
   4244	}, {
   4245		.alg = "authenc(hmac(sha1),cbc(des3_ede))",
   4246		.test = alg_test_aead,
   4247		.suite = {
   4248			.aead = __VECS(hmac_sha1_des3_ede_cbc_tv_temp)
   4249		}
   4250	}, {
   4251		.alg = "authenc(hmac(sha1),ctr(aes))",
   4252		.test = alg_test_null,
   4253		.fips_allowed = 1,
   4254	}, {
   4255		.alg = "authenc(hmac(sha1),ecb(cipher_null))",
   4256		.test = alg_test_aead,
   4257		.suite = {
   4258			.aead = __VECS(hmac_sha1_ecb_cipher_null_tv_temp)
   4259		}
   4260	}, {
   4261		.alg = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
   4262		.test = alg_test_null,
   4263		.fips_allowed = 1,
   4264	}, {
   4265		.alg = "authenc(hmac(sha224),cbc(des))",
   4266		.test = alg_test_aead,
   4267		.suite = {
   4268			.aead = __VECS(hmac_sha224_des_cbc_tv_temp)
   4269		}
   4270	}, {
   4271		.alg = "authenc(hmac(sha224),cbc(des3_ede))",
   4272		.test = alg_test_aead,
   4273		.suite = {
   4274			.aead = __VECS(hmac_sha224_des3_ede_cbc_tv_temp)
   4275		}
   4276	}, {
   4277		.alg = "authenc(hmac(sha256),cbc(aes))",
   4278		.test = alg_test_aead,
   4279		.fips_allowed = 1,
   4280		.suite = {
   4281			.aead = __VECS(hmac_sha256_aes_cbc_tv_temp)
   4282		}
   4283	}, {
   4284		.alg = "authenc(hmac(sha256),cbc(des))",
   4285		.test = alg_test_aead,
   4286		.suite = {
   4287			.aead = __VECS(hmac_sha256_des_cbc_tv_temp)
   4288		}
   4289	}, {
   4290		.alg = "authenc(hmac(sha256),cbc(des3_ede))",
   4291		.test = alg_test_aead,
   4292		.suite = {
   4293			.aead = __VECS(hmac_sha256_des3_ede_cbc_tv_temp)
   4294		}
   4295	}, {
   4296		.alg = "authenc(hmac(sha256),ctr(aes))",
   4297		.test = alg_test_null,
   4298		.fips_allowed = 1,
   4299	}, {
   4300		.alg = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
   4301		.test = alg_test_null,
   4302		.fips_allowed = 1,
   4303	}, {
   4304		.alg = "authenc(hmac(sha384),cbc(des))",
   4305		.test = alg_test_aead,
   4306		.suite = {
   4307			.aead = __VECS(hmac_sha384_des_cbc_tv_temp)
   4308		}
   4309	}, {
   4310		.alg = "authenc(hmac(sha384),cbc(des3_ede))",
   4311		.test = alg_test_aead,
   4312		.suite = {
   4313			.aead = __VECS(hmac_sha384_des3_ede_cbc_tv_temp)
   4314		}
   4315	}, {
   4316		.alg = "authenc(hmac(sha384),ctr(aes))",
   4317		.test = alg_test_null,
   4318		.fips_allowed = 1,
   4319	}, {
   4320		.alg = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
   4321		.test = alg_test_null,
   4322		.fips_allowed = 1,
   4323	}, {
   4324		.alg = "authenc(hmac(sha512),cbc(aes))",
   4325		.fips_allowed = 1,
   4326		.test = alg_test_aead,
   4327		.suite = {
   4328			.aead = __VECS(hmac_sha512_aes_cbc_tv_temp)
   4329		}
   4330	}, {
   4331		.alg = "authenc(hmac(sha512),cbc(des))",
   4332		.test = alg_test_aead,
   4333		.suite = {
   4334			.aead = __VECS(hmac_sha512_des_cbc_tv_temp)
   4335		}
   4336	}, {
   4337		.alg = "authenc(hmac(sha512),cbc(des3_ede))",
   4338		.test = alg_test_aead,
   4339		.suite = {
   4340			.aead = __VECS(hmac_sha512_des3_ede_cbc_tv_temp)
   4341		}
   4342	}, {
   4343		.alg = "authenc(hmac(sha512),ctr(aes))",
   4344		.test = alg_test_null,
   4345		.fips_allowed = 1,
   4346	}, {
   4347		.alg = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
   4348		.test = alg_test_null,
   4349		.fips_allowed = 1,
   4350	}, {
   4351		.alg = "blake2b-160",
   4352		.test = alg_test_hash,
   4353		.fips_allowed = 0,
   4354		.suite = {
   4355			.hash = __VECS(blake2b_160_tv_template)
   4356		}
   4357	}, {
   4358		.alg = "blake2b-256",
   4359		.test = alg_test_hash,
   4360		.fips_allowed = 0,
   4361		.suite = {
   4362			.hash = __VECS(blake2b_256_tv_template)
   4363		}
   4364	}, {
   4365		.alg = "blake2b-384",
   4366		.test = alg_test_hash,
   4367		.fips_allowed = 0,
   4368		.suite = {
   4369			.hash = __VECS(blake2b_384_tv_template)
   4370		}
   4371	}, {
   4372		.alg = "blake2b-512",
   4373		.test = alg_test_hash,
   4374		.fips_allowed = 0,
   4375		.suite = {
   4376			.hash = __VECS(blake2b_512_tv_template)
   4377		}
   4378	}, {
   4379		.alg = "blake2s-128",
   4380		.test = alg_test_hash,
   4381		.suite = {
   4382			.hash = __VECS(blakes2s_128_tv_template)
   4383		}
   4384	}, {
   4385		.alg = "blake2s-160",
   4386		.test = alg_test_hash,
   4387		.suite = {
   4388			.hash = __VECS(blakes2s_160_tv_template)
   4389		}
   4390	}, {
   4391		.alg = "blake2s-224",
   4392		.test = alg_test_hash,
   4393		.suite = {
   4394			.hash = __VECS(blakes2s_224_tv_template)
   4395		}
   4396	}, {
   4397		.alg = "blake2s-256",
   4398		.test = alg_test_hash,
   4399		.suite = {
   4400			.hash = __VECS(blakes2s_256_tv_template)
   4401		}
   4402	}, {
   4403		.alg = "cbc(aes)",
   4404		.test = alg_test_skcipher,
   4405		.fips_allowed = 1,
   4406		.suite = {
   4407			.cipher = __VECS(aes_cbc_tv_template)
   4408		},
   4409	}, {
   4410		.alg = "cbc(anubis)",
   4411		.test = alg_test_skcipher,
   4412		.suite = {
   4413			.cipher = __VECS(anubis_cbc_tv_template)
   4414		},
   4415	}, {
   4416		.alg = "cbc(blowfish)",
   4417		.test = alg_test_skcipher,
   4418		.suite = {
   4419			.cipher = __VECS(bf_cbc_tv_template)
   4420		},
   4421	}, {
   4422		.alg = "cbc(camellia)",
   4423		.test = alg_test_skcipher,
   4424		.suite = {
   4425			.cipher = __VECS(camellia_cbc_tv_template)
   4426		},
   4427	}, {
   4428		.alg = "cbc(cast5)",
   4429		.test = alg_test_skcipher,
   4430		.suite = {
   4431			.cipher = __VECS(cast5_cbc_tv_template)
   4432		},
   4433	}, {
   4434		.alg = "cbc(cast6)",
   4435		.test = alg_test_skcipher,
   4436		.suite = {
   4437			.cipher = __VECS(cast6_cbc_tv_template)
   4438		},
   4439	}, {
   4440		.alg = "cbc(des)",
   4441		.test = alg_test_skcipher,
   4442		.suite = {
   4443			.cipher = __VECS(des_cbc_tv_template)
   4444		},
   4445	}, {
   4446		.alg = "cbc(des3_ede)",
   4447		.test = alg_test_skcipher,
   4448		.suite = {
   4449			.cipher = __VECS(des3_ede_cbc_tv_template)
   4450		},
   4451	}, {
   4452		/* Same as cbc(aes) except the key is stored in
   4453		 * hardware secure memory which we reference by index
   4454		 */
   4455		.alg = "cbc(paes)",
   4456		.test = alg_test_null,
   4457		.fips_allowed = 1,
   4458	}, {
   4459		/* Same as cbc(sm4) except the key is stored in
   4460		 * hardware secure memory which we reference by index
   4461		 */
   4462		.alg = "cbc(psm4)",
   4463		.test = alg_test_null,
   4464	}, {
   4465		.alg = "cbc(serpent)",
   4466		.test = alg_test_skcipher,
   4467		.suite = {
   4468			.cipher = __VECS(serpent_cbc_tv_template)
   4469		},
   4470	}, {
   4471		.alg = "cbc(sm4)",
   4472		.test = alg_test_skcipher,
   4473		.suite = {
   4474			.cipher = __VECS(sm4_cbc_tv_template)
   4475		}
   4476	}, {
   4477		.alg = "cbc(twofish)",
   4478		.test = alg_test_skcipher,
   4479		.suite = {
   4480			.cipher = __VECS(tf_cbc_tv_template)
   4481		},
   4482	}, {
   4483#if IS_ENABLED(CONFIG_CRYPTO_PAES_S390)
   4484		.alg = "cbc-paes-s390",
   4485		.fips_allowed = 1,
   4486		.test = alg_test_skcipher,
   4487		.suite = {
   4488			.cipher = __VECS(aes_cbc_tv_template)
   4489		}
   4490	}, {
   4491#endif
   4492		.alg = "cbcmac(aes)",
   4493		.fips_allowed = 1,
   4494		.test = alg_test_hash,
   4495		.suite = {
   4496			.hash = __VECS(aes_cbcmac_tv_template)
   4497		}
   4498	}, {
   4499		.alg = "cbcmac(sm4)",
   4500		.test = alg_test_hash,
   4501		.suite = {
   4502			.hash = __VECS(sm4_cbcmac_tv_template)
   4503		}
   4504	}, {
   4505		.alg = "ccm(aes)",
   4506		.generic_driver = "ccm_base(ctr(aes-generic),cbcmac(aes-generic))",
   4507		.test = alg_test_aead,
   4508		.fips_allowed = 1,
   4509		.suite = {
   4510			.aead = {
   4511				____VECS(aes_ccm_tv_template),
   4512				.einval_allowed = 1,
   4513			}
   4514		}
   4515	}, {
   4516		.alg = "ccm(sm4)",
   4517		.generic_driver = "ccm_base(ctr(sm4-generic),cbcmac(sm4-generic))",
   4518		.test = alg_test_aead,
   4519		.suite = {
   4520			.aead = {
   4521				____VECS(sm4_ccm_tv_template),
   4522				.einval_allowed = 1,
   4523			}
   4524		}
   4525	}, {
   4526		.alg = "cfb(aes)",
   4527		.test = alg_test_skcipher,
   4528		.fips_allowed = 1,
   4529		.suite = {
   4530			.cipher = __VECS(aes_cfb_tv_template)
   4531		},
   4532	}, {
   4533		.alg = "cfb(sm4)",
   4534		.test = alg_test_skcipher,
   4535		.suite = {
   4536			.cipher = __VECS(sm4_cfb_tv_template)
   4537		}
   4538	}, {
   4539		.alg = "chacha20",
   4540		.test = alg_test_skcipher,
   4541		.suite = {
   4542			.cipher = __VECS(chacha20_tv_template)
   4543		},
   4544	}, {
   4545		.alg = "cmac(aes)",
   4546		.fips_allowed = 1,
   4547		.test = alg_test_hash,
   4548		.suite = {
   4549			.hash = __VECS(aes_cmac128_tv_template)
   4550		}
   4551	}, {
   4552		.alg = "cmac(des3_ede)",
   4553		.test = alg_test_hash,
   4554		.suite = {
   4555			.hash = __VECS(des3_ede_cmac64_tv_template)
   4556		}
   4557	}, {
   4558		.alg = "cmac(sm4)",
   4559		.test = alg_test_hash,
   4560		.suite = {
   4561			.hash = __VECS(sm4_cmac128_tv_template)
   4562		}
   4563	}, {
   4564		.alg = "compress_null",
   4565		.test = alg_test_null,
   4566	}, {
   4567		.alg = "crc32",
   4568		.test = alg_test_hash,
   4569		.fips_allowed = 1,
   4570		.suite = {
   4571			.hash = __VECS(crc32_tv_template)
   4572		}
   4573	}, {
   4574		.alg = "crc32c",
   4575		.test = alg_test_crc32c,
   4576		.fips_allowed = 1,
   4577		.suite = {
   4578			.hash = __VECS(crc32c_tv_template)
   4579		}
   4580	}, {
   4581		.alg = "crc64-rocksoft",
   4582		.test = alg_test_hash,
   4583		.fips_allowed = 1,
   4584		.suite = {
   4585			.hash = __VECS(crc64_rocksoft_tv_template)
   4586		}
   4587	}, {
   4588		.alg = "crct10dif",
   4589		.test = alg_test_hash,
   4590		.fips_allowed = 1,
   4591		.suite = {
   4592			.hash = __VECS(crct10dif_tv_template)
   4593		}
   4594	}, {
   4595		.alg = "ctr(aes)",
   4596		.test = alg_test_skcipher,
   4597		.fips_allowed = 1,
   4598		.suite = {
   4599			.cipher = __VECS(aes_ctr_tv_template)
   4600		}
   4601	}, {
   4602		.alg = "ctr(blowfish)",
   4603		.test = alg_test_skcipher,
   4604		.suite = {
   4605			.cipher = __VECS(bf_ctr_tv_template)
   4606		}
   4607	}, {
   4608		.alg = "ctr(camellia)",
   4609		.test = alg_test_skcipher,
   4610		.suite = {
   4611			.cipher = __VECS(camellia_ctr_tv_template)
   4612		}
   4613	}, {
   4614		.alg = "ctr(cast5)",
   4615		.test = alg_test_skcipher,
   4616		.suite = {
   4617			.cipher = __VECS(cast5_ctr_tv_template)
   4618		}
   4619	}, {
   4620		.alg = "ctr(cast6)",
   4621		.test = alg_test_skcipher,
   4622		.suite = {
   4623			.cipher = __VECS(cast6_ctr_tv_template)
   4624		}
   4625	}, {
   4626		.alg = "ctr(des)",
   4627		.test = alg_test_skcipher,
   4628		.suite = {
   4629			.cipher = __VECS(des_ctr_tv_template)
   4630		}
   4631	}, {
   4632		.alg = "ctr(des3_ede)",
   4633		.test = alg_test_skcipher,
   4634		.suite = {
   4635			.cipher = __VECS(des3_ede_ctr_tv_template)
   4636		}
   4637	}, {
   4638		/* Same as ctr(aes) except the key is stored in
   4639		 * hardware secure memory which we reference by index
   4640		 */
   4641		.alg = "ctr(paes)",
   4642		.test = alg_test_null,
   4643		.fips_allowed = 1,
   4644	}, {
   4645
   4646		/* Same as ctr(sm4) except the key is stored in
   4647		 * hardware secure memory which we reference by index
   4648		 */
   4649		.alg = "ctr(psm4)",
   4650		.test = alg_test_null,
   4651	}, {
   4652		.alg = "ctr(serpent)",
   4653		.test = alg_test_skcipher,
   4654		.suite = {
   4655			.cipher = __VECS(serpent_ctr_tv_template)
   4656		}
   4657	}, {
   4658		.alg = "ctr(sm4)",
   4659		.test = alg_test_skcipher,
   4660		.suite = {
   4661			.cipher = __VECS(sm4_ctr_tv_template)
   4662		}
   4663	}, {
   4664		.alg = "ctr(twofish)",
   4665		.test = alg_test_skcipher,
   4666		.suite = {
   4667			.cipher = __VECS(tf_ctr_tv_template)
   4668		}
   4669	}, {
   4670#if IS_ENABLED(CONFIG_CRYPTO_PAES_S390)
   4671		.alg = "ctr-paes-s390",
   4672		.fips_allowed = 1,
   4673		.test = alg_test_skcipher,
   4674		.suite = {
   4675			.cipher = __VECS(aes_ctr_tv_template)
   4676		}
   4677	}, {
   4678#endif
   4679		.alg = "cts(cbc(aes))",
   4680		.test = alg_test_skcipher,
   4681		.fips_allowed = 1,
   4682		.suite = {
   4683			.cipher = __VECS(cts_mode_tv_template)
   4684		}
   4685	}, {
    4686		/* Same as cts(cbc(aes)) except the key is stored in
   4687		 * hardware secure memory which we reference by index
   4688		 */
   4689		.alg = "cts(cbc(paes))",
   4690		.test = alg_test_null,
   4691		.fips_allowed = 1,
   4692	}, {
   4693		.alg = "curve25519",
   4694		.test = alg_test_kpp,
   4695		.suite = {
   4696			.kpp = __VECS(curve25519_tv_template)
   4697		}
   4698	}, {
   4699		.alg = "deflate",
   4700		.test = alg_test_comp,
   4701		.fips_allowed = 1,
   4702		.suite = {
   4703			.comp = {
   4704				.comp = __VECS(deflate_comp_tv_template),
   4705				.decomp = __VECS(deflate_decomp_tv_template)
   4706			}
   4707		}
   4708	}, {
   4709		.alg = "dh",
   4710		.test = alg_test_kpp,
   4711		.suite = {
   4712			.kpp = __VECS(dh_tv_template)
   4713		}
   4714	}, {
   4715		.alg = "digest_null",
   4716		.test = alg_test_null,
   4717	}, {
   4718		.alg = "drbg_nopr_ctr_aes128",
   4719		.test = alg_test_drbg,
   4720		.fips_allowed = 1,
   4721		.suite = {
   4722			.drbg = __VECS(drbg_nopr_ctr_aes128_tv_template)
   4723		}
   4724	}, {
   4725		.alg = "drbg_nopr_ctr_aes192",
   4726		.test = alg_test_drbg,
   4727		.fips_allowed = 1,
   4728		.suite = {
   4729			.drbg = __VECS(drbg_nopr_ctr_aes192_tv_template)
   4730		}
   4731	}, {
   4732		.alg = "drbg_nopr_ctr_aes256",
   4733		.test = alg_test_drbg,
   4734		.fips_allowed = 1,
   4735		.suite = {
   4736			.drbg = __VECS(drbg_nopr_ctr_aes256_tv_template)
   4737		}
   4738	}, {
   4739		/*
   4740		 * There is no need to specifically test the DRBG with every
   4741		 * backend cipher -- covered by drbg_nopr_hmac_sha256 test
   4742		 */
   4743		.alg = "drbg_nopr_hmac_sha1",
   4744		.fips_allowed = 1,
   4745		.test = alg_test_null,
   4746	}, {
   4747		.alg = "drbg_nopr_hmac_sha256",
   4748		.test = alg_test_drbg,
   4749		.fips_allowed = 1,
   4750		.suite = {
   4751			.drbg = __VECS(drbg_nopr_hmac_sha256_tv_template)
   4752		}
   4753	}, {
   4754		/* covered by drbg_nopr_hmac_sha256 test */
   4755		.alg = "drbg_nopr_hmac_sha384",
   4756		.fips_allowed = 1,
   4757		.test = alg_test_null,
   4758	}, {
   4759		.alg = "drbg_nopr_hmac_sha512",
   4760		.test = alg_test_drbg,
   4761		.fips_allowed = 1,
   4762		.suite = {
   4763			.drbg = __VECS(drbg_nopr_hmac_sha512_tv_template)
   4764		}
   4765	}, {
   4766		.alg = "drbg_nopr_sha1",
   4767		.fips_allowed = 1,
   4768		.test = alg_test_null,
   4769	}, {
   4770		.alg = "drbg_nopr_sha256",
   4771		.test = alg_test_drbg,
   4772		.fips_allowed = 1,
   4773		.suite = {
   4774			.drbg = __VECS(drbg_nopr_sha256_tv_template)
   4775		}
   4776	}, {
   4777		/* covered by drbg_nopr_sha256 test */
   4778		.alg = "drbg_nopr_sha384",
   4779		.fips_allowed = 1,
   4780		.test = alg_test_null,
   4781	}, {
   4782		.alg = "drbg_nopr_sha512",
   4783		.fips_allowed = 1,
   4784		.test = alg_test_null,
   4785	}, {
   4786		.alg = "drbg_pr_ctr_aes128",
   4787		.test = alg_test_drbg,
   4788		.fips_allowed = 1,
   4789		.suite = {
   4790			.drbg = __VECS(drbg_pr_ctr_aes128_tv_template)
   4791		}
   4792	}, {
   4793		/* covered by drbg_pr_ctr_aes128 test */
   4794		.alg = "drbg_pr_ctr_aes192",
   4795		.fips_allowed = 1,
   4796		.test = alg_test_null,
   4797	}, {
   4798		.alg = "drbg_pr_ctr_aes256",
   4799		.fips_allowed = 1,
   4800		.test = alg_test_null,
   4801	}, {
   4802		.alg = "drbg_pr_hmac_sha1",
   4803		.fips_allowed = 1,
   4804		.test = alg_test_null,
   4805	}, {
   4806		.alg = "drbg_pr_hmac_sha256",
   4807		.test = alg_test_drbg,
   4808		.fips_allowed = 1,
   4809		.suite = {
   4810			.drbg = __VECS(drbg_pr_hmac_sha256_tv_template)
   4811		}
   4812	}, {
   4813		/* covered by drbg_pr_hmac_sha256 test */
   4814		.alg = "drbg_pr_hmac_sha384",
   4815		.fips_allowed = 1,
   4816		.test = alg_test_null,
   4817	}, {
   4818		.alg = "drbg_pr_hmac_sha512",
   4819		.test = alg_test_null,
   4820		.fips_allowed = 1,
   4821	}, {
   4822		.alg = "drbg_pr_sha1",
   4823		.fips_allowed = 1,
   4824		.test = alg_test_null,
   4825	}, {
   4826		.alg = "drbg_pr_sha256",
   4827		.test = alg_test_drbg,
   4828		.fips_allowed = 1,
   4829		.suite = {
   4830			.drbg = __VECS(drbg_pr_sha256_tv_template)
   4831		}
   4832	}, {
   4833		/* covered by drbg_pr_sha256 test */
   4834		.alg = "drbg_pr_sha384",
   4835		.fips_allowed = 1,
   4836		.test = alg_test_null,
   4837	}, {
   4838		.alg = "drbg_pr_sha512",
   4839		.fips_allowed = 1,
   4840		.test = alg_test_null,
   4841	}, {
   4842		.alg = "ecb(aes)",
   4843		.test = alg_test_skcipher,
   4844		.fips_allowed = 1,
   4845		.suite = {
   4846			.cipher = __VECS(aes_tv_template)
   4847		}
   4848	}, {
   4849		.alg = "ecb(anubis)",
   4850		.test = alg_test_skcipher,
   4851		.suite = {
   4852			.cipher = __VECS(anubis_tv_template)
   4853		}
   4854	}, {
   4855		.alg = "ecb(arc4)",
   4856		.generic_driver = "ecb(arc4)-generic",
   4857		.test = alg_test_skcipher,
   4858		.suite = {
   4859			.cipher = __VECS(arc4_tv_template)
   4860		}
   4861	}, {
   4862		.alg = "ecb(blowfish)",
   4863		.test = alg_test_skcipher,
   4864		.suite = {
   4865			.cipher = __VECS(bf_tv_template)
   4866		}
   4867	}, {
   4868		.alg = "ecb(camellia)",
   4869		.test = alg_test_skcipher,
   4870		.suite = {
   4871			.cipher = __VECS(camellia_tv_template)
   4872		}
   4873	}, {
   4874		.alg = "ecb(cast5)",
   4875		.test = alg_test_skcipher,
   4876		.suite = {
   4877			.cipher = __VECS(cast5_tv_template)
   4878		}
   4879	}, {
   4880		.alg = "ecb(cast6)",
   4881		.test = alg_test_skcipher,
   4882		.suite = {
   4883			.cipher = __VECS(cast6_tv_template)
   4884		}
   4885	}, {
   4886		.alg = "ecb(cipher_null)",
   4887		.test = alg_test_null,
   4888		.fips_allowed = 1,
   4889	}, {
   4890		.alg = "ecb(des)",
   4891		.test = alg_test_skcipher,
   4892		.suite = {
   4893			.cipher = __VECS(des_tv_template)
   4894		}
   4895	}, {
   4896		.alg = "ecb(des3_ede)",
   4897		.test = alg_test_skcipher,
   4898		.suite = {
   4899			.cipher = __VECS(des3_ede_tv_template)
   4900		}
   4901	}, {
   4902		.alg = "ecb(fcrypt)",
   4903		.test = alg_test_skcipher,
   4904		.suite = {
   4905			.cipher = {
   4906				.vecs = fcrypt_pcbc_tv_template,
   4907				.count = 1
   4908			}
   4909		}
   4910	}, {
   4911		.alg = "ecb(khazad)",
   4912		.test = alg_test_skcipher,
   4913		.suite = {
   4914			.cipher = __VECS(khazad_tv_template)
   4915		}
   4916	}, {
   4917		/* Same as ecb(aes) except the key is stored in
   4918		 * hardware secure memory which we reference by index
   4919		 */
   4920		.alg = "ecb(paes)",
   4921		.test = alg_test_null,
   4922		.fips_allowed = 1,
   4923	}, {
   4924		.alg = "ecb(seed)",
   4925		.test = alg_test_skcipher,
   4926		.suite = {
   4927			.cipher = __VECS(seed_tv_template)
   4928		}
   4929	}, {
   4930		.alg = "ecb(serpent)",
   4931		.test = alg_test_skcipher,
   4932		.suite = {
   4933			.cipher = __VECS(serpent_tv_template)
   4934		}
   4935	}, {
   4936		.alg = "ecb(sm4)",
   4937		.test = alg_test_skcipher,
   4938		.suite = {
   4939			.cipher = __VECS(sm4_tv_template)
   4940		}
   4941	}, {
   4942		.alg = "ecb(tea)",
   4943		.test = alg_test_skcipher,
   4944		.suite = {
   4945			.cipher = __VECS(tea_tv_template)
   4946		}
   4947	}, {
   4948		.alg = "ecb(twofish)",
   4949		.test = alg_test_skcipher,
   4950		.suite = {
   4951			.cipher = __VECS(tf_tv_template)
   4952		}
   4953	}, {
   4954		.alg = "ecb(xeta)",
   4955		.test = alg_test_skcipher,
   4956		.suite = {
   4957			.cipher = __VECS(xeta_tv_template)
   4958		}
   4959	}, {
   4960		.alg = "ecb(xtea)",
   4961		.test = alg_test_skcipher,
   4962		.suite = {
   4963			.cipher = __VECS(xtea_tv_template)
   4964		}
   4965	}, {
   4966#if IS_ENABLED(CONFIG_CRYPTO_PAES_S390)
   4967		.alg = "ecb-paes-s390",
   4968		.fips_allowed = 1,
   4969		.test = alg_test_skcipher,
   4970		.suite = {
   4971			.cipher = __VECS(aes_tv_template)
   4972		}
   4973	}, {
   4974#endif
   4975		.alg = "ecdh-nist-p192",
   4976		.test = alg_test_kpp,
   4977		.suite = {
   4978			.kpp = __VECS(ecdh_p192_tv_template)
   4979		}
   4980	}, {
   4981		.alg = "ecdh-nist-p256",
   4982		.test = alg_test_kpp,
   4983		.fips_allowed = 1,
   4984		.suite = {
   4985			.kpp = __VECS(ecdh_p256_tv_template)
   4986		}
   4987	}, {
   4988		.alg = "ecdh-nist-p384",
   4989		.test = alg_test_kpp,
   4990		.fips_allowed = 1,
   4991		.suite = {
   4992			.kpp = __VECS(ecdh_p384_tv_template)
   4993		}
   4994	}, {
   4995		.alg = "ecdsa-nist-p192",
   4996		.test = alg_test_akcipher,
   4997		.suite = {
   4998			.akcipher = __VECS(ecdsa_nist_p192_tv_template)
   4999		}
   5000	}, {
   5001		.alg = "ecdsa-nist-p256",
   5002		.test = alg_test_akcipher,
   5003		.suite = {
   5004			.akcipher = __VECS(ecdsa_nist_p256_tv_template)
   5005		}
   5006	}, {
   5007		.alg = "ecdsa-nist-p384",
   5008		.test = alg_test_akcipher,
   5009		.suite = {
   5010			.akcipher = __VECS(ecdsa_nist_p384_tv_template)
   5011		}
   5012	}, {
   5013		.alg = "ecrdsa",
   5014		.test = alg_test_akcipher,
   5015		.suite = {
   5016			.akcipher = __VECS(ecrdsa_tv_template)
   5017		}
   5018	}, {
   5019		.alg = "essiv(authenc(hmac(sha256),cbc(aes)),sha256)",
   5020		.test = alg_test_aead,
   5021		.fips_allowed = 1,
   5022		.suite = {
   5023			.aead = __VECS(essiv_hmac_sha256_aes_cbc_tv_temp)
   5024		}
   5025	}, {
   5026		.alg = "essiv(cbc(aes),sha256)",
   5027		.test = alg_test_skcipher,
   5028		.fips_allowed = 1,
   5029		.suite = {
   5030			.cipher = __VECS(essiv_aes_cbc_tv_template)
   5031		}
   5032	}, {
   5033#if IS_ENABLED(CONFIG_CRYPTO_DH_RFC7919_GROUPS)
   5034		.alg = "ffdhe2048(dh)",
   5035		.test = alg_test_kpp,
   5036		.fips_allowed = 1,
   5037		.suite = {
   5038			.kpp = __VECS(ffdhe2048_dh_tv_template)
   5039		}
   5040	}, {
   5041		.alg = "ffdhe3072(dh)",
   5042		.test = alg_test_kpp,
   5043		.fips_allowed = 1,
   5044		.suite = {
   5045			.kpp = __VECS(ffdhe3072_dh_tv_template)
   5046		}
   5047	}, {
   5048		.alg = "ffdhe4096(dh)",
   5049		.test = alg_test_kpp,
   5050		.fips_allowed = 1,
   5051		.suite = {
   5052			.kpp = __VECS(ffdhe4096_dh_tv_template)
   5053		}
   5054	}, {
   5055		.alg = "ffdhe6144(dh)",
   5056		.test = alg_test_kpp,
   5057		.fips_allowed = 1,
   5058		.suite = {
   5059			.kpp = __VECS(ffdhe6144_dh_tv_template)
   5060		}
   5061	}, {
   5062		.alg = "ffdhe8192(dh)",
   5063		.test = alg_test_kpp,
   5064		.fips_allowed = 1,
   5065		.suite = {
   5066			.kpp = __VECS(ffdhe8192_dh_tv_template)
   5067		}
   5068	}, {
   5069#endif /* CONFIG_CRYPTO_DH_RFC7919_GROUPS */
   5070		.alg = "gcm(aes)",
   5071		.generic_driver = "gcm_base(ctr(aes-generic),ghash-generic)",
   5072		.test = alg_test_aead,
   5073		.fips_allowed = 1,
   5074		.suite = {
   5075			.aead = __VECS(aes_gcm_tv_template)
   5076		}
   5077	}, {
   5078		.alg = "gcm(sm4)",
   5079		.generic_driver = "gcm_base(ctr(sm4-generic),ghash-generic)",
   5080		.test = alg_test_aead,
   5081		.suite = {
   5082			.aead = __VECS(sm4_gcm_tv_template)
   5083		}
   5084	}, {
   5085		.alg = "ghash",
   5086		.test = alg_test_hash,
   5087		.fips_allowed = 1,
   5088		.suite = {
   5089			.hash = __VECS(ghash_tv_template)
   5090		}
   5091	}, {
   5092		.alg = "hmac(md5)",
   5093		.test = alg_test_hash,
   5094		.suite = {
   5095			.hash = __VECS(hmac_md5_tv_template)
   5096		}
   5097	}, {
   5098		.alg = "hmac(rmd160)",
   5099		.test = alg_test_hash,
   5100		.suite = {
   5101			.hash = __VECS(hmac_rmd160_tv_template)
   5102		}
   5103	}, {
   5104		.alg = "hmac(sha1)",
   5105		.test = alg_test_hash,
   5106		.fips_allowed = 1,
   5107		.suite = {
   5108			.hash = __VECS(hmac_sha1_tv_template)
   5109		}
   5110	}, {
   5111		.alg = "hmac(sha224)",
   5112		.test = alg_test_hash,
   5113		.fips_allowed = 1,
   5114		.suite = {
   5115			.hash = __VECS(hmac_sha224_tv_template)
   5116		}
   5117	}, {
   5118		.alg = "hmac(sha256)",
   5119		.test = alg_test_hash,
   5120		.fips_allowed = 1,
   5121		.suite = {
   5122			.hash = __VECS(hmac_sha256_tv_template)
   5123		}
   5124	}, {
   5125		.alg = "hmac(sha3-224)",
   5126		.test = alg_test_hash,
   5127		.fips_allowed = 1,
   5128		.suite = {
   5129			.hash = __VECS(hmac_sha3_224_tv_template)
   5130		}
   5131	}, {
   5132		.alg = "hmac(sha3-256)",
   5133		.test = alg_test_hash,
   5134		.fips_allowed = 1,
   5135		.suite = {
   5136			.hash = __VECS(hmac_sha3_256_tv_template)
   5137		}
   5138	}, {
   5139		.alg = "hmac(sha3-384)",
   5140		.test = alg_test_hash,
   5141		.fips_allowed = 1,
   5142		.suite = {
   5143			.hash = __VECS(hmac_sha3_384_tv_template)
   5144		}
   5145	}, {
   5146		.alg = "hmac(sha3-512)",
   5147		.test = alg_test_hash,
   5148		.fips_allowed = 1,
   5149		.suite = {
   5150			.hash = __VECS(hmac_sha3_512_tv_template)
   5151		}
   5152	}, {
   5153		.alg = "hmac(sha384)",
   5154		.test = alg_test_hash,
   5155		.fips_allowed = 1,
   5156		.suite = {
   5157			.hash = __VECS(hmac_sha384_tv_template)
   5158		}
   5159	}, {
   5160		.alg = "hmac(sha512)",
   5161		.test = alg_test_hash,
   5162		.fips_allowed = 1,
   5163		.suite = {
   5164			.hash = __VECS(hmac_sha512_tv_template)
   5165		}
   5166	}, {
   5167		.alg = "hmac(sm3)",
   5168		.test = alg_test_hash,
   5169		.suite = {
   5170			.hash = __VECS(hmac_sm3_tv_template)
   5171		}
   5172	}, {
   5173		.alg = "hmac(streebog256)",
   5174		.test = alg_test_hash,
   5175		.suite = {
   5176			.hash = __VECS(hmac_streebog256_tv_template)
   5177		}
   5178	}, {
   5179		.alg = "hmac(streebog512)",
   5180		.test = alg_test_hash,
   5181		.suite = {
   5182			.hash = __VECS(hmac_streebog512_tv_template)
   5183		}
   5184	}, {
   5185		.alg = "jitterentropy_rng",
   5186		.fips_allowed = 1,
   5187		.test = alg_test_null,
   5188	}, {
   5189		.alg = "kw(aes)",
   5190		.test = alg_test_skcipher,
   5191		.fips_allowed = 1,
   5192		.suite = {
   5193			.cipher = __VECS(aes_kw_tv_template)
   5194		}
   5195	}, {
   5196		.alg = "lrw(aes)",
   5197		.generic_driver = "lrw(ecb(aes-generic))",
   5198		.test = alg_test_skcipher,
   5199		.suite = {
   5200			.cipher = __VECS(aes_lrw_tv_template)
   5201		}
   5202	}, {
   5203		.alg = "lrw(camellia)",
   5204		.generic_driver = "lrw(ecb(camellia-generic))",
   5205		.test = alg_test_skcipher,
   5206		.suite = {
   5207			.cipher = __VECS(camellia_lrw_tv_template)
   5208		}
   5209	}, {
   5210		.alg = "lrw(cast6)",
   5211		.generic_driver = "lrw(ecb(cast6-generic))",
   5212		.test = alg_test_skcipher,
   5213		.suite = {
   5214			.cipher = __VECS(cast6_lrw_tv_template)
   5215		}
   5216	}, {
   5217		.alg = "lrw(serpent)",
   5218		.generic_driver = "lrw(ecb(serpent-generic))",
   5219		.test = alg_test_skcipher,
   5220		.suite = {
   5221			.cipher = __VECS(serpent_lrw_tv_template)
   5222		}
   5223	}, {
   5224		.alg = "lrw(twofish)",
   5225		.generic_driver = "lrw(ecb(twofish-generic))",
   5226		.test = alg_test_skcipher,
   5227		.suite = {
   5228			.cipher = __VECS(tf_lrw_tv_template)
   5229		}
   5230	}, {
   5231		.alg = "lz4",
   5232		.test = alg_test_comp,
   5233		.fips_allowed = 1,
   5234		.suite = {
   5235			.comp = {
   5236				.comp = __VECS(lz4_comp_tv_template),
   5237				.decomp = __VECS(lz4_decomp_tv_template)
   5238			}
   5239		}
   5240	}, {
   5241		.alg = "lz4hc",
   5242		.test = alg_test_comp,
   5243		.fips_allowed = 1,
   5244		.suite = {
   5245			.comp = {
   5246				.comp = __VECS(lz4hc_comp_tv_template),
   5247				.decomp = __VECS(lz4hc_decomp_tv_template)
   5248			}
   5249		}
   5250	}, {
   5251		.alg = "lzo",
   5252		.test = alg_test_comp,
   5253		.fips_allowed = 1,
   5254		.suite = {
   5255			.comp = {
   5256				.comp = __VECS(lzo_comp_tv_template),
   5257				.decomp = __VECS(lzo_decomp_tv_template)
   5258			}
   5259		}
   5260	}, {
   5261		.alg = "lzo-rle",
   5262		.test = alg_test_comp,
   5263		.fips_allowed = 1,
   5264		.suite = {
   5265			.comp = {
   5266				.comp = __VECS(lzorle_comp_tv_template),
   5267				.decomp = __VECS(lzorle_decomp_tv_template)
   5268			}
   5269		}
   5270	}, {
   5271		.alg = "md4",
   5272		.test = alg_test_hash,
   5273		.suite = {
   5274			.hash = __VECS(md4_tv_template)
   5275		}
   5276	}, {
   5277		.alg = "md5",
   5278		.test = alg_test_hash,
   5279		.suite = {
   5280			.hash = __VECS(md5_tv_template)
   5281		}
   5282	}, {
   5283		.alg = "michael_mic",
   5284		.test = alg_test_hash,
   5285		.suite = {
   5286			.hash = __VECS(michael_mic_tv_template)
   5287		}
   5288	}, {
   5289		.alg = "nhpoly1305",
   5290		.test = alg_test_hash,
   5291		.suite = {
   5292			.hash = __VECS(nhpoly1305_tv_template)
   5293		}
   5294	}, {
   5295		.alg = "ofb(aes)",
   5296		.test = alg_test_skcipher,
   5297		.fips_allowed = 1,
   5298		.suite = {
   5299			.cipher = __VECS(aes_ofb_tv_template)
   5300		}
   5301	}, {
   5302		/* Same as ofb(aes) except the key is stored in
   5303		 * hardware secure memory which we reference by index
   5304		 */
   5305		.alg = "ofb(paes)",
   5306		.test = alg_test_null,
   5307		.fips_allowed = 1,
   5308	}, {
   5309		.alg = "ofb(sm4)",
   5310		.test = alg_test_skcipher,
   5311		.suite = {
   5312			.cipher = __VECS(sm4_ofb_tv_template)
   5313		}
   5314	}, {
   5315		.alg = "pcbc(fcrypt)",
   5316		.test = alg_test_skcipher,
   5317		.suite = {
   5318			.cipher = __VECS(fcrypt_pcbc_tv_template)
   5319		}
   5320	}, {
   5321		.alg = "pkcs1pad(rsa,sha224)",
   5322		.test = alg_test_null,
   5323		.fips_allowed = 1,
   5324	}, {
   5325		.alg = "pkcs1pad(rsa,sha256)",
   5326		.test = alg_test_akcipher,
   5327		.fips_allowed = 1,
   5328		.suite = {
   5329			.akcipher = __VECS(pkcs1pad_rsa_tv_template)
   5330		}
   5331	}, {
   5332		.alg = "pkcs1pad(rsa,sha384)",
   5333		.test = alg_test_null,
   5334		.fips_allowed = 1,
   5335	}, {
   5336		.alg = "pkcs1pad(rsa,sha512)",
   5337		.test = alg_test_null,
   5338		.fips_allowed = 1,
   5339	}, {
   5340		.alg = "poly1305",
   5341		.test = alg_test_hash,
   5342		.suite = {
   5343			.hash = __VECS(poly1305_tv_template)
   5344		}
   5345	}, {
   5346		.alg = "rfc3686(ctr(aes))",
   5347		.test = alg_test_skcipher,
   5348		.fips_allowed = 1,
   5349		.suite = {
   5350			.cipher = __VECS(aes_ctr_rfc3686_tv_template)
   5351		}
   5352	}, {
   5353		.alg = "rfc3686(ctr(sm4))",
   5354		.test = alg_test_skcipher,
   5355		.suite = {
   5356			.cipher = __VECS(sm4_ctr_rfc3686_tv_template)
   5357		}
   5358	}, {
   5359		.alg = "rfc4106(gcm(aes))",
   5360		.generic_driver = "rfc4106(gcm_base(ctr(aes-generic),ghash-generic))",
   5361		.test = alg_test_aead,
   5362		.fips_allowed = 1,
   5363		.suite = {
   5364			.aead = {
   5365				____VECS(aes_gcm_rfc4106_tv_template),
   5366				.einval_allowed = 1,
   5367				.aad_iv = 1,
   5368			}
   5369		}
   5370	}, {
   5371		.alg = "rfc4309(ccm(aes))",
   5372		.generic_driver = "rfc4309(ccm_base(ctr(aes-generic),cbcmac(aes-generic)))",
   5373		.test = alg_test_aead,
   5374		.fips_allowed = 1,
   5375		.suite = {
   5376			.aead = {
   5377				____VECS(aes_ccm_rfc4309_tv_template),
   5378				.einval_allowed = 1,
   5379				.aad_iv = 1,
   5380			}
   5381		}
   5382	}, {
   5383		.alg = "rfc4543(gcm(aes))",
   5384		.generic_driver = "rfc4543(gcm_base(ctr(aes-generic),ghash-generic))",
   5385		.test = alg_test_aead,
   5386		.suite = {
   5387			.aead = {
   5388				____VECS(aes_gcm_rfc4543_tv_template),
   5389				.einval_allowed = 1,
   5390				.aad_iv = 1,
   5391			}
   5392		}
   5393	}, {
   5394		.alg = "rfc7539(chacha20,poly1305)",
   5395		.test = alg_test_aead,
   5396		.suite = {
   5397			.aead = __VECS(rfc7539_tv_template)
   5398		}
   5399	}, {
   5400		.alg = "rfc7539esp(chacha20,poly1305)",
   5401		.test = alg_test_aead,
   5402		.suite = {
   5403			.aead = {
   5404				____VECS(rfc7539esp_tv_template),
   5405				.einval_allowed = 1,
   5406				.aad_iv = 1,
   5407			}
   5408		}
   5409	}, {
   5410		.alg = "rmd160",
   5411		.test = alg_test_hash,
   5412		.suite = {
   5413			.hash = __VECS(rmd160_tv_template)
   5414		}
   5415	}, {
   5416		.alg = "rsa",
   5417		.test = alg_test_akcipher,
   5418		.fips_allowed = 1,
   5419		.suite = {
   5420			.akcipher = __VECS(rsa_tv_template)
   5421		}
   5422	}, {
   5423		.alg = "sha1",
   5424		.test = alg_test_hash,
   5425		.fips_allowed = 1,
   5426		.suite = {
   5427			.hash = __VECS(sha1_tv_template)
   5428		}
   5429	}, {
   5430		.alg = "sha224",
   5431		.test = alg_test_hash,
   5432		.fips_allowed = 1,
   5433		.suite = {
   5434			.hash = __VECS(sha224_tv_template)
   5435		}
   5436	}, {
   5437		.alg = "sha256",
   5438		.test = alg_test_hash,
   5439		.fips_allowed = 1,
   5440		.suite = {
   5441			.hash = __VECS(sha256_tv_template)
   5442		}
   5443	}, {
   5444		.alg = "sha3-224",
   5445		.test = alg_test_hash,
   5446		.fips_allowed = 1,
   5447		.suite = {
   5448			.hash = __VECS(sha3_224_tv_template)
   5449		}
   5450	}, {
   5451		.alg = "sha3-256",
   5452		.test = alg_test_hash,
   5453		.fips_allowed = 1,
   5454		.suite = {
   5455			.hash = __VECS(sha3_256_tv_template)
   5456		}
   5457	}, {
   5458		.alg = "sha3-384",
   5459		.test = alg_test_hash,
   5460		.fips_allowed = 1,
   5461		.suite = {
   5462			.hash = __VECS(sha3_384_tv_template)
   5463		}
   5464	}, {
   5465		.alg = "sha3-512",
   5466		.test = alg_test_hash,
   5467		.fips_allowed = 1,
   5468		.suite = {
   5469			.hash = __VECS(sha3_512_tv_template)
   5470		}
   5471	}, {
   5472		.alg = "sha384",
   5473		.test = alg_test_hash,
   5474		.fips_allowed = 1,
   5475		.suite = {
   5476			.hash = __VECS(sha384_tv_template)
   5477		}
   5478	}, {
   5479		.alg = "sha512",
   5480		.test = alg_test_hash,
   5481		.fips_allowed = 1,
   5482		.suite = {
   5483			.hash = __VECS(sha512_tv_template)
   5484		}
   5485	}, {
   5486		.alg = "sm2",
   5487		.test = alg_test_akcipher,
   5488		.suite = {
   5489			.akcipher = __VECS(sm2_tv_template)
   5490		}
   5491	}, {
   5492		.alg = "sm3",
   5493		.test = alg_test_hash,
   5494		.suite = {
   5495			.hash = __VECS(sm3_tv_template)
   5496		}
   5497	}, {
   5498		.alg = "streebog256",
   5499		.test = alg_test_hash,
   5500		.suite = {
   5501			.hash = __VECS(streebog256_tv_template)
   5502		}
   5503	}, {
   5504		.alg = "streebog512",
   5505		.test = alg_test_hash,
   5506		.suite = {
   5507			.hash = __VECS(streebog512_tv_template)
   5508		}
   5509	}, {
   5510		.alg = "vmac64(aes)",
   5511		.test = alg_test_hash,
   5512		.suite = {
   5513			.hash = __VECS(vmac64_aes_tv_template)
   5514		}
   5515	}, {
   5516		.alg = "wp256",
   5517		.test = alg_test_hash,
   5518		.suite = {
   5519			.hash = __VECS(wp256_tv_template)
   5520		}
   5521	}, {
   5522		.alg = "wp384",
   5523		.test = alg_test_hash,
   5524		.suite = {
   5525			.hash = __VECS(wp384_tv_template)
   5526		}
   5527	}, {
   5528		.alg = "wp512",
   5529		.test = alg_test_hash,
   5530		.suite = {
   5531			.hash = __VECS(wp512_tv_template)
   5532		}
   5533	}, {
   5534		.alg = "xcbc(aes)",
   5535		.test = alg_test_hash,
   5536		.suite = {
   5537			.hash = __VECS(aes_xcbc128_tv_template)
   5538		}
   5539	}, {
   5540		.alg = "xchacha12",
   5541		.test = alg_test_skcipher,
   5542		.suite = {
   5543			.cipher = __VECS(xchacha12_tv_template)
   5544		},
   5545	}, {
   5546		.alg = "xchacha20",
   5547		.test = alg_test_skcipher,
   5548		.suite = {
   5549			.cipher = __VECS(xchacha20_tv_template)
   5550		},
   5551	}, {
   5552		.alg = "xts(aes)",
   5553		.generic_driver = "xts(ecb(aes-generic))",
   5554		.test = alg_test_skcipher,
   5555		.fips_allowed = 1,
   5556		.suite = {
   5557			.cipher = __VECS(aes_xts_tv_template)
   5558		}
   5559	}, {
   5560		.alg = "xts(camellia)",
   5561		.generic_driver = "xts(ecb(camellia-generic))",
   5562		.test = alg_test_skcipher,
   5563		.suite = {
   5564			.cipher = __VECS(camellia_xts_tv_template)
   5565		}
   5566	}, {
   5567		.alg = "xts(cast6)",
   5568		.generic_driver = "xts(ecb(cast6-generic))",
   5569		.test = alg_test_skcipher,
   5570		.suite = {
   5571			.cipher = __VECS(cast6_xts_tv_template)
   5572		}
   5573	}, {
   5574		/* Same as xts(aes) except the key is stored in
   5575		 * hardware secure memory which we reference by index
   5576		 */
   5577		.alg = "xts(paes)",
   5578		.test = alg_test_null,
   5579		.fips_allowed = 1,
   5580	}, {
   5581		.alg = "xts(serpent)",
   5582		.generic_driver = "xts(ecb(serpent-generic))",
   5583		.test = alg_test_skcipher,
   5584		.suite = {
   5585			.cipher = __VECS(serpent_xts_tv_template)
   5586		}
   5587	}, {
   5588		.alg = "xts(twofish)",
   5589		.generic_driver = "xts(ecb(twofish-generic))",
   5590		.test = alg_test_skcipher,
   5591		.suite = {
   5592			.cipher = __VECS(tf_xts_tv_template)
   5593		}
   5594	}, {
   5595#if IS_ENABLED(CONFIG_CRYPTO_PAES_S390)
   5596		.alg = "xts-paes-s390",
   5597		.fips_allowed = 1,
   5598		.test = alg_test_skcipher,
   5599		.suite = {
   5600			.cipher = __VECS(aes_xts_tv_template)
   5601		}
   5602	}, {
   5603#endif
   5604		.alg = "xts4096(paes)",
   5605		.test = alg_test_null,
   5606		.fips_allowed = 1,
   5607	}, {
   5608		.alg = "xts512(paes)",
   5609		.test = alg_test_null,
   5610		.fips_allowed = 1,
   5611	}, {
   5612		.alg = "xxhash64",
   5613		.test = alg_test_hash,
   5614		.fips_allowed = 1,
   5615		.suite = {
   5616			.hash = __VECS(xxhash64_tv_template)
   5617		}
   5618	}, {
   5619		.alg = "zlib-deflate",
   5620		.test = alg_test_comp,
   5621		.fips_allowed = 1,
   5622		.suite = {
   5623			.comp = {
   5624				.comp = __VECS(zlib_deflate_comp_tv_template),
   5625				.decomp = __VECS(zlib_deflate_decomp_tv_template)
   5626			}
   5627		}
   5628	}, {
   5629		.alg = "zstd",
   5630		.test = alg_test_comp,
   5631		.fips_allowed = 1,
   5632		.suite = {
   5633			.comp = {
   5634				.comp = __VECS(zstd_comp_tv_template),
   5635				.decomp = __VECS(zstd_decomp_tv_template)
   5636			}
   5637		}
   5638	}
   5639};
   5640
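/*
 * Sanity checks run once at init time: alg_test_descs[] above must stay
 * sorted by .alg and free of duplicates, because alg_find_test() below
 * locates entries with a binary search.  These helpers only WARN; they do
 * not abort boot.
 */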
   5641static void alg_check_test_descs_order(void)
   5642{
   5643	int i;
   5644
   5645	for (i = 1; i < ARRAY_SIZE(alg_test_descs); i++) {
   5646		int diff = strcmp(alg_test_descs[i - 1].alg,
   5647				  alg_test_descs[i].alg);
   5648
   5649		if (WARN_ON(diff > 0)) {
   5650			pr_warn("testmgr: alg_test_descs entries in wrong order: '%s' before '%s'\n",
   5651				alg_test_descs[i - 1].alg,
   5652				alg_test_descs[i].alg);
   5653		}
   5654
   5655		if (WARN_ON(diff == 0)) {
   5656			pr_warn("testmgr: duplicate alg_test_descs entry: '%s'\n",
   5657				alg_test_descs[i].alg);
   5658		}
   5659	}
   5660}
   5661
   5662static void alg_check_testvec_configs(void)
   5663{
   5664	int i;
   5665
   5666	for (i = 0; i < ARRAY_SIZE(default_cipher_testvec_configs); i++)
   5667		WARN_ON(!valid_testvec_config(
   5668				&default_cipher_testvec_configs[i]));
   5669
   5670	for (i = 0; i < ARRAY_SIZE(default_hash_testvec_configs); i++)
   5671		WARN_ON(!valid_testvec_config(
   5672				&default_hash_testvec_configs[i]));
   5673}
   5674
   5675static void testmgr_onetime_init(void)
   5676{
   5677	alg_check_test_descs_order();
   5678	alg_check_testvec_configs();
   5679
   5680#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
   5681	pr_warn("alg: extra crypto tests enabled.  This is intended for developer use only.\n");
   5682#endif
   5683}
   5684
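/*
 * Binary search over the sorted alg_test_descs[] table.  Returns the index
 * of the matching entry, or -1 if no test is registered for @alg.
 */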
   5685static int alg_find_test(const char *alg)
   5686{
   5687	int start = 0;
   5688	int end = ARRAY_SIZE(alg_test_descs);
   5689
   5690	while (start < end) {
   5691		int i = (start + end) / 2;
   5692		int diff = strcmp(alg_test_descs[i].alg, alg);
   5693
   5694		if (diff > 0) {
   5695			end = i;
   5696			continue;
   5697		}
   5698
   5699		if (diff < 0) {
   5700			start = i + 1;
   5701			continue;
   5702		}
   5703
   5704		return i;
   5705	}
   5706
   5707	return -1;
   5708}
   5709
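/*
 * Helper used when an algorithm may not be used in FIPS mode: it logs the
 * condition and hands -ECANCELED back to the caller of alg_test().
 */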
   5710static int alg_fips_disabled(const char *driver, const char *alg)
   5711{
   5712	pr_info("alg: %s (%s) is disabled due to FIPS\n", alg, driver);
   5713
   5714	return -ECANCELED;
   5715}
   5716
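/*
 * alg_test() - run the self-tests for one algorithm implementation
 * @driver: implementation name (typically the cra_driver_name)
 * @alg: algorithm name (typically the cra_name)
 * @type: algorithm type flags
 * @mask: algorithm type mask
 *
 * Test entries are looked up under both @alg and @driver; bare single-block
 * ciphers are first rewritten to "ecb(<alg>)".  A failure panics the kernel
 * when fips_enabled or panic_on_fail is set, and otherwise WARNs and returns
 * a nonzero result.
 */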
   5717int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
   5718{
   5719	int i;
   5720	int j;
   5721	int rc;
   5722
   5723	if (!fips_enabled && notests) {
   5724		printk_once(KERN_INFO "alg: self-tests disabled\n");
   5725		return 0;
   5726	}
   5727
   5728	DO_ONCE(testmgr_onetime_init);
   5729
   5730	if ((type & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_CIPHER) {
   5731		char nalg[CRYPTO_MAX_ALG_NAME];
   5732
   5733		if (snprintf(nalg, sizeof(nalg), "ecb(%s)", alg) >=
   5734		    sizeof(nalg))
   5735			return -ENAMETOOLONG;
   5736
   5737		i = alg_find_test(nalg);
   5738		if (i < 0)
   5739			goto notest;
   5740
   5741		if (fips_enabled && !alg_test_descs[i].fips_allowed)
   5742			goto non_fips_alg;
   5743
   5744		rc = alg_test_cipher(alg_test_descs + i, driver, type, mask);
   5745		goto test_done;
   5746	}
   5747
   5748	i = alg_find_test(alg);
   5749	j = alg_find_test(driver);
   5750	if (i < 0 && j < 0)
   5751		goto notest;
   5752
   5753	if (fips_enabled) {
   5754		if (j >= 0 && !alg_test_descs[j].fips_allowed)
   5755			return -EINVAL;
   5756
   5757		if (i >= 0 && !alg_test_descs[i].fips_allowed)
   5758			goto non_fips_alg;
   5759	}
   5760
   5761	rc = 0;
   5762	if (i >= 0)
   5763		rc |= alg_test_descs[i].test(alg_test_descs + i, driver,
   5764					     type, mask);
   5765	if (j >= 0 && j != i)
   5766		rc |= alg_test_descs[j].test(alg_test_descs + j, driver,
   5767					     type, mask);
   5768
   5769test_done:
   5770	if (rc) {
   5771		if (fips_enabled || panic_on_fail) {
   5772			fips_fail_notify();
   5773			panic("alg: self-tests for %s (%s) failed in %s mode!\n",
   5774			      driver, alg,
   5775			      fips_enabled ? "fips" : "panic_on_fail");
   5776		}
   5777		WARN(1, "alg: self-tests for %s (%s) failed (rc=%d)",
   5778		     driver, alg, rc);
   5779	} else {
   5780		if (fips_enabled)
   5781			pr_info("alg: self-tests for %s (%s) passed\n",
   5782				driver, alg);
   5783	}
   5784
   5785	return rc;
   5786
   5787notest:
   5788	printk(KERN_INFO "alg: No test for %s (%s)\n", alg, driver);
   5789
   5790	if (type & CRYPTO_ALG_FIPS_INTERNAL)
   5791		return alg_fips_disabled(driver, alg);
   5792
   5793	return 0;
   5794non_fips_alg:
   5795	return alg_fips_disabled(driver, alg);
   5796}
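/*
 * Illustrative example of the lookup above: "aes"/"aes-generic" is a bare
 * cipher, so it is matched against the "ecb(aes)" entry; "xts(aes)" is not,
 * so both the algorithm name and the driver name are searched and every
 * matching entry's ->test() hook is run.
 */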
   5797
   5798#endif /* CONFIG_CRYPTO_MANAGER_DISABLE_TESTS */
   5799
   5800EXPORT_SYMBOL_GPL(alg_test);