cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

otx_cptvf_algs.c (45863B)


      1// SPDX-License-Identifier: GPL-2.0
      2/* Marvell OcteonTX CPT driver
      3 *
      4 * Copyright (C) 2019 Marvell International Ltd.
      5 *
      6 * This program is free software; you can redistribute it and/or modify
      7 * it under the terms of the GNU General Public License version 2 as
      8 * published by the Free Software Foundation.
      9 */
     10
     11#include <crypto/aes.h>
     12#include <crypto/authenc.h>
     13#include <crypto/cryptd.h>
     14#include <crypto/des.h>
     15#include <crypto/internal/aead.h>
     16#include <crypto/sha1.h>
     17#include <crypto/sha2.h>
     18#include <crypto/xts.h>
     19#include <crypto/scatterwalk.h>
     20#include <linux/rtnetlink.h>
     21#include <linux/sort.h>
     22#include <linux/module.h>
     23#include "otx_cptvf.h"
     24#include "otx_cptvf_algs.h"
     25#include "otx_cptvf_reqmgr.h"
     26
     27#define CPT_MAX_VF_NUM	64
     28/* Size of salt in AES GCM mode */
     29#define AES_GCM_SALT_SIZE	4
     30/* Size of IV in AES GCM mode */
     31#define AES_GCM_IV_SIZE		8
     32/* Size of ICV (Integrity Check Value) in AES GCM mode */
     33#define AES_GCM_ICV_SIZE	16
     34/* Offset of IV in AES GCM mode */
     35#define AES_GCM_IV_OFFSET	8
     36#define CONTROL_WORD_LEN	8
     37#define KEY2_OFFSET		48
     38#define DMA_MODE_FLAG(dma_mode) \
     39	(((dma_mode) == OTX_CPT_DMA_GATHER_SCATTER) ? (1 << 7) : 0)
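        /*
         * Bit 7 of the major opcode selects gather/scatter DMA for a
         * request: input and output are then described by the component
         * lists built in update_input_data()/update_output_data() below
         * instead of by flat buffers.
         */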
     40
     41/* Truncated SHA digest size */
     42#define SHA1_TRUNC_DIGEST_SIZE		12
     43#define SHA256_TRUNC_DIGEST_SIZE	16
     44#define SHA384_TRUNC_DIGEST_SIZE	24
     45#define SHA512_TRUNC_DIGEST_SIZE	32
     46
     47static DEFINE_MUTEX(mutex);
     48static int is_crypto_registered;
     49
     50struct cpt_device_desc {
     51	enum otx_cptpf_type pf_type;
     52	struct pci_dev *dev;
     53	int num_queues;
     54};
     55
     56struct cpt_device_table {
     57	atomic_t count;
     58	struct cpt_device_desc desc[CPT_MAX_VF_NUM];
     59};
     60
     61static struct cpt_device_table se_devices = {
     62	.count = ATOMIC_INIT(0)
     63};
     64
     65static struct cpt_device_table ae_devices = {
     66	.count = ATOMIC_INIT(0)
     67};
     68
     69static inline int get_se_device(struct pci_dev **pdev, int *cpu_num)
     70{
     71	int count, ret = 0;
     72
     73	count = atomic_read(&se_devices.count);
     74	if (count < 1)
     75		return -ENODEV;
     76
     77	*cpu_num = get_cpu();
     78
     79	if (se_devices.desc[0].pf_type == OTX_CPT_SE) {
      80		/*
      81		 * On the OcteonTX platform there is one CPT instruction queue
      82		 * bound to each VF. We get maximum performance if one CPT
      83		 * queue is available to each CPU; otherwise CPT queues have
      84		 * to be shared between CPUs.
      85		 */
     86		if (*cpu_num >= count)
     87			*cpu_num %= count;
     88		*pdev = se_devices.desc[*cpu_num].dev;
     89	} else {
     90		pr_err("Unknown PF type %d\n", se_devices.desc[0].pf_type);
     91		ret = -EINVAL;
     92	}
     93	put_cpu();
     94
     95	return ret;
     96}
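        /*
         * The mapping above is a plain modulo round-robin; with e.g.
         * 8 SE queues, a request issued from CPU 10 lands on queue
         * 10 % 8 == 2. A minimal sketch of the same idea (hypothetical
         * helper, not part of this driver):
         *
         *	int queue_for_cpu(int cpu, int nqueues)
         *	{
         *		return cpu % nqueues;	// shared when cpus > queues
         *	}
         */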
     97
     98static inline int validate_hmac_cipher_null(struct otx_cpt_req_info *cpt_req)
     99{
    100	struct otx_cpt_req_ctx *rctx;
    101	struct aead_request *req;
    102	struct crypto_aead *tfm;
    103
    104	req = container_of(cpt_req->areq, struct aead_request, base);
    105	tfm = crypto_aead_reqtfm(req);
    106	rctx = aead_request_ctx(req);
    107	if (memcmp(rctx->fctx.hmac.s.hmac_calc,
    108		   rctx->fctx.hmac.s.hmac_recv,
    109		   crypto_aead_authsize(tfm)) != 0)
    110		return -EBADMSG;
    111
    112	return 0;
    113}
    114
    115static void otx_cpt_aead_callback(int status, void *arg1, void *arg2)
    116{
    117	struct otx_cpt_info_buffer *cpt_info = arg2;
    118	struct crypto_async_request *areq = arg1;
    119	struct otx_cpt_req_info *cpt_req;
    120	struct pci_dev *pdev;
    121
    122	if (!cpt_info)
    123		goto complete;
    124
    125	cpt_req = cpt_info->req;
    126	if (!status) {
     127		/*
     128		 * When the selected cipher is NULL we need to manually
     129		 * verify whether the calculated HMAC value matches the
     130		 * received HMAC value
     131		 */
    132		if (cpt_req->req_type == OTX_CPT_AEAD_ENC_DEC_NULL_REQ &&
    133		    !cpt_req->is_enc)
    134			status = validate_hmac_cipher_null(cpt_req);
    135	}
    136	pdev = cpt_info->pdev;
    137	do_request_cleanup(pdev, cpt_info);
    138
    139complete:
    140	if (areq)
    141		areq->complete(areq, status);
    142}
    143
    144static void output_iv_copyback(struct crypto_async_request *areq)
    145{
    146	struct otx_cpt_req_info *req_info;
    147	struct skcipher_request *sreq;
    148	struct crypto_skcipher *stfm;
    149	struct otx_cpt_req_ctx *rctx;
    150	struct otx_cpt_enc_ctx *ctx;
    151	u32 start, ivsize;
    152
    153	sreq = container_of(areq, struct skcipher_request, base);
    154	stfm = crypto_skcipher_reqtfm(sreq);
    155	ctx = crypto_skcipher_ctx(stfm);
    156	if (ctx->cipher_type == OTX_CPT_AES_CBC ||
    157	    ctx->cipher_type == OTX_CPT_DES3_CBC) {
    158		rctx = skcipher_request_ctx(sreq);
    159		req_info = &rctx->cpt_req;
    160		ivsize = crypto_skcipher_ivsize(stfm);
    161		start = sreq->cryptlen - ivsize;
    162
    163		if (req_info->is_enc) {
    164			scatterwalk_map_and_copy(sreq->iv, sreq->dst, start,
    165						 ivsize, 0);
    166		} else {
    167			if (sreq->src != sreq->dst) {
    168				scatterwalk_map_and_copy(sreq->iv, sreq->src,
    169							 start, ivsize, 0);
    170			} else {
    171				memcpy(sreq->iv, req_info->iv_out, ivsize);
    172				kfree(req_info->iv_out);
    173			}
    174		}
    175	}
    176}
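        /*
         * For CBC the chaining IV of a follow-up request is the last
         * ciphertext block of this one. On encryption, and on
         * out-of-place decryption, that block is still available in the
         * scatterlists; for in-place decryption it was saved to iv_out
         * in create_ctx_hdr() before being overwritten, so it is copied
         * back and freed here.
         */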
    177
    178static void otx_cpt_skcipher_callback(int status, void *arg1, void *arg2)
    179{
    180	struct otx_cpt_info_buffer *cpt_info = arg2;
    181	struct crypto_async_request *areq = arg1;
    182	struct pci_dev *pdev;
    183
    184	if (areq) {
    185		if (!status)
    186			output_iv_copyback(areq);
    187		if (cpt_info) {
    188			pdev = cpt_info->pdev;
    189			do_request_cleanup(pdev, cpt_info);
    190		}
    191		areq->complete(areq, status);
    192	}
    193}
    194
    195static inline void update_input_data(struct otx_cpt_req_info *req_info,
    196				     struct scatterlist *inp_sg,
    197				     u32 nbytes, u32 *argcnt)
    198{
    199	req_info->req.dlen += nbytes;
    200
    201	while (nbytes) {
    202		u32 len = min(nbytes, inp_sg->length);
    203		u8 *ptr = sg_virt(inp_sg);
    204
    205		req_info->in[*argcnt].vptr = (void *)ptr;
    206		req_info->in[*argcnt].size = len;
    207		nbytes -= len;
    208		++(*argcnt);
    209		inp_sg = sg_next(inp_sg);
    210	}
    211}
    212
    213static inline void update_output_data(struct otx_cpt_req_info *req_info,
    214				      struct scatterlist *outp_sg,
    215				      u32 offset, u32 nbytes, u32 *argcnt)
    216{
    217	req_info->rlen += nbytes;
    218
    219	while (nbytes) {
    220		u32 len = min(nbytes, outp_sg->length - offset);
    221		u8 *ptr = sg_virt(outp_sg);
    222
    223		req_info->out[*argcnt].vptr = (void *) (ptr + offset);
    224		req_info->out[*argcnt].size = len;
    225		nbytes -= len;
    226		++(*argcnt);
    227		offset = 0;
    228		outp_sg = sg_next(outp_sg);
    229	}
    230}
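        /*
         * Both helpers walk a scatterlist and append one gather/scatter
         * component per (partial) entry, accumulating dlen/rlen as they
         * go. Roughly:
         *
         *	while (nbytes) {
         *		len = min(nbytes, sg->length);	// clamp to this entry
         *		add_component(sg_virt(sg), len);
         *		nbytes -= len;
         *		sg = sg_next(sg);
         *	}
         *
         * add_component() here is shorthand for filling the next
         * req_info->in[]/out[] slot, not a real helper in this file.
         */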
    231
    232static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc,
    233				 u32 *argcnt)
    234{
    235	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
    236	struct otx_cpt_req_ctx *rctx = skcipher_request_ctx(req);
    237	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
    238	struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
    239	struct otx_cpt_enc_ctx *ctx = crypto_tfm_ctx(tfm);
    240	struct otx_cpt_fc_ctx *fctx = &rctx->fctx;
    241	int ivsize = crypto_skcipher_ivsize(stfm);
    242	u32 start = req->cryptlen - ivsize;
    243	gfp_t flags;
    244
    245	flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
    246			GFP_KERNEL : GFP_ATOMIC;
    247	req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
    248	req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;
    249
    250	req_info->req.opcode.s.major = OTX_CPT_MAJOR_OP_FC |
    251				DMA_MODE_FLAG(OTX_CPT_DMA_GATHER_SCATTER);
    252	if (enc) {
    253		req_info->req.opcode.s.minor = 2;
    254	} else {
    255		req_info->req.opcode.s.minor = 3;
    256		if ((ctx->cipher_type == OTX_CPT_AES_CBC ||
    257		    ctx->cipher_type == OTX_CPT_DES3_CBC) &&
    258		    req->src == req->dst) {
    259			req_info->iv_out = kmalloc(ivsize, flags);
    260			if (!req_info->iv_out)
    261				return -ENOMEM;
    262
    263			scatterwalk_map_and_copy(req_info->iv_out, req->src,
    264						 start, ivsize, 0);
    265		}
    266	}
    267	/* Encryption data length */
    268	req_info->req.param1 = req->cryptlen;
    269	/* Authentication data length */
    270	req_info->req.param2 = 0;
    271
    272	fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
    273	fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
    274	fctx->enc.enc_ctrl.e.iv_source = OTX_CPT_FROM_CPTR;
    275
    276	if (ctx->cipher_type == OTX_CPT_AES_XTS)
    277		memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len * 2);
    278	else
    279		memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len);
    280
    281	memcpy(fctx->enc.encr_iv, req->iv, crypto_skcipher_ivsize(stfm));
    282
    283	fctx->enc.enc_ctrl.flags = cpu_to_be64(fctx->enc.enc_ctrl.cflags);
    284
     285	/*
     286	 * Store packet data information in the offset control
     287	 * word, which occupies the first 8 bytes of the input
     288	 */
    289	req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
    290	req_info->in[*argcnt].size = CONTROL_WORD_LEN;
    291	req_info->req.dlen += CONTROL_WORD_LEN;
    292	++(*argcnt);
    293
    294	req_info->in[*argcnt].vptr = (u8 *)fctx;
    295	req_info->in[*argcnt].size = sizeof(struct otx_cpt_fc_ctx);
    296	req_info->req.dlen += sizeof(struct otx_cpt_fc_ctx);
    297
    298	++(*argcnt);
    299
    300	return 0;
    301}
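        /*
         * After create_ctx_hdr() the gather list for a cipher request
         * starts with:
         *
         *	in[0]: offset control word	(8 bytes)
         *	in[1]: struct otx_cpt_fc_ctx	(cipher type, key, IV)
         *
         * create_input_list() then appends the plaintext/ciphertext
         * components from req->src.
         */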
    302
    303static inline u32 create_input_list(struct skcipher_request *req, u32 enc,
    304				    u32 enc_iv_len)
    305{
    306	struct otx_cpt_req_ctx *rctx = skcipher_request_ctx(req);
    307	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
    308	u32 argcnt =  0;
    309	int ret;
    310
    311	ret = create_ctx_hdr(req, enc, &argcnt);
    312	if (ret)
    313		return ret;
    314
    315	update_input_data(req_info, req->src, req->cryptlen, &argcnt);
    316	req_info->incnt = argcnt;
    317
    318	return 0;
    319}
    320
    321static inline void create_output_list(struct skcipher_request *req,
    322				      u32 enc_iv_len)
    323{
    324	struct otx_cpt_req_ctx *rctx = skcipher_request_ctx(req);
    325	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
    326	u32 argcnt = 0;
    327
     328	/*
     329	 * OUTPUT buffer processing:
     330	 * AES encryption/decryption output is received in
     331	 * the following format:
     332	 *
     333	 * |------ IV ------|----- ENCRYPTED/DECRYPTED DATA -----|
     334	 * |    16 bytes    |  request enc/dec data length (CBC) |
     335	 */
    336	update_output_data(req_info, req->dst, 0, req->cryptlen, &argcnt);
    337	req_info->outcnt = argcnt;
    338}
    339
    340static inline int cpt_enc_dec(struct skcipher_request *req, u32 enc)
    341{
    342	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
    343	struct otx_cpt_req_ctx *rctx = skcipher_request_ctx(req);
    344	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
    345	u32 enc_iv_len = crypto_skcipher_ivsize(stfm);
    346	struct pci_dev *pdev;
    347	int status, cpu_num;
    348
    349	/* Validate that request doesn't exceed maximum CPT supported size */
    350	if (req->cryptlen > OTX_CPT_MAX_REQ_SIZE)
    351		return -E2BIG;
    352
    353	/* Clear control words */
    354	rctx->ctrl_word.flags = 0;
    355	rctx->fctx.enc.enc_ctrl.flags = 0;
    356
    357	status = create_input_list(req, enc, enc_iv_len);
    358	if (status)
    359		return status;
    360	create_output_list(req, enc_iv_len);
    361
    362	status = get_se_device(&pdev, &cpu_num);
    363	if (status)
    364		return status;
    365
    366	req_info->callback = (void *)otx_cpt_skcipher_callback;
    367	req_info->areq = &req->base;
    368	req_info->req_type = OTX_CPT_ENC_DEC_REQ;
    369	req_info->is_enc = enc;
    370	req_info->is_trunc_hmac = false;
    371	req_info->ctrl.s.grp = 0;
    372
     373	/*
     374	 * We perform an asynchronous send; once the request
     375	 * has completed, the driver notifies us through the
     376	 * registered callback function
     377	 */
    378	status = otx_cpt_do_request(pdev, req_info, cpu_num);
    379
    380	return status;
    381}
    382
    383static int otx_cpt_skcipher_encrypt(struct skcipher_request *req)
    384{
    385	return cpt_enc_dec(req, true);
    386}
    387
    388static int otx_cpt_skcipher_decrypt(struct skcipher_request *req)
    389{
    390	return cpt_enc_dec(req, false);
    391}
    392
    393static int otx_cpt_skcipher_xts_setkey(struct crypto_skcipher *tfm,
    394				       const u8 *key, u32 keylen)
    395{
    396	struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
    397	const u8 *key2 = key + (keylen / 2);
    398	const u8 *key1 = key;
    399	int ret;
    400
    401	ret = xts_check_key(crypto_skcipher_tfm(tfm), key, keylen);
    402	if (ret)
    403		return ret;
    404	ctx->key_len = keylen;
    405	memcpy(ctx->enc_key, key1, keylen / 2);
    406	memcpy(ctx->enc_key + KEY2_OFFSET, key2, keylen / 2);
    407	ctx->cipher_type = OTX_CPT_AES_XTS;
    408	switch (ctx->key_len) {
    409	case 2 * AES_KEYSIZE_128:
    410		ctx->key_type = OTX_CPT_AES_128_BIT;
    411		break;
    412	case 2 * AES_KEYSIZE_256:
    413		ctx->key_type = OTX_CPT_AES_256_BIT;
    414		break;
    415	default:
    416		return -EINVAL;
    417	}
    418
    419	return 0;
    420}
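        /*
         * xts(aes) keys are two concatenated AES keys; key1 is stored at
         * the start of enc_key and key2 at KEY2_OFFSET (48), so a 64-byte
         * key yields two 32-byte keys and OTX_CPT_AES_256_BIT.
         */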
    421
    422static int cpt_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
    423			  u32 keylen, u8 cipher_type)
    424{
    425	struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
    426
    427	if (keylen != DES3_EDE_KEY_SIZE)
    428		return -EINVAL;
    429
    430	ctx->key_len = keylen;
    431	ctx->cipher_type = cipher_type;
    432
    433	memcpy(ctx->enc_key, key, keylen);
    434
    435	return 0;
    436}
    437
    438static int cpt_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
    439			  u32 keylen, u8 cipher_type)
    440{
    441	struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
    442
    443	switch (keylen) {
    444	case AES_KEYSIZE_128:
    445		ctx->key_type = OTX_CPT_AES_128_BIT;
    446		break;
    447	case AES_KEYSIZE_192:
    448		ctx->key_type = OTX_CPT_AES_192_BIT;
    449		break;
    450	case AES_KEYSIZE_256:
    451		ctx->key_type = OTX_CPT_AES_256_BIT;
    452		break;
    453	default:
    454		return -EINVAL;
    455	}
    456	ctx->key_len = keylen;
    457	ctx->cipher_type = cipher_type;
    458
    459	memcpy(ctx->enc_key, key, keylen);
    460
    461	return 0;
    462}
    463
    464static int otx_cpt_skcipher_cbc_aes_setkey(struct crypto_skcipher *tfm,
    465					   const u8 *key, u32 keylen)
    466{
    467	return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_CBC);
    468}
    469
    470static int otx_cpt_skcipher_ecb_aes_setkey(struct crypto_skcipher *tfm,
    471					   const u8 *key, u32 keylen)
    472{
    473	return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_ECB);
    474}
    475
    476static int otx_cpt_skcipher_cfb_aes_setkey(struct crypto_skcipher *tfm,
    477					   const u8 *key, u32 keylen)
    478{
    479	return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_CFB);
    480}
    481
    482static int otx_cpt_skcipher_cbc_des3_setkey(struct crypto_skcipher *tfm,
    483					    const u8 *key, u32 keylen)
    484{
    485	return cpt_des_setkey(tfm, key, keylen, OTX_CPT_DES3_CBC);
    486}
    487
    488static int otx_cpt_skcipher_ecb_des3_setkey(struct crypto_skcipher *tfm,
    489					    const u8 *key, u32 keylen)
    490{
    491	return cpt_des_setkey(tfm, key, keylen, OTX_CPT_DES3_ECB);
    492}
    493
    494static int otx_cpt_enc_dec_init(struct crypto_skcipher *tfm)
    495{
    496	struct otx_cpt_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
    497
    498	memset(ctx, 0, sizeof(*ctx));
     499	/*
     500	 * Additional memory for the skcipher_request is
     501	 * allocated since the cryptd daemon uses this memory
     502	 * for its request_ctx information
     503	 */
    504	crypto_skcipher_set_reqsize(tfm, sizeof(struct otx_cpt_req_ctx) +
    505					sizeof(struct skcipher_request));
    506
    507	return 0;
    508}
    509
    510static int cpt_aead_init(struct crypto_aead *tfm, u8 cipher_type, u8 mac_type)
    511{
    512	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
    513
    514	ctx->cipher_type = cipher_type;
    515	ctx->mac_type = mac_type;
    516
     517	/*
     518	 * When the selected cipher is NULL we use the HMAC opcode instead
     519	 * of the FLEXICRYPTO opcode, therefore we don't need HASH algorithms
     520	 * for calculating ipad and opad
     521	 */
    522	if (ctx->cipher_type != OTX_CPT_CIPHER_NULL) {
    523		switch (ctx->mac_type) {
    524		case OTX_CPT_SHA1:
    525			ctx->hashalg = crypto_alloc_shash("sha1", 0,
    526							  CRYPTO_ALG_ASYNC);
    527			if (IS_ERR(ctx->hashalg))
    528				return PTR_ERR(ctx->hashalg);
    529			break;
    530
    531		case OTX_CPT_SHA256:
    532			ctx->hashalg = crypto_alloc_shash("sha256", 0,
    533							  CRYPTO_ALG_ASYNC);
    534			if (IS_ERR(ctx->hashalg))
    535				return PTR_ERR(ctx->hashalg);
    536			break;
    537
    538		case OTX_CPT_SHA384:
    539			ctx->hashalg = crypto_alloc_shash("sha384", 0,
    540							  CRYPTO_ALG_ASYNC);
    541			if (IS_ERR(ctx->hashalg))
    542				return PTR_ERR(ctx->hashalg);
    543			break;
    544
    545		case OTX_CPT_SHA512:
    546			ctx->hashalg = crypto_alloc_shash("sha512", 0,
    547							  CRYPTO_ALG_ASYNC);
    548			if (IS_ERR(ctx->hashalg))
    549				return PTR_ERR(ctx->hashalg);
    550			break;
    551		}
    552	}
    553
    554	crypto_aead_set_reqsize(tfm, sizeof(struct otx_cpt_req_ctx));
    555
    556	return 0;
    557}
    558
    559static int otx_cpt_aead_cbc_aes_sha1_init(struct crypto_aead *tfm)
    560{
    561	return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA1);
    562}
    563
    564static int otx_cpt_aead_cbc_aes_sha256_init(struct crypto_aead *tfm)
    565{
    566	return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA256);
    567}
    568
    569static int otx_cpt_aead_cbc_aes_sha384_init(struct crypto_aead *tfm)
    570{
    571	return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA384);
    572}
    573
    574static int otx_cpt_aead_cbc_aes_sha512_init(struct crypto_aead *tfm)
    575{
    576	return cpt_aead_init(tfm, OTX_CPT_AES_CBC, OTX_CPT_SHA512);
    577}
    578
    579static int otx_cpt_aead_ecb_null_sha1_init(struct crypto_aead *tfm)
    580{
    581	return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA1);
    582}
    583
    584static int otx_cpt_aead_ecb_null_sha256_init(struct crypto_aead *tfm)
    585{
    586	return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA256);
    587}
    588
    589static int otx_cpt_aead_ecb_null_sha384_init(struct crypto_aead *tfm)
    590{
    591	return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA384);
    592}
    593
    594static int otx_cpt_aead_ecb_null_sha512_init(struct crypto_aead *tfm)
    595{
    596	return cpt_aead_init(tfm, OTX_CPT_CIPHER_NULL, OTX_CPT_SHA512);
    597}
    598
    599static int otx_cpt_aead_gcm_aes_init(struct crypto_aead *tfm)
    600{
    601	return cpt_aead_init(tfm, OTX_CPT_AES_GCM, OTX_CPT_MAC_NULL);
    602}
    603
    604static void otx_cpt_aead_exit(struct crypto_aead *tfm)
    605{
    606	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
    607
    608	kfree(ctx->ipad);
    609	kfree(ctx->opad);
    610	if (ctx->hashalg)
    611		crypto_free_shash(ctx->hashalg);
    612	kfree(ctx->sdesc);
    613}
    614
    615/*
    616 * Validate the Integrity Check Value size (i.e. the authentication
    617 * tag length)
    618 */
    619static int otx_cpt_aead_set_authsize(struct crypto_aead *tfm,
    620				     unsigned int authsize)
    621{
    622	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
    623
    624	switch (ctx->mac_type) {
    625	case OTX_CPT_SHA1:
    626		if (authsize != SHA1_DIGEST_SIZE &&
    627		    authsize != SHA1_TRUNC_DIGEST_SIZE)
    628			return -EINVAL;
    629
    630		if (authsize == SHA1_TRUNC_DIGEST_SIZE)
    631			ctx->is_trunc_hmac = true;
    632		break;
    633
    634	case OTX_CPT_SHA256:
    635		if (authsize != SHA256_DIGEST_SIZE &&
    636		    authsize != SHA256_TRUNC_DIGEST_SIZE)
    637			return -EINVAL;
    638
    639		if (authsize == SHA256_TRUNC_DIGEST_SIZE)
    640			ctx->is_trunc_hmac = true;
    641		break;
    642
    643	case OTX_CPT_SHA384:
    644		if (authsize != SHA384_DIGEST_SIZE &&
    645		    authsize != SHA384_TRUNC_DIGEST_SIZE)
    646			return -EINVAL;
    647
    648		if (authsize == SHA384_TRUNC_DIGEST_SIZE)
    649			ctx->is_trunc_hmac = true;
    650		break;
    651
    652	case OTX_CPT_SHA512:
    653		if (authsize != SHA512_DIGEST_SIZE &&
    654		    authsize != SHA512_TRUNC_DIGEST_SIZE)
    655			return -EINVAL;
    656
    657		if (authsize == SHA512_TRUNC_DIGEST_SIZE)
    658			ctx->is_trunc_hmac = true;
    659		break;
    660
    661	case OTX_CPT_MAC_NULL:
    662		if (ctx->cipher_type == OTX_CPT_AES_GCM) {
    663			if (authsize != AES_GCM_ICV_SIZE)
    664				return -EINVAL;
    665		} else
    666			return -EINVAL;
    667		break;
    668
    669	default:
    670		return -EINVAL;
    671	}
    672
    673	tfm->authsize = authsize;
    674	return 0;
    675}
    676
    677static struct otx_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg)
    678{
    679	struct otx_cpt_sdesc *sdesc;
    680	int size;
    681
    682	size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
    683	sdesc = kmalloc(size, GFP_KERNEL);
    684	if (!sdesc)
    685		return NULL;
    686
    687	sdesc->shash.tfm = alg;
    688
    689	return sdesc;
    690}
    691
    692static inline void swap_data32(void *buf, u32 len)
    693{
    694	cpu_to_be32_array(buf, buf, len / 4);
    695}
    696
    697static inline void swap_data64(void *buf, u32 len)
    698{
    699	__be64 *dst = buf;
    700	u64 *src = buf;
    701	int i = 0;
    702
    703	for (i = 0 ; i < len / 8; i++, src++, dst++)
    704		*dst = cpu_to_be64p(src);
    705}
    706
    707static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad)
    708{
    709	struct sha512_state *sha512;
    710	struct sha256_state *sha256;
    711	struct sha1_state *sha1;
    712
    713	switch (mac_type) {
    714	case OTX_CPT_SHA1:
    715		sha1 = (struct sha1_state *) in_pad;
    716		swap_data32(sha1->state, SHA1_DIGEST_SIZE);
    717		memcpy(out_pad, &sha1->state, SHA1_DIGEST_SIZE);
    718		break;
    719
    720	case OTX_CPT_SHA256:
    721		sha256 = (struct sha256_state *) in_pad;
    722		swap_data32(sha256->state, SHA256_DIGEST_SIZE);
    723		memcpy(out_pad, &sha256->state, SHA256_DIGEST_SIZE);
    724		break;
    725
    726	case OTX_CPT_SHA384:
    727	case OTX_CPT_SHA512:
    728		sha512 = (struct sha512_state *) in_pad;
    729		swap_data64(sha512->state, SHA512_DIGEST_SIZE);
    730		memcpy(out_pad, &sha512->state, SHA512_DIGEST_SIZE);
    731		break;
    732
    733	default:
    734		return -EINVAL;
    735	}
    736
    737	return 0;
    738}
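        /*
         * copy_pad() extracts only the internal state words from the
         * exported shash state and converts them to big-endian, which is
         * presumably the layout the CPT microcode resumes from. Note that
         * SHA-384 shares sha512_state, so the full SHA512_DIGEST_SIZE of
         * state is copied for it as well.
         */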
    739
    740static int aead_hmac_init(struct crypto_aead *cipher)
    741{
    742	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
    743	int state_size = crypto_shash_statesize(ctx->hashalg);
    744	int ds = crypto_shash_digestsize(ctx->hashalg);
    745	int bs = crypto_shash_blocksize(ctx->hashalg);
    746	int authkeylen = ctx->auth_key_len;
    747	u8 *ipad = NULL, *opad = NULL;
    748	int ret = 0, icount = 0;
    749
    750	ctx->sdesc = alloc_sdesc(ctx->hashalg);
    751	if (!ctx->sdesc)
    752		return -ENOMEM;
    753
    754	ctx->ipad = kzalloc(bs, GFP_KERNEL);
    755	if (!ctx->ipad) {
    756		ret = -ENOMEM;
    757		goto calc_fail;
    758	}
    759
    760	ctx->opad = kzalloc(bs, GFP_KERNEL);
    761	if (!ctx->opad) {
    762		ret = -ENOMEM;
    763		goto calc_fail;
    764	}
    765
    766	ipad = kzalloc(state_size, GFP_KERNEL);
    767	if (!ipad) {
    768		ret = -ENOMEM;
    769		goto calc_fail;
    770	}
    771
    772	opad = kzalloc(state_size, GFP_KERNEL);
    773	if (!opad) {
    774		ret = -ENOMEM;
    775		goto calc_fail;
    776	}
    777
    778	if (authkeylen > bs) {
    779		ret = crypto_shash_digest(&ctx->sdesc->shash, ctx->key,
    780					  authkeylen, ipad);
    781		if (ret)
    782			goto calc_fail;
    783
    784		authkeylen = ds;
    785	} else {
    786		memcpy(ipad, ctx->key, authkeylen);
    787	}
    788
    789	memset(ipad + authkeylen, 0, bs - authkeylen);
    790	memcpy(opad, ipad, bs);
    791
    792	for (icount = 0; icount < bs; icount++) {
    793		ipad[icount] ^= 0x36;
    794		opad[icount] ^= 0x5c;
    795	}
    796
     797	/*
     798	 * The partial hash state computed by the software
     799	 * algorithm is exported for IPAD & OPAD
     800	 */
    801
    802	/* IPAD Calculation */
    803	crypto_shash_init(&ctx->sdesc->shash);
    804	crypto_shash_update(&ctx->sdesc->shash, ipad, bs);
    805	crypto_shash_export(&ctx->sdesc->shash, ipad);
    806	ret = copy_pad(ctx->mac_type, ctx->ipad, ipad);
    807	if (ret)
    808		goto calc_fail;
    809
    810	/* OPAD Calculation */
    811	crypto_shash_init(&ctx->sdesc->shash);
    812	crypto_shash_update(&ctx->sdesc->shash, opad, bs);
    813	crypto_shash_export(&ctx->sdesc->shash, opad);
    814	ret = copy_pad(ctx->mac_type, ctx->opad, opad);
    815	if (ret)
    816		goto calc_fail;
    817
    818	kfree(ipad);
    819	kfree(opad);
    820
    821	return 0;
    822
    823calc_fail:
    824	kfree(ctx->ipad);
    825	ctx->ipad = NULL;
    826	kfree(ctx->opad);
    827	ctx->opad = NULL;
    828	kfree(ipad);
    829	kfree(opad);
    830	kfree(ctx->sdesc);
    831	ctx->sdesc = NULL;
    832
    833	return ret;
    834}
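        /*
         * This is the standard HMAC key precomputation (RFC 2104): pad
         * the key to the block size (hashing it first if longer), XOR
         * with the ipad/opad constants, run one compression step over
         * each padded block and export the partial state so the hardware
         * can resume from it per request. Schematically:
         *
         *	K' = pad(key, bs);		// digest key first if len > bs
         *	ipad = partial_hash(K' ^ 0x36..36);
         *	opad = partial_hash(K' ^ 0x5c..5c);
         */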
    835
    836static int otx_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher,
    837					   const unsigned char *key,
    838					   unsigned int keylen)
    839{
    840	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
    841	struct crypto_authenc_key_param *param;
    842	int enckeylen = 0, authkeylen = 0;
    843	struct rtattr *rta = (void *)key;
    844	int status = -EINVAL;
    845
    846	if (!RTA_OK(rta, keylen))
    847		goto badkey;
    848
    849	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
    850		goto badkey;
    851
    852	if (RTA_PAYLOAD(rta) < sizeof(*param))
    853		goto badkey;
    854
    855	param = RTA_DATA(rta);
    856	enckeylen = be32_to_cpu(param->enckeylen);
    857	key += RTA_ALIGN(rta->rta_len);
    858	keylen -= RTA_ALIGN(rta->rta_len);
    859	if (keylen < enckeylen)
    860		goto badkey;
    861
    862	if (keylen > OTX_CPT_MAX_KEY_SIZE)
    863		goto badkey;
    864
    865	authkeylen = keylen - enckeylen;
    866	memcpy(ctx->key, key, keylen);
    867
    868	switch (enckeylen) {
    869	case AES_KEYSIZE_128:
    870		ctx->key_type = OTX_CPT_AES_128_BIT;
    871		break;
    872	case AES_KEYSIZE_192:
    873		ctx->key_type = OTX_CPT_AES_192_BIT;
    874		break;
    875	case AES_KEYSIZE_256:
    876		ctx->key_type = OTX_CPT_AES_256_BIT;
    877		break;
    878	default:
    879		/* Invalid key length */
    880		goto badkey;
    881	}
    882
    883	ctx->enc_key_len = enckeylen;
    884	ctx->auth_key_len = authkeylen;
    885
    886	status = aead_hmac_init(cipher);
    887	if (status)
    888		goto badkey;
    889
    890	return 0;
    891badkey:
    892	return status;
    893}
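        /*
         * The key blob parsed above follows the generic authenc() key
         * layout: an rtattr header carrying crypto_authenc_key_param
         * (the big-endian enckeylen), then the authentication key
         * followed by the encryption key:
         *
         *	[ rtattr | enckeylen | auth key ... | enc key ... ]
         */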
    894
    895static int otx_cpt_aead_ecb_null_sha_setkey(struct crypto_aead *cipher,
    896					    const unsigned char *key,
    897					    unsigned int keylen)
    898{
    899	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
    900	struct crypto_authenc_key_param *param;
    901	struct rtattr *rta = (void *)key;
    902	int enckeylen = 0;
    903
    904	if (!RTA_OK(rta, keylen))
    905		goto badkey;
    906
    907	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
    908		goto badkey;
    909
    910	if (RTA_PAYLOAD(rta) < sizeof(*param))
    911		goto badkey;
    912
    913	param = RTA_DATA(rta);
    914	enckeylen = be32_to_cpu(param->enckeylen);
    915	key += RTA_ALIGN(rta->rta_len);
    916	keylen -= RTA_ALIGN(rta->rta_len);
    917	if (enckeylen != 0)
    918		goto badkey;
    919
    920	if (keylen > OTX_CPT_MAX_KEY_SIZE)
    921		goto badkey;
    922
    923	memcpy(ctx->key, key, keylen);
    924	ctx->enc_key_len = enckeylen;
    925	ctx->auth_key_len = keylen;
    926	return 0;
    927badkey:
    928	return -EINVAL;
    929}
    930
    931static int otx_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher,
    932				       const unsigned char *key,
    933				       unsigned int keylen)
    934{
    935	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(cipher);
    936
     937	/*
     938	 * For AES GCM we expect to get the encryption key (16, 24 or
     939	 * 32 bytes) followed by the salt (4 bytes)
     940	 */
    941	switch (keylen) {
    942	case AES_KEYSIZE_128 + AES_GCM_SALT_SIZE:
    943		ctx->key_type = OTX_CPT_AES_128_BIT;
    944		ctx->enc_key_len = AES_KEYSIZE_128;
    945		break;
    946	case AES_KEYSIZE_192 + AES_GCM_SALT_SIZE:
    947		ctx->key_type = OTX_CPT_AES_192_BIT;
    948		ctx->enc_key_len = AES_KEYSIZE_192;
    949		break;
    950	case AES_KEYSIZE_256 + AES_GCM_SALT_SIZE:
    951		ctx->key_type = OTX_CPT_AES_256_BIT;
    952		ctx->enc_key_len = AES_KEYSIZE_256;
    953		break;
    954	default:
    955		/* Invalid key and salt length */
    956		return -EINVAL;
    957	}
    958
    959	/* Store encryption key and salt */
    960	memcpy(ctx->key, key, keylen);
    961
    962	return 0;
    963}
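        /*
         * As per RFC 4106, the 12-byte GCM nonce is then formed at
         * request time from this 4-byte salt concatenated with the
         * 8-byte per-request IV (see AES_GCM_SALT_SIZE/AES_GCM_IV_SIZE).
         */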
    964
    965static inline u32 create_aead_ctx_hdr(struct aead_request *req, u32 enc,
    966				      u32 *argcnt)
    967{
    968	struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
    969	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
    970	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
    971	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
    972	struct otx_cpt_fc_ctx *fctx = &rctx->fctx;
    973	int mac_len = crypto_aead_authsize(tfm);
    974	int ds;
    975
    976	rctx->ctrl_word.e.enc_data_offset = req->assoclen;
    977
    978	switch (ctx->cipher_type) {
    979	case OTX_CPT_AES_CBC:
    980		fctx->enc.enc_ctrl.e.iv_source = OTX_CPT_FROM_CPTR;
    981		/* Copy encryption key to context */
    982		memcpy(fctx->enc.encr_key, ctx->key + ctx->auth_key_len,
    983		       ctx->enc_key_len);
    984		/* Copy IV to context */
    985		memcpy(fctx->enc.encr_iv, req->iv, crypto_aead_ivsize(tfm));
    986
    987		ds = crypto_shash_digestsize(ctx->hashalg);
    988		if (ctx->mac_type == OTX_CPT_SHA384)
    989			ds = SHA512_DIGEST_SIZE;
    990		if (ctx->ipad)
    991			memcpy(fctx->hmac.e.ipad, ctx->ipad, ds);
    992		if (ctx->opad)
    993			memcpy(fctx->hmac.e.opad, ctx->opad, ds);
    994		break;
    995
    996	case OTX_CPT_AES_GCM:
    997		fctx->enc.enc_ctrl.e.iv_source = OTX_CPT_FROM_DPTR;
    998		/* Copy encryption key to context */
    999		memcpy(fctx->enc.encr_key, ctx->key, ctx->enc_key_len);
   1000		/* Copy salt to context */
   1001		memcpy(fctx->enc.encr_iv, ctx->key + ctx->enc_key_len,
   1002		       AES_GCM_SALT_SIZE);
   1003
   1004		rctx->ctrl_word.e.iv_offset = req->assoclen - AES_GCM_IV_OFFSET;
   1005		break;
   1006
   1007	default:
   1008		/* Unknown cipher type */
   1009		return -EINVAL;
   1010	}
   1011	rctx->ctrl_word.flags = cpu_to_be64(rctx->ctrl_word.cflags);
   1012
   1013	req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
   1014	req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;
   1015	req_info->req.opcode.s.major = OTX_CPT_MAJOR_OP_FC |
   1016				 DMA_MODE_FLAG(OTX_CPT_DMA_GATHER_SCATTER);
   1017	if (enc) {
   1018		req_info->req.opcode.s.minor = 2;
   1019		req_info->req.param1 = req->cryptlen;
   1020		req_info->req.param2 = req->cryptlen + req->assoclen;
   1021	} else {
   1022		req_info->req.opcode.s.minor = 3;
   1023		req_info->req.param1 = req->cryptlen - mac_len;
   1024		req_info->req.param2 = req->cryptlen + req->assoclen - mac_len;
   1025	}
   1026
   1027	fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
   1028	fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
   1029	fctx->enc.enc_ctrl.e.mac_type = ctx->mac_type;
   1030	fctx->enc.enc_ctrl.e.mac_len = mac_len;
   1031	fctx->enc.enc_ctrl.flags = cpu_to_be64(fctx->enc.enc_ctrl.cflags);
   1032
    1033	/*
    1034	 * Store packet data information in the offset control
    1035	 * word, which occupies the first 8 bytes of the input
    1036	 */
   1037	req_info->in[*argcnt].vptr = (u8 *)&rctx->ctrl_word;
   1038	req_info->in[*argcnt].size = CONTROL_WORD_LEN;
   1039	req_info->req.dlen += CONTROL_WORD_LEN;
   1040	++(*argcnt);
   1041
   1042	req_info->in[*argcnt].vptr = (u8 *)fctx;
   1043	req_info->in[*argcnt].size = sizeof(struct otx_cpt_fc_ctx);
   1044	req_info->req.dlen += sizeof(struct otx_cpt_fc_ctx);
   1045	++(*argcnt);
   1046
   1047	return 0;
   1048}
   1049
   1050static inline u32 create_hmac_ctx_hdr(struct aead_request *req, u32 *argcnt,
   1051				      u32 enc)
   1052{
   1053	struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
   1054	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
   1055	struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx(tfm);
   1056	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
   1057
   1058	req_info->ctrl.s.dma_mode = OTX_CPT_DMA_GATHER_SCATTER;
   1059	req_info->ctrl.s.se_req = OTX_CPT_SE_CORE_REQ;
   1060	req_info->req.opcode.s.major = OTX_CPT_MAJOR_OP_HMAC |
   1061				 DMA_MODE_FLAG(OTX_CPT_DMA_GATHER_SCATTER);
   1062	req_info->is_trunc_hmac = ctx->is_trunc_hmac;
   1063
   1064	req_info->req.opcode.s.minor = 0;
   1065	req_info->req.param1 = ctx->auth_key_len;
   1066	req_info->req.param2 = ctx->mac_type << 8;
   1067
   1068	/* Add authentication key */
   1069	req_info->in[*argcnt].vptr = ctx->key;
   1070	req_info->in[*argcnt].size = round_up(ctx->auth_key_len, 8);
   1071	req_info->req.dlen += round_up(ctx->auth_key_len, 8);
   1072	++(*argcnt);
   1073
   1074	return 0;
   1075}
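        /*
         * For the NULL-cipher (HMAC-only) path the header is just the
         * authentication key padded to an 8-byte multiple; the hash type
         * is carried in param2 instead of an FC context.
         */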
   1076
   1077static inline u32 create_aead_input_list(struct aead_request *req, u32 enc)
   1078{
   1079	struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
   1080	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
   1081	u32 inputlen =  req->cryptlen + req->assoclen;
   1082	u32 status, argcnt = 0;
   1083
   1084	status = create_aead_ctx_hdr(req, enc, &argcnt);
   1085	if (status)
   1086		return status;
   1087	update_input_data(req_info, req->src, inputlen, &argcnt);
   1088	req_info->incnt = argcnt;
   1089
   1090	return 0;
   1091}
   1092
   1093static inline u32 create_aead_output_list(struct aead_request *req, u32 enc,
   1094					  u32 mac_len)
   1095{
   1096	struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
   1097	struct otx_cpt_req_info *req_info =  &rctx->cpt_req;
   1098	u32 argcnt = 0, outputlen = 0;
   1099
   1100	if (enc)
   1101		outputlen = req->cryptlen +  req->assoclen + mac_len;
   1102	else
   1103		outputlen = req->cryptlen + req->assoclen - mac_len;
   1104
   1105	update_output_data(req_info, req->dst, 0, outputlen, &argcnt);
   1106	req_info->outcnt = argcnt;
   1107
   1108	return 0;
   1109}
   1110
   1111static inline u32 create_aead_null_input_list(struct aead_request *req,
   1112					      u32 enc, u32 mac_len)
   1113{
   1114	struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
   1115	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
   1116	u32 inputlen, argcnt = 0;
   1117
   1118	if (enc)
   1119		inputlen =  req->cryptlen + req->assoclen;
   1120	else
   1121		inputlen =  req->cryptlen + req->assoclen - mac_len;
   1122
   1123	create_hmac_ctx_hdr(req, &argcnt, enc);
   1124	update_input_data(req_info, req->src, inputlen, &argcnt);
   1125	req_info->incnt = argcnt;
   1126
   1127	return 0;
   1128}
   1129
   1130static inline u32 create_aead_null_output_list(struct aead_request *req,
   1131					       u32 enc, u32 mac_len)
   1132{
   1133	struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
   1134	struct otx_cpt_req_info *req_info =  &rctx->cpt_req;
   1135	struct scatterlist *dst;
   1136	u8 *ptr = NULL;
   1137	int argcnt = 0, status, offset;
   1138	u32 inputlen;
   1139
   1140	if (enc)
   1141		inputlen =  req->cryptlen + req->assoclen;
   1142	else
   1143		inputlen =  req->cryptlen + req->assoclen - mac_len;
   1144
   1145	/*
   1146	 * If source and destination are different
   1147	 * then copy payload to destination
   1148	 */
   1149	if (req->src != req->dst) {
   1150
   1151		ptr = kmalloc(inputlen, (req_info->areq->flags &
   1152					 CRYPTO_TFM_REQ_MAY_SLEEP) ?
   1153					 GFP_KERNEL : GFP_ATOMIC);
   1154		if (!ptr) {
   1155			status = -ENOMEM;
   1156			goto error;
   1157		}
   1158
   1159		status = sg_copy_to_buffer(req->src, sg_nents(req->src), ptr,
   1160					   inputlen);
   1161		if (status != inputlen) {
   1162			status = -EINVAL;
   1163			goto error_free;
   1164		}
   1165		status = sg_copy_from_buffer(req->dst, sg_nents(req->dst), ptr,
   1166					     inputlen);
   1167		if (status != inputlen) {
   1168			status = -EINVAL;
   1169			goto error_free;
   1170		}
   1171		kfree(ptr);
   1172	}
   1173
   1174	if (enc) {
    1175		/*
    1176		 * In an encryption scenario the HMAC needs
    1177		 * to be appended after the payload
    1178		 */
   1179		dst = req->dst;
   1180		offset = inputlen;
   1181		while (offset >= dst->length) {
   1182			offset -= dst->length;
   1183			dst = sg_next(dst);
   1184			if (!dst) {
   1185				status = -ENOENT;
   1186				goto error;
   1187			}
   1188		}
   1189
   1190		update_output_data(req_info, dst, offset, mac_len, &argcnt);
   1191	} else {
    1192		/*
    1193		 * In a decryption scenario the HMAC calculated over the
    1194		 * received payload needs to be compared with the received HMAC
    1195		 */
   1196		status = sg_copy_buffer(req->src, sg_nents(req->src),
   1197					rctx->fctx.hmac.s.hmac_recv, mac_len,
   1198					inputlen, true);
   1199		if (status != mac_len) {
   1200			status = -EINVAL;
   1201			goto error;
   1202		}
   1203
   1204		req_info->out[argcnt].vptr = rctx->fctx.hmac.s.hmac_calc;
   1205		req_info->out[argcnt].size = mac_len;
   1206		argcnt++;
   1207	}
   1208
   1209	req_info->outcnt = argcnt;
   1210	return 0;
   1211
   1212error_free:
   1213	kfree(ptr);
   1214error:
   1215	return status;
   1216}
   1217
   1218static u32 cpt_aead_enc_dec(struct aead_request *req, u8 reg_type, u8 enc)
   1219{
   1220	struct otx_cpt_req_ctx *rctx = aead_request_ctx(req);
   1221	struct otx_cpt_req_info *req_info = &rctx->cpt_req;
   1222	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
   1223	struct pci_dev *pdev;
   1224	u32 status, cpu_num;
   1225
   1226	/* Clear control words */
   1227	rctx->ctrl_word.flags = 0;
   1228	rctx->fctx.enc.enc_ctrl.flags = 0;
   1229
   1230	req_info->callback = otx_cpt_aead_callback;
   1231	req_info->areq = &req->base;
   1232	req_info->req_type = reg_type;
   1233	req_info->is_enc = enc;
   1234	req_info->is_trunc_hmac = false;
   1235
   1236	switch (reg_type) {
   1237	case OTX_CPT_AEAD_ENC_DEC_REQ:
   1238		status = create_aead_input_list(req, enc);
   1239		if (status)
   1240			return status;
   1241		status = create_aead_output_list(req, enc,
   1242						 crypto_aead_authsize(tfm));
   1243		if (status)
   1244			return status;
   1245		break;
   1246
   1247	case OTX_CPT_AEAD_ENC_DEC_NULL_REQ:
   1248		status = create_aead_null_input_list(req, enc,
   1249						     crypto_aead_authsize(tfm));
   1250		if (status)
   1251			return status;
   1252		status = create_aead_null_output_list(req, enc,
   1253						crypto_aead_authsize(tfm));
   1254		if (status)
   1255			return status;
   1256		break;
   1257
   1258	default:
   1259		return -EINVAL;
   1260	}
   1261
   1262	/* Validate that request doesn't exceed maximum CPT supported size */
   1263	if (req_info->req.param1 > OTX_CPT_MAX_REQ_SIZE ||
   1264	    req_info->req.param2 > OTX_CPT_MAX_REQ_SIZE)
   1265		return -E2BIG;
   1266
   1267	status = get_se_device(&pdev, &cpu_num);
   1268	if (status)
   1269		return status;
   1270
   1271	req_info->ctrl.s.grp = 0;
   1272
   1273	status = otx_cpt_do_request(pdev, req_info, cpu_num);
    1274	/*
    1275	 * We perform an asynchronous send; once the request
    1276	 * has completed, the driver notifies us through the
    1277	 * registered callback function
    1278	 */
   1279	return status;
   1280}
   1281
   1282static int otx_cpt_aead_encrypt(struct aead_request *req)
   1283{
   1284	return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_REQ, true);
   1285}
   1286
   1287static int otx_cpt_aead_decrypt(struct aead_request *req)
   1288{
   1289	return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_REQ, false);
   1290}
   1291
   1292static int otx_cpt_aead_null_encrypt(struct aead_request *req)
   1293{
   1294	return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_NULL_REQ, true);
   1295}
   1296
   1297static int otx_cpt_aead_null_decrypt(struct aead_request *req)
   1298{
   1299	return cpt_aead_enc_dec(req, OTX_CPT_AEAD_ENC_DEC_NULL_REQ, false);
   1300}
   1301
   1302static struct skcipher_alg otx_cpt_skciphers[] = { {
   1303	.base.cra_name = "xts(aes)",
   1304	.base.cra_driver_name = "cpt_xts_aes",
   1305	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
   1306	.base.cra_blocksize = AES_BLOCK_SIZE,
   1307	.base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
   1308	.base.cra_alignmask = 7,
   1309	.base.cra_priority = 4001,
   1310	.base.cra_module = THIS_MODULE,
   1311
   1312	.init = otx_cpt_enc_dec_init,
   1313	.ivsize = AES_BLOCK_SIZE,
   1314	.min_keysize = 2 * AES_MIN_KEY_SIZE,
   1315	.max_keysize = 2 * AES_MAX_KEY_SIZE,
   1316	.setkey = otx_cpt_skcipher_xts_setkey,
   1317	.encrypt = otx_cpt_skcipher_encrypt,
   1318	.decrypt = otx_cpt_skcipher_decrypt,
   1319}, {
   1320	.base.cra_name = "cbc(aes)",
   1321	.base.cra_driver_name = "cpt_cbc_aes",
   1322	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
   1323	.base.cra_blocksize = AES_BLOCK_SIZE,
   1324	.base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
   1325	.base.cra_alignmask = 7,
   1326	.base.cra_priority = 4001,
   1327	.base.cra_module = THIS_MODULE,
   1328
   1329	.init = otx_cpt_enc_dec_init,
   1330	.ivsize = AES_BLOCK_SIZE,
   1331	.min_keysize = AES_MIN_KEY_SIZE,
   1332	.max_keysize = AES_MAX_KEY_SIZE,
   1333	.setkey = otx_cpt_skcipher_cbc_aes_setkey,
   1334	.encrypt = otx_cpt_skcipher_encrypt,
   1335	.decrypt = otx_cpt_skcipher_decrypt,
   1336}, {
   1337	.base.cra_name = "ecb(aes)",
   1338	.base.cra_driver_name = "cpt_ecb_aes",
   1339	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
   1340	.base.cra_blocksize = AES_BLOCK_SIZE,
   1341	.base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
   1342	.base.cra_alignmask = 7,
   1343	.base.cra_priority = 4001,
   1344	.base.cra_module = THIS_MODULE,
   1345
   1346	.init = otx_cpt_enc_dec_init,
   1347	.ivsize = 0,
   1348	.min_keysize = AES_MIN_KEY_SIZE,
   1349	.max_keysize = AES_MAX_KEY_SIZE,
   1350	.setkey = otx_cpt_skcipher_ecb_aes_setkey,
   1351	.encrypt = otx_cpt_skcipher_encrypt,
   1352	.decrypt = otx_cpt_skcipher_decrypt,
   1353}, {
   1354	.base.cra_name = "cfb(aes)",
   1355	.base.cra_driver_name = "cpt_cfb_aes",
   1356	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
   1357	.base.cra_blocksize = AES_BLOCK_SIZE,
   1358	.base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
   1359	.base.cra_alignmask = 7,
   1360	.base.cra_priority = 4001,
   1361	.base.cra_module = THIS_MODULE,
   1362
   1363	.init = otx_cpt_enc_dec_init,
   1364	.ivsize = AES_BLOCK_SIZE,
   1365	.min_keysize = AES_MIN_KEY_SIZE,
   1366	.max_keysize = AES_MAX_KEY_SIZE,
   1367	.setkey = otx_cpt_skcipher_cfb_aes_setkey,
   1368	.encrypt = otx_cpt_skcipher_encrypt,
   1369	.decrypt = otx_cpt_skcipher_decrypt,
   1370}, {
   1371	.base.cra_name = "cbc(des3_ede)",
   1372	.base.cra_driver_name = "cpt_cbc_des3_ede",
   1373	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
   1374	.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
   1375	.base.cra_ctxsize = sizeof(struct otx_cpt_des3_ctx),
   1376	.base.cra_alignmask = 7,
   1377	.base.cra_priority = 4001,
   1378	.base.cra_module = THIS_MODULE,
   1379
   1380	.init = otx_cpt_enc_dec_init,
   1381	.min_keysize = DES3_EDE_KEY_SIZE,
   1382	.max_keysize = DES3_EDE_KEY_SIZE,
   1383	.ivsize = DES_BLOCK_SIZE,
   1384	.setkey = otx_cpt_skcipher_cbc_des3_setkey,
   1385	.encrypt = otx_cpt_skcipher_encrypt,
   1386	.decrypt = otx_cpt_skcipher_decrypt,
   1387}, {
   1388	.base.cra_name = "ecb(des3_ede)",
   1389	.base.cra_driver_name = "cpt_ecb_des3_ede",
   1390	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
   1391	.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
   1392	.base.cra_ctxsize = sizeof(struct otx_cpt_des3_ctx),
   1393	.base.cra_alignmask = 7,
   1394	.base.cra_priority = 4001,
   1395	.base.cra_module = THIS_MODULE,
   1396
   1397	.init = otx_cpt_enc_dec_init,
   1398	.min_keysize = DES3_EDE_KEY_SIZE,
   1399	.max_keysize = DES3_EDE_KEY_SIZE,
   1400	.ivsize = 0,
   1401	.setkey = otx_cpt_skcipher_ecb_des3_setkey,
   1402	.encrypt = otx_cpt_skcipher_encrypt,
   1403	.decrypt = otx_cpt_skcipher_decrypt,
   1404} };
   1405
   1406static struct aead_alg otx_cpt_aeads[] = { {
   1407	.base = {
   1408		.cra_name = "authenc(hmac(sha1),cbc(aes))",
   1409		.cra_driver_name = "cpt_hmac_sha1_cbc_aes",
   1410		.cra_blocksize = AES_BLOCK_SIZE,
   1411		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
   1412		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
   1413		.cra_priority = 4001,
   1414		.cra_alignmask = 0,
   1415		.cra_module = THIS_MODULE,
   1416	},
   1417	.init = otx_cpt_aead_cbc_aes_sha1_init,
   1418	.exit = otx_cpt_aead_exit,
   1419	.setkey = otx_cpt_aead_cbc_aes_sha_setkey,
   1420	.setauthsize = otx_cpt_aead_set_authsize,
   1421	.encrypt = otx_cpt_aead_encrypt,
   1422	.decrypt = otx_cpt_aead_decrypt,
   1423	.ivsize = AES_BLOCK_SIZE,
   1424	.maxauthsize = SHA1_DIGEST_SIZE,
   1425}, {
   1426	.base = {
   1427		.cra_name = "authenc(hmac(sha256),cbc(aes))",
   1428		.cra_driver_name = "cpt_hmac_sha256_cbc_aes",
   1429		.cra_blocksize = AES_BLOCK_SIZE,
   1430		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
   1431		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
   1432		.cra_priority = 4001,
   1433		.cra_alignmask = 0,
   1434		.cra_module = THIS_MODULE,
   1435	},
   1436	.init = otx_cpt_aead_cbc_aes_sha256_init,
   1437	.exit = otx_cpt_aead_exit,
   1438	.setkey = otx_cpt_aead_cbc_aes_sha_setkey,
   1439	.setauthsize = otx_cpt_aead_set_authsize,
   1440	.encrypt = otx_cpt_aead_encrypt,
   1441	.decrypt = otx_cpt_aead_decrypt,
   1442	.ivsize = AES_BLOCK_SIZE,
   1443	.maxauthsize = SHA256_DIGEST_SIZE,
   1444}, {
   1445	.base = {
   1446		.cra_name = "authenc(hmac(sha384),cbc(aes))",
   1447		.cra_driver_name = "cpt_hmac_sha384_cbc_aes",
   1448		.cra_blocksize = AES_BLOCK_SIZE,
   1449		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
   1450		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
   1451		.cra_priority = 4001,
   1452		.cra_alignmask = 0,
   1453		.cra_module = THIS_MODULE,
   1454	},
   1455	.init = otx_cpt_aead_cbc_aes_sha384_init,
   1456	.exit = otx_cpt_aead_exit,
   1457	.setkey = otx_cpt_aead_cbc_aes_sha_setkey,
   1458	.setauthsize = otx_cpt_aead_set_authsize,
   1459	.encrypt = otx_cpt_aead_encrypt,
   1460	.decrypt = otx_cpt_aead_decrypt,
   1461	.ivsize = AES_BLOCK_SIZE,
   1462	.maxauthsize = SHA384_DIGEST_SIZE,
   1463}, {
   1464	.base = {
   1465		.cra_name = "authenc(hmac(sha512),cbc(aes))",
   1466		.cra_driver_name = "cpt_hmac_sha512_cbc_aes",
   1467		.cra_blocksize = AES_BLOCK_SIZE,
   1468		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
   1469		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
   1470		.cra_priority = 4001,
   1471		.cra_alignmask = 0,
   1472		.cra_module = THIS_MODULE,
   1473	},
   1474	.init = otx_cpt_aead_cbc_aes_sha512_init,
   1475	.exit = otx_cpt_aead_exit,
   1476	.setkey = otx_cpt_aead_cbc_aes_sha_setkey,
   1477	.setauthsize = otx_cpt_aead_set_authsize,
   1478	.encrypt = otx_cpt_aead_encrypt,
   1479	.decrypt = otx_cpt_aead_decrypt,
   1480	.ivsize = AES_BLOCK_SIZE,
   1481	.maxauthsize = SHA512_DIGEST_SIZE,
   1482}, {
   1483	.base = {
   1484		.cra_name = "authenc(hmac(sha1),ecb(cipher_null))",
   1485		.cra_driver_name = "cpt_hmac_sha1_ecb_null",
   1486		.cra_blocksize = 1,
   1487		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
   1488		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
   1489		.cra_priority = 4001,
   1490		.cra_alignmask = 0,
   1491		.cra_module = THIS_MODULE,
   1492	},
   1493	.init = otx_cpt_aead_ecb_null_sha1_init,
   1494	.exit = otx_cpt_aead_exit,
   1495	.setkey = otx_cpt_aead_ecb_null_sha_setkey,
   1496	.setauthsize = otx_cpt_aead_set_authsize,
   1497	.encrypt = otx_cpt_aead_null_encrypt,
   1498	.decrypt = otx_cpt_aead_null_decrypt,
   1499	.ivsize = 0,
   1500	.maxauthsize = SHA1_DIGEST_SIZE,
   1501}, {
   1502	.base = {
   1503		.cra_name = "authenc(hmac(sha256),ecb(cipher_null))",
   1504		.cra_driver_name = "cpt_hmac_sha256_ecb_null",
   1505		.cra_blocksize = 1,
   1506		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
   1507		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
   1508		.cra_priority = 4001,
   1509		.cra_alignmask = 0,
   1510		.cra_module = THIS_MODULE,
   1511	},
   1512	.init = otx_cpt_aead_ecb_null_sha256_init,
   1513	.exit = otx_cpt_aead_exit,
   1514	.setkey = otx_cpt_aead_ecb_null_sha_setkey,
   1515	.setauthsize = otx_cpt_aead_set_authsize,
   1516	.encrypt = otx_cpt_aead_null_encrypt,
   1517	.decrypt = otx_cpt_aead_null_decrypt,
   1518	.ivsize = 0,
   1519	.maxauthsize = SHA256_DIGEST_SIZE,
   1520}, {
   1521	.base = {
   1522		.cra_name = "authenc(hmac(sha384),ecb(cipher_null))",
   1523		.cra_driver_name = "cpt_hmac_sha384_ecb_null",
   1524		.cra_blocksize = 1,
   1525		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
   1526		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
   1527		.cra_priority = 4001,
   1528		.cra_alignmask = 0,
   1529		.cra_module = THIS_MODULE,
   1530	},
   1531	.init = otx_cpt_aead_ecb_null_sha384_init,
   1532	.exit = otx_cpt_aead_exit,
   1533	.setkey = otx_cpt_aead_ecb_null_sha_setkey,
   1534	.setauthsize = otx_cpt_aead_set_authsize,
   1535	.encrypt = otx_cpt_aead_null_encrypt,
   1536	.decrypt = otx_cpt_aead_null_decrypt,
   1537	.ivsize = 0,
   1538	.maxauthsize = SHA384_DIGEST_SIZE,
   1539}, {
   1540	.base = {
   1541		.cra_name = "authenc(hmac(sha512),ecb(cipher_null))",
   1542		.cra_driver_name = "cpt_hmac_sha512_ecb_null",
   1543		.cra_blocksize = 1,
   1544		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
   1545		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
   1546		.cra_priority = 4001,
   1547		.cra_alignmask = 0,
   1548		.cra_module = THIS_MODULE,
   1549	},
   1550	.init = otx_cpt_aead_ecb_null_sha512_init,
   1551	.exit = otx_cpt_aead_exit,
   1552	.setkey = otx_cpt_aead_ecb_null_sha_setkey,
   1553	.setauthsize = otx_cpt_aead_set_authsize,
   1554	.encrypt = otx_cpt_aead_null_encrypt,
   1555	.decrypt = otx_cpt_aead_null_decrypt,
   1556	.ivsize = 0,
   1557	.maxauthsize = SHA512_DIGEST_SIZE,
   1558}, {
   1559	.base = {
   1560		.cra_name = "rfc4106(gcm(aes))",
   1561		.cra_driver_name = "cpt_rfc4106_gcm_aes",
   1562		.cra_blocksize = 1,
   1563		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
   1564		.cra_ctxsize = sizeof(struct otx_cpt_aead_ctx),
   1565		.cra_priority = 4001,
   1566		.cra_alignmask = 0,
   1567		.cra_module = THIS_MODULE,
   1568	},
   1569	.init = otx_cpt_aead_gcm_aes_init,
   1570	.exit = otx_cpt_aead_exit,
   1571	.setkey = otx_cpt_aead_gcm_aes_setkey,
   1572	.setauthsize = otx_cpt_aead_set_authsize,
   1573	.encrypt = otx_cpt_aead_encrypt,
   1574	.decrypt = otx_cpt_aead_decrypt,
   1575	.ivsize = AES_GCM_IV_SIZE,
   1576	.maxauthsize = AES_GCM_ICV_SIZE,
   1577} };
   1578
   1579static inline int is_any_alg_used(void)
   1580{
   1581	int i;
   1582
   1583	for (i = 0; i < ARRAY_SIZE(otx_cpt_skciphers); i++)
   1584		if (refcount_read(&otx_cpt_skciphers[i].base.cra_refcnt) != 1)
   1585			return true;
   1586	for (i = 0; i < ARRAY_SIZE(otx_cpt_aeads); i++)
   1587		if (refcount_read(&otx_cpt_aeads[i].base.cra_refcnt) != 1)
   1588			return true;
   1589	return false;
   1590}
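        /*
         * A cra_refcnt of exactly 1 means only the registration itself
         * holds a reference, i.e. no tfm of that algorithm is currently
         * allocated.
         */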
   1591
   1592static inline int cpt_register_algs(void)
   1593{
   1594	int i, err = 0;
   1595
   1596	if (!IS_ENABLED(CONFIG_DM_CRYPT)) {
   1597		for (i = 0; i < ARRAY_SIZE(otx_cpt_skciphers); i++)
   1598			otx_cpt_skciphers[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;
   1599
   1600		err = crypto_register_skciphers(otx_cpt_skciphers,
   1601						ARRAY_SIZE(otx_cpt_skciphers));
   1602		if (err)
   1603			return err;
   1604	}
   1605
   1606	for (i = 0; i < ARRAY_SIZE(otx_cpt_aeads); i++)
   1607		otx_cpt_aeads[i].base.cra_flags &= ~CRYPTO_ALG_DEAD;
   1608
   1609	err = crypto_register_aeads(otx_cpt_aeads, ARRAY_SIZE(otx_cpt_aeads));
   1610	if (err) {
   1611		crypto_unregister_skciphers(otx_cpt_skciphers,
   1612					    ARRAY_SIZE(otx_cpt_skciphers));
   1613		return err;
   1614	}
   1615
   1616	return 0;
   1617}
   1618
   1619static inline void cpt_unregister_algs(void)
   1620{
   1621	crypto_unregister_skciphers(otx_cpt_skciphers,
   1622				    ARRAY_SIZE(otx_cpt_skciphers));
   1623	crypto_unregister_aeads(otx_cpt_aeads, ARRAY_SIZE(otx_cpt_aeads));
   1624}
   1625
   1626static int compare_func(const void *lptr, const void *rptr)
   1627{
   1628	struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr;
   1629	struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr;
   1630
   1631	if (ldesc->dev->devfn < rdesc->dev->devfn)
   1632		return -1;
   1633	if (ldesc->dev->devfn > rdesc->dev->devfn)
   1634		return 1;
   1635	return 0;
   1636}
   1637
   1638static void swap_func(void *lptr, void *rptr, int size)
   1639{
   1640	struct cpt_device_desc *ldesc = (struct cpt_device_desc *) lptr;
   1641	struct cpt_device_desc *rdesc = (struct cpt_device_desc *) rptr;
   1642
   1643	swap(*ldesc, *rdesc);
   1644}
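        /*
         * Device descriptors are kept sorted by PCI devfn so that the
         * CPU-to-queue mapping in get_se_device() is deterministic
         * rather than dependent on probe order.
         */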
   1645
   1646int otx_cpt_crypto_init(struct pci_dev *pdev, struct module *mod,
   1647			enum otx_cptpf_type pf_type,
   1648			enum otx_cptvf_type engine_type,
   1649			int num_queues, int num_devices)
   1650{
   1651	int ret = 0;
   1652	int count;
   1653
   1654	mutex_lock(&mutex);
   1655	switch (engine_type) {
   1656	case OTX_CPT_SE_TYPES:
   1657		count = atomic_read(&se_devices.count);
   1658		if (count >= CPT_MAX_VF_NUM) {
   1659			dev_err(&pdev->dev, "No space to add a new device\n");
   1660			ret = -ENOSPC;
   1661			goto err;
   1662		}
   1663		se_devices.desc[count].pf_type = pf_type;
   1664		se_devices.desc[count].num_queues = num_queues;
   1665		se_devices.desc[count++].dev = pdev;
   1666		atomic_inc(&se_devices.count);
   1667
   1668		if (atomic_read(&se_devices.count) == num_devices &&
   1669		    is_crypto_registered == false) {
   1670			if (cpt_register_algs()) {
   1671				dev_err(&pdev->dev,
   1672				   "Error in registering crypto algorithms\n");
   1673				ret =  -EINVAL;
   1674				goto err;
   1675			}
   1676			try_module_get(mod);
   1677			is_crypto_registered = true;
   1678		}
   1679		sort(se_devices.desc, count, sizeof(struct cpt_device_desc),
   1680		     compare_func, swap_func);
   1681		break;
   1682
   1683	case OTX_CPT_AE_TYPES:
   1684		count = atomic_read(&ae_devices.count);
   1685		if (count >= CPT_MAX_VF_NUM) {
    1686			dev_err(&pdev->dev, "No space to add a new device\n");
   1687			ret = -ENOSPC;
   1688			goto err;
   1689		}
   1690		ae_devices.desc[count].pf_type = pf_type;
   1691		ae_devices.desc[count].num_queues = num_queues;
   1692		ae_devices.desc[count++].dev = pdev;
   1693		atomic_inc(&ae_devices.count);
   1694		sort(ae_devices.desc, count, sizeof(struct cpt_device_desc),
   1695		     compare_func, swap_func);
   1696		break;
   1697
   1698	default:
   1699		dev_err(&pdev->dev, "Unknown VF type %d\n", engine_type);
   1700		ret = BAD_OTX_CPTVF_TYPE;
   1701	}
   1702err:
   1703	mutex_unlock(&mutex);
   1704	return ret;
   1705}
   1706
   1707void otx_cpt_crypto_exit(struct pci_dev *pdev, struct module *mod,
   1708			 enum otx_cptvf_type engine_type)
   1709{
   1710	struct cpt_device_table *dev_tbl;
   1711	bool dev_found = false;
   1712	int i, j, count;
   1713
   1714	mutex_lock(&mutex);
   1715
   1716	dev_tbl = (engine_type == OTX_CPT_AE_TYPES) ? &ae_devices : &se_devices;
   1717	count = atomic_read(&dev_tbl->count);
   1718	for (i = 0; i < count; i++)
   1719		if (pdev == dev_tbl->desc[i].dev) {
   1720			for (j = i; j < count-1; j++)
   1721				dev_tbl->desc[j] = dev_tbl->desc[j+1];
   1722			dev_found = true;
   1723			break;
   1724		}
   1725
   1726	if (!dev_found) {
   1727		dev_err(&pdev->dev, "%s device not found\n", __func__);
   1728		goto exit;
   1729	}
   1730
   1731	if (engine_type != OTX_CPT_AE_TYPES) {
   1732		if (atomic_dec_and_test(&se_devices.count) &&
   1733		    !is_any_alg_used()) {
   1734			cpt_unregister_algs();
   1735			module_put(mod);
   1736			is_crypto_registered = false;
   1737		}
   1738	} else
   1739		atomic_dec(&ae_devices.count);
   1740exit:
   1741	mutex_unlock(&mutex);
   1742}