cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

sec_crypto.c (65092B)


      1// SPDX-License-Identifier: GPL-2.0
      2/* Copyright (c) 2019 HiSilicon Limited. */
      3
      4#include <crypto/aes.h>
      5#include <crypto/aead.h>
      6#include <crypto/algapi.h>
      7#include <crypto/authenc.h>
      8#include <crypto/des.h>
      9#include <crypto/hash.h>
     10#include <crypto/internal/aead.h>
     11#include <crypto/internal/des.h>
     12#include <crypto/sha1.h>
     13#include <crypto/sha2.h>
     14#include <crypto/skcipher.h>
     15#include <crypto/xts.h>
     16#include <linux/crypto.h>
     17#include <linux/dma-mapping.h>
     18#include <linux/idr.h>
     19
     20#include "sec.h"
     21#include "sec_crypto.h"
     22
     23#define SEC_PRIORITY		4001
     24#define SEC_XTS_MIN_KEY_SIZE	(2 * AES_MIN_KEY_SIZE)
     25#define SEC_XTS_MID_KEY_SIZE	(3 * AES_MIN_KEY_SIZE)
     26#define SEC_XTS_MAX_KEY_SIZE	(2 * AES_MAX_KEY_SIZE)
     27#define SEC_DES3_2KEY_SIZE	(2 * DES_KEY_SIZE)
     28#define SEC_DES3_3KEY_SIZE	(3 * DES_KEY_SIZE)
     29
      30/* SEC sqe(bd) bit-field offsets and masks */
     31#define SEC_DE_OFFSET		1
     32#define SEC_CIPHER_OFFSET	4
     33#define SEC_SCENE_OFFSET	3
     34#define SEC_DST_SGL_OFFSET	2
     35#define SEC_SRC_SGL_OFFSET	7
     36#define SEC_CKEY_OFFSET		9
     37#define SEC_CMODE_OFFSET	12
     38#define SEC_AKEY_OFFSET         5
     39#define SEC_AEAD_ALG_OFFSET     11
     40#define SEC_AUTH_OFFSET		6
     41
     42#define SEC_DE_OFFSET_V3		9
     43#define SEC_SCENE_OFFSET_V3	5
     44#define SEC_CKEY_OFFSET_V3	13
     45#define SEC_CTR_CNT_OFFSET	25
     46#define SEC_CTR_CNT_ROLLOVER	2
     47#define SEC_SRC_SGL_OFFSET_V3	11
     48#define SEC_DST_SGL_OFFSET_V3	14
     49#define SEC_CALG_OFFSET_V3	4
     50#define SEC_AKEY_OFFSET_V3	9
     51#define SEC_MAC_OFFSET_V3	4
     52#define SEC_AUTH_ALG_OFFSET_V3	15
     53#define SEC_CIPHER_AUTH_V3	0xbf
     54#define SEC_AUTH_CIPHER_V3	0x40
     55#define SEC_FLAG_OFFSET		7
     56#define SEC_FLAG_MASK		0x0780
     57#define SEC_TYPE_MASK		0x0F
     58#define SEC_DONE_MASK		0x0001
     59#define SEC_ICV_MASK		0x000E
     60#define SEC_SQE_LEN_RATE_MASK	0x3
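        /*
         * Bit layout of the completion word (done_flag) implied by the masks
         * above and decoded in pre_parse_finished_bd()/pre_parse_finished_bd3():
         * bit 0 is the done bit, bits 1-3 carry the ICV status and bits 7-10
         * carry the completion flag.
         */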
     61
     62#define SEC_TOTAL_IV_SZ		(SEC_IV_SIZE * QM_Q_DEPTH)
     63#define SEC_SGL_SGE_NR		128
     64#define SEC_CIPHER_AUTH		0xfe
     65#define SEC_AUTH_CIPHER		0x1
     66#define SEC_MAX_MAC_LEN		64
     67#define SEC_MAX_AAD_LEN		65535
     68#define SEC_MAX_CCM_AAD_LEN	65279
     69#define SEC_TOTAL_MAC_SZ	(SEC_MAX_MAC_LEN * QM_Q_DEPTH)
     70
     71#define SEC_PBUF_SZ			512
     72#define SEC_PBUF_IV_OFFSET		SEC_PBUF_SZ
     73#define SEC_PBUF_MAC_OFFSET		(SEC_PBUF_SZ + SEC_IV_SIZE)
     74#define SEC_PBUF_PKG		(SEC_PBUF_SZ + SEC_IV_SIZE +	\
     75			SEC_MAX_MAC_LEN * 2)
     76#define SEC_PBUF_NUM		(PAGE_SIZE / SEC_PBUF_PKG)
     77#define SEC_PBUF_PAGE_NUM	(QM_Q_DEPTH / SEC_PBUF_NUM)
     78#define SEC_PBUF_LEFT_SZ	(SEC_PBUF_PKG * (QM_Q_DEPTH -	\
     79			SEC_PBUF_PAGE_NUM * SEC_PBUF_NUM))
     80#define SEC_TOTAL_PBUF_SZ	(PAGE_SIZE * SEC_PBUF_PAGE_NUM +	\
     81			SEC_PBUF_LEFT_SZ)
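        /*
         * Worked example of the pbuf layout above, assuming 4 KiB pages and
         * the SEC_IV_SIZE/QM_Q_DEPTH values of 24 and 1024 from the driver
         * headers: SEC_PBUF_PKG = 512 + 24 + 2 * 64 = 664 bytes,
         * SEC_PBUF_NUM = 4096 / 664 = 6 packages per page,
         * SEC_PBUF_PAGE_NUM = 1024 / 6 = 170 full pages, and the remaining
         * 1024 - 170 * 6 = 4 packages live in the SEC_PBUF_LEFT_SZ area.
         */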
     82
     83#define SEC_SQE_LEN_RATE	4
     84#define SEC_SQE_CFLAG		2
     85#define SEC_SQE_AEAD_FLAG	3
     86#define SEC_SQE_DONE		0x1
     87#define SEC_ICV_ERR		0x2
     88#define MIN_MAC_LEN		4
     89#define MAC_LEN_MASK		0x1U
     90#define MAX_INPUT_DATA_LEN	0xFFFE00
     91#define BITS_MASK		0xFF
     92#define BYTE_BITS		0x8
     93#define SEC_XTS_NAME_SZ		0x3
     94#define IV_CM_CAL_NUM		2
     95#define IV_CL_MASK		0x7
     96#define IV_CL_MIN		2
     97#define IV_CL_MID		4
     98#define IV_CL_MAX		8
     99#define IV_FLAGS_OFFSET	0x6
    100#define IV_CM_OFFSET		0x3
    101#define IV_LAST_BYTE1		1
    102#define IV_LAST_BYTE2		2
    103#define IV_LAST_BYTE_MASK	0xFF
    104#define IV_CTR_INIT		0x1
    105#define IV_BYTE_OFFSET		0x8
    106
     107/* Get an en/de-cipher queue cyclically to balance the load over the TFM's queues */
    108static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
    109{
    110	if (req->c_req.encrypt)
    111		return (u32)atomic_inc_return(&ctx->enc_qcyclic) %
    112				 ctx->hlf_q_num;
    113
    114	return (u32)atomic_inc_return(&ctx->dec_qcyclic) % ctx->hlf_q_num +
    115				 ctx->hlf_q_num;
    116}
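        /*
         * Encrypt requests are spread over queues [0, hlf_q_num) and decrypt
         * requests over [hlf_q_num, 2 * hlf_q_num), so the two directions do
         * not contend for the same queues.
         */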
    117
    118static inline void sec_free_queue_id(struct sec_ctx *ctx, struct sec_req *req)
    119{
    120	if (req->c_req.encrypt)
    121		atomic_dec(&ctx->enc_qcyclic);
    122	else
    123		atomic_dec(&ctx->dec_qcyclic);
    124}
    125
    126static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
    127{
    128	int req_id;
    129
    130	mutex_lock(&qp_ctx->req_lock);
    131
    132	req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL,
    133				  0, QM_Q_DEPTH, GFP_ATOMIC);
    134	mutex_unlock(&qp_ctx->req_lock);
    135	if (unlikely(req_id < 0)) {
    136		dev_err(req->ctx->dev, "alloc req id fail!\n");
    137		return req_id;
    138	}
    139
    140	req->qp_ctx = qp_ctx;
    141	qp_ctx->req_list[req_id] = req;
    142
    143	return req_id;
    144}
    145
    146static void sec_free_req_id(struct sec_req *req)
    147{
    148	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
    149	int req_id = req->req_id;
    150
    151	if (unlikely(req_id < 0 || req_id >= QM_Q_DEPTH)) {
    152		dev_err(req->ctx->dev, "free request id invalid!\n");
    153		return;
    154	}
    155
    156	qp_ctx->req_list[req_id] = NULL;
    157	req->qp_ctx = NULL;
    158
    159	mutex_lock(&qp_ctx->req_lock);
    160	idr_remove(&qp_ctx->req_idr, req_id);
    161	mutex_unlock(&qp_ctx->req_lock);
    162}
    163
    164static u8 pre_parse_finished_bd(struct bd_status *status, void *resp)
    165{
    166	struct sec_sqe *bd = resp;
    167
    168	status->done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
    169	status->icv = (le16_to_cpu(bd->type2.done_flag) & SEC_ICV_MASK) >> 1;
    170	status->flag = (le16_to_cpu(bd->type2.done_flag) &
    171					SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
    172	status->tag = le16_to_cpu(bd->type2.tag);
    173	status->err_type = bd->type2.error_type;
    174
    175	return bd->type_cipher_auth & SEC_TYPE_MASK;
    176}
    177
    178static u8 pre_parse_finished_bd3(struct bd_status *status, void *resp)
    179{
    180	struct sec_sqe3 *bd3 = resp;
    181
    182	status->done = le16_to_cpu(bd3->done_flag) & SEC_DONE_MASK;
    183	status->icv = (le16_to_cpu(bd3->done_flag) & SEC_ICV_MASK) >> 1;
    184	status->flag = (le16_to_cpu(bd3->done_flag) &
    185					SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
    186	status->tag = le64_to_cpu(bd3->tag);
    187	status->err_type = bd3->error_type;
    188
    189	return le32_to_cpu(bd3->bd_param) & SEC_TYPE_MASK;
    190}
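        /*
         * Note how a completion is matched back to its request: the v2
         * (type2) BD carries the request id in its 16-bit tag and
         * sec_req_cb() looks it up in qp_ctx->req_list[], while the v3 BD
         * carries the struct sec_req pointer itself in the 64-bit tag.
         */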
    191
    192static int sec_cb_status_check(struct sec_req *req,
    193			       struct bd_status *status)
    194{
    195	struct sec_ctx *ctx = req->ctx;
    196
    197	if (unlikely(req->err_type || status->done != SEC_SQE_DONE)) {
    198		dev_err_ratelimited(ctx->dev, "err_type[%d], done[%u]\n",
    199				    req->err_type, status->done);
    200		return -EIO;
    201	}
    202
    203	if (unlikely(ctx->alg_type == SEC_SKCIPHER)) {
    204		if (unlikely(status->flag != SEC_SQE_CFLAG)) {
    205			dev_err_ratelimited(ctx->dev, "flag[%u]\n",
    206					    status->flag);
    207			return -EIO;
    208		}
    209	} else if (unlikely(ctx->alg_type == SEC_AEAD)) {
    210		if (unlikely(status->flag != SEC_SQE_AEAD_FLAG ||
    211			     status->icv == SEC_ICV_ERR)) {
    212			dev_err_ratelimited(ctx->dev,
    213					    "flag[%u], icv[%u]\n",
    214					    status->flag, status->icv);
    215			return -EBADMSG;
    216		}
    217	}
    218
    219	return 0;
    220}
    221
    222static void sec_req_cb(struct hisi_qp *qp, void *resp)
    223{
    224	struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
    225	struct sec_dfx *dfx = &qp_ctx->ctx->sec->debug.dfx;
    226	u8 type_supported = qp_ctx->ctx->type_supported;
    227	struct bd_status status;
    228	struct sec_ctx *ctx;
    229	struct sec_req *req;
    230	int err;
    231	u8 type;
    232
    233	if (type_supported == SEC_BD_TYPE2) {
    234		type = pre_parse_finished_bd(&status, resp);
    235		req = qp_ctx->req_list[status.tag];
    236	} else {
    237		type = pre_parse_finished_bd3(&status, resp);
    238		req = (void *)(uintptr_t)status.tag;
    239	}
    240
    241	if (unlikely(type != type_supported)) {
    242		atomic64_inc(&dfx->err_bd_cnt);
    243		pr_err("err bd type [%u]\n", type);
    244		return;
    245	}
    246
    247	if (unlikely(!req)) {
    248		atomic64_inc(&dfx->invalid_req_cnt);
    249		atomic_inc(&qp->qp_status.used);
    250		return;
    251	}
    252
    253	req->err_type = status.err_type;
    254	ctx = req->ctx;
    255	err = sec_cb_status_check(req, &status);
    256	if (err)
    257		atomic64_inc(&dfx->done_flag_cnt);
    258
    259	atomic64_inc(&dfx->recv_cnt);
    260
    261	ctx->req_op->buf_unmap(ctx, req);
    262
    263	ctx->req_op->callback(ctx, req, err);
    264}
    265
    266static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
    267{
    268	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
    269	int ret;
    270
    271	if (ctx->fake_req_limit <=
    272	    atomic_read(&qp_ctx->qp->qp_status.used) &&
    273	    !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG))
    274		return -EBUSY;
    275
    276	mutex_lock(&qp_ctx->req_lock);
    277	ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
    278
    279	if (ctx->fake_req_limit <=
    280	    atomic_read(&qp_ctx->qp->qp_status.used) && !ret) {
    281		list_add_tail(&req->backlog_head, &qp_ctx->backlog);
    282		atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
    283		atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
    284		mutex_unlock(&qp_ctx->req_lock);
    285		return -EBUSY;
    286	}
    287	mutex_unlock(&qp_ctx->req_lock);
    288
    289	if (unlikely(ret == -EBUSY))
    290		return -ENOBUFS;
    291
    292	if (likely(!ret)) {
    293		ret = -EINPROGRESS;
    294		atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
    295	}
    296
    297	return ret;
    298}
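        /*
         * Return convention of sec_bd_send(): -EINPROGRESS means the BD was
         * handed to the hardware queue; -EBUSY means the request was either
         * parked on the software backlog (CRYPTO_TFM_REQ_MAY_BACKLOG set) or
         * rejected because the queue is saturated; -ENOBUFS reflects an
         * -EBUSY returned by hisi_qp_send() itself.
         */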
    299
    300/* Get DMA memory resources */
    301static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
    302{
    303	int i;
    304
    305	res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
    306					 &res->c_ivin_dma, GFP_KERNEL);
    307	if (!res->c_ivin)
    308		return -ENOMEM;
    309
    310	for (i = 1; i < QM_Q_DEPTH; i++) {
    311		res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
    312		res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
    313	}
    314
    315	return 0;
    316}
    317
    318static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res)
    319{
    320	if (res->c_ivin)
    321		dma_free_coherent(dev, SEC_TOTAL_IV_SZ,
    322				  res->c_ivin, res->c_ivin_dma);
    323}
    324
    325static int sec_alloc_aiv_resource(struct device *dev, struct sec_alg_res *res)
    326{
    327	int i;
    328
    329	res->a_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
    330					 &res->a_ivin_dma, GFP_KERNEL);
    331	if (!res->a_ivin)
    332		return -ENOMEM;
    333
    334	for (i = 1; i < QM_Q_DEPTH; i++) {
    335		res[i].a_ivin_dma = res->a_ivin_dma + i * SEC_IV_SIZE;
    336		res[i].a_ivin = res->a_ivin + i * SEC_IV_SIZE;
    337	}
    338
    339	return 0;
    340}
    341
    342static void sec_free_aiv_resource(struct device *dev, struct sec_alg_res *res)
    343{
    344	if (res->a_ivin)
    345		dma_free_coherent(dev, SEC_TOTAL_IV_SZ,
    346				  res->a_ivin, res->a_ivin_dma);
    347}
    348
    349static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res)
    350{
    351	int i;
    352
    353	res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
    354					  &res->out_mac_dma, GFP_KERNEL);
    355	if (!res->out_mac)
    356		return -ENOMEM;
    357
    358	for (i = 1; i < QM_Q_DEPTH; i++) {
    359		res[i].out_mac_dma = res->out_mac_dma +
    360				     i * (SEC_MAX_MAC_LEN << 1);
    361		res[i].out_mac = res->out_mac + i * (SEC_MAX_MAC_LEN << 1);
    362	}
    363
    364	return 0;
    365}
    366
    367static void sec_free_mac_resource(struct device *dev, struct sec_alg_res *res)
    368{
    369	if (res->out_mac)
    370		dma_free_coherent(dev, SEC_TOTAL_MAC_SZ << 1,
    371				  res->out_mac, res->out_mac_dma);
    372}
    373
    374static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res)
    375{
    376	if (res->pbuf)
    377		dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ,
    378				  res->pbuf, res->pbuf_dma);
    379}
    380
     381/*
     382 * To improve performance, a pre-allocated pbuffer is used for small
     383 * packets (< 512 bytes) when the IOMMU is in use.
     384 */
    385static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res)
    386{
    387	int pbuf_page_offset;
    388	int i, j, k;
    389
    390	res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ,
    391				&res->pbuf_dma, GFP_KERNEL);
    392	if (!res->pbuf)
    393		return -ENOMEM;
    394
     395	/*
     396	 * Each SEC_PBUF_PKG holds the data pbuf, the IV and the
     397	 * out_mac: <SEC_PBUF|SEC_IV|SEC_MAC>.
     398	 * Every page holds SEC_PBUF_NUM packages and the sec_qp_ctx
     399	 * needs QM_Q_DEPTH of them in total, so SEC_PBUF_PAGE_NUM full
     400	 * pages plus the leftover area described by SEC_PBUF_LEFT_SZ
     401	 * make up SEC_TOTAL_PBUF_SZ.
     402	 */
    403	for (i = 0; i <= SEC_PBUF_PAGE_NUM; i++) {
    404		pbuf_page_offset = PAGE_SIZE * i;
    405		for (j = 0; j < SEC_PBUF_NUM; j++) {
    406			k = i * SEC_PBUF_NUM + j;
    407			if (k == QM_Q_DEPTH)
    408				break;
    409			res[k].pbuf = res->pbuf +
    410				j * SEC_PBUF_PKG + pbuf_page_offset;
    411			res[k].pbuf_dma = res->pbuf_dma +
    412				j * SEC_PBUF_PKG + pbuf_page_offset;
    413		}
    414	}
    415
    416	return 0;
    417}
    418
    419static int sec_alg_resource_alloc(struct sec_ctx *ctx,
    420				  struct sec_qp_ctx *qp_ctx)
    421{
    422	struct sec_alg_res *res = qp_ctx->res;
    423	struct device *dev = ctx->dev;
    424	int ret;
    425
    426	ret = sec_alloc_civ_resource(dev, res);
    427	if (ret)
    428		return ret;
    429
    430	if (ctx->alg_type == SEC_AEAD) {
    431		ret = sec_alloc_aiv_resource(dev, res);
    432		if (ret)
    433			goto alloc_aiv_fail;
    434
    435		ret = sec_alloc_mac_resource(dev, res);
    436		if (ret)
    437			goto alloc_mac_fail;
    438	}
    439	if (ctx->pbuf_supported) {
    440		ret = sec_alloc_pbuf_resource(dev, res);
    441		if (ret) {
    442			dev_err(dev, "fail to alloc pbuf dma resource!\n");
    443			goto alloc_pbuf_fail;
    444		}
    445	}
    446
    447	return 0;
    448
    449alloc_pbuf_fail:
    450	if (ctx->alg_type == SEC_AEAD)
    451		sec_free_mac_resource(dev, qp_ctx->res);
    452alloc_mac_fail:
    453	if (ctx->alg_type == SEC_AEAD)
    454		sec_free_aiv_resource(dev, res);
    455alloc_aiv_fail:
    456	sec_free_civ_resource(dev, res);
    457	return ret;
    458}
    459
    460static void sec_alg_resource_free(struct sec_ctx *ctx,
    461				  struct sec_qp_ctx *qp_ctx)
    462{
    463	struct device *dev = ctx->dev;
    464
    465	sec_free_civ_resource(dev, qp_ctx->res);
    466
    467	if (ctx->pbuf_supported)
    468		sec_free_pbuf_resource(dev, qp_ctx->res);
    469	if (ctx->alg_type == SEC_AEAD)
    470		sec_free_mac_resource(dev, qp_ctx->res);
    471}
    472
    473static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx,
    474			     int qp_ctx_id, int alg_type)
    475{
    476	struct device *dev = ctx->dev;
    477	struct sec_qp_ctx *qp_ctx;
    478	struct hisi_qp *qp;
    479	int ret = -ENOMEM;
    480
    481	qp_ctx = &ctx->qp_ctx[qp_ctx_id];
    482	qp = ctx->qps[qp_ctx_id];
    483	qp->req_type = 0;
    484	qp->qp_ctx = qp_ctx;
    485	qp_ctx->qp = qp;
    486	qp_ctx->ctx = ctx;
    487
    488	qp->req_cb = sec_req_cb;
    489
    490	mutex_init(&qp_ctx->req_lock);
    491	idr_init(&qp_ctx->req_idr);
    492	INIT_LIST_HEAD(&qp_ctx->backlog);
    493
    494	qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
    495						     SEC_SGL_SGE_NR);
    496	if (IS_ERR(qp_ctx->c_in_pool)) {
    497		dev_err(dev, "fail to create sgl pool for input!\n");
    498		goto err_destroy_idr;
    499	}
    500
    501	qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
    502						      SEC_SGL_SGE_NR);
    503	if (IS_ERR(qp_ctx->c_out_pool)) {
    504		dev_err(dev, "fail to create sgl pool for output!\n");
    505		goto err_free_c_in_pool;
    506	}
    507
    508	ret = sec_alg_resource_alloc(ctx, qp_ctx);
    509	if (ret)
    510		goto err_free_c_out_pool;
    511
    512	ret = hisi_qm_start_qp(qp, 0);
    513	if (ret < 0)
    514		goto err_queue_free;
    515
    516	return 0;
    517
    518err_queue_free:
    519	sec_alg_resource_free(ctx, qp_ctx);
    520err_free_c_out_pool:
    521	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
    522err_free_c_in_pool:
    523	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
    524err_destroy_idr:
    525	idr_destroy(&qp_ctx->req_idr);
    526	return ret;
    527}
    528
    529static void sec_release_qp_ctx(struct sec_ctx *ctx,
    530			       struct sec_qp_ctx *qp_ctx)
    531{
    532	struct device *dev = ctx->dev;
    533
    534	hisi_qm_stop_qp(qp_ctx->qp);
    535	sec_alg_resource_free(ctx, qp_ctx);
    536
    537	hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
    538	hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
    539
    540	idr_destroy(&qp_ctx->req_idr);
    541}
    542
    543static int sec_ctx_base_init(struct sec_ctx *ctx)
    544{
    545	struct sec_dev *sec;
    546	int i, ret;
    547
    548	ctx->qps = sec_create_qps();
    549	if (!ctx->qps) {
    550		pr_err("Can not create sec qps!\n");
    551		return -ENODEV;
    552	}
    553
    554	sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm);
    555	ctx->sec = sec;
    556	ctx->dev = &sec->qm.pdev->dev;
    557	ctx->hlf_q_num = sec->ctx_q_num >> 1;
    558
    559	ctx->pbuf_supported = ctx->sec->iommu_used;
    560
     561	/* Half of the queue depth is taken as the fake request limit of the queue. */
    562	ctx->fake_req_limit = QM_Q_DEPTH >> 1;
    563	ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
    564			      GFP_KERNEL);
    565	if (!ctx->qp_ctx) {
    566		ret = -ENOMEM;
    567		goto err_destroy_qps;
    568	}
    569
    570	for (i = 0; i < sec->ctx_q_num; i++) {
    571		ret = sec_create_qp_ctx(&sec->qm, ctx, i, 0);
    572		if (ret)
    573			goto err_sec_release_qp_ctx;
    574	}
    575
    576	return 0;
    577
    578err_sec_release_qp_ctx:
    579	for (i = i - 1; i >= 0; i--)
    580		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
    581	kfree(ctx->qp_ctx);
    582err_destroy_qps:
    583	sec_destroy_qps(ctx->qps, sec->ctx_q_num);
    584	return ret;
    585}
    586
    587static void sec_ctx_base_uninit(struct sec_ctx *ctx)
    588{
    589	int i;
    590
    591	for (i = 0; i < ctx->sec->ctx_q_num; i++)
    592		sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
    593
    594	sec_destroy_qps(ctx->qps, ctx->sec->ctx_q_num);
    595	kfree(ctx->qp_ctx);
    596}
    597
    598static int sec_cipher_init(struct sec_ctx *ctx)
    599{
    600	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
    601
    602	c_ctx->c_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
    603					  &c_ctx->c_key_dma, GFP_KERNEL);
    604	if (!c_ctx->c_key)
    605		return -ENOMEM;
    606
    607	return 0;
    608}
    609
    610static void sec_cipher_uninit(struct sec_ctx *ctx)
    611{
    612	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
    613
    614	memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE);
    615	dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
    616			  c_ctx->c_key, c_ctx->c_key_dma);
    617}
    618
    619static int sec_auth_init(struct sec_ctx *ctx)
    620{
    621	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
    622
    623	a_ctx->a_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
    624					  &a_ctx->a_key_dma, GFP_KERNEL);
    625	if (!a_ctx->a_key)
    626		return -ENOMEM;
    627
    628	return 0;
    629}
    630
    631static void sec_auth_uninit(struct sec_ctx *ctx)
    632{
    633	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
    634
    635	memzero_explicit(a_ctx->a_key, SEC_MAX_KEY_SIZE);
    636	dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
    637			  a_ctx->a_key, a_ctx->a_key_dma);
    638}
    639
    640static int sec_skcipher_fbtfm_init(struct crypto_skcipher *tfm)
    641{
    642	const char *alg = crypto_tfm_alg_name(&tfm->base);
    643	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
    644	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
    645
    646	c_ctx->fallback = false;
    647
     648	/* Currently, only XTS mode needs a fallback tfm when using a 192-bit key */
    649	if (likely(strncmp(alg, "xts", SEC_XTS_NAME_SZ)))
    650		return 0;
    651
    652	c_ctx->fbtfm = crypto_alloc_sync_skcipher(alg, 0,
    653						  CRYPTO_ALG_NEED_FALLBACK);
    654	if (IS_ERR(c_ctx->fbtfm)) {
    655		pr_err("failed to alloc xts mode fallback tfm!\n");
    656		return PTR_ERR(c_ctx->fbtfm);
    657	}
    658
    659	return 0;
    660}
    661
    662static int sec_skcipher_init(struct crypto_skcipher *tfm)
    663{
    664	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
    665	int ret;
    666
    667	ctx->alg_type = SEC_SKCIPHER;
    668	crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
    669	ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
    670	if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
    671		pr_err("get error skcipher iv size!\n");
    672		return -EINVAL;
    673	}
    674
    675	ret = sec_ctx_base_init(ctx);
    676	if (ret)
    677		return ret;
    678
    679	ret = sec_cipher_init(ctx);
    680	if (ret)
    681		goto err_cipher_init;
    682
    683	ret = sec_skcipher_fbtfm_init(tfm);
    684	if (ret)
    685		goto err_fbtfm_init;
    686
    687	return 0;
    688
    689err_fbtfm_init:
    690	sec_cipher_uninit(ctx);
    691err_cipher_init:
    692	sec_ctx_base_uninit(ctx);
    693	return ret;
    694}
    695
    696static void sec_skcipher_uninit(struct crypto_skcipher *tfm)
    697{
    698	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
    699
    700	if (ctx->c_ctx.fbtfm)
    701		crypto_free_sync_skcipher(ctx->c_ctx.fbtfm);
    702
    703	sec_cipher_uninit(ctx);
    704	sec_ctx_base_uninit(ctx);
    705}
    706
    707static int sec_skcipher_3des_setkey(struct crypto_skcipher *tfm, const u8 *key,
    708				    const u32 keylen,
    709				    const enum sec_cmode c_mode)
    710{
    711	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
    712	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
    713	int ret;
    714
    715	ret = verify_skcipher_des3_key(tfm, key);
    716	if (ret)
    717		return ret;
    718
    719	switch (keylen) {
    720	case SEC_DES3_2KEY_SIZE:
    721		c_ctx->c_key_len = SEC_CKEY_3DES_2KEY;
    722		break;
    723	case SEC_DES3_3KEY_SIZE:
    724		c_ctx->c_key_len = SEC_CKEY_3DES_3KEY;
    725		break;
    726	default:
    727		return -EINVAL;
    728	}
    729
    730	return 0;
    731}
    732
    733static int sec_skcipher_aes_sm4_setkey(struct sec_cipher_ctx *c_ctx,
    734				       const u32 keylen,
    735				       const enum sec_cmode c_mode)
    736{
    737	if (c_mode == SEC_CMODE_XTS) {
    738		switch (keylen) {
    739		case SEC_XTS_MIN_KEY_SIZE:
    740			c_ctx->c_key_len = SEC_CKEY_128BIT;
    741			break;
    742		case SEC_XTS_MID_KEY_SIZE:
    743			c_ctx->fallback = true;
    744			break;
    745		case SEC_XTS_MAX_KEY_SIZE:
    746			c_ctx->c_key_len = SEC_CKEY_256BIT;
    747			break;
    748		default:
    749			pr_err("hisi_sec2: xts mode key error!\n");
    750			return -EINVAL;
    751		}
    752	} else {
    753		if (c_ctx->c_alg == SEC_CALG_SM4 &&
    754		    keylen != AES_KEYSIZE_128) {
    755			pr_err("hisi_sec2: sm4 key error!\n");
    756			return -EINVAL;
    757		} else {
    758			switch (keylen) {
    759			case AES_KEYSIZE_128:
    760				c_ctx->c_key_len = SEC_CKEY_128BIT;
    761				break;
    762			case AES_KEYSIZE_192:
    763				c_ctx->c_key_len = SEC_CKEY_192BIT;
    764				break;
    765			case AES_KEYSIZE_256:
    766				c_ctx->c_key_len = SEC_CKEY_256BIT;
    767				break;
    768			default:
    769				pr_err("hisi_sec2: aes key error!\n");
    770				return -EINVAL;
    771			}
    772		}
    773	}
    774
    775	return 0;
    776}
    777
    778static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
    779			       const u32 keylen, const enum sec_calg c_alg,
    780			       const enum sec_cmode c_mode)
    781{
    782	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
    783	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
    784	struct device *dev = ctx->dev;
    785	int ret;
    786
    787	if (c_mode == SEC_CMODE_XTS) {
    788		ret = xts_verify_key(tfm, key, keylen);
    789		if (ret) {
    790			dev_err(dev, "xts mode key err!\n");
    791			return ret;
    792		}
    793	}
    794
    795	c_ctx->c_alg  = c_alg;
    796	c_ctx->c_mode = c_mode;
    797
    798	switch (c_alg) {
    799	case SEC_CALG_3DES:
    800		ret = sec_skcipher_3des_setkey(tfm, key, keylen, c_mode);
    801		break;
    802	case SEC_CALG_AES:
    803	case SEC_CALG_SM4:
    804		ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
    805		break;
    806	default:
    807		return -EINVAL;
    808	}
    809
    810	if (ret) {
    811		dev_err(dev, "set sec key err!\n");
    812		return ret;
    813	}
    814
    815	memcpy(c_ctx->c_key, key, keylen);
    816	if (c_ctx->fallback && c_ctx->fbtfm) {
    817		ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen);
    818		if (ret) {
    819			dev_err(dev, "failed to set fallback skcipher key!\n");
    820			return ret;
    821		}
    822	}
    823	return 0;
    824}
    825
    826#define GEN_SEC_SETKEY_FUNC(name, c_alg, c_mode)			\
    827static int sec_setkey_##name(struct crypto_skcipher *tfm, const u8 *key,\
    828	u32 keylen)							\
    829{									\
    830	return sec_skcipher_setkey(tfm, key, keylen, c_alg, c_mode);	\
    831}
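        /*
         * For illustration, GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES,
         * SEC_CMODE_ECB) expands to:
         *
         *   static int sec_setkey_aes_ecb(struct crypto_skcipher *tfm,
         *                                 const u8 *key, u32 keylen)
         *   {
         *       return sec_skcipher_setkey(tfm, key, keylen, SEC_CALG_AES,
         *                                  SEC_CMODE_ECB);
         *   }
         *
         * The generated helpers serve as the .setkey callbacks of the
         * corresponding skcipher algorithms registered later in this file.
         */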
    832
    833GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB)
    834GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC)
    835GEN_SEC_SETKEY_FUNC(aes_xts, SEC_CALG_AES, SEC_CMODE_XTS)
    836GEN_SEC_SETKEY_FUNC(aes_ofb, SEC_CALG_AES, SEC_CMODE_OFB)
    837GEN_SEC_SETKEY_FUNC(aes_cfb, SEC_CALG_AES, SEC_CMODE_CFB)
    838GEN_SEC_SETKEY_FUNC(aes_ctr, SEC_CALG_AES, SEC_CMODE_CTR)
    839GEN_SEC_SETKEY_FUNC(3des_ecb, SEC_CALG_3DES, SEC_CMODE_ECB)
    840GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)
    841GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
    842GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)
    843GEN_SEC_SETKEY_FUNC(sm4_ofb, SEC_CALG_SM4, SEC_CMODE_OFB)
    844GEN_SEC_SETKEY_FUNC(sm4_cfb, SEC_CALG_SM4, SEC_CMODE_CFB)
    845GEN_SEC_SETKEY_FUNC(sm4_ctr, SEC_CALG_SM4, SEC_CMODE_CTR)
    846
    847static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
    848			struct scatterlist *src)
    849{
    850	struct sec_aead_req *a_req = &req->aead_req;
    851	struct aead_request *aead_req = a_req->aead_req;
    852	struct sec_cipher_req *c_req = &req->c_req;
    853	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
    854	struct device *dev = ctx->dev;
    855	int copy_size, pbuf_length;
    856	int req_id = req->req_id;
    857	struct crypto_aead *tfm;
    858	size_t authsize;
    859	u8 *mac_offset;
    860
    861	if (ctx->alg_type == SEC_AEAD)
    862		copy_size = aead_req->cryptlen + aead_req->assoclen;
    863	else
    864		copy_size = c_req->c_len;
    865
    866	pbuf_length = sg_copy_to_buffer(src, sg_nents(src),
    867			qp_ctx->res[req_id].pbuf, copy_size);
    868	if (unlikely(pbuf_length != copy_size)) {
    869		dev_err(dev, "copy src data to pbuf error!\n");
    870		return -EINVAL;
    871	}
    872	if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) {
    873		tfm = crypto_aead_reqtfm(aead_req);
    874		authsize = crypto_aead_authsize(tfm);
    875		mac_offset = qp_ctx->res[req_id].pbuf + copy_size - authsize;
    876		memcpy(a_req->out_mac, mac_offset, authsize);
    877	}
    878
    879	req->in_dma = qp_ctx->res[req_id].pbuf_dma;
    880	c_req->c_out_dma = req->in_dma;
    881
    882	return 0;
    883}
    884
    885static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
    886			struct scatterlist *dst)
    887{
    888	struct aead_request *aead_req = req->aead_req.aead_req;
    889	struct sec_cipher_req *c_req = &req->c_req;
    890	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
    891	int copy_size, pbuf_length;
    892	int req_id = req->req_id;
    893
    894	if (ctx->alg_type == SEC_AEAD)
    895		copy_size = c_req->c_len + aead_req->assoclen;
    896	else
    897		copy_size = c_req->c_len;
    898
    899	pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst),
    900			qp_ctx->res[req_id].pbuf, copy_size);
    901	if (unlikely(pbuf_length != copy_size))
    902		dev_err(ctx->dev, "copy pbuf data to dst error!\n");
    903}
    904
    905static int sec_aead_mac_init(struct sec_aead_req *req)
    906{
    907	struct aead_request *aead_req = req->aead_req;
    908	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
    909	size_t authsize = crypto_aead_authsize(tfm);
    910	u8 *mac_out = req->out_mac;
    911	struct scatterlist *sgl = aead_req->src;
    912	size_t copy_size;
    913	off_t skip_size;
    914
    915	/* Copy input mac */
    916	skip_size = aead_req->assoclen + aead_req->cryptlen - authsize;
    917	copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out,
    918				       authsize, skip_size);
    919	if (unlikely(copy_size != authsize))
    920		return -EINVAL;
    921
    922	return 0;
    923}
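        /*
         * For AEAD decryption the expected MAC is copied from the tail of the
         * source scatterlist into the out_mac buffer, whose DMA address is
         * later programmed into the BD's mac_addr field.
         */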
    924
    925static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
    926			  struct scatterlist *src, struct scatterlist *dst)
    927{
    928	struct sec_cipher_req *c_req = &req->c_req;
    929	struct sec_aead_req *a_req = &req->aead_req;
    930	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
    931	struct sec_alg_res *res = &qp_ctx->res[req->req_id];
    932	struct device *dev = ctx->dev;
    933	int ret;
    934
    935	if (req->use_pbuf) {
    936		c_req->c_ivin = res->pbuf + SEC_PBUF_IV_OFFSET;
    937		c_req->c_ivin_dma = res->pbuf_dma + SEC_PBUF_IV_OFFSET;
    938		if (ctx->alg_type == SEC_AEAD) {
    939			a_req->a_ivin = res->a_ivin;
    940			a_req->a_ivin_dma = res->a_ivin_dma;
    941			a_req->out_mac = res->pbuf + SEC_PBUF_MAC_OFFSET;
    942			a_req->out_mac_dma = res->pbuf_dma +
    943					SEC_PBUF_MAC_OFFSET;
    944		}
    945		ret = sec_cipher_pbuf_map(ctx, req, src);
    946
    947		return ret;
    948	}
    949	c_req->c_ivin = res->c_ivin;
    950	c_req->c_ivin_dma = res->c_ivin_dma;
    951	if (ctx->alg_type == SEC_AEAD) {
    952		a_req->a_ivin = res->a_ivin;
    953		a_req->a_ivin_dma = res->a_ivin_dma;
    954		a_req->out_mac = res->out_mac;
    955		a_req->out_mac_dma = res->out_mac_dma;
    956	}
    957
    958	req->in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
    959						qp_ctx->c_in_pool,
    960						req->req_id,
    961						&req->in_dma);
    962	if (IS_ERR(req->in)) {
    963		dev_err(dev, "fail to dma map input sgl buffers!\n");
    964		return PTR_ERR(req->in);
    965	}
    966
    967	if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) {
    968		ret = sec_aead_mac_init(a_req);
    969		if (unlikely(ret)) {
    970			dev_err(dev, "fail to init mac data for ICV!\n");
    971			return ret;
    972		}
    973	}
    974
    975	if (dst == src) {
    976		c_req->c_out = req->in;
    977		c_req->c_out_dma = req->in_dma;
    978	} else {
    979		c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst,
    980							     qp_ctx->c_out_pool,
    981							     req->req_id,
    982							     &c_req->c_out_dma);
    983
    984		if (IS_ERR(c_req->c_out)) {
    985			dev_err(dev, "fail to dma map output sgl buffers!\n");
    986			hisi_acc_sg_buf_unmap(dev, src, req->in);
    987			return PTR_ERR(c_req->c_out);
    988		}
    989	}
    990
    991	return 0;
    992}
    993
    994static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req,
    995			     struct scatterlist *src, struct scatterlist *dst)
    996{
    997	struct sec_cipher_req *c_req = &req->c_req;
    998	struct device *dev = ctx->dev;
    999
   1000	if (req->use_pbuf) {
   1001		sec_cipher_pbuf_unmap(ctx, req, dst);
   1002	} else {
   1003		if (dst != src)
   1004			hisi_acc_sg_buf_unmap(dev, src, req->in);
   1005
   1006		hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out);
   1007	}
   1008}
   1009
   1010static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
   1011{
   1012	struct skcipher_request *sq = req->c_req.sk_req;
   1013
   1014	return sec_cipher_map(ctx, req, sq->src, sq->dst);
   1015}
   1016
   1017static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
   1018{
   1019	struct skcipher_request *sq = req->c_req.sk_req;
   1020
   1021	sec_cipher_unmap(ctx, req, sq->src, sq->dst);
   1022}
   1023
   1024static int sec_aead_aes_set_key(struct sec_cipher_ctx *c_ctx,
   1025				struct crypto_authenc_keys *keys)
   1026{
   1027	switch (keys->enckeylen) {
   1028	case AES_KEYSIZE_128:
   1029		c_ctx->c_key_len = SEC_CKEY_128BIT;
   1030		break;
   1031	case AES_KEYSIZE_192:
   1032		c_ctx->c_key_len = SEC_CKEY_192BIT;
   1033		break;
   1034	case AES_KEYSIZE_256:
   1035		c_ctx->c_key_len = SEC_CKEY_256BIT;
   1036		break;
   1037	default:
   1038		pr_err("hisi_sec2: aead aes key error!\n");
   1039		return -EINVAL;
   1040	}
   1041	memcpy(c_ctx->c_key, keys->enckey, keys->enckeylen);
   1042
   1043	return 0;
   1044}
   1045
   1046static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
   1047				 struct crypto_authenc_keys *keys)
   1048{
   1049	struct crypto_shash *hash_tfm = ctx->hash_tfm;
   1050	int blocksize, digestsize, ret;
   1051
   1052	if (!keys->authkeylen) {
   1053		pr_err("hisi_sec2: aead auth key error!\n");
   1054		return -EINVAL;
   1055	}
   1056
   1057	blocksize = crypto_shash_blocksize(hash_tfm);
   1058	digestsize = crypto_shash_digestsize(hash_tfm);
   1059	if (keys->authkeylen > blocksize) {
   1060		ret = crypto_shash_tfm_digest(hash_tfm, keys->authkey,
   1061					      keys->authkeylen, ctx->a_key);
   1062		if (ret) {
   1063			pr_err("hisi_sec2: aead auth digest error!\n");
   1064			return -EINVAL;
   1065		}
   1066		ctx->a_key_len = digestsize;
   1067	} else {
   1068		memcpy(ctx->a_key, keys->authkey, keys->authkeylen);
   1069		ctx->a_key_len = keys->authkeylen;
   1070	}
   1071
   1072	return 0;
   1073}
   1074
   1075static int sec_aead_setauthsize(struct crypto_aead *aead, unsigned int authsize)
   1076{
   1077	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
   1078	struct sec_ctx *ctx = crypto_tfm_ctx(tfm);
   1079	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
   1080
   1081	if (unlikely(a_ctx->fallback_aead_tfm))
   1082		return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize);
   1083
   1084	return 0;
   1085}
   1086
   1087static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx,
   1088				    struct crypto_aead *tfm, const u8 *key,
   1089				    unsigned int keylen)
   1090{
   1091	crypto_aead_clear_flags(a_ctx->fallback_aead_tfm, CRYPTO_TFM_REQ_MASK);
   1092	crypto_aead_set_flags(a_ctx->fallback_aead_tfm,
   1093			      crypto_aead_get_flags(tfm) & CRYPTO_TFM_REQ_MASK);
   1094	return crypto_aead_setkey(a_ctx->fallback_aead_tfm, key, keylen);
   1095}
   1096
   1097static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
   1098			   const u32 keylen, const enum sec_hash_alg a_alg,
   1099			   const enum sec_calg c_alg,
   1100			   const enum sec_mac_len mac_len,
   1101			   const enum sec_cmode c_mode)
   1102{
   1103	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
   1104	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
   1105	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
   1106	struct device *dev = ctx->dev;
   1107	struct crypto_authenc_keys keys;
   1108	int ret;
   1109
   1110	ctx->a_ctx.a_alg = a_alg;
   1111	ctx->c_ctx.c_alg = c_alg;
   1112	ctx->a_ctx.mac_len = mac_len;
   1113	c_ctx->c_mode = c_mode;
   1114
   1115	if (c_mode == SEC_CMODE_CCM || c_mode == SEC_CMODE_GCM) {
   1116		ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
   1117		if (ret) {
   1118			dev_err(dev, "set sec aes ccm cipher key err!\n");
   1119			return ret;
   1120		}
   1121		memcpy(c_ctx->c_key, key, keylen);
   1122
   1123		if (unlikely(a_ctx->fallback_aead_tfm)) {
   1124			ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
   1125			if (ret)
   1126				return ret;
   1127		}
   1128
   1129		return 0;
   1130	}
   1131
   1132	if (crypto_authenc_extractkeys(&keys, key, keylen))
   1133		goto bad_key;
   1134
   1135	ret = sec_aead_aes_set_key(c_ctx, &keys);
   1136	if (ret) {
   1137		dev_err(dev, "set sec cipher key err!\n");
   1138		goto bad_key;
   1139	}
   1140
   1141	ret = sec_aead_auth_set_key(&ctx->a_ctx, &keys);
   1142	if (ret) {
   1143		dev_err(dev, "set sec auth key err!\n");
   1144		goto bad_key;
   1145	}
   1146
   1147	if ((ctx->a_ctx.mac_len & SEC_SQE_LEN_RATE_MASK)  ||
   1148	    (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK)) {
   1149		dev_err(dev, "MAC or AUTH key length error!\n");
   1150		goto bad_key;
   1151	}
   1152
   1153	return 0;
   1154
   1155bad_key:
   1156	memzero_explicit(&keys, sizeof(struct crypto_authenc_keys));
   1157	return -EINVAL;
   1158}
   1159
   1160
   1161#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, maclen, cmode)	\
   1162static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key,	\
   1163	u32 keylen)							\
   1164{									\
   1165	return sec_aead_setkey(tfm, key, keylen, aalg, calg, maclen, cmode);\
   1166}
   1167
   1168GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1,
   1169			 SEC_CALG_AES, SEC_HMAC_SHA1_MAC, SEC_CMODE_CBC)
   1170GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256,
   1171			 SEC_CALG_AES, SEC_HMAC_SHA256_MAC, SEC_CMODE_CBC)
   1172GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512,
   1173			 SEC_CALG_AES, SEC_HMAC_SHA512_MAC, SEC_CMODE_CBC)
   1174GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES,
   1175			 SEC_HMAC_CCM_MAC, SEC_CMODE_CCM)
   1176GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES,
   1177			 SEC_HMAC_GCM_MAC, SEC_CMODE_GCM)
   1178GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4,
   1179			 SEC_HMAC_CCM_MAC, SEC_CMODE_CCM)
   1180GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4,
   1181			 SEC_HMAC_GCM_MAC, SEC_CMODE_GCM)
   1182
   1183static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
   1184{
   1185	struct aead_request *aq = req->aead_req.aead_req;
   1186
   1187	return sec_cipher_map(ctx, req, aq->src, aq->dst);
   1188}
   1189
   1190static void sec_aead_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
   1191{
   1192	struct aead_request *aq = req->aead_req.aead_req;
   1193
   1194	sec_cipher_unmap(ctx, req, aq->src, aq->dst);
   1195}
   1196
   1197static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
   1198{
   1199	int ret;
   1200
   1201	ret = ctx->req_op->buf_map(ctx, req);
   1202	if (unlikely(ret))
   1203		return ret;
   1204
   1205	ctx->req_op->do_transfer(ctx, req);
   1206
   1207	ret = ctx->req_op->bd_fill(ctx, req);
   1208	if (unlikely(ret))
   1209		goto unmap_req_buf;
   1210
   1211	return ret;
   1212
   1213unmap_req_buf:
   1214	ctx->req_op->buf_unmap(ctx, req);
   1215	return ret;
   1216}
   1217
   1218static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req)
   1219{
   1220	ctx->req_op->buf_unmap(ctx, req);
   1221}
   1222
   1223static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
   1224{
   1225	struct skcipher_request *sk_req = req->c_req.sk_req;
   1226	struct sec_cipher_req *c_req = &req->c_req;
   1227
   1228	memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
   1229}
   1230
   1231static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
   1232{
   1233	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
   1234	struct sec_cipher_req *c_req = &req->c_req;
   1235	struct sec_sqe *sec_sqe = &req->sec_sqe;
   1236	u8 scene, sa_type, da_type;
   1237	u8 bd_type, cipher;
   1238	u8 de = 0;
   1239
   1240	memset(sec_sqe, 0, sizeof(struct sec_sqe));
   1241
   1242	sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
   1243	sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
   1244	sec_sqe->type2.data_src_addr = cpu_to_le64(req->in_dma);
   1245	sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);
   1246
   1247	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) <<
   1248						SEC_CMODE_OFFSET);
   1249	sec_sqe->type2.c_alg = c_ctx->c_alg;
   1250	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
   1251						SEC_CKEY_OFFSET);
   1252
   1253	bd_type = SEC_BD_TYPE2;
   1254	if (c_req->encrypt)
   1255		cipher = SEC_CIPHER_ENC << SEC_CIPHER_OFFSET;
   1256	else
   1257		cipher = SEC_CIPHER_DEC << SEC_CIPHER_OFFSET;
   1258	sec_sqe->type_cipher_auth = bd_type | cipher;
   1259
   1260	/* Set destination and source address type */
   1261	if (req->use_pbuf) {
   1262		sa_type = SEC_PBUF << SEC_SRC_SGL_OFFSET;
   1263		da_type = SEC_PBUF << SEC_DST_SGL_OFFSET;
   1264	} else {
   1265		sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET;
   1266		da_type = SEC_SGL << SEC_DST_SGL_OFFSET;
   1267	}
   1268
   1269	sec_sqe->sdm_addr_type |= da_type;
   1270	scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET;
   1271	if (req->in_dma != c_req->c_out_dma)
   1272		de = 0x1 << SEC_DE_OFFSET;
   1273
   1274	sec_sqe->sds_sa_type = (de | scene | sa_type);
   1275
   1276	sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len);
   1277	sec_sqe->type2.tag = cpu_to_le16((u16)req->req_id);
   1278
   1279	return 0;
   1280}
   1281
   1282static int sec_skcipher_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
   1283{
   1284	struct sec_sqe3 *sec_sqe3 = &req->sec_sqe3;
   1285	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
   1286	struct sec_cipher_req *c_req = &req->c_req;
   1287	u32 bd_param = 0;
   1288	u16 cipher;
   1289
   1290	memset(sec_sqe3, 0, sizeof(struct sec_sqe3));
   1291
   1292	sec_sqe3->c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
   1293	sec_sqe3->no_scene.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
   1294	sec_sqe3->data_src_addr = cpu_to_le64(req->in_dma);
   1295	sec_sqe3->data_dst_addr = cpu_to_le64(c_req->c_out_dma);
   1296
   1297	sec_sqe3->c_mode_alg = ((u8)c_ctx->c_alg << SEC_CALG_OFFSET_V3) |
   1298						c_ctx->c_mode;
   1299	sec_sqe3->c_icv_key |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
   1300						SEC_CKEY_OFFSET_V3);
   1301
   1302	if (c_req->encrypt)
   1303		cipher = SEC_CIPHER_ENC;
   1304	else
   1305		cipher = SEC_CIPHER_DEC;
   1306	sec_sqe3->c_icv_key |= cpu_to_le16(cipher);
   1307
    1308	/* Set the CTR counter mode to 128-bit rollover */
   1309	sec_sqe3->auth_mac_key = cpu_to_le32((u32)SEC_CTR_CNT_ROLLOVER <<
   1310					SEC_CTR_CNT_OFFSET);
   1311
   1312	if (req->use_pbuf) {
   1313		bd_param |= SEC_PBUF << SEC_SRC_SGL_OFFSET_V3;
   1314		bd_param |= SEC_PBUF << SEC_DST_SGL_OFFSET_V3;
   1315	} else {
   1316		bd_param |= SEC_SGL << SEC_SRC_SGL_OFFSET_V3;
   1317		bd_param |= SEC_SGL << SEC_DST_SGL_OFFSET_V3;
   1318	}
   1319
   1320	bd_param |= SEC_COMM_SCENE << SEC_SCENE_OFFSET_V3;
   1321	if (req->in_dma != c_req->c_out_dma)
   1322		bd_param |= 0x1 << SEC_DE_OFFSET_V3;
   1323
   1324	bd_param |= SEC_BD_TYPE3;
   1325	sec_sqe3->bd_param = cpu_to_le32(bd_param);
   1326
   1327	sec_sqe3->c_len_ivin |= cpu_to_le32(c_req->c_len);
   1328	sec_sqe3->tag = cpu_to_le64(req);
   1329
   1330	return 0;
   1331}
   1332
   1333/* increment counter (128-bit int) */
   1334static void ctr_iv_inc(__u8 *counter, __u8 bits, __u32 nums)
   1335{
   1336	do {
   1337		--bits;
   1338		nums += counter[bits];
   1339		counter[bits] = nums & BITS_MASK;
   1340		nums >>= BYTE_BITS;
   1341	} while (bits && nums);
   1342}
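        /*
         * Example: with a 16-byte counter whose last two bytes are 0x00 0xff,
         * ctr_iv_inc(counter, 16, 2) adds 2 with carry, leaving them as
         * 0x01 0x01. The loop walks from the least significant (last) byte
         * towards the front and stops once the carry has been consumed.
         */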
   1343
   1344static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
   1345{
   1346	struct aead_request *aead_req = req->aead_req.aead_req;
   1347	struct skcipher_request *sk_req = req->c_req.sk_req;
   1348	u32 iv_size = req->ctx->c_ctx.ivsize;
   1349	struct scatterlist *sgl;
   1350	unsigned int cryptlen;
   1351	size_t sz;
   1352	u8 *iv;
   1353
   1354	if (req->c_req.encrypt)
   1355		sgl = alg_type == SEC_SKCIPHER ? sk_req->dst : aead_req->dst;
   1356	else
   1357		sgl = alg_type == SEC_SKCIPHER ? sk_req->src : aead_req->src;
   1358
   1359	if (alg_type == SEC_SKCIPHER) {
   1360		iv = sk_req->iv;
   1361		cryptlen = sk_req->cryptlen;
   1362	} else {
   1363		iv = aead_req->iv;
   1364		cryptlen = aead_req->cryptlen;
   1365	}
   1366
   1367	if (req->ctx->c_ctx.c_mode == SEC_CMODE_CBC) {
   1368		sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size,
   1369					cryptlen - iv_size);
   1370		if (unlikely(sz != iv_size))
   1371			dev_err(req->ctx->dev, "copy output iv error!\n");
   1372	} else {
   1373		sz = cryptlen / iv_size;
   1374		if (cryptlen % iv_size)
   1375			sz += 1;
   1376		ctr_iv_inc(iv, iv_size, sz);
   1377	}
   1378}
   1379
   1380static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx,
   1381				struct sec_qp_ctx *qp_ctx)
   1382{
   1383	struct sec_req *backlog_req = NULL;
   1384
   1385	mutex_lock(&qp_ctx->req_lock);
   1386	if (ctx->fake_req_limit >=
   1387	    atomic_read(&qp_ctx->qp->qp_status.used) &&
   1388	    !list_empty(&qp_ctx->backlog)) {
   1389		backlog_req = list_first_entry(&qp_ctx->backlog,
   1390				typeof(*backlog_req), backlog_head);
   1391		list_del(&backlog_req->backlog_head);
   1392	}
   1393	mutex_unlock(&qp_ctx->req_lock);
   1394
   1395	return backlog_req;
   1396}
   1397
   1398static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
   1399				  int err)
   1400{
   1401	struct skcipher_request *sk_req = req->c_req.sk_req;
   1402	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
   1403	struct skcipher_request *backlog_sk_req;
   1404	struct sec_req *backlog_req;
   1405
   1406	sec_free_req_id(req);
   1407
    1408	/* Output the IV when encrypting in CBC/CTR mode */
   1409	if (!err && (ctx->c_ctx.c_mode == SEC_CMODE_CBC ||
   1410	    ctx->c_ctx.c_mode == SEC_CMODE_CTR) && req->c_req.encrypt)
   1411		sec_update_iv(req, SEC_SKCIPHER);
   1412
   1413	while (1) {
   1414		backlog_req = sec_back_req_clear(ctx, qp_ctx);
   1415		if (!backlog_req)
   1416			break;
   1417
   1418		backlog_sk_req = backlog_req->c_req.sk_req;
   1419		backlog_sk_req->base.complete(&backlog_sk_req->base,
   1420						-EINPROGRESS);
   1421		atomic64_inc(&ctx->sec->debug.dfx.recv_busy_cnt);
   1422	}
   1423
   1424	sk_req->base.complete(&sk_req->base, err);
   1425}
   1426
   1427static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
   1428{
   1429	struct aead_request *aead_req = req->aead_req.aead_req;
   1430	struct sec_cipher_req *c_req = &req->c_req;
   1431	struct sec_aead_req *a_req = &req->aead_req;
   1432	size_t authsize = ctx->a_ctx.mac_len;
   1433	u32 data_size = aead_req->cryptlen;
   1434	u8 flage = 0;
   1435	u8 cm, cl;
   1436
   1437	/* the specification has been checked in aead_iv_demension_check() */
   1438	cl = c_req->c_ivin[0] + 1;
   1439	c_req->c_ivin[ctx->c_ctx.ivsize - cl] = 0x00;
   1440	memset(&c_req->c_ivin[ctx->c_ctx.ivsize - cl], 0, cl);
   1441	c_req->c_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE1] = IV_CTR_INIT;
   1442
    1443	/* the last 3 bits are L' */
   1444	flage |= c_req->c_ivin[0] & IV_CL_MASK;
   1445
    1446	/* M' occupies bits 3~5 and the associated-data flag is bit 6 */
   1447	cm = (authsize - IV_CM_CAL_NUM) / IV_CM_CAL_NUM;
   1448	flage |= cm << IV_CM_OFFSET;
   1449	if (aead_req->assoclen)
   1450		flage |= 0x01 << IV_FLAGS_OFFSET;
   1451
   1452	memcpy(a_req->a_ivin, c_req->c_ivin, ctx->c_ctx.ivsize);
   1453	a_req->a_ivin[0] = flage;
   1454
    1455	/*
    1456	 * The last 32 bits hold the counter's initial value, but the
    1457	 * nonce occupies the first 16 of them; the remaining 16 bits
    1458	 * are filled with the cipher text length.
    1459	 */
   1460	if (!c_req->encrypt)
   1461		data_size = aead_req->cryptlen - authsize;
   1462
   1463	a_req->a_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE1] =
   1464			data_size & IV_LAST_BYTE_MASK;
   1465	data_size >>= IV_BYTE_OFFSET;
   1466	a_req->a_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE2] =
   1467			data_size & IV_LAST_BYTE_MASK;
   1468}
   1469
   1470static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req)
   1471{
   1472	struct aead_request *aead_req = req->aead_req.aead_req;
   1473	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
   1474	size_t authsize = crypto_aead_authsize(tfm);
   1475	struct sec_cipher_req *c_req = &req->c_req;
   1476	struct sec_aead_req *a_req = &req->aead_req;
   1477
   1478	memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize);
   1479
   1480	if (ctx->c_ctx.c_mode == SEC_CMODE_CCM) {
   1481		/*
    1482		 * CCM 16-byte Cipher_IV: {1B_Flag,13B_IV,2B_counter},
    1483		 * the counter must be set to 0x01
   1484		 */
   1485		ctx->a_ctx.mac_len = authsize;
    1486		/* CCM 16-byte Auth_IV: {1B_AFlag,13B_IV,2B_Ptext_length} */
   1487		set_aead_auth_iv(ctx, req);
   1488	}
   1489
   1490	/* GCM 12Byte Cipher_IV == Auth_IV */
   1491	if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) {
   1492		ctx->a_ctx.mac_len = authsize;
   1493		memcpy(a_req->a_ivin, c_req->c_ivin, SEC_AIV_SIZE);
   1494	}
   1495}
   1496
   1497static void sec_auth_bd_fill_xcm(struct sec_auth_ctx *ctx, int dir,
   1498				 struct sec_req *req, struct sec_sqe *sec_sqe)
   1499{
   1500	struct sec_aead_req *a_req = &req->aead_req;
   1501	struct aead_request *aq = a_req->aead_req;
   1502
   1503	/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
   1504	sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)ctx->mac_len);
   1505
   1506	/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
   1507	sec_sqe->type2.a_key_addr = sec_sqe->type2.c_key_addr;
   1508	sec_sqe->type2.a_ivin_addr = cpu_to_le64(a_req->a_ivin_dma);
   1509	sec_sqe->type_cipher_auth |= SEC_NO_AUTH << SEC_AUTH_OFFSET;
   1510
   1511	if (dir)
   1512		sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
   1513	else
   1514		sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;
   1515
   1516	sec_sqe->type2.alen_ivllen = cpu_to_le32(aq->assoclen);
   1517	sec_sqe->type2.auth_src_offset = cpu_to_le16(0x0);
   1518	sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
   1519
   1520	sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma);
   1521}
   1522
   1523static void sec_auth_bd_fill_xcm_v3(struct sec_auth_ctx *ctx, int dir,
   1524				    struct sec_req *req, struct sec_sqe3 *sqe3)
   1525{
   1526	struct sec_aead_req *a_req = &req->aead_req;
   1527	struct aead_request *aq = a_req->aead_req;
   1528
   1529	/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
   1530	sqe3->c_icv_key |= cpu_to_le16((u16)ctx->mac_len << SEC_MAC_OFFSET_V3);
   1531
   1532	/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
   1533	sqe3->a_key_addr = sqe3->c_key_addr;
   1534	sqe3->auth_ivin.a_ivin_addr = cpu_to_le64(a_req->a_ivin_dma);
   1535	sqe3->auth_mac_key |= SEC_NO_AUTH;
   1536
   1537	if (dir)
   1538		sqe3->huk_iv_seq &= SEC_CIPHER_AUTH_V3;
   1539	else
   1540		sqe3->huk_iv_seq |= SEC_AUTH_CIPHER_V3;
   1541
   1542	sqe3->a_len_key = cpu_to_le32(aq->assoclen);
   1543	sqe3->auth_src_offset = cpu_to_le16(0x0);
   1544	sqe3->cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
   1545	sqe3->mac_addr = cpu_to_le64(a_req->out_mac_dma);
   1546}
   1547
   1548static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
   1549			       struct sec_req *req, struct sec_sqe *sec_sqe)
   1550{
   1551	struct sec_aead_req *a_req = &req->aead_req;
   1552	struct sec_cipher_req *c_req = &req->c_req;
   1553	struct aead_request *aq = a_req->aead_req;
   1554
   1555	sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);
   1556
   1557	sec_sqe->type2.mac_key_alg =
   1558			cpu_to_le32(ctx->mac_len / SEC_SQE_LEN_RATE);
   1559
   1560	sec_sqe->type2.mac_key_alg |=
   1561			cpu_to_le32((u32)((ctx->a_key_len) /
   1562			SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET);
   1563
   1564	sec_sqe->type2.mac_key_alg |=
   1565			cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET);
   1566
   1567	if (dir) {
   1568		sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE1 << SEC_AUTH_OFFSET;
   1569		sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
   1570	} else {
   1571		sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE2 << SEC_AUTH_OFFSET;
   1572		sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;
   1573	}
   1574	sec_sqe->type2.alen_ivllen = cpu_to_le32(c_req->c_len + aq->assoclen);
   1575
   1576	sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
   1577
   1578	sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma);
   1579}
   1580
   1581static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
   1582{
   1583	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
   1584	struct sec_sqe *sec_sqe = &req->sec_sqe;
   1585	int ret;
   1586
   1587	ret = sec_skcipher_bd_fill(ctx, req);
   1588	if (unlikely(ret)) {
   1589		dev_err(ctx->dev, "skcipher bd fill is error!\n");
   1590		return ret;
   1591	}
   1592
   1593	if (ctx->c_ctx.c_mode == SEC_CMODE_CCM ||
   1594	    ctx->c_ctx.c_mode == SEC_CMODE_GCM)
   1595		sec_auth_bd_fill_xcm(auth_ctx, req->c_req.encrypt, req, sec_sqe);
   1596	else
   1597		sec_auth_bd_fill_ex(auth_ctx, req->c_req.encrypt, req, sec_sqe);
   1598
   1599	return 0;
   1600}
   1601
   1602static void sec_auth_bd_fill_ex_v3(struct sec_auth_ctx *ctx, int dir,
   1603				   struct sec_req *req, struct sec_sqe3 *sqe3)
   1604{
   1605	struct sec_aead_req *a_req = &req->aead_req;
   1606	struct sec_cipher_req *c_req = &req->c_req;
   1607	struct aead_request *aq = a_req->aead_req;
   1608
   1609	sqe3->a_key_addr = cpu_to_le64(ctx->a_key_dma);
   1610
   1611	sqe3->auth_mac_key |=
   1612			cpu_to_le32((u32)(ctx->mac_len /
   1613			SEC_SQE_LEN_RATE) << SEC_MAC_OFFSET_V3);
   1614
   1615	sqe3->auth_mac_key |=
   1616			cpu_to_le32((u32)(ctx->a_key_len /
   1617			SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET_V3);
   1618
   1619	sqe3->auth_mac_key |=
   1620			cpu_to_le32((u32)(ctx->a_alg) << SEC_AUTH_ALG_OFFSET_V3);
   1621
   1622	if (dir) {
   1623		sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE1);
   1624		sqe3->huk_iv_seq &= SEC_CIPHER_AUTH_V3;
   1625	} else {
   1626		sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE2);
   1627		sqe3->huk_iv_seq |= SEC_AUTH_CIPHER_V3;
   1628	}
   1629	sqe3->a_len_key = cpu_to_le32(c_req->c_len + aq->assoclen);
   1630
   1631	sqe3->cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
   1632
   1633	sqe3->mac_addr = cpu_to_le64(a_req->out_mac_dma);
   1634}
   1635
   1636static int sec_aead_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
   1637{
   1638	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
   1639	struct sec_sqe3 *sec_sqe3 = &req->sec_sqe3;
   1640	int ret;
   1641
   1642	ret = sec_skcipher_bd_fill_v3(ctx, req);
   1643	if (unlikely(ret)) {
   1644		dev_err(ctx->dev, "skcipher bd3 fill is error!\n");
   1645		return ret;
   1646	}
   1647
   1648	if (ctx->c_ctx.c_mode == SEC_CMODE_CCM ||
   1649	    ctx->c_ctx.c_mode == SEC_CMODE_GCM)
   1650		sec_auth_bd_fill_xcm_v3(auth_ctx, req->c_req.encrypt,
   1651					req, sec_sqe3);
   1652	else
   1653		sec_auth_bd_fill_ex_v3(auth_ctx, req->c_req.encrypt,
   1654				       req, sec_sqe3);
   1655
   1656	return 0;
   1657}
   1658
   1659static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
   1660{
   1661	struct aead_request *a_req = req->aead_req.aead_req;
   1662	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
   1663	struct sec_aead_req *aead_req = &req->aead_req;
   1664	struct sec_cipher_req *c_req = &req->c_req;
   1665	size_t authsize = crypto_aead_authsize(tfm);
   1666	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
   1667	struct aead_request *backlog_aead_req;
   1668	struct sec_req *backlog_req;
   1669	size_t sz;
   1670
   1671	if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt)
   1672		sec_update_iv(req, SEC_AEAD);
   1673
   1674	/* Copy output mac */
   1675	if (!err && c_req->encrypt) {
   1676		struct scatterlist *sgl = a_req->dst;
   1677
   1678		sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl),
   1679					  aead_req->out_mac,
   1680					  authsize, a_req->cryptlen +
   1681					  a_req->assoclen);
   1682
   1683		if (unlikely(sz != authsize)) {
   1684			dev_err(c->dev, "copy out mac err!\n");
   1685			err = -EINVAL;
   1686		}
   1687	}
   1688
   1689	sec_free_req_id(req);
   1690
   1691	while (1) {
   1692		backlog_req = sec_back_req_clear(c, qp_ctx);
   1693		if (!backlog_req)
   1694			break;
   1695
   1696		backlog_aead_req = backlog_req->aead_req.aead_req;
   1697		backlog_aead_req->base.complete(&backlog_aead_req->base,
   1698						-EINPROGRESS);
   1699		atomic64_inc(&c->sec->debug.dfx.recv_busy_cnt);
   1700	}
   1701
   1702	a_req->base.complete(&a_req->base, err);
   1703}
   1704
   1705static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
   1706{
   1707	sec_free_req_id(req);
   1708	sec_free_queue_id(ctx, req);
   1709}
   1710
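       /* Bind the request to a queue pair and reserve a request id on it. */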
   1711static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
   1712{
   1713	struct sec_qp_ctx *qp_ctx;
   1714	int queue_id;
   1715
   1716	/* To load balance */
   1717	queue_id = sec_alloc_queue_id(ctx, req);
   1718	qp_ctx = &ctx->qp_ctx[queue_id];
   1719
   1720	req->req_id = sec_alloc_req_id(req, qp_ctx);
   1721	if (unlikely(req->req_id < 0)) {
   1722		sec_free_queue_id(ctx, req);
   1723		return req->req_id;
   1724	}
   1725
   1726	return 0;
   1727}
   1728
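       /*
        * Common request path for skcipher and AEAD: init, transfer the
        * buffers, fill the BD and send it to the hardware, rolling back
        * on failure.
        */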
   1729static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
   1730{
   1731	struct sec_cipher_req *c_req = &req->c_req;
   1732	int ret;
   1733
   1734	ret = sec_request_init(ctx, req);
   1735	if (unlikely(ret))
   1736		return ret;
   1737
   1738	ret = sec_request_transfer(ctx, req);
   1739	if (unlikely(ret))
   1740		goto err_uninit_req;
   1741
   1742	/* For CBC/CTR decryption, update the output IV before sending */
   1743	if (!req->c_req.encrypt && (ctx->c_ctx.c_mode == SEC_CMODE_CBC ||
   1744	    ctx->c_ctx.c_mode == SEC_CMODE_CTR))
   1745		sec_update_iv(req, ctx->alg_type);
   1746
   1747	ret = ctx->req_op->bd_send(ctx, req);
   1748	if (unlikely((ret != -EBUSY && ret != -EINPROGRESS) ||
   1749		(ret == -EBUSY && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
   1750		dev_err_ratelimited(ctx->dev, "send sec request failed!\n");
   1751		goto err_send_req;
   1752	}
   1753
   1754	return ret;
   1755
   1756err_send_req:
   1757	/* The send failed, so restore the user's IV from the saved copy */
   1758	if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) {
   1759		if (ctx->alg_type == SEC_SKCIPHER)
   1760			memcpy(req->c_req.sk_req->iv, c_req->c_ivin,
   1761			       ctx->c_ctx.ivsize);
   1762		else
   1763			memcpy(req->aead_req.aead_req->iv, c_req->c_ivin,
   1764			       ctx->c_ctx.ivsize);
   1765	}
   1766
   1767	sec_request_untransfer(ctx, req);
   1768err_uninit_req:
   1769	sec_request_uninit(ctx, req);
   1770	return ret;
   1771}
   1772
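       /*
        * Per-algorithm operation tables used by sec_process(); the v3
        * variants differ only in the BD fill callbacks (type 3 BDs).
        */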
   1773static const struct sec_req_op sec_skcipher_req_ops = {
   1774	.buf_map	= sec_skcipher_sgl_map,
   1775	.buf_unmap	= sec_skcipher_sgl_unmap,
   1776	.do_transfer	= sec_skcipher_copy_iv,
   1777	.bd_fill	= sec_skcipher_bd_fill,
   1778	.bd_send	= sec_bd_send,
   1779	.callback	= sec_skcipher_callback,
   1780	.process	= sec_process,
   1781};
   1782
   1783static const struct sec_req_op sec_aead_req_ops = {
   1784	.buf_map	= sec_aead_sgl_map,
   1785	.buf_unmap	= sec_aead_sgl_unmap,
   1786	.do_transfer	= sec_aead_set_iv,
   1787	.bd_fill	= sec_aead_bd_fill,
   1788	.bd_send	= sec_bd_send,
   1789	.callback	= sec_aead_callback,
   1790	.process	= sec_process,
   1791};
   1792
   1793static const struct sec_req_op sec_skcipher_req_ops_v3 = {
   1794	.buf_map	= sec_skcipher_sgl_map,
   1795	.buf_unmap	= sec_skcipher_sgl_unmap,
   1796	.do_transfer	= sec_skcipher_copy_iv,
   1797	.bd_fill	= sec_skcipher_bd_fill_v3,
   1798	.bd_send	= sec_bd_send,
   1799	.callback	= sec_skcipher_callback,
   1800	.process	= sec_process,
   1801};
   1802
   1803static const struct sec_req_op sec_aead_req_ops_v3 = {
   1804	.buf_map	= sec_aead_sgl_map,
   1805	.buf_unmap	= sec_aead_sgl_unmap,
   1806	.do_transfer	= sec_aead_set_iv,
   1807	.bd_fill	= sec_aead_bd_fill_v3,
   1808	.bd_send	= sec_bd_send,
   1809	.callback	= sec_aead_callback,
   1810	.process	= sec_process,
   1811};
   1812
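       /* Pick type 2 or type 3 BDs according to the QM hardware version. */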
   1813static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm)
   1814{
   1815	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
   1816	int ret;
   1817
   1818	ret = sec_skcipher_init(tfm);
   1819	if (ret)
   1820		return ret;
   1821
   1822	if (ctx->sec->qm.ver < QM_HW_V3) {
   1823		ctx->type_supported = SEC_BD_TYPE2;
   1824		ctx->req_op = &sec_skcipher_req_ops;
   1825	} else {
   1826		ctx->type_supported = SEC_BD_TYPE3;
   1827		ctx->req_op = &sec_skcipher_req_ops_v3;
   1828	}
   1829
   1830	return ret;
   1831}
   1832
   1833static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm)
   1834{
   1835	sec_skcipher_uninit(tfm);
   1836}
   1837
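       /*
        * Common AEAD tfm init: validate the IV size, set up the base ctx,
        * select the BD type by hardware version and init the auth and
        * cipher resources.
        */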
   1838static int sec_aead_init(struct crypto_aead *tfm)
   1839{
   1840	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
   1841	int ret;
   1842
   1843	crypto_aead_set_reqsize(tfm, sizeof(struct sec_req));
   1844	ctx->alg_type = SEC_AEAD;
   1845	ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm);
   1846	if (ctx->c_ctx.ivsize < SEC_AIV_SIZE ||
   1847	    ctx->c_ctx.ivsize > SEC_IV_SIZE) {
   1848		pr_err("invalid aead iv size!\n");
   1849		return -EINVAL;
   1850	}
   1851
   1852	ret = sec_ctx_base_init(ctx);
   1853	if (ret)
   1854		return ret;
   1855	if (ctx->sec->qm.ver < QM_HW_V3) {
   1856		ctx->type_supported = SEC_BD_TYPE2;
   1857		ctx->req_op = &sec_aead_req_ops;
   1858	} else {
   1859		ctx->type_supported = SEC_BD_TYPE3;
   1860		ctx->req_op = &sec_aead_req_ops_v3;
   1861	}
   1862
   1863	ret = sec_auth_init(ctx);
   1864	if (ret)
   1865		goto err_auth_init;
   1866
   1867	ret = sec_cipher_init(ctx);
   1868	if (ret)
   1869		goto err_cipher_init;
   1870
   1871	return ret;
   1872
   1873err_cipher_init:
   1874	sec_auth_uninit(ctx);
   1875err_auth_init:
   1876	sec_ctx_base_uninit(ctx);
   1877	return ret;
   1878}
   1879
   1880static void sec_aead_exit(struct crypto_aead *tfm)
   1881{
   1882	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
   1883
   1884	sec_cipher_uninit(ctx);
   1885	sec_auth_uninit(ctx);
   1886	sec_ctx_base_uninit(ctx);
   1887}
   1888
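       /*
        * Init for the authenc() AEADs, which additionally allocate the
        * shash used when handling the authentication key.
        */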
   1889static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
   1890{
   1891	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
   1892	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
   1893	int ret;
   1894
   1895	ret = sec_aead_init(tfm);
   1896	if (ret) {
   1897		pr_err("hisi_sec2: aead init error!\n");
   1898		return ret;
   1899	}
   1900
   1901	auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
   1902	if (IS_ERR(auth_ctx->hash_tfm)) {
   1903		dev_err(ctx->dev, "aead alloc shash error!\n");
   1904		sec_aead_exit(tfm);
   1905		return PTR_ERR(auth_ctx->hash_tfm);
   1906	}
   1907
   1908	return 0;
   1909}
   1910
   1911static void sec_aead_ctx_exit(struct crypto_aead *tfm)
   1912{
   1913	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
   1914
   1915	crypto_free_shash(ctx->a_ctx.hash_tfm);
   1916	sec_aead_exit(tfm);
   1917}
   1918
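       /*
        * Init for CCM/GCM: also allocate a fallback AEAD tfm for requests
        * the hardware cannot handle (see sec_aead_param_check()).
        */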
   1919static int sec_aead_xcm_ctx_init(struct crypto_aead *tfm)
   1920{
   1921	struct aead_alg *alg = crypto_aead_alg(tfm);
   1922	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
   1923	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
   1924	const char *aead_name = alg->base.cra_name;
   1925	int ret;
   1926
   1927	ret = sec_aead_init(tfm);
   1928	if (ret) {
   1929		dev_err(ctx->dev, "hisi_sec2: aead xcm init error!\n");
   1930		return ret;
   1931	}
   1932
   1933	a_ctx->fallback_aead_tfm = crypto_alloc_aead(aead_name, 0,
   1934						     CRYPTO_ALG_NEED_FALLBACK |
   1935						     CRYPTO_ALG_ASYNC);
   1936	if (IS_ERR(a_ctx->fallback_aead_tfm)) {
   1937		dev_err(ctx->dev, "aead driver alloc fallback tfm error!\n");
   1938		sec_aead_exit(tfm);
   1939		return PTR_ERR(a_ctx->fallback_aead_tfm);
   1940	}
   1941	a_ctx->fallback = false;
   1942
   1943	return 0;
   1944}
   1945
   1946static void sec_aead_xcm_ctx_exit(struct crypto_aead *tfm)
   1947{
   1948	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
   1949
   1950	crypto_free_aead(ctx->a_ctx.fallback_aead_tfm);
   1951	sec_aead_exit(tfm);
   1952}
   1953
   1954static int sec_aead_sha1_ctx_init(struct crypto_aead *tfm)
   1955{
   1956	return sec_aead_ctx_init(tfm, "sha1");
   1957}
   1958
   1959static int sec_aead_sha256_ctx_init(struct crypto_aead *tfm)
   1960{
   1961	return sec_aead_ctx_init(tfm, "sha256");
   1962}
   1963
   1964static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm)
   1965{
   1966	return sec_aead_ctx_init(tfm, "sha512");
   1967}
   1968
   1969
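       /* Check cryptlen against the length rules of the selected cipher mode. */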
   1970static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx,
   1971				       struct sec_req *sreq)
   1972{
   1973	u32 cryptlen = sreq->c_req.sk_req->cryptlen;
   1974	struct device *dev = ctx->dev;
   1975	u8 c_mode = ctx->c_ctx.c_mode;
   1976	int ret = 0;
   1977
   1978	switch (c_mode) {
   1979	case SEC_CMODE_XTS:
   1980		if (unlikely(cryptlen < AES_BLOCK_SIZE)) {
   1981			dev_err(dev, "skcipher XTS mode input length error!\n");
   1982			ret = -EINVAL;
   1983		}
   1984		break;
   1985	case SEC_CMODE_ECB:
   1986	case SEC_CMODE_CBC:
   1987		if (unlikely(cryptlen & (AES_BLOCK_SIZE - 1))) {
   1988			dev_err(dev, "skcipher AES input length error!\n");
   1989			ret = -EINVAL;
   1990		}
   1991		break;
   1992	case SEC_CMODE_CFB:
   1993	case SEC_CMODE_OFB:
   1994	case SEC_CMODE_CTR:
   1995		if (unlikely(ctx->sec->qm.ver < QM_HW_V3)) {
   1996			dev_err(dev, "skcipher HW version error!\n");
   1997			ret = -EINVAL;
   1998		}
   1999		break;
   2000	default:
   2001		ret = -EINVAL;
   2002	}
   2003
   2004	return ret;
   2005}
   2006
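       /*
        * Validate the skcipher request (src/dst, length limits, per-mode
        * length rules) and decide whether the small-packet pbuf path is used.
        */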
   2007static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
   2008{
   2009	struct skcipher_request *sk_req = sreq->c_req.sk_req;
   2010	struct device *dev = ctx->dev;
   2011	u8 c_alg = ctx->c_ctx.c_alg;
   2012
   2013	if (unlikely(!sk_req->src || !sk_req->dst ||
   2014		     sk_req->cryptlen > MAX_INPUT_DATA_LEN)) {
   2015		dev_err(dev, "skcipher input param error!\n");
   2016		return -EINVAL;
   2017	}
   2018	sreq->c_req.c_len = sk_req->cryptlen;
   2019
   2020	if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ)
   2021		sreq->use_pbuf = true;
   2022	else
   2023		sreq->use_pbuf = false;
   2024
   2025	if (c_alg == SEC_CALG_3DES) {
   2026		if (unlikely(sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1))) {
   2027			dev_err(dev, "skcipher 3des input length error!\n");
   2028			return -EINVAL;
   2029		}
   2030		return 0;
   2031	} else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) {
   2032		return sec_skcipher_cryptlen_check(ctx, sreq);
   2033	}
   2034
   2035	dev_err(dev, "skcipher algorithm error!\n");
   2036
   2037	return -EINVAL;
   2038}
   2039
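       /* Software fallback using the pre-allocated synchronous skcipher tfm. */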
   2040static int sec_skcipher_soft_crypto(struct sec_ctx *ctx,
   2041				    struct skcipher_request *sreq, bool encrypt)
   2042{
   2043	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
   2044	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, c_ctx->fbtfm);
   2045	struct device *dev = ctx->dev;
   2046	int ret;
   2047
   2048	if (!c_ctx->fbtfm) {
   2049		dev_err_ratelimited(dev, "the soft tfm isn't supported in the current system.\n");
   2050		return -EINVAL;
   2051	}
   2052
   2053	skcipher_request_set_sync_tfm(subreq, c_ctx->fbtfm);
   2054
   2055	/* The software fallback is done synchronously */
   2056	skcipher_request_set_callback(subreq, sreq->base.flags,
   2057				      NULL, NULL);
   2058	skcipher_request_set_crypt(subreq, sreq->src, sreq->dst,
   2059				   sreq->cryptlen, sreq->iv);
   2060	if (encrypt)
   2061		ret = crypto_skcipher_encrypt(subreq);
   2062	else
   2063		ret = crypto_skcipher_decrypt(subreq);
   2064
   2065	skcipher_request_zero(subreq);
   2066
   2067	return ret;
   2068}
   2069
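       /*
        * skcipher encrypt/decrypt entry point: zero-length requests complete
        * immediately (and are rejected for XTS); otherwise the request is
        * validated and handled by the hardware or the software fallback.
        */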
   2070static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
   2071{
   2072	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
   2073	struct sec_req *req = skcipher_request_ctx(sk_req);
   2074	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
   2075	int ret;
   2076
   2077	if (!sk_req->cryptlen) {
   2078		if (ctx->c_ctx.c_mode == SEC_CMODE_XTS)
   2079			return -EINVAL;
   2080		return 0;
   2081	}
   2082
   2083	req->flag = sk_req->base.flags;
   2084	req->c_req.sk_req = sk_req;
   2085	req->c_req.encrypt = encrypt;
   2086	req->ctx = ctx;
   2087
   2088	ret = sec_skcipher_param_check(ctx, req);
   2089	if (unlikely(ret))
   2090		return -EINVAL;
   2091
   2092	if (unlikely(ctx->c_ctx.fallback))
   2093		return sec_skcipher_soft_crypto(ctx, sk_req, encrypt);
   2094
   2095	return ctx->req_op->process(ctx, req);
   2096}
   2097
   2098static int sec_skcipher_encrypt(struct skcipher_request *sk_req)
   2099{
   2100	return sec_skcipher_crypto(sk_req, true);
   2101}
   2102
   2103static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
   2104{
   2105	return sec_skcipher_crypto(sk_req, false);
   2106}
   2107
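       /* Template for the skcipher algorithms registered by this driver. */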
   2108#define SEC_SKCIPHER_GEN_ALG(sec_cra_name, sec_set_key, sec_min_key_size, \
   2109	sec_max_key_size, ctx_init, ctx_exit, blk_size, iv_size)\
   2110{\
   2111	.base = {\
   2112		.cra_name = sec_cra_name,\
   2113		.cra_driver_name = "hisi_sec_"sec_cra_name,\
   2114		.cra_priority = SEC_PRIORITY,\
   2115		.cra_flags = CRYPTO_ALG_ASYNC |\
   2116		 CRYPTO_ALG_NEED_FALLBACK,\
   2117		.cra_blocksize = blk_size,\
   2118		.cra_ctxsize = sizeof(struct sec_ctx),\
   2119		.cra_module = THIS_MODULE,\
   2120	},\
   2121	.init = ctx_init,\
   2122	.exit = ctx_exit,\
   2123	.setkey = sec_set_key,\
   2124	.decrypt = sec_skcipher_decrypt,\
   2125	.encrypt = sec_skcipher_encrypt,\
   2126	.min_keysize = sec_min_key_size,\
   2127	.max_keysize = sec_max_key_size,\
   2128	.ivsize = iv_size,\
   2129},
   2130
   2131#define SEC_SKCIPHER_ALG(name, key_func, min_key_size, \
   2132	max_key_size, blk_size, iv_size) \
   2133	SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \
   2134	sec_skcipher_ctx_init, sec_skcipher_ctx_exit, blk_size, iv_size)
   2135
   2136static struct skcipher_alg sec_skciphers[] = {
   2137	SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb,
   2138			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
   2139			 AES_BLOCK_SIZE, 0)
   2140
   2141	SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc,
   2142			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
   2143			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)
   2144
   2145	SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts,
   2146			 SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MAX_KEY_SIZE,
   2147			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)
   2148
   2149	SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb,
   2150			 SEC_DES3_3KEY_SIZE, SEC_DES3_3KEY_SIZE,
   2151			 DES3_EDE_BLOCK_SIZE, 0)
   2152
   2153	SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc,
   2154			 SEC_DES3_3KEY_SIZE, SEC_DES3_3KEY_SIZE,
   2155			 DES3_EDE_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE)
   2156
   2157	SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts,
   2158			 SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MIN_KEY_SIZE,
   2159			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)
   2160
   2161	SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc,
   2162			 AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
   2163			 AES_BLOCK_SIZE, AES_BLOCK_SIZE)
   2164};
   2165
   2166static struct skcipher_alg sec_skciphers_v3[] = {
   2167	SEC_SKCIPHER_ALG("ofb(aes)", sec_setkey_aes_ofb,
   2168			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
   2169			 SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
   2170
   2171	SEC_SKCIPHER_ALG("cfb(aes)", sec_setkey_aes_cfb,
   2172			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
   2173			 SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
   2174
   2175	SEC_SKCIPHER_ALG("ctr(aes)", sec_setkey_aes_ctr,
   2176			 AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
   2177			 SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
   2178
   2179	SEC_SKCIPHER_ALG("ofb(sm4)", sec_setkey_sm4_ofb,
   2180			 AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
   2181			 SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
   2182
   2183	SEC_SKCIPHER_ALG("cfb(sm4)", sec_setkey_sm4_cfb,
   2184			 AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
   2185			 SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
   2186
   2187	SEC_SKCIPHER_ALG("ctr(sm4)", sec_setkey_sm4_ctr,
   2188			 AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
   2189			 SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
   2190};
   2191
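       /*
        * CCM IV check: the first IV byte encodes the length-field size L;
        * make sure L is in range and, for small L, that cryptlen fits in
        * L bytes.
        */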
   2192static int aead_iv_dimension_check(struct aead_request *aead_req)
   2193{
   2194	u8 cl;
   2195
   2196	cl = aead_req->iv[0] + 1;
   2197	if (cl < IV_CL_MIN || cl > IV_CL_MAX)
   2198		return -EINVAL;
   2199
   2200	if (cl < IV_CL_MID && aead_req->cryptlen >> (BYTE_BITS * cl))
   2201		return -EOVERFLOW;
   2202
   2203	return 0;
   2204}
   2205
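       /*
        * AEAD request limits: overall and AAD lengths, per-mode MAC length
        * rules, CCM IV layout and CBC block alignment of the cipher length.
        */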
   2206static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
   2207{
   2208	struct aead_request *req = sreq->aead_req.aead_req;
   2209	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
   2210	size_t authsize = crypto_aead_authsize(tfm);
   2211	u8 c_mode = ctx->c_ctx.c_mode;
   2212	struct device *dev = ctx->dev;
   2213	int ret;
   2214
   2215	if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
   2216	    req->assoclen > SEC_MAX_AAD_LEN)) {
   2217		dev_err(dev, "aead input spec error!\n");
   2218		return -EINVAL;
   2219	}
   2220
   2221	if (unlikely((c_mode == SEC_CMODE_GCM && authsize < DES_BLOCK_SIZE) ||
   2222	   (c_mode == SEC_CMODE_CCM && (authsize < MIN_MAC_LEN ||
   2223		authsize & MAC_LEN_MASK)))) {
   2224		dev_err(dev, "aead input mac length error!\n");
   2225		return -EINVAL;
   2226	}
   2227
   2228	if (c_mode == SEC_CMODE_CCM) {
   2229		if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN)) {
   2230			dev_err_ratelimited(dev, "CCM input aad parameter is too long!\n");
   2231			return -EINVAL;
   2232		}
   2233		ret = aead_iv_dimension_check(req);
   2234		if (ret) {
   2235			dev_err(dev, "aead input iv param error!\n");
   2236			return ret;
   2237		}
   2238	}
   2239
   2240	if (sreq->c_req.encrypt)
   2241		sreq->c_req.c_len = req->cryptlen;
   2242	else
   2243		sreq->c_req.c_len = req->cryptlen - authsize;
   2244	if (c_mode == SEC_CMODE_CBC) {
   2245		if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
   2246			dev_err(dev, "aead crypto length error!\n");
   2247			return -EINVAL;
   2248		}
   2249	}
   2250
   2251	return 0;
   2252}
   2253
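       /*
        * Validate the AEAD request; on HW v2 the zero-length corner cases
        * are flagged for the software fallback. Also selects the pbuf path
        * for small packets.
        */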
   2254static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
   2255{
   2256	struct aead_request *req = sreq->aead_req.aead_req;
   2257	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
   2258	size_t authsize = crypto_aead_authsize(tfm);
   2259	struct device *dev = ctx->dev;
   2260	u8 c_alg = ctx->c_ctx.c_alg;
   2261
   2262	if (unlikely(!req->src || !req->dst)) {
   2263		dev_err(dev, "aead input param error!\n");
   2264		return -EINVAL;
   2265	}
   2266
   2267	if (ctx->sec->qm.ver == QM_HW_V2) {
   2268		if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt &&
   2269		    req->cryptlen <= authsize))) {
   2270			ctx->a_ctx.fallback = true;
   2271			return -EINVAL;
   2272		}
   2273	}
   2274
   2275	/* Support AES or SM4 */
   2276	if (unlikely(c_alg != SEC_CALG_AES && c_alg != SEC_CALG_SM4)) {
   2277		dev_err(dev, "aead crypto alg error!\n");
   2278		return -EINVAL;
   2279	}
   2280
   2281	if (unlikely(sec_aead_spec_check(ctx, sreq)))
   2282		return -EINVAL;
   2283
   2284	if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <=
   2285		SEC_PBUF_SZ)
   2286		sreq->use_pbuf = true;
   2287	else
   2288		sreq->use_pbuf = false;
   2289
   2290	return 0;
   2291}
   2292
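       /* Hand the request over to the fallback AEAD implementation. */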
   2293static int sec_aead_soft_crypto(struct sec_ctx *ctx,
   2294				struct aead_request *aead_req,
   2295				bool encrypt)
   2296{
   2297	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
   2298	struct device *dev = ctx->dev;
   2299	struct aead_request *subreq;
   2300	int ret;
   2301
   2302	/* Kunpeng920 AEAD mode does not support zero-length input */
   2303	if (!a_ctx->fallback_aead_tfm) {
   2304		dev_err(dev, "aead fallback tfm is NULL!\n");
   2305		return -EINVAL;
   2306	}
   2307
   2308	subreq = aead_request_alloc(a_ctx->fallback_aead_tfm, GFP_KERNEL);
   2309	if (!subreq)
   2310		return -ENOMEM;
   2311
   2312	aead_request_set_tfm(subreq, a_ctx->fallback_aead_tfm);
   2313	aead_request_set_callback(subreq, aead_req->base.flags,
   2314				  aead_req->base.complete, aead_req->base.data);
   2315	aead_request_set_crypt(subreq, aead_req->src, aead_req->dst,
   2316			       aead_req->cryptlen, aead_req->iv);
   2317	aead_request_set_ad(subreq, aead_req->assoclen);
   2318
   2319	if (encrypt)
   2320		ret = crypto_aead_encrypt(subreq);
   2321	else
   2322		ret = crypto_aead_decrypt(subreq);
   2323	aead_request_free(subreq);
   2324
   2325	return ret;
   2326}
   2327
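       /*
        * AEAD encrypt/decrypt entry point: validate the request and process
        * it in hardware, or use the software fallback when the parameter
        * check asks for it.
        */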
   2328static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
   2329{
   2330	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
   2331	struct sec_req *req = aead_request_ctx(a_req);
   2332	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
   2333	int ret;
   2334
   2335	req->flag = a_req->base.flags;
   2336	req->aead_req.aead_req = a_req;
   2337	req->c_req.encrypt = encrypt;
   2338	req->ctx = ctx;
   2339
   2340	ret = sec_aead_param_check(ctx, req);
   2341	if (unlikely(ret)) {
   2342		if (ctx->a_ctx.fallback)
   2343			return sec_aead_soft_crypto(ctx, a_req, encrypt);
   2344		return -EINVAL;
   2345	}
   2346
   2347	return ctx->req_op->process(ctx, req);
   2348}
   2349
   2350static int sec_aead_encrypt(struct aead_request *a_req)
   2351{
   2352	return sec_aead_crypto(a_req, true);
   2353}
   2354
   2355static int sec_aead_decrypt(struct aead_request *a_req)
   2356{
   2357	return sec_aead_crypto(a_req, false);
   2358}
   2359
   2360#define SEC_AEAD_ALG(sec_cra_name, sec_set_key, ctx_init,\
   2361			 ctx_exit, blk_size, iv_size, max_authsize)\
   2362{\
   2363	.base = {\
   2364		.cra_name = sec_cra_name,\
   2365		.cra_driver_name = "hisi_sec_"sec_cra_name,\
   2366		.cra_priority = SEC_PRIORITY,\
   2367		.cra_flags = CRYPTO_ALG_ASYNC |\
   2368		 CRYPTO_ALG_NEED_FALLBACK,\
   2369		.cra_blocksize = blk_size,\
   2370		.cra_ctxsize = sizeof(struct sec_ctx),\
   2371		.cra_module = THIS_MODULE,\
   2372	},\
   2373	.init = ctx_init,\
   2374	.exit = ctx_exit,\
   2375	.setkey = sec_set_key,\
   2376	.setauthsize = sec_aead_setauthsize,\
   2377	.decrypt = sec_aead_decrypt,\
   2378	.encrypt = sec_aead_encrypt,\
   2379	.ivsize = iv_size,\
   2380	.maxauthsize = max_authsize,\
   2381}
   2382
   2383static struct aead_alg sec_aeads[] = {
   2384	SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))",
   2385		     sec_setkey_aes_cbc_sha1, sec_aead_sha1_ctx_init,
   2386		     sec_aead_ctx_exit, AES_BLOCK_SIZE,
   2387		     AES_BLOCK_SIZE, SHA1_DIGEST_SIZE),
   2388
   2389	SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))",
   2390		     sec_setkey_aes_cbc_sha256, sec_aead_sha256_ctx_init,
   2391		     sec_aead_ctx_exit, AES_BLOCK_SIZE,
   2392		     AES_BLOCK_SIZE, SHA256_DIGEST_SIZE),
   2393
   2394	SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))",
   2395		     sec_setkey_aes_cbc_sha512, sec_aead_sha512_ctx_init,
   2396		     sec_aead_ctx_exit, AES_BLOCK_SIZE,
   2397		     AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
   2398
   2399	SEC_AEAD_ALG("ccm(aes)", sec_setkey_aes_ccm, sec_aead_xcm_ctx_init,
   2400		     sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
   2401		     AES_BLOCK_SIZE, AES_BLOCK_SIZE),
   2402
   2403	SEC_AEAD_ALG("gcm(aes)", sec_setkey_aes_gcm, sec_aead_xcm_ctx_init,
   2404		     sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
   2405		     SEC_AIV_SIZE, AES_BLOCK_SIZE)
   2406};
   2407
   2408static struct aead_alg sec_aeads_v3[] = {
   2409	SEC_AEAD_ALG("ccm(sm4)", sec_setkey_sm4_ccm, sec_aead_xcm_ctx_init,
   2410		     sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
   2411		     AES_BLOCK_SIZE, AES_BLOCK_SIZE),
   2412
   2413	SEC_AEAD_ALG("gcm(sm4)", sec_setkey_sm4_gcm, sec_aead_xcm_ctx_init,
   2414		     sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
   2415		     SEC_AIV_SIZE, AES_BLOCK_SIZE)
   2416};
   2417
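       /*
        * Register the algorithm tables with the crypto API; the v3-only
        * algorithms (AES/SM4 CTR, OFB, CFB and SM4 CCM/GCM) are registered
        * only on QM hardware newer than v2.
        */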
   2418int sec_register_to_crypto(struct hisi_qm *qm)
   2419{
   2420	int ret;
   2421
   2422	/* To avoid repeated registration */
   2423	ret = crypto_register_skciphers(sec_skciphers,
   2424					ARRAY_SIZE(sec_skciphers));
   2425	if (ret)
   2426		return ret;
   2427
   2428	if (qm->ver > QM_HW_V2) {
   2429		ret = crypto_register_skciphers(sec_skciphers_v3,
   2430						ARRAY_SIZE(sec_skciphers_v3));
   2431		if (ret)
   2432			goto reg_skcipher_fail;
   2433	}
   2434
   2435	ret = crypto_register_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
   2436	if (ret)
   2437		goto reg_aead_fail;
   2438	if (qm->ver > QM_HW_V2) {
   2439		ret = crypto_register_aeads(sec_aeads_v3, ARRAY_SIZE(sec_aeads_v3));
   2440		if (ret)
   2441			goto reg_aead_v3_fail;
   2442	}
   2443	return ret;
   2444
   2445reg_aead_v3_fail:
   2446	crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
   2447reg_aead_fail:
   2448	if (qm->ver > QM_HW_V2)
   2449		crypto_unregister_skciphers(sec_skciphers_v3,
   2450					    ARRAY_SIZE(sec_skciphers_v3));
   2451reg_skcipher_fail:
   2452	crypto_unregister_skciphers(sec_skciphers,
   2453				    ARRAY_SIZE(sec_skciphers));
   2454	return ret;
   2455}
   2456
   2457void sec_unregister_from_crypto(struct hisi_qm *qm)
   2458{
   2459	if (qm->ver > QM_HW_V2)
   2460		crypto_unregister_aeads(sec_aeads_v3,
   2461					ARRAY_SIZE(sec_aeads_v3));
   2462	crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
   2463
   2464	if (qm->ver > QM_HW_V2)
   2465		crypto_unregister_skciphers(sec_skciphers_v3,
   2466					    ARRAY_SIZE(sec_skciphers_v3));
   2467	crypto_unregister_skciphers(sec_skciphers,
   2468				    ARRAY_SIZE(sec_skciphers));
   2469}