cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

common.c (16759B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>

#include "cipher.h"
#include "common.h"
#include "core.h"
#include "regs-v5.h"
#include "sha.h"
#include "aead.h"

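/*
 * Thin MMIO accessors: every QCE control/status register below is
 * addressed relative to the device's mapped register base.
 */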
static inline u32 qce_read(struct qce_device *qce, u32 offset)
{
        return readl(qce->base + offset);
}

static inline void qce_write(struct qce_device *qce, u32 offset, u32 val)
{
        writel(val, qce->base + offset);
}

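/*
 * The two helpers below write (or zero) a run of consecutive 32-bit
 * registers starting at @offset; note that @len counts words, not bytes.
 */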
static inline void qce_write_array(struct qce_device *qce, u32 offset,
                                   const u32 *val, unsigned int len)
{
        int i;

        for (i = 0; i < len; i++)
                qce_write(qce, offset + i * sizeof(u32), val[i]);
}

static inline void
qce_clear_array(struct qce_device *qce, u32 offset, unsigned int len)
{
        int i;

        for (i = 0; i < len; i++)
                qce_write(qce, offset + i * sizeof(u32), 0);
}

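/*
 * Compose the CONFIG register value: the DMA request size in beats, all
 * engine interrupts masked (completion is observed via the DMA channel
 * instead), the assigned crypto pipe pair, and optionally little-endian
 * data mode. Note that the HIGH_SPD_EN_N clear below masks with the raw
 * shift constant rather than BIT(); this is kept as in the upstream driver.
 */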
static u32 qce_config_reg(struct qce_device *qce, int little)
{
        u32 beats = (qce->burst_size >> 3) - 1;
        u32 pipe_pair = qce->pipe_pair_id;
        u32 config;

        config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK;
        config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) |
                  BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT);
        config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK;
        config &= ~HIGH_SPD_EN_N_SHIFT;

        if (little)
                config |= BIT(LITTLE_ENDIAN_MODE_SHIFT);

        return config;
}

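/*
 * Copy @len bytes from @src to @dst, converting each 32-bit word to
 * big-endian byte order; trailing bytes beyond a multiple of four are
 * ignored.
 */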
void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len)
{
        __be32 *d = dst;
        const u8 *s = src;
        unsigned int n;

        n = len / sizeof(u32);
        for (; n > 0; n--) {
                *d = cpu_to_be32p((const __u32 *) s);
                s += sizeof(__u32);
                d++;
        }
}

static void qce_setup_config(struct qce_device *qce)
{
        u32 config;

        /* get big endianness */
        config = qce_config_reg(qce, 0);

        /* clear status */
        qce_write(qce, REG_STATUS, 0);
        qce_write(qce, REG_CONFIG, config);
}

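/*
 * Kick off the programmed operation by writing GOPROC; @result_dump
 * additionally requests that the engine dump its results on completion.
 */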
static inline void qce_crypto_go(struct qce_device *qce, bool result_dump)
{
        if (result_dump)
                qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT));
        else
                qce_write(qce, REG_GOPROC, BIT(GO_SHIFT));
}

#if defined(CONFIG_CRYPTO_DEV_QCE_SHA) || defined(CONFIG_CRYPTO_DEV_QCE_AEAD)
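/*
 * Build the AUTH_SEG_CFG value from the request flags: the hash/MAC
 * algorithm (SHA vs AES based), key and digest sizes, the mode (plain
 * hash, HMAC, CCM or CMAC), the authentication position, and the CCM
 * nonce word count.
 */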
static u32 qce_auth_cfg(unsigned long flags, u32 key_size, u32 auth_size)
{
        u32 cfg = 0;

        if (IS_CCM(flags) || IS_CMAC(flags))
                cfg |= AUTH_ALG_AES << AUTH_ALG_SHIFT;
        else
                cfg |= AUTH_ALG_SHA << AUTH_ALG_SHIFT;

        if (IS_CCM(flags) || IS_CMAC(flags)) {
                if (key_size == AES_KEYSIZE_128)
                        cfg |= AUTH_KEY_SZ_AES128 << AUTH_KEY_SIZE_SHIFT;
                else if (key_size == AES_KEYSIZE_256)
                        cfg |= AUTH_KEY_SZ_AES256 << AUTH_KEY_SIZE_SHIFT;
        }

        if (IS_SHA1(flags) || IS_SHA1_HMAC(flags))
                cfg |= AUTH_SIZE_SHA1 << AUTH_SIZE_SHIFT;
        else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags))
                cfg |= AUTH_SIZE_SHA256 << AUTH_SIZE_SHIFT;
        else if (IS_CMAC(flags))
                cfg |= AUTH_SIZE_ENUM_16_BYTES << AUTH_SIZE_SHIFT;
        else if (IS_CCM(flags))
                cfg |= (auth_size - 1) << AUTH_SIZE_SHIFT;

        if (IS_SHA1(flags) || IS_SHA256(flags))
                cfg |= AUTH_MODE_HASH << AUTH_MODE_SHIFT;
        else if (IS_SHA1_HMAC(flags) || IS_SHA256_HMAC(flags))
                cfg |= AUTH_MODE_HMAC << AUTH_MODE_SHIFT;
        else if (IS_CCM(flags))
                cfg |= AUTH_MODE_CCM << AUTH_MODE_SHIFT;
        else if (IS_CMAC(flags))
                cfg |= AUTH_MODE_CMAC << AUTH_MODE_SHIFT;

        if (IS_SHA(flags) || IS_SHA_HMAC(flags))
                cfg |= AUTH_POS_BEFORE << AUTH_POS_SHIFT;

        if (IS_CCM(flags))
                cfg |= QCE_MAX_NONCE_WORDS << AUTH_NONCE_NUM_WORDS_SHIFT;

        return cfg;
}
#endif

#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
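/*
 * Program the register file for a hash request: reject partial blocks
 * unless this is the final update, load the HMAC/CMAC key, seed the
 * AUTH_IV registers from the initial or intermediate digest, restore the
 * running byte count for continued hashing, then set the segment
 * geometry and start the engine.
 */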
static int qce_setup_regs_ahash(struct crypto_async_request *async_req)
{
        struct ahash_request *req = ahash_request_cast(async_req);
        struct crypto_ahash *ahash = __crypto_ahash_cast(async_req->tfm);
        struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
        struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
        struct qce_device *qce = tmpl->qce;
        unsigned int digestsize = crypto_ahash_digestsize(ahash);
        unsigned int blocksize = crypto_tfm_alg_blocksize(async_req->tfm);
        __be32 auth[SHA256_DIGEST_SIZE / sizeof(__be32)] = {0};
        __be32 mackey[QCE_SHA_HMAC_KEY_SIZE / sizeof(__be32)] = {0};
        u32 auth_cfg = 0, config;
        unsigned int iv_words;

        /* if not the last, the size has to be on the block boundary */
        if (!rctx->last_blk && req->nbytes % blocksize)
                return -EINVAL;

        qce_setup_config(qce);

        if (IS_CMAC(rctx->flags)) {
                qce_write(qce, REG_AUTH_SEG_CFG, 0);
                qce_write(qce, REG_ENCR_SEG_CFG, 0);
                qce_write(qce, REG_ENCR_SEG_SIZE, 0);
                qce_clear_array(qce, REG_AUTH_IV0, 16);
                qce_clear_array(qce, REG_AUTH_KEY0, 16);
                qce_clear_array(qce, REG_AUTH_BYTECNT0, 4);

                auth_cfg = qce_auth_cfg(rctx->flags, rctx->authklen, digestsize);
        }

        if (IS_SHA_HMAC(rctx->flags) || IS_CMAC(rctx->flags)) {
                u32 authkey_words = rctx->authklen / sizeof(u32);

                qce_cpu_to_be32p_array(mackey, rctx->authkey, rctx->authklen);
                qce_write_array(qce, REG_AUTH_KEY0, (u32 *)mackey,
                                authkey_words);
        }

        if (IS_CMAC(rctx->flags))
                goto go_proc;

        if (rctx->first_blk)
                memcpy(auth, rctx->digest, digestsize);
        else
                qce_cpu_to_be32p_array(auth, rctx->digest, digestsize);

        iv_words = (IS_SHA1(rctx->flags) || IS_SHA1_HMAC(rctx->flags)) ? 5 : 8;
        qce_write_array(qce, REG_AUTH_IV0, (u32 *)auth, iv_words);

        if (rctx->first_blk)
                qce_clear_array(qce, REG_AUTH_BYTECNT0, 4);
        else
                qce_write_array(qce, REG_AUTH_BYTECNT0,
                                (u32 *)rctx->byte_count, 2);

        auth_cfg = qce_auth_cfg(rctx->flags, 0, digestsize);

        if (rctx->last_blk)
                auth_cfg |= BIT(AUTH_LAST_SHIFT);
        else
                auth_cfg &= ~BIT(AUTH_LAST_SHIFT);

        if (rctx->first_blk)
                auth_cfg |= BIT(AUTH_FIRST_SHIFT);
        else
                auth_cfg &= ~BIT(AUTH_FIRST_SHIFT);

go_proc:
        qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg);
        qce_write(qce, REG_AUTH_SEG_SIZE, req->nbytes);
        qce_write(qce, REG_AUTH_SEG_START, 0);
        qce_write(qce, REG_ENCR_SEG_CFG, 0);
        qce_write(qce, REG_SEG_SIZE, req->nbytes);

        /* get little endianness */
        config = qce_config_reg(qce, 1);
        qce_write(qce, REG_CONFIG, config);

        qce_crypto_go(qce, true);

        return 0;
}
#endif

#if defined(CONFIG_CRYPTO_DEV_QCE_SKCIPHER) || defined(CONFIG_CRYPTO_DEV_QCE_AEAD)
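/*
 * Build the ENCR_SEG_CFG value: cipher algorithm (AES, DES or 3DES), key
 * size, and block mode; returns ~0 for an unsupported mode.
 */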
static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
{
        u32 cfg = 0;

        if (IS_AES(flags)) {
                if (aes_key_size == AES_KEYSIZE_128)
                        cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT;
                else if (aes_key_size == AES_KEYSIZE_256)
                        cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT;
        }

        if (IS_AES(flags))
                cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT;
        else if (IS_DES(flags) || IS_3DES(flags))
                cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT;

        if (IS_DES(flags))
                cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT;

        if (IS_3DES(flags))
                cfg |= ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT;

        switch (flags & QCE_MODE_MASK) {
        case QCE_MODE_ECB:
                cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT;
                break;
        case QCE_MODE_CBC:
                cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT;
                break;
        case QCE_MODE_CTR:
                cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT;
                break;
        case QCE_MODE_XTS:
                cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT;
                break;
        case QCE_MODE_CCM:
                cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT;
                cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT;
                break;
        default:
                return ~0;
        }

        return cfg;
}
#endif

#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
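/*
 * The engine expects the XTS tweak right-aligned and byte-reversed:
 * mirror the @ivsize IV bytes into the tail of a zeroed 16-byte buffer
 * before the big-endian conversion.
 */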
static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
{
        u8 swap[QCE_AES_IV_LENGTH];
        u32 i, j;

        if (ivsize > QCE_AES_IV_LENGTH)
                return;

        memset(swap, 0, QCE_AES_IV_LENGTH);

        for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1;
             i < QCE_AES_IV_LENGTH; i++, j--)
                swap[i] = src[j];

        qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH);
}

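/*
 * Program the second half of the combined XTS key into the ENCR_XTS_KEY
 * registers and set the data unit size.
 */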
static void qce_xtskey(struct qce_device *qce, const u8 *enckey,
                       unsigned int enckeylen, unsigned int cryptlen)
{
        u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
        unsigned int xtsklen = enckeylen / (2 * sizeof(u32));

        qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2,
                               enckeylen / 2);
        qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen);

        /* Set the data unit size to cryptlen. Anything else causes the
         * crypto engine to return incorrect results.
         */
        qce_write(qce, REG_ENCR_XTS_DU_SIZE, cryptlen);
}

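/*
 * Program the register file for an skcipher request: load the cipher key
 * (only the first half for XTS), the IV for non-ECB modes and the counter
 * mask for CTR, set the direction and segment sizes, then start the
 * engine.
 */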
static int qce_setup_regs_skcipher(struct crypto_async_request *async_req)
{
        struct skcipher_request *req = skcipher_request_cast(async_req);
        struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
        struct qce_cipher_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
        struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
        struct qce_device *qce = tmpl->qce;
        __be32 enckey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(__be32)] = {0};
        __be32 enciv[QCE_MAX_IV_SIZE / sizeof(__be32)] = {0};
        unsigned int enckey_words, enciv_words;
        unsigned int keylen;
        u32 encr_cfg = 0, auth_cfg = 0, config;
        unsigned int ivsize = rctx->ivsize;
        unsigned long flags = rctx->flags;

        qce_setup_config(qce);

        if (IS_XTS(flags))
                keylen = ctx->enc_keylen / 2;
        else
                keylen = ctx->enc_keylen;

        qce_cpu_to_be32p_array(enckey, ctx->enc_key, keylen);
        enckey_words = keylen / sizeof(u32);

        qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg);

        encr_cfg = qce_encr_cfg(flags, keylen);

        if (IS_DES(flags)) {
                enciv_words = 2;
                enckey_words = 2;
        } else if (IS_3DES(flags)) {
                enciv_words = 2;
                enckey_words = 6;
        } else if (IS_AES(flags)) {
                if (IS_XTS(flags))
                        qce_xtskey(qce, ctx->enc_key, ctx->enc_keylen,
                                   rctx->cryptlen);
                enciv_words = 4;
        } else {
                return -EINVAL;
        }

        qce_write_array(qce, REG_ENCR_KEY0, (u32 *)enckey, enckey_words);

        if (!IS_ECB(flags)) {
                if (IS_XTS(flags))
                        qce_xts_swapiv(enciv, rctx->iv, ivsize);
                else
                        qce_cpu_to_be32p_array(enciv, rctx->iv, ivsize);

                qce_write_array(qce, REG_CNTR0_IV0, (u32 *)enciv, enciv_words);
        }

        if (IS_ENCRYPT(flags))
                encr_cfg |= BIT(ENCODE_SHIFT);

        qce_write(qce, REG_ENCR_SEG_CFG, encr_cfg);
        qce_write(qce, REG_ENCR_SEG_SIZE, rctx->cryptlen);
        qce_write(qce, REG_ENCR_SEG_START, 0);

        if (IS_CTR(flags)) {
                qce_write(qce, REG_CNTR_MASK, ~0);
                qce_write(qce, REG_CNTR_MASK0, ~0);
                qce_write(qce, REG_CNTR_MASK1, ~0);
                qce_write(qce, REG_CNTR_MASK2, ~0);
        }

        qce_write(qce, REG_SEG_SIZE, rctx->cryptlen);

        /* get little endianness */
        config = qce_config_reg(qce, 1);
        qce_write(qce, REG_CONFIG, config);

        qce_crypto_go(qce, true);

        return 0;
}
#endif

#ifdef CONFIG_CRYPTO_DEV_QCE_AEAD
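/* Standard SHA-1/SHA-256 initial hash values, zero-padded to eight words */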
static const u32 std_iv_sha1[SHA256_DIGEST_SIZE / sizeof(u32)] = {
        SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, 0, 0, 0
};

static const u32 std_iv_sha256[SHA256_DIGEST_SIZE / sizeof(u32)] = {
        SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
        SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7
};

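/*
 * Inverse of qce_cpu_to_be32p_array(): convert big-endian words from
 * @src into CPU byte order, returning @len in words, rounded up.
 */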
static unsigned int qce_be32_to_cpu_array(u32 *dst, const u8 *src, unsigned int len)
{
        u32 *d = dst;
        const u8 *s = src;
        unsigned int n;

        n = len / sizeof(u32);
        for (; n > 0; n--) {
                *d = be32_to_cpup((const __be32 *)s);
                s += sizeof(u32);
                d++;
        }
        return DIV_ROUND_UP(len, sizeof(u32));
}

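/*
 * Program the register file for an AEAD request: cipher and auth keys and
 * IVs, the CCM counter and nonce setup, the segment geometry (associated
 * data first, with the CCM MAC accounted for on decryption), then start
 * the engine; for CCM the results dump is skipped.
 */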
static int qce_setup_regs_aead(struct crypto_async_request *async_req)
{
        struct aead_request *req = aead_request_cast(async_req);
        struct qce_aead_reqctx *rctx = aead_request_ctx(req);
        struct qce_aead_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
        struct qce_alg_template *tmpl = to_aead_tmpl(crypto_aead_reqtfm(req));
        struct qce_device *qce = tmpl->qce;
        u32 enckey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
        u32 enciv[QCE_MAX_IV_SIZE / sizeof(u32)] = {0};
        u32 authkey[QCE_SHA_HMAC_KEY_SIZE / sizeof(u32)] = {0};
        u32 authiv[SHA256_DIGEST_SIZE / sizeof(u32)] = {0};
        u32 authnonce[QCE_MAX_NONCE / sizeof(u32)] = {0};
        unsigned int enc_keylen = ctx->enc_keylen;
        unsigned int auth_keylen = ctx->auth_keylen;
        unsigned int enc_ivsize = rctx->ivsize;
        unsigned int auth_ivsize = 0;
        unsigned int enckey_words, enciv_words;
        unsigned int authkey_words, authiv_words, authnonce_words;
        unsigned long flags = rctx->flags;
        u32 encr_cfg, auth_cfg, config, totallen;
        u32 iv_last_word;

        qce_setup_config(qce);

        /* Write encryption key */
        enckey_words = qce_be32_to_cpu_array(enckey, ctx->enc_key, enc_keylen);
        qce_write_array(qce, REG_ENCR_KEY0, enckey, enckey_words);

        /* Write encryption iv */
        enciv_words = qce_be32_to_cpu_array(enciv, rctx->iv, enc_ivsize);
        qce_write_array(qce, REG_CNTR0_IV0, enciv, enciv_words);

        if (IS_CCM(rctx->flags)) {
                iv_last_word = enciv[enciv_words - 1];
                qce_write(qce, REG_CNTR3_IV3, iv_last_word + 1);
                qce_write_array(qce, REG_ENCR_CCM_INT_CNTR0, (u32 *)enciv, enciv_words);
                qce_write(qce, REG_CNTR_MASK, ~0);
                qce_write(qce, REG_CNTR_MASK0, ~0);
                qce_write(qce, REG_CNTR_MASK1, ~0);
                qce_write(qce, REG_CNTR_MASK2, ~0);
        }

        /* Clear authentication IV and KEY registers of previous values */
        qce_clear_array(qce, REG_AUTH_IV0, 16);
        qce_clear_array(qce, REG_AUTH_KEY0, 16);

        /* Clear byte count */
        qce_clear_array(qce, REG_AUTH_BYTECNT0, 4);

        /* Write authentication key */
        authkey_words = qce_be32_to_cpu_array(authkey, ctx->auth_key, auth_keylen);
        qce_write_array(qce, REG_AUTH_KEY0, (u32 *)authkey, authkey_words);

        /* Write initial authentication IV only for HMAC algorithms */
        if (IS_SHA_HMAC(rctx->flags)) {
                /* Write default authentication iv */
                if (IS_SHA1_HMAC(rctx->flags)) {
                        auth_ivsize = SHA1_DIGEST_SIZE;
                        memcpy(authiv, std_iv_sha1, auth_ivsize);
                } else if (IS_SHA256_HMAC(rctx->flags)) {
                        auth_ivsize = SHA256_DIGEST_SIZE;
                        memcpy(authiv, std_iv_sha256, auth_ivsize);
                }
                authiv_words = auth_ivsize / sizeof(u32);
                qce_write_array(qce, REG_AUTH_IV0, (u32 *)authiv, authiv_words);
        } else if (IS_CCM(rctx->flags)) {
                /* Write nonce for CCM algorithms */
                authnonce_words = qce_be32_to_cpu_array(authnonce, rctx->ccm_nonce, QCE_MAX_NONCE);
                qce_write_array(qce, REG_AUTH_INFO_NONCE0, authnonce, authnonce_words);
        }

        /* Set up ENCR_SEG_CFG */
        encr_cfg = qce_encr_cfg(flags, enc_keylen);
        if (IS_ENCRYPT(flags))
                encr_cfg |= BIT(ENCODE_SHIFT);
        qce_write(qce, REG_ENCR_SEG_CFG, encr_cfg);

        /* Set up AUTH_SEG_CFG */
        auth_cfg = qce_auth_cfg(rctx->flags, auth_keylen, ctx->authsize);
        auth_cfg |= BIT(AUTH_LAST_SHIFT);
        auth_cfg |= BIT(AUTH_FIRST_SHIFT);
        if (IS_ENCRYPT(flags)) {
                if (IS_CCM(rctx->flags))
                        auth_cfg |= AUTH_POS_BEFORE << AUTH_POS_SHIFT;
                else
                        auth_cfg |= AUTH_POS_AFTER << AUTH_POS_SHIFT;
        } else {
                if (IS_CCM(rctx->flags))
                        auth_cfg |= AUTH_POS_AFTER << AUTH_POS_SHIFT;
                else
                        auth_cfg |= AUTH_POS_BEFORE << AUTH_POS_SHIFT;
        }
        qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg);

        totallen = rctx->cryptlen + rctx->assoclen;

        /* Set the encryption size and start offset */
        if (IS_CCM(rctx->flags) && IS_DECRYPT(rctx->flags))
                qce_write(qce, REG_ENCR_SEG_SIZE, rctx->cryptlen + ctx->authsize);
        else
                qce_write(qce, REG_ENCR_SEG_SIZE, rctx->cryptlen);
        qce_write(qce, REG_ENCR_SEG_START, rctx->assoclen & 0xffff);

        /* Set the authentication size and start offset */
        qce_write(qce, REG_AUTH_SEG_SIZE, totallen);
        qce_write(qce, REG_AUTH_SEG_START, 0);

        /* Write total length */
        if (IS_CCM(rctx->flags) && IS_DECRYPT(rctx->flags))
                qce_write(qce, REG_SEG_SIZE, totallen + ctx->authsize);
        else
                qce_write(qce, REG_SEG_SIZE, totallen);

        /* get little endianness */
        config = qce_config_reg(qce, 1);
        qce_write(qce, REG_CONFIG, config);

        /* Start the process */
        qce_crypto_go(qce, !IS_CCM(flags));

        return 0;
}
#endif

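/*
 * Dispatch a prepared request to the register setup routine matching its
 * algorithm type.
 */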
int qce_start(struct crypto_async_request *async_req, u32 type)
{
        switch (type) {
#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
        case CRYPTO_ALG_TYPE_SKCIPHER:
                return qce_setup_regs_skcipher(async_req);
#endif
#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
        case CRYPTO_ALG_TYPE_AHASH:
                return qce_setup_regs_ahash(async_req);
#endif
#ifdef CONFIG_CRYPTO_DEV_QCE_AEAD
        case CRYPTO_ALG_TYPE_AEAD:
                return qce_setup_regs_aead(async_req);
#endif
        default:
                return -EINVAL;
        }
}

#define STATUS_ERRORS	\
                (BIT(SW_ERR_SHIFT) | BIT(AXI_ERR_SHIFT) | BIT(HSD_ERR_SHIFT))

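/*
 * Read and decode REG_STATUS: -ENXIO on engine or bus errors or if
 * OPERATION_DONE is not set, -EBADMSG if MAC verification failed, zero
 * otherwise.
 */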
int qce_check_status(struct qce_device *qce, u32 *status)
{
        int ret = 0;

        *status = qce_read(qce, REG_STATUS);

        /*
         * Don't use the result dump status, since the operation may not be
         * complete; use the status just read from the device instead. If we
         * ever need result_status from the result dump, it has to be byte
         * swapped, since we set the device to little endian.
         */
        if (*status & STATUS_ERRORS || !(*status & BIT(OPERATION_DONE_SHIFT)))
                ret = -ENXIO;
        else if (*status & BIT(MAC_FAILED_SHIFT))
                ret = -EBADMSG;

        return ret;
}

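/* Split REG_VERSION into its major, minor and step revision fields */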
void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 *step)
{
        u32 val;

        val = qce_read(qce, REG_VERSION);
        *major = (val & CORE_MAJOR_REV_MASK) >> CORE_MAJOR_REV_SHIFT;
        *minor = (val & CORE_MINOR_REV_MASK) >> CORE_MINOR_REV_SHIFT;
        *step = (val & CORE_STEP_REV_MASK) >> CORE_STEP_REV_SHIFT;
}
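
/*
 * Hypothetical completion-path sketch (illustration only, not part of the
 * original file): once the DMA done callback fires, a caller would
 * typically decode the engine status along these lines. The surrounding
 * request-finalization steps are placeholders, not driver API.
 *
 *	u32 status;
 *	int ret = qce_check_status(qce, &status);
 *
 *	if (ret == -EBADMSG)
 *		// authentication tag mismatch: fail the AEAD request
 *	else if (ret)
 *		// engine or bus error: report -ENXIO to the caller
 *	else
 *		// success: complete the async request with 0
 */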