cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

atmel-aes.c (68644B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * Cryptographic API.
      4 *
      5 * Support for ATMEL AES HW acceleration.
      6 *
      7 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
      8 * Author: Nicolas Royer <nicolas@eukrea.com>
      9 *
      10 * Some ideas are from the omap-aes.c driver.
     11 */
     12
     13
     14#include <linux/kernel.h>
     15#include <linux/module.h>
     16#include <linux/slab.h>
     17#include <linux/err.h>
     18#include <linux/clk.h>
     19#include <linux/io.h>
     20#include <linux/hw_random.h>
     21#include <linux/platform_device.h>
     22
     23#include <linux/device.h>
     24#include <linux/dmaengine.h>
     25#include <linux/init.h>
     26#include <linux/errno.h>
     27#include <linux/interrupt.h>
     28#include <linux/irq.h>
     29#include <linux/scatterlist.h>
     30#include <linux/dma-mapping.h>
     31#include <linux/of_device.h>
     32#include <linux/delay.h>
     33#include <linux/crypto.h>
     34#include <crypto/scatterwalk.h>
     35#include <crypto/algapi.h>
     36#include <crypto/aes.h>
     37#include <crypto/gcm.h>
     38#include <crypto/xts.h>
     39#include <crypto/internal/aead.h>
     40#include <crypto/internal/skcipher.h>
     41#include "atmel-aes-regs.h"
     42#include "atmel-authenc.h"
     43
     44#define ATMEL_AES_PRIORITY	300
     45
     46#define ATMEL_AES_BUFFER_ORDER	2
     47#define ATMEL_AES_BUFFER_SIZE	(PAGE_SIZE << ATMEL_AES_BUFFER_ORDER)
     48
     49#define CFB8_BLOCK_SIZE		1
     50#define CFB16_BLOCK_SIZE	2
     51#define CFB32_BLOCK_SIZE	4
     52#define CFB64_BLOCK_SIZE	8
     53
     54#define SIZE_IN_WORDS(x)	((x) >> 2)
     55
     56/* AES flags */
     57/* Reserve bits [18:16] [14:12] [1:0] for mode (same as for AES_MR) */
     58#define AES_FLAGS_ENCRYPT	AES_MR_CYPHER_ENC
     59#define AES_FLAGS_GTAGEN	AES_MR_GTAGEN
     60#define AES_FLAGS_OPMODE_MASK	(AES_MR_OPMOD_MASK | AES_MR_CFBS_MASK)
     61#define AES_FLAGS_ECB		AES_MR_OPMOD_ECB
     62#define AES_FLAGS_CBC		AES_MR_OPMOD_CBC
     63#define AES_FLAGS_OFB		AES_MR_OPMOD_OFB
     64#define AES_FLAGS_CFB128	(AES_MR_OPMOD_CFB | AES_MR_CFBS_128b)
     65#define AES_FLAGS_CFB64		(AES_MR_OPMOD_CFB | AES_MR_CFBS_64b)
     66#define AES_FLAGS_CFB32		(AES_MR_OPMOD_CFB | AES_MR_CFBS_32b)
     67#define AES_FLAGS_CFB16		(AES_MR_OPMOD_CFB | AES_MR_CFBS_16b)
     68#define AES_FLAGS_CFB8		(AES_MR_OPMOD_CFB | AES_MR_CFBS_8b)
     69#define AES_FLAGS_CTR		AES_MR_OPMOD_CTR
     70#define AES_FLAGS_GCM		AES_MR_OPMOD_GCM
     71#define AES_FLAGS_XTS		AES_MR_OPMOD_XTS
     72
     73#define AES_FLAGS_MODE_MASK	(AES_FLAGS_OPMODE_MASK |	\
     74				 AES_FLAGS_ENCRYPT |		\
     75				 AES_FLAGS_GTAGEN)
     76
     77#define AES_FLAGS_BUSY		BIT(3)
     78#define AES_FLAGS_DUMP_REG	BIT(4)
     79#define AES_FLAGS_OWN_SHA	BIT(5)
     80
     81#define AES_FLAGS_PERSISTENT	AES_FLAGS_BUSY
     82
     83#define ATMEL_AES_QUEUE_LENGTH	50
     84
     85#define ATMEL_AES_DMA_THRESHOLD		256
     86
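/*
 * Annotation (not from the original driver): a request's mode word is
 * built by OR-ing one AES_FLAGS_<opmode> value with AES_FLAGS_ENCRYPT for
 * encryption (plus AES_FLAGS_GTAGEN when GCM tag generation is used);
 * for instance atmel_aes_cbc_encrypt() passes
 * AES_FLAGS_CBC | AES_FLAGS_ENCRYPT. AES_FLAGS_MODE_MASK covers exactly
 * these bits, so atmel_aes_write_ctrl_key() can copy them straight into
 * the AES_MR register.
 */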
     87
     88struct atmel_aes_caps {
     89	bool			has_dualbuff;
     90	bool			has_cfb64;
     91	bool			has_gcm;
     92	bool			has_xts;
     93	bool			has_authenc;
     94	u32			max_burst_size;
     95};
     96
     97struct atmel_aes_dev;
     98
     99
    100typedef int (*atmel_aes_fn_t)(struct atmel_aes_dev *);
    101
    102
    103struct atmel_aes_base_ctx {
    104	struct atmel_aes_dev	*dd;
    105	atmel_aes_fn_t		start;
    106	int			keylen;
    107	u32			key[AES_KEYSIZE_256 / sizeof(u32)];
    108	u16			block_size;
    109	bool			is_aead;
    110};
    111
    112struct atmel_aes_ctx {
    113	struct atmel_aes_base_ctx	base;
    114};
    115
    116struct atmel_aes_ctr_ctx {
    117	struct atmel_aes_base_ctx	base;
    118
    119	__be32			iv[AES_BLOCK_SIZE / sizeof(u32)];
    120	size_t			offset;
    121	struct scatterlist	src[2];
    122	struct scatterlist	dst[2];
    123	u32			blocks;
    124};
    125
    126struct atmel_aes_gcm_ctx {
    127	struct atmel_aes_base_ctx	base;
    128
    129	struct scatterlist	src[2];
    130	struct scatterlist	dst[2];
    131
    132	__be32			j0[AES_BLOCK_SIZE / sizeof(u32)];
    133	u32			tag[AES_BLOCK_SIZE / sizeof(u32)];
    134	__be32			ghash[AES_BLOCK_SIZE / sizeof(u32)];
    135	size_t			textlen;
    136
    137	const __be32		*ghash_in;
    138	__be32			*ghash_out;
    139	atmel_aes_fn_t		ghash_resume;
    140};
    141
    142struct atmel_aes_xts_ctx {
    143	struct atmel_aes_base_ctx	base;
    144
    145	u32			key2[AES_KEYSIZE_256 / sizeof(u32)];
    146	struct crypto_skcipher *fallback_tfm;
    147};
    148
    149#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
    150struct atmel_aes_authenc_ctx {
    151	struct atmel_aes_base_ctx	base;
    152	struct atmel_sha_authenc_ctx	*auth;
    153};
    154#endif
    155
    156struct atmel_aes_reqctx {
    157	unsigned long		mode;
    158	u8			lastc[AES_BLOCK_SIZE];
    159	struct skcipher_request fallback_req;
    160};
    161
    162#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
    163struct atmel_aes_authenc_reqctx {
    164	struct atmel_aes_reqctx	base;
    165
    166	struct scatterlist	src[2];
    167	struct scatterlist	dst[2];
    168	size_t			textlen;
    169	u32			digest[SHA512_DIGEST_SIZE / sizeof(u32)];
    170
     171	/* auth_req MUST be placed last. */
    172	struct ahash_request	auth_req;
    173};
    174#endif
    175
    176struct atmel_aes_dma {
    177	struct dma_chan		*chan;
    178	struct scatterlist	*sg;
    179	int			nents;
    180	unsigned int		remainder;
    181	unsigned int		sg_len;
    182};
    183
    184struct atmel_aes_dev {
    185	struct list_head	list;
    186	unsigned long		phys_base;
    187	void __iomem		*io_base;
    188
    189	struct crypto_async_request	*areq;
    190	struct atmel_aes_base_ctx	*ctx;
    191
    192	bool			is_async;
    193	atmel_aes_fn_t		resume;
    194	atmel_aes_fn_t		cpu_transfer_complete;
    195
    196	struct device		*dev;
    197	struct clk		*iclk;
    198	int			irq;
    199
    200	unsigned long		flags;
    201
    202	spinlock_t		lock;
    203	struct crypto_queue	queue;
    204
    205	struct tasklet_struct	done_task;
    206	struct tasklet_struct	queue_task;
    207
    208	size_t			total;
    209	size_t			datalen;
    210	u32			*data;
    211
    212	struct atmel_aes_dma	src;
    213	struct atmel_aes_dma	dst;
    214
    215	size_t			buflen;
    216	void			*buf;
    217	struct scatterlist	aligned_sg;
    218	struct scatterlist	*real_dst;
    219
    220	struct atmel_aes_caps	caps;
    221
    222	u32			hw_version;
    223};
    224
    225struct atmel_aes_drv {
    226	struct list_head	dev_list;
    227	spinlock_t		lock;
    228};
    229
    230static struct atmel_aes_drv atmel_aes = {
    231	.dev_list = LIST_HEAD_INIT(atmel_aes.dev_list),
    232	.lock = __SPIN_LOCK_UNLOCKED(atmel_aes.lock),
    233};
    234
    235#ifdef VERBOSE_DEBUG
    236static const char *atmel_aes_reg_name(u32 offset, char *tmp, size_t sz)
    237{
    238	switch (offset) {
    239	case AES_CR:
    240		return "CR";
    241
    242	case AES_MR:
    243		return "MR";
    244
    245	case AES_ISR:
    246		return "ISR";
    247
    248	case AES_IMR:
    249		return "IMR";
    250
    251	case AES_IER:
    252		return "IER";
    253
    254	case AES_IDR:
    255		return "IDR";
    256
    257	case AES_KEYWR(0):
    258	case AES_KEYWR(1):
    259	case AES_KEYWR(2):
    260	case AES_KEYWR(3):
    261	case AES_KEYWR(4):
    262	case AES_KEYWR(5):
    263	case AES_KEYWR(6):
    264	case AES_KEYWR(7):
    265		snprintf(tmp, sz, "KEYWR[%u]", (offset - AES_KEYWR(0)) >> 2);
    266		break;
    267
    268	case AES_IDATAR(0):
    269	case AES_IDATAR(1):
    270	case AES_IDATAR(2):
    271	case AES_IDATAR(3):
    272		snprintf(tmp, sz, "IDATAR[%u]", (offset - AES_IDATAR(0)) >> 2);
    273		break;
    274
    275	case AES_ODATAR(0):
    276	case AES_ODATAR(1):
    277	case AES_ODATAR(2):
    278	case AES_ODATAR(3):
    279		snprintf(tmp, sz, "ODATAR[%u]", (offset - AES_ODATAR(0)) >> 2);
    280		break;
    281
    282	case AES_IVR(0):
    283	case AES_IVR(1):
    284	case AES_IVR(2):
    285	case AES_IVR(3):
    286		snprintf(tmp, sz, "IVR[%u]", (offset - AES_IVR(0)) >> 2);
    287		break;
    288
    289	case AES_AADLENR:
    290		return "AADLENR";
    291
    292	case AES_CLENR:
    293		return "CLENR";
    294
    295	case AES_GHASHR(0):
    296	case AES_GHASHR(1):
    297	case AES_GHASHR(2):
    298	case AES_GHASHR(3):
    299		snprintf(tmp, sz, "GHASHR[%u]", (offset - AES_GHASHR(0)) >> 2);
    300		break;
    301
    302	case AES_TAGR(0):
    303	case AES_TAGR(1):
    304	case AES_TAGR(2):
    305	case AES_TAGR(3):
    306		snprintf(tmp, sz, "TAGR[%u]", (offset - AES_TAGR(0)) >> 2);
    307		break;
    308
    309	case AES_CTRR:
    310		return "CTRR";
    311
    312	case AES_GCMHR(0):
    313	case AES_GCMHR(1):
    314	case AES_GCMHR(2):
    315	case AES_GCMHR(3):
    316		snprintf(tmp, sz, "GCMHR[%u]", (offset - AES_GCMHR(0)) >> 2);
    317		break;
    318
    319	case AES_EMR:
    320		return "EMR";
    321
    322	case AES_TWR(0):
    323	case AES_TWR(1):
    324	case AES_TWR(2):
    325	case AES_TWR(3):
    326		snprintf(tmp, sz, "TWR[%u]", (offset - AES_TWR(0)) >> 2);
    327		break;
    328
    329	case AES_ALPHAR(0):
    330	case AES_ALPHAR(1):
    331	case AES_ALPHAR(2):
    332	case AES_ALPHAR(3):
    333		snprintf(tmp, sz, "ALPHAR[%u]", (offset - AES_ALPHAR(0)) >> 2);
    334		break;
    335
    336	default:
    337		snprintf(tmp, sz, "0x%02x", offset);
    338		break;
    339	}
    340
    341	return tmp;
    342}
    343#endif /* VERBOSE_DEBUG */
    344
    345/* Shared functions */
    346
    347static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset)
    348{
    349	u32 value = readl_relaxed(dd->io_base + offset);
    350
    351#ifdef VERBOSE_DEBUG
    352	if (dd->flags & AES_FLAGS_DUMP_REG) {
    353		char tmp[16];
    354
    355		dev_vdbg(dd->dev, "read 0x%08x from %s\n", value,
    356			 atmel_aes_reg_name(offset, tmp, sizeof(tmp)));
    357	}
    358#endif /* VERBOSE_DEBUG */
    359
    360	return value;
    361}
    362
    363static inline void atmel_aes_write(struct atmel_aes_dev *dd,
    364					u32 offset, u32 value)
    365{
    366#ifdef VERBOSE_DEBUG
    367	if (dd->flags & AES_FLAGS_DUMP_REG) {
    368		char tmp[16];
    369
    370		dev_vdbg(dd->dev, "write 0x%08x into %s\n", value,
    371			 atmel_aes_reg_name(offset, tmp, sizeof(tmp)));
    372	}
    373#endif /* VERBOSE_DEBUG */
    374
    375	writel_relaxed(value, dd->io_base + offset);
    376}
    377
    378static void atmel_aes_read_n(struct atmel_aes_dev *dd, u32 offset,
    379					u32 *value, int count)
    380{
    381	for (; count--; value++, offset += 4)
    382		*value = atmel_aes_read(dd, offset);
    383}
    384
    385static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset,
    386			      const u32 *value, int count)
    387{
    388	for (; count--; value++, offset += 4)
    389		atmel_aes_write(dd, offset, *value);
    390}
    391
    392static inline void atmel_aes_read_block(struct atmel_aes_dev *dd, u32 offset,
    393					void *value)
    394{
    395	atmel_aes_read_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE));
    396}
    397
    398static inline void atmel_aes_write_block(struct atmel_aes_dev *dd, u32 offset,
    399					 const void *value)
    400{
    401	atmel_aes_write_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE));
    402}
    403
    404static inline int atmel_aes_wait_for_data_ready(struct atmel_aes_dev *dd,
    405						atmel_aes_fn_t resume)
    406{
    407	u32 isr = atmel_aes_read(dd, AES_ISR);
    408
    409	if (unlikely(isr & AES_INT_DATARDY))
    410		return resume(dd);
    411
    412	dd->resume = resume;
    413	atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
    414	return -EINPROGRESS;
    415}
    416
    417static inline size_t atmel_aes_padlen(size_t len, size_t block_size)
    418{
    419	len &= block_size - 1;
    420	return len ? block_size - len : 0;
    421}
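/*
 * Worked example (annotation): with block_size = AES_BLOCK_SIZE (16),
 * atmel_aes_padlen(20, 16) = 12 (20 & 15 = 4, 16 - 4 = 12) and
 * atmel_aes_padlen(32, 16) = 0, i.e. the function returns the number of
 * bytes needed to pad len up to the next block boundary. block_size must
 * be a power of two for the mask trick to work.
 */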
    422
    423static struct atmel_aes_dev *atmel_aes_dev_alloc(struct atmel_aes_base_ctx *ctx)
    424{
    425	struct atmel_aes_dev *aes_dd;
    426
    427	spin_lock_bh(&atmel_aes.lock);
    428	/* One AES IP per SoC. */
    429	aes_dd = list_first_entry_or_null(&atmel_aes.dev_list,
    430					  struct atmel_aes_dev, list);
    431	spin_unlock_bh(&atmel_aes.lock);
    432	return aes_dd;
    433}
    434
    435static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
    436{
    437	int err;
    438
    439	err = clk_enable(dd->iclk);
    440	if (err)
    441		return err;
    442
    443	atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
    444	atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET);
    445
    446	return 0;
    447}
    448
    449static inline unsigned int atmel_aes_get_version(struct atmel_aes_dev *dd)
    450{
    451	return atmel_aes_read(dd, AES_HW_VERSION) & 0x00000fff;
    452}
    453
    454static int atmel_aes_hw_version_init(struct atmel_aes_dev *dd)
    455{
    456	int err;
    457
    458	err = atmel_aes_hw_init(dd);
    459	if (err)
    460		return err;
    461
    462	dd->hw_version = atmel_aes_get_version(dd);
    463
    464	dev_info(dd->dev, "version: 0x%x\n", dd->hw_version);
    465
    466	clk_disable(dd->iclk);
    467	return 0;
    468}
    469
    470static inline void atmel_aes_set_mode(struct atmel_aes_dev *dd,
    471				      const struct atmel_aes_reqctx *rctx)
    472{
    473	/* Clear all but persistent flags and set request flags. */
    474	dd->flags = (dd->flags & AES_FLAGS_PERSISTENT) | rctx->mode;
    475}
    476
    477static inline bool atmel_aes_is_encrypt(const struct atmel_aes_dev *dd)
    478{
    479	return (dd->flags & AES_FLAGS_ENCRYPT);
    480}
    481
    482#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
    483static void atmel_aes_authenc_complete(struct atmel_aes_dev *dd, int err);
    484#endif
    485
    486static void atmel_aes_set_iv_as_last_ciphertext_block(struct atmel_aes_dev *dd)
    487{
    488	struct skcipher_request *req = skcipher_request_cast(dd->areq);
    489	struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
    490	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
    491	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
    492
    493	if (req->cryptlen < ivsize)
    494		return;
    495
    496	if (rctx->mode & AES_FLAGS_ENCRYPT) {
    497		scatterwalk_map_and_copy(req->iv, req->dst,
    498					 req->cryptlen - ivsize, ivsize, 0);
    499	} else {
    500		if (req->src == req->dst)
    501			memcpy(req->iv, rctx->lastc, ivsize);
    502		else
    503			scatterwalk_map_and_copy(req->iv, req->src,
    504						 req->cryptlen - ivsize,
    505						 ivsize, 0);
    506	}
    507}
    508
    509static inline struct atmel_aes_ctr_ctx *
    510atmel_aes_ctr_ctx_cast(struct atmel_aes_base_ctx *ctx)
    511{
    512	return container_of(ctx, struct atmel_aes_ctr_ctx, base);
    513}
    514
    515static void atmel_aes_ctr_update_req_iv(struct atmel_aes_dev *dd)
    516{
    517	struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
    518	struct skcipher_request *req = skcipher_request_cast(dd->areq);
    519	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
    520	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
    521	int i;
    522
    523	/*
     524	 * The CTR transfer works in fragments of at most 1 MByte of data
     525	 * because of the 16-bit CTR counter embedded in the IP. When we reach
     526	 * this point, ctx->blocks contains the number of blocks of the last
     527	 * fragment processed, so there is no need to explicitly cast it to u16.
    528	 */
    529	for (i = 0; i < ctx->blocks; i++)
    530		crypto_inc((u8 *)ctx->iv, AES_BLOCK_SIZE);
    531
    532	memcpy(req->iv, ctx->iv, ivsize);
    533}
    534
    535static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
    536{
    537	struct skcipher_request *req = skcipher_request_cast(dd->areq);
    538	struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
    539
    540#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
    541	if (dd->ctx->is_aead)
    542		atmel_aes_authenc_complete(dd, err);
    543#endif
    544
    545	clk_disable(dd->iclk);
    546	dd->flags &= ~AES_FLAGS_BUSY;
    547
    548	if (!err && !dd->ctx->is_aead &&
    549	    (rctx->mode & AES_FLAGS_OPMODE_MASK) != AES_FLAGS_ECB) {
    550		if ((rctx->mode & AES_FLAGS_OPMODE_MASK) != AES_FLAGS_CTR)
    551			atmel_aes_set_iv_as_last_ciphertext_block(dd);
    552		else
    553			atmel_aes_ctr_update_req_iv(dd);
    554	}
    555
    556	if (dd->is_async)
    557		dd->areq->complete(dd->areq, err);
    558
    559	tasklet_schedule(&dd->queue_task);
    560
    561	return err;
    562}
    563
    564static void atmel_aes_write_ctrl_key(struct atmel_aes_dev *dd, bool use_dma,
    565				     const __be32 *iv, const u32 *key, int keylen)
    566{
    567	u32 valmr = 0;
    568
    569	/* MR register must be set before IV registers */
    570	if (keylen == AES_KEYSIZE_128)
    571		valmr |= AES_MR_KEYSIZE_128;
    572	else if (keylen == AES_KEYSIZE_192)
    573		valmr |= AES_MR_KEYSIZE_192;
    574	else
    575		valmr |= AES_MR_KEYSIZE_256;
    576
    577	valmr |= dd->flags & AES_FLAGS_MODE_MASK;
    578
    579	if (use_dma) {
    580		valmr |= AES_MR_SMOD_IDATAR0;
    581		if (dd->caps.has_dualbuff)
    582			valmr |= AES_MR_DUALBUFF;
    583	} else {
    584		valmr |= AES_MR_SMOD_AUTO;
    585	}
    586
    587	atmel_aes_write(dd, AES_MR, valmr);
    588
    589	atmel_aes_write_n(dd, AES_KEYWR(0), key, SIZE_IN_WORDS(keylen));
    590
    591	if (iv && (valmr & AES_MR_OPMOD_MASK) != AES_MR_OPMOD_ECB)
    592		atmel_aes_write_block(dd, AES_IVR(0), iv);
    593}
    594
    595static inline void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
    596					const __be32 *iv)
    597
    598{
    599	atmel_aes_write_ctrl_key(dd, use_dma, iv,
    600				 dd->ctx->key, dd->ctx->keylen);
    601}
    602
    603/* CPU transfer */
    604
    605static int atmel_aes_cpu_transfer(struct atmel_aes_dev *dd)
    606{
    607	int err = 0;
    608	u32 isr;
    609
    610	for (;;) {
    611		atmel_aes_read_block(dd, AES_ODATAR(0), dd->data);
    612		dd->data += 4;
    613		dd->datalen -= AES_BLOCK_SIZE;
    614
    615		if (dd->datalen < AES_BLOCK_SIZE)
    616			break;
    617
    618		atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
    619
    620		isr = atmel_aes_read(dd, AES_ISR);
    621		if (!(isr & AES_INT_DATARDY)) {
    622			dd->resume = atmel_aes_cpu_transfer;
    623			atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
    624			return -EINPROGRESS;
    625		}
    626	}
    627
    628	if (!sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst),
    629				 dd->buf, dd->total))
    630		err = -EINVAL;
    631
    632	if (err)
    633		return atmel_aes_complete(dd, err);
    634
    635	return dd->cpu_transfer_complete(dd);
    636}
    637
    638static int atmel_aes_cpu_start(struct atmel_aes_dev *dd,
    639			       struct scatterlist *src,
    640			       struct scatterlist *dst,
    641			       size_t len,
    642			       atmel_aes_fn_t resume)
    643{
    644	size_t padlen = atmel_aes_padlen(len, AES_BLOCK_SIZE);
    645
    646	if (unlikely(len == 0))
    647		return -EINVAL;
    648
    649	sg_copy_to_buffer(src, sg_nents(src), dd->buf, len);
    650
    651	dd->total = len;
    652	dd->real_dst = dst;
    653	dd->cpu_transfer_complete = resume;
    654	dd->datalen = len + padlen;
    655	dd->data = (u32 *)dd->buf;
    656	atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
    657	return atmel_aes_wait_for_data_ready(dd, atmel_aes_cpu_transfer);
    658}
    659
    660
    661/* DMA transfer */
    662
    663static void atmel_aes_dma_callback(void *data);
    664
    665static bool atmel_aes_check_aligned(struct atmel_aes_dev *dd,
    666				    struct scatterlist *sg,
    667				    size_t len,
    668				    struct atmel_aes_dma *dma)
    669{
    670	int nents;
    671
    672	if (!IS_ALIGNED(len, dd->ctx->block_size))
    673		return false;
    674
    675	for (nents = 0; sg; sg = sg_next(sg), ++nents) {
    676		if (!IS_ALIGNED(sg->offset, sizeof(u32)))
    677			return false;
    678
    679		if (len <= sg->length) {
    680			if (!IS_ALIGNED(len, dd->ctx->block_size))
    681				return false;
    682
    683			dma->nents = nents+1;
    684			dma->remainder = sg->length - len;
    685			sg->length = len;
    686			return true;
    687		}
    688
    689		if (!IS_ALIGNED(sg->length, dd->ctx->block_size))
    690			return false;
    691
    692		len -= sg->length;
    693	}
    694
    695	return false;
    696}
    697
    698static inline void atmel_aes_restore_sg(const struct atmel_aes_dma *dma)
    699{
    700	struct scatterlist *sg = dma->sg;
    701	int nents = dma->nents;
    702
    703	if (!dma->remainder)
    704		return;
    705
    706	while (--nents > 0 && sg)
    707		sg = sg_next(sg);
    708
    709	if (!sg)
    710		return;
    711
    712	sg->length += dma->remainder;
    713}
    714
    715static int atmel_aes_map(struct atmel_aes_dev *dd,
    716			 struct scatterlist *src,
    717			 struct scatterlist *dst,
    718			 size_t len)
    719{
    720	bool src_aligned, dst_aligned;
    721	size_t padlen;
    722
    723	dd->total = len;
    724	dd->src.sg = src;
    725	dd->dst.sg = dst;
    726	dd->real_dst = dst;
    727
    728	src_aligned = atmel_aes_check_aligned(dd, src, len, &dd->src);
    729	if (src == dst)
    730		dst_aligned = src_aligned;
    731	else
    732		dst_aligned = atmel_aes_check_aligned(dd, dst, len, &dd->dst);
    733	if (!src_aligned || !dst_aligned) {
    734		padlen = atmel_aes_padlen(len, dd->ctx->block_size);
    735
    736		if (dd->buflen < len + padlen)
    737			return -ENOMEM;
    738
    739		if (!src_aligned) {
    740			sg_copy_to_buffer(src, sg_nents(src), dd->buf, len);
    741			dd->src.sg = &dd->aligned_sg;
    742			dd->src.nents = 1;
    743			dd->src.remainder = 0;
    744		}
    745
    746		if (!dst_aligned) {
    747			dd->dst.sg = &dd->aligned_sg;
    748			dd->dst.nents = 1;
    749			dd->dst.remainder = 0;
    750		}
    751
    752		sg_init_table(&dd->aligned_sg, 1);
    753		sg_set_buf(&dd->aligned_sg, dd->buf, len + padlen);
    754	}
    755
    756	if (dd->src.sg == dd->dst.sg) {
    757		dd->src.sg_len = dma_map_sg(dd->dev, dd->src.sg, dd->src.nents,
    758					    DMA_BIDIRECTIONAL);
    759		dd->dst.sg_len = dd->src.sg_len;
    760		if (!dd->src.sg_len)
    761			return -EFAULT;
    762	} else {
    763		dd->src.sg_len = dma_map_sg(dd->dev, dd->src.sg, dd->src.nents,
    764					    DMA_TO_DEVICE);
    765		if (!dd->src.sg_len)
    766			return -EFAULT;
    767
    768		dd->dst.sg_len = dma_map_sg(dd->dev, dd->dst.sg, dd->dst.nents,
    769					    DMA_FROM_DEVICE);
    770		if (!dd->dst.sg_len) {
    771			dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
    772				     DMA_TO_DEVICE);
    773			return -EFAULT;
    774		}
    775	}
    776
    777	return 0;
    778}
    779
    780static void atmel_aes_unmap(struct atmel_aes_dev *dd)
    781{
    782	if (dd->src.sg == dd->dst.sg) {
    783		dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
    784			     DMA_BIDIRECTIONAL);
    785
    786		if (dd->src.sg != &dd->aligned_sg)
    787			atmel_aes_restore_sg(&dd->src);
    788	} else {
    789		dma_unmap_sg(dd->dev, dd->dst.sg, dd->dst.nents,
    790			     DMA_FROM_DEVICE);
    791
    792		if (dd->dst.sg != &dd->aligned_sg)
    793			atmel_aes_restore_sg(&dd->dst);
    794
    795		dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
    796			     DMA_TO_DEVICE);
    797
    798		if (dd->src.sg != &dd->aligned_sg)
    799			atmel_aes_restore_sg(&dd->src);
    800	}
    801
    802	if (dd->dst.sg == &dd->aligned_sg)
    803		sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst),
    804				    dd->buf, dd->total);
    805}
    806
    807static int atmel_aes_dma_transfer_start(struct atmel_aes_dev *dd,
    808					enum dma_slave_buswidth addr_width,
    809					enum dma_transfer_direction dir,
    810					u32 maxburst)
    811{
    812	struct dma_async_tx_descriptor *desc;
    813	struct dma_slave_config config;
    814	dma_async_tx_callback callback;
    815	struct atmel_aes_dma *dma;
    816	int err;
    817
    818	memset(&config, 0, sizeof(config));
    819	config.src_addr_width = addr_width;
    820	config.dst_addr_width = addr_width;
    821	config.src_maxburst = maxburst;
    822	config.dst_maxburst = maxburst;
    823
    824	switch (dir) {
    825	case DMA_MEM_TO_DEV:
    826		dma = &dd->src;
    827		callback = NULL;
    828		config.dst_addr = dd->phys_base + AES_IDATAR(0);
    829		break;
    830
    831	case DMA_DEV_TO_MEM:
    832		dma = &dd->dst;
    833		callback = atmel_aes_dma_callback;
    834		config.src_addr = dd->phys_base + AES_ODATAR(0);
    835		break;
    836
    837	default:
    838		return -EINVAL;
    839	}
    840
    841	err = dmaengine_slave_config(dma->chan, &config);
    842	if (err)
    843		return err;
    844
    845	desc = dmaengine_prep_slave_sg(dma->chan, dma->sg, dma->sg_len, dir,
    846				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
    847	if (!desc)
    848		return -ENOMEM;
    849
    850	desc->callback = callback;
    851	desc->callback_param = dd;
    852	dmaengine_submit(desc);
    853	dma_async_issue_pending(dma->chan);
    854
    855	return 0;
    856}
    857
    858static int atmel_aes_dma_start(struct atmel_aes_dev *dd,
    859			       struct scatterlist *src,
    860			       struct scatterlist *dst,
    861			       size_t len,
    862			       atmel_aes_fn_t resume)
    863{
    864	enum dma_slave_buswidth addr_width;
    865	u32 maxburst;
    866	int err;
    867
    868	switch (dd->ctx->block_size) {
    869	case CFB8_BLOCK_SIZE:
    870		addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
    871		maxburst = 1;
    872		break;
    873
    874	case CFB16_BLOCK_SIZE:
    875		addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
    876		maxburst = 1;
    877		break;
    878
    879	case CFB32_BLOCK_SIZE:
    880	case CFB64_BLOCK_SIZE:
    881		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
    882		maxburst = 1;
    883		break;
    884
    885	case AES_BLOCK_SIZE:
    886		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
    887		maxburst = dd->caps.max_burst_size;
    888		break;
    889
    890	default:
    891		err = -EINVAL;
    892		goto exit;
    893	}
    894
    895	err = atmel_aes_map(dd, src, dst, len);
    896	if (err)
    897		goto exit;
    898
    899	dd->resume = resume;
    900
    901	/* Set output DMA transfer first */
    902	err = atmel_aes_dma_transfer_start(dd, addr_width, DMA_DEV_TO_MEM,
    903					   maxburst);
    904	if (err)
    905		goto unmap;
    906
    907	/* Then set input DMA transfer */
    908	err = atmel_aes_dma_transfer_start(dd, addr_width, DMA_MEM_TO_DEV,
    909					   maxburst);
    910	if (err)
    911		goto output_transfer_stop;
    912
    913	return -EINPROGRESS;
    914
    915output_transfer_stop:
    916	dmaengine_terminate_sync(dd->dst.chan);
    917unmap:
    918	atmel_aes_unmap(dd);
    919exit:
    920	return atmel_aes_complete(dd, err);
    921}
    922
    923static void atmel_aes_dma_callback(void *data)
    924{
    925	struct atmel_aes_dev *dd = data;
    926
    927	atmel_aes_unmap(dd);
    928	dd->is_async = true;
    929	(void)dd->resume(dd);
    930}
    931
    932static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
    933				  struct crypto_async_request *new_areq)
    934{
    935	struct crypto_async_request *areq, *backlog;
    936	struct atmel_aes_base_ctx *ctx;
    937	unsigned long flags;
    938	bool start_async;
    939	int err, ret = 0;
    940
    941	spin_lock_irqsave(&dd->lock, flags);
    942	if (new_areq)
    943		ret = crypto_enqueue_request(&dd->queue, new_areq);
    944	if (dd->flags & AES_FLAGS_BUSY) {
    945		spin_unlock_irqrestore(&dd->lock, flags);
    946		return ret;
    947	}
    948	backlog = crypto_get_backlog(&dd->queue);
    949	areq = crypto_dequeue_request(&dd->queue);
    950	if (areq)
    951		dd->flags |= AES_FLAGS_BUSY;
    952	spin_unlock_irqrestore(&dd->lock, flags);
    953
    954	if (!areq)
    955		return ret;
    956
    957	if (backlog)
    958		backlog->complete(backlog, -EINPROGRESS);
    959
    960	ctx = crypto_tfm_ctx(areq->tfm);
    961
    962	dd->areq = areq;
    963	dd->ctx = ctx;
    964	start_async = (areq != new_areq);
    965	dd->is_async = start_async;
    966
    967	/* WARNING: ctx->start() MAY change dd->is_async. */
    968	err = ctx->start(dd);
    969	return (start_async) ? ret : err;
    970}
    971
    972
    973/* AES async block ciphers */
    974
    975static int atmel_aes_transfer_complete(struct atmel_aes_dev *dd)
    976{
    977	return atmel_aes_complete(dd, 0);
    978}
    979
    980static int atmel_aes_start(struct atmel_aes_dev *dd)
    981{
    982	struct skcipher_request *req = skcipher_request_cast(dd->areq);
    983	struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
    984	bool use_dma = (req->cryptlen >= ATMEL_AES_DMA_THRESHOLD ||
    985			dd->ctx->block_size != AES_BLOCK_SIZE);
    986	int err;
    987
    988	atmel_aes_set_mode(dd, rctx);
    989
    990	err = atmel_aes_hw_init(dd);
    991	if (err)
    992		return atmel_aes_complete(dd, err);
    993
    994	atmel_aes_write_ctrl(dd, use_dma, (void *)req->iv);
    995	if (use_dma)
    996		return atmel_aes_dma_start(dd, req->src, req->dst,
    997					   req->cryptlen,
    998					   atmel_aes_transfer_complete);
    999
   1000	return atmel_aes_cpu_start(dd, req->src, req->dst, req->cryptlen,
   1001				   atmel_aes_transfer_complete);
   1002}
   1003
   1004static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd)
   1005{
   1006	struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
   1007	struct skcipher_request *req = skcipher_request_cast(dd->areq);
   1008	struct scatterlist *src, *dst;
   1009	size_t datalen;
   1010	u32 ctr;
   1011	u16 start, end;
   1012	bool use_dma, fragmented = false;
   1013
   1014	/* Check for transfer completion. */
   1015	ctx->offset += dd->total;
   1016	if (ctx->offset >= req->cryptlen)
   1017		return atmel_aes_transfer_complete(dd);
   1018
   1019	/* Compute data length. */
   1020	datalen = req->cryptlen - ctx->offset;
   1021	ctx->blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
   1022	ctr = be32_to_cpu(ctx->iv[3]);
   1023
   1024	/* Check 16bit counter overflow. */
   1025	start = ctr & 0xffff;
   1026	end = start + ctx->blocks - 1;
   1027
   1028	if (ctx->blocks >> 16 || end < start) {
   1029		ctr |= 0xffff;
   1030		datalen = AES_BLOCK_SIZE * (0x10000 - start);
   1031		fragmented = true;
   1032	}
   1033
   1034	use_dma = (datalen >= ATMEL_AES_DMA_THRESHOLD);
   1035
   1036	/* Jump to offset. */
   1037	src = scatterwalk_ffwd(ctx->src, req->src, ctx->offset);
   1038	dst = ((req->src == req->dst) ? src :
   1039	       scatterwalk_ffwd(ctx->dst, req->dst, ctx->offset));
   1040
   1041	/* Configure hardware. */
   1042	atmel_aes_write_ctrl(dd, use_dma, ctx->iv);
   1043	if (unlikely(fragmented)) {
   1044		/*
   1045		 * Increment the counter manually to cope with the hardware
   1046		 * counter overflow.
   1047		 */
   1048		ctx->iv[3] = cpu_to_be32(ctr);
   1049		crypto_inc((u8 *)ctx->iv, AES_BLOCK_SIZE);
   1050	}
   1051
   1052	if (use_dma)
   1053		return atmel_aes_dma_start(dd, src, dst, datalen,
   1054					   atmel_aes_ctr_transfer);
   1055
   1056	return atmel_aes_cpu_start(dd, src, dst, datalen,
   1057				   atmel_aes_ctr_transfer);
   1058}
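/*
 * Worked example (annotation): only the low 16 bits of the counter are
 * incremented by the IP, so a transfer is split whenever those bits would
 * wrap. E.g. with start = 0xfffe and 5 blocks left, end = 0x0002 < start,
 * so this fragment is limited to 0x10000 - 0xfffe = 2 blocks
 * (datalen = 32 bytes); the counter is then stepped past the wrap in
 * software (ctr |= 0xffff followed by crypto_inc()) and
 * atmel_aes_ctr_transfer() is re-entered for the remaining data.
 */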
   1059
   1060static int atmel_aes_ctr_start(struct atmel_aes_dev *dd)
   1061{
   1062	struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
   1063	struct skcipher_request *req = skcipher_request_cast(dd->areq);
   1064	struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
   1065	int err;
   1066
   1067	atmel_aes_set_mode(dd, rctx);
   1068
   1069	err = atmel_aes_hw_init(dd);
   1070	if (err)
   1071		return atmel_aes_complete(dd, err);
   1072
   1073	memcpy(ctx->iv, req->iv, AES_BLOCK_SIZE);
   1074	ctx->offset = 0;
   1075	dd->total = 0;
   1076	return atmel_aes_ctr_transfer(dd);
   1077}
   1078
   1079static int atmel_aes_xts_fallback(struct skcipher_request *req, bool enc)
   1080{
   1081	struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
   1082	struct atmel_aes_xts_ctx *ctx = crypto_skcipher_ctx(
   1083			crypto_skcipher_reqtfm(req));
   1084
   1085	skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
   1086	skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
   1087				      req->base.complete, req->base.data);
   1088	skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
   1089				   req->cryptlen, req->iv);
   1090
   1091	return enc ? crypto_skcipher_encrypt(&rctx->fallback_req) :
   1092		     crypto_skcipher_decrypt(&rctx->fallback_req);
   1093}
   1094
   1095static int atmel_aes_crypt(struct skcipher_request *req, unsigned long mode)
   1096{
   1097	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
   1098	struct atmel_aes_base_ctx *ctx = crypto_skcipher_ctx(skcipher);
   1099	struct atmel_aes_reqctx *rctx;
   1100	u32 opmode = mode & AES_FLAGS_OPMODE_MASK;
   1101
   1102	if (opmode == AES_FLAGS_XTS) {
   1103		if (req->cryptlen < XTS_BLOCK_SIZE)
   1104			return -EINVAL;
   1105
   1106		if (!IS_ALIGNED(req->cryptlen, XTS_BLOCK_SIZE))
   1107			return atmel_aes_xts_fallback(req,
   1108						      mode & AES_FLAGS_ENCRYPT);
   1109	}
   1110
   1111	/*
    1112	 * The ECB, CBC, CFB, OFB and CTR modes require the plaintext and
    1113	 * ciphertext to have a positive integer length.
   1114	 */
   1115	if (!req->cryptlen && opmode != AES_FLAGS_XTS)
   1116		return 0;
   1117
   1118	if ((opmode == AES_FLAGS_ECB || opmode == AES_FLAGS_CBC) &&
   1119	    !IS_ALIGNED(req->cryptlen, crypto_skcipher_blocksize(skcipher)))
   1120		return -EINVAL;
   1121
   1122	switch (mode & AES_FLAGS_OPMODE_MASK) {
   1123	case AES_FLAGS_CFB8:
   1124		ctx->block_size = CFB8_BLOCK_SIZE;
   1125		break;
   1126
   1127	case AES_FLAGS_CFB16:
   1128		ctx->block_size = CFB16_BLOCK_SIZE;
   1129		break;
   1130
   1131	case AES_FLAGS_CFB32:
   1132		ctx->block_size = CFB32_BLOCK_SIZE;
   1133		break;
   1134
   1135	case AES_FLAGS_CFB64:
   1136		ctx->block_size = CFB64_BLOCK_SIZE;
   1137		break;
   1138
   1139	default:
   1140		ctx->block_size = AES_BLOCK_SIZE;
   1141		break;
   1142	}
   1143	ctx->is_aead = false;
   1144
   1145	rctx = skcipher_request_ctx(req);
   1146	rctx->mode = mode;
   1147
   1148	if (opmode != AES_FLAGS_ECB &&
   1149	    !(mode & AES_FLAGS_ENCRYPT) && req->src == req->dst) {
   1150		unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
   1151
   1152		if (req->cryptlen >= ivsize)
   1153			scatterwalk_map_and_copy(rctx->lastc, req->src,
   1154						 req->cryptlen - ivsize,
   1155						 ivsize, 0);
   1156	}
   1157
   1158	return atmel_aes_handle_queue(ctx->dd, &req->base);
   1159}
   1160
   1161static int atmel_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
   1162			   unsigned int keylen)
   1163{
   1164	struct atmel_aes_base_ctx *ctx = crypto_skcipher_ctx(tfm);
   1165
   1166	if (keylen != AES_KEYSIZE_128 &&
   1167	    keylen != AES_KEYSIZE_192 &&
   1168	    keylen != AES_KEYSIZE_256)
   1169		return -EINVAL;
   1170
   1171	memcpy(ctx->key, key, keylen);
   1172	ctx->keylen = keylen;
   1173
   1174	return 0;
   1175}
   1176
   1177static int atmel_aes_ecb_encrypt(struct skcipher_request *req)
   1178{
   1179	return atmel_aes_crypt(req, AES_FLAGS_ECB | AES_FLAGS_ENCRYPT);
   1180}
   1181
   1182static int atmel_aes_ecb_decrypt(struct skcipher_request *req)
   1183{
   1184	return atmel_aes_crypt(req, AES_FLAGS_ECB);
   1185}
   1186
   1187static int atmel_aes_cbc_encrypt(struct skcipher_request *req)
   1188{
   1189	return atmel_aes_crypt(req, AES_FLAGS_CBC | AES_FLAGS_ENCRYPT);
   1190}
   1191
   1192static int atmel_aes_cbc_decrypt(struct skcipher_request *req)
   1193{
   1194	return atmel_aes_crypt(req, AES_FLAGS_CBC);
   1195}
   1196
   1197static int atmel_aes_ofb_encrypt(struct skcipher_request *req)
   1198{
   1199	return atmel_aes_crypt(req, AES_FLAGS_OFB | AES_FLAGS_ENCRYPT);
   1200}
   1201
   1202static int atmel_aes_ofb_decrypt(struct skcipher_request *req)
   1203{
   1204	return atmel_aes_crypt(req, AES_FLAGS_OFB);
   1205}
   1206
   1207static int atmel_aes_cfb_encrypt(struct skcipher_request *req)
   1208{
   1209	return atmel_aes_crypt(req, AES_FLAGS_CFB128 | AES_FLAGS_ENCRYPT);
   1210}
   1211
   1212static int atmel_aes_cfb_decrypt(struct skcipher_request *req)
   1213{
   1214	return atmel_aes_crypt(req, AES_FLAGS_CFB128);
   1215}
   1216
   1217static int atmel_aes_cfb64_encrypt(struct skcipher_request *req)
   1218{
   1219	return atmel_aes_crypt(req, AES_FLAGS_CFB64 | AES_FLAGS_ENCRYPT);
   1220}
   1221
   1222static int atmel_aes_cfb64_decrypt(struct skcipher_request *req)
   1223{
   1224	return atmel_aes_crypt(req, AES_FLAGS_CFB64);
   1225}
   1226
   1227static int atmel_aes_cfb32_encrypt(struct skcipher_request *req)
   1228{
   1229	return atmel_aes_crypt(req, AES_FLAGS_CFB32 | AES_FLAGS_ENCRYPT);
   1230}
   1231
   1232static int atmel_aes_cfb32_decrypt(struct skcipher_request *req)
   1233{
   1234	return atmel_aes_crypt(req, AES_FLAGS_CFB32);
   1235}
   1236
   1237static int atmel_aes_cfb16_encrypt(struct skcipher_request *req)
   1238{
   1239	return atmel_aes_crypt(req, AES_FLAGS_CFB16 | AES_FLAGS_ENCRYPT);
   1240}
   1241
   1242static int atmel_aes_cfb16_decrypt(struct skcipher_request *req)
   1243{
   1244	return atmel_aes_crypt(req, AES_FLAGS_CFB16);
   1245}
   1246
   1247static int atmel_aes_cfb8_encrypt(struct skcipher_request *req)
   1248{
   1249	return atmel_aes_crypt(req, AES_FLAGS_CFB8 | AES_FLAGS_ENCRYPT);
   1250}
   1251
   1252static int atmel_aes_cfb8_decrypt(struct skcipher_request *req)
   1253{
   1254	return atmel_aes_crypt(req, AES_FLAGS_CFB8);
   1255}
   1256
   1257static int atmel_aes_ctr_encrypt(struct skcipher_request *req)
   1258{
   1259	return atmel_aes_crypt(req, AES_FLAGS_CTR | AES_FLAGS_ENCRYPT);
   1260}
   1261
   1262static int atmel_aes_ctr_decrypt(struct skcipher_request *req)
   1263{
   1264	return atmel_aes_crypt(req, AES_FLAGS_CTR);
   1265}
   1266
   1267static int atmel_aes_init_tfm(struct crypto_skcipher *tfm)
   1268{
   1269	struct atmel_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
   1270	struct atmel_aes_dev *dd;
   1271
   1272	dd = atmel_aes_dev_alloc(&ctx->base);
   1273	if (!dd)
   1274		return -ENODEV;
   1275
   1276	crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
   1277	ctx->base.dd = dd;
   1278	ctx->base.start = atmel_aes_start;
   1279
   1280	return 0;
   1281}
   1282
   1283static int atmel_aes_ctr_init_tfm(struct crypto_skcipher *tfm)
   1284{
   1285	struct atmel_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
   1286	struct atmel_aes_dev *dd;
   1287
   1288	dd = atmel_aes_dev_alloc(&ctx->base);
   1289	if (!dd)
   1290		return -ENODEV;
   1291
   1292	crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
   1293	ctx->base.dd = dd;
   1294	ctx->base.start = atmel_aes_ctr_start;
   1295
   1296	return 0;
   1297}
   1298
   1299static struct skcipher_alg aes_algs[] = {
   1300{
   1301	.base.cra_name		= "ecb(aes)",
   1302	.base.cra_driver_name	= "atmel-ecb-aes",
   1303	.base.cra_blocksize	= AES_BLOCK_SIZE,
   1304	.base.cra_ctxsize	= sizeof(struct atmel_aes_ctx),
   1305
   1306	.init			= atmel_aes_init_tfm,
   1307	.min_keysize		= AES_MIN_KEY_SIZE,
   1308	.max_keysize		= AES_MAX_KEY_SIZE,
   1309	.setkey			= atmel_aes_setkey,
   1310	.encrypt		= atmel_aes_ecb_encrypt,
   1311	.decrypt		= atmel_aes_ecb_decrypt,
   1312},
   1313{
   1314	.base.cra_name		= "cbc(aes)",
   1315	.base.cra_driver_name	= "atmel-cbc-aes",
   1316	.base.cra_blocksize	= AES_BLOCK_SIZE,
   1317	.base.cra_ctxsize	= sizeof(struct atmel_aes_ctx),
   1318
   1319	.init			= atmel_aes_init_tfm,
   1320	.min_keysize		= AES_MIN_KEY_SIZE,
   1321	.max_keysize		= AES_MAX_KEY_SIZE,
   1322	.setkey			= atmel_aes_setkey,
   1323	.encrypt		= atmel_aes_cbc_encrypt,
   1324	.decrypt		= atmel_aes_cbc_decrypt,
   1325	.ivsize			= AES_BLOCK_SIZE,
   1326},
   1327{
   1328	.base.cra_name		= "ofb(aes)",
   1329	.base.cra_driver_name	= "atmel-ofb-aes",
   1330	.base.cra_blocksize	= 1,
   1331	.base.cra_ctxsize	= sizeof(struct atmel_aes_ctx),
   1332
   1333	.init			= atmel_aes_init_tfm,
   1334	.min_keysize		= AES_MIN_KEY_SIZE,
   1335	.max_keysize		= AES_MAX_KEY_SIZE,
   1336	.setkey			= atmel_aes_setkey,
   1337	.encrypt		= atmel_aes_ofb_encrypt,
   1338	.decrypt		= atmel_aes_ofb_decrypt,
   1339	.ivsize			= AES_BLOCK_SIZE,
   1340},
   1341{
   1342	.base.cra_name		= "cfb(aes)",
   1343	.base.cra_driver_name	= "atmel-cfb-aes",
   1344	.base.cra_blocksize	= AES_BLOCK_SIZE,
   1345	.base.cra_ctxsize	= sizeof(struct atmel_aes_ctx),
   1346
   1347	.init			= atmel_aes_init_tfm,
   1348	.min_keysize		= AES_MIN_KEY_SIZE,
   1349	.max_keysize		= AES_MAX_KEY_SIZE,
   1350	.setkey			= atmel_aes_setkey,
   1351	.encrypt		= atmel_aes_cfb_encrypt,
   1352	.decrypt		= atmel_aes_cfb_decrypt,
   1353	.ivsize			= AES_BLOCK_SIZE,
   1354},
   1355{
   1356	.base.cra_name		= "cfb32(aes)",
   1357	.base.cra_driver_name	= "atmel-cfb32-aes",
   1358	.base.cra_blocksize	= CFB32_BLOCK_SIZE,
   1359	.base.cra_ctxsize	= sizeof(struct atmel_aes_ctx),
   1360
   1361	.init			= atmel_aes_init_tfm,
   1362	.min_keysize		= AES_MIN_KEY_SIZE,
   1363	.max_keysize		= AES_MAX_KEY_SIZE,
   1364	.setkey			= atmel_aes_setkey,
   1365	.encrypt		= atmel_aes_cfb32_encrypt,
   1366	.decrypt		= atmel_aes_cfb32_decrypt,
   1367	.ivsize			= AES_BLOCK_SIZE,
   1368},
   1369{
   1370	.base.cra_name		= "cfb16(aes)",
   1371	.base.cra_driver_name	= "atmel-cfb16-aes",
   1372	.base.cra_blocksize	= CFB16_BLOCK_SIZE,
   1373	.base.cra_ctxsize	= sizeof(struct atmel_aes_ctx),
   1374
   1375	.init			= atmel_aes_init_tfm,
   1376	.min_keysize		= AES_MIN_KEY_SIZE,
   1377	.max_keysize		= AES_MAX_KEY_SIZE,
   1378	.setkey			= atmel_aes_setkey,
   1379	.encrypt		= atmel_aes_cfb16_encrypt,
   1380	.decrypt		= atmel_aes_cfb16_decrypt,
   1381	.ivsize			= AES_BLOCK_SIZE,
   1382},
   1383{
   1384	.base.cra_name		= "cfb8(aes)",
   1385	.base.cra_driver_name	= "atmel-cfb8-aes",
   1386	.base.cra_blocksize	= CFB8_BLOCK_SIZE,
   1387	.base.cra_ctxsize	= sizeof(struct atmel_aes_ctx),
   1388
   1389	.init			= atmel_aes_init_tfm,
   1390	.min_keysize		= AES_MIN_KEY_SIZE,
   1391	.max_keysize		= AES_MAX_KEY_SIZE,
   1392	.setkey			= atmel_aes_setkey,
   1393	.encrypt		= atmel_aes_cfb8_encrypt,
   1394	.decrypt		= atmel_aes_cfb8_decrypt,
   1395	.ivsize			= AES_BLOCK_SIZE,
   1396},
   1397{
   1398	.base.cra_name		= "ctr(aes)",
   1399	.base.cra_driver_name	= "atmel-ctr-aes",
   1400	.base.cra_blocksize	= 1,
   1401	.base.cra_ctxsize	= sizeof(struct atmel_aes_ctr_ctx),
   1402
   1403	.init			= atmel_aes_ctr_init_tfm,
   1404	.min_keysize		= AES_MIN_KEY_SIZE,
   1405	.max_keysize		= AES_MAX_KEY_SIZE,
   1406	.setkey			= atmel_aes_setkey,
   1407	.encrypt		= atmel_aes_ctr_encrypt,
   1408	.decrypt		= atmel_aes_ctr_decrypt,
   1409	.ivsize			= AES_BLOCK_SIZE,
   1410},
   1411};
   1412
   1413static struct skcipher_alg aes_cfb64_alg = {
   1414	.base.cra_name		= "cfb64(aes)",
   1415	.base.cra_driver_name	= "atmel-cfb64-aes",
   1416	.base.cra_blocksize	= CFB64_BLOCK_SIZE,
   1417	.base.cra_ctxsize	= sizeof(struct atmel_aes_ctx),
   1418
   1419	.init			= atmel_aes_init_tfm,
   1420	.min_keysize		= AES_MIN_KEY_SIZE,
   1421	.max_keysize		= AES_MAX_KEY_SIZE,
   1422	.setkey			= atmel_aes_setkey,
   1423	.encrypt		= atmel_aes_cfb64_encrypt,
   1424	.decrypt		= atmel_aes_cfb64_decrypt,
   1425	.ivsize			= AES_BLOCK_SIZE,
   1426};
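/*
 * Usage sketch (annotation, not part of the driver): once these skcipher
 * algorithms are registered, kernel users reach them through the generic
 * crypto API; the hardware implementation is picked by priority when
 * "cbc(aes)" is requested, or explicitly via the driver name
 * "atmel-cbc-aes". A minimal synchronous one-shot encryption could look
 * like this (key, iv, buf and buflen are caller-provided; buflen must be
 * a multiple of 16):
 *
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct scatterlist sg;
 *	int err;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	err = crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 *				      CRYPTO_TFM_REQ_MAY_SLEEP,
 *				      crypto_req_done, &wait);
 *	sg_init_one(&sg, buf, buflen);
 *	skcipher_request_set_crypt(req, &sg, &sg, buflen, iv);
 *	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 *
 *	skcipher_request_free(req);
 *	crypto_free_skcipher(tfm);
 */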
   1427
   1428
   1429/* gcm aead functions */
   1430
   1431static int atmel_aes_gcm_ghash(struct atmel_aes_dev *dd,
   1432			       const u32 *data, size_t datalen,
   1433			       const __be32 *ghash_in, __be32 *ghash_out,
   1434			       atmel_aes_fn_t resume);
   1435static int atmel_aes_gcm_ghash_init(struct atmel_aes_dev *dd);
   1436static int atmel_aes_gcm_ghash_finalize(struct atmel_aes_dev *dd);
   1437
   1438static int atmel_aes_gcm_start(struct atmel_aes_dev *dd);
   1439static int atmel_aes_gcm_process(struct atmel_aes_dev *dd);
   1440static int atmel_aes_gcm_length(struct atmel_aes_dev *dd);
   1441static int atmel_aes_gcm_data(struct atmel_aes_dev *dd);
   1442static int atmel_aes_gcm_tag_init(struct atmel_aes_dev *dd);
   1443static int atmel_aes_gcm_tag(struct atmel_aes_dev *dd);
   1444static int atmel_aes_gcm_finalize(struct atmel_aes_dev *dd);
   1445
   1446static inline struct atmel_aes_gcm_ctx *
   1447atmel_aes_gcm_ctx_cast(struct atmel_aes_base_ctx *ctx)
   1448{
   1449	return container_of(ctx, struct atmel_aes_gcm_ctx, base);
   1450}
   1451
   1452static int atmel_aes_gcm_ghash(struct atmel_aes_dev *dd,
   1453			       const u32 *data, size_t datalen,
   1454			       const __be32 *ghash_in, __be32 *ghash_out,
   1455			       atmel_aes_fn_t resume)
   1456{
   1457	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
   1458
   1459	dd->data = (u32 *)data;
   1460	dd->datalen = datalen;
   1461	ctx->ghash_in = ghash_in;
   1462	ctx->ghash_out = ghash_out;
   1463	ctx->ghash_resume = resume;
   1464
   1465	atmel_aes_write_ctrl(dd, false, NULL);
   1466	return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_ghash_init);
   1467}
   1468
   1469static int atmel_aes_gcm_ghash_init(struct atmel_aes_dev *dd)
   1470{
   1471	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
   1472
   1473	/* Set the data length. */
   1474	atmel_aes_write(dd, AES_AADLENR, dd->total);
   1475	atmel_aes_write(dd, AES_CLENR, 0);
   1476
   1477	/* If needed, overwrite the GCM Intermediate Hash Word Registers */
   1478	if (ctx->ghash_in)
   1479		atmel_aes_write_block(dd, AES_GHASHR(0), ctx->ghash_in);
   1480
   1481	return atmel_aes_gcm_ghash_finalize(dd);
   1482}
   1483
   1484static int atmel_aes_gcm_ghash_finalize(struct atmel_aes_dev *dd)
   1485{
   1486	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
   1487	u32 isr;
   1488
   1489	/* Write data into the Input Data Registers. */
   1490	while (dd->datalen > 0) {
   1491		atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
   1492		dd->data += 4;
   1493		dd->datalen -= AES_BLOCK_SIZE;
   1494
   1495		isr = atmel_aes_read(dd, AES_ISR);
   1496		if (!(isr & AES_INT_DATARDY)) {
   1497			dd->resume = atmel_aes_gcm_ghash_finalize;
   1498			atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
   1499			return -EINPROGRESS;
   1500		}
   1501	}
   1502
   1503	/* Read the computed hash from GHASHRx. */
   1504	atmel_aes_read_block(dd, AES_GHASHR(0), ctx->ghash_out);
   1505
   1506	return ctx->ghash_resume(dd);
   1507}
   1508
   1509
   1510static int atmel_aes_gcm_start(struct atmel_aes_dev *dd)
   1511{
   1512	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
   1513	struct aead_request *req = aead_request_cast(dd->areq);
   1514	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
   1515	struct atmel_aes_reqctx *rctx = aead_request_ctx(req);
   1516	size_t ivsize = crypto_aead_ivsize(tfm);
   1517	size_t datalen, padlen;
   1518	const void *iv = req->iv;
   1519	u8 *data = dd->buf;
   1520	int err;
   1521
   1522	atmel_aes_set_mode(dd, rctx);
   1523
   1524	err = atmel_aes_hw_init(dd);
   1525	if (err)
   1526		return atmel_aes_complete(dd, err);
   1527
   1528	if (likely(ivsize == GCM_AES_IV_SIZE)) {
   1529		memcpy(ctx->j0, iv, ivsize);
   1530		ctx->j0[3] = cpu_to_be32(1);
   1531		return atmel_aes_gcm_process(dd);
   1532	}
   1533
   1534	padlen = atmel_aes_padlen(ivsize, AES_BLOCK_SIZE);
   1535	datalen = ivsize + padlen + AES_BLOCK_SIZE;
   1536	if (datalen > dd->buflen)
   1537		return atmel_aes_complete(dd, -EINVAL);
   1538
   1539	memcpy(data, iv, ivsize);
   1540	memset(data + ivsize, 0, padlen + sizeof(u64));
   1541	((__be64 *)(data + datalen))[-1] = cpu_to_be64(ivsize * 8);
   1542
   1543	return atmel_aes_gcm_ghash(dd, (const u32 *)data, datalen,
   1544				   NULL, ctx->j0, atmel_aes_gcm_process);
   1545}
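/*
 * Annotation: this follows the J0 construction of NIST SP 800-38D. For a
 * 96-bit IV, J0 = IV || 0^31 || 1 (the fast path above); for any other IV
 * length, J0 = GHASH_H(IV || zero padding || [0]_64 || [len(IV)]_64),
 * which is exactly the buffer built here and handed to
 * atmel_aes_gcm_ghash().
 */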
   1546
   1547static int atmel_aes_gcm_process(struct atmel_aes_dev *dd)
   1548{
   1549	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
   1550	struct aead_request *req = aead_request_cast(dd->areq);
   1551	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
   1552	bool enc = atmel_aes_is_encrypt(dd);
   1553	u32 authsize;
   1554
   1555	/* Compute text length. */
   1556	authsize = crypto_aead_authsize(tfm);
   1557	ctx->textlen = req->cryptlen - (enc ? 0 : authsize);
   1558
   1559	/*
    1560	 * According to the tcrypt test suite, the GCM Automatic Tag Generation
   1561	 * fails when both the message and its associated data are empty.
   1562	 */
   1563	if (likely(req->assoclen != 0 || ctx->textlen != 0))
   1564		dd->flags |= AES_FLAGS_GTAGEN;
   1565
   1566	atmel_aes_write_ctrl(dd, false, NULL);
   1567	return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_length);
   1568}
   1569
   1570static int atmel_aes_gcm_length(struct atmel_aes_dev *dd)
   1571{
   1572	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
   1573	struct aead_request *req = aead_request_cast(dd->areq);
   1574	__be32 j0_lsw, *j0 = ctx->j0;
   1575	size_t padlen;
   1576
   1577	/* Write incr32(J0) into IV. */
   1578	j0_lsw = j0[3];
   1579	be32_add_cpu(&j0[3], 1);
   1580	atmel_aes_write_block(dd, AES_IVR(0), j0);
   1581	j0[3] = j0_lsw;
   1582
   1583	/* Set aad and text lengths. */
   1584	atmel_aes_write(dd, AES_AADLENR, req->assoclen);
   1585	atmel_aes_write(dd, AES_CLENR, ctx->textlen);
   1586
   1587	/* Check whether AAD are present. */
   1588	if (unlikely(req->assoclen == 0)) {
   1589		dd->datalen = 0;
   1590		return atmel_aes_gcm_data(dd);
   1591	}
   1592
   1593	/* Copy assoc data and add padding. */
   1594	padlen = atmel_aes_padlen(req->assoclen, AES_BLOCK_SIZE);
   1595	if (unlikely(req->assoclen + padlen > dd->buflen))
   1596		return atmel_aes_complete(dd, -EINVAL);
   1597	sg_copy_to_buffer(req->src, sg_nents(req->src), dd->buf, req->assoclen);
   1598
   1599	/* Write assoc data into the Input Data register. */
   1600	dd->data = (u32 *)dd->buf;
   1601	dd->datalen = req->assoclen + padlen;
   1602	return atmel_aes_gcm_data(dd);
   1603}
   1604
   1605static int atmel_aes_gcm_data(struct atmel_aes_dev *dd)
   1606{
   1607	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
   1608	struct aead_request *req = aead_request_cast(dd->areq);
   1609	bool use_dma = (ctx->textlen >= ATMEL_AES_DMA_THRESHOLD);
   1610	struct scatterlist *src, *dst;
   1611	u32 isr, mr;
   1612
   1613	/* Write AAD first. */
   1614	while (dd->datalen > 0) {
   1615		atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
   1616		dd->data += 4;
   1617		dd->datalen -= AES_BLOCK_SIZE;
   1618
   1619		isr = atmel_aes_read(dd, AES_ISR);
   1620		if (!(isr & AES_INT_DATARDY)) {
   1621			dd->resume = atmel_aes_gcm_data;
   1622			atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
   1623			return -EINPROGRESS;
   1624		}
   1625	}
   1626
   1627	/* GMAC only. */
   1628	if (unlikely(ctx->textlen == 0))
   1629		return atmel_aes_gcm_tag_init(dd);
   1630
   1631	/* Prepare src and dst scatter lists to transfer cipher/plain texts */
   1632	src = scatterwalk_ffwd(ctx->src, req->src, req->assoclen);
   1633	dst = ((req->src == req->dst) ? src :
   1634	       scatterwalk_ffwd(ctx->dst, req->dst, req->assoclen));
   1635
   1636	if (use_dma) {
   1637		/* Update the Mode Register for DMA transfers. */
   1638		mr = atmel_aes_read(dd, AES_MR);
   1639		mr &= ~(AES_MR_SMOD_MASK | AES_MR_DUALBUFF);
   1640		mr |= AES_MR_SMOD_IDATAR0;
   1641		if (dd->caps.has_dualbuff)
   1642			mr |= AES_MR_DUALBUFF;
   1643		atmel_aes_write(dd, AES_MR, mr);
   1644
   1645		return atmel_aes_dma_start(dd, src, dst, ctx->textlen,
   1646					   atmel_aes_gcm_tag_init);
   1647	}
   1648
   1649	return atmel_aes_cpu_start(dd, src, dst, ctx->textlen,
   1650				   atmel_aes_gcm_tag_init);
   1651}
   1652
   1653static int atmel_aes_gcm_tag_init(struct atmel_aes_dev *dd)
   1654{
   1655	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
   1656	struct aead_request *req = aead_request_cast(dd->areq);
   1657	__be64 *data = dd->buf;
   1658
   1659	if (likely(dd->flags & AES_FLAGS_GTAGEN)) {
   1660		if (!(atmel_aes_read(dd, AES_ISR) & AES_INT_TAGRDY)) {
   1661			dd->resume = atmel_aes_gcm_tag_init;
   1662			atmel_aes_write(dd, AES_IER, AES_INT_TAGRDY);
   1663			return -EINPROGRESS;
   1664		}
   1665
   1666		return atmel_aes_gcm_finalize(dd);
   1667	}
   1668
   1669	/* Read the GCM Intermediate Hash Word Registers. */
   1670	atmel_aes_read_block(dd, AES_GHASHR(0), ctx->ghash);
   1671
   1672	data[0] = cpu_to_be64(req->assoclen * 8);
   1673	data[1] = cpu_to_be64(ctx->textlen * 8);
   1674
   1675	return atmel_aes_gcm_ghash(dd, (const u32 *)data, AES_BLOCK_SIZE,
   1676				   ctx->ghash, ctx->ghash, atmel_aes_gcm_tag);
   1677}
   1678
   1679static int atmel_aes_gcm_tag(struct atmel_aes_dev *dd)
   1680{
   1681	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
   1682	unsigned long flags;
   1683
   1684	/*
   1685	 * Change mode to CTR to complete the tag generation.
   1686	 * Use J0 as Initialization Vector.
   1687	 */
   1688	flags = dd->flags;
   1689	dd->flags &= ~(AES_FLAGS_OPMODE_MASK | AES_FLAGS_GTAGEN);
   1690	dd->flags |= AES_FLAGS_CTR;
   1691	atmel_aes_write_ctrl(dd, false, ctx->j0);
   1692	dd->flags = flags;
   1693
   1694	atmel_aes_write_block(dd, AES_IDATAR(0), ctx->ghash);
   1695	return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_finalize);
   1696}
   1697
   1698static int atmel_aes_gcm_finalize(struct atmel_aes_dev *dd)
   1699{
   1700	struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
   1701	struct aead_request *req = aead_request_cast(dd->areq);
   1702	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
   1703	bool enc = atmel_aes_is_encrypt(dd);
   1704	u32 offset, authsize, itag[4], *otag = ctx->tag;
   1705	int err;
   1706
   1707	/* Read the computed tag. */
   1708	if (likely(dd->flags & AES_FLAGS_GTAGEN))
   1709		atmel_aes_read_block(dd, AES_TAGR(0), ctx->tag);
   1710	else
   1711		atmel_aes_read_block(dd, AES_ODATAR(0), ctx->tag);
   1712
   1713	offset = req->assoclen + ctx->textlen;
   1714	authsize = crypto_aead_authsize(tfm);
   1715	if (enc) {
   1716		scatterwalk_map_and_copy(otag, req->dst, offset, authsize, 1);
   1717		err = 0;
   1718	} else {
   1719		scatterwalk_map_and_copy(itag, req->src, offset, authsize, 0);
   1720		err = crypto_memneq(itag, otag, authsize) ? -EBADMSG : 0;
   1721	}
   1722
   1723	return atmel_aes_complete(dd, err);
   1724}
   1725
   1726static int atmel_aes_gcm_crypt(struct aead_request *req,
   1727			       unsigned long mode)
   1728{
   1729	struct atmel_aes_base_ctx *ctx;
   1730	struct atmel_aes_reqctx *rctx;
   1731
   1732	ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
   1733	ctx->block_size = AES_BLOCK_SIZE;
   1734	ctx->is_aead = true;
   1735
   1736	rctx = aead_request_ctx(req);
   1737	rctx->mode = AES_FLAGS_GCM | mode;
   1738
   1739	return atmel_aes_handle_queue(ctx->dd, &req->base);
   1740}
   1741
   1742static int atmel_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
   1743				unsigned int keylen)
   1744{
   1745	struct atmel_aes_base_ctx *ctx = crypto_aead_ctx(tfm);
   1746
   1747	if (keylen != AES_KEYSIZE_256 &&
   1748	    keylen != AES_KEYSIZE_192 &&
   1749	    keylen != AES_KEYSIZE_128)
   1750		return -EINVAL;
   1751
   1752	memcpy(ctx->key, key, keylen);
   1753	ctx->keylen = keylen;
   1754
   1755	return 0;
   1756}
   1757
   1758static int atmel_aes_gcm_setauthsize(struct crypto_aead *tfm,
   1759				     unsigned int authsize)
   1760{
   1761	return crypto_gcm_check_authsize(authsize);
   1762}
   1763
   1764static int atmel_aes_gcm_encrypt(struct aead_request *req)
   1765{
   1766	return atmel_aes_gcm_crypt(req, AES_FLAGS_ENCRYPT);
   1767}
   1768
   1769static int atmel_aes_gcm_decrypt(struct aead_request *req)
   1770{
   1771	return atmel_aes_gcm_crypt(req, 0);
   1772}
   1773
   1774static int atmel_aes_gcm_init(struct crypto_aead *tfm)
   1775{
   1776	struct atmel_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm);
   1777	struct atmel_aes_dev *dd;
   1778
   1779	dd = atmel_aes_dev_alloc(&ctx->base);
   1780	if (!dd)
   1781		return -ENODEV;
   1782
   1783	crypto_aead_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
   1784	ctx->base.dd = dd;
   1785	ctx->base.start = atmel_aes_gcm_start;
   1786
   1787	return 0;
   1788}
   1789
   1790static struct aead_alg aes_gcm_alg = {
   1791	.setkey		= atmel_aes_gcm_setkey,
   1792	.setauthsize	= atmel_aes_gcm_setauthsize,
   1793	.encrypt	= atmel_aes_gcm_encrypt,
   1794	.decrypt	= atmel_aes_gcm_decrypt,
   1795	.init		= atmel_aes_gcm_init,
   1796	.ivsize		= GCM_AES_IV_SIZE,
   1797	.maxauthsize	= AES_BLOCK_SIZE,
   1798
   1799	.base = {
   1800		.cra_name		= "gcm(aes)",
   1801		.cra_driver_name	= "atmel-gcm-aes",
   1802		.cra_blocksize		= 1,
   1803		.cra_ctxsize		= sizeof(struct atmel_aes_gcm_ctx),
   1804	},
   1805};
   1806
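        /*
         * Minimal usage sketch (generic kernel crypto API, not part of this
         * driver): a caller reaches the "gcm(aes)" implementation above by
         * name. key, iv, sg, assoclen and textlen are assumed to be set up
         * by the caller; error checking is elided.
         */
        #if 0
        	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
        	struct aead_request *req;
        	DECLARE_CRYPTO_WAIT(wait);
        	int err;

        	crypto_aead_setkey(tfm, key, AES_KEYSIZE_128);
        	crypto_aead_setauthsize(tfm, AES_BLOCK_SIZE);

        	req = aead_request_alloc(tfm, GFP_KERNEL);
        	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
        				  crypto_req_done, &wait);
        	aead_request_set_ad(req, assoclen);
        	/* src/dst map assoclen bytes of AAD, then the text (+ tag room on dst). */
        	aead_request_set_crypt(req, sg, sg, textlen, iv);	/* iv: GCM_AES_IV_SIZE bytes */
        	err = crypto_wait_req(crypto_aead_encrypt(req), &wait);

        	aead_request_free(req);
        	crypto_free_aead(tfm);
        #endif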
   1807
   1808/* xts functions */
   1809
   1810static inline struct atmel_aes_xts_ctx *
   1811atmel_aes_xts_ctx_cast(struct atmel_aes_base_ctx *ctx)
   1812{
   1813	return container_of(ctx, struct atmel_aes_xts_ctx, base);
   1814}
   1815
   1816static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd);
   1817
   1818static int atmel_aes_xts_start(struct atmel_aes_dev *dd)
   1819{
   1820	struct atmel_aes_xts_ctx *ctx = atmel_aes_xts_ctx_cast(dd->ctx);
   1821	struct skcipher_request *req = skcipher_request_cast(dd->areq);
   1822	struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req);
   1823	unsigned long flags;
   1824	int err;
   1825
   1826	atmel_aes_set_mode(dd, rctx);
   1827
   1828	err = atmel_aes_hw_init(dd);
   1829	if (err)
   1830		return atmel_aes_complete(dd, err);
   1831
   1832	/* Compute the tweak value from req->iv with ecb(aes). */
   1833	flags = dd->flags;
   1834	dd->flags &= ~AES_FLAGS_MODE_MASK;
   1835	dd->flags |= (AES_FLAGS_ECB | AES_FLAGS_ENCRYPT);
   1836	atmel_aes_write_ctrl_key(dd, false, NULL,
   1837				 ctx->key2, ctx->base.keylen);
   1838	dd->flags = flags;
   1839
   1840	atmel_aes_write_block(dd, AES_IDATAR(0), req->iv);
   1841	return atmel_aes_wait_for_data_ready(dd, atmel_aes_xts_process_data);
   1842}
   1843
   1844static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd)
   1845{
   1846	struct skcipher_request *req = skcipher_request_cast(dd->areq);
   1847	bool use_dma = (req->cryptlen >= ATMEL_AES_DMA_THRESHOLD);
   1848	u32 tweak[AES_BLOCK_SIZE / sizeof(u32)];
   1849	static const __le32 one[AES_BLOCK_SIZE / sizeof(u32)] = {cpu_to_le32(1), };
   1850	u8 *tweak_bytes = (u8 *)tweak;
   1851	int i;
   1852
   1853	/* Read the computed ciphered tweak value. */
   1854	atmel_aes_read_block(dd, AES_ODATAR(0), tweak);
   1855	/*
   1856	 * Hardware quirk:
    1857	 * the order of the ciphered tweak bytes needs to be reversed before
    1858	 * writing them into the AES_TWR(x) registers below.
   1859	 */
   1860	for (i = 0; i < AES_BLOCK_SIZE/2; ++i)
   1861		swap(tweak_bytes[i], tweak_bytes[AES_BLOCK_SIZE - 1 - i]);
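        	/*
        	 * E.g. a ciphered tweak read back as 00 01 ... 0e 0f is loaded into
        	 * the AES_TWR(x) registers below as 0f 0e ... 01 00.
        	 */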
   1862
   1863	/* Process the data. */
   1864	atmel_aes_write_ctrl(dd, use_dma, NULL);
   1865	atmel_aes_write_block(dd, AES_TWR(0), tweak);
   1866	atmel_aes_write_block(dd, AES_ALPHAR(0), one);
   1867	if (use_dma)
   1868		return atmel_aes_dma_start(dd, req->src, req->dst,
   1869					   req->cryptlen,
   1870					   atmel_aes_transfer_complete);
   1871
   1872	return atmel_aes_cpu_start(dd, req->src, req->dst, req->cryptlen,
   1873				   atmel_aes_transfer_complete);
   1874}
   1875
   1876static int atmel_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
   1877				unsigned int keylen)
   1878{
   1879	struct atmel_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
   1880	int err;
   1881
   1882	err = xts_check_key(crypto_skcipher_tfm(tfm), key, keylen);
   1883	if (err)
   1884		return err;
   1885
   1886	crypto_skcipher_clear_flags(ctx->fallback_tfm, CRYPTO_TFM_REQ_MASK);
   1887	crypto_skcipher_set_flags(ctx->fallback_tfm, tfm->base.crt_flags &
   1888				  CRYPTO_TFM_REQ_MASK);
   1889	err = crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
   1890	if (err)
   1891		return err;
   1892
   1893	memcpy(ctx->base.key, key, keylen/2);
   1894	memcpy(ctx->key2, key + keylen/2, keylen/2);
   1895	ctx->base.keylen = keylen/2;
   1896
   1897	return 0;
   1898}
   1899
   1900static int atmel_aes_xts_encrypt(struct skcipher_request *req)
   1901{
   1902	return atmel_aes_crypt(req, AES_FLAGS_XTS | AES_FLAGS_ENCRYPT);
   1903}
   1904
   1905static int atmel_aes_xts_decrypt(struct skcipher_request *req)
   1906{
   1907	return atmel_aes_crypt(req, AES_FLAGS_XTS);
   1908}
   1909
   1910static int atmel_aes_xts_init_tfm(struct crypto_skcipher *tfm)
   1911{
   1912	struct atmel_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
   1913	struct atmel_aes_dev *dd;
   1914	const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
   1915
   1916	dd = atmel_aes_dev_alloc(&ctx->base);
   1917	if (!dd)
   1918		return -ENODEV;
   1919
   1920	ctx->fallback_tfm = crypto_alloc_skcipher(tfm_name, 0,
   1921						  CRYPTO_ALG_NEED_FALLBACK);
   1922	if (IS_ERR(ctx->fallback_tfm))
   1923		return PTR_ERR(ctx->fallback_tfm);
   1924
   1925	crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx) +
   1926				    crypto_skcipher_reqsize(ctx->fallback_tfm));
   1927	ctx->base.dd = dd;
   1928	ctx->base.start = atmel_aes_xts_start;
   1929
   1930	return 0;
   1931}
   1932
   1933static void atmel_aes_xts_exit_tfm(struct crypto_skcipher *tfm)
   1934{
   1935	struct atmel_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
   1936
   1937	crypto_free_skcipher(ctx->fallback_tfm);
   1938}
   1939
   1940static struct skcipher_alg aes_xts_alg = {
   1941	.base.cra_name		= "xts(aes)",
   1942	.base.cra_driver_name	= "atmel-xts-aes",
   1943	.base.cra_blocksize	= AES_BLOCK_SIZE,
   1944	.base.cra_ctxsize	= sizeof(struct atmel_aes_xts_ctx),
   1945	.base.cra_flags		= CRYPTO_ALG_NEED_FALLBACK,
   1946
   1947	.min_keysize		= 2 * AES_MIN_KEY_SIZE,
   1948	.max_keysize		= 2 * AES_MAX_KEY_SIZE,
   1949	.ivsize			= AES_BLOCK_SIZE,
   1950	.setkey			= atmel_aes_xts_setkey,
   1951	.encrypt		= atmel_aes_xts_encrypt,
   1952	.decrypt		= atmel_aes_xts_decrypt,
   1953	.init			= atmel_aes_xts_init_tfm,
   1954	.exit			= atmel_aes_xts_exit_tfm,
   1955};
   1956
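        /*
         * Minimal usage sketch (generic kernel crypto API, not part of this
         * driver): "xts(aes)" takes a double-length key, key1 || key2, which
         * atmel_aes_xts_setkey() above splits in half, so AES-128-XTS uses a
         * 32-byte key. key, iv, sg and len are assumed to be provided by the
         * caller; error checking is elided.
         */
        #if 0
        	struct crypto_skcipher *tfm = crypto_alloc_skcipher("xts(aes)", 0, 0);
        	struct skcipher_request *req;
        	DECLARE_CRYPTO_WAIT(wait);
        	int err;

        	crypto_skcipher_setkey(tfm, key, 2 * AES_KEYSIZE_128);

        	req = skcipher_request_alloc(tfm, GFP_KERNEL);
        	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
        				      crypto_req_done, &wait);
        	/* iv carries the 16-byte sector tweak; the driver computes T = E_K2(iv). */
        	skcipher_request_set_crypt(req, sg, sg, len, iv);
        	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

        	skcipher_request_free(req);
        	crypto_free_skcipher(tfm);
        #endif
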
   1957#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
   1958/* authenc aead functions */
   1959
   1960static int atmel_aes_authenc_start(struct atmel_aes_dev *dd);
   1961static int atmel_aes_authenc_init(struct atmel_aes_dev *dd, int err,
   1962				  bool is_async);
   1963static int atmel_aes_authenc_transfer(struct atmel_aes_dev *dd, int err,
   1964				      bool is_async);
   1965static int atmel_aes_authenc_digest(struct atmel_aes_dev *dd);
   1966static int atmel_aes_authenc_final(struct atmel_aes_dev *dd, int err,
   1967				   bool is_async);
   1968
   1969static void atmel_aes_authenc_complete(struct atmel_aes_dev *dd, int err)
   1970{
   1971	struct aead_request *req = aead_request_cast(dd->areq);
   1972	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
   1973
   1974	if (err && (dd->flags & AES_FLAGS_OWN_SHA))
   1975		atmel_sha_authenc_abort(&rctx->auth_req);
   1976	dd->flags &= ~AES_FLAGS_OWN_SHA;
   1977}
   1978
   1979static int atmel_aes_authenc_start(struct atmel_aes_dev *dd)
   1980{
   1981	struct aead_request *req = aead_request_cast(dd->areq);
   1982	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
   1983	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
   1984	struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm);
   1985	int err;
   1986
   1987	atmel_aes_set_mode(dd, &rctx->base);
   1988
   1989	err = atmel_aes_hw_init(dd);
   1990	if (err)
   1991		return atmel_aes_complete(dd, err);
   1992
   1993	return atmel_sha_authenc_schedule(&rctx->auth_req, ctx->auth,
   1994					  atmel_aes_authenc_init, dd);
   1995}
   1996
   1997static int atmel_aes_authenc_init(struct atmel_aes_dev *dd, int err,
   1998				  bool is_async)
   1999{
   2000	struct aead_request *req = aead_request_cast(dd->areq);
   2001	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
   2002
   2003	if (is_async)
   2004		dd->is_async = true;
   2005	if (err)
   2006		return atmel_aes_complete(dd, err);
   2007
    2008	/* At this point we own the SHA device. */
   2009	dd->flags |= AES_FLAGS_OWN_SHA;
   2010
   2011	/* Configure the SHA device. */
   2012	return atmel_sha_authenc_init(&rctx->auth_req,
   2013				      req->src, req->assoclen,
   2014				      rctx->textlen,
   2015				      atmel_aes_authenc_transfer, dd);
   2016}
   2017
   2018static int atmel_aes_authenc_transfer(struct atmel_aes_dev *dd, int err,
   2019				      bool is_async)
   2020{
   2021	struct aead_request *req = aead_request_cast(dd->areq);
   2022	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
   2023	bool enc = atmel_aes_is_encrypt(dd);
   2024	struct scatterlist *src, *dst;
   2025	__be32 iv[AES_BLOCK_SIZE / sizeof(u32)];
   2026	u32 emr;
   2027
   2028	if (is_async)
   2029		dd->is_async = true;
   2030	if (err)
   2031		return atmel_aes_complete(dd, err);
   2032
   2033	/* Prepare src and dst scatter-lists to transfer cipher/plain texts. */
   2034	src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen);
   2035	dst = src;
   2036
   2037	if (req->src != req->dst)
   2038		dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
   2039
   2040	/* Configure the AES device. */
   2041	memcpy(iv, req->iv, sizeof(iv));
   2042
   2043	/*
    2044	 * Always set the 2nd parameter of atmel_aes_write_ctrl() to 'true',
    2045	 * even when the data transfer is actually performed by the CPU rather
    2046	 * than by the DMA, because the AES_MR_SMOD bitfield must be forced to
    2047	 * AES_MR_SMOD_IDATAR0: both AES_MR_SMOD and SHA_MR_SMOD have to be set
    2048	 * to *_MR_SMOD_IDATAR0.
   2049	 */
   2050	atmel_aes_write_ctrl(dd, true, iv);
   2051	emr = AES_EMR_PLIPEN;
   2052	if (!enc)
   2053		emr |= AES_EMR_PLIPD;
   2054	atmel_aes_write(dd, AES_EMR, emr);
   2055
   2056	/* Transfer data. */
   2057	return atmel_aes_dma_start(dd, src, dst, rctx->textlen,
   2058				   atmel_aes_authenc_digest);
   2059}
   2060
   2061static int atmel_aes_authenc_digest(struct atmel_aes_dev *dd)
   2062{
   2063	struct aead_request *req = aead_request_cast(dd->areq);
   2064	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
   2065
   2066	/* atmel_sha_authenc_final() releases the SHA device. */
   2067	dd->flags &= ~AES_FLAGS_OWN_SHA;
   2068	return atmel_sha_authenc_final(&rctx->auth_req,
   2069				       rctx->digest, sizeof(rctx->digest),
   2070				       atmel_aes_authenc_final, dd);
   2071}
   2072
   2073static int atmel_aes_authenc_final(struct atmel_aes_dev *dd, int err,
   2074				   bool is_async)
   2075{
   2076	struct aead_request *req = aead_request_cast(dd->areq);
   2077	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
   2078	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
   2079	bool enc = atmel_aes_is_encrypt(dd);
   2080	u32 idigest[SHA512_DIGEST_SIZE / sizeof(u32)], *odigest = rctx->digest;
   2081	u32 offs, authsize;
   2082
   2083	if (is_async)
   2084		dd->is_async = true;
   2085	if (err)
   2086		goto complete;
   2087
   2088	offs = req->assoclen + rctx->textlen;
   2089	authsize = crypto_aead_authsize(tfm);
   2090	if (enc) {
   2091		scatterwalk_map_and_copy(odigest, req->dst, offs, authsize, 1);
   2092	} else {
   2093		scatterwalk_map_and_copy(idigest, req->src, offs, authsize, 0);
   2094		if (crypto_memneq(idigest, odigest, authsize))
   2095			err = -EBADMSG;
   2096	}
   2097
   2098complete:
   2099	return atmel_aes_complete(dd, err);
   2100}
   2101
   2102static int atmel_aes_authenc_setkey(struct crypto_aead *tfm, const u8 *key,
   2103				    unsigned int keylen)
   2104{
   2105	struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm);
   2106	struct crypto_authenc_keys keys;
   2107	int err;
   2108
   2109	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
   2110		goto badkey;
   2111
   2112	if (keys.enckeylen > sizeof(ctx->base.key))
   2113		goto badkey;
   2114
   2115	/* Save auth key. */
   2116	err = atmel_sha_authenc_setkey(ctx->auth,
   2117				       keys.authkey, keys.authkeylen,
   2118				       crypto_aead_get_flags(tfm));
   2119	if (err) {
   2120		memzero_explicit(&keys, sizeof(keys));
   2121		return err;
   2122	}
   2123
   2124	/* Save enc key. */
   2125	ctx->base.keylen = keys.enckeylen;
   2126	memcpy(ctx->base.key, keys.enckey, keys.enckeylen);
   2127
   2128	memzero_explicit(&keys, sizeof(keys));
   2129	return 0;
   2130
   2131badkey:
   2132	memzero_explicit(&keys, sizeof(keys));
   2133	return -EINVAL;
   2134}
   2135
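        /*
         * For reference (generic authenc key format, not specific to this
         * driver): crypto_authenc_extractkeys() above expects the setkey()
         * blob to start with an rtattr carrying the cipher key length,
         * followed by the auth key and then the cipher key. A caller could
         * build it as sketched below; tfm, keybuf, authkey/authkeylen and
         * enckey/enckeylen are assumed.
         */
        #if 0
        	struct rtattr *rta = (struct rtattr *)keybuf;
        	struct crypto_authenc_key_param *param;
        	int err;

        	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
        	rta->rta_len = RTA_LENGTH(sizeof(*param));
        	param = RTA_DATA(rta);
        	param->enckeylen = cpu_to_be32(enckeylen);

        	memcpy(keybuf + RTA_SPACE(sizeof(*param)), authkey, authkeylen);
        	memcpy(keybuf + RTA_SPACE(sizeof(*param)) + authkeylen,
        	       enckey, enckeylen);

        	err = crypto_aead_setkey(tfm, keybuf,
        				 RTA_SPACE(sizeof(*param)) + authkeylen + enckeylen);
        #endif
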
   2136static int atmel_aes_authenc_init_tfm(struct crypto_aead *tfm,
   2137				      unsigned long auth_mode)
   2138{
   2139	struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm);
   2140	unsigned int auth_reqsize = atmel_sha_authenc_get_reqsize();
   2141	struct atmel_aes_dev *dd;
   2142
   2143	dd = atmel_aes_dev_alloc(&ctx->base);
   2144	if (!dd)
   2145		return -ENODEV;
   2146
   2147	ctx->auth = atmel_sha_authenc_spawn(auth_mode);
   2148	if (IS_ERR(ctx->auth))
   2149		return PTR_ERR(ctx->auth);
   2150
   2151	crypto_aead_set_reqsize(tfm, (sizeof(struct atmel_aes_authenc_reqctx) +
   2152				      auth_reqsize));
   2153	ctx->base.dd = dd;
   2154	ctx->base.start = atmel_aes_authenc_start;
   2155
   2156	return 0;
   2157}
   2158
   2159static int atmel_aes_authenc_hmac_sha1_init_tfm(struct crypto_aead *tfm)
   2160{
   2161	return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA1);
   2162}
   2163
   2164static int atmel_aes_authenc_hmac_sha224_init_tfm(struct crypto_aead *tfm)
   2165{
   2166	return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA224);
   2167}
   2168
   2169static int atmel_aes_authenc_hmac_sha256_init_tfm(struct crypto_aead *tfm)
   2170{
   2171	return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA256);
   2172}
   2173
   2174static int atmel_aes_authenc_hmac_sha384_init_tfm(struct crypto_aead *tfm)
   2175{
   2176	return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA384);
   2177}
   2178
   2179static int atmel_aes_authenc_hmac_sha512_init_tfm(struct crypto_aead *tfm)
   2180{
   2181	return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA512);
   2182}
   2183
   2184static void atmel_aes_authenc_exit_tfm(struct crypto_aead *tfm)
   2185{
   2186	struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm);
   2187
   2188	atmel_sha_authenc_free(ctx->auth);
   2189}
   2190
   2191static int atmel_aes_authenc_crypt(struct aead_request *req,
   2192				   unsigned long mode)
   2193{
   2194	struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req);
   2195	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
   2196	struct atmel_aes_base_ctx *ctx = crypto_aead_ctx(tfm);
   2197	u32 authsize = crypto_aead_authsize(tfm);
   2198	bool enc = (mode & AES_FLAGS_ENCRYPT);
   2199
   2200	/* Compute text length. */
   2201	if (!enc && req->cryptlen < authsize)
   2202		return -EINVAL;
   2203	rctx->textlen = req->cryptlen - (enc ? 0 : authsize);
   2204
   2205	/*
    2206	 * Empty messages are not supported yet: the SHA auto-padding can
    2207	 * only be used on non-empty messages, so a special case would have
    2208	 * to be implemented to handle empty messages.
   2209	 */
   2210	if (!rctx->textlen && !req->assoclen)
   2211		return -EINVAL;
   2212
   2213	rctx->base.mode = mode;
   2214	ctx->block_size = AES_BLOCK_SIZE;
   2215	ctx->is_aead = true;
   2216
   2217	return atmel_aes_handle_queue(ctx->dd, &req->base);
   2218}
   2219
   2220static int atmel_aes_authenc_cbc_aes_encrypt(struct aead_request *req)
   2221{
   2222	return atmel_aes_authenc_crypt(req, AES_FLAGS_CBC | AES_FLAGS_ENCRYPT);
   2223}
   2224
   2225static int atmel_aes_authenc_cbc_aes_decrypt(struct aead_request *req)
   2226{
   2227	return atmel_aes_authenc_crypt(req, AES_FLAGS_CBC);
   2228}
   2229
   2230static struct aead_alg aes_authenc_algs[] = {
   2231{
   2232	.setkey		= atmel_aes_authenc_setkey,
   2233	.encrypt	= atmel_aes_authenc_cbc_aes_encrypt,
   2234	.decrypt	= atmel_aes_authenc_cbc_aes_decrypt,
   2235	.init		= atmel_aes_authenc_hmac_sha1_init_tfm,
   2236	.exit		= atmel_aes_authenc_exit_tfm,
   2237	.ivsize		= AES_BLOCK_SIZE,
   2238	.maxauthsize	= SHA1_DIGEST_SIZE,
   2239
   2240	.base = {
   2241		.cra_name		= "authenc(hmac(sha1),cbc(aes))",
   2242		.cra_driver_name	= "atmel-authenc-hmac-sha1-cbc-aes",
   2243		.cra_blocksize		= AES_BLOCK_SIZE,
   2244		.cra_ctxsize		= sizeof(struct atmel_aes_authenc_ctx),
   2245	},
   2246},
   2247{
   2248	.setkey		= atmel_aes_authenc_setkey,
   2249	.encrypt	= atmel_aes_authenc_cbc_aes_encrypt,
   2250	.decrypt	= atmel_aes_authenc_cbc_aes_decrypt,
   2251	.init		= atmel_aes_authenc_hmac_sha224_init_tfm,
   2252	.exit		= atmel_aes_authenc_exit_tfm,
   2253	.ivsize		= AES_BLOCK_SIZE,
   2254	.maxauthsize	= SHA224_DIGEST_SIZE,
   2255
   2256	.base = {
   2257		.cra_name		= "authenc(hmac(sha224),cbc(aes))",
   2258		.cra_driver_name	= "atmel-authenc-hmac-sha224-cbc-aes",
   2259		.cra_blocksize		= AES_BLOCK_SIZE,
   2260		.cra_ctxsize		= sizeof(struct atmel_aes_authenc_ctx),
   2261	},
   2262},
   2263{
   2264	.setkey		= atmel_aes_authenc_setkey,
   2265	.encrypt	= atmel_aes_authenc_cbc_aes_encrypt,
   2266	.decrypt	= atmel_aes_authenc_cbc_aes_decrypt,
   2267	.init		= atmel_aes_authenc_hmac_sha256_init_tfm,
   2268	.exit		= atmel_aes_authenc_exit_tfm,
   2269	.ivsize		= AES_BLOCK_SIZE,
   2270	.maxauthsize	= SHA256_DIGEST_SIZE,
   2271
   2272	.base = {
   2273		.cra_name		= "authenc(hmac(sha256),cbc(aes))",
   2274		.cra_driver_name	= "atmel-authenc-hmac-sha256-cbc-aes",
   2275		.cra_blocksize		= AES_BLOCK_SIZE,
   2276		.cra_ctxsize		= sizeof(struct atmel_aes_authenc_ctx),
   2277	},
   2278},
   2279{
   2280	.setkey		= atmel_aes_authenc_setkey,
   2281	.encrypt	= atmel_aes_authenc_cbc_aes_encrypt,
   2282	.decrypt	= atmel_aes_authenc_cbc_aes_decrypt,
   2283	.init		= atmel_aes_authenc_hmac_sha384_init_tfm,
   2284	.exit		= atmel_aes_authenc_exit_tfm,
   2285	.ivsize		= AES_BLOCK_SIZE,
   2286	.maxauthsize	= SHA384_DIGEST_SIZE,
   2287
   2288	.base = {
   2289		.cra_name		= "authenc(hmac(sha384),cbc(aes))",
   2290		.cra_driver_name	= "atmel-authenc-hmac-sha384-cbc-aes",
   2291		.cra_blocksize		= AES_BLOCK_SIZE,
   2292		.cra_ctxsize		= sizeof(struct atmel_aes_authenc_ctx),
   2293	},
   2294},
   2295{
   2296	.setkey		= atmel_aes_authenc_setkey,
   2297	.encrypt	= atmel_aes_authenc_cbc_aes_encrypt,
   2298	.decrypt	= atmel_aes_authenc_cbc_aes_decrypt,
   2299	.init		= atmel_aes_authenc_hmac_sha512_init_tfm,
   2300	.exit		= atmel_aes_authenc_exit_tfm,
   2301	.ivsize		= AES_BLOCK_SIZE,
   2302	.maxauthsize	= SHA512_DIGEST_SIZE,
   2303
   2304	.base = {
   2305		.cra_name		= "authenc(hmac(sha512),cbc(aes))",
   2306		.cra_driver_name	= "atmel-authenc-hmac-sha512-cbc-aes",
   2307		.cra_blocksize		= AES_BLOCK_SIZE,
   2308		.cra_ctxsize		= sizeof(struct atmel_aes_authenc_ctx),
   2309	},
   2310},
   2311};
   2312#endif /* CONFIG_CRYPTO_DEV_ATMEL_AUTHENC */
   2313
   2314/* Probe functions */
   2315
   2316static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
   2317{
   2318	dd->buf = (void *)__get_free_pages(GFP_KERNEL, ATMEL_AES_BUFFER_ORDER);
   2319	dd->buflen = ATMEL_AES_BUFFER_SIZE;
   2320	dd->buflen &= ~(AES_BLOCK_SIZE - 1);
   2321
   2322	if (!dd->buf) {
   2323		dev_err(dd->dev, "unable to alloc pages.\n");
   2324		return -ENOMEM;
   2325	}
   2326
   2327	return 0;
   2328}
   2329
   2330static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd)
   2331{
    2332	free_pages((unsigned long)dd->buf, ATMEL_AES_BUFFER_ORDER);
   2333}
   2334
   2335static int atmel_aes_dma_init(struct atmel_aes_dev *dd)
   2336{
   2337	int ret;
   2338
   2339	/* Try to grab 2 DMA channels */
   2340	dd->src.chan = dma_request_chan(dd->dev, "tx");
   2341	if (IS_ERR(dd->src.chan)) {
   2342		ret = PTR_ERR(dd->src.chan);
   2343		goto err_dma_in;
   2344	}
   2345
   2346	dd->dst.chan = dma_request_chan(dd->dev, "rx");
   2347	if (IS_ERR(dd->dst.chan)) {
   2348		ret = PTR_ERR(dd->dst.chan);
   2349		goto err_dma_out;
   2350	}
   2351
   2352	return 0;
   2353
   2354err_dma_out:
   2355	dma_release_channel(dd->src.chan);
   2356err_dma_in:
   2357	dev_err(dd->dev, "no DMA channel available\n");
   2358	return ret;
   2359}
   2360
   2361static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
   2362{
   2363	dma_release_channel(dd->dst.chan);
   2364	dma_release_channel(dd->src.chan);
   2365}
   2366
   2367static void atmel_aes_queue_task(unsigned long data)
   2368{
   2369	struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;
   2370
   2371	atmel_aes_handle_queue(dd, NULL);
   2372}
   2373
   2374static void atmel_aes_done_task(unsigned long data)
   2375{
   2376	struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;
   2377
   2378	dd->is_async = true;
   2379	(void)dd->resume(dd);
   2380}
   2381
   2382static irqreturn_t atmel_aes_irq(int irq, void *dev_id)
   2383{
   2384	struct atmel_aes_dev *aes_dd = dev_id;
   2385	u32 reg;
   2386
   2387	reg = atmel_aes_read(aes_dd, AES_ISR);
   2388	if (reg & atmel_aes_read(aes_dd, AES_IMR)) {
   2389		atmel_aes_write(aes_dd, AES_IDR, reg);
   2390		if (AES_FLAGS_BUSY & aes_dd->flags)
   2391			tasklet_schedule(&aes_dd->done_task);
   2392		else
   2393			dev_warn(aes_dd->dev, "AES interrupt when no active requests.\n");
   2394		return IRQ_HANDLED;
   2395	}
   2396
   2397	return IRQ_NONE;
   2398}
   2399
   2400static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
   2401{
   2402	int i;
   2403
   2404#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
   2405	if (dd->caps.has_authenc)
   2406		for (i = 0; i < ARRAY_SIZE(aes_authenc_algs); i++)
   2407			crypto_unregister_aead(&aes_authenc_algs[i]);
   2408#endif
   2409
   2410	if (dd->caps.has_xts)
   2411		crypto_unregister_skcipher(&aes_xts_alg);
   2412
   2413	if (dd->caps.has_gcm)
   2414		crypto_unregister_aead(&aes_gcm_alg);
   2415
   2416	if (dd->caps.has_cfb64)
   2417		crypto_unregister_skcipher(&aes_cfb64_alg);
   2418
   2419	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
   2420		crypto_unregister_skcipher(&aes_algs[i]);
   2421}
   2422
   2423static void atmel_aes_crypto_alg_init(struct crypto_alg *alg)
   2424{
   2425	alg->cra_flags |= CRYPTO_ALG_ASYNC;
   2426	alg->cra_alignmask = 0xf;
   2427	alg->cra_priority = ATMEL_AES_PRIORITY;
   2428	alg->cra_module = THIS_MODULE;
   2429}
   2430
   2431static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
   2432{
   2433	int err, i, j;
   2434
   2435	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
   2436		atmel_aes_crypto_alg_init(&aes_algs[i].base);
   2437
   2438		err = crypto_register_skcipher(&aes_algs[i]);
   2439		if (err)
   2440			goto err_aes_algs;
   2441	}
   2442
   2443	if (dd->caps.has_cfb64) {
   2444		atmel_aes_crypto_alg_init(&aes_cfb64_alg.base);
   2445
   2446		err = crypto_register_skcipher(&aes_cfb64_alg);
   2447		if (err)
   2448			goto err_aes_cfb64_alg;
   2449	}
   2450
   2451	if (dd->caps.has_gcm) {
   2452		atmel_aes_crypto_alg_init(&aes_gcm_alg.base);
   2453
   2454		err = crypto_register_aead(&aes_gcm_alg);
   2455		if (err)
   2456			goto err_aes_gcm_alg;
   2457	}
   2458
   2459	if (dd->caps.has_xts) {
   2460		atmel_aes_crypto_alg_init(&aes_xts_alg.base);
   2461
   2462		err = crypto_register_skcipher(&aes_xts_alg);
   2463		if (err)
   2464			goto err_aes_xts_alg;
   2465	}
   2466
   2467#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
   2468	if (dd->caps.has_authenc) {
   2469		for (i = 0; i < ARRAY_SIZE(aes_authenc_algs); i++) {
   2470			atmel_aes_crypto_alg_init(&aes_authenc_algs[i].base);
   2471
   2472			err = crypto_register_aead(&aes_authenc_algs[i]);
   2473			if (err)
   2474				goto err_aes_authenc_alg;
   2475		}
   2476	}
   2477#endif
   2478
   2479	return 0;
   2480
   2481#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
   2482	/* i = ARRAY_SIZE(aes_authenc_algs); */
   2483err_aes_authenc_alg:
   2484	for (j = 0; j < i; j++)
   2485		crypto_unregister_aead(&aes_authenc_algs[j]);
   2486	crypto_unregister_skcipher(&aes_xts_alg);
   2487#endif
   2488err_aes_xts_alg:
   2489	crypto_unregister_aead(&aes_gcm_alg);
   2490err_aes_gcm_alg:
   2491	crypto_unregister_skcipher(&aes_cfb64_alg);
   2492err_aes_cfb64_alg:
   2493	i = ARRAY_SIZE(aes_algs);
   2494err_aes_algs:
   2495	for (j = 0; j < i; j++)
   2496		crypto_unregister_skcipher(&aes_algs[j]);
   2497
   2498	return err;
   2499}
   2500
   2501static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
   2502{
   2503	dd->caps.has_dualbuff = 0;
   2504	dd->caps.has_cfb64 = 0;
   2505	dd->caps.has_gcm = 0;
   2506	dd->caps.has_xts = 0;
   2507	dd->caps.has_authenc = 0;
   2508	dd->caps.max_burst_size = 1;
   2509
   2510	/* keep only major version number */
   2511	switch (dd->hw_version & 0xff0) {
   2512	case 0x700:
   2513	case 0x500:
   2514		dd->caps.has_dualbuff = 1;
   2515		dd->caps.has_cfb64 = 1;
   2516		dd->caps.has_gcm = 1;
   2517		dd->caps.has_xts = 1;
   2518		dd->caps.has_authenc = 1;
   2519		dd->caps.max_burst_size = 4;
   2520		break;
   2521	case 0x200:
   2522		dd->caps.has_dualbuff = 1;
   2523		dd->caps.has_cfb64 = 1;
   2524		dd->caps.has_gcm = 1;
   2525		dd->caps.max_burst_size = 4;
   2526		break;
   2527	case 0x130:
   2528		dd->caps.has_dualbuff = 1;
   2529		dd->caps.has_cfb64 = 1;
   2530		dd->caps.max_burst_size = 4;
   2531		break;
   2532	case 0x120:
   2533		break;
   2534	default:
   2535		dev_warn(dd->dev,
   2536				"Unmanaged aes version, set minimum capabilities\n");
   2537		break;
   2538	}
   2539}
   2540
   2541#if defined(CONFIG_OF)
   2542static const struct of_device_id atmel_aes_dt_ids[] = {
   2543	{ .compatible = "atmel,at91sam9g46-aes" },
   2544	{ /* sentinel */ }
   2545};
   2546MODULE_DEVICE_TABLE(of, atmel_aes_dt_ids);
   2547#endif
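
        /*
         * A matching device-tree node would, given the resources looked up in
         * atmel_aes_probe() below, have roughly this shape (sketch only; the
         * unit address, interrupt specifier and DMA specifiers are
         * placeholders):
         *
         *	aes@f8038000 {
         *		compatible = "atmel,at91sam9g46-aes";
         *		reg = <0xf8038000 0x100>;
         *		interrupts = <...>;
         *		clocks = <&aes_clk>;
         *		clock-names = "aes_clk";
         *		dmas = <...>, <...>;
         *		dma-names = "tx", "rx";
         *	};
         */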
   2548
   2549static int atmel_aes_probe(struct platform_device *pdev)
   2550{
   2551	struct atmel_aes_dev *aes_dd;
   2552	struct device *dev = &pdev->dev;
   2553	struct resource *aes_res;
   2554	int err;
   2555
   2556	aes_dd = devm_kzalloc(&pdev->dev, sizeof(*aes_dd), GFP_KERNEL);
   2557	if (!aes_dd)
   2558		return -ENOMEM;
   2559
   2560	aes_dd->dev = dev;
   2561
   2562	platform_set_drvdata(pdev, aes_dd);
   2563
   2564	INIT_LIST_HEAD(&aes_dd->list);
   2565	spin_lock_init(&aes_dd->lock);
   2566
   2567	tasklet_init(&aes_dd->done_task, atmel_aes_done_task,
   2568					(unsigned long)aes_dd);
   2569	tasklet_init(&aes_dd->queue_task, atmel_aes_queue_task,
   2570					(unsigned long)aes_dd);
   2571
   2572	crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH);
   2573
   2574	/* Get the base address */
   2575	aes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
   2576	if (!aes_res) {
   2577		dev_err(dev, "no MEM resource info\n");
   2578		err = -ENODEV;
   2579		goto err_tasklet_kill;
   2580	}
   2581	aes_dd->phys_base = aes_res->start;
   2582
   2583	/* Get the IRQ */
    2584	aes_dd->irq = platform_get_irq(pdev, 0);
   2585	if (aes_dd->irq < 0) {
   2586		err = aes_dd->irq;
   2587		goto err_tasklet_kill;
   2588	}
   2589
   2590	err = devm_request_irq(&pdev->dev, aes_dd->irq, atmel_aes_irq,
   2591			       IRQF_SHARED, "atmel-aes", aes_dd);
   2592	if (err) {
   2593		dev_err(dev, "unable to request aes irq.\n");
   2594		goto err_tasklet_kill;
   2595	}
   2596
    2597	/* Initialize the clock */
   2598	aes_dd->iclk = devm_clk_get(&pdev->dev, "aes_clk");
   2599	if (IS_ERR(aes_dd->iclk)) {
   2600		dev_err(dev, "clock initialization failed.\n");
   2601		err = PTR_ERR(aes_dd->iclk);
   2602		goto err_tasklet_kill;
   2603	}
   2604
   2605	aes_dd->io_base = devm_ioremap_resource(&pdev->dev, aes_res);
   2606	if (IS_ERR(aes_dd->io_base)) {
   2607		dev_err(dev, "can't ioremap\n");
   2608		err = PTR_ERR(aes_dd->io_base);
   2609		goto err_tasklet_kill;
   2610	}
   2611
   2612	err = clk_prepare(aes_dd->iclk);
   2613	if (err)
   2614		goto err_tasklet_kill;
   2615
   2616	err = atmel_aes_hw_version_init(aes_dd);
   2617	if (err)
   2618		goto err_iclk_unprepare;
   2619
   2620	atmel_aes_get_cap(aes_dd);
   2621
   2622#if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC)
   2623	if (aes_dd->caps.has_authenc && !atmel_sha_authenc_is_ready()) {
   2624		err = -EPROBE_DEFER;
   2625		goto err_iclk_unprepare;
   2626	}
   2627#endif
   2628
   2629	err = atmel_aes_buff_init(aes_dd);
   2630	if (err)
   2631		goto err_iclk_unprepare;
   2632
   2633	err = atmel_aes_dma_init(aes_dd);
   2634	if (err)
   2635		goto err_buff_cleanup;
   2636
   2637	spin_lock(&atmel_aes.lock);
   2638	list_add_tail(&aes_dd->list, &atmel_aes.dev_list);
   2639	spin_unlock(&atmel_aes.lock);
   2640
   2641	err = atmel_aes_register_algs(aes_dd);
   2642	if (err)
   2643		goto err_algs;
   2644
   2645	dev_info(dev, "Atmel AES - Using %s, %s for DMA transfers\n",
   2646			dma_chan_name(aes_dd->src.chan),
   2647			dma_chan_name(aes_dd->dst.chan));
   2648
   2649	return 0;
   2650
   2651err_algs:
   2652	spin_lock(&atmel_aes.lock);
   2653	list_del(&aes_dd->list);
   2654	spin_unlock(&atmel_aes.lock);
   2655	atmel_aes_dma_cleanup(aes_dd);
   2656err_buff_cleanup:
   2657	atmel_aes_buff_cleanup(aes_dd);
   2658err_iclk_unprepare:
   2659	clk_unprepare(aes_dd->iclk);
   2660err_tasklet_kill:
   2661	tasklet_kill(&aes_dd->done_task);
   2662	tasklet_kill(&aes_dd->queue_task);
   2663
   2664	return err;
   2665}
   2666
   2667static int atmel_aes_remove(struct platform_device *pdev)
   2668{
   2669	struct atmel_aes_dev *aes_dd;
   2670
   2671	aes_dd = platform_get_drvdata(pdev);
   2672	if (!aes_dd)
   2673		return -ENODEV;
   2674	spin_lock(&atmel_aes.lock);
   2675	list_del(&aes_dd->list);
   2676	spin_unlock(&atmel_aes.lock);
   2677
   2678	atmel_aes_unregister_algs(aes_dd);
   2679
   2680	tasklet_kill(&aes_dd->done_task);
   2681	tasklet_kill(&aes_dd->queue_task);
   2682
   2683	atmel_aes_dma_cleanup(aes_dd);
   2684	atmel_aes_buff_cleanup(aes_dd);
   2685
   2686	clk_unprepare(aes_dd->iclk);
   2687
   2688	return 0;
   2689}
   2690
   2691static struct platform_driver atmel_aes_driver = {
   2692	.probe		= atmel_aes_probe,
   2693	.remove		= atmel_aes_remove,
   2694	.driver		= {
   2695		.name	= "atmel_aes",
   2696		.of_match_table = of_match_ptr(atmel_aes_dt_ids),
   2697	},
   2698};
   2699
   2700module_platform_driver(atmel_aes_driver);
   2701
   2702MODULE_DESCRIPTION("Atmel AES hw acceleration support.");
   2703MODULE_LICENSE("GPL v2");
   2704MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");