cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

artpec6_crypto.c (79585B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 *   Driver for ARTPEC-6 crypto block using the kernel asynchronous crypto api.
      4 *
      5 *    Copyright (C) 2014-2017  Axis Communications AB
      6 */
      7#define pr_fmt(fmt)     KBUILD_MODNAME ": " fmt
      8
      9#include <linux/bitfield.h>
     10#include <linux/crypto.h>
     11#include <linux/debugfs.h>
     12#include <linux/delay.h>
     13#include <linux/dma-mapping.h>
     14#include <linux/fault-inject.h>
     15#include <linux/init.h>
     16#include <linux/interrupt.h>
     17#include <linux/kernel.h>
     18#include <linux/list.h>
     19#include <linux/module.h>
     20#include <linux/of.h>
     21#include <linux/platform_device.h>
     22#include <linux/scatterlist.h>
     23#include <linux/slab.h>
     24
     25#include <crypto/aes.h>
     26#include <crypto/gcm.h>
     27#include <crypto/internal/aead.h>
     28#include <crypto/internal/hash.h>
     29#include <crypto/internal/skcipher.h>
     30#include <crypto/scatterwalk.h>
     31#include <crypto/sha1.h>
     32#include <crypto/sha2.h>
     33#include <crypto/xts.h>
     34
     35/* Max length of a line in all cache levels for Artpec SoCs. */
     36#define ARTPEC_CACHE_LINE_MAX	32
     37
     38#define PDMA_OUT_CFG		0x0000
     39#define PDMA_OUT_BUF_CFG	0x0004
     40#define PDMA_OUT_CMD		0x0008
     41#define PDMA_OUT_DESCRQ_PUSH	0x0010
     42#define PDMA_OUT_DESCRQ_STAT	0x0014
     43
     44#define A6_PDMA_IN_CFG		0x0028
     45#define A6_PDMA_IN_BUF_CFG	0x002c
     46#define A6_PDMA_IN_CMD		0x0030
     47#define A6_PDMA_IN_STATQ_PUSH	0x0038
     48#define A6_PDMA_IN_DESCRQ_PUSH	0x0044
     49#define A6_PDMA_IN_DESCRQ_STAT	0x0048
     50#define A6_PDMA_INTR_MASK	0x0068
     51#define A6_PDMA_ACK_INTR	0x006c
     52#define A6_PDMA_MASKED_INTR	0x0074
     53
     54#define A7_PDMA_IN_CFG		0x002c
     55#define A7_PDMA_IN_BUF_CFG	0x0030
     56#define A7_PDMA_IN_CMD		0x0034
     57#define A7_PDMA_IN_STATQ_PUSH	0x003c
     58#define A7_PDMA_IN_DESCRQ_PUSH	0x0048
     59#define A7_PDMA_IN_DESCRQ_STAT	0x004C
     60#define A7_PDMA_INTR_MASK	0x006c
     61#define A7_PDMA_ACK_INTR	0x0070
     62#define A7_PDMA_MASKED_INTR	0x0078
     63
     64#define PDMA_OUT_CFG_EN				BIT(0)
     65
     66#define PDMA_OUT_BUF_CFG_DATA_BUF_SIZE		GENMASK(4, 0)
     67#define PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE		GENMASK(9, 5)
     68
     69#define PDMA_OUT_CMD_START			BIT(0)
     70#define A6_PDMA_OUT_CMD_STOP			BIT(3)
     71#define A7_PDMA_OUT_CMD_STOP			BIT(2)
     72
     73#define PDMA_OUT_DESCRQ_PUSH_LEN		GENMASK(5, 0)
     74#define PDMA_OUT_DESCRQ_PUSH_ADDR		GENMASK(31, 6)
     75
     76#define PDMA_OUT_DESCRQ_STAT_LEVEL		GENMASK(3, 0)
     77#define PDMA_OUT_DESCRQ_STAT_SIZE		GENMASK(7, 4)
     78
     79#define PDMA_IN_CFG_EN				BIT(0)
     80
     81#define PDMA_IN_BUF_CFG_DATA_BUF_SIZE		GENMASK(4, 0)
     82#define PDMA_IN_BUF_CFG_DESCR_BUF_SIZE		GENMASK(9, 5)
     83#define PDMA_IN_BUF_CFG_STAT_BUF_SIZE		GENMASK(14, 10)
     84
     85#define PDMA_IN_CMD_START			BIT(0)
     86#define A6_PDMA_IN_CMD_FLUSH_STAT		BIT(2)
     87#define A6_PDMA_IN_CMD_STOP			BIT(3)
     88#define A7_PDMA_IN_CMD_FLUSH_STAT		BIT(1)
     89#define A7_PDMA_IN_CMD_STOP			BIT(2)
     90
     91#define PDMA_IN_STATQ_PUSH_LEN			GENMASK(5, 0)
     92#define PDMA_IN_STATQ_PUSH_ADDR			GENMASK(31, 6)
     93
     94#define PDMA_IN_DESCRQ_PUSH_LEN			GENMASK(5, 0)
     95#define PDMA_IN_DESCRQ_PUSH_ADDR		GENMASK(31, 6)
     96
     97#define PDMA_IN_DESCRQ_STAT_LEVEL		GENMASK(3, 0)
     98#define PDMA_IN_DESCRQ_STAT_SIZE		GENMASK(7, 4)
     99
    100#define A6_PDMA_INTR_MASK_IN_DATA		BIT(2)
    101#define A6_PDMA_INTR_MASK_IN_EOP		BIT(3)
    102#define A6_PDMA_INTR_MASK_IN_EOP_FLUSH		BIT(4)
    103
    104#define A7_PDMA_INTR_MASK_IN_DATA		BIT(3)
    105#define A7_PDMA_INTR_MASK_IN_EOP		BIT(4)
    106#define A7_PDMA_INTR_MASK_IN_EOP_FLUSH		BIT(5)
    107
    108#define A6_CRY_MD_OPER		GENMASK(19, 16)
    109
    110#define A6_CRY_MD_HASH_SEL_CTX	GENMASK(21, 20)
    111#define A6_CRY_MD_HASH_HMAC_FIN	BIT(23)
    112
    113#define A6_CRY_MD_CIPHER_LEN	GENMASK(21, 20)
    114#define A6_CRY_MD_CIPHER_DECR	BIT(22)
    115#define A6_CRY_MD_CIPHER_TWEAK	BIT(23)
    116#define A6_CRY_MD_CIPHER_DSEQ	BIT(24)
    117
    118#define A7_CRY_MD_OPER		GENMASK(11, 8)
    119
    120#define A7_CRY_MD_HASH_SEL_CTX	GENMASK(13, 12)
    121#define A7_CRY_MD_HASH_HMAC_FIN	BIT(15)
    122
    123#define A7_CRY_MD_CIPHER_LEN	GENMASK(13, 12)
    124#define A7_CRY_MD_CIPHER_DECR	BIT(14)
    125#define A7_CRY_MD_CIPHER_TWEAK	BIT(15)
    126#define A7_CRY_MD_CIPHER_DSEQ	BIT(16)
    127
    128/* DMA metadata constants */
    129#define regk_crypto_aes_cbc     0x00000002
    130#define regk_crypto_aes_ctr     0x00000003
    131#define regk_crypto_aes_ecb     0x00000001
    132#define regk_crypto_aes_gcm     0x00000004
    133#define regk_crypto_aes_xts     0x00000005
    134#define regk_crypto_cache       0x00000002
    135#define a6_regk_crypto_dlkey    0x0000000a
    136#define a7_regk_crypto_dlkey    0x0000000e
    137#define regk_crypto_ext         0x00000001
    138#define regk_crypto_hmac_sha1   0x00000007
    139#define regk_crypto_hmac_sha256 0x00000009
    140#define regk_crypto_init        0x00000000
    141#define regk_crypto_key_128     0x00000000
    142#define regk_crypto_key_192     0x00000001
    143#define regk_crypto_key_256     0x00000002
    144#define regk_crypto_null        0x00000000
    145#define regk_crypto_sha1        0x00000006
    146#define regk_crypto_sha256      0x00000008
    147
    148/* DMA descriptor structures */
    149struct pdma_descr_ctrl  {
    150	unsigned char short_descr : 1;
    151	unsigned char pad1        : 1;
    152	unsigned char eop         : 1;
    153	unsigned char intr        : 1;
    154	unsigned char short_len   : 3;
    155	unsigned char pad2        : 1;
    156} __packed;
    157
    158struct pdma_data_descr {
    159	unsigned int len : 24;
    160	unsigned int buf : 32;
    161} __packed;
    162
    163struct pdma_short_descr {
    164	unsigned char data[7];
    165} __packed;
    166
    167struct pdma_descr {
    168	struct pdma_descr_ctrl ctrl;
    169	union {
    170		struct pdma_data_descr   data;
    171		struct pdma_short_descr  shrt;
    172	};
    173};
    174
    175struct pdma_stat_descr {
    176	unsigned char pad1        : 1;
    177	unsigned char pad2        : 1;
    178	unsigned char eop         : 1;
    179	unsigned char pad3        : 5;
    180	unsigned int  len         : 24;
    181};
    182
    183/* Each descriptor array can hold max 64 entries */
    184#define PDMA_DESCR_COUNT	64
    185
    186#define MODULE_NAME   "Artpec-6 CA"
    187
    188/* Hash modes (including HMAC variants) */
    189#define ARTPEC6_CRYPTO_HASH_SHA1	1
    190#define ARTPEC6_CRYPTO_HASH_SHA256	2
    191
    192/* Crypto modes */
    193#define ARTPEC6_CRYPTO_CIPHER_AES_ECB	1
    194#define ARTPEC6_CRYPTO_CIPHER_AES_CBC	2
    195#define ARTPEC6_CRYPTO_CIPHER_AES_CTR	3
    196#define ARTPEC6_CRYPTO_CIPHER_AES_XTS	5
    197
    198/* The PDMA is a DMA-engine tightly coupled with a ciphering engine.
    199 * It operates on a descriptor array with up to 64 descriptor entries.
    200 * The arrays must be 64 byte aligned in memory.
    201 *
    202 * The ciphering unit has no registers and is completely controlled by
     203 * a 4-byte metadata word that is inserted at the beginning of each dma packet.
    204 *
    205 * A dma packet is a sequence of descriptors terminated by setting the .eop
    206 * field in the final descriptor of the packet.
    207 *
    208 * Multiple packets are used for providing context data, key data and
    209 * the plain/ciphertext.
    210 *
    211 *   PDMA Descriptors (Array)
    212 *  +------+------+------+~~+-------+------+----
    213 *  |  0   |  1   |  2   |~~| 11 EOP|  12  |  ....
    214 *  +--+---+--+---+----+-+~~+-------+----+-+----
    215 *     |      |        |       |         |
    216 *     |      |        |       |         |
    217 *   __|__  +-------++-------++-------+ +----+
    218 *  | MD  | |Payload||Payload||Payload| | MD |
    219 *  +-----+ +-------++-------++-------+ +----+
    220 */
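        /* For illustration, a sketch of how the 4-byte metadata word heading
         * a cipher packet is assembled from the A6_/A7_CRY_MD_* fields above;
         * this mirrors what artpec6_crypto_prepare_crypto() below does for
         * AES-CBC decryption with a 128-bit key on ARTPEC-6:
         *
         *   cipher_md = FIELD_PREP(A6_CRY_MD_OPER, regk_crypto_aes_cbc) |
         *               FIELD_PREP(A6_CRY_MD_CIPHER_LEN, regk_crypto_key_128) |
         *               A6_CRY_MD_CIPHER_DECR;
         *
         * The word is sent as the first out descriptor of its packet,
         * followed by the IV and the ciphertext payload descriptors.
         */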
    221
    222struct artpec6_crypto_bounce_buffer {
    223	struct list_head list;
    224	size_t length;
    225	struct scatterlist *sg;
    226	size_t offset;
    227	/* buf is aligned to ARTPEC_CACHE_LINE_MAX and
     228	 * holds up to ARTPEC_CACHE_LINE_MAX bytes of data.
    229	 */
    230	void *buf;
    231};
    232
    233struct artpec6_crypto_dma_map {
    234	dma_addr_t dma_addr;
    235	size_t size;
    236	enum dma_data_direction dir;
    237};
    238
    239struct artpec6_crypto_dma_descriptors {
    240	struct pdma_descr out[PDMA_DESCR_COUNT] __aligned(64);
    241	struct pdma_descr in[PDMA_DESCR_COUNT] __aligned(64);
    242	u32 stat[PDMA_DESCR_COUNT] __aligned(64);
    243	struct list_head bounce_buffers;
    244	/* Enough maps for all out/in buffers, and all three descr. arrays */
    245	struct artpec6_crypto_dma_map maps[PDMA_DESCR_COUNT * 2 + 2];
    246	dma_addr_t out_dma_addr;
    247	dma_addr_t in_dma_addr;
    248	dma_addr_t stat_dma_addr;
    249	size_t out_cnt;
    250	size_t in_cnt;
    251	size_t map_count;
    252};
    253
    254enum artpec6_crypto_variant {
    255	ARTPEC6_CRYPTO,
    256	ARTPEC7_CRYPTO,
    257};
    258
    259struct artpec6_crypto {
    260	void __iomem *base;
    261	spinlock_t queue_lock;
    262	struct list_head queue; /* waiting for pdma fifo space */
    263	struct list_head pending; /* submitted to pdma fifo */
    264	struct tasklet_struct task;
    265	struct kmem_cache *dma_cache;
    266	int pending_count;
    267	struct timer_list timer;
    268	enum artpec6_crypto_variant variant;
    269	void *pad_buffer; /* cache-aligned block padding buffer */
    270	void *zero_buffer;
    271};
    272
    273enum artpec6_crypto_hash_flags {
    274	HASH_FLAG_INIT_CTX = 2,
    275	HASH_FLAG_UPDATE = 4,
    276	HASH_FLAG_FINALIZE = 8,
    277	HASH_FLAG_HMAC = 16,
    278	HASH_FLAG_UPDATE_KEY = 32,
    279};
    280
    281struct artpec6_crypto_req_common {
    282	struct list_head list;
    283	struct list_head complete_in_progress;
    284	struct artpec6_crypto_dma_descriptors *dma;
    285	struct crypto_async_request *req;
    286	void (*complete)(struct crypto_async_request *req);
    287	gfp_t gfp_flags;
    288};
    289
    290struct artpec6_hash_request_context {
    291	char partial_buffer[SHA256_BLOCK_SIZE];
    292	char partial_buffer_out[SHA256_BLOCK_SIZE];
    293	char key_buffer[SHA256_BLOCK_SIZE];
    294	char pad_buffer[SHA256_BLOCK_SIZE + 32];
    295	unsigned char digeststate[SHA256_DIGEST_SIZE];
    296	size_t partial_bytes;
    297	u64 digcnt;
    298	u32 key_md;
    299	u32 hash_md;
    300	enum artpec6_crypto_hash_flags hash_flags;
    301	struct artpec6_crypto_req_common common;
    302};
    303
    304struct artpec6_hash_export_state {
    305	char partial_buffer[SHA256_BLOCK_SIZE];
    306	unsigned char digeststate[SHA256_DIGEST_SIZE];
    307	size_t partial_bytes;
    308	u64 digcnt;
    309	int oper;
    310	unsigned int hash_flags;
    311};
    312
    313struct artpec6_hashalg_context {
    314	char hmac_key[SHA256_BLOCK_SIZE];
    315	size_t hmac_key_length;
    316	struct crypto_shash *child_hash;
    317};
    318
    319struct artpec6_crypto_request_context {
    320	u32 cipher_md;
    321	bool decrypt;
    322	struct artpec6_crypto_req_common common;
    323};
    324
    325struct artpec6_cryptotfm_context {
    326	unsigned char aes_key[2*AES_MAX_KEY_SIZE];
    327	size_t key_length;
    328	u32 key_md;
    329	int crypto_type;
    330	struct crypto_sync_skcipher *fallback;
    331};
    332
    333struct artpec6_crypto_aead_hw_ctx {
    334	__be64	aad_length_bits;
    335	__be64  text_length_bits;
    336	__u8	J0[AES_BLOCK_SIZE];
    337};
    338
    339struct artpec6_crypto_aead_req_ctx {
    340	struct artpec6_crypto_aead_hw_ctx hw_ctx;
    341	u32 cipher_md;
    342	bool decrypt;
    343	struct artpec6_crypto_req_common common;
    344	__u8 decryption_tag[AES_BLOCK_SIZE] ____cacheline_aligned;
    345};
    346
    347/* The crypto framework makes it hard to avoid this global. */
    348static struct device *artpec6_crypto_dev;
    349
    350#ifdef CONFIG_FAULT_INJECTION
    351static DECLARE_FAULT_ATTR(artpec6_crypto_fail_status_read);
    352static DECLARE_FAULT_ATTR(artpec6_crypto_fail_dma_array_full);
    353#endif
    354
    355enum {
    356	ARTPEC6_CRYPTO_PREPARE_HASH_NO_START,
    357	ARTPEC6_CRYPTO_PREPARE_HASH_START,
    358};
    359
    360static int artpec6_crypto_prepare_aead(struct aead_request *areq);
    361static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq);
    362static int artpec6_crypto_prepare_hash(struct ahash_request *areq);
    363
    364static void
    365artpec6_crypto_complete_crypto(struct crypto_async_request *req);
    366static void
    367artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req);
    368static void
    369artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req);
    370static void
    371artpec6_crypto_complete_aead(struct crypto_async_request *req);
    372static void
    373artpec6_crypto_complete_hash(struct crypto_async_request *req);
    374
    375static int
    376artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common);
    377
    378static void
    379artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common);
    380
    381struct artpec6_crypto_walk {
    382	struct scatterlist *sg;
    383	size_t offset;
    384};
    385
    386static void artpec6_crypto_walk_init(struct artpec6_crypto_walk *awalk,
    387				     struct scatterlist *sg)
    388{
    389	awalk->sg = sg;
    390	awalk->offset = 0;
    391}
    392
    393static size_t artpec6_crypto_walk_advance(struct artpec6_crypto_walk *awalk,
    394					  size_t nbytes)
    395{
    396	while (nbytes && awalk->sg) {
    397		size_t piece;
    398
    399		WARN_ON(awalk->offset > awalk->sg->length);
    400
    401		piece = min(nbytes, (size_t)awalk->sg->length - awalk->offset);
    402		nbytes -= piece;
    403		awalk->offset += piece;
    404		if (awalk->offset == awalk->sg->length) {
    405			awalk->sg = sg_next(awalk->sg);
    406			awalk->offset = 0;
    407		}
    408
    409	}
    410
    411	return nbytes;
    412}
    413
    414static size_t
    415artpec6_crypto_walk_chunklen(const struct artpec6_crypto_walk *awalk)
    416{
    417	WARN_ON(awalk->sg->length == awalk->offset);
    418
    419	return awalk->sg->length - awalk->offset;
    420}
    421
    422static dma_addr_t
    423artpec6_crypto_walk_chunk_phys(const struct artpec6_crypto_walk *awalk)
    424{
    425	return sg_phys(awalk->sg) + awalk->offset;
    426}
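        /* For illustration, a sketch of the typical walk pattern used by
         * artpec6_crypto_setup_sg_descrs_out()/_in() further down; the names
         * "count", "chunk" and "addr" are placeholders:
         *
         *   struct artpec6_crypto_walk walk;
         *
         *   artpec6_crypto_walk_init(&walk, areq->src);
         *   while (walk.sg && count) {
         *           chunk = min(count, artpec6_crypto_walk_chunklen(&walk));
         *           addr = artpec6_crypto_walk_chunk_phys(&walk);
         *           ... map "chunk" bytes at "addr" and queue a descriptor ...
         *           count -= chunk;
         *           artpec6_crypto_walk_advance(&walk, chunk);
         *   }
         */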
    427
    428static void
    429artpec6_crypto_copy_bounce_buffers(struct artpec6_crypto_req_common *common)
    430{
    431	struct artpec6_crypto_dma_descriptors *dma = common->dma;
    432	struct artpec6_crypto_bounce_buffer *b;
    433	struct artpec6_crypto_bounce_buffer *next;
    434
    435	list_for_each_entry_safe(b, next, &dma->bounce_buffers, list) {
    436		pr_debug("bounce entry %p: %zu bytes @ %zu from %p\n",
    437			 b, b->length, b->offset, b->buf);
    438		sg_pcopy_from_buffer(b->sg,
    439				   1,
    440				   b->buf,
    441				   b->length,
    442				   b->offset);
    443
    444		list_del(&b->list);
    445		kfree(b);
    446	}
    447}
    448
    449static inline bool artpec6_crypto_busy(void)
    450{
    451	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
    452	int fifo_count = ac->pending_count;
    453
    454	return fifo_count > 6;
    455}
    456
    457static int artpec6_crypto_submit(struct artpec6_crypto_req_common *req)
    458{
    459	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
    460	int ret = -EBUSY;
    461
    462	spin_lock_bh(&ac->queue_lock);
    463
    464	if (!artpec6_crypto_busy()) {
    465		list_add_tail(&req->list, &ac->pending);
    466		artpec6_crypto_start_dma(req);
    467		ret = -EINPROGRESS;
    468	} else if (req->req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) {
    469		list_add_tail(&req->list, &ac->queue);
    470	} else {
    471		artpec6_crypto_common_destroy(req);
    472	}
    473
    474	spin_unlock_bh(&ac->queue_lock);
    475
    476	return ret;
    477}
    478
    479static void artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common)
    480{
    481	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
    482	enum artpec6_crypto_variant variant = ac->variant;
    483	void __iomem *base = ac->base;
    484	struct artpec6_crypto_dma_descriptors *dma = common->dma;
    485	u32 ind, statd, outd;
    486
    487	/* Make descriptor content visible to the DMA before starting it. */
    488	wmb();
    489
    490	ind = FIELD_PREP(PDMA_IN_DESCRQ_PUSH_LEN, dma->in_cnt - 1) |
    491	      FIELD_PREP(PDMA_IN_DESCRQ_PUSH_ADDR, dma->in_dma_addr >> 6);
    492
    493	statd = FIELD_PREP(PDMA_IN_STATQ_PUSH_LEN, dma->in_cnt - 1) |
    494		FIELD_PREP(PDMA_IN_STATQ_PUSH_ADDR, dma->stat_dma_addr >> 6);
    495
    496	outd = FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_LEN, dma->out_cnt - 1) |
    497	       FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_ADDR, dma->out_dma_addr >> 6);
    498
    499	if (variant == ARTPEC6_CRYPTO) {
    500		writel_relaxed(ind, base + A6_PDMA_IN_DESCRQ_PUSH);
    501		writel_relaxed(statd, base + A6_PDMA_IN_STATQ_PUSH);
    502		writel_relaxed(PDMA_IN_CMD_START, base + A6_PDMA_IN_CMD);
    503	} else {
    504		writel_relaxed(ind, base + A7_PDMA_IN_DESCRQ_PUSH);
    505		writel_relaxed(statd, base + A7_PDMA_IN_STATQ_PUSH);
    506		writel_relaxed(PDMA_IN_CMD_START, base + A7_PDMA_IN_CMD);
    507	}
    508
    509	writel_relaxed(outd, base + PDMA_OUT_DESCRQ_PUSH);
    510	writel_relaxed(PDMA_OUT_CMD_START, base + PDMA_OUT_CMD);
    511
    512	ac->pending_count++;
    513}
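        /* Note on the PUSH register encoding above: the descriptor queue
         * registers take the entry count minus one in bits 5:0 and the
         * physical address shifted right by six in bits 31:6.  Since the
         * descriptor arrays are 64-byte aligned, the written value is the
         * array address with the count in the otherwise-zero low bits, e.g.
         * (hypothetical address) in_dma_addr = 0x20000040 and in_cnt = 3
         * give ind = 0x20000042.
         */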
    514
    515static void
    516artpec6_crypto_init_dma_operation(struct artpec6_crypto_req_common *common)
    517{
    518	struct artpec6_crypto_dma_descriptors *dma = common->dma;
    519
    520	dma->out_cnt = 0;
    521	dma->in_cnt = 0;
    522	dma->map_count = 0;
    523	INIT_LIST_HEAD(&dma->bounce_buffers);
    524}
    525
    526static bool fault_inject_dma_descr(void)
    527{
    528#ifdef CONFIG_FAULT_INJECTION
    529	return should_fail(&artpec6_crypto_fail_dma_array_full, 1);
    530#else
    531	return false;
    532#endif
    533}
    534
    535/** artpec6_crypto_setup_out_descr_phys - Setup an out channel with a
    536 *                                        physical address
    537 *
    538 * @addr: The physical address of the data buffer
    539 * @len:  The length of the data buffer
    540 * @eop:  True if this is the last buffer in the packet
    541 *
    542 * @return 0 on success or -ENOSPC if there are no more descriptors available
    543 */
    544static int
    545artpec6_crypto_setup_out_descr_phys(struct artpec6_crypto_req_common *common,
    546				    dma_addr_t addr, size_t len, bool eop)
    547{
    548	struct artpec6_crypto_dma_descriptors *dma = common->dma;
    549	struct pdma_descr *d;
    550
    551	if (dma->out_cnt >= PDMA_DESCR_COUNT ||
    552	    fault_inject_dma_descr()) {
    553		pr_err("No free OUT DMA descriptors available!\n");
    554		return -ENOSPC;
    555	}
    556
    557	d = &dma->out[dma->out_cnt++];
    558	memset(d, 0, sizeof(*d));
    559
    560	d->ctrl.short_descr = 0;
    561	d->ctrl.eop = eop;
    562	d->data.len = len;
    563	d->data.buf = addr;
    564	return 0;
    565}
    566
    567/** artpec6_crypto_setup_out_descr_short - Setup a short out descriptor
    568 *
    569 * @dst: The virtual address of the data
     570 * @len: The length of the data, must be between 1 and 7 bytes
    571 * @eop: True if this is the last buffer in the packet
    572 *
    573 * @return 0 on success
    574 *	-ENOSPC if no more descriptors are available
    575 *	-EINVAL if the data length exceeds 7 bytes
    576 */
    577static int
    578artpec6_crypto_setup_out_descr_short(struct artpec6_crypto_req_common *common,
    579				     void *dst, unsigned int len, bool eop)
    580{
    581	struct artpec6_crypto_dma_descriptors *dma = common->dma;
    582	struct pdma_descr *d;
    583
    584	if (dma->out_cnt >= PDMA_DESCR_COUNT ||
    585	    fault_inject_dma_descr()) {
    586		pr_err("No free OUT DMA descriptors available!\n");
    587		return -ENOSPC;
    588	} else if (len > 7 || len < 1) {
    589		return -EINVAL;
    590	}
    591	d = &dma->out[dma->out_cnt++];
    592	memset(d, 0, sizeof(*d));
    593
    594	d->ctrl.short_descr = 1;
    595	d->ctrl.short_len = len;
    596	d->ctrl.eop = eop;
    597	memcpy(d->shrt.data, dst, len);
    598	return 0;
    599}
    600
    601static int artpec6_crypto_dma_map_page(struct artpec6_crypto_req_common *common,
    602				      struct page *page, size_t offset,
    603				      size_t size,
    604				      enum dma_data_direction dir,
    605				      dma_addr_t *dma_addr_out)
    606{
    607	struct artpec6_crypto_dma_descriptors *dma = common->dma;
    608	struct device *dev = artpec6_crypto_dev;
    609	struct artpec6_crypto_dma_map *map;
    610	dma_addr_t dma_addr;
    611
    612	*dma_addr_out = 0;
    613
    614	if (dma->map_count >= ARRAY_SIZE(dma->maps))
    615		return -ENOMEM;
    616
    617	dma_addr = dma_map_page(dev, page, offset, size, dir);
    618	if (dma_mapping_error(dev, dma_addr))
    619		return -ENOMEM;
    620
    621	map = &dma->maps[dma->map_count++];
    622	map->size = size;
    623	map->dma_addr = dma_addr;
    624	map->dir = dir;
    625
    626	*dma_addr_out = dma_addr;
    627
    628	return 0;
    629}
    630
    631static int
    632artpec6_crypto_dma_map_single(struct artpec6_crypto_req_common *common,
    633			      void *ptr, size_t size,
    634			      enum dma_data_direction dir,
    635			      dma_addr_t *dma_addr_out)
    636{
    637	struct page *page = virt_to_page(ptr);
    638	size_t offset = (uintptr_t)ptr & ~PAGE_MASK;
    639
    640	return artpec6_crypto_dma_map_page(common, page, offset, size, dir,
    641					  dma_addr_out);
    642}
    643
    644static int
    645artpec6_crypto_dma_map_descs(struct artpec6_crypto_req_common *common)
    646{
    647	struct artpec6_crypto_dma_descriptors *dma = common->dma;
    648	int ret;
    649
    650	ret = artpec6_crypto_dma_map_single(common, dma->in,
    651				sizeof(dma->in[0]) * dma->in_cnt,
    652				DMA_TO_DEVICE, &dma->in_dma_addr);
    653	if (ret)
    654		return ret;
    655
    656	ret = artpec6_crypto_dma_map_single(common, dma->out,
    657				sizeof(dma->out[0]) * dma->out_cnt,
    658				DMA_TO_DEVICE, &dma->out_dma_addr);
    659	if (ret)
    660		return ret;
    661
    662	/* We only read one stat descriptor */
    663	dma->stat[dma->in_cnt - 1] = 0;
    664
    665	/*
    666	 * DMA_BIDIRECTIONAL since we need our zeroing of the stat descriptor
    667	 * to be written.
    668	 */
    669	return artpec6_crypto_dma_map_single(common,
    670				dma->stat,
    671				sizeof(dma->stat[0]) * dma->in_cnt,
    672				DMA_BIDIRECTIONAL,
    673				&dma->stat_dma_addr);
    674}
    675
    676static void
    677artpec6_crypto_dma_unmap_all(struct artpec6_crypto_req_common *common)
    678{
    679	struct artpec6_crypto_dma_descriptors *dma = common->dma;
    680	struct device *dev = artpec6_crypto_dev;
    681	int i;
    682
    683	for (i = 0; i < dma->map_count; i++) {
    684		struct artpec6_crypto_dma_map *map = &dma->maps[i];
    685
    686		dma_unmap_page(dev, map->dma_addr, map->size, map->dir);
    687	}
    688
    689	dma->map_count = 0;
    690}
    691
    692/** artpec6_crypto_setup_out_descr - Setup an out descriptor
    693 *
    694 * @dst: The virtual address of the data
    695 * @len: The length of the data
    696 * @eop: True if this is the last buffer in the packet
    697 * @use_short: If this is true and the data length is 7 bytes or less then
    698 *	a short descriptor will be used
    699 *
    700 * @return 0 on success
    701 *	Any errors from artpec6_crypto_setup_out_descr_short() or
     702 *	artpec6_crypto_setup_out_descr_phys()
    703 */
    704static int
    705artpec6_crypto_setup_out_descr(struct artpec6_crypto_req_common *common,
    706			       void *dst, unsigned int len, bool eop,
    707			       bool use_short)
    708{
    709	if (use_short && len < 7) {
    710		return artpec6_crypto_setup_out_descr_short(common, dst, len,
    711							    eop);
    712	} else {
    713		int ret;
    714		dma_addr_t dma_addr;
    715
    716		ret = artpec6_crypto_dma_map_single(common, dst, len,
    717						   DMA_TO_DEVICE,
    718						   &dma_addr);
    719		if (ret)
    720			return ret;
    721
    722		return artpec6_crypto_setup_out_descr_phys(common, dma_addr,
    723							   len, eop);
    724	}
    725}
    726
    727/** artpec6_crypto_setup_in_descr_phys - Setup an in channel with a
    728 *                                       physical address
    729 *
    730 * @addr: The physical address of the data buffer
    731 * @len:  The length of the data buffer
    732 * @intr: True if an interrupt should be fired after HW processing of this
    733 *	  descriptor
    734 *
    735 */
    736static int
    737artpec6_crypto_setup_in_descr_phys(struct artpec6_crypto_req_common *common,
    738			       dma_addr_t addr, unsigned int len, bool intr)
    739{
    740	struct artpec6_crypto_dma_descriptors *dma = common->dma;
    741	struct pdma_descr *d;
    742
    743	if (dma->in_cnt >= PDMA_DESCR_COUNT ||
    744	    fault_inject_dma_descr()) {
    745		pr_err("No free IN DMA descriptors available!\n");
    746		return -ENOSPC;
    747	}
    748	d = &dma->in[dma->in_cnt++];
    749	memset(d, 0, sizeof(*d));
    750
    751	d->ctrl.intr = intr;
    752	d->data.len = len;
    753	d->data.buf = addr;
    754	return 0;
    755}
    756
    757/** artpec6_crypto_setup_in_descr - Setup an in channel descriptor
    758 *
     759 * @buffer: The virtual address of the data buffer
    760 * @len:    The length of the data buffer
    761 * @last:   If this is the last data buffer in the request (i.e. an interrupt
     762 *	    is needed)
    763 *
    764 * Short descriptors are not used for the in channel
    765 */
    766static int
    767artpec6_crypto_setup_in_descr(struct artpec6_crypto_req_common *common,
    768			  void *buffer, unsigned int len, bool last)
    769{
    770	dma_addr_t dma_addr;
    771	int ret;
    772
    773	ret = artpec6_crypto_dma_map_single(common, buffer, len,
    774					   DMA_FROM_DEVICE, &dma_addr);
    775	if (ret)
    776		return ret;
    777
    778	return artpec6_crypto_setup_in_descr_phys(common, dma_addr, len, last);
    779}
    780
    781static struct artpec6_crypto_bounce_buffer *
    782artpec6_crypto_alloc_bounce(gfp_t flags)
    783{
    784	void *base;
    785	size_t alloc_size = sizeof(struct artpec6_crypto_bounce_buffer) +
    786			    2 * ARTPEC_CACHE_LINE_MAX;
    787	struct artpec6_crypto_bounce_buffer *bbuf = kzalloc(alloc_size, flags);
    788
    789	if (!bbuf)
    790		return NULL;
    791
    792	base = bbuf + 1;
    793	bbuf->buf = PTR_ALIGN(base, ARTPEC_CACHE_LINE_MAX);
    794	return bbuf;
    795}
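        /* Note on the allocation above: base = bbuf + 1 is not necessarily
         * cache-line aligned, so artpec6_crypto_alloc_bounce() reserves
         * 2 * ARTPEC_CACHE_LINE_MAX extra bytes.  PTR_ALIGN() may skip up to
         * ARTPEC_CACHE_LINE_MAX - 1 of them, which still leaves a full
         * aligned ARTPEC_CACHE_LINE_MAX-byte region for bbuf->buf; e.g. with
         * a 32-byte line and base ending in ...0x18, 8 bytes are skipped and
         * 56 usable bytes remain.
         */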
    796
    797static int setup_bounce_buffer_in(struct artpec6_crypto_req_common *common,
    798				  struct artpec6_crypto_walk *walk, size_t size)
    799{
    800	struct artpec6_crypto_bounce_buffer *bbuf;
    801	int ret;
    802
    803	bbuf = artpec6_crypto_alloc_bounce(common->gfp_flags);
    804	if (!bbuf)
    805		return -ENOMEM;
    806
    807	bbuf->length = size;
    808	bbuf->sg = walk->sg;
    809	bbuf->offset = walk->offset;
    810
    811	ret =  artpec6_crypto_setup_in_descr(common, bbuf->buf, size, false);
    812	if (ret) {
    813		kfree(bbuf);
    814		return ret;
    815	}
    816
    817	pr_debug("BOUNCE %zu offset %zu\n", size, walk->offset);
    818	list_add_tail(&bbuf->list, &common->dma->bounce_buffers);
    819	return 0;
    820}
    821
    822static int
    823artpec6_crypto_setup_sg_descrs_in(struct artpec6_crypto_req_common *common,
    824				  struct artpec6_crypto_walk *walk,
    825				  size_t count)
    826{
    827	size_t chunk;
    828	int ret;
    829	dma_addr_t addr;
    830
    831	while (walk->sg && count) {
    832		chunk = min(count, artpec6_crypto_walk_chunklen(walk));
    833		addr = artpec6_crypto_walk_chunk_phys(walk);
    834
    835		/* When destination buffers are not aligned to the cache line
    836		 * size we need bounce buffers. The DMA-API requires that the
     837		 * entire cache line is owned by the DMA buffer, and this also
     838		 * holds when coherent DMA is used.
    839		 */
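        		/* Worked example: for a 100-byte destination chunk at bus
        		 * address ...0x14, the chunking below first bounces 12 bytes
        		 * (up to the ...0x20 cache-line boundary), then on the next
        		 * iterations DMA-maps 64 bytes (88 rounded down to whole cache
        		 * lines) directly and bounces the final 24 bytes.
        		 */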
    840		if (!IS_ALIGNED(addr, ARTPEC_CACHE_LINE_MAX)) {
    841			chunk = min_t(dma_addr_t, chunk,
    842				      ALIGN(addr, ARTPEC_CACHE_LINE_MAX) -
    843				      addr);
    844
    845			pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk);
    846			ret = setup_bounce_buffer_in(common, walk, chunk);
    847		} else if (chunk < ARTPEC_CACHE_LINE_MAX) {
    848			pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk);
    849			ret = setup_bounce_buffer_in(common, walk, chunk);
    850		} else {
    851			dma_addr_t dma_addr;
    852
    853			chunk = chunk & ~(ARTPEC_CACHE_LINE_MAX-1);
    854
    855			pr_debug("CHUNK %pad:%zu\n", &addr, chunk);
    856
    857			ret = artpec6_crypto_dma_map_page(common,
    858							 sg_page(walk->sg),
    859							 walk->sg->offset +
    860							 walk->offset,
    861							 chunk,
    862							 DMA_FROM_DEVICE,
    863							 &dma_addr);
    864			if (ret)
    865				return ret;
    866
    867			ret = artpec6_crypto_setup_in_descr_phys(common,
    868								 dma_addr,
    869								 chunk, false);
    870		}
    871
    872		if (ret)
    873			return ret;
    874
    875		count = count - chunk;
    876		artpec6_crypto_walk_advance(walk, chunk);
    877	}
    878
    879	if (count)
    880		pr_err("EOL unexpected %zu bytes left\n", count);
    881
    882	return count ? -EINVAL : 0;
    883}
    884
    885static int
    886artpec6_crypto_setup_sg_descrs_out(struct artpec6_crypto_req_common *common,
    887				   struct artpec6_crypto_walk *walk,
    888				   size_t count)
    889{
    890	size_t chunk;
    891	int ret;
    892	dma_addr_t addr;
    893
    894	while (walk->sg && count) {
    895		chunk = min(count, artpec6_crypto_walk_chunklen(walk));
    896		addr = artpec6_crypto_walk_chunk_phys(walk);
    897
    898		pr_debug("OUT-CHUNK %pad:%zu\n", &addr, chunk);
    899
    900		if (addr & 3) {
    901			char buf[3];
    902
    903			chunk = min_t(size_t, chunk, (4-(addr&3)));
    904
    905			sg_pcopy_to_buffer(walk->sg, 1, buf, chunk,
    906					   walk->offset);
    907
    908			ret = artpec6_crypto_setup_out_descr_short(common, buf,
    909								   chunk,
    910								   false);
    911		} else {
    912			dma_addr_t dma_addr;
    913
    914			ret = artpec6_crypto_dma_map_page(common,
    915							 sg_page(walk->sg),
    916							 walk->sg->offset +
    917							 walk->offset,
    918							 chunk,
    919							 DMA_TO_DEVICE,
    920							 &dma_addr);
    921			if (ret)
    922				return ret;
    923
    924			ret = artpec6_crypto_setup_out_descr_phys(common,
    925								 dma_addr,
    926								 chunk, false);
    927		}
    928
    929		if (ret)
    930			return ret;
    931
    932		count = count - chunk;
    933		artpec6_crypto_walk_advance(walk, chunk);
    934	}
    935
    936	if (count)
    937		pr_err("EOL unexpected %zu bytes left\n", count);
    938
    939	return count ? -EINVAL : 0;
    940}
    941
    942
    943/** artpec6_crypto_terminate_out_descrs - Set the EOP on the last out descriptor
    944 *
    945 * If the out descriptor list is non-empty, then the eop flag on the
    946 * last used out descriptor will be set.
    947 *
    948 * @return  0 on success
    949 *	-EINVAL if the out descriptor is empty or has overflown
    950 */
    951static int
    952artpec6_crypto_terminate_out_descrs(struct artpec6_crypto_req_common *common)
    953{
    954	struct artpec6_crypto_dma_descriptors *dma = common->dma;
    955	struct pdma_descr *d;
    956
    957	if (!dma->out_cnt || dma->out_cnt > PDMA_DESCR_COUNT) {
    958		pr_err("%s: OUT descriptor list is %s\n",
     959		MODULE_NAME, dma->out_cnt ? "full" : "empty");
    960		return -EINVAL;
    961
    962	}
    963
    964	d = &dma->out[dma->out_cnt-1];
    965	d->ctrl.eop = 1;
    966
    967	return 0;
    968}
    969
    970/** artpec6_crypto_terminate_in_descrs - Set the interrupt flag on the last
    971 *                                       in descriptor
    972 *
    973 * See artpec6_crypto_terminate_out_descrs() for return values
    974 */
    975static int
    976artpec6_crypto_terminate_in_descrs(struct artpec6_crypto_req_common *common)
    977{
    978	struct artpec6_crypto_dma_descriptors *dma = common->dma;
    979	struct pdma_descr *d;
    980
    981	if (!dma->in_cnt || dma->in_cnt > PDMA_DESCR_COUNT) {
    982		pr_err("%s: IN descriptor list is %s\n",
     983		MODULE_NAME, dma->in_cnt ? "full" : "empty");
    984		return -EINVAL;
    985	}
    986
    987	d = &dma->in[dma->in_cnt-1];
    988	d->ctrl.intr = 1;
    989	return 0;
    990}
    991
    992/** create_hash_pad - Create a Secure Hash conformant pad
    993 *
    994 * @dst:      The destination buffer to write the pad. Must be at least 64 bytes
     995 * @dgstlen:  The total number of bytes hashed, used to compute the pad length
     996 * @bitcount: The total length of the hashed data in bits, appended to the pad
    997 *
    998 * @return The total number of padding bytes written to @dst
    999 */
   1000static size_t
   1001create_hash_pad(int oper, unsigned char *dst, u64 dgstlen, u64 bitcount)
   1002{
   1003	unsigned int mod, target, diff, pad_bytes, size_bytes;
   1004	__be64 bits = __cpu_to_be64(bitcount);
   1005
   1006	switch (oper) {
   1007	case regk_crypto_sha1:
   1008	case regk_crypto_sha256:
   1009	case regk_crypto_hmac_sha1:
   1010	case regk_crypto_hmac_sha256:
   1011		target = 448 / 8;
   1012		mod = 512 / 8;
   1013		size_bytes = 8;
   1014		break;
   1015	default:
   1016		target = 896 / 8;
   1017		mod = 1024 / 8;
   1018		size_bytes = 16;
   1019		break;
   1020	}
   1021
   1022	target -= 1;
   1023	diff = dgstlen & (mod - 1);
   1024	pad_bytes = diff > target ? target + mod - diff : target - diff;
   1025
   1026	memset(dst + 1, 0, pad_bytes);
   1027	dst[0] = 0x80;
   1028
   1029	if (size_bytes == 16) {
   1030		memset(dst + 1 + pad_bytes, 0, 8);
   1031		memcpy(dst + 1 + pad_bytes + 8, &bits, 8);
   1032	} else {
   1033		memcpy(dst + 1 + pad_bytes, &bits, 8);
   1034	}
   1035
   1036	return pad_bytes + size_bytes + 1;
   1037}
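        /* Worked example: a plain SHA-256 digest of a 3-byte message calls
         * create_hash_pad() with dgstlen = 3 and bitcount = 24.  diff = 3 and
         * pad_bytes = 55 - 3 = 52, so the pad is 0x80, 52 zero bytes and the
         * 8-byte big-endian bit count, 61 bytes in total: 3 + 61 = 64, one
         * full SHA-256 block.
         */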
   1038
   1039static int artpec6_crypto_common_init(struct artpec6_crypto_req_common *common,
   1040		struct crypto_async_request *parent,
   1041		void (*complete)(struct crypto_async_request *req),
   1042		struct scatterlist *dstsg, unsigned int nbytes)
   1043{
   1044	gfp_t flags;
   1045	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
   1046
   1047	flags = (parent->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
   1048		 GFP_KERNEL : GFP_ATOMIC;
   1049
   1050	common->gfp_flags = flags;
   1051	common->dma = kmem_cache_alloc(ac->dma_cache, flags);
   1052	if (!common->dma)
   1053		return -ENOMEM;
   1054
   1055	common->req = parent;
   1056	common->complete = complete;
   1057	return 0;
   1058}
   1059
   1060static void
   1061artpec6_crypto_bounce_destroy(struct artpec6_crypto_dma_descriptors *dma)
   1062{
   1063	struct artpec6_crypto_bounce_buffer *b;
   1064	struct artpec6_crypto_bounce_buffer *next;
   1065
   1066	list_for_each_entry_safe(b, next, &dma->bounce_buffers, list) {
   1067		kfree(b);
   1068	}
   1069}
   1070
   1071static int
   1072artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common)
   1073{
   1074	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
   1075
   1076	artpec6_crypto_dma_unmap_all(common);
   1077	artpec6_crypto_bounce_destroy(common->dma);
   1078	kmem_cache_free(ac->dma_cache, common->dma);
   1079	common->dma = NULL;
   1080	return 0;
   1081}
   1082
   1083/*
   1084 * Ciphering functions.
   1085 */
   1086static int artpec6_crypto_encrypt(struct skcipher_request *req)
   1087{
   1088	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
   1089	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
   1090	struct artpec6_crypto_request_context *req_ctx = NULL;
   1091	void (*complete)(struct crypto_async_request *req);
   1092	int ret;
   1093
   1094	req_ctx = skcipher_request_ctx(req);
   1095
   1096	switch (ctx->crypto_type) {
   1097	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
   1098	case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
   1099	case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
   1100		req_ctx->decrypt = 0;
   1101		break;
   1102	default:
   1103		break;
   1104	}
   1105
   1106	switch (ctx->crypto_type) {
   1107	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
   1108		complete = artpec6_crypto_complete_cbc_encrypt;
   1109		break;
   1110	default:
   1111		complete = artpec6_crypto_complete_crypto;
   1112		break;
   1113	}
   1114
   1115	ret = artpec6_crypto_common_init(&req_ctx->common,
   1116				  &req->base,
   1117				  complete,
   1118				  req->dst, req->cryptlen);
   1119	if (ret)
   1120		return ret;
   1121
   1122	ret = artpec6_crypto_prepare_crypto(req);
   1123	if (ret) {
   1124		artpec6_crypto_common_destroy(&req_ctx->common);
   1125		return ret;
   1126	}
   1127
   1128	return artpec6_crypto_submit(&req_ctx->common);
   1129}
   1130
   1131static int artpec6_crypto_decrypt(struct skcipher_request *req)
   1132{
   1133	int ret;
   1134	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
   1135	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
   1136	struct artpec6_crypto_request_context *req_ctx = NULL;
   1137	void (*complete)(struct crypto_async_request *req);
   1138
   1139	req_ctx = skcipher_request_ctx(req);
   1140
   1141	switch (ctx->crypto_type) {
   1142	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
   1143	case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
   1144	case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
   1145		req_ctx->decrypt = 1;
   1146		break;
   1147	default:
   1148		break;
   1149	}
   1150
   1151
   1152	switch (ctx->crypto_type) {
   1153	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
   1154		complete = artpec6_crypto_complete_cbc_decrypt;
   1155		break;
   1156	default:
   1157		complete = artpec6_crypto_complete_crypto;
   1158		break;
   1159	}
   1160
   1161	ret = artpec6_crypto_common_init(&req_ctx->common, &req->base,
   1162				  complete,
   1163				  req->dst, req->cryptlen);
   1164	if (ret)
   1165		return ret;
   1166
   1167	ret = artpec6_crypto_prepare_crypto(req);
   1168	if (ret) {
   1169		artpec6_crypto_common_destroy(&req_ctx->common);
   1170		return ret;
   1171	}
   1172
   1173	return artpec6_crypto_submit(&req_ctx->common);
   1174}
   1175
   1176static int
   1177artpec6_crypto_ctr_crypt(struct skcipher_request *req, bool encrypt)
   1178{
   1179	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
   1180	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
   1181	size_t iv_len = crypto_skcipher_ivsize(cipher);
   1182	unsigned int counter = be32_to_cpup((__be32 *)
   1183					    (req->iv + iv_len - 4));
   1184	unsigned int nblks = ALIGN(req->cryptlen, AES_BLOCK_SIZE) /
   1185			     AES_BLOCK_SIZE;
   1186
   1187	/*
   1188	 * The hardware uses only the last 32-bits as the counter while the
   1189	 * kernel tests (aes_ctr_enc_tv_template[4] for example) expect that
    1190	 * the whole IV is a counter.  So fall back if the counter is going to
    1191	 * overflow.
   1192	 */
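        	/* Example: with the last IV word at 0xfffffffe and a 64-byte
        	 * request (nblks = 4), counter + nblks wraps to 0x2 and the
        	 * request is handed to the software fallback below.
        	 */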
   1193	if (counter + nblks < counter) {
   1194		int ret;
   1195
   1196		pr_debug("counter %x will overflow (nblks %u), falling back\n",
    1197			 counter, nblks);
   1198
   1199		ret = crypto_sync_skcipher_setkey(ctx->fallback, ctx->aes_key,
   1200						  ctx->key_length);
   1201		if (ret)
   1202			return ret;
   1203
   1204		{
   1205			SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
   1206
   1207			skcipher_request_set_sync_tfm(subreq, ctx->fallback);
   1208			skcipher_request_set_callback(subreq, req->base.flags,
   1209						      NULL, NULL);
   1210			skcipher_request_set_crypt(subreq, req->src, req->dst,
   1211						   req->cryptlen, req->iv);
   1212			ret = encrypt ? crypto_skcipher_encrypt(subreq)
   1213				      : crypto_skcipher_decrypt(subreq);
   1214			skcipher_request_zero(subreq);
   1215		}
   1216		return ret;
   1217	}
   1218
   1219	return encrypt ? artpec6_crypto_encrypt(req)
   1220		       : artpec6_crypto_decrypt(req);
   1221}
   1222
   1223static int artpec6_crypto_ctr_encrypt(struct skcipher_request *req)
   1224{
   1225	return artpec6_crypto_ctr_crypt(req, true);
   1226}
   1227
   1228static int artpec6_crypto_ctr_decrypt(struct skcipher_request *req)
   1229{
   1230	return artpec6_crypto_ctr_crypt(req, false);
   1231}
   1232
   1233/*
   1234 * AEAD functions
   1235 */
   1236static int artpec6_crypto_aead_init(struct crypto_aead *tfm)
   1237{
   1238	struct artpec6_cryptotfm_context *tfm_ctx = crypto_aead_ctx(tfm);
   1239
   1240	memset(tfm_ctx, 0, sizeof(*tfm_ctx));
   1241
   1242	crypto_aead_set_reqsize(tfm,
   1243				sizeof(struct artpec6_crypto_aead_req_ctx));
   1244
   1245	return 0;
   1246}
   1247
   1248static int artpec6_crypto_aead_set_key(struct crypto_aead *tfm, const u8 *key,
   1249			       unsigned int len)
   1250{
   1251	struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(&tfm->base);
   1252
   1253	if (len != 16 && len != 24 && len != 32)
   1254		return -EINVAL;
   1255
   1256	ctx->key_length = len;
   1257
   1258	memcpy(ctx->aes_key, key, len);
   1259	return 0;
   1260}
   1261
   1262static int artpec6_crypto_aead_encrypt(struct aead_request *req)
   1263{
   1264	int ret;
   1265	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req);
   1266
   1267	req_ctx->decrypt = false;
   1268	ret = artpec6_crypto_common_init(&req_ctx->common, &req->base,
   1269				  artpec6_crypto_complete_aead,
   1270				  NULL, 0);
   1271	if (ret)
   1272		return ret;
   1273
   1274	ret = artpec6_crypto_prepare_aead(req);
   1275	if (ret) {
   1276		artpec6_crypto_common_destroy(&req_ctx->common);
   1277		return ret;
   1278	}
   1279
   1280	return artpec6_crypto_submit(&req_ctx->common);
   1281}
   1282
   1283static int artpec6_crypto_aead_decrypt(struct aead_request *req)
   1284{
   1285	int ret;
   1286	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req);
   1287
   1288	req_ctx->decrypt = true;
   1289	if (req->cryptlen < AES_BLOCK_SIZE)
   1290		return -EINVAL;
   1291
   1292	ret = artpec6_crypto_common_init(&req_ctx->common,
   1293				  &req->base,
   1294				  artpec6_crypto_complete_aead,
   1295				  NULL, 0);
   1296	if (ret)
   1297		return ret;
   1298
   1299	ret = artpec6_crypto_prepare_aead(req);
   1300	if (ret) {
   1301		artpec6_crypto_common_destroy(&req_ctx->common);
   1302		return ret;
   1303	}
   1304
   1305	return artpec6_crypto_submit(&req_ctx->common);
   1306}
   1307
   1308static int artpec6_crypto_prepare_hash(struct ahash_request *areq)
   1309{
   1310	struct artpec6_hashalg_context *ctx = crypto_tfm_ctx(areq->base.tfm);
   1311	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(areq);
   1312	size_t digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));
   1313	size_t contextsize = digestsize;
   1314	size_t blocksize = crypto_tfm_alg_blocksize(
   1315		crypto_ahash_tfm(crypto_ahash_reqtfm(areq)));
   1316	struct artpec6_crypto_req_common *common = &req_ctx->common;
   1317	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
   1318	enum artpec6_crypto_variant variant = ac->variant;
   1319	u32 sel_ctx;
   1320	bool ext_ctx = false;
   1321	bool run_hw = false;
   1322	int error = 0;
   1323
   1324	artpec6_crypto_init_dma_operation(common);
   1325
    1326	/* Upload HMAC key, must be the first packet */
   1327	if (req_ctx->hash_flags & HASH_FLAG_HMAC) {
   1328		if (variant == ARTPEC6_CRYPTO) {
   1329			req_ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER,
   1330						     a6_regk_crypto_dlkey);
   1331		} else {
   1332			req_ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER,
   1333						     a7_regk_crypto_dlkey);
   1334		}
   1335
   1336		/* Copy and pad up the key */
   1337		memcpy(req_ctx->key_buffer, ctx->hmac_key,
   1338		       ctx->hmac_key_length);
   1339		memset(req_ctx->key_buffer + ctx->hmac_key_length, 0,
   1340		       blocksize - ctx->hmac_key_length);
   1341
   1342		error = artpec6_crypto_setup_out_descr(common,
   1343					(void *)&req_ctx->key_md,
   1344					sizeof(req_ctx->key_md), false, false);
   1345		if (error)
   1346			return error;
   1347
   1348		error = artpec6_crypto_setup_out_descr(common,
   1349					req_ctx->key_buffer, blocksize,
   1350					true, false);
   1351		if (error)
   1352			return error;
   1353	}
   1354
   1355	if (!(req_ctx->hash_flags & HASH_FLAG_INIT_CTX)) {
   1356		/* Restore context */
   1357		sel_ctx = regk_crypto_ext;
   1358		ext_ctx = true;
   1359	} else {
   1360		sel_ctx = regk_crypto_init;
   1361	}
   1362
   1363	if (variant == ARTPEC6_CRYPTO) {
   1364		req_ctx->hash_md &= ~A6_CRY_MD_HASH_SEL_CTX;
   1365		req_ctx->hash_md |= FIELD_PREP(A6_CRY_MD_HASH_SEL_CTX, sel_ctx);
   1366
   1367		/* If this is the final round, set the final flag */
   1368		if (req_ctx->hash_flags & HASH_FLAG_FINALIZE)
   1369			req_ctx->hash_md |= A6_CRY_MD_HASH_HMAC_FIN;
   1370	} else {
   1371		req_ctx->hash_md &= ~A7_CRY_MD_HASH_SEL_CTX;
   1372		req_ctx->hash_md |= FIELD_PREP(A7_CRY_MD_HASH_SEL_CTX, sel_ctx);
   1373
   1374		/* If this is the final round, set the final flag */
   1375		if (req_ctx->hash_flags & HASH_FLAG_FINALIZE)
   1376			req_ctx->hash_md |= A7_CRY_MD_HASH_HMAC_FIN;
   1377	}
   1378
    1379	/* Set up metadata descriptors */
   1380	error = artpec6_crypto_setup_out_descr(common,
   1381				(void *)&req_ctx->hash_md,
   1382				sizeof(req_ctx->hash_md), false, false);
   1383	if (error)
   1384		return error;
   1385
   1386	error = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
   1387	if (error)
   1388		return error;
   1389
   1390	if (ext_ctx) {
   1391		error = artpec6_crypto_setup_out_descr(common,
   1392					req_ctx->digeststate,
   1393					contextsize, false, false);
   1394
   1395		if (error)
   1396			return error;
   1397	}
   1398
   1399	if (req_ctx->hash_flags & HASH_FLAG_UPDATE) {
   1400		size_t done_bytes = 0;
   1401		size_t total_bytes = areq->nbytes + req_ctx->partial_bytes;
   1402		size_t ready_bytes = round_down(total_bytes, blocksize);
   1403		struct artpec6_crypto_walk walk;
   1404
   1405		run_hw = ready_bytes > 0;
   1406		if (req_ctx->partial_bytes && ready_bytes) {
    1407		/* We have a partial buffer and will send at least some bytes
    1408			 * to the HW. Empty this partial buffer before tackling
    1409			 * the SG lists.
   1410			 */
   1411			memcpy(req_ctx->partial_buffer_out,
   1412				req_ctx->partial_buffer,
   1413				req_ctx->partial_bytes);
   1414
   1415			error = artpec6_crypto_setup_out_descr(common,
   1416						req_ctx->partial_buffer_out,
   1417						req_ctx->partial_bytes,
   1418						false, true);
   1419			if (error)
   1420				return error;
   1421
   1422			/* Reset partial buffer */
   1423			done_bytes += req_ctx->partial_bytes;
   1424			req_ctx->partial_bytes = 0;
   1425		}
   1426
   1427		artpec6_crypto_walk_init(&walk, areq->src);
   1428
   1429		error = artpec6_crypto_setup_sg_descrs_out(common, &walk,
   1430							   ready_bytes -
   1431							   done_bytes);
   1432		if (error)
   1433			return error;
   1434
   1435		if (walk.sg) {
   1436			size_t sg_skip = ready_bytes - done_bytes;
   1437			size_t sg_rem = areq->nbytes - sg_skip;
   1438
   1439			sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
   1440					   req_ctx->partial_buffer +
   1441					   req_ctx->partial_bytes,
   1442					   sg_rem, sg_skip);
   1443
   1444			req_ctx->partial_bytes += sg_rem;
   1445		}
   1446
   1447		req_ctx->digcnt += ready_bytes;
   1448		req_ctx->hash_flags &= ~(HASH_FLAG_UPDATE);
   1449	}
   1450
   1451	/* Finalize */
   1452	if (req_ctx->hash_flags & HASH_FLAG_FINALIZE) {
   1453		size_t hash_pad_len;
   1454		u64 digest_bits;
   1455		u32 oper;
   1456
   1457		if (variant == ARTPEC6_CRYPTO)
   1458			oper = FIELD_GET(A6_CRY_MD_OPER, req_ctx->hash_md);
   1459		else
   1460			oper = FIELD_GET(A7_CRY_MD_OPER, req_ctx->hash_md);
   1461
   1462		/* Write out the partial buffer if present */
   1463		if (req_ctx->partial_bytes) {
   1464			memcpy(req_ctx->partial_buffer_out,
   1465			       req_ctx->partial_buffer,
   1466			       req_ctx->partial_bytes);
   1467			error = artpec6_crypto_setup_out_descr(common,
   1468						req_ctx->partial_buffer_out,
   1469						req_ctx->partial_bytes,
   1470						false, true);
   1471			if (error)
   1472				return error;
   1473
   1474			req_ctx->digcnt += req_ctx->partial_bytes;
   1475			req_ctx->partial_bytes = 0;
   1476		}
   1477
   1478		if (req_ctx->hash_flags & HASH_FLAG_HMAC)
   1479			digest_bits = 8 * (req_ctx->digcnt + blocksize);
   1480		else
   1481			digest_bits = 8 * req_ctx->digcnt;
   1482
   1483		/* Add the hash pad */
   1484		hash_pad_len = create_hash_pad(oper, req_ctx->pad_buffer,
   1485					       req_ctx->digcnt, digest_bits);
   1486		error = artpec6_crypto_setup_out_descr(common,
   1487						      req_ctx->pad_buffer,
   1488						      hash_pad_len, false,
   1489						      true);
   1490		req_ctx->digcnt = 0;
   1491
   1492		if (error)
   1493			return error;
   1494
   1495		/* Descriptor for the final result */
   1496		error = artpec6_crypto_setup_in_descr(common, areq->result,
   1497						      digestsize,
   1498						      true);
   1499		if (error)
   1500			return error;
   1501
   1502	} else { /* This is not the final operation for this request */
   1503		if (!run_hw)
   1504			return ARTPEC6_CRYPTO_PREPARE_HASH_NO_START;
   1505
   1506		/* Save the result to the context */
   1507		error = artpec6_crypto_setup_in_descr(common,
   1508						      req_ctx->digeststate,
   1509						      contextsize, false);
   1510		if (error)
   1511			return error;
   1512		/* fall through */
   1513	}
   1514
   1515	req_ctx->hash_flags &= ~(HASH_FLAG_INIT_CTX | HASH_FLAG_UPDATE |
   1516				 HASH_FLAG_FINALIZE);
   1517
   1518	error = artpec6_crypto_terminate_in_descrs(common);
   1519	if (error)
   1520		return error;
   1521
   1522	error = artpec6_crypto_terminate_out_descrs(common);
   1523	if (error)
   1524		return error;
   1525
   1526	error = artpec6_crypto_dma_map_descs(common);
   1527	if (error)
   1528		return error;
   1529
   1530	return ARTPEC6_CRYPTO_PREPARE_HASH_START;
   1531}
   1532
   1533
   1534static int artpec6_crypto_aes_ecb_init(struct crypto_skcipher *tfm)
   1535{
   1536	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
   1537
   1538	tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
   1539	ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_ECB;
   1540
   1541	return 0;
   1542}
   1543
   1544static int artpec6_crypto_aes_ctr_init(struct crypto_skcipher *tfm)
   1545{
   1546	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
   1547
   1548	ctx->fallback =
   1549		crypto_alloc_sync_skcipher(crypto_tfm_alg_name(&tfm->base),
   1550					   0, CRYPTO_ALG_NEED_FALLBACK);
   1551	if (IS_ERR(ctx->fallback))
   1552		return PTR_ERR(ctx->fallback);
   1553
   1554	tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
   1555	ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CTR;
   1556
   1557	return 0;
   1558}
   1559
   1560static int artpec6_crypto_aes_cbc_init(struct crypto_skcipher *tfm)
   1561{
   1562	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
   1563
   1564	tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
   1565	ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CBC;
   1566
   1567	return 0;
   1568}
   1569
   1570static int artpec6_crypto_aes_xts_init(struct crypto_skcipher *tfm)
   1571{
   1572	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
   1573
   1574	tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
   1575	ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_XTS;
   1576
   1577	return 0;
   1578}
   1579
   1580static void artpec6_crypto_aes_exit(struct crypto_skcipher *tfm)
   1581{
   1582	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
   1583
   1584	memset(ctx, 0, sizeof(*ctx));
   1585}
   1586
   1587static void artpec6_crypto_aes_ctr_exit(struct crypto_skcipher *tfm)
   1588{
   1589	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
   1590
   1591	crypto_free_sync_skcipher(ctx->fallback);
   1592	artpec6_crypto_aes_exit(tfm);
   1593}
   1594
   1595static int
   1596artpec6_crypto_cipher_set_key(struct crypto_skcipher *cipher, const u8 *key,
   1597			      unsigned int keylen)
   1598{
   1599	struct artpec6_cryptotfm_context *ctx =
   1600		crypto_skcipher_ctx(cipher);
   1601
   1602	switch (keylen) {
   1603	case 16:
   1604	case 24:
   1605	case 32:
   1606		break;
   1607	default:
   1608		return -EINVAL;
   1609	}
   1610
   1611	memcpy(ctx->aes_key, key, keylen);
   1612	ctx->key_length = keylen;
   1613	return 0;
   1614}
   1615
   1616static int
   1617artpec6_crypto_xts_set_key(struct crypto_skcipher *cipher, const u8 *key,
   1618			      unsigned int keylen)
   1619{
   1620	struct artpec6_cryptotfm_context *ctx =
   1621		crypto_skcipher_ctx(cipher);
   1622	int ret;
   1623
   1624	ret = xts_check_key(&cipher->base, key, keylen);
   1625	if (ret)
   1626		return ret;
   1627
   1628	switch (keylen) {
   1629	case 32:
   1630	case 48:
   1631	case 64:
   1632		break;
   1633	default:
   1634		return -EINVAL;
   1635	}
   1636
   1637	memcpy(ctx->aes_key, key, keylen);
   1638	ctx->key_length = keylen;
   1639	return 0;
   1640}
   1641
    1642/** artpec6_crypto_prepare_crypto - Prepare an async block cipher crypto request
   1643 *
   1644 * @req: The asynch request to process
   1645 *
   1646 * @return 0 if the dma job was successfully prepared
   1647 *	  <0 on error
   1648 *
   1649 * This function sets up the PDMA descriptors for a block cipher request.
   1650 *
   1651 * The required padding is added for AES-CTR using a statically defined
   1652 * buffer.
   1653 *
   1654 * The PDMA descriptor list will be as follows:
   1655 *
   1656 * OUT: [KEY_MD][KEY][EOP]<CIPHER_MD>[IV]<data_0>...[data_n][AES-CTR_pad]<eop>
   1657 * IN:  <CIPHER_MD><data_0>...[data_n]<intr>
   1658 *
   1659 */
   1660static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq)
   1661{
   1662	int ret;
   1663	struct artpec6_crypto_walk walk;
   1664	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
   1665	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
   1666	struct artpec6_crypto_request_context *req_ctx = NULL;
   1667	size_t iv_len = crypto_skcipher_ivsize(cipher);
   1668	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
   1669	enum artpec6_crypto_variant variant = ac->variant;
   1670	struct artpec6_crypto_req_common *common;
   1671	bool cipher_decr = false;
   1672	size_t cipher_klen;
   1673	u32 cipher_len = 0; /* Same as regk_crypto_key_128 for NULL crypto */
   1674	u32 oper;
   1675
   1676	req_ctx = skcipher_request_ctx(areq);
   1677	common = &req_ctx->common;
   1678
   1679	artpec6_crypto_init_dma_operation(common);
   1680
   1681	if (variant == ARTPEC6_CRYPTO)
   1682		ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER, a6_regk_crypto_dlkey);
   1683	else
   1684		ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER, a7_regk_crypto_dlkey);
   1685
   1686	ret = artpec6_crypto_setup_out_descr(common, (void *)&ctx->key_md,
   1687					     sizeof(ctx->key_md), false, false);
   1688	if (ret)
   1689		return ret;
   1690
   1691	ret = artpec6_crypto_setup_out_descr(common, ctx->aes_key,
   1692					      ctx->key_length, true, false);
   1693	if (ret)
   1694		return ret;
   1695
   1696	req_ctx->cipher_md = 0;
   1697
   1698	if (ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_XTS)
    1699		cipher_klen = ctx->key_length / 2;
    1700	else
    1701		cipher_klen = ctx->key_length;
   1702
   1703	/* Metadata */
   1704	switch (cipher_klen) {
   1705	case 16:
   1706		cipher_len = regk_crypto_key_128;
   1707		break;
   1708	case 24:
   1709		cipher_len = regk_crypto_key_192;
   1710		break;
   1711	case 32:
   1712		cipher_len = regk_crypto_key_256;
   1713		break;
   1714	default:
   1715		pr_err("%s: Invalid key length %d!\n",
   1716			MODULE_NAME, ctx->key_length);
   1717		return -EINVAL;
   1718	}
   1719
   1720	switch (ctx->crypto_type) {
   1721	case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
   1722		oper = regk_crypto_aes_ecb;
   1723		cipher_decr = req_ctx->decrypt;
   1724		break;
   1725
   1726	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
   1727		oper = regk_crypto_aes_cbc;
   1728		cipher_decr = req_ctx->decrypt;
   1729		break;
   1730
   1731	case ARTPEC6_CRYPTO_CIPHER_AES_CTR:
   1732		oper = regk_crypto_aes_ctr;
   1733		cipher_decr = false;
   1734		break;
   1735
   1736	case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
   1737		oper = regk_crypto_aes_xts;
   1738		cipher_decr = req_ctx->decrypt;
   1739
   1740		if (variant == ARTPEC6_CRYPTO)
   1741			req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DSEQ;
   1742		else
   1743			req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DSEQ;
   1744		break;
   1745
   1746	default:
   1747		pr_err("%s: Invalid cipher mode %d!\n",
   1748			MODULE_NAME, ctx->crypto_type);
   1749		return -EINVAL;
   1750	}
   1751
   1752	if (variant == ARTPEC6_CRYPTO) {
   1753		req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_OPER, oper);
   1754		req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_CIPHER_LEN,
   1755						 cipher_len);
   1756		if (cipher_decr)
   1757			req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DECR;
   1758	} else {
   1759		req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_OPER, oper);
   1760		req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_CIPHER_LEN,
   1761						 cipher_len);
   1762		if (cipher_decr)
   1763			req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DECR;
   1764	}
   1765
   1766	ret = artpec6_crypto_setup_out_descr(common,
   1767					    &req_ctx->cipher_md,
   1768					    sizeof(req_ctx->cipher_md),
   1769					    false, false);
   1770	if (ret)
   1771		return ret;
   1772
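        	/*
        	 * The IN descriptor list starts with a CIPHER_MD word from the
        	 * HW (see the layout above); receive it into the scratch pad
        	 * buffer.
        	 */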
   1773	ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
   1774	if (ret)
   1775		return ret;
   1776
   1777	if (iv_len) {
   1778		ret = artpec6_crypto_setup_out_descr(common, areq->iv, iv_len,
   1779						     false, false);
   1780		if (ret)
   1781			return ret;
   1782	}
   1783	/* Data out */
   1784	artpec6_crypto_walk_init(&walk, areq->src);
   1785	ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, areq->cryptlen);
   1786	if (ret)
   1787		return ret;
   1788
   1789	/* Data in */
   1790	artpec6_crypto_walk_init(&walk, areq->dst);
   1791	ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, areq->cryptlen);
   1792	if (ret)
   1793		return ret;
   1794
   1795	/* CTR-mode padding required by the HW. */
   1796	if (ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_CTR ||
   1797	    ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_XTS) {
   1798		size_t pad = ALIGN(areq->cryptlen, AES_BLOCK_SIZE) -
   1799			     areq->cryptlen;
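        		/*
        		 * e.g. a 20 byte request gets 12 bytes of padding up to
        		 * the next 16 byte AES block boundary.
        		 */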
   1800
   1801		if (pad) {
   1802			ret = artpec6_crypto_setup_out_descr(common,
   1803							     ac->pad_buffer,
   1804							     pad, false, false);
   1805			if (ret)
   1806				return ret;
   1807
   1808			ret = artpec6_crypto_setup_in_descr(common,
   1809							    ac->pad_buffer, pad,
   1810							    false);
   1811			if (ret)
   1812				return ret;
   1813		}
   1814	}
   1815
   1816	ret = artpec6_crypto_terminate_out_descrs(common);
   1817	if (ret)
   1818		return ret;
   1819
   1820	ret = artpec6_crypto_terminate_in_descrs(common);
   1821	if (ret)
   1822		return ret;
   1823
   1824	return artpec6_crypto_dma_map_descs(common);
   1825}
   1826
   1827static int artpec6_crypto_prepare_aead(struct aead_request *areq)
   1828{
   1829	size_t count;
   1830	int ret;
   1831	size_t input_length;
   1832	struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(areq->base.tfm);
   1833	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq);
   1834	struct crypto_aead *cipher = crypto_aead_reqtfm(areq);
   1835	struct artpec6_crypto_req_common *common = &req_ctx->common;
   1836	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
   1837	enum artpec6_crypto_variant variant = ac->variant;
   1838	u32 md_cipher_len;
   1839
   1840	artpec6_crypto_init_dma_operation(common);
   1841
   1842	/* Key */
   1843	if (variant == ARTPEC6_CRYPTO) {
   1844		ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER,
   1845					 a6_regk_crypto_dlkey);
   1846	} else {
   1847		ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER,
   1848					 a7_regk_crypto_dlkey);
   1849	}
   1850	ret = artpec6_crypto_setup_out_descr(common, (void *)&ctx->key_md,
   1851					     sizeof(ctx->key_md), false, false);
   1852	if (ret)
   1853		return ret;
   1854
   1855	ret = artpec6_crypto_setup_out_descr(common, ctx->aes_key,
   1856					     ctx->key_length, true, false);
   1857	if (ret)
   1858		return ret;
   1859
   1860	req_ctx->cipher_md = 0;
   1861
   1862	switch (ctx->key_length) {
   1863	case 16:
   1864		md_cipher_len = regk_crypto_key_128;
   1865		break;
   1866	case 24:
   1867		md_cipher_len = regk_crypto_key_192;
   1868		break;
   1869	case 32:
   1870		md_cipher_len = regk_crypto_key_256;
   1871		break;
   1872	default:
   1873		return -EINVAL;
   1874	}
   1875
   1876	if (variant == ARTPEC6_CRYPTO) {
   1877		req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_OPER,
   1878						 regk_crypto_aes_gcm);
   1879		req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_CIPHER_LEN,
   1880						 md_cipher_len);
   1881		if (req_ctx->decrypt)
   1882			req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DECR;
   1883	} else {
   1884		req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_OPER,
   1885						 regk_crypto_aes_gcm);
   1886		req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_CIPHER_LEN,
   1887						 md_cipher_len);
   1888		if (req_ctx->decrypt)
   1889			req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DECR;
   1890	}
   1891
   1892	ret = artpec6_crypto_setup_out_descr(common,
   1893					    (void *) &req_ctx->cipher_md,
   1894					    sizeof(req_ctx->cipher_md), false,
   1895					    false);
   1896	if (ret)
   1897		return ret;
   1898
   1899	ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
   1900	if (ret)
   1901		return ret;
   1902
   1903	/* For the decryption, cryptlen includes the tag. */
   1904	input_length = areq->cryptlen;
   1905	if (req_ctx->decrypt)
   1906		input_length -= crypto_aead_authsize(cipher);
   1907
   1908	/* Prepare the context buffer */
   1909	req_ctx->hw_ctx.aad_length_bits =
   1910		__cpu_to_be64(8*areq->assoclen);
   1911
   1912	req_ctx->hw_ctx.text_length_bits =
   1913		__cpu_to_be64(8*input_length);
   1914
   1915	memcpy(req_ctx->hw_ctx.J0, areq->iv, crypto_aead_ivsize(cipher));
    1916	/* The HW omits the initial increment of the counter field. */
   1917	memcpy(req_ctx->hw_ctx.J0 + GCM_AES_IV_SIZE, "\x00\x00\x00\x01", 4);
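        	/*
        	 * For a 96-bit IV, GCM defines J0 = IV || 0^31 || 1, so the
        	 * counter part of J0 starts at one.
        	 */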
   1918
   1919	ret = artpec6_crypto_setup_out_descr(common, &req_ctx->hw_ctx,
   1920		sizeof(struct artpec6_crypto_aead_hw_ctx), false, false);
   1921	if (ret)
   1922		return ret;
   1923
   1924	{
   1925		struct artpec6_crypto_walk walk;
   1926
   1927		artpec6_crypto_walk_init(&walk, areq->src);
   1928
   1929		/* Associated data */
   1930		count = areq->assoclen;
   1931		ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count);
   1932		if (ret)
   1933			return ret;
   1934
   1935		if (!IS_ALIGNED(areq->assoclen, 16)) {
   1936			size_t assoc_pad = 16 - (areq->assoclen % 16);
   1937			/* The HW mandates zero padding here */
   1938			ret = artpec6_crypto_setup_out_descr(common,
   1939							     ac->zero_buffer,
   1940							     assoc_pad, false,
   1941							     false);
   1942			if (ret)
   1943				return ret;
   1944		}
   1945
   1946		/* Data to crypto */
   1947		count = input_length;
   1948		ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count);
   1949		if (ret)
   1950			return ret;
   1951
   1952		if (!IS_ALIGNED(input_length, 16)) {
   1953			size_t crypto_pad = 16 - (input_length % 16);
   1954			/* The HW mandates zero padding here */
   1955			ret = artpec6_crypto_setup_out_descr(common,
   1956							     ac->zero_buffer,
   1957							     crypto_pad,
   1958							     false,
   1959							     false);
   1960			if (ret)
   1961				return ret;
   1962		}
   1963	}
   1964
   1965	/* Data from crypto */
   1966	{
   1967		struct artpec6_crypto_walk walk;
   1968		size_t output_len = areq->cryptlen;
   1969
   1970		if (req_ctx->decrypt)
   1971			output_len -= crypto_aead_authsize(cipher);
   1972
   1973		artpec6_crypto_walk_init(&walk, areq->dst);
   1974
   1975		/* skip associated data in the output */
   1976		count = artpec6_crypto_walk_advance(&walk, areq->assoclen);
   1977		if (count)
   1978			return -EINVAL;
   1979
   1980		count = output_len;
   1981		ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, count);
   1982		if (ret)
   1983			return ret;
   1984
    1985		/* Put padding between the ciphertext and the auth tag */
   1986		if (!IS_ALIGNED(output_len, 16)) {
   1987			size_t crypto_pad = 16 - (output_len % 16);
   1988
   1989			ret = artpec6_crypto_setup_in_descr(common,
   1990							    ac->pad_buffer,
   1991							    crypto_pad, false);
   1992			if (ret)
   1993				return ret;
   1994		}
   1995
   1996		/* The authentication tag shall follow immediately after
   1997		 * the output ciphertext. For decryption it is put in a context
    1998		 * buffer for a later comparison against the input tag.
   1999		 */
   2000
   2001		if (req_ctx->decrypt) {
   2002			ret = artpec6_crypto_setup_in_descr(common,
   2003				req_ctx->decryption_tag, AES_BLOCK_SIZE, false);
   2004			if (ret)
   2005				return ret;
   2006
   2007		} else {
   2008			/* For encryption the requested tag size may be smaller
   2009			 * than the hardware's generated tag.
   2010			 */
   2011			size_t authsize = crypto_aead_authsize(cipher);
   2012
   2013			ret = artpec6_crypto_setup_sg_descrs_in(common, &walk,
   2014								authsize);
   2015			if (ret)
   2016				return ret;
   2017
   2018			if (authsize < AES_BLOCK_SIZE) {
   2019				count = AES_BLOCK_SIZE - authsize;
   2020				ret = artpec6_crypto_setup_in_descr(common,
   2021					ac->pad_buffer,
   2022					count, false);
   2023				if (ret)
   2024					return ret;
   2025			}
   2026		}
   2027
   2028	}
   2029
   2030	ret = artpec6_crypto_terminate_in_descrs(common);
   2031	if (ret)
   2032		return ret;
   2033
   2034	ret = artpec6_crypto_terminate_out_descrs(common);
   2035	if (ret)
   2036		return ret;
   2037
   2038	return artpec6_crypto_dma_map_descs(common);
   2039}
   2040
   2041static void artpec6_crypto_process_queue(struct artpec6_crypto *ac,
   2042	    struct list_head *completions)
   2043{
   2044	struct artpec6_crypto_req_common *req;
   2045
   2046	while (!list_empty(&ac->queue) && !artpec6_crypto_busy()) {
   2047		req = list_first_entry(&ac->queue,
   2048				       struct artpec6_crypto_req_common,
   2049				       list);
   2050		list_move_tail(&req->list, &ac->pending);
   2051		artpec6_crypto_start_dma(req);
   2052
   2053		list_add_tail(&req->complete_in_progress, completions);
   2054	}
   2055
   2056	/*
   2057	 * In some cases, the hardware can raise an in_eop_flush interrupt
    2058	 * before actually updating the status, so we have a timer which will
    2059	 * recheck the status on timeout.  Since the cases are expected to be
    2060	 * very rare, we use a relatively large timeout value.  There should be
    2061	 * no noticeable negative effect if we time out spuriously.
   2062	 */
   2063	if (ac->pending_count)
   2064		mod_timer(&ac->timer, jiffies + msecs_to_jiffies(100));
   2065	else
   2066		del_timer(&ac->timer);
   2067}
   2068
   2069static void artpec6_crypto_timeout(struct timer_list *t)
   2070{
   2071	struct artpec6_crypto *ac = from_timer(ac, t, timer);
   2072
   2073	dev_info_ratelimited(artpec6_crypto_dev, "timeout\n");
   2074
   2075	tasklet_schedule(&ac->task);
   2076}
   2077
   2078static void artpec6_crypto_task(unsigned long data)
   2079{
   2080	struct artpec6_crypto *ac = (struct artpec6_crypto *)data;
   2081	struct artpec6_crypto_req_common *req;
   2082	struct artpec6_crypto_req_common *n;
   2083	struct list_head complete_done;
   2084	struct list_head complete_in_progress;
   2085
   2086	INIT_LIST_HEAD(&complete_done);
   2087	INIT_LIST_HEAD(&complete_in_progress);
   2088
   2089	if (list_empty(&ac->pending)) {
   2090		pr_debug("Spurious IRQ\n");
   2091		return;
   2092	}
   2093
   2094	spin_lock_bh(&ac->queue_lock);
   2095
   2096	list_for_each_entry_safe(req, n, &ac->pending, list) {
   2097		struct artpec6_crypto_dma_descriptors *dma = req->dma;
   2098		u32 stat;
   2099		dma_addr_t stataddr;
   2100
   2101		stataddr = dma->stat_dma_addr + 4 * (req->dma->in_cnt - 1);
   2102		dma_sync_single_for_cpu(artpec6_crypto_dev,
   2103					stataddr,
   2104					4,
   2105					DMA_BIDIRECTIONAL);
   2106
   2107		stat = req->dma->stat[req->dma->in_cnt-1];
   2108
   2109		/* A non-zero final status descriptor indicates
   2110		 * this job has finished.
   2111		 */
   2112		pr_debug("Request %p status is %X\n", req, stat);
   2113		if (!stat)
   2114			break;
   2115
   2116		/* Allow testing of timeout handling with fault injection */
   2117#ifdef CONFIG_FAULT_INJECTION
   2118		if (should_fail(&artpec6_crypto_fail_status_read, 1))
   2119			continue;
   2120#endif
   2121
   2122		pr_debug("Completing request %p\n", req);
   2123
   2124		list_move_tail(&req->list, &complete_done);
   2125
   2126		ac->pending_count--;
   2127	}
   2128
   2129	artpec6_crypto_process_queue(ac, &complete_in_progress);
   2130
   2131	spin_unlock_bh(&ac->queue_lock);
   2132
   2133	/* Perform the completion callbacks without holding the queue lock
   2134	 * to allow new request submissions from the callbacks.
   2135	 */
   2136	list_for_each_entry_safe(req, n, &complete_done, list) {
   2137		artpec6_crypto_dma_unmap_all(req);
   2138		artpec6_crypto_copy_bounce_buffers(req);
   2139		artpec6_crypto_common_destroy(req);
   2140
   2141		req->complete(req->req);
   2142	}
   2143
   2144	list_for_each_entry_safe(req, n, &complete_in_progress,
   2145				 complete_in_progress) {
   2146		req->req->complete(req->req, -EINPROGRESS);
   2147	}
   2148}
   2149
   2150static void artpec6_crypto_complete_crypto(struct crypto_async_request *req)
   2151{
   2152	req->complete(req, 0);
   2153}
   2154
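        /*
         * For CBC the skcipher API requires req->iv to hold the last ciphertext
         * block on completion so that a follow-up request can continue the
         * chain: it is taken from the source when decrypting and from the
         * destination when encrypting.
         */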
   2155static void
   2156artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req)
   2157{
   2158	struct skcipher_request *cipher_req = container_of(req,
   2159		struct skcipher_request, base);
   2160
   2161	scatterwalk_map_and_copy(cipher_req->iv, cipher_req->src,
   2162				 cipher_req->cryptlen - AES_BLOCK_SIZE,
   2163				 AES_BLOCK_SIZE, 0);
   2164	req->complete(req, 0);
   2165}
   2166
   2167static void
   2168artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req)
   2169{
   2170	struct skcipher_request *cipher_req = container_of(req,
   2171		struct skcipher_request, base);
   2172
   2173	scatterwalk_map_and_copy(cipher_req->iv, cipher_req->dst,
   2174				 cipher_req->cryptlen - AES_BLOCK_SIZE,
   2175				 AES_BLOCK_SIZE, 0);
   2176	req->complete(req, 0);
   2177}
   2178
   2179static void artpec6_crypto_complete_aead(struct crypto_async_request *req)
   2180{
   2181	int result = 0;
   2182
   2183	/* Verify GCM hashtag. */
   2184	struct aead_request *areq = container_of(req,
   2185		struct aead_request, base);
   2186	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
   2187	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq);
   2188
   2189	if (req_ctx->decrypt) {
   2190		u8 input_tag[AES_BLOCK_SIZE];
   2191		unsigned int authsize = crypto_aead_authsize(aead);
   2192
   2193		sg_pcopy_to_buffer(areq->src,
   2194				   sg_nents(areq->src),
   2195				   input_tag,
   2196				   authsize,
   2197				   areq->assoclen + areq->cryptlen -
   2198				   authsize);
   2199
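        		/*
        		 * crypto_memneq() compares in constant time so that tag
        		 * verification does not leak timing information.
        		 */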
   2200		if (crypto_memneq(req_ctx->decryption_tag,
   2201				  input_tag,
   2202				  authsize)) {
   2203			pr_debug("***EBADMSG:\n");
   2204			print_hex_dump_debug("ref:", DUMP_PREFIX_ADDRESS, 32, 1,
   2205					     input_tag, authsize, true);
   2206			print_hex_dump_debug("out:", DUMP_PREFIX_ADDRESS, 32, 1,
   2207					     req_ctx->decryption_tag,
   2208					     authsize, true);
   2209
   2210			result = -EBADMSG;
   2211		}
   2212	}
   2213
   2214	req->complete(req, result);
   2215}
   2216
   2217static void artpec6_crypto_complete_hash(struct crypto_async_request *req)
   2218{
   2219	req->complete(req, 0);
   2220}
   2221
   2222
   2223/*------------------- Hash functions -----------------------------------------*/
   2224static int
   2225artpec6_crypto_hash_set_key(struct crypto_ahash *tfm,
   2226		    const u8 *key, unsigned int keylen)
   2227{
   2228	struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(&tfm->base);
   2229	size_t blocksize;
   2230	int ret;
   2231
   2232	if (!keylen) {
   2233		pr_err("Invalid length (%d) of HMAC key\n",
   2234			keylen);
   2235		return -EINVAL;
   2236	}
   2237
   2238	memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key));
   2239
   2240	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
   2241
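        	/*
        	 * Per the HMAC definition (RFC 2104), keys longer than the block
        	 * size are first digested down to the hash output size.
        	 */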
   2242	if (keylen > blocksize) {
   2243		tfm_ctx->hmac_key_length = blocksize;
   2244
   2245		ret = crypto_shash_tfm_digest(tfm_ctx->child_hash, key, keylen,
   2246					      tfm_ctx->hmac_key);
   2247		if (ret)
   2248			return ret;
   2249	} else {
   2250		memcpy(tfm_ctx->hmac_key, key, keylen);
   2251		tfm_ctx->hmac_key_length = keylen;
   2252	}
   2253
   2254	return 0;
   2255}
   2256
   2257static int
   2258artpec6_crypto_init_hash(struct ahash_request *req, u8 type, int hmac)
   2259{
   2260	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
   2261	enum artpec6_crypto_variant variant = ac->variant;
   2262	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
   2263	u32 oper;
   2264
   2265	memset(req_ctx, 0, sizeof(*req_ctx));
   2266
   2267	req_ctx->hash_flags = HASH_FLAG_INIT_CTX;
   2268	if (hmac)
   2269		req_ctx->hash_flags |= (HASH_FLAG_HMAC | HASH_FLAG_UPDATE_KEY);
   2270
   2271	switch (type) {
   2272	case ARTPEC6_CRYPTO_HASH_SHA1:
   2273		oper = hmac ? regk_crypto_hmac_sha1 : regk_crypto_sha1;
   2274		break;
   2275	case ARTPEC6_CRYPTO_HASH_SHA256:
   2276		oper = hmac ? regk_crypto_hmac_sha256 : regk_crypto_sha256;
   2277		break;
   2278	default:
   2279		pr_err("%s: Unsupported hash type 0x%x\n", MODULE_NAME, type);
   2280		return -EINVAL;
   2281	}
   2282
   2283	if (variant == ARTPEC6_CRYPTO)
   2284		req_ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, oper);
   2285	else
   2286		req_ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, oper);
   2287
   2288	return 0;
   2289}
   2290
   2291static int artpec6_crypto_prepare_submit_hash(struct ahash_request *req)
   2292{
   2293	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
   2294	int ret;
   2295
   2296	if (!req_ctx->common.dma) {
   2297		ret = artpec6_crypto_common_init(&req_ctx->common,
   2298					  &req->base,
   2299					  artpec6_crypto_complete_hash,
   2300					  NULL, 0);
   2301
   2302		if (ret)
   2303			return ret;
   2304	}
   2305
   2306	ret = artpec6_crypto_prepare_hash(req);
   2307	switch (ret) {
   2308	case ARTPEC6_CRYPTO_PREPARE_HASH_START:
   2309		ret = artpec6_crypto_submit(&req_ctx->common);
   2310		break;
   2311
   2312	case ARTPEC6_CRYPTO_PREPARE_HASH_NO_START:
   2313		ret = 0;
   2314		fallthrough;
   2315
   2316	default:
   2317		artpec6_crypto_common_destroy(&req_ctx->common);
   2318		break;
   2319	}
   2320
   2321	return ret;
   2322}
   2323
   2324static int artpec6_crypto_hash_final(struct ahash_request *req)
   2325{
   2326	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
   2327
   2328	req_ctx->hash_flags |= HASH_FLAG_FINALIZE;
   2329
   2330	return artpec6_crypto_prepare_submit_hash(req);
   2331}
   2332
   2333static int artpec6_crypto_hash_update(struct ahash_request *req)
   2334{
   2335	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
   2336
   2337	req_ctx->hash_flags |= HASH_FLAG_UPDATE;
   2338
   2339	return artpec6_crypto_prepare_submit_hash(req);
   2340}
   2341
   2342static int artpec6_crypto_sha1_init(struct ahash_request *req)
   2343{
   2344	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0);
   2345}
   2346
   2347static int artpec6_crypto_sha1_digest(struct ahash_request *req)
   2348{
   2349	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
   2350
   2351	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0);
   2352
   2353	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
   2354
   2355	return artpec6_crypto_prepare_submit_hash(req);
   2356}
   2357
   2358static int artpec6_crypto_sha256_init(struct ahash_request *req)
   2359{
   2360	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0);
   2361}
   2362
   2363static int artpec6_crypto_sha256_digest(struct ahash_request *req)
   2364{
   2365	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
   2366
   2367	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0);
   2368	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
   2369
   2370	return artpec6_crypto_prepare_submit_hash(req);
   2371}
   2372
   2373static int artpec6_crypto_hmac_sha256_init(struct ahash_request *req)
   2374{
   2375	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1);
   2376}
   2377
   2378static int artpec6_crypto_hmac_sha256_digest(struct ahash_request *req)
   2379{
   2380	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
   2381
   2382	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1);
   2383	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
   2384
   2385	return artpec6_crypto_prepare_submit_hash(req);
   2386}
   2387
   2388static int artpec6_crypto_ahash_init_common(struct crypto_tfm *tfm,
   2389				    const char *base_hash_name)
   2390{
   2391	struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm);
   2392
   2393	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
   2394				 sizeof(struct artpec6_hash_request_context));
   2395	memset(tfm_ctx, 0, sizeof(*tfm_ctx));
   2396
   2397	if (base_hash_name) {
   2398		struct crypto_shash *child;
   2399
   2400		child = crypto_alloc_shash(base_hash_name, 0,
   2401					   CRYPTO_ALG_NEED_FALLBACK);
   2402
   2403		if (IS_ERR(child))
   2404			return PTR_ERR(child);
   2405
   2406		tfm_ctx->child_hash = child;
   2407	}
   2408
   2409	return 0;
   2410}
   2411
   2412static int artpec6_crypto_ahash_init(struct crypto_tfm *tfm)
   2413{
   2414	return artpec6_crypto_ahash_init_common(tfm, NULL);
   2415}
   2416
   2417static int artpec6_crypto_ahash_init_hmac_sha256(struct crypto_tfm *tfm)
   2418{
   2419	return artpec6_crypto_ahash_init_common(tfm, "sha256");
   2420}
   2421
   2422static void artpec6_crypto_ahash_exit(struct crypto_tfm *tfm)
   2423{
   2424	struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm);
   2425
   2426	if (tfm_ctx->child_hash)
   2427		crypto_free_shash(tfm_ctx->child_hash);
   2428
   2429	memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key));
   2430	tfm_ctx->hmac_key_length = 0;
   2431}
   2432
   2433static int artpec6_crypto_hash_export(struct ahash_request *req, void *out)
   2434{
   2435	const struct artpec6_hash_request_context *ctx = ahash_request_ctx(req);
   2436	struct artpec6_hash_export_state *state = out;
   2437	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
   2438	enum artpec6_crypto_variant variant = ac->variant;
   2439
   2440	BUILD_BUG_ON(sizeof(state->partial_buffer) !=
   2441		     sizeof(ctx->partial_buffer));
   2442	BUILD_BUG_ON(sizeof(state->digeststate) != sizeof(ctx->digeststate));
   2443
   2444	state->digcnt = ctx->digcnt;
   2445	state->partial_bytes = ctx->partial_bytes;
   2446	state->hash_flags = ctx->hash_flags;
   2447
   2448	if (variant == ARTPEC6_CRYPTO)
   2449		state->oper = FIELD_GET(A6_CRY_MD_OPER, ctx->hash_md);
   2450	else
   2451		state->oper = FIELD_GET(A7_CRY_MD_OPER, ctx->hash_md);
   2452
   2453	memcpy(state->partial_buffer, ctx->partial_buffer,
   2454	       sizeof(state->partial_buffer));
   2455	memcpy(state->digeststate, ctx->digeststate,
   2456	       sizeof(state->digeststate));
   2457
   2458	return 0;
   2459}
   2460
   2461static int artpec6_crypto_hash_import(struct ahash_request *req, const void *in)
   2462{
   2463	struct artpec6_hash_request_context *ctx = ahash_request_ctx(req);
   2464	const struct artpec6_hash_export_state *state = in;
   2465	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
   2466	enum artpec6_crypto_variant variant = ac->variant;
   2467
   2468	memset(ctx, 0, sizeof(*ctx));
   2469
   2470	ctx->digcnt = state->digcnt;
   2471	ctx->partial_bytes = state->partial_bytes;
   2472	ctx->hash_flags = state->hash_flags;
   2473
   2474	if (variant == ARTPEC6_CRYPTO)
   2475		ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, state->oper);
   2476	else
   2477		ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, state->oper);
   2478
   2479	memcpy(ctx->partial_buffer, state->partial_buffer,
   2480	       sizeof(state->partial_buffer));
   2481	memcpy(ctx->digeststate, state->digeststate,
   2482	       sizeof(state->digeststate));
   2483
   2484	return 0;
   2485}
   2486
   2487static int init_crypto_hw(struct artpec6_crypto *ac)
   2488{
   2489	enum artpec6_crypto_variant variant = ac->variant;
   2490	void __iomem *base = ac->base;
   2491	u32 out_descr_buf_size;
   2492	u32 out_data_buf_size;
   2493	u32 in_data_buf_size;
   2494	u32 in_descr_buf_size;
   2495	u32 in_stat_buf_size;
   2496	u32 in, out;
   2497
   2498	/*
   2499	 * The PDMA unit contains 1984 bytes of internal memory for the OUT
   2500	 * channels and 1024 bytes for the IN channel. This is an elastic
   2501	 * memory used to internally store the descriptors and data. The values
    2502	 * are specified in 64 byte increments.  Trustzone buffers are not
   2503	 * used at this stage.
   2504	 */
   2505	out_data_buf_size = 16;  /* 1024 bytes for data */
   2506	out_descr_buf_size = 15; /* 960 bytes for descriptors */
   2507	in_data_buf_size = 8;    /* 512 bytes for data */
   2508	in_descr_buf_size = 4;   /* 256 bytes for descriptors */
   2509	in_stat_buf_size = 4;   /* 256 bytes for stat descrs */
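        	/*
        	 * With these values the OUT memory uses (16 + 15) * 64 = 1984
        	 * bytes and the IN memory uses (8 + 4 + 4) * 64 = 1024 bytes,
        	 * i.e. both internal memories are used in full.
        	 */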
   2510
   2511	BUILD_BUG_ON_MSG((out_data_buf_size
   2512				+ out_descr_buf_size) * 64 > 1984,
   2513			  "Invalid OUT configuration");
   2514
   2515	BUILD_BUG_ON_MSG((in_data_buf_size
   2516				+ in_descr_buf_size
   2517				+ in_stat_buf_size) * 64 > 1024,
   2518			  "Invalid IN configuration");
   2519
   2520	in = FIELD_PREP(PDMA_IN_BUF_CFG_DATA_BUF_SIZE, in_data_buf_size) |
   2521	     FIELD_PREP(PDMA_IN_BUF_CFG_DESCR_BUF_SIZE, in_descr_buf_size) |
   2522	     FIELD_PREP(PDMA_IN_BUF_CFG_STAT_BUF_SIZE, in_stat_buf_size);
   2523
   2524	out = FIELD_PREP(PDMA_OUT_BUF_CFG_DATA_BUF_SIZE, out_data_buf_size) |
   2525	      FIELD_PREP(PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE, out_descr_buf_size);
   2526
   2527	writel_relaxed(out, base + PDMA_OUT_BUF_CFG);
   2528	writel_relaxed(PDMA_OUT_CFG_EN, base + PDMA_OUT_CFG);
   2529
   2530	if (variant == ARTPEC6_CRYPTO) {
   2531		writel_relaxed(in, base + A6_PDMA_IN_BUF_CFG);
   2532		writel_relaxed(PDMA_IN_CFG_EN, base + A6_PDMA_IN_CFG);
   2533		writel_relaxed(A6_PDMA_INTR_MASK_IN_DATA |
   2534			       A6_PDMA_INTR_MASK_IN_EOP_FLUSH,
   2535			       base + A6_PDMA_INTR_MASK);
   2536	} else {
   2537		writel_relaxed(in, base + A7_PDMA_IN_BUF_CFG);
   2538		writel_relaxed(PDMA_IN_CFG_EN, base + A7_PDMA_IN_CFG);
   2539		writel_relaxed(A7_PDMA_INTR_MASK_IN_DATA |
   2540			       A7_PDMA_INTR_MASK_IN_EOP_FLUSH,
   2541			       base + A7_PDMA_INTR_MASK);
   2542	}
   2543
   2544	return 0;
   2545}
   2546
   2547static void artpec6_crypto_disable_hw(struct artpec6_crypto *ac)
   2548{
   2549	enum artpec6_crypto_variant variant = ac->variant;
   2550	void __iomem *base = ac->base;
   2551
   2552	if (variant == ARTPEC6_CRYPTO) {
   2553		writel_relaxed(A6_PDMA_IN_CMD_STOP, base + A6_PDMA_IN_CMD);
   2554		writel_relaxed(0, base + A6_PDMA_IN_CFG);
   2555		writel_relaxed(A6_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD);
   2556	} else {
   2557		writel_relaxed(A7_PDMA_IN_CMD_STOP, base + A7_PDMA_IN_CMD);
   2558		writel_relaxed(0, base + A7_PDMA_IN_CFG);
   2559		writel_relaxed(A7_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD);
   2560	}
   2561
   2562	writel_relaxed(0, base + PDMA_OUT_CFG);
   2563
   2564}
   2565
   2566static irqreturn_t artpec6_crypto_irq(int irq, void *dev_id)
   2567{
   2568	struct artpec6_crypto *ac = dev_id;
   2569	enum artpec6_crypto_variant variant = ac->variant;
   2570	void __iomem *base = ac->base;
   2571	u32 mask_in_data, mask_in_eop_flush;
   2572	u32 in_cmd_flush_stat, in_cmd_reg;
   2573	u32 ack_intr_reg;
   2574	u32 ack = 0;
   2575	u32 intr;
   2576
   2577	if (variant == ARTPEC6_CRYPTO) {
   2578		intr = readl_relaxed(base + A6_PDMA_MASKED_INTR);
   2579		mask_in_data = A6_PDMA_INTR_MASK_IN_DATA;
   2580		mask_in_eop_flush = A6_PDMA_INTR_MASK_IN_EOP_FLUSH;
   2581		in_cmd_flush_stat = A6_PDMA_IN_CMD_FLUSH_STAT;
   2582		in_cmd_reg = A6_PDMA_IN_CMD;
   2583		ack_intr_reg = A6_PDMA_ACK_INTR;
   2584	} else {
   2585		intr = readl_relaxed(base + A7_PDMA_MASKED_INTR);
   2586		mask_in_data = A7_PDMA_INTR_MASK_IN_DATA;
   2587		mask_in_eop_flush = A7_PDMA_INTR_MASK_IN_EOP_FLUSH;
   2588		in_cmd_flush_stat = A7_PDMA_IN_CMD_FLUSH_STAT;
   2589		in_cmd_reg = A7_PDMA_IN_CMD;
   2590		ack_intr_reg = A7_PDMA_ACK_INTR;
   2591	}
   2592
   2593	/* We get two interrupt notifications from each job.
    2594	 * The in_data interrupt means all data has been written to
    2595	 * memory, and we then issue a status flush command to write the
    2596	 * per-job status to its status vector. This ensures that the
    2597	 * tasklet can detect exactly how many of the submitted jobs
    2598	 * have finished.
   2599	 */
   2600	if (intr & mask_in_data)
   2601		ack |= mask_in_data;
   2602
   2603	if (intr & mask_in_eop_flush)
   2604		ack |= mask_in_eop_flush;
   2605	else
   2606		writel_relaxed(in_cmd_flush_stat, base + in_cmd_reg);
   2607
   2608	writel_relaxed(ack, base + ack_intr_reg);
   2609
   2610	if (intr & mask_in_eop_flush)
   2611		tasklet_schedule(&ac->task);
   2612
   2613	return IRQ_HANDLED;
   2614}
   2615
   2616/*------------------- Algorithm definitions ----------------------------------*/
   2617
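        /*
         * Usage sketch (illustration only, not part of the original driver):
         * the algorithms registered below are reached through the generic
         * asynchronous crypto API, roughly as follows (error handling omitted;
         * key, iv, src_sg, dst_sg and len are caller-provided):
         *
         *	DECLARE_CRYPTO_WAIT(wait);
         *	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
         *	struct skcipher_request *req = skcipher_request_alloc(tfm, GFP_KERNEL);
         *
         *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
         *	skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
         *	skcipher_request_set_crypt(req, src_sg, dst_sg, len, iv);
         *	crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
         */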
   2618/* Hashes */
   2619static struct ahash_alg hash_algos[] = {
   2620	/* SHA-1 */
   2621	{
   2622		.init = artpec6_crypto_sha1_init,
   2623		.update = artpec6_crypto_hash_update,
   2624		.final = artpec6_crypto_hash_final,
   2625		.digest = artpec6_crypto_sha1_digest,
   2626		.import = artpec6_crypto_hash_import,
   2627		.export = artpec6_crypto_hash_export,
   2628		.halg.digestsize = SHA1_DIGEST_SIZE,
   2629		.halg.statesize = sizeof(struct artpec6_hash_export_state),
   2630		.halg.base = {
   2631			.cra_name = "sha1",
   2632			.cra_driver_name = "artpec-sha1",
   2633			.cra_priority = 300,
   2634			.cra_flags = CRYPTO_ALG_ASYNC |
   2635				     CRYPTO_ALG_ALLOCATES_MEMORY,
   2636			.cra_blocksize = SHA1_BLOCK_SIZE,
   2637			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
   2638			.cra_alignmask = 3,
   2639			.cra_module = THIS_MODULE,
   2640			.cra_init = artpec6_crypto_ahash_init,
   2641			.cra_exit = artpec6_crypto_ahash_exit,
   2642		}
   2643	},
   2644	/* SHA-256 */
   2645	{
   2646		.init = artpec6_crypto_sha256_init,
   2647		.update = artpec6_crypto_hash_update,
   2648		.final = artpec6_crypto_hash_final,
   2649		.digest = artpec6_crypto_sha256_digest,
   2650		.import = artpec6_crypto_hash_import,
   2651		.export = artpec6_crypto_hash_export,
   2652		.halg.digestsize = SHA256_DIGEST_SIZE,
   2653		.halg.statesize = sizeof(struct artpec6_hash_export_state),
   2654		.halg.base = {
   2655			.cra_name = "sha256",
   2656			.cra_driver_name = "artpec-sha256",
   2657			.cra_priority = 300,
   2658			.cra_flags = CRYPTO_ALG_ASYNC |
   2659				     CRYPTO_ALG_ALLOCATES_MEMORY,
   2660			.cra_blocksize = SHA256_BLOCK_SIZE,
   2661			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
   2662			.cra_alignmask = 3,
   2663			.cra_module = THIS_MODULE,
   2664			.cra_init = artpec6_crypto_ahash_init,
   2665			.cra_exit = artpec6_crypto_ahash_exit,
   2666		}
   2667	},
   2668	/* HMAC SHA-256 */
   2669	{
   2670		.init = artpec6_crypto_hmac_sha256_init,
   2671		.update = artpec6_crypto_hash_update,
   2672		.final = artpec6_crypto_hash_final,
   2673		.digest = artpec6_crypto_hmac_sha256_digest,
   2674		.import = artpec6_crypto_hash_import,
   2675		.export = artpec6_crypto_hash_export,
   2676		.setkey = artpec6_crypto_hash_set_key,
   2677		.halg.digestsize = SHA256_DIGEST_SIZE,
   2678		.halg.statesize = sizeof(struct artpec6_hash_export_state),
   2679		.halg.base = {
   2680			.cra_name = "hmac(sha256)",
   2681			.cra_driver_name = "artpec-hmac-sha256",
   2682			.cra_priority = 300,
   2683			.cra_flags = CRYPTO_ALG_ASYNC |
   2684				     CRYPTO_ALG_ALLOCATES_MEMORY,
   2685			.cra_blocksize = SHA256_BLOCK_SIZE,
   2686			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
   2687			.cra_alignmask = 3,
   2688			.cra_module = THIS_MODULE,
   2689			.cra_init = artpec6_crypto_ahash_init_hmac_sha256,
   2690			.cra_exit = artpec6_crypto_ahash_exit,
   2691		}
   2692	},
   2693};
   2694
   2695/* Crypto */
   2696static struct skcipher_alg crypto_algos[] = {
   2697	/* AES - ECB */
   2698	{
   2699		.base = {
   2700			.cra_name = "ecb(aes)",
   2701			.cra_driver_name = "artpec6-ecb-aes",
   2702			.cra_priority = 300,
   2703			.cra_flags = CRYPTO_ALG_ASYNC |
   2704				     CRYPTO_ALG_ALLOCATES_MEMORY,
   2705			.cra_blocksize = AES_BLOCK_SIZE,
   2706			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
   2707			.cra_alignmask = 3,
   2708			.cra_module = THIS_MODULE,
   2709		},
   2710		.min_keysize = AES_MIN_KEY_SIZE,
   2711		.max_keysize = AES_MAX_KEY_SIZE,
   2712		.setkey = artpec6_crypto_cipher_set_key,
   2713		.encrypt = artpec6_crypto_encrypt,
   2714		.decrypt = artpec6_crypto_decrypt,
   2715		.init = artpec6_crypto_aes_ecb_init,
   2716		.exit = artpec6_crypto_aes_exit,
   2717	},
   2718	/* AES - CTR */
   2719	{
   2720		.base = {
   2721			.cra_name = "ctr(aes)",
   2722			.cra_driver_name = "artpec6-ctr-aes",
   2723			.cra_priority = 300,
   2724			.cra_flags = CRYPTO_ALG_ASYNC |
   2725				     CRYPTO_ALG_ALLOCATES_MEMORY |
   2726				     CRYPTO_ALG_NEED_FALLBACK,
   2727			.cra_blocksize = 1,
   2728			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
   2729			.cra_alignmask = 3,
   2730			.cra_module = THIS_MODULE,
   2731		},
   2732		.min_keysize = AES_MIN_KEY_SIZE,
   2733		.max_keysize = AES_MAX_KEY_SIZE,
   2734		.ivsize = AES_BLOCK_SIZE,
   2735		.setkey = artpec6_crypto_cipher_set_key,
   2736		.encrypt = artpec6_crypto_ctr_encrypt,
   2737		.decrypt = artpec6_crypto_ctr_decrypt,
   2738		.init = artpec6_crypto_aes_ctr_init,
   2739		.exit = artpec6_crypto_aes_ctr_exit,
   2740	},
   2741	/* AES - CBC */
   2742	{
   2743		.base = {
   2744			.cra_name = "cbc(aes)",
   2745			.cra_driver_name = "artpec6-cbc-aes",
   2746			.cra_priority = 300,
   2747			.cra_flags = CRYPTO_ALG_ASYNC |
   2748				     CRYPTO_ALG_ALLOCATES_MEMORY,
   2749			.cra_blocksize = AES_BLOCK_SIZE,
   2750			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
   2751			.cra_alignmask = 3,
   2752			.cra_module = THIS_MODULE,
   2753		},
   2754		.min_keysize = AES_MIN_KEY_SIZE,
   2755		.max_keysize = AES_MAX_KEY_SIZE,
   2756		.ivsize = AES_BLOCK_SIZE,
   2757		.setkey = artpec6_crypto_cipher_set_key,
   2758		.encrypt = artpec6_crypto_encrypt,
   2759		.decrypt = artpec6_crypto_decrypt,
   2760		.init = artpec6_crypto_aes_cbc_init,
   2761		.exit = artpec6_crypto_aes_exit
   2762	},
   2763	/* AES - XTS */
   2764	{
   2765		.base = {
   2766			.cra_name = "xts(aes)",
   2767			.cra_driver_name = "artpec6-xts-aes",
   2768			.cra_priority = 300,
   2769			.cra_flags = CRYPTO_ALG_ASYNC |
   2770				     CRYPTO_ALG_ALLOCATES_MEMORY,
   2771			.cra_blocksize = 1,
   2772			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
   2773			.cra_alignmask = 3,
   2774			.cra_module = THIS_MODULE,
   2775		},
   2776		.min_keysize = 2*AES_MIN_KEY_SIZE,
   2777		.max_keysize = 2*AES_MAX_KEY_SIZE,
   2778		.ivsize = 16,
   2779		.setkey = artpec6_crypto_xts_set_key,
   2780		.encrypt = artpec6_crypto_encrypt,
   2781		.decrypt = artpec6_crypto_decrypt,
   2782		.init = artpec6_crypto_aes_xts_init,
   2783		.exit = artpec6_crypto_aes_exit,
   2784	},
   2785};
   2786
   2787static struct aead_alg aead_algos[] = {
   2788	{
   2789		.init   = artpec6_crypto_aead_init,
   2790		.setkey = artpec6_crypto_aead_set_key,
   2791		.encrypt = artpec6_crypto_aead_encrypt,
   2792		.decrypt = artpec6_crypto_aead_decrypt,
   2793		.ivsize = GCM_AES_IV_SIZE,
   2794		.maxauthsize = AES_BLOCK_SIZE,
   2795
   2796		.base = {
   2797			.cra_name = "gcm(aes)",
   2798			.cra_driver_name = "artpec-gcm-aes",
   2799			.cra_priority = 300,
   2800			.cra_flags = CRYPTO_ALG_ASYNC |
   2801				     CRYPTO_ALG_ALLOCATES_MEMORY |
   2802				     CRYPTO_ALG_KERN_DRIVER_ONLY,
   2803			.cra_blocksize = 1,
   2804			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
   2805			.cra_alignmask = 3,
   2806			.cra_module = THIS_MODULE,
   2807		},
   2808	}
   2809};
   2810
   2811#ifdef CONFIG_DEBUG_FS
   2812
   2813struct dbgfs_u32 {
   2814	char *name;
   2815	mode_t mode;
   2816	u32 *flag;
   2817	char *desc;
   2818};
   2819
   2820static struct dentry *dbgfs_root;
   2821
   2822static void artpec6_crypto_init_debugfs(void)
   2823{
   2824	dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL);
   2825
   2826#ifdef CONFIG_FAULT_INJECTION
   2827	fault_create_debugfs_attr("fail_status_read", dbgfs_root,
   2828				  &artpec6_crypto_fail_status_read);
   2829
   2830	fault_create_debugfs_attr("fail_dma_array_full", dbgfs_root,
   2831				  &artpec6_crypto_fail_dma_array_full);
   2832#endif
   2833}
   2834
   2835static void artpec6_crypto_free_debugfs(void)
   2836{
   2837	debugfs_remove_recursive(dbgfs_root);
   2838	dbgfs_root = NULL;
   2839}
   2840#endif
   2841
   2842static const struct of_device_id artpec6_crypto_of_match[] = {
   2843	{ .compatible = "axis,artpec6-crypto", .data = (void *)ARTPEC6_CRYPTO },
   2844	{ .compatible = "axis,artpec7-crypto", .data = (void *)ARTPEC7_CRYPTO },
   2845	{}
   2846};
   2847MODULE_DEVICE_TABLE(of, artpec6_crypto_of_match);
   2848
   2849static int artpec6_crypto_probe(struct platform_device *pdev)
   2850{
   2851	const struct of_device_id *match;
   2852	enum artpec6_crypto_variant variant;
   2853	struct artpec6_crypto *ac;
   2854	struct device *dev = &pdev->dev;
   2855	void __iomem *base;
   2856	int irq;
   2857	int err;
   2858
   2859	if (artpec6_crypto_dev)
   2860		return -ENODEV;
   2861
   2862	match = of_match_node(artpec6_crypto_of_match, dev->of_node);
   2863	if (!match)
   2864		return -EINVAL;
   2865
   2866	variant = (enum artpec6_crypto_variant)match->data;
   2867
   2868	base = devm_platform_ioremap_resource(pdev, 0);
   2869	if (IS_ERR(base))
   2870		return PTR_ERR(base);
   2871
   2872	irq = platform_get_irq(pdev, 0);
   2873	if (irq < 0)
   2874		return -ENODEV;
   2875
   2876	ac = devm_kzalloc(&pdev->dev, sizeof(struct artpec6_crypto),
   2877			  GFP_KERNEL);
   2878	if (!ac)
   2879		return -ENOMEM;
   2880
   2881	platform_set_drvdata(pdev, ac);
   2882	ac->variant = variant;
   2883
   2884	spin_lock_init(&ac->queue_lock);
   2885	INIT_LIST_HEAD(&ac->queue);
   2886	INIT_LIST_HEAD(&ac->pending);
   2887	timer_setup(&ac->timer, artpec6_crypto_timeout, 0);
   2888
   2889	ac->base = base;
   2890
   2891	ac->dma_cache = kmem_cache_create("artpec6_crypto_dma",
   2892		sizeof(struct artpec6_crypto_dma_descriptors),
   2893		64,
   2894		0,
   2895		NULL);
   2896	if (!ac->dma_cache)
   2897		return -ENOMEM;
   2898
   2899#ifdef CONFIG_DEBUG_FS
   2900	artpec6_crypto_init_debugfs();
   2901#endif
   2902
   2903	tasklet_init(&ac->task, artpec6_crypto_task,
   2904		     (unsigned long)ac);
   2905
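        	/*
        	 * The pad and zero buffers are used as scratch DMA sources and
        	 * sinks; allocate twice the maximum cache line size so they can
        	 * be aligned to a full cache line below.
        	 */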
   2906	ac->pad_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX,
   2907				      GFP_KERNEL);
   2908	if (!ac->pad_buffer)
   2909		return -ENOMEM;
   2910	ac->pad_buffer = PTR_ALIGN(ac->pad_buffer, ARTPEC_CACHE_LINE_MAX);
   2911
   2912	ac->zero_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX,
   2913				      GFP_KERNEL);
   2914	if (!ac->zero_buffer)
   2915		return -ENOMEM;
   2916	ac->zero_buffer = PTR_ALIGN(ac->zero_buffer, ARTPEC_CACHE_LINE_MAX);
   2917
   2918	err = init_crypto_hw(ac);
   2919	if (err)
   2920		goto free_cache;
   2921
   2922	err = devm_request_irq(&pdev->dev, irq, artpec6_crypto_irq, 0,
   2923			       "artpec6-crypto", ac);
   2924	if (err)
   2925		goto disable_hw;
   2926
   2927	artpec6_crypto_dev = &pdev->dev;
   2928
   2929	err = crypto_register_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
   2930	if (err) {
   2931		dev_err(dev, "Failed to register ahashes\n");
   2932		goto disable_hw;
   2933	}
   2934
   2935	err = crypto_register_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
   2936	if (err) {
   2937		dev_err(dev, "Failed to register ciphers\n");
   2938		goto unregister_ahashes;
   2939	}
   2940
   2941	err = crypto_register_aeads(aead_algos, ARRAY_SIZE(aead_algos));
   2942	if (err) {
   2943		dev_err(dev, "Failed to register aeads\n");
   2944		goto unregister_algs;
   2945	}
   2946
   2947	return 0;
   2948
   2949unregister_algs:
   2950	crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
   2951unregister_ahashes:
   2952	crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
   2953disable_hw:
   2954	artpec6_crypto_disable_hw(ac);
   2955free_cache:
   2956	kmem_cache_destroy(ac->dma_cache);
   2957	return err;
   2958}
   2959
   2960static int artpec6_crypto_remove(struct platform_device *pdev)
   2961{
   2962	struct artpec6_crypto *ac = platform_get_drvdata(pdev);
   2963	int irq = platform_get_irq(pdev, 0);
   2964
   2965	crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
   2966	crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
   2967	crypto_unregister_aeads(aead_algos, ARRAY_SIZE(aead_algos));
   2968
   2969	tasklet_disable(&ac->task);
   2970	devm_free_irq(&pdev->dev, irq, ac);
   2971	tasklet_kill(&ac->task);
   2972	del_timer_sync(&ac->timer);
   2973
   2974	artpec6_crypto_disable_hw(ac);
   2975
   2976	kmem_cache_destroy(ac->dma_cache);
   2977#ifdef CONFIG_DEBUG_FS
   2978	artpec6_crypto_free_debugfs();
   2979#endif
   2980	return 0;
   2981}
   2982
   2983static struct platform_driver artpec6_crypto_driver = {
   2984	.probe   = artpec6_crypto_probe,
   2985	.remove  = artpec6_crypto_remove,
   2986	.driver  = {
   2987		.name  = "artpec6-crypto",
   2988		.of_match_table = artpec6_crypto_of_match,
   2989	},
   2990};
   2991
   2992module_platform_driver(artpec6_crypto_driver);
   2993
   2994MODULE_AUTHOR("Axis Communications AB");
   2995MODULE_DESCRIPTION("ARTPEC-6 Crypto driver");
   2996MODULE_LICENSE("GPL");