cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

s5p-sss.c (60625B)


      1// SPDX-License-Identifier: GPL-2.0
      2//
      3// Cryptographic API.
      4//
      5// Support for Samsung S5PV210 and Exynos HW acceleration.
      6//
      7// Copyright (C) 2011 NetUP Inc. All rights reserved.
      8// Copyright (c) 2017 Samsung Electronics Co., Ltd. All rights reserved.
      9//
     10// Hash part based on omap-sham.c driver.
     11
     12#include <linux/clk.h>
     13#include <linux/crypto.h>
     14#include <linux/dma-mapping.h>
     15#include <linux/err.h>
     16#include <linux/errno.h>
     17#include <linux/init.h>
     18#include <linux/interrupt.h>
     19#include <linux/io.h>
     20#include <linux/kernel.h>
     21#include <linux/module.h>
     22#include <linux/of.h>
     23#include <linux/of_device.h>
     24#include <linux/platform_device.h>
     25#include <linux/scatterlist.h>
     26
     27#include <crypto/ctr.h>
     28#include <crypto/aes.h>
     29#include <crypto/algapi.h>
     30#include <crypto/scatterwalk.h>
     31
     32#include <crypto/hash.h>
     33#include <crypto/md5.h>
     34#include <crypto/sha1.h>
     35#include <crypto/sha2.h>
     36#include <crypto/internal/hash.h>
     37
     38#define _SBF(s, v)			((v) << (s))
     39
     40/* Feed control registers */
     41#define SSS_REG_FCINTSTAT		0x0000
     42#define SSS_FCINTSTAT_HPARTINT		BIT(7)
     43#define SSS_FCINTSTAT_HDONEINT		BIT(5)
     44#define SSS_FCINTSTAT_BRDMAINT		BIT(3)
     45#define SSS_FCINTSTAT_BTDMAINT		BIT(2)
     46#define SSS_FCINTSTAT_HRDMAINT		BIT(1)
     47#define SSS_FCINTSTAT_PKDMAINT		BIT(0)
     48
     49#define SSS_REG_FCINTENSET		0x0004
     50#define SSS_FCINTENSET_HPARTINTENSET	BIT(7)
     51#define SSS_FCINTENSET_HDONEINTENSET	BIT(5)
     52#define SSS_FCINTENSET_BRDMAINTENSET	BIT(3)
     53#define SSS_FCINTENSET_BTDMAINTENSET	BIT(2)
     54#define SSS_FCINTENSET_HRDMAINTENSET	BIT(1)
     55#define SSS_FCINTENSET_PKDMAINTENSET	BIT(0)
     56
     57#define SSS_REG_FCINTENCLR		0x0008
     58#define SSS_FCINTENCLR_HPARTINTENCLR	BIT(7)
     59#define SSS_FCINTENCLR_HDONEINTENCLR	BIT(5)
     60#define SSS_FCINTENCLR_BRDMAINTENCLR	BIT(3)
     61#define SSS_FCINTENCLR_BTDMAINTENCLR	BIT(2)
     62#define SSS_FCINTENCLR_HRDMAINTENCLR	BIT(1)
     63#define SSS_FCINTENCLR_PKDMAINTENCLR	BIT(0)
     64
     65#define SSS_REG_FCINTPEND		0x000C
     66#define SSS_FCINTPEND_HPARTINTP		BIT(7)
     67#define SSS_FCINTPEND_HDONEINTP		BIT(5)
     68#define SSS_FCINTPEND_BRDMAINTP		BIT(3)
     69#define SSS_FCINTPEND_BTDMAINTP		BIT(2)
     70#define SSS_FCINTPEND_HRDMAINTP		BIT(1)
     71#define SSS_FCINTPEND_PKDMAINTP		BIT(0)
     72
     73#define SSS_REG_FCFIFOSTAT		0x0010
     74#define SSS_FCFIFOSTAT_BRFIFOFUL	BIT(7)
     75#define SSS_FCFIFOSTAT_BRFIFOEMP	BIT(6)
     76#define SSS_FCFIFOSTAT_BTFIFOFUL	BIT(5)
     77#define SSS_FCFIFOSTAT_BTFIFOEMP	BIT(4)
     78#define SSS_FCFIFOSTAT_HRFIFOFUL	BIT(3)
     79#define SSS_FCFIFOSTAT_HRFIFOEMP	BIT(2)
     80#define SSS_FCFIFOSTAT_PKFIFOFUL	BIT(1)
     81#define SSS_FCFIFOSTAT_PKFIFOEMP	BIT(0)
     82
     83#define SSS_REG_FCFIFOCTRL		0x0014
     84#define SSS_FCFIFOCTRL_DESSEL		BIT(2)
     85#define SSS_HASHIN_INDEPENDENT		_SBF(0, 0x00)
     86#define SSS_HASHIN_CIPHER_INPUT		_SBF(0, 0x01)
     87#define SSS_HASHIN_CIPHER_OUTPUT	_SBF(0, 0x02)
     88#define SSS_HASHIN_MASK			_SBF(0, 0x03)
     89
     90#define SSS_REG_FCBRDMAS		0x0020
     91#define SSS_REG_FCBRDMAL		0x0024
     92#define SSS_REG_FCBRDMAC		0x0028
     93#define SSS_FCBRDMAC_BYTESWAP		BIT(1)
     94#define SSS_FCBRDMAC_FLUSH		BIT(0)
     95
     96#define SSS_REG_FCBTDMAS		0x0030
     97#define SSS_REG_FCBTDMAL		0x0034
     98#define SSS_REG_FCBTDMAC		0x0038
     99#define SSS_FCBTDMAC_BYTESWAP		BIT(1)
    100#define SSS_FCBTDMAC_FLUSH		BIT(0)
    101
    102#define SSS_REG_FCHRDMAS		0x0040
    103#define SSS_REG_FCHRDMAL		0x0044
    104#define SSS_REG_FCHRDMAC		0x0048
    105#define SSS_FCHRDMAC_BYTESWAP		BIT(1)
    106#define SSS_FCHRDMAC_FLUSH		BIT(0)
    107
    108#define SSS_REG_FCPKDMAS		0x0050
    109#define SSS_REG_FCPKDMAL		0x0054
    110#define SSS_REG_FCPKDMAC		0x0058
    111#define SSS_FCPKDMAC_BYTESWAP		BIT(3)
    112#define SSS_FCPKDMAC_DESCEND		BIT(2)
    113#define SSS_FCPKDMAC_TRANSMIT		BIT(1)
    114#define SSS_FCPKDMAC_FLUSH		BIT(0)
    115
    116#define SSS_REG_FCPKDMAO		0x005C
    117
    118/* AES registers */
    119#define SSS_REG_AES_CONTROL		0x00
    120#define SSS_AES_BYTESWAP_DI		BIT(11)
    121#define SSS_AES_BYTESWAP_DO		BIT(10)
    122#define SSS_AES_BYTESWAP_IV		BIT(9)
    123#define SSS_AES_BYTESWAP_CNT		BIT(8)
    124#define SSS_AES_BYTESWAP_KEY		BIT(7)
    125#define SSS_AES_KEY_CHANGE_MODE		BIT(6)
    126#define SSS_AES_KEY_SIZE_128		_SBF(4, 0x00)
    127#define SSS_AES_KEY_SIZE_192		_SBF(4, 0x01)
    128#define SSS_AES_KEY_SIZE_256		_SBF(4, 0x02)
    129#define SSS_AES_FIFO_MODE		BIT(3)
    130#define SSS_AES_CHAIN_MODE_ECB		_SBF(1, 0x00)
    131#define SSS_AES_CHAIN_MODE_CBC		_SBF(1, 0x01)
    132#define SSS_AES_CHAIN_MODE_CTR		_SBF(1, 0x02)
    133#define SSS_AES_MODE_DECRYPT		BIT(0)
    134
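/*
 * Illustrative sketch, not part of the driver: how an AES control word for
 * SSS_REG_AES_CONTROL is typically composed from the _SBF()/BIT() fields
 * above. The helper name is hypothetical and only shows the fields ORed
 * together; the driver builds the real value when starting a request.
 */
static inline u32 example_aes_cbc_decrypt_ctrl(void)
{
	/* e.g. 256-bit key, CBC chaining, decrypt direction */
	return SSS_AES_KEY_SIZE_256 | SSS_AES_CHAIN_MODE_CBC |
	       SSS_AES_MODE_DECRYPT;
}
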
    135#define SSS_REG_AES_STATUS		0x04
    136#define SSS_AES_BUSY			BIT(2)
    137#define SSS_AES_INPUT_READY		BIT(1)
    138#define SSS_AES_OUTPUT_READY		BIT(0)
    139
    140#define SSS_REG_AES_IN_DATA(s)		(0x10 + (s << 2))
    141#define SSS_REG_AES_OUT_DATA(s)		(0x20 + (s << 2))
    142#define SSS_REG_AES_IV_DATA(s)		(0x30 + (s << 2))
    143#define SSS_REG_AES_CNT_DATA(s)		(0x40 + (s << 2))
    144#define SSS_REG_AES_KEY_DATA(s)		(0x80 + (s << 2))
    145
    146#define SSS_REG(dev, reg)		((dev)->ioaddr + (SSS_REG_##reg))
    147#define SSS_READ(dev, reg)		__raw_readl(SSS_REG(dev, reg))
    148#define SSS_WRITE(dev, reg, val)	__raw_writel((val), SSS_REG(dev, reg))
    149
    150#define SSS_AES_REG(dev, reg)		((dev)->aes_ioaddr + SSS_REG_##reg)
    151#define SSS_AES_WRITE(dev, reg, val)    __raw_writel((val), \
    152						SSS_AES_REG(dev, reg))
    153
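/*
 * Illustrative note, not part of the driver: SSS_REG()/SSS_READ()/SSS_WRITE()
 * paste the short register name onto the SSS_REG_ prefix, so e.g.
 * SSS_WRITE(dev, FCINTPEND, val) expands to
 * __raw_writel(val, dev->ioaddr + SSS_REG_FCINTPEND), a write to the
 * feed-control interrupt-pending register at offset 0x000C.
 */
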
    154/* HW engine modes */
    155#define FLAGS_AES_DECRYPT		BIT(0)
    156#define FLAGS_AES_MODE_MASK		_SBF(1, 0x03)
    157#define FLAGS_AES_CBC			_SBF(1, 0x01)
    158#define FLAGS_AES_CTR			_SBF(1, 0x02)
    159
    160#define AES_KEY_LEN			16
    161#define CRYPTO_QUEUE_LEN		1
    162
    163/* HASH registers */
    164#define SSS_REG_HASH_CTRL		0x00
    165
    166#define SSS_HASH_USER_IV_EN		BIT(5)
    167#define SSS_HASH_INIT_BIT		BIT(4)
    168#define SSS_HASH_ENGINE_SHA1		_SBF(1, 0x00)
    169#define SSS_HASH_ENGINE_MD5		_SBF(1, 0x01)
    170#define SSS_HASH_ENGINE_SHA256		_SBF(1, 0x02)
    171
    172#define SSS_HASH_ENGINE_MASK		_SBF(1, 0x03)
    173
    174#define SSS_REG_HASH_CTRL_PAUSE		0x04
    175
    176#define SSS_HASH_PAUSE			BIT(0)
    177
    178#define SSS_REG_HASH_CTRL_FIFO		0x08
    179
    180#define SSS_HASH_FIFO_MODE_DMA		BIT(0)
    181#define SSS_HASH_FIFO_MODE_CPU          0
    182
    183#define SSS_REG_HASH_CTRL_SWAP		0x0C
    184
    185#define SSS_HASH_BYTESWAP_DI		BIT(3)
    186#define SSS_HASH_BYTESWAP_DO		BIT(2)
    187#define SSS_HASH_BYTESWAP_IV		BIT(1)
    188#define SSS_HASH_BYTESWAP_KEY		BIT(0)
    189
    190#define SSS_REG_HASH_STATUS		0x10
    191
    192#define SSS_HASH_STATUS_MSG_DONE	BIT(6)
    193#define SSS_HASH_STATUS_PARTIAL_DONE	BIT(4)
    194#define SSS_HASH_STATUS_BUFFER_READY	BIT(0)
    195
    196#define SSS_REG_HASH_MSG_SIZE_LOW	0x20
    197#define SSS_REG_HASH_MSG_SIZE_HIGH	0x24
    198
    199#define SSS_REG_HASH_PRE_MSG_SIZE_LOW	0x28
    200#define SSS_REG_HASH_PRE_MSG_SIZE_HIGH	0x2C
    201
    202#define SSS_REG_HASH_IV(s)		(0xB0 + ((s) << 2))
    203#define SSS_REG_HASH_OUT(s)		(0x100 + ((s) << 2))
    204
    205#define HASH_BLOCK_SIZE			64
    206#define HASH_REG_SIZEOF			4
    207#define HASH_MD5_MAX_REG		(MD5_DIGEST_SIZE / HASH_REG_SIZEOF)
    208#define HASH_SHA1_MAX_REG		(SHA1_DIGEST_SIZE / HASH_REG_SIZEOF)
    209#define HASH_SHA256_MAX_REG		(SHA256_DIGEST_SIZE / HASH_REG_SIZEOF)
    210
    211/*
     212 * HASH bit numbers, used by the device, set in dev->hash_flags with
     213 * set_bit()/clear_bit() or tested with test_bit() or BIT(), to keep the
     214 * HASH state BUSY or FREE, or to signal state from the irq handler to
     215 * hash_tasklet. The SGS bits track memory allocated for the scatterlist.
    216 */
    217#define HASH_FLAGS_BUSY		0
    218#define HASH_FLAGS_FINAL	1
    219#define HASH_FLAGS_DMA_ACTIVE	2
    220#define HASH_FLAGS_OUTPUT_READY	3
    221#define HASH_FLAGS_DMA_READY	4
    222#define HASH_FLAGS_SGS_COPIED	5
    223#define HASH_FLAGS_SGS_ALLOCED	6
    224
    225/* HASH HW constants */
    226#define BUFLEN			HASH_BLOCK_SIZE
    227
    228#define SSS_HASH_DMA_LEN_ALIGN	8
    229#define SSS_HASH_DMA_ALIGN_MASK	(SSS_HASH_DMA_LEN_ALIGN - 1)
    230
    231#define SSS_HASH_QUEUE_LENGTH	10
    232
    233/**
    234 * struct samsung_aes_variant - platform specific SSS driver data
    235 * @aes_offset: AES register offset from SSS module's base.
    236 * @hash_offset: HASH register offset from SSS module's base.
    237 * @clk_names: names of clocks needed to run SSS IP
    238 *
    239 * Specifies platform specific configuration of SSS module.
    240 * Note: A structure for driver specific platform data is used for future
    241 * expansion of its usage.
    242 */
    243struct samsung_aes_variant {
    244	unsigned int			aes_offset;
    245	unsigned int			hash_offset;
    246	const char			*clk_names[2];
    247};
    248
    249struct s5p_aes_reqctx {
    250	unsigned long			mode;
    251};
    252
    253struct s5p_aes_ctx {
    254	struct s5p_aes_dev		*dev;
    255
    256	u8				aes_key[AES_MAX_KEY_SIZE];
    257	u8				nonce[CTR_RFC3686_NONCE_SIZE];
    258	int				keylen;
    259};
    260
    261/**
    262 * struct s5p_aes_dev - Crypto device state container
    263 * @dev:	Associated device
    264 * @clk:	Clock for accessing hardware
    265 * @pclk:	APB bus clock necessary to access the hardware
    266 * @ioaddr:	Mapped IO memory region
     267 * @aes_ioaddr:	Per-variant offset for AES block IO memory
    268 * @irq_fc:	Feed control interrupt line
    269 * @req:	Crypto request currently handled by the device
    270 * @ctx:	Configuration for currently handled crypto request
    271 * @sg_src:	Scatter list with source data for currently handled block
    272 *		in device.  This is DMA-mapped into device.
    273 * @sg_dst:	Scatter list with destination data for currently handled block
    274 *		in device. This is DMA-mapped into device.
    275 * @sg_src_cpy:	In case of unaligned access, copied scatter list
    276 *		with source data.
    277 * @sg_dst_cpy:	In case of unaligned access, copied scatter list
    278 *		with destination data.
     279 * @tasklet:	New request scheduling job
    280 * @queue:	Crypto queue
    281 * @busy:	Indicates whether the device is currently handling some request
    282 *		thus it uses some of the fields from this state, like:
    283 *		req, ctx, sg_src/dst (and copies).  This essentially
    284 *		protects against concurrent access to these fields.
    285 * @lock:	Lock for protecting both access to device hardware registers
    286 *		and fields related to current request (including the busy field).
    287 * @res:	Resources for hash.
    288 * @io_hash_base: Per-variant offset for HASH block IO memory.
    289 * @hash_lock:	Lock for protecting hash_req, hash_queue and hash_flags
    290 *		variable.
    291 * @hash_flags:	Flags for current HASH op.
    292 * @hash_queue:	Async hash queue.
    293 * @hash_tasklet: New HASH request scheduling job.
    294 * @xmit_buf:	Buffer for current HASH request transfer into SSS block.
     295 * @hash_req:	Current request being sent to the SSS HASH block.
    296 * @hash_sg_iter: Scatterlist transferred through DMA into SSS HASH block.
    297 * @hash_sg_cnt: Counter for hash_sg_iter.
    298 *
    299 * @use_hash:	true if HASH algs enabled
    300 */
    301struct s5p_aes_dev {
    302	struct device			*dev;
    303	struct clk			*clk;
    304	struct clk			*pclk;
    305	void __iomem			*ioaddr;
    306	void __iomem			*aes_ioaddr;
    307	int				irq_fc;
    308
    309	struct skcipher_request		*req;
    310	struct s5p_aes_ctx		*ctx;
    311	struct scatterlist		*sg_src;
    312	struct scatterlist		*sg_dst;
    313
    314	struct scatterlist		*sg_src_cpy;
    315	struct scatterlist		*sg_dst_cpy;
    316
    317	struct tasklet_struct		tasklet;
    318	struct crypto_queue		queue;
    319	bool				busy;
    320	spinlock_t			lock;
    321
    322	struct resource			*res;
    323	void __iomem			*io_hash_base;
    324
    325	spinlock_t			hash_lock; /* protect hash_ vars */
    326	unsigned long			hash_flags;
    327	struct crypto_queue		hash_queue;
    328	struct tasklet_struct		hash_tasklet;
    329
    330	u8				xmit_buf[BUFLEN];
    331	struct ahash_request		*hash_req;
    332	struct scatterlist		*hash_sg_iter;
    333	unsigned int			hash_sg_cnt;
    334
    335	bool				use_hash;
    336};
    337
    338/**
    339 * struct s5p_hash_reqctx - HASH request context
    340 * @dd:		Associated device
    341 * @op_update:	Current request operation (OP_UPDATE or OP_FINAL)
    342 * @digcnt:	Number of bytes processed by HW (without buffer[] ones)
    343 * @digest:	Digest message or IV for partial result
    344 * @nregs:	Number of HW registers for digest or IV read/write
    345 * @engine:	Bits for selecting type of HASH in SSS block
    346 * @sg:		sg for DMA transfer
    347 * @sg_len:	Length of sg for DMA transfer
    348 * @sgl:	sg for joining buffer and req->src scatterlist
    349 * @skip:	Skip offset in req->src for current op
    350 * @total:	Total number of bytes for current request
    351 * @finup:	Keep state for finup or final.
    352 * @error:	Keep track of error.
     353 * @bufcnt:	Number of bytes held in buffer[]
    354 * @buffer:	For byte(s) from end of req->src in UPDATE op
    355 */
    356struct s5p_hash_reqctx {
    357	struct s5p_aes_dev	*dd;
    358	bool			op_update;
    359
    360	u64			digcnt;
    361	u8			digest[SHA256_DIGEST_SIZE];
    362
    363	unsigned int		nregs; /* digest_size / sizeof(reg) */
    364	u32			engine;
    365
    366	struct scatterlist	*sg;
    367	unsigned int		sg_len;
    368	struct scatterlist	sgl[2];
    369	unsigned int		skip;
    370	unsigned int		total;
    371	bool			finup;
    372	bool			error;
    373
    374	u32			bufcnt;
    375	u8			buffer[];
    376};
    377
    378/**
    379 * struct s5p_hash_ctx - HASH transformation context
    380 * @dd:		Associated device
    381 * @flags:	Bits for algorithm HASH.
    382 * @fallback:	Software transformation for zero message or size < BUFLEN.
    383 */
    384struct s5p_hash_ctx {
    385	struct s5p_aes_dev	*dd;
    386	unsigned long		flags;
    387	struct crypto_shash	*fallback;
    388};
    389
    390static const struct samsung_aes_variant s5p_aes_data = {
    391	.aes_offset	= 0x4000,
    392	.hash_offset	= 0x6000,
    393	.clk_names	= { "secss", },
    394};
    395
    396static const struct samsung_aes_variant exynos_aes_data = {
    397	.aes_offset	= 0x200,
    398	.hash_offset	= 0x400,
    399	.clk_names	= { "secss", },
    400};
    401
    402static const struct samsung_aes_variant exynos5433_slim_aes_data = {
    403	.aes_offset	= 0x400,
    404	.hash_offset	= 0x800,
    405	.clk_names	= { "aclk", "pclk", },
    406};
    407
    408static const struct of_device_id s5p_sss_dt_match[] = {
    409	{
    410		.compatible = "samsung,s5pv210-secss",
    411		.data = &s5p_aes_data,
    412	},
    413	{
    414		.compatible = "samsung,exynos4210-secss",
    415		.data = &exynos_aes_data,
    416	},
    417	{
    418		.compatible = "samsung,exynos5433-slim-sss",
    419		.data = &exynos5433_slim_aes_data,
    420	},
    421	{ },
    422};
    423MODULE_DEVICE_TABLE(of, s5p_sss_dt_match);
    424
    425static inline const struct samsung_aes_variant *find_s5p_sss_version
    426				   (const struct platform_device *pdev)
    427{
    428	if (IS_ENABLED(CONFIG_OF) && (pdev->dev.of_node))
    429		return of_device_get_match_data(&pdev->dev);
    430
    431	return (const struct samsung_aes_variant *)
    432			platform_get_device_id(pdev)->driver_data;
    433}
    434
    435static struct s5p_aes_dev *s5p_dev;
    436
    437static void s5p_set_dma_indata(struct s5p_aes_dev *dev,
    438			       const struct scatterlist *sg)
    439{
    440	SSS_WRITE(dev, FCBRDMAS, sg_dma_address(sg));
    441	SSS_WRITE(dev, FCBRDMAL, sg_dma_len(sg));
    442}
    443
    444static void s5p_set_dma_outdata(struct s5p_aes_dev *dev,
    445				const struct scatterlist *sg)
    446{
    447	SSS_WRITE(dev, FCBTDMAS, sg_dma_address(sg));
    448	SSS_WRITE(dev, FCBTDMAL, sg_dma_len(sg));
    449}
    450
    451static void s5p_free_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist **sg)
    452{
    453	int len;
    454
    455	if (!*sg)
    456		return;
    457
    458	len = ALIGN(dev->req->cryptlen, AES_BLOCK_SIZE);
    459	free_pages((unsigned long)sg_virt(*sg), get_order(len));
    460
    461	kfree(*sg);
    462	*sg = NULL;
    463}
    464
    465static void s5p_sg_copy_buf(void *buf, struct scatterlist *sg,
    466			    unsigned int nbytes, int out)
    467{
    468	struct scatter_walk walk;
    469
    470	if (!nbytes)
    471		return;
    472
    473	scatterwalk_start(&walk, sg);
    474	scatterwalk_copychunks(buf, &walk, nbytes, out);
    475	scatterwalk_done(&walk, out, 0);
    476}
    477
    478static void s5p_sg_done(struct s5p_aes_dev *dev)
    479{
    480	struct skcipher_request *req = dev->req;
    481	struct s5p_aes_reqctx *reqctx = skcipher_request_ctx(req);
    482
    483	if (dev->sg_dst_cpy) {
    484		dev_dbg(dev->dev,
    485			"Copying %d bytes of output data back to original place\n",
    486			dev->req->cryptlen);
    487		s5p_sg_copy_buf(sg_virt(dev->sg_dst_cpy), dev->req->dst,
    488				dev->req->cryptlen, 1);
    489	}
    490	s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
    491	s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
    492	if (reqctx->mode & FLAGS_AES_CBC)
    493		memcpy_fromio(req->iv, dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), AES_BLOCK_SIZE);
    494
    495	else if (reqctx->mode & FLAGS_AES_CTR)
    496		memcpy_fromio(req->iv, dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0), AES_BLOCK_SIZE);
    497}
    498
     499/* Calls the completion. Cannot be called with dev->lock held. */
    500static void s5p_aes_complete(struct skcipher_request *req, int err)
    501{
    502	req->base.complete(&req->base, err);
    503}
    504
    505static void s5p_unset_outdata(struct s5p_aes_dev *dev)
    506{
    507	dma_unmap_sg(dev->dev, dev->sg_dst, 1, DMA_FROM_DEVICE);
    508}
    509
    510static void s5p_unset_indata(struct s5p_aes_dev *dev)
    511{
    512	dma_unmap_sg(dev->dev, dev->sg_src, 1, DMA_TO_DEVICE);
    513}
    514
    515static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src,
    516			   struct scatterlist **dst)
    517{
    518	void *pages;
    519	int len;
    520
    521	*dst = kmalloc(sizeof(**dst), GFP_ATOMIC);
    522	if (!*dst)
    523		return -ENOMEM;
    524
    525	len = ALIGN(dev->req->cryptlen, AES_BLOCK_SIZE);
    526	pages = (void *)__get_free_pages(GFP_ATOMIC, get_order(len));
    527	if (!pages) {
    528		kfree(*dst);
    529		*dst = NULL;
    530		return -ENOMEM;
    531	}
    532
    533	s5p_sg_copy_buf(pages, src, dev->req->cryptlen, 0);
    534
    535	sg_init_table(*dst, 1);
    536	sg_set_buf(*dst, pages, len);
    537
    538	return 0;
    539}
    540
    541static int s5p_set_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
    542{
    543	if (!sg->length)
    544		return -EINVAL;
    545
    546	if (!dma_map_sg(dev->dev, sg, 1, DMA_FROM_DEVICE))
    547		return -ENOMEM;
    548
    549	dev->sg_dst = sg;
    550
    551	return 0;
    552}
    553
    554static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
    555{
    556	if (!sg->length)
    557		return -EINVAL;
    558
    559	if (!dma_map_sg(dev->dev, sg, 1, DMA_TO_DEVICE))
    560		return -ENOMEM;
    561
    562	dev->sg_src = sg;
    563
    564	return 0;
    565}
    566
    567/*
    568 * Returns -ERRNO on error (mapping of new data failed).
    569 * On success returns:
    570 *  - 0 if there is no more data,
    571 *  - 1 if new transmitting (output) data is ready and its address+length
    572 *     have to be written to device (by calling s5p_set_dma_outdata()).
    573 */
    574static int s5p_aes_tx(struct s5p_aes_dev *dev)
    575{
    576	int ret = 0;
    577
    578	s5p_unset_outdata(dev);
    579
    580	if (!sg_is_last(dev->sg_dst)) {
    581		ret = s5p_set_outdata(dev, sg_next(dev->sg_dst));
    582		if (!ret)
    583			ret = 1;
    584	}
    585
    586	return ret;
    587}
    588
    589/*
    590 * Returns -ERRNO on error (mapping of new data failed).
    591 * On success returns:
    592 *  - 0 if there is no more data,
    593 *  - 1 if new receiving (input) data is ready and its address+length
    594 *     have to be written to device (by calling s5p_set_dma_indata()).
    595 */
    596static int s5p_aes_rx(struct s5p_aes_dev *dev/*, bool *set_dma*/)
    597{
    598	int ret = 0;
    599
    600	s5p_unset_indata(dev);
    601
    602	if (!sg_is_last(dev->sg_src)) {
    603		ret = s5p_set_indata(dev, sg_next(dev->sg_src));
    604		if (!ret)
    605			ret = 1;
    606	}
    607
    608	return ret;
    609}
    610
    611static inline u32 s5p_hash_read(struct s5p_aes_dev *dd, u32 offset)
    612{
    613	return __raw_readl(dd->io_hash_base + offset);
    614}
    615
    616static inline void s5p_hash_write(struct s5p_aes_dev *dd,
    617				  u32 offset, u32 value)
    618{
    619	__raw_writel(value, dd->io_hash_base + offset);
    620}
    621
    622/**
    623 * s5p_set_dma_hashdata() - start DMA with sg
    624 * @dev:	device
    625 * @sg:		scatterlist ready to DMA transmit
    626 */
    627static void s5p_set_dma_hashdata(struct s5p_aes_dev *dev,
    628				 const struct scatterlist *sg)
    629{
    630	dev->hash_sg_cnt--;
    631	SSS_WRITE(dev, FCHRDMAS, sg_dma_address(sg));
    632	SSS_WRITE(dev, FCHRDMAL, sg_dma_len(sg)); /* DMA starts */
    633}
    634
    635/**
    636 * s5p_hash_rx() - get next hash_sg_iter
    637 * @dev:	device
    638 *
    639 * Return:
    640 * 2	if there is no more data and it is UPDATE op
    641 * 1	if new receiving (input) data is ready and can be written to device
    642 * 0	if there is no more data and it is FINAL op
    643 */
    644static int s5p_hash_rx(struct s5p_aes_dev *dev)
    645{
    646	if (dev->hash_sg_cnt > 0) {
    647		dev->hash_sg_iter = sg_next(dev->hash_sg_iter);
    648		return 1;
    649	}
    650
    651	set_bit(HASH_FLAGS_DMA_READY, &dev->hash_flags);
    652	if (test_bit(HASH_FLAGS_FINAL, &dev->hash_flags))
    653		return 0;
    654
    655	return 2;
    656}
    657
    658static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
    659{
    660	struct platform_device *pdev = dev_id;
    661	struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
    662	struct skcipher_request *req;
    663	int err_dma_tx = 0;
    664	int err_dma_rx = 0;
    665	int err_dma_hx = 0;
    666	bool tx_end = false;
    667	bool hx_end = false;
    668	unsigned long flags;
    669	u32 status, st_bits;
    670	int err;
    671
    672	spin_lock_irqsave(&dev->lock, flags);
    673
    674	/*
    675	 * Handle rx or tx interrupt. If there is still data (scatterlist did not
    676	 * reach end), then map next scatterlist entry.
    677	 * In case of such mapping error, s5p_aes_complete() should be called.
    678	 *
    679	 * If there is no more data in tx scatter list, call s5p_aes_complete()
    680	 * and schedule new tasklet.
    681	 *
    682	 * Handle hx interrupt. If there is still data map next entry.
    683	 */
    684	status = SSS_READ(dev, FCINTSTAT);
    685	if (status & SSS_FCINTSTAT_BRDMAINT)
    686		err_dma_rx = s5p_aes_rx(dev);
    687
    688	if (status & SSS_FCINTSTAT_BTDMAINT) {
    689		if (sg_is_last(dev->sg_dst))
    690			tx_end = true;
    691		err_dma_tx = s5p_aes_tx(dev);
    692	}
    693
    694	if (status & SSS_FCINTSTAT_HRDMAINT)
    695		err_dma_hx = s5p_hash_rx(dev);
    696
    697	st_bits = status & (SSS_FCINTSTAT_BRDMAINT | SSS_FCINTSTAT_BTDMAINT |
    698				SSS_FCINTSTAT_HRDMAINT);
    699	/* clear DMA bits */
    700	SSS_WRITE(dev, FCINTPEND, st_bits);
    701
    702	/* clear HASH irq bits */
    703	if (status & (SSS_FCINTSTAT_HDONEINT | SSS_FCINTSTAT_HPARTINT)) {
    704		/* cannot have both HPART and HDONE */
    705		if (status & SSS_FCINTSTAT_HPARTINT)
    706			st_bits = SSS_HASH_STATUS_PARTIAL_DONE;
    707
    708		if (status & SSS_FCINTSTAT_HDONEINT)
    709			st_bits = SSS_HASH_STATUS_MSG_DONE;
    710
    711		set_bit(HASH_FLAGS_OUTPUT_READY, &dev->hash_flags);
    712		s5p_hash_write(dev, SSS_REG_HASH_STATUS, st_bits);
    713		hx_end = true;
    714		/* when DONE or PART, do not handle HASH DMA */
    715		err_dma_hx = 0;
    716	}
    717
    718	if (err_dma_rx < 0) {
    719		err = err_dma_rx;
    720		goto error;
    721	}
    722	if (err_dma_tx < 0) {
    723		err = err_dma_tx;
    724		goto error;
    725	}
    726
    727	if (tx_end) {
    728		s5p_sg_done(dev);
    729		if (err_dma_hx == 1)
    730			s5p_set_dma_hashdata(dev, dev->hash_sg_iter);
    731
    732		spin_unlock_irqrestore(&dev->lock, flags);
    733
    734		s5p_aes_complete(dev->req, 0);
    735		/* Device is still busy */
    736		tasklet_schedule(&dev->tasklet);
    737	} else {
    738		/*
    739		 * Writing length of DMA block (either receiving or
    740		 * transmitting) will start the operation immediately, so this
    741		 * should be done at the end (even after clearing pending
    742		 * interrupts to not miss the interrupt).
    743		 */
    744		if (err_dma_tx == 1)
    745			s5p_set_dma_outdata(dev, dev->sg_dst);
    746		if (err_dma_rx == 1)
    747			s5p_set_dma_indata(dev, dev->sg_src);
    748		if (err_dma_hx == 1)
    749			s5p_set_dma_hashdata(dev, dev->hash_sg_iter);
    750
    751		spin_unlock_irqrestore(&dev->lock, flags);
    752	}
    753
    754	goto hash_irq_end;
    755
    756error:
    757	s5p_sg_done(dev);
    758	dev->busy = false;
    759	req = dev->req;
    760	if (err_dma_hx == 1)
    761		s5p_set_dma_hashdata(dev, dev->hash_sg_iter);
    762
    763	spin_unlock_irqrestore(&dev->lock, flags);
    764	s5p_aes_complete(req, err);
    765
    766hash_irq_end:
    767	/*
    768	 * Note about else if:
    769	 *   when hash_sg_iter reaches end and its UPDATE op,
    770	 *   issue SSS_HASH_PAUSE and wait for HPART irq
    771	 */
    772	if (hx_end)
    773		tasklet_schedule(&dev->hash_tasklet);
    774	else if (err_dma_hx == 2)
    775		s5p_hash_write(dev, SSS_REG_HASH_CTRL_PAUSE,
    776			       SSS_HASH_PAUSE);
    777
    778	return IRQ_HANDLED;
    779}
    780
    781/**
    782 * s5p_hash_read_msg() - read message or IV from HW
    783 * @req:	AHASH request
    784 */
    785static void s5p_hash_read_msg(struct ahash_request *req)
    786{
    787	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
    788	struct s5p_aes_dev *dd = ctx->dd;
    789	u32 *hash = (u32 *)ctx->digest;
    790	unsigned int i;
    791
    792	for (i = 0; i < ctx->nregs; i++)
    793		hash[i] = s5p_hash_read(dd, SSS_REG_HASH_OUT(i));
    794}
    795
    796/**
    797 * s5p_hash_write_ctx_iv() - write IV for next partial/finup op.
    798 * @dd:		device
    799 * @ctx:	request context
    800 */
    801static void s5p_hash_write_ctx_iv(struct s5p_aes_dev *dd,
    802				  const struct s5p_hash_reqctx *ctx)
    803{
    804	const u32 *hash = (const u32 *)ctx->digest;
    805	unsigned int i;
    806
    807	for (i = 0; i < ctx->nregs; i++)
    808		s5p_hash_write(dd, SSS_REG_HASH_IV(i), hash[i]);
    809}
    810
    811/**
    812 * s5p_hash_write_iv() - write IV for next partial/finup op.
    813 * @req:	AHASH request
    814 */
    815static void s5p_hash_write_iv(struct ahash_request *req)
    816{
    817	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
    818
    819	s5p_hash_write_ctx_iv(ctx->dd, ctx);
    820}
    821
    822/**
    823 * s5p_hash_copy_result() - copy digest into req->result
    824 * @req:	AHASH request
    825 */
    826static void s5p_hash_copy_result(struct ahash_request *req)
    827{
    828	const struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
    829
    830	if (!req->result)
    831		return;
    832
    833	memcpy(req->result, ctx->digest, ctx->nregs * HASH_REG_SIZEOF);
    834}
    835
    836/**
    837 * s5p_hash_dma_flush() - flush HASH DMA
    838 * @dev:	secss device
    839 */
    840static void s5p_hash_dma_flush(struct s5p_aes_dev *dev)
    841{
    842	SSS_WRITE(dev, FCHRDMAC, SSS_FCHRDMAC_FLUSH);
    843}
    844
    845/**
    846 * s5p_hash_dma_enable() - enable DMA mode for HASH
    847 * @dev:	secss device
    848 *
    849 * enable DMA mode for HASH
    850 */
    851static void s5p_hash_dma_enable(struct s5p_aes_dev *dev)
    852{
    853	s5p_hash_write(dev, SSS_REG_HASH_CTRL_FIFO, SSS_HASH_FIFO_MODE_DMA);
    854}
    855
    856/**
    857 * s5p_hash_irq_disable() - disable irq HASH signals
    858 * @dev:	secss device
    859 * @flags:	bitfield with irq's to be disabled
    860 */
    861static void s5p_hash_irq_disable(struct s5p_aes_dev *dev, u32 flags)
    862{
    863	SSS_WRITE(dev, FCINTENCLR, flags);
    864}
    865
    866/**
    867 * s5p_hash_irq_enable() - enable irq signals
    868 * @dev:	secss device
    869 * @flags:	bitfield with irq's to be enabled
    870 */
    871static void s5p_hash_irq_enable(struct s5p_aes_dev *dev, int flags)
    872{
    873	SSS_WRITE(dev, FCINTENSET, flags);
    874}
    875
    876/**
    877 * s5p_hash_set_flow() - set flow inside SecSS AES/DES with/without HASH
    878 * @dev:	secss device
    879 * @hashflow:	HASH stream flow with/without crypto AES/DES
    880 */
    881static void s5p_hash_set_flow(struct s5p_aes_dev *dev, u32 hashflow)
    882{
    883	unsigned long flags;
    884	u32 flow;
    885
    886	spin_lock_irqsave(&dev->lock, flags);
    887
    888	flow = SSS_READ(dev, FCFIFOCTRL);
    889	flow &= ~SSS_HASHIN_MASK;
    890	flow |= hashflow;
    891	SSS_WRITE(dev, FCFIFOCTRL, flow);
    892
    893	spin_unlock_irqrestore(&dev->lock, flags);
    894}
    895
    896/**
    897 * s5p_ahash_dma_init() - enable DMA and set HASH flow inside SecSS
    898 * @dev:	secss device
    899 * @hashflow:	HASH stream flow with/without AES/DES
    900 *
    901 * flush HASH DMA and enable DMA, set HASH stream flow inside SecSS HW,
    902 * enable HASH irq's HRDMA, HDONE, HPART
    903 */
    904static void s5p_ahash_dma_init(struct s5p_aes_dev *dev, u32 hashflow)
    905{
    906	s5p_hash_irq_disable(dev, SSS_FCINTENCLR_HRDMAINTENCLR |
    907			     SSS_FCINTENCLR_HDONEINTENCLR |
    908			     SSS_FCINTENCLR_HPARTINTENCLR);
    909	s5p_hash_dma_flush(dev);
    910
    911	s5p_hash_dma_enable(dev);
    912	s5p_hash_set_flow(dev, hashflow & SSS_HASHIN_MASK);
    913	s5p_hash_irq_enable(dev, SSS_FCINTENSET_HRDMAINTENSET |
    914			    SSS_FCINTENSET_HDONEINTENSET |
    915			    SSS_FCINTENSET_HPARTINTENSET);
    916}
    917
    918/**
    919 * s5p_hash_write_ctrl() - prepare HASH block in SecSS for processing
    920 * @dd:		secss device
    921 * @length:	length for request
    922 * @final:	true if final op
    923 *
    924 * Prepare SSS HASH block for processing bytes in DMA mode. If it is called
    925 * after previous updates, fill up IV words. For final, calculate and set
     926 * lengths for HASH so SecSS can finalize the hash. For partial, set the
     927 * SSS HASH length to 2^63 so it is never reached, and set prelow and
     928 * prehigh to zero.
    929 *
    930 * This function does not start DMA transfer.
    931 */
    932static void s5p_hash_write_ctrl(struct s5p_aes_dev *dd, size_t length,
    933				bool final)
    934{
    935	struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
    936	u32 prelow, prehigh, low, high;
    937	u32 configflags, swapflags;
    938	u64 tmplen;
    939
    940	configflags = ctx->engine | SSS_HASH_INIT_BIT;
    941
    942	if (likely(ctx->digcnt)) {
    943		s5p_hash_write_ctx_iv(dd, ctx);
    944		configflags |= SSS_HASH_USER_IV_EN;
    945	}
    946
    947	if (final) {
    948		/* number of bytes for last part */
    949		low = length;
    950		high = 0;
    951		/* total number of bits prev hashed */
    952		tmplen = ctx->digcnt * 8;
    953		prelow = (u32)tmplen;
    954		prehigh = (u32)(tmplen >> 32);
    955	} else {
    956		prelow = 0;
    957		prehigh = 0;
    958		low = 0;
    959		high = BIT(31);
    960	}
    961
    962	swapflags = SSS_HASH_BYTESWAP_DI | SSS_HASH_BYTESWAP_DO |
    963		    SSS_HASH_BYTESWAP_IV | SSS_HASH_BYTESWAP_KEY;
    964
    965	s5p_hash_write(dd, SSS_REG_HASH_MSG_SIZE_LOW, low);
    966	s5p_hash_write(dd, SSS_REG_HASH_MSG_SIZE_HIGH, high);
    967	s5p_hash_write(dd, SSS_REG_HASH_PRE_MSG_SIZE_LOW, prelow);
    968	s5p_hash_write(dd, SSS_REG_HASH_PRE_MSG_SIZE_HIGH, prehigh);
    969
    970	s5p_hash_write(dd, SSS_REG_HASH_CTRL_SWAP, swapflags);
    971	s5p_hash_write(dd, SSS_REG_HASH_CTRL, configflags);
    972}
    973
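/*
 * Worked example, illustrative and not part of the driver: for a final op
 * after two full 64-byte blocks were already hashed (ctx->digcnt == 128) and
 * 20 more bytes are sent in this pass, the code above programs
 * MSG_SIZE_LOW = 20 (bytes in the last part), MSG_SIZE_HIGH = 0,
 * PRE_MSG_SIZE_LOW = 128 * 8 = 1024 (bits already hashed) and
 * PRE_MSG_SIZE_HIGH = 0. For a partial (non-final) op the message size is
 * instead programmed as 2^63 (MSG_SIZE_HIGH = BIT(31), MSG_SIZE_LOW = 0), so
 * it is never reached and the engine pauses rather than finalizing.
 */
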
    974/**
    975 * s5p_hash_xmit_dma() - start DMA hash processing
    976 * @dd:		secss device
    977 * @length:	length for request
    978 * @final:	true if final op
    979 *
    980 * Update digcnt here, as it is needed for finup/final op.
    981 */
    982static int s5p_hash_xmit_dma(struct s5p_aes_dev *dd, size_t length,
    983			     bool final)
    984{
    985	struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
    986	unsigned int cnt;
    987
    988	cnt = dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
    989	if (!cnt) {
    990		dev_err(dd->dev, "dma_map_sg error\n");
    991		ctx->error = true;
    992		return -EINVAL;
    993	}
    994
    995	set_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags);
    996	dd->hash_sg_iter = ctx->sg;
    997	dd->hash_sg_cnt = cnt;
    998	s5p_hash_write_ctrl(dd, length, final);
    999	ctx->digcnt += length;
   1000	ctx->total -= length;
   1001
   1002	/* catch last interrupt */
   1003	if (final)
   1004		set_bit(HASH_FLAGS_FINAL, &dd->hash_flags);
   1005
   1006	s5p_set_dma_hashdata(dd, dd->hash_sg_iter); /* DMA starts */
   1007
   1008	return -EINPROGRESS;
   1009}
   1010
   1011/**
   1012 * s5p_hash_copy_sgs() - copy request's bytes into new buffer
   1013 * @ctx:	request context
   1014 * @sg:		source scatterlist request
   1015 * @new_len:	number of bytes to process from sg
   1016 *
   1017 * Allocate new buffer, copy data for HASH into it. If there was xmit_buf
   1018 * filled, copy it first, then copy data from sg into it. Prepare one sgl[0]
   1019 * with allocated buffer.
   1020 *
    1021 * Set a bit in dd->hash_flags so we can free it after irq ends processing.
   1022 */
   1023static int s5p_hash_copy_sgs(struct s5p_hash_reqctx *ctx,
   1024			     struct scatterlist *sg, unsigned int new_len)
   1025{
   1026	unsigned int pages, len;
   1027	void *buf;
   1028
   1029	len = new_len + ctx->bufcnt;
   1030	pages = get_order(len);
   1031
   1032	buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
   1033	if (!buf) {
   1034		dev_err(ctx->dd->dev, "alloc pages for unaligned case.\n");
   1035		ctx->error = true;
   1036		return -ENOMEM;
   1037	}
   1038
   1039	if (ctx->bufcnt)
   1040		memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);
   1041
   1042	scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->skip,
   1043				 new_len, 0);
   1044	sg_init_table(ctx->sgl, 1);
   1045	sg_set_buf(ctx->sgl, buf, len);
   1046	ctx->sg = ctx->sgl;
   1047	ctx->sg_len = 1;
   1048	ctx->bufcnt = 0;
   1049	ctx->skip = 0;
   1050	set_bit(HASH_FLAGS_SGS_COPIED, &ctx->dd->hash_flags);
   1051
   1052	return 0;
   1053}
   1054
   1055/**
   1056 * s5p_hash_copy_sg_lists() - copy sg list and make fixes in copy
   1057 * @ctx:	request context
   1058 * @sg:		source scatterlist request
   1059 * @new_len:	number of bytes to process from sg
   1060 *
   1061 * Allocate new scatterlist table, copy data for HASH into it. If there was
   1062 * xmit_buf filled, prepare it first, then copy page, length and offset from
   1063 * source sg into it, adjusting begin and/or end for skip offset and
   1064 * hash_later value.
   1065 *
   1066 * Resulting sg table will be assigned to ctx->sg. Set flag so we can free
   1067 * it after irq ends processing.
   1068 */
   1069static int s5p_hash_copy_sg_lists(struct s5p_hash_reqctx *ctx,
   1070				  struct scatterlist *sg, unsigned int new_len)
   1071{
   1072	unsigned int skip = ctx->skip, n = sg_nents(sg);
   1073	struct scatterlist *tmp;
   1074	unsigned int len;
   1075
   1076	if (ctx->bufcnt)
   1077		n++;
   1078
   1079	ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
   1080	if (!ctx->sg) {
   1081		ctx->error = true;
   1082		return -ENOMEM;
   1083	}
   1084
   1085	sg_init_table(ctx->sg, n);
   1086
   1087	tmp = ctx->sg;
   1088
   1089	ctx->sg_len = 0;
   1090
   1091	if (ctx->bufcnt) {
   1092		sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt);
   1093		tmp = sg_next(tmp);
   1094		ctx->sg_len++;
   1095	}
   1096
   1097	while (sg && skip >= sg->length) {
   1098		skip -= sg->length;
   1099		sg = sg_next(sg);
   1100	}
   1101
   1102	while (sg && new_len) {
   1103		len = sg->length - skip;
   1104		if (new_len < len)
   1105			len = new_len;
   1106
   1107		new_len -= len;
   1108		sg_set_page(tmp, sg_page(sg), len, sg->offset + skip);
   1109		skip = 0;
   1110		if (new_len <= 0)
   1111			sg_mark_end(tmp);
   1112
   1113		tmp = sg_next(tmp);
   1114		ctx->sg_len++;
   1115		sg = sg_next(sg);
   1116	}
   1117
   1118	set_bit(HASH_FLAGS_SGS_ALLOCED, &ctx->dd->hash_flags);
   1119
   1120	return 0;
   1121}
   1122
   1123/**
   1124 * s5p_hash_prepare_sgs() - prepare sg for processing
   1125 * @ctx:	request context
   1126 * @sg:		source scatterlist request
   1127 * @new_len:	number of bytes to process from sg
   1128 * @final:	final flag
   1129 *
    1130 * Check two conditions: (1) the buffers in sg hold length-aligned data, and
    1131 * (2) the sg table has well-aligned elements (list_ok). If one of these
    1132 * checks fails, then either (1) allocate a new buffer for the data with
    1133 * s5p_hash_copy_sgs, copy the data into it and prepare the request in sgl,
    1134 * or (2) allocate a new sg table and prepare its sg elements.
   1135 *
   1136 * For digest or finup all conditions can be good, and we may not need any
   1137 * fixes.
   1138 */
   1139static int s5p_hash_prepare_sgs(struct s5p_hash_reqctx *ctx,
   1140				struct scatterlist *sg,
   1141				unsigned int new_len, bool final)
   1142{
   1143	unsigned int skip = ctx->skip, nbytes = new_len, n = 0;
   1144	bool aligned = true, list_ok = true;
   1145	struct scatterlist *sg_tmp = sg;
   1146
   1147	if (!sg || !sg->length || !new_len)
   1148		return 0;
   1149
   1150	if (skip || !final)
   1151		list_ok = false;
   1152
   1153	while (nbytes > 0 && sg_tmp) {
   1154		n++;
   1155		if (skip >= sg_tmp->length) {
   1156			skip -= sg_tmp->length;
   1157			if (!sg_tmp->length) {
   1158				aligned = false;
   1159				break;
   1160			}
   1161		} else {
   1162			if (!IS_ALIGNED(sg_tmp->length - skip, BUFLEN)) {
   1163				aligned = false;
   1164				break;
   1165			}
   1166
   1167			if (nbytes < sg_tmp->length - skip) {
   1168				list_ok = false;
   1169				break;
   1170			}
   1171
   1172			nbytes -= sg_tmp->length - skip;
   1173			skip = 0;
   1174		}
   1175
   1176		sg_tmp = sg_next(sg_tmp);
   1177	}
   1178
   1179	if (!aligned)
   1180		return s5p_hash_copy_sgs(ctx, sg, new_len);
   1181	else if (!list_ok)
   1182		return s5p_hash_copy_sg_lists(ctx, sg, new_len);
   1183
   1184	/*
   1185	 * Have aligned data from previous operation and/or current
   1186	 * Note: will enter here only if (digest or finup) and aligned
   1187	 */
   1188	if (ctx->bufcnt) {
   1189		ctx->sg_len = n;
   1190		sg_init_table(ctx->sgl, 2);
   1191		sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, ctx->bufcnt);
   1192		sg_chain(ctx->sgl, 2, sg);
   1193		ctx->sg = ctx->sgl;
   1194		ctx->sg_len++;
   1195	} else {
   1196		ctx->sg = sg;
   1197		ctx->sg_len = n;
   1198	}
   1199
   1200	return 0;
   1201}
   1202
   1203/**
   1204 * s5p_hash_prepare_request() - prepare request for processing
   1205 * @req:	AHASH request
   1206 * @update:	true if UPDATE op
   1207 *
   1208 * Note 1: we can have update flag _and_ final flag at the same time.
   1209 * Note 2: we enter here when digcnt > BUFLEN (=HASH_BLOCK_SIZE) or
   1210 *	   either req->nbytes or ctx->bufcnt + req->nbytes is > BUFLEN or
   1211 *	   we have final op
   1212 */
   1213static int s5p_hash_prepare_request(struct ahash_request *req, bool update)
   1214{
   1215	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
   1216	bool final = ctx->finup;
   1217	int xmit_len, hash_later, nbytes;
   1218	int ret;
   1219
   1220	if (update)
   1221		nbytes = req->nbytes;
   1222	else
   1223		nbytes = 0;
   1224
   1225	ctx->total = nbytes + ctx->bufcnt;
   1226	if (!ctx->total)
   1227		return 0;
   1228
   1229	if (nbytes && (!IS_ALIGNED(ctx->bufcnt, BUFLEN))) {
   1230		/* bytes left from previous request, so fill up to BUFLEN */
   1231		int len = BUFLEN - ctx->bufcnt % BUFLEN;
   1232
   1233		if (len > nbytes)
   1234			len = nbytes;
   1235
   1236		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
   1237					 0, len, 0);
   1238		ctx->bufcnt += len;
   1239		nbytes -= len;
   1240		ctx->skip = len;
   1241	} else {
   1242		ctx->skip = 0;
   1243	}
   1244
   1245	if (ctx->bufcnt)
   1246		memcpy(ctx->dd->xmit_buf, ctx->buffer, ctx->bufcnt);
   1247
   1248	xmit_len = ctx->total;
   1249	if (final) {
   1250		hash_later = 0;
   1251	} else {
   1252		if (IS_ALIGNED(xmit_len, BUFLEN))
   1253			xmit_len -= BUFLEN;
   1254		else
   1255			xmit_len -= xmit_len & (BUFLEN - 1);
   1256
   1257		hash_later = ctx->total - xmit_len;
   1258		/* copy hash_later bytes from end of req->src */
   1259		/* previous bytes are in xmit_buf, so no overwrite */
   1260		scatterwalk_map_and_copy(ctx->buffer, req->src,
   1261					 req->nbytes - hash_later,
   1262					 hash_later, 0);
   1263	}
   1264
   1265	if (xmit_len > BUFLEN) {
   1266		ret = s5p_hash_prepare_sgs(ctx, req->src, nbytes - hash_later,
   1267					   final);
   1268		if (ret)
   1269			return ret;
   1270	} else {
   1271		/* have buffered data only */
   1272		if (unlikely(!ctx->bufcnt)) {
   1273			/* first update didn't fill up buffer */
   1274			scatterwalk_map_and_copy(ctx->dd->xmit_buf, req->src,
   1275						 0, xmit_len, 0);
   1276		}
   1277
   1278		sg_init_table(ctx->sgl, 1);
   1279		sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, xmit_len);
   1280
   1281		ctx->sg = ctx->sgl;
   1282		ctx->sg_len = 1;
   1283	}
   1284
   1285	ctx->bufcnt = hash_later;
   1286	if (!final)
   1287		ctx->total = xmit_len;
   1288
   1289	return 0;
   1290}
   1291
   1292/**
   1293 * s5p_hash_update_dma_stop() - unmap DMA
   1294 * @dd:		secss device
   1295 *
   1296 * Unmap scatterlist ctx->sg.
   1297 */
   1298static void s5p_hash_update_dma_stop(struct s5p_aes_dev *dd)
   1299{
   1300	const struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
   1301
   1302	dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
   1303	clear_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags);
   1304}
   1305
   1306/**
   1307 * s5p_hash_finish() - copy calculated digest to crypto layer
   1308 * @req:	AHASH request
   1309 */
   1310static void s5p_hash_finish(struct ahash_request *req)
   1311{
   1312	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
   1313	struct s5p_aes_dev *dd = ctx->dd;
   1314
   1315	if (ctx->digcnt)
   1316		s5p_hash_copy_result(req);
   1317
   1318	dev_dbg(dd->dev, "hash_finish digcnt: %lld\n", ctx->digcnt);
   1319}
   1320
   1321/**
   1322 * s5p_hash_finish_req() - finish request
   1323 * @req:	AHASH request
   1324 * @err:	error
   1325 */
   1326static void s5p_hash_finish_req(struct ahash_request *req, int err)
   1327{
   1328	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
   1329	struct s5p_aes_dev *dd = ctx->dd;
   1330	unsigned long flags;
   1331
   1332	if (test_bit(HASH_FLAGS_SGS_COPIED, &dd->hash_flags))
   1333		free_pages((unsigned long)sg_virt(ctx->sg),
   1334			   get_order(ctx->sg->length));
   1335
   1336	if (test_bit(HASH_FLAGS_SGS_ALLOCED, &dd->hash_flags))
   1337		kfree(ctx->sg);
   1338
   1339	ctx->sg = NULL;
   1340	dd->hash_flags &= ~(BIT(HASH_FLAGS_SGS_ALLOCED) |
   1341			    BIT(HASH_FLAGS_SGS_COPIED));
   1342
   1343	if (!err && !ctx->error) {
   1344		s5p_hash_read_msg(req);
   1345		if (test_bit(HASH_FLAGS_FINAL, &dd->hash_flags))
   1346			s5p_hash_finish(req);
   1347	} else {
   1348		ctx->error = true;
   1349	}
   1350
   1351	spin_lock_irqsave(&dd->hash_lock, flags);
   1352	dd->hash_flags &= ~(BIT(HASH_FLAGS_BUSY) | BIT(HASH_FLAGS_FINAL) |
   1353			    BIT(HASH_FLAGS_DMA_READY) |
   1354			    BIT(HASH_FLAGS_OUTPUT_READY));
   1355	spin_unlock_irqrestore(&dd->hash_lock, flags);
   1356
   1357	if (req->base.complete)
   1358		req->base.complete(&req->base, err);
   1359}
   1360
   1361/**
   1362 * s5p_hash_handle_queue() - handle hash queue
   1363 * @dd:		device s5p_aes_dev
   1364 * @req:	AHASH request
   1365 *
    1366 * If req != NULL, enqueue it on dd->hash_queue. If HASH_FLAGS_BUSY is not
    1367 * set on the device, process the first request from dd->hash_queue.
   1368 *
   1369 * Returns: see s5p_hash_final below.
   1370 */
   1371static int s5p_hash_handle_queue(struct s5p_aes_dev *dd,
   1372				 struct ahash_request *req)
   1373{
   1374	struct crypto_async_request *async_req, *backlog;
   1375	struct s5p_hash_reqctx *ctx;
   1376	unsigned long flags;
   1377	int err = 0, ret = 0;
   1378
   1379retry:
   1380	spin_lock_irqsave(&dd->hash_lock, flags);
   1381	if (req)
   1382		ret = ahash_enqueue_request(&dd->hash_queue, req);
   1383
   1384	if (test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) {
   1385		spin_unlock_irqrestore(&dd->hash_lock, flags);
   1386		return ret;
   1387	}
   1388
   1389	backlog = crypto_get_backlog(&dd->hash_queue);
   1390	async_req = crypto_dequeue_request(&dd->hash_queue);
   1391	if (async_req)
   1392		set_bit(HASH_FLAGS_BUSY, &dd->hash_flags);
   1393
   1394	spin_unlock_irqrestore(&dd->hash_lock, flags);
   1395
   1396	if (!async_req)
   1397		return ret;
   1398
   1399	if (backlog)
   1400		backlog->complete(backlog, -EINPROGRESS);
   1401
   1402	req = ahash_request_cast(async_req);
   1403	dd->hash_req = req;
   1404	ctx = ahash_request_ctx(req);
   1405
   1406	err = s5p_hash_prepare_request(req, ctx->op_update);
   1407	if (err || !ctx->total)
   1408		goto out;
   1409
   1410	dev_dbg(dd->dev, "handling new req, op_update: %u, nbytes: %d\n",
   1411		ctx->op_update, req->nbytes);
   1412
   1413	s5p_ahash_dma_init(dd, SSS_HASHIN_INDEPENDENT);
   1414	if (ctx->digcnt)
   1415		s5p_hash_write_iv(req); /* restore hash IV */
   1416
   1417	if (ctx->op_update) { /* HASH_OP_UPDATE */
   1418		err = s5p_hash_xmit_dma(dd, ctx->total, ctx->finup);
   1419		if (err != -EINPROGRESS && ctx->finup && !ctx->error)
   1420			/* no final() after finup() */
   1421			err = s5p_hash_xmit_dma(dd, ctx->total, true);
   1422	} else { /* HASH_OP_FINAL */
   1423		err = s5p_hash_xmit_dma(dd, ctx->total, true);
   1424	}
   1425out:
   1426	if (err != -EINPROGRESS) {
   1427		/* hash_tasklet_cb will not finish it, so do it here */
   1428		s5p_hash_finish_req(req, err);
   1429		req = NULL;
   1430
   1431		/*
   1432		 * Execute next request immediately if there is anything
   1433		 * in queue.
   1434		 */
   1435		goto retry;
   1436	}
   1437
   1438	return ret;
   1439}
   1440
   1441/**
   1442 * s5p_hash_tasklet_cb() - hash tasklet
   1443 * @data:	ptr to s5p_aes_dev
   1444 */
   1445static void s5p_hash_tasklet_cb(unsigned long data)
   1446{
   1447	struct s5p_aes_dev *dd = (struct s5p_aes_dev *)data;
   1448
   1449	if (!test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) {
   1450		s5p_hash_handle_queue(dd, NULL);
   1451		return;
   1452	}
   1453
   1454	if (test_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags)) {
   1455		if (test_and_clear_bit(HASH_FLAGS_DMA_ACTIVE,
   1456				       &dd->hash_flags)) {
   1457			s5p_hash_update_dma_stop(dd);
   1458		}
   1459
   1460		if (test_and_clear_bit(HASH_FLAGS_OUTPUT_READY,
   1461				       &dd->hash_flags)) {
   1462			/* hash or semi-hash ready */
   1463			clear_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags);
   1464			goto finish;
   1465		}
   1466	}
   1467
   1468	return;
   1469
   1470finish:
    1471	/* finish current request */
   1472	s5p_hash_finish_req(dd->hash_req, 0);
   1473
   1474	/* If we are not busy, process next req */
   1475	if (!test_bit(HASH_FLAGS_BUSY, &dd->hash_flags))
   1476		s5p_hash_handle_queue(dd, NULL);
   1477}
   1478
   1479/**
   1480 * s5p_hash_enqueue() - enqueue request
   1481 * @req:	AHASH request
   1482 * @op:		operation UPDATE (true) or FINAL (false)
   1483 *
   1484 * Returns: see s5p_hash_final below.
   1485 */
   1486static int s5p_hash_enqueue(struct ahash_request *req, bool op)
   1487{
   1488	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
   1489	struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
   1490
   1491	ctx->op_update = op;
   1492
   1493	return s5p_hash_handle_queue(tctx->dd, req);
   1494}
   1495
   1496/**
   1497 * s5p_hash_update() - process the hash input data
   1498 * @req:	AHASH request
   1499 *
    1500 * If the request fits in the buffer, copy it and return immediately;
    1501 * otherwise enqueue it with OP_UPDATE.
   1502 *
   1503 * Returns: see s5p_hash_final below.
   1504 */
   1505static int s5p_hash_update(struct ahash_request *req)
   1506{
   1507	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
   1508
   1509	if (!req->nbytes)
   1510		return 0;
   1511
   1512	if (ctx->bufcnt + req->nbytes <= BUFLEN) {
   1513		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
   1514					 0, req->nbytes, 0);
   1515		ctx->bufcnt += req->nbytes;
   1516		return 0;
   1517	}
   1518
   1519	return s5p_hash_enqueue(req, true); /* HASH_OP_UPDATE */
   1520}
   1521
   1522/**
   1523 * s5p_hash_final() - close up hash and calculate digest
   1524 * @req:	AHASH request
   1525 *
    1526 * Note: in final, req->src does not contain any data and req->nbytes can
    1527 * be non-zero.
   1528 *
   1529 * If there were no input data processed yet and the buffered hash data is
   1530 * less than BUFLEN (64) then calculate the final hash immediately by using
   1531 * SW algorithm fallback.
   1532 *
    1533 * Otherwise enqueue the current AHASH request with the OP_FINAL operation
    1534 * and finalize the hash message in HW. Note that if digcnt != 0 then there
    1535 * was a previous update op, so there are always some buffered bytes in
    1536 * ctx->buffer, which means that ctx->bufcnt != 0.
   1537 *
   1538 * Returns:
   1539 * 0 if the request has been processed immediately,
   1540 * -EINPROGRESS if the operation has been queued for later execution or is set
   1541 *		to processing by HW,
   1542 * -EBUSY if queue is full and request should be resubmitted later,
    1543 * other negative values denote an error.
   1544 */
   1545static int s5p_hash_final(struct ahash_request *req)
   1546{
   1547	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
   1548
   1549	ctx->finup = true;
   1550	if (ctx->error)
   1551		return -EINVAL; /* uncompleted hash is not needed */
   1552
   1553	if (!ctx->digcnt && ctx->bufcnt < BUFLEN) {
   1554		struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
   1555
   1556		return crypto_shash_tfm_digest(tctx->fallback, ctx->buffer,
   1557					       ctx->bufcnt, req->result);
   1558	}
   1559
   1560	return s5p_hash_enqueue(req, false); /* HASH_OP_FINAL */
   1561}
   1562
   1563/**
   1564 * s5p_hash_finup() - process last req->src and calculate digest
   1565 * @req:	AHASH request containing the last update data
   1566 *
   1567 * Return values: see s5p_hash_final above.
   1568 */
   1569static int s5p_hash_finup(struct ahash_request *req)
   1570{
   1571	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
   1572	int err1, err2;
   1573
   1574	ctx->finup = true;
   1575
   1576	err1 = s5p_hash_update(req);
   1577	if (err1 == -EINPROGRESS || err1 == -EBUSY)
   1578		return err1;
   1579
   1580	/*
    1581	 * final() always has to be called to clean up resources, even if
    1582	 * update() failed, except for EINPROGRESS, or to calculate the digest
    1583	 * for a small size
   1584	 */
   1585	err2 = s5p_hash_final(req);
   1586
   1587	return err1 ?: err2;
   1588}
   1589
   1590/**
    1591 * s5p_hash_init() - initialize AHASH request context
   1592 * @req:	AHASH request
   1593 *
   1594 * Init async hash request context.
   1595 */
   1596static int s5p_hash_init(struct ahash_request *req)
   1597{
   1598	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
   1599	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
   1600	struct s5p_hash_ctx *tctx = crypto_ahash_ctx(tfm);
   1601
   1602	ctx->dd = tctx->dd;
   1603	ctx->error = false;
   1604	ctx->finup = false;
   1605	ctx->bufcnt = 0;
   1606	ctx->digcnt = 0;
   1607	ctx->total = 0;
   1608	ctx->skip = 0;
   1609
   1610	dev_dbg(tctx->dd->dev, "init: digest size: %d\n",
   1611		crypto_ahash_digestsize(tfm));
   1612
   1613	switch (crypto_ahash_digestsize(tfm)) {
   1614	case MD5_DIGEST_SIZE:
   1615		ctx->engine = SSS_HASH_ENGINE_MD5;
   1616		ctx->nregs = HASH_MD5_MAX_REG;
   1617		break;
   1618	case SHA1_DIGEST_SIZE:
   1619		ctx->engine = SSS_HASH_ENGINE_SHA1;
   1620		ctx->nregs = HASH_SHA1_MAX_REG;
   1621		break;
   1622	case SHA256_DIGEST_SIZE:
   1623		ctx->engine = SSS_HASH_ENGINE_SHA256;
   1624		ctx->nregs = HASH_SHA256_MAX_REG;
   1625		break;
   1626	default:
   1627		ctx->error = true;
   1628		return -EINVAL;
   1629	}
   1630
   1631	return 0;
   1632}
   1633
   1634/**
   1635 * s5p_hash_digest - calculate digest from req->src
   1636 * @req:	AHASH request
   1637 *
   1638 * Return values: see s5p_hash_final above.
   1639 */
   1640static int s5p_hash_digest(struct ahash_request *req)
   1641{
   1642	return s5p_hash_init(req) ?: s5p_hash_finup(req);
   1643}
   1644
   1645/**
   1646 * s5p_hash_cra_init_alg - init crypto alg transformation
   1647 * @tfm:	crypto transformation
   1648 */
   1649static int s5p_hash_cra_init_alg(struct crypto_tfm *tfm)
   1650{
   1651	struct s5p_hash_ctx *tctx = crypto_tfm_ctx(tfm);
   1652	const char *alg_name = crypto_tfm_alg_name(tfm);
   1653
   1654	tctx->dd = s5p_dev;
   1655	/* Allocate a fallback and abort if it failed. */
   1656	tctx->fallback = crypto_alloc_shash(alg_name, 0,
   1657					    CRYPTO_ALG_NEED_FALLBACK);
   1658	if (IS_ERR(tctx->fallback)) {
   1659		pr_err("fallback alloc fails for '%s'\n", alg_name);
   1660		return PTR_ERR(tctx->fallback);
   1661	}
   1662
   1663	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
   1664				 sizeof(struct s5p_hash_reqctx) + BUFLEN);
   1665
   1666	return 0;
   1667}
   1668
   1669/**
   1670 * s5p_hash_cra_init - init crypto tfm
   1671 * @tfm:	crypto transformation
   1672 */
   1673static int s5p_hash_cra_init(struct crypto_tfm *tfm)
   1674{
   1675	return s5p_hash_cra_init_alg(tfm);
   1676}
   1677
   1678/**
   1679 * s5p_hash_cra_exit - exit crypto tfm
   1680 * @tfm:	crypto transformation
   1681 *
   1682 * free allocated fallback
   1683 */
   1684static void s5p_hash_cra_exit(struct crypto_tfm *tfm)
   1685{
   1686	struct s5p_hash_ctx *tctx = crypto_tfm_ctx(tfm);
   1687
   1688	crypto_free_shash(tctx->fallback);
   1689	tctx->fallback = NULL;
   1690}
   1691
   1692/**
   1693 * s5p_hash_export - export hash state
   1694 * @req:	AHASH request
   1695 * @out:	buffer for exported state
   1696 */
   1697static int s5p_hash_export(struct ahash_request *req, void *out)
   1698{
   1699	const struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
   1700
   1701	memcpy(out, ctx, sizeof(*ctx) + ctx->bufcnt);
   1702
   1703	return 0;
   1704}
   1705
   1706/**
   1707 * s5p_hash_import - import hash state
   1708 * @req:	AHASH request
   1709 * @in:		buffer with state to be imported from
   1710 */
   1711static int s5p_hash_import(struct ahash_request *req, const void *in)
   1712{
   1713	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
   1714	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
   1715	struct s5p_hash_ctx *tctx = crypto_ahash_ctx(tfm);
   1716	const struct s5p_hash_reqctx *ctx_in = in;
   1717
   1718	memcpy(ctx, in, sizeof(*ctx) + BUFLEN);
   1719	if (ctx_in->bufcnt > BUFLEN) {
   1720		ctx->error = true;
   1721		return -EINVAL;
   1722	}
   1723
   1724	ctx->dd = tctx->dd;
   1725	ctx->error = false;
   1726
   1727	return 0;
   1728}
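        /*
         * Illustrative sketch (generic ahash API usage, not part of this
         * driver): a caller that wants to park a partial hash and resume it
         * later saves the opaque state handled by the export/import hooks
         * above. The buffer must be crypto_ahash_statesize(tfm) bytes, which
         * for these algorithms is sizeof(struct s5p_hash_reqctx) + BUFLEN:
         *
         *	u8 *state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
         *
         *	err = crypto_ahash_export(req, state);
         *	...
         *	err = crypto_ahash_import(req, state);
         */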
   1729
   1730static struct ahash_alg algs_sha1_md5_sha256[] = {
   1731{
   1732	.init		= s5p_hash_init,
   1733	.update		= s5p_hash_update,
   1734	.final		= s5p_hash_final,
   1735	.finup		= s5p_hash_finup,
   1736	.digest		= s5p_hash_digest,
   1737	.export		= s5p_hash_export,
   1738	.import		= s5p_hash_import,
   1739	.halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
   1740	.halg.digestsize	= SHA1_DIGEST_SIZE,
   1741	.halg.base	= {
   1742		.cra_name		= "sha1",
   1743		.cra_driver_name	= "exynos-sha1",
   1744		.cra_priority		= 100,
   1745		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
   1746					  CRYPTO_ALG_ASYNC |
   1747					  CRYPTO_ALG_NEED_FALLBACK,
   1748		.cra_blocksize		= HASH_BLOCK_SIZE,
   1749		.cra_ctxsize		= sizeof(struct s5p_hash_ctx),
   1750		.cra_alignmask		= SSS_HASH_DMA_ALIGN_MASK,
   1751		.cra_module		= THIS_MODULE,
   1752		.cra_init		= s5p_hash_cra_init,
   1753		.cra_exit		= s5p_hash_cra_exit,
   1754	}
   1755},
   1756{
   1757	.init		= s5p_hash_init,
   1758	.update		= s5p_hash_update,
   1759	.final		= s5p_hash_final,
   1760	.finup		= s5p_hash_finup,
   1761	.digest		= s5p_hash_digest,
   1762	.export		= s5p_hash_export,
   1763	.import		= s5p_hash_import,
   1764	.halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
   1765	.halg.digestsize	= MD5_DIGEST_SIZE,
   1766	.halg.base	= {
   1767		.cra_name		= "md5",
   1768		.cra_driver_name	= "exynos-md5",
   1769		.cra_priority		= 100,
   1770		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
   1771					  CRYPTO_ALG_ASYNC |
   1772					  CRYPTO_ALG_NEED_FALLBACK,
   1773		.cra_blocksize		= HASH_BLOCK_SIZE,
   1774		.cra_ctxsize		= sizeof(struct s5p_hash_ctx),
   1775		.cra_alignmask		= SSS_HASH_DMA_ALIGN_MASK,
   1776		.cra_module		= THIS_MODULE,
   1777		.cra_init		= s5p_hash_cra_init,
   1778		.cra_exit		= s5p_hash_cra_exit,
   1779	}
   1780},
   1781{
   1782	.init		= s5p_hash_init,
   1783	.update		= s5p_hash_update,
   1784	.final		= s5p_hash_final,
   1785	.finup		= s5p_hash_finup,
   1786	.digest		= s5p_hash_digest,
   1787	.export		= s5p_hash_export,
   1788	.import		= s5p_hash_import,
   1789	.halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
   1790	.halg.digestsize	= SHA256_DIGEST_SIZE,
   1791	.halg.base	= {
   1792		.cra_name		= "sha256",
   1793		.cra_driver_name	= "exynos-sha256",
   1794		.cra_priority		= 100,
   1795		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
   1796					  CRYPTO_ALG_ASYNC |
   1797					  CRYPTO_ALG_NEED_FALLBACK,
   1798		.cra_blocksize		= HASH_BLOCK_SIZE,
   1799		.cra_ctxsize		= sizeof(struct s5p_hash_ctx),
   1800		.cra_alignmask		= SSS_HASH_DMA_ALIGN_MASK,
   1801		.cra_module		= THIS_MODULE,
   1802		.cra_init		= s5p_hash_cra_init,
   1803		.cra_exit		= s5p_hash_cra_exit,
   1804	}
   1805}
   1806
   1807};
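        /*
         * Illustrative sketch (generic kernel crypto API, not part of this
         * driver): once the array above is registered, an in-kernel user can
         * reach these transforms through the usual asynchronous hash calls;
         * whether "exynos-sha256" or a software implementation is selected
         * depends on the relative cra_priority values. Roughly:
         *
         *	DECLARE_CRYPTO_WAIT(wait);
         *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
         *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
         *	struct scatterlist sg;
         *	u8 digest[SHA256_DIGEST_SIZE];
         *
         *	sg_init_one(&sg, data, len);
         *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
         *				   crypto_req_done, &wait);
         *	ahash_request_set_crypt(req, &sg, digest, len);
         *	err = crypto_wait_req(crypto_ahash_digest(req), &wait);
         *
         * Error handling and freeing are omitted; data/len are caller-provided.
         */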
   1808
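        /*
         * s5p_set_aes() below loads the IV/counter and the key into the AES
         * register bank. Keys are written so they end at the last key data
         * word: a 256-bit key starts at SSS_REG_AES_KEY_DATA(0), a 192-bit
         * key at (2) and a 128-bit key at (4).
         */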
   1809static void s5p_set_aes(struct s5p_aes_dev *dev,
   1810			const u8 *key, const u8 *iv, const u8 *ctr,
   1811			unsigned int keylen)
   1812{
   1813	void __iomem *keystart;
   1814
   1815	if (iv)
   1816		memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv,
   1817			    AES_BLOCK_SIZE);
   1818
   1819	if (ctr)
   1820		memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0), ctr,
   1821			    AES_BLOCK_SIZE);
   1822
   1823	if (keylen == AES_KEYSIZE_256)
   1824		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(0);
   1825	else if (keylen == AES_KEYSIZE_192)
   1826		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(2);
   1827	else
   1828		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(4);
   1829
   1830	memcpy_toio(keystart, key, keylen);
   1831}
   1832
   1833static bool s5p_is_sg_aligned(struct scatterlist *sg)
   1834{
   1835	while (sg) {
   1836		if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
   1837			return false;
   1838		sg = sg_next(sg);
   1839	}
   1840
   1841	return true;
   1842}
   1843
   1844static int s5p_set_indata_start(struct s5p_aes_dev *dev,
   1845				struct skcipher_request *req)
   1846{
   1847	struct scatterlist *sg;
   1848	int err;
   1849
   1850	dev->sg_src_cpy = NULL;
   1851	sg = req->src;
   1852	if (!s5p_is_sg_aligned(sg)) {
   1853		dev_dbg(dev->dev,
   1854			"At least one unaligned source scatter list, making a copy\n");
   1855		err = s5p_make_sg_cpy(dev, sg, &dev->sg_src_cpy);
   1856		if (err)
   1857			return err;
   1858
   1859		sg = dev->sg_src_cpy;
   1860	}
   1861
   1862	err = s5p_set_indata(dev, sg);
   1863	if (err) {
   1864		s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
   1865		return err;
   1866	}
   1867
   1868	return 0;
   1869}
   1870
   1871static int s5p_set_outdata_start(struct s5p_aes_dev *dev,
   1872				 struct skcipher_request *req)
   1873{
   1874	struct scatterlist *sg;
   1875	int err;
   1876
   1877	dev->sg_dst_cpy = NULL;
   1878	sg = req->dst;
   1879	if (!s5p_is_sg_aligned(sg)) {
   1880		dev_dbg(dev->dev,
   1881			"At least one unaligned dest scatter list, making a copy\n");
   1882		err = s5p_make_sg_cpy(dev, sg, &dev->sg_dst_cpy);
   1883		if (err)
   1884			return err;
   1885
   1886		sg = dev->sg_dst_cpy;
   1887	}
   1888
   1889	err = s5p_set_outdata(dev, sg);
   1890	if (err) {
   1891		s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
   1892		return err;
   1893	}
   1894
   1895	return 0;
   1896}
   1897
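        /*
         * s5p_aes_crypt_start() below assembles AES_CONTROL from the request
         * mode: for example, a CBC decrypt with a 256-bit key programs
         * SSS_AES_KEY_CHANGE_MODE | SSS_AES_MODE_DECRYPT |
         * SSS_AES_CHAIN_MODE_CBC | SSS_AES_KEY_SIZE_256 | SSS_AES_FIFO_MODE
         * plus the SSS_AES_BYTESWAP_* bits. It then maps the source and
         * destination scatterlists (copying them if unaligned), sets up the
         * DMA and enables the BRDMA/BTDMA interrupts.
         */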
   1898static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
   1899{
   1900	struct skcipher_request *req = dev->req;
   1901	u32 aes_control;
   1902	unsigned long flags;
   1903	int err;
   1904	u8 *iv, *ctr;
   1905
    1906	/* This sets bits [13:12] to 00, which selects the 128-bit counter */
   1907	aes_control = SSS_AES_KEY_CHANGE_MODE;
   1908	if (mode & FLAGS_AES_DECRYPT)
   1909		aes_control |= SSS_AES_MODE_DECRYPT;
   1910
   1911	if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) {
   1912		aes_control |= SSS_AES_CHAIN_MODE_CBC;
   1913		iv = req->iv;
   1914		ctr = NULL;
   1915	} else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) {
   1916		aes_control |= SSS_AES_CHAIN_MODE_CTR;
   1917		iv = NULL;
   1918		ctr = req->iv;
   1919	} else {
   1920		iv = NULL; /* AES_ECB */
   1921		ctr = NULL;
   1922	}
   1923
   1924	if (dev->ctx->keylen == AES_KEYSIZE_192)
   1925		aes_control |= SSS_AES_KEY_SIZE_192;
   1926	else if (dev->ctx->keylen == AES_KEYSIZE_256)
   1927		aes_control |= SSS_AES_KEY_SIZE_256;
   1928
   1929	aes_control |= SSS_AES_FIFO_MODE;
   1930
    1931	/* alternatively, byte swapping could be done on the DMA side */
   1932	aes_control |= SSS_AES_BYTESWAP_DI
   1933		    |  SSS_AES_BYTESWAP_DO
   1934		    |  SSS_AES_BYTESWAP_IV
   1935		    |  SSS_AES_BYTESWAP_KEY
   1936		    |  SSS_AES_BYTESWAP_CNT;
   1937
   1938	spin_lock_irqsave(&dev->lock, flags);
   1939
   1940	SSS_WRITE(dev, FCINTENCLR,
   1941		  SSS_FCINTENCLR_BTDMAINTENCLR | SSS_FCINTENCLR_BRDMAINTENCLR);
   1942	SSS_WRITE(dev, FCFIFOCTRL, 0x00);
   1943
   1944	err = s5p_set_indata_start(dev, req);
   1945	if (err)
   1946		goto indata_error;
   1947
   1948	err = s5p_set_outdata_start(dev, req);
   1949	if (err)
   1950		goto outdata_error;
   1951
   1952	SSS_AES_WRITE(dev, AES_CONTROL, aes_control);
   1953	s5p_set_aes(dev, dev->ctx->aes_key, iv, ctr, dev->ctx->keylen);
   1954
   1955	s5p_set_dma_indata(dev,  dev->sg_src);
   1956	s5p_set_dma_outdata(dev, dev->sg_dst);
   1957
   1958	SSS_WRITE(dev, FCINTENSET,
   1959		  SSS_FCINTENSET_BTDMAINTENSET | SSS_FCINTENSET_BRDMAINTENSET);
   1960
   1961	spin_unlock_irqrestore(&dev->lock, flags);
   1962
   1963	return;
   1964
   1965outdata_error:
   1966	s5p_unset_indata(dev);
   1967
   1968indata_error:
   1969	s5p_sg_done(dev);
   1970	dev->busy = false;
   1971	spin_unlock_irqrestore(&dev->lock, flags);
   1972	s5p_aes_complete(req, err);
   1973}
   1974
   1975static void s5p_tasklet_cb(unsigned long data)
   1976{
   1977	struct s5p_aes_dev *dev = (struct s5p_aes_dev *)data;
   1978	struct crypto_async_request *async_req, *backlog;
   1979	struct s5p_aes_reqctx *reqctx;
   1980	unsigned long flags;
   1981
   1982	spin_lock_irqsave(&dev->lock, flags);
   1983	backlog   = crypto_get_backlog(&dev->queue);
   1984	async_req = crypto_dequeue_request(&dev->queue);
   1985
   1986	if (!async_req) {
   1987		dev->busy = false;
   1988		spin_unlock_irqrestore(&dev->lock, flags);
   1989		return;
   1990	}
   1991	spin_unlock_irqrestore(&dev->lock, flags);
   1992
   1993	if (backlog)
   1994		backlog->complete(backlog, -EINPROGRESS);
   1995
   1996	dev->req = skcipher_request_cast(async_req);
   1997	dev->ctx = crypto_tfm_ctx(dev->req->base.tfm);
   1998	reqctx   = skcipher_request_ctx(dev->req);
   1999
   2000	s5p_aes_crypt_start(dev, reqctx->mode);
   2001}
   2002
   2003static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
   2004			      struct skcipher_request *req)
   2005{
   2006	unsigned long flags;
   2007	int err;
   2008
   2009	spin_lock_irqsave(&dev->lock, flags);
   2010	err = crypto_enqueue_request(&dev->queue, &req->base);
   2011	if (dev->busy) {
   2012		spin_unlock_irqrestore(&dev->lock, flags);
   2013		return err;
   2014	}
   2015	dev->busy = true;
   2016
   2017	spin_unlock_irqrestore(&dev->lock, flags);
   2018
   2019	tasklet_schedule(&dev->tasklet);
   2020
   2021	return err;
   2022}
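        /*
         * Requests are only queued here; the tasklet is scheduled when the
         * engine was idle (dev->busy == false), otherwise the request waits
         * in dev->queue. The return value of crypto_enqueue_request() is
         * passed back to the caller either way.
         */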
   2023
   2024static int s5p_aes_crypt(struct skcipher_request *req, unsigned long mode)
   2025{
   2026	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
   2027	struct s5p_aes_reqctx *reqctx = skcipher_request_ctx(req);
   2028	struct s5p_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
   2029	struct s5p_aes_dev *dev = ctx->dev;
   2030
   2031	if (!req->cryptlen)
   2032		return 0;
   2033
   2034	if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE) &&
   2035			((mode & FLAGS_AES_MODE_MASK) != FLAGS_AES_CTR)) {
    2036		dev_dbg(dev->dev, "request size is not a whole number of AES blocks\n");
   2037		return -EINVAL;
   2038	}
   2039
   2040	reqctx->mode = mode;
   2041
   2042	return s5p_aes_handle_req(dev, req);
   2043}
   2044
   2045static int s5p_aes_setkey(struct crypto_skcipher *cipher,
   2046			  const u8 *key, unsigned int keylen)
   2047{
   2048	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
   2049	struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);
   2050
   2051	if (keylen != AES_KEYSIZE_128 &&
   2052	    keylen != AES_KEYSIZE_192 &&
   2053	    keylen != AES_KEYSIZE_256)
   2054		return -EINVAL;
   2055
   2056	memcpy(ctx->aes_key, key, keylen);
   2057	ctx->keylen = keylen;
   2058
   2059	return 0;
   2060}
   2061
   2062static int s5p_aes_ecb_encrypt(struct skcipher_request *req)
   2063{
   2064	return s5p_aes_crypt(req, 0);
   2065}
   2066
   2067static int s5p_aes_ecb_decrypt(struct skcipher_request *req)
   2068{
   2069	return s5p_aes_crypt(req, FLAGS_AES_DECRYPT);
   2070}
   2071
   2072static int s5p_aes_cbc_encrypt(struct skcipher_request *req)
   2073{
   2074	return s5p_aes_crypt(req, FLAGS_AES_CBC);
   2075}
   2076
   2077static int s5p_aes_cbc_decrypt(struct skcipher_request *req)
   2078{
   2079	return s5p_aes_crypt(req, FLAGS_AES_DECRYPT | FLAGS_AES_CBC);
   2080}
   2081
   2082static int s5p_aes_ctr_crypt(struct skcipher_request *req)
   2083{
   2084	return s5p_aes_crypt(req, FLAGS_AES_CTR);
   2085}
   2086
   2087static int s5p_aes_init_tfm(struct crypto_skcipher *tfm)
   2088{
   2089	struct s5p_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
   2090
   2091	ctx->dev = s5p_dev;
   2092	crypto_skcipher_set_reqsize(tfm, sizeof(struct s5p_aes_reqctx));
   2093
   2094	return 0;
   2095}
   2096
   2097static struct skcipher_alg algs[] = {
   2098	{
   2099		.base.cra_name		= "ecb(aes)",
   2100		.base.cra_driver_name	= "ecb-aes-s5p",
   2101		.base.cra_priority	= 100,
   2102		.base.cra_flags		= CRYPTO_ALG_ASYNC |
   2103					  CRYPTO_ALG_KERN_DRIVER_ONLY,
   2104		.base.cra_blocksize	= AES_BLOCK_SIZE,
   2105		.base.cra_ctxsize	= sizeof(struct s5p_aes_ctx),
   2106		.base.cra_alignmask	= 0x0f,
   2107		.base.cra_module	= THIS_MODULE,
   2108
   2109		.min_keysize		= AES_MIN_KEY_SIZE,
   2110		.max_keysize		= AES_MAX_KEY_SIZE,
   2111		.setkey			= s5p_aes_setkey,
   2112		.encrypt		= s5p_aes_ecb_encrypt,
   2113		.decrypt		= s5p_aes_ecb_decrypt,
   2114		.init			= s5p_aes_init_tfm,
   2115	},
   2116	{
   2117		.base.cra_name		= "cbc(aes)",
   2118		.base.cra_driver_name	= "cbc-aes-s5p",
   2119		.base.cra_priority	= 100,
   2120		.base.cra_flags		= CRYPTO_ALG_ASYNC |
   2121					  CRYPTO_ALG_KERN_DRIVER_ONLY,
   2122		.base.cra_blocksize	= AES_BLOCK_SIZE,
   2123		.base.cra_ctxsize	= sizeof(struct s5p_aes_ctx),
   2124		.base.cra_alignmask	= 0x0f,
   2125		.base.cra_module	= THIS_MODULE,
   2126
   2127		.min_keysize		= AES_MIN_KEY_SIZE,
   2128		.max_keysize		= AES_MAX_KEY_SIZE,
   2129		.ivsize			= AES_BLOCK_SIZE,
   2130		.setkey			= s5p_aes_setkey,
   2131		.encrypt		= s5p_aes_cbc_encrypt,
   2132		.decrypt		= s5p_aes_cbc_decrypt,
   2133		.init			= s5p_aes_init_tfm,
   2134	},
   2135	{
   2136		.base.cra_name		= "ctr(aes)",
   2137		.base.cra_driver_name	= "ctr-aes-s5p",
   2138		.base.cra_priority	= 100,
   2139		.base.cra_flags		= CRYPTO_ALG_ASYNC |
   2140					  CRYPTO_ALG_KERN_DRIVER_ONLY,
   2141		.base.cra_blocksize	= 1,
   2142		.base.cra_ctxsize	= sizeof(struct s5p_aes_ctx),
   2143		.base.cra_alignmask	= 0x0f,
   2144		.base.cra_module	= THIS_MODULE,
   2145
   2146		.min_keysize		= AES_MIN_KEY_SIZE,
   2147		.max_keysize		= AES_MAX_KEY_SIZE,
   2148		.ivsize			= AES_BLOCK_SIZE,
   2149		.setkey			= s5p_aes_setkey,
   2150		.encrypt		= s5p_aes_ctr_crypt,
   2151		.decrypt		= s5p_aes_ctr_crypt,
   2152		.init			= s5p_aes_init_tfm,
   2153	},
   2154};
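        /*
         * Illustrative sketch (generic skcipher API usage, not part of this
         * driver): encrypting one block with the "cbc(aes)" implementation
         * above would look roughly like:
         *
         *	DECLARE_CRYPTO_WAIT(wait);
         *	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
         *	struct skcipher_request *req = skcipher_request_alloc(tfm, GFP_KERNEL);
         *	struct scatterlist sg;
         *
         *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
         *	sg_init_one(&sg, buf, AES_BLOCK_SIZE);
         *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
         *				      crypto_req_done, &wait);
         *	skcipher_request_set_crypt(req, &sg, &sg, AES_BLOCK_SIZE, iv);
         *	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
         *
         * key, buf and iv are caller-provided; error handling and freeing are
         * omitted for brevity.
         */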
   2155
   2156static int s5p_aes_probe(struct platform_device *pdev)
   2157{
   2158	struct device *dev = &pdev->dev;
   2159	int i, j, err;
   2160	const struct samsung_aes_variant *variant;
   2161	struct s5p_aes_dev *pdata;
   2162	struct resource *res;
   2163	unsigned int hash_i;
   2164
   2165	if (s5p_dev)
   2166		return -EEXIST;
   2167
   2168	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
   2169	if (!pdata)
   2170		return -ENOMEM;
   2171
   2172	variant = find_s5p_sss_version(pdev);
   2173	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
   2174	if (!res)
   2175		return -EINVAL;
   2176
    2177	/*
    2178	 * Note: HASH and PRNG use the same registers in secss, so avoid
    2179	 * overwriting each other. HASH support is dropped here when
    2180	 * CONFIG_EXYNOS_RNG is enabled. The HASH registers need a larger
    2181	 * resource size than the current one, which describes only AES/DES.
    2182	 */
   2183	if (IS_ENABLED(CONFIG_CRYPTO_DEV_EXYNOS_HASH)) {
   2184		if (variant == &exynos_aes_data) {
   2185			res->end += 0x300;
   2186			pdata->use_hash = true;
   2187		}
   2188	}
   2189
   2190	pdata->res = res;
   2191	pdata->ioaddr = devm_ioremap_resource(dev, res);
   2192	if (IS_ERR(pdata->ioaddr)) {
   2193		if (!pdata->use_hash)
   2194			return PTR_ERR(pdata->ioaddr);
   2195		/* try AES without HASH */
   2196		res->end -= 0x300;
   2197		pdata->use_hash = false;
   2198		pdata->ioaddr = devm_ioremap_resource(dev, res);
   2199		if (IS_ERR(pdata->ioaddr))
   2200			return PTR_ERR(pdata->ioaddr);
   2201	}
   2202
   2203	pdata->clk = devm_clk_get(dev, variant->clk_names[0]);
   2204	if (IS_ERR(pdata->clk))
   2205		return dev_err_probe(dev, PTR_ERR(pdata->clk),
   2206				     "failed to find secss clock %s\n",
   2207				     variant->clk_names[0]);
   2208
   2209	err = clk_prepare_enable(pdata->clk);
   2210	if (err < 0) {
   2211		dev_err(dev, "Enabling clock %s failed, err %d\n",
   2212			variant->clk_names[0], err);
   2213		return err;
   2214	}
   2215
   2216	if (variant->clk_names[1]) {
   2217		pdata->pclk = devm_clk_get(dev, variant->clk_names[1]);
   2218		if (IS_ERR(pdata->pclk)) {
   2219			err = dev_err_probe(dev, PTR_ERR(pdata->pclk),
   2220					    "failed to find clock %s\n",
   2221					    variant->clk_names[1]);
   2222			goto err_clk;
   2223		}
   2224
   2225		err = clk_prepare_enable(pdata->pclk);
   2226		if (err < 0) {
   2227			dev_err(dev, "Enabling clock %s failed, err %d\n",
    2228				variant->clk_names[1], err);
   2229			goto err_clk;
   2230		}
   2231	} else {
   2232		pdata->pclk = NULL;
   2233	}
   2234
   2235	spin_lock_init(&pdata->lock);
   2236	spin_lock_init(&pdata->hash_lock);
   2237
   2238	pdata->aes_ioaddr = pdata->ioaddr + variant->aes_offset;
   2239	pdata->io_hash_base = pdata->ioaddr + variant->hash_offset;
   2240
   2241	pdata->irq_fc = platform_get_irq(pdev, 0);
   2242	if (pdata->irq_fc < 0) {
   2243		err = pdata->irq_fc;
   2244		dev_warn(dev, "feed control interrupt is not available.\n");
   2245		goto err_irq;
   2246	}
   2247	err = devm_request_threaded_irq(dev, pdata->irq_fc, NULL,
   2248					s5p_aes_interrupt, IRQF_ONESHOT,
   2249					pdev->name, pdev);
   2250	if (err < 0) {
    2251		dev_warn(dev, "requesting feed control interrupt failed.\n");
   2252		goto err_irq;
   2253	}
   2254
   2255	pdata->busy = false;
   2256	pdata->dev = dev;
   2257	platform_set_drvdata(pdev, pdata);
   2258	s5p_dev = pdata;
   2259
   2260	tasklet_init(&pdata->tasklet, s5p_tasklet_cb, (unsigned long)pdata);
   2261	crypto_init_queue(&pdata->queue, CRYPTO_QUEUE_LEN);
   2262
   2263	for (i = 0; i < ARRAY_SIZE(algs); i++) {
   2264		err = crypto_register_skcipher(&algs[i]);
   2265		if (err)
   2266			goto err_algs;
   2267	}
   2268
   2269	if (pdata->use_hash) {
   2270		tasklet_init(&pdata->hash_tasklet, s5p_hash_tasklet_cb,
   2271			     (unsigned long)pdata);
   2272		crypto_init_queue(&pdata->hash_queue, SSS_HASH_QUEUE_LENGTH);
   2273
   2274		for (hash_i = 0; hash_i < ARRAY_SIZE(algs_sha1_md5_sha256);
   2275		     hash_i++) {
   2276			struct ahash_alg *alg;
   2277
   2278			alg = &algs_sha1_md5_sha256[hash_i];
   2279			err = crypto_register_ahash(alg);
   2280			if (err) {
   2281				dev_err(dev, "can't register '%s': %d\n",
   2282					alg->halg.base.cra_driver_name, err);
   2283				goto err_hash;
   2284			}
   2285		}
   2286	}
   2287
   2288	dev_info(dev, "s5p-sss driver registered\n");
   2289
   2290	return 0;
   2291
   2292err_hash:
   2293	for (j = hash_i - 1; j >= 0; j--)
   2294		crypto_unregister_ahash(&algs_sha1_md5_sha256[j]);
   2295
   2296	tasklet_kill(&pdata->hash_tasklet);
   2297	res->end -= 0x300;
   2298
   2299err_algs:
   2300	if (i < ARRAY_SIZE(algs))
   2301		dev_err(dev, "can't register '%s': %d\n", algs[i].base.cra_name,
   2302			err);
   2303
   2304	for (j = 0; j < i; j++)
   2305		crypto_unregister_skcipher(&algs[j]);
   2306
   2307	tasklet_kill(&pdata->tasklet);
   2308
   2309err_irq:
   2310	clk_disable_unprepare(pdata->pclk);
   2311
   2312err_clk:
   2313	clk_disable_unprepare(pdata->clk);
   2314	s5p_dev = NULL;
   2315
   2316	return err;
   2317}
   2318
   2319static int s5p_aes_remove(struct platform_device *pdev)
   2320{
   2321	struct s5p_aes_dev *pdata = platform_get_drvdata(pdev);
   2322	int i;
   2323
   2324	if (!pdata)
   2325		return -ENODEV;
   2326
   2327	for (i = 0; i < ARRAY_SIZE(algs); i++)
   2328		crypto_unregister_skcipher(&algs[i]);
   2329
   2330	tasklet_kill(&pdata->tasklet);
   2331	if (pdata->use_hash) {
   2332		for (i = ARRAY_SIZE(algs_sha1_md5_sha256) - 1; i >= 0; i--)
   2333			crypto_unregister_ahash(&algs_sha1_md5_sha256[i]);
   2334
   2335		pdata->res->end -= 0x300;
   2336		tasklet_kill(&pdata->hash_tasklet);
   2337		pdata->use_hash = false;
   2338	}
   2339
   2340	clk_disable_unprepare(pdata->pclk);
   2341
   2342	clk_disable_unprepare(pdata->clk);
   2343	s5p_dev = NULL;
   2344
   2345	return 0;
   2346}
   2347
   2348static struct platform_driver s5p_aes_crypto = {
   2349	.probe	= s5p_aes_probe,
   2350	.remove	= s5p_aes_remove,
   2351	.driver	= {
   2352		.name	= "s5p-secss",
   2353		.of_match_table = s5p_sss_dt_match,
   2354	},
   2355};
   2356
   2357module_platform_driver(s5p_aes_crypto);
   2358
   2359MODULE_DESCRIPTION("S5PV210 AES hw acceleration support.");
   2360MODULE_LICENSE("GPL v2");
   2361MODULE_AUTHOR("Vladimir Zapolskiy <vzapolskiy@gmail.com>");
   2362MODULE_AUTHOR("Kamil Konieczny <k.konieczny@partner.samsung.com>");