cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

img-hash.c (27695B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * Copyright (c) 2014 Imagination Technologies
      4 * Authors:  Will Thomas, James Hartley
      5 *
      6 *	Interface structure taken from omap-sham driver
      7 */
      8
      9#include <linux/clk.h>
     10#include <linux/dma-mapping.h>
     11#include <linux/dmaengine.h>
     12#include <linux/interrupt.h>
     13#include <linux/io.h>
     14#include <linux/kernel.h>
     15#include <linux/module.h>
     16#include <linux/of_device.h>
     17#include <linux/platform_device.h>
     18#include <linux/scatterlist.h>
     19
     20#include <crypto/internal/hash.h>
     21#include <crypto/md5.h>
     22#include <crypto/sha1.h>
     23#include <crypto/sha2.h>
     24
     25#define CR_RESET			0
     26#define CR_RESET_SET			1
     27#define CR_RESET_UNSET			0
     28
     29#define CR_MESSAGE_LENGTH_H		0x4
     30#define CR_MESSAGE_LENGTH_L		0x8
     31
     32#define CR_CONTROL			0xc
     33#define CR_CONTROL_BYTE_ORDER_3210	0
     34#define CR_CONTROL_BYTE_ORDER_0123	1
     35#define CR_CONTROL_BYTE_ORDER_2310	2
     36#define CR_CONTROL_BYTE_ORDER_1032	3
     37#define CR_CONTROL_BYTE_ORDER_SHIFT	8
     38#define CR_CONTROL_ALGO_MD5	0
     39#define CR_CONTROL_ALGO_SHA1	1
     40#define CR_CONTROL_ALGO_SHA224	2
     41#define CR_CONTROL_ALGO_SHA256	3
     42
     43#define CR_INTSTAT			0x10
     44#define CR_INTENAB			0x14
     45#define CR_INTCLEAR			0x18
     46#define CR_INT_RESULTS_AVAILABLE	BIT(0)
     47#define CR_INT_NEW_RESULTS_SET		BIT(1)
     48#define CR_INT_RESULT_READ_ERR		BIT(2)
     49#define CR_INT_MESSAGE_WRITE_ERROR	BIT(3)
     50#define CR_INT_STATUS			BIT(8)
     51
     52#define CR_RESULT_QUEUE		0x1c
     53#define CR_RSD0				0x40
     54#define CR_CORE_REV			0x50
     55#define CR_CORE_DES1		0x60
     56#define CR_CORE_DES2		0x70
     57
     58#define DRIVER_FLAGS_BUSY		BIT(0)
     59#define DRIVER_FLAGS_FINAL		BIT(1)
     60#define DRIVER_FLAGS_DMA_ACTIVE		BIT(2)
     61#define DRIVER_FLAGS_OUTPUT_READY	BIT(3)
     62#define DRIVER_FLAGS_INIT		BIT(4)
     63#define DRIVER_FLAGS_CPU		BIT(5)
     64#define DRIVER_FLAGS_DMA_READY		BIT(6)
     65#define DRIVER_FLAGS_ERROR		BIT(7)
     66#define DRIVER_FLAGS_SG			BIT(8)
     67#define DRIVER_FLAGS_SHA1		BIT(18)
     68#define DRIVER_FLAGS_SHA224		BIT(19)
     69#define DRIVER_FLAGS_SHA256		BIT(20)
     70#define DRIVER_FLAGS_MD5		BIT(21)
     71
     72#define IMG_HASH_QUEUE_LENGTH		20
     73#define IMG_HASH_DMA_BURST		4
     74#define IMG_HASH_DMA_THRESHOLD		64
     75
     76#ifdef __LITTLE_ENDIAN
     77#define IMG_HASH_BYTE_ORDER		CR_CONTROL_BYTE_ORDER_3210
     78#else
     79#define IMG_HASH_BYTE_ORDER		CR_CONTROL_BYTE_ORDER_0123
     80#endif
     81
     82struct img_hash_dev;
     83
     84struct img_hash_request_ctx {
     85	struct img_hash_dev	*hdev;
     86	u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
     87	unsigned long		flags;
     88	size_t			digsize;
     89
     90	dma_addr_t		dma_addr;
     91	size_t			dma_ct;
     92
     93	/* sg root */
     94	struct scatterlist	*sgfirst;
     95	/* walk state */
     96	struct scatterlist	*sg;
     97	size_t			nents;
     98	size_t			offset;
     99	unsigned int		total;
    100	size_t			sent;
    101
    102	unsigned long		op;
    103
    104	size_t			bufcnt;
    105	struct ahash_request	fallback_req;
    106
    107	/* Zero length buffer must remain last member of struct */
    108	u8 buffer[] __aligned(sizeof(u32));
    109};
    110
    111struct img_hash_ctx {
    112	struct img_hash_dev	*hdev;
    113	unsigned long		flags;
    114	struct crypto_ahash	*fallback;
    115};
    116
    117struct img_hash_dev {
    118	struct list_head	list;
    119	struct device		*dev;
    120	struct clk		*hash_clk;
    121	struct clk		*sys_clk;
    122	void __iomem		*io_base;
    123
    124	phys_addr_t		bus_addr;
    125	void __iomem		*cpu_addr;
    126
    127	spinlock_t		lock;
    128	int			err;
    129	struct tasklet_struct	done_task;
    130	struct tasklet_struct	dma_task;
    131
    132	unsigned long		flags;
    133	struct crypto_queue	queue;
    134	struct ahash_request	*req;
    135
    136	struct dma_chan		*dma_lch;
    137};
    138
    139struct img_hash_drv {
    140	struct list_head dev_list;
    141	spinlock_t lock;
    142};
    143
    144static struct img_hash_drv img_hash = {
    145	.dev_list = LIST_HEAD_INIT(img_hash.dev_list),
    146	.lock = __SPIN_LOCK_UNLOCKED(img_hash.lock),
    147};
    148
    149static inline u32 img_hash_read(struct img_hash_dev *hdev, u32 offset)
    150{
    151	return readl_relaxed(hdev->io_base + offset);
    152}
    153
    154static inline void img_hash_write(struct img_hash_dev *hdev,
    155				  u32 offset, u32 value)
    156{
    157	writel_relaxed(value, hdev->io_base + offset);
    158}
    159
    160static inline u32 img_hash_read_result_queue(struct img_hash_dev *hdev)
    161{
    162	return be32_to_cpu(img_hash_read(hdev, CR_RESULT_QUEUE));
    163}
    164
    165static void img_hash_start(struct img_hash_dev *hdev, bool dma)
    166{
    167	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
    168	u32 cr = IMG_HASH_BYTE_ORDER << CR_CONTROL_BYTE_ORDER_SHIFT;
    169
    170	if (ctx->flags & DRIVER_FLAGS_MD5)
    171		cr |= CR_CONTROL_ALGO_MD5;
    172	else if (ctx->flags & DRIVER_FLAGS_SHA1)
    173		cr |= CR_CONTROL_ALGO_SHA1;
    174	else if (ctx->flags & DRIVER_FLAGS_SHA224)
    175		cr |= CR_CONTROL_ALGO_SHA224;
    176	else if (ctx->flags & DRIVER_FLAGS_SHA256)
    177		cr |= CR_CONTROL_ALGO_SHA256;
    178	dev_dbg(hdev->dev, "Starting hash process\n");
    179	img_hash_write(hdev, CR_CONTROL, cr);
    180
    181	/*
    182	 * The hardware block requires two cycles between writing the control
     183	 * register and writing the first word of data in non-DMA mode. To
     184	 * ensure the first data write is not grouped in a burst with the
     185	 * control register write, a read is issued to 'flush' the bus.
    186	 */
    187	if (!dma)
    188		img_hash_read(hdev, CR_CONTROL);
    189}
    190
    191static int img_hash_xmit_cpu(struct img_hash_dev *hdev, const u8 *buf,
    192			     size_t length, int final)
    193{
    194	u32 count, len32;
    195	const u32 *buffer = (const u32 *)buf;
    196
    197	dev_dbg(hdev->dev, "xmit_cpu:  length: %zu bytes\n", length);
    198
    199	if (final)
    200		hdev->flags |= DRIVER_FLAGS_FINAL;
    201
    202	len32 = DIV_ROUND_UP(length, sizeof(u32));
    203
    204	for (count = 0; count < len32; count++)
    205		writel_relaxed(buffer[count], hdev->cpu_addr);
    206
    207	return -EINPROGRESS;
    208}
    209
    210static void img_hash_dma_callback(void *data)
    211{
    212	struct img_hash_dev *hdev = (struct img_hash_dev *)data;
    213	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
    214
    215	if (ctx->bufcnt) {
    216		img_hash_xmit_cpu(hdev, ctx->buffer, ctx->bufcnt, 0);
    217		ctx->bufcnt = 0;
    218	}
    219	if (ctx->sg)
    220		tasklet_schedule(&hdev->dma_task);
    221}
    222
    223static int img_hash_xmit_dma(struct img_hash_dev *hdev, struct scatterlist *sg)
    224{
    225	struct dma_async_tx_descriptor *desc;
    226	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
    227
    228	ctx->dma_ct = dma_map_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
    229	if (ctx->dma_ct == 0) {
    230		dev_err(hdev->dev, "Invalid DMA sg\n");
    231		hdev->err = -EINVAL;
    232		return -EINVAL;
    233	}
    234
    235	desc = dmaengine_prep_slave_sg(hdev->dma_lch,
    236				       sg,
    237				       ctx->dma_ct,
    238				       DMA_MEM_TO_DEV,
    239				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
    240	if (!desc) {
    241		dev_err(hdev->dev, "Null DMA descriptor\n");
    242		hdev->err = -EINVAL;
    243		dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
    244		return -EINVAL;
    245	}
    246	desc->callback = img_hash_dma_callback;
    247	desc->callback_param = hdev;
    248	dmaengine_submit(desc);
    249	dma_async_issue_pending(hdev->dma_lch);
    250
    251	return 0;
    252}
    253
    254static int img_hash_write_via_cpu(struct img_hash_dev *hdev)
    255{
    256	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
    257
    258	ctx->bufcnt = sg_copy_to_buffer(hdev->req->src, sg_nents(ctx->sg),
    259					ctx->buffer, hdev->req->nbytes);
    260
    261	ctx->total = hdev->req->nbytes;
    262	ctx->bufcnt = 0;
    263
    264	hdev->flags |= (DRIVER_FLAGS_CPU | DRIVER_FLAGS_FINAL);
    265
    266	img_hash_start(hdev, false);
    267
    268	return img_hash_xmit_cpu(hdev, ctx->buffer, ctx->total, 1);
    269}
    270
    271static int img_hash_finish(struct ahash_request *req)
    272{
    273	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
    274
    275	if (!req->result)
    276		return -EINVAL;
    277
    278	memcpy(req->result, ctx->digest, ctx->digsize);
    279
    280	return 0;
    281}
    282
    283static void img_hash_copy_hash(struct ahash_request *req)
    284{
    285	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
    286	u32 *hash = (u32 *)ctx->digest;
    287	int i;
    288
    289	for (i = (ctx->digsize / sizeof(u32)) - 1; i >= 0; i--)
    290		hash[i] = img_hash_read_result_queue(ctx->hdev);
    291}
    292
    293static void img_hash_finish_req(struct ahash_request *req, int err)
    294{
    295	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
    296	struct img_hash_dev *hdev =  ctx->hdev;
    297
    298	if (!err) {
    299		img_hash_copy_hash(req);
    300		if (DRIVER_FLAGS_FINAL & hdev->flags)
    301			err = img_hash_finish(req);
    302	} else {
    303		dev_warn(hdev->dev, "Hash failed with error %d\n", err);
    304		ctx->flags |= DRIVER_FLAGS_ERROR;
    305	}
    306
    307	hdev->flags &= ~(DRIVER_FLAGS_DMA_READY | DRIVER_FLAGS_OUTPUT_READY |
    308		DRIVER_FLAGS_CPU | DRIVER_FLAGS_BUSY | DRIVER_FLAGS_FINAL);
    309
    310	if (req->base.complete)
    311		req->base.complete(&req->base, err);
    312}
    313
    314static int img_hash_write_via_dma(struct img_hash_dev *hdev)
    315{
    316	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
    317
    318	img_hash_start(hdev, true);
    319
    320	dev_dbg(hdev->dev, "xmit dma size: %d\n", ctx->total);
    321
    322	if (!ctx->total)
    323		hdev->flags |= DRIVER_FLAGS_FINAL;
    324
    325	hdev->flags |= DRIVER_FLAGS_DMA_ACTIVE | DRIVER_FLAGS_FINAL;
    326
    327	tasklet_schedule(&hdev->dma_task);
    328
    329	return -EINPROGRESS;
    330}
    331
    332static int img_hash_dma_init(struct img_hash_dev *hdev)
    333{
    334	struct dma_slave_config dma_conf;
    335	int err;
    336
    337	hdev->dma_lch = dma_request_chan(hdev->dev, "tx");
    338	if (IS_ERR(hdev->dma_lch)) {
    339		dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
    340		return PTR_ERR(hdev->dma_lch);
    341	}
    342	dma_conf.direction = DMA_MEM_TO_DEV;
    343	dma_conf.dst_addr = hdev->bus_addr;
    344	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
    345	dma_conf.dst_maxburst = IMG_HASH_DMA_BURST;
    346	dma_conf.device_fc = false;
    347
    348	err = dmaengine_slave_config(hdev->dma_lch,  &dma_conf);
    349	if (err) {
    350		dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
    351		dma_release_channel(hdev->dma_lch);
    352		return err;
    353	}
    354
    355	return 0;
    356}
    357
    358static void img_hash_dma_task(unsigned long d)
    359{
    360	struct img_hash_dev *hdev = (struct img_hash_dev *)d;
    361	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
    362	u8 *addr;
    363	size_t nbytes, bleft, wsend, len, tbc;
    364	struct scatterlist tsg;
    365
    366	if (!hdev->req || !ctx->sg)
    367		return;
    368
    369	addr = sg_virt(ctx->sg);
    370	nbytes = ctx->sg->length - ctx->offset;
    371
    372	/*
    373	 * The hash accelerator does not support a data valid mask. This means
    374	 * that if each dma (i.e. per page) is not a multiple of 4 bytes, the
    375	 * padding bytes in the last word written by that dma would erroneously
    376	 * be included in the hash. To avoid this we round down the transfer,
    377	 * and add the excess to the start of the next dma. It does not matter
    378	 * that the final dma may not be a multiple of 4 bytes as the hashing
    379	 * block is programmed to accept the correct number of bytes.
    380	 */
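	 /*
	  * For example: a 4099-byte sg entry is sent as 4096 bytes (1024
	  * words) over DMA; the trailing 3 bytes are staged in ctx->buffer
	  * (topped up from the next sg entry where possible) and are later
	  * written through the CPU port by img_hash_dma_callback().
	  */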
    381
    382	bleft = nbytes % 4;
    383	wsend = (nbytes / 4);
    384
    385	if (wsend) {
    386		sg_init_one(&tsg, addr + ctx->offset, wsend * 4);
    387		if (img_hash_xmit_dma(hdev, &tsg)) {
    388			dev_err(hdev->dev, "DMA failed, falling back to CPU");
    389			ctx->flags |= DRIVER_FLAGS_CPU;
    390			hdev->err = 0;
    391			img_hash_xmit_cpu(hdev, addr + ctx->offset,
    392					  wsend * 4, 0);
    393			ctx->sent += wsend * 4;
    394			wsend = 0;
    395		} else {
    396			ctx->sent += wsend * 4;
    397		}
    398	}
    399
    400	if (bleft) {
    401		ctx->bufcnt = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
    402						 ctx->buffer, bleft, ctx->sent);
    403		tbc = 0;
    404		ctx->sg = sg_next(ctx->sg);
    405		while (ctx->sg && (ctx->bufcnt < 4)) {
    406			len = ctx->sg->length;
    407			if (likely(len > (4 - ctx->bufcnt)))
    408				len = 4 - ctx->bufcnt;
    409			tbc = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
    410						 ctx->buffer + ctx->bufcnt, len,
    411					ctx->sent + ctx->bufcnt);
    412			ctx->bufcnt += tbc;
    413			if (tbc >= ctx->sg->length) {
    414				ctx->sg = sg_next(ctx->sg);
    415				tbc = 0;
    416			}
    417		}
    418
    419		ctx->sent += ctx->bufcnt;
    420		ctx->offset = tbc;
    421
    422		if (!wsend)
    423			img_hash_dma_callback(hdev);
    424	} else {
    425		ctx->offset = 0;
    426		ctx->sg = sg_next(ctx->sg);
    427	}
    428}
    429
    430static int img_hash_write_via_dma_stop(struct img_hash_dev *hdev)
    431{
    432	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
    433
    434	if (ctx->flags & DRIVER_FLAGS_SG)
    435		dma_unmap_sg(hdev->dev, ctx->sg, ctx->dma_ct, DMA_TO_DEVICE);
    436
    437	return 0;
    438}
    439
    440static int img_hash_process_data(struct img_hash_dev *hdev)
    441{
    442	struct ahash_request *req = hdev->req;
    443	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
    444	int err = 0;
    445
    446	ctx->bufcnt = 0;
    447
    448	if (req->nbytes >= IMG_HASH_DMA_THRESHOLD) {
    449		dev_dbg(hdev->dev, "process data request(%d bytes) using DMA\n",
    450			req->nbytes);
    451		err = img_hash_write_via_dma(hdev);
    452	} else {
    453		dev_dbg(hdev->dev, "process data request(%d bytes) using CPU\n",
    454			req->nbytes);
    455		err = img_hash_write_via_cpu(hdev);
    456	}
    457	return err;
    458}
    459
    460static int img_hash_hw_init(struct img_hash_dev *hdev)
    461{
    462	unsigned long long nbits;
    463	u32 u, l;
    464
    465	img_hash_write(hdev, CR_RESET, CR_RESET_SET);
    466	img_hash_write(hdev, CR_RESET, CR_RESET_UNSET);
    467	img_hash_write(hdev, CR_INTENAB, CR_INT_NEW_RESULTS_SET);
    468
    469	nbits = (u64)hdev->req->nbytes << 3;
    470	u = nbits >> 32;
    471	l = nbits;
    472	img_hash_write(hdev, CR_MESSAGE_LENGTH_H, u);
    473	img_hash_write(hdev, CR_MESSAGE_LENGTH_L, l);
    474
    475	if (!(DRIVER_FLAGS_INIT & hdev->flags)) {
    476		hdev->flags |= DRIVER_FLAGS_INIT;
    477		hdev->err = 0;
    478	}
    479	dev_dbg(hdev->dev, "hw initialized, nbits: %llx\n", nbits);
    480	return 0;
    481}
    482
    483static int img_hash_init(struct ahash_request *req)
    484{
    485	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
    486	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
    487	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
    488
    489	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
    490	rctx->fallback_req.base.flags =	req->base.flags
    491		& CRYPTO_TFM_REQ_MAY_SLEEP;
    492
    493	return crypto_ahash_init(&rctx->fallback_req);
    494}
    495
    496static int img_hash_handle_queue(struct img_hash_dev *hdev,
    497				 struct ahash_request *req)
    498{
    499	struct crypto_async_request *async_req, *backlog;
    500	struct img_hash_request_ctx *ctx;
    501	unsigned long flags;
    502	int err = 0, res = 0;
    503
    504	spin_lock_irqsave(&hdev->lock, flags);
    505
    506	if (req)
    507		res = ahash_enqueue_request(&hdev->queue, req);
    508
    509	if (DRIVER_FLAGS_BUSY & hdev->flags) {
    510		spin_unlock_irqrestore(&hdev->lock, flags);
    511		return res;
    512	}
    513
    514	backlog = crypto_get_backlog(&hdev->queue);
    515	async_req = crypto_dequeue_request(&hdev->queue);
    516	if (async_req)
    517		hdev->flags |= DRIVER_FLAGS_BUSY;
    518
    519	spin_unlock_irqrestore(&hdev->lock, flags);
    520
    521	if (!async_req)
    522		return res;
    523
    524	if (backlog)
    525		backlog->complete(backlog, -EINPROGRESS);
    526
    527	req = ahash_request_cast(async_req);
    528	hdev->req = req;
    529
    530	ctx = ahash_request_ctx(req);
    531
    532	dev_info(hdev->dev, "processing req, op: %lu, bytes: %d\n",
    533		 ctx->op, req->nbytes);
    534
    535	err = img_hash_hw_init(hdev);
    536
    537	if (!err)
    538		err = img_hash_process_data(hdev);
    539
    540	if (err != -EINPROGRESS) {
    541		/* done_task will not finish so do it here */
    542		img_hash_finish_req(req, err);
    543	}
    544	return res;
    545}
    546
    547static int img_hash_update(struct ahash_request *req)
    548{
    549	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
    550	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
    551	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
    552
    553	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
    554	rctx->fallback_req.base.flags = req->base.flags
    555		& CRYPTO_TFM_REQ_MAY_SLEEP;
    556	rctx->fallback_req.nbytes = req->nbytes;
    557	rctx->fallback_req.src = req->src;
    558
    559	return crypto_ahash_update(&rctx->fallback_req);
    560}
    561
    562static int img_hash_final(struct ahash_request *req)
    563{
    564	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
    565	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
    566	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
    567
    568	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
    569	rctx->fallback_req.base.flags = req->base.flags
    570		& CRYPTO_TFM_REQ_MAY_SLEEP;
    571	rctx->fallback_req.result = req->result;
    572
    573	return crypto_ahash_final(&rctx->fallback_req);
    574}
    575
    576static int img_hash_finup(struct ahash_request *req)
    577{
    578	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
    579	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
    580	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
    581
    582	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
    583	rctx->fallback_req.base.flags = req->base.flags
    584		& CRYPTO_TFM_REQ_MAY_SLEEP;
    585	rctx->fallback_req.nbytes = req->nbytes;
    586	rctx->fallback_req.src = req->src;
    587	rctx->fallback_req.result = req->result;
    588
    589	return crypto_ahash_finup(&rctx->fallback_req);
    590}
    591
    592static int img_hash_import(struct ahash_request *req, const void *in)
    593{
    594	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
    595	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
    596	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
    597
    598	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
    599	rctx->fallback_req.base.flags = req->base.flags
    600		& CRYPTO_TFM_REQ_MAY_SLEEP;
    601
    602	return crypto_ahash_import(&rctx->fallback_req, in);
    603}
    604
    605static int img_hash_export(struct ahash_request *req, void *out)
    606{
    607	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
    608	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
    609	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
    610
    611	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
    612	rctx->fallback_req.base.flags = req->base.flags
    613		& CRYPTO_TFM_REQ_MAY_SLEEP;
    614
    615	return crypto_ahash_export(&rctx->fallback_req, out);
    616}
    617
    618static int img_hash_digest(struct ahash_request *req)
    619{
    620	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
    621	struct img_hash_ctx *tctx = crypto_ahash_ctx(tfm);
    622	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
    623	struct img_hash_dev *hdev = NULL;
    624	struct img_hash_dev *tmp;
    625	int err;
    626
    627	spin_lock(&img_hash.lock);
    628	if (!tctx->hdev) {
    629		list_for_each_entry(tmp, &img_hash.dev_list, list) {
    630			hdev = tmp;
    631			break;
    632		}
    633		tctx->hdev = hdev;
    634
    635	} else {
    636		hdev = tctx->hdev;
    637	}
    638
    639	spin_unlock(&img_hash.lock);
    640	ctx->hdev = hdev;
    641	ctx->flags = 0;
    642	ctx->digsize = crypto_ahash_digestsize(tfm);
    643
    644	switch (ctx->digsize) {
    645	case SHA1_DIGEST_SIZE:
    646		ctx->flags |= DRIVER_FLAGS_SHA1;
    647		break;
    648	case SHA256_DIGEST_SIZE:
    649		ctx->flags |= DRIVER_FLAGS_SHA256;
    650		break;
    651	case SHA224_DIGEST_SIZE:
    652		ctx->flags |= DRIVER_FLAGS_SHA224;
    653		break;
    654	case MD5_DIGEST_SIZE:
    655		ctx->flags |= DRIVER_FLAGS_MD5;
    656		break;
    657	default:
    658		return -EINVAL;
    659	}
    660
    661	ctx->bufcnt = 0;
    662	ctx->offset = 0;
    663	ctx->sent = 0;
    664	ctx->total = req->nbytes;
    665	ctx->sg = req->src;
    666	ctx->sgfirst = req->src;
    667	ctx->nents = sg_nents(ctx->sg);
    668
    669	err = img_hash_handle_queue(tctx->hdev, req);
    670
    671	return err;
    672}
    673
    674static int img_hash_cra_init(struct crypto_tfm *tfm, const char *alg_name)
    675{
    676	struct img_hash_ctx *ctx = crypto_tfm_ctx(tfm);
    677
    678	ctx->fallback = crypto_alloc_ahash(alg_name, 0,
    679					   CRYPTO_ALG_NEED_FALLBACK);
    680	if (IS_ERR(ctx->fallback)) {
    681		pr_err("img_hash: Could not load fallback driver.\n");
    682		return PTR_ERR(ctx->fallback);
    683	}
    684	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
    685				 sizeof(struct img_hash_request_ctx) +
    686				 crypto_ahash_reqsize(ctx->fallback) +
    687				 IMG_HASH_DMA_THRESHOLD);
    688
    689	return 0;
    690}
    691
    692static int img_hash_cra_md5_init(struct crypto_tfm *tfm)
    693{
    694	return img_hash_cra_init(tfm, "md5-generic");
    695}
    696
    697static int img_hash_cra_sha1_init(struct crypto_tfm *tfm)
    698{
    699	return img_hash_cra_init(tfm, "sha1-generic");
    700}
    701
    702static int img_hash_cra_sha224_init(struct crypto_tfm *tfm)
    703{
    704	return img_hash_cra_init(tfm, "sha224-generic");
    705}
    706
    707static int img_hash_cra_sha256_init(struct crypto_tfm *tfm)
    708{
    709	return img_hash_cra_init(tfm, "sha256-generic");
    710}
    711
    712static void img_hash_cra_exit(struct crypto_tfm *tfm)
    713{
    714	struct img_hash_ctx *tctx = crypto_tfm_ctx(tfm);
    715
    716	crypto_free_ahash(tctx->fallback);
    717}
    718
    719static irqreturn_t img_irq_handler(int irq, void *dev_id)
    720{
    721	struct img_hash_dev *hdev = dev_id;
    722	u32 reg;
    723
    724	reg = img_hash_read(hdev, CR_INTSTAT);
    725	img_hash_write(hdev, CR_INTCLEAR, reg);
    726
    727	if (reg & CR_INT_NEW_RESULTS_SET) {
    728		dev_dbg(hdev->dev, "IRQ CR_INT_NEW_RESULTS_SET\n");
    729		if (DRIVER_FLAGS_BUSY & hdev->flags) {
    730			hdev->flags |= DRIVER_FLAGS_OUTPUT_READY;
    731			if (!(DRIVER_FLAGS_CPU & hdev->flags))
    732				hdev->flags |= DRIVER_FLAGS_DMA_READY;
    733			tasklet_schedule(&hdev->done_task);
    734		} else {
    735			dev_warn(hdev->dev,
    736				 "HASH interrupt when no active requests.\n");
    737		}
    738	} else if (reg & CR_INT_RESULTS_AVAILABLE) {
    739		dev_warn(hdev->dev,
    740			 "IRQ triggered before the hash had completed\n");
    741	} else if (reg & CR_INT_RESULT_READ_ERR) {
    742		dev_warn(hdev->dev,
    743			 "Attempt to read from an empty result queue\n");
    744	} else if (reg & CR_INT_MESSAGE_WRITE_ERROR) {
    745		dev_warn(hdev->dev,
    746			 "Data written before the hardware was configured\n");
    747	}
    748	return IRQ_HANDLED;
    749}
    750
    751static struct ahash_alg img_algs[] = {
    752	{
    753		.init = img_hash_init,
    754		.update = img_hash_update,
    755		.final = img_hash_final,
    756		.finup = img_hash_finup,
    757		.export = img_hash_export,
    758		.import = img_hash_import,
    759		.digest = img_hash_digest,
    760		.halg = {
    761			.digestsize = MD5_DIGEST_SIZE,
    762			.statesize = sizeof(struct md5_state),
    763			.base = {
    764				.cra_name = "md5",
    765				.cra_driver_name = "img-md5",
    766				.cra_priority = 300,
    767				.cra_flags =
    768				CRYPTO_ALG_ASYNC |
    769				CRYPTO_ALG_NEED_FALLBACK,
    770				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
    771				.cra_ctxsize = sizeof(struct img_hash_ctx),
    772				.cra_init = img_hash_cra_md5_init,
    773				.cra_exit = img_hash_cra_exit,
    774				.cra_module = THIS_MODULE,
    775			}
    776		}
    777	},
    778	{
    779		.init = img_hash_init,
    780		.update = img_hash_update,
    781		.final = img_hash_final,
    782		.finup = img_hash_finup,
    783		.export = img_hash_export,
    784		.import = img_hash_import,
    785		.digest = img_hash_digest,
    786		.halg = {
    787			.digestsize = SHA1_DIGEST_SIZE,
    788			.statesize = sizeof(struct sha1_state),
    789			.base = {
    790				.cra_name = "sha1",
    791				.cra_driver_name = "img-sha1",
    792				.cra_priority = 300,
    793				.cra_flags =
    794				CRYPTO_ALG_ASYNC |
    795				CRYPTO_ALG_NEED_FALLBACK,
    796				.cra_blocksize = SHA1_BLOCK_SIZE,
    797				.cra_ctxsize = sizeof(struct img_hash_ctx),
    798				.cra_init = img_hash_cra_sha1_init,
    799				.cra_exit = img_hash_cra_exit,
    800				.cra_module = THIS_MODULE,
    801			}
    802		}
    803	},
    804	{
    805		.init = img_hash_init,
    806		.update = img_hash_update,
    807		.final = img_hash_final,
    808		.finup = img_hash_finup,
    809		.export = img_hash_export,
    810		.import = img_hash_import,
    811		.digest = img_hash_digest,
    812		.halg = {
    813			.digestsize = SHA224_DIGEST_SIZE,
    814			.statesize = sizeof(struct sha256_state),
    815			.base = {
    816				.cra_name = "sha224",
    817				.cra_driver_name = "img-sha224",
    818				.cra_priority = 300,
    819				.cra_flags =
    820				CRYPTO_ALG_ASYNC |
    821				CRYPTO_ALG_NEED_FALLBACK,
    822				.cra_blocksize = SHA224_BLOCK_SIZE,
    823				.cra_ctxsize = sizeof(struct img_hash_ctx),
    824				.cra_init = img_hash_cra_sha224_init,
    825				.cra_exit = img_hash_cra_exit,
    826				.cra_module = THIS_MODULE,
    827			}
    828		}
    829	},
    830	{
    831		.init = img_hash_init,
    832		.update = img_hash_update,
    833		.final = img_hash_final,
    834		.finup = img_hash_finup,
    835		.export = img_hash_export,
    836		.import = img_hash_import,
    837		.digest = img_hash_digest,
    838		.halg = {
    839			.digestsize = SHA256_DIGEST_SIZE,
    840			.statesize = sizeof(struct sha256_state),
    841			.base = {
    842				.cra_name = "sha256",
    843				.cra_driver_name = "img-sha256",
    844				.cra_priority = 300,
    845				.cra_flags =
    846				CRYPTO_ALG_ASYNC |
    847				CRYPTO_ALG_NEED_FALLBACK,
    848				.cra_blocksize = SHA256_BLOCK_SIZE,
    849				.cra_ctxsize = sizeof(struct img_hash_ctx),
    850				.cra_init = img_hash_cra_sha256_init,
    851				.cra_exit = img_hash_cra_exit,
    852				.cra_module = THIS_MODULE,
    853			}
    854		}
    855	}
    856};
    857
    858static int img_register_algs(struct img_hash_dev *hdev)
    859{
    860	int i, err;
    861
    862	for (i = 0; i < ARRAY_SIZE(img_algs); i++) {
    863		err = crypto_register_ahash(&img_algs[i]);
    864		if (err)
    865			goto err_reg;
    866	}
    867	return 0;
    868
    869err_reg:
    870	for (; i--; )
    871		crypto_unregister_ahash(&img_algs[i]);
    872
    873	return err;
    874}
    875
    876static int img_unregister_algs(struct img_hash_dev *hdev)
    877{
    878	int i;
    879
    880	for (i = 0; i < ARRAY_SIZE(img_algs); i++)
    881		crypto_unregister_ahash(&img_algs[i]);
    882	return 0;
    883}
    884
    885static void img_hash_done_task(unsigned long data)
    886{
    887	struct img_hash_dev *hdev = (struct img_hash_dev *)data;
    888	int err = 0;
    889
    890	if (hdev->err == -EINVAL) {
    891		err = hdev->err;
    892		goto finish;
    893	}
    894
    895	if (!(DRIVER_FLAGS_BUSY & hdev->flags)) {
    896		img_hash_handle_queue(hdev, NULL);
    897		return;
    898	}
    899
    900	if (DRIVER_FLAGS_CPU & hdev->flags) {
    901		if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
    902			hdev->flags &= ~DRIVER_FLAGS_OUTPUT_READY;
    903			goto finish;
    904		}
    905	} else if (DRIVER_FLAGS_DMA_READY & hdev->flags) {
    906		if (DRIVER_FLAGS_DMA_ACTIVE & hdev->flags) {
    907			hdev->flags &= ~DRIVER_FLAGS_DMA_ACTIVE;
    908			img_hash_write_via_dma_stop(hdev);
    909			if (hdev->err) {
    910				err = hdev->err;
    911				goto finish;
    912			}
    913		}
    914		if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
    915			hdev->flags &= ~(DRIVER_FLAGS_DMA_READY |
    916					DRIVER_FLAGS_OUTPUT_READY);
    917			goto finish;
    918		}
    919	}
    920	return;
    921
    922finish:
    923	img_hash_finish_req(hdev->req, err);
    924}
    925
    926static const struct of_device_id img_hash_match[] = {
    927	{ .compatible = "img,hash-accelerator" },
    928	{}
    929};
    930MODULE_DEVICE_TABLE(of, img_hash_match);
    931
    932static int img_hash_probe(struct platform_device *pdev)
    933{
    934	struct img_hash_dev *hdev;
    935	struct device *dev = &pdev->dev;
    936	struct resource *hash_res;
    937	int	irq;
    938	int err;
    939
    940	hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
    941	if (hdev == NULL)
    942		return -ENOMEM;
    943
    944	spin_lock_init(&hdev->lock);
    945
    946	hdev->dev = dev;
    947
    948	platform_set_drvdata(pdev, hdev);
    949
    950	INIT_LIST_HEAD(&hdev->list);
    951
    952	tasklet_init(&hdev->done_task, img_hash_done_task, (unsigned long)hdev);
    953	tasklet_init(&hdev->dma_task, img_hash_dma_task, (unsigned long)hdev);
    954
    955	crypto_init_queue(&hdev->queue, IMG_HASH_QUEUE_LENGTH);
    956
    957	/* Register bank */
    958	hdev->io_base = devm_platform_ioremap_resource(pdev, 0);
    959	if (IS_ERR(hdev->io_base)) {
    960		err = PTR_ERR(hdev->io_base);
    961		goto res_err;
    962	}
    963
    964	/* Write port (DMA or CPU) */
    965	hash_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
    966	hdev->cpu_addr = devm_ioremap_resource(dev, hash_res);
    967	if (IS_ERR(hdev->cpu_addr)) {
    968		err = PTR_ERR(hdev->cpu_addr);
    969		goto res_err;
    970	}
    971	hdev->bus_addr = hash_res->start;
    972
    973	irq = platform_get_irq(pdev, 0);
    974	if (irq < 0) {
    975		err = irq;
    976		goto res_err;
    977	}
    978
    979	err = devm_request_irq(dev, irq, img_irq_handler, 0,
    980			       dev_name(dev), hdev);
    981	if (err) {
    982		dev_err(dev, "unable to request irq\n");
    983		goto res_err;
    984	}
    985	dev_dbg(dev, "using IRQ channel %d\n", irq);
    986
    987	hdev->hash_clk = devm_clk_get(&pdev->dev, "hash");
    988	if (IS_ERR(hdev->hash_clk)) {
    989		dev_err(dev, "clock initialization failed.\n");
    990		err = PTR_ERR(hdev->hash_clk);
    991		goto res_err;
    992	}
    993
    994	hdev->sys_clk = devm_clk_get(&pdev->dev, "sys");
    995	if (IS_ERR(hdev->sys_clk)) {
    996		dev_err(dev, "clock initialization failed.\n");
    997		err = PTR_ERR(hdev->sys_clk);
    998		goto res_err;
    999	}
   1000
   1001	err = clk_prepare_enable(hdev->hash_clk);
   1002	if (err)
   1003		goto res_err;
   1004
   1005	err = clk_prepare_enable(hdev->sys_clk);
   1006	if (err)
   1007		goto clk_err;
   1008
   1009	err = img_hash_dma_init(hdev);
   1010	if (err)
   1011		goto dma_err;
   1012
   1013	dev_dbg(dev, "using %s for DMA transfers\n",
   1014		dma_chan_name(hdev->dma_lch));
   1015
   1016	spin_lock(&img_hash.lock);
   1017	list_add_tail(&hdev->list, &img_hash.dev_list);
   1018	spin_unlock(&img_hash.lock);
   1019
   1020	err = img_register_algs(hdev);
   1021	if (err)
   1022		goto err_algs;
   1023	dev_info(dev, "Img MD5/SHA1/SHA224/SHA256 Hardware accelerator initialized\n");
   1024
   1025	return 0;
   1026
   1027err_algs:
   1028	spin_lock(&img_hash.lock);
   1029	list_del(&hdev->list);
   1030	spin_unlock(&img_hash.lock);
   1031	dma_release_channel(hdev->dma_lch);
   1032dma_err:
   1033	clk_disable_unprepare(hdev->sys_clk);
   1034clk_err:
   1035	clk_disable_unprepare(hdev->hash_clk);
   1036res_err:
   1037	tasklet_kill(&hdev->done_task);
   1038	tasklet_kill(&hdev->dma_task);
   1039
   1040	return err;
   1041}
   1042
   1043static int img_hash_remove(struct platform_device *pdev)
   1044{
   1045	struct img_hash_dev *hdev;
   1046
   1047	hdev = platform_get_drvdata(pdev);
   1048	spin_lock(&img_hash.lock);
   1049	list_del(&hdev->list);
   1050	spin_unlock(&img_hash.lock);
   1051
   1052	img_unregister_algs(hdev);
   1053
   1054	tasklet_kill(&hdev->done_task);
   1055	tasklet_kill(&hdev->dma_task);
   1056
   1057	dma_release_channel(hdev->dma_lch);
   1058
   1059	clk_disable_unprepare(hdev->hash_clk);
   1060	clk_disable_unprepare(hdev->sys_clk);
   1061
   1062	return 0;
   1063}
   1064
   1065#ifdef CONFIG_PM_SLEEP
   1066static int img_hash_suspend(struct device *dev)
   1067{
   1068	struct img_hash_dev *hdev = dev_get_drvdata(dev);
   1069
   1070	clk_disable_unprepare(hdev->hash_clk);
   1071	clk_disable_unprepare(hdev->sys_clk);
   1072
   1073	return 0;
   1074}
   1075
   1076static int img_hash_resume(struct device *dev)
   1077{
   1078	struct img_hash_dev *hdev = dev_get_drvdata(dev);
   1079	int ret;
   1080
   1081	ret = clk_prepare_enable(hdev->hash_clk);
   1082	if (ret)
   1083		return ret;
   1084
   1085	ret = clk_prepare_enable(hdev->sys_clk);
   1086	if (ret) {
   1087		clk_disable_unprepare(hdev->hash_clk);
   1088		return ret;
   1089	}
   1090
   1091	return 0;
   1092}
   1093#endif /* CONFIG_PM_SLEEP */
   1094
   1095static const struct dev_pm_ops img_hash_pm_ops = {
   1096	SET_SYSTEM_SLEEP_PM_OPS(img_hash_suspend, img_hash_resume)
   1097};
   1098
   1099static struct platform_driver img_hash_driver = {
   1100	.probe		= img_hash_probe,
   1101	.remove		= img_hash_remove,
   1102	.driver		= {
   1103		.name	= "img-hash-accelerator",
   1104		.pm	= &img_hash_pm_ops,
   1105		.of_match_table	= of_match_ptr(img_hash_match),
   1106	}
   1107};
   1108module_platform_driver(img_hash_driver);
   1109
   1110MODULE_LICENSE("GPL v2");
   1111MODULE_DESCRIPTION("Imgtec SHA1/224/256 & MD5 hw accelerator driver");
   1112MODULE_AUTHOR("Will Thomas.");
   1113MODULE_AUTHOR("James Hartley <james.hartley@imgtec.com>");
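
The algorithms this driver registers ("md5", "sha1", "sha224" and "sha256", with driver names "img-md5" through "img-sha256") are reached through the kernel's generic ahash API rather than called directly. The following sketch is illustrative only and not part of img-hash.c: a minimal synchronous wrapper around the asynchronous ahash interface, assuming a sleepable kernel context and linearly mapped input data, with error handling abbreviated.

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Hash 'len' bytes at 'data' (kmalloc'd memory) into 'out' (32 bytes). */
static int example_sha256(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	/* The crypto core picks the highest-priority "sha256" provider;
	 * on this hardware that may be the img-sha256 implementation. */
	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, out, len);

	/* The driver is CRYPTO_ALG_ASYNC, so digest may return -EINPROGRESS;
	 * crypto_wait_req() sleeps until the completion callback fires. */
	err = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err;
}

Userspace can reach the same implementations through the AF_ALG socket interface without any additional kernel code.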