cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

hash_core.c (51462B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * Cryptographic API.
      4 * Support for Nomadik hardware crypto engine.
      5 *
      6 * Copyright (C) ST-Ericsson SA 2010
      7 * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson
      8 * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson
      9 * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
     10 * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
     11 * Author: Andreas Westin <andreas.westin@stericsson.com> for ST-Ericsson.
     12 */
     13
     14#define pr_fmt(fmt) "hashX hashX: " fmt
     15
     16#include <linux/clk.h>
     17#include <linux/device.h>
     18#include <linux/dma-mapping.h>
     19#include <linux/err.h>
     20#include <linux/init.h>
     21#include <linux/io.h>
     22#include <linux/klist.h>
     23#include <linux/kernel.h>
     24#include <linux/module.h>
     25#include <linux/mod_devicetable.h>
     26#include <linux/platform_device.h>
     27#include <linux/crypto.h>
     28
     29#include <linux/regulator/consumer.h>
     30#include <linux/dmaengine.h>
     31#include <linux/bitops.h>
     32
     33#include <crypto/internal/hash.h>
     34#include <crypto/sha1.h>
     35#include <crypto/sha2.h>
     36#include <crypto/scatterwalk.h>
     37#include <crypto/algapi.h>
     38
     39#include <linux/platform_data/crypto-ux500.h>
     40
     41#include "hash_alg.h"
     42
     43static int hash_mode;
     44module_param(hash_mode, int, 0);
     45MODULE_PARM_DESC(hash_mode, "CPU or DMA mode. CPU = 0 (default), DMA = 1");
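       /*
        * Illustrative usage (module name assumed to be ux500_hash): passing
        * hash_mode=1 on the module command line selects DMA transfers; the
        * default (0) uses CPU-driven register writes.
        */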
     46
     47/* HMAC-SHA1, no key */
     48static const u8 zero_message_hmac_sha1[SHA1_DIGEST_SIZE] = {
     49	0xfb, 0xdb, 0x1d, 0x1b, 0x18, 0xaa, 0x6c, 0x08,
     50	0x32, 0x4b, 0x7d, 0x64, 0xb7, 0x1f, 0xb7, 0x63,
     51	0x70, 0x69, 0x0e, 0x1d
     52};
     53
     54/* HMAC-SHA256, no key */
     55static const u8 zero_message_hmac_sha256[SHA256_DIGEST_SIZE] = {
     56	0xb6, 0x13, 0x67, 0x9a, 0x08, 0x14, 0xd9, 0xec,
     57	0x77, 0x2f, 0x95, 0xd7, 0x78, 0xc3, 0x5f, 0xc5,
     58	0xff, 0x16, 0x97, 0xc4, 0x93, 0x71, 0x56, 0x53,
     59	0xc6, 0xc7, 0x12, 0x14, 0x42, 0x92, 0xc5, 0xad
     60};
     61
     62/**
     63 * struct hash_driver_data - data specific to the driver.
     64 *
     65 * @device_list:	A list of registered devices to choose from.
     66 * @device_allocation:	A semaphore initialized with number of devices.
     67 */
     68struct hash_driver_data {
     69	struct klist		device_list;
     70	struct semaphore	device_allocation;
     71};
     72
     73static struct hash_driver_data	driver_data;
     74
     75/* Declaration of functions */
     76/**
     77 * hash_messagepad - Pads a message and writes the nblw bits.
     78 * @device_data:	Structure for the hash device.
     79 * @message:		Last word of a message.
     80 * @index_bytes:	The number of bytes in the last message block.
     81 *
     82 * This function manages the final part of the digest calculation, when less
     83 * than 512 bits (64 bytes) remain in the message. This means index_bytes < 64.
     84 *
     85 */
     86static void hash_messagepad(struct hash_device_data *device_data,
     87			    const u32 *message, u8 index_bytes);
     88
     89/**
     90 * release_hash_device - Releases a previously allocated hash device.
     91 * @device_data:	Structure for the hash device.
     92 *
     93 */
     94static void release_hash_device(struct hash_device_data *device_data)
     95{
     96	spin_lock(&device_data->ctx_lock);
     97	device_data->current_ctx->device = NULL;
     98	device_data->current_ctx = NULL;
     99	spin_unlock(&device_data->ctx_lock);
    100
    101	/*
    102	 * The down_interruptible part for this semaphore is called in
    103	 * cryp_get_device_data.
    104	 */
    105	up(&driver_data.device_allocation);
    106}
    107
    108static void hash_dma_setup_channel(struct hash_device_data *device_data,
    109				   struct device *dev)
    110{
    111	struct hash_platform_data *platform_data = dev->platform_data;
    112	struct dma_slave_config conf = {
    113		.direction = DMA_MEM_TO_DEV,
    114		.dst_addr = device_data->phybase + HASH_DMA_FIFO,
    115		.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
    116		.dst_maxburst = 16,
    117	};
    118
    119	dma_cap_zero(device_data->dma.mask);
    120	dma_cap_set(DMA_SLAVE, device_data->dma.mask);
    121
    122	device_data->dma.cfg_mem2hash = platform_data->mem_to_engine;
    123	device_data->dma.chan_mem2hash =
    124		dma_request_channel(device_data->dma.mask,
    125				    platform_data->dma_filter,
    126				    device_data->dma.cfg_mem2hash);
    127
    128	dmaengine_slave_config(device_data->dma.chan_mem2hash, &conf);
    129
    130	init_completion(&device_data->dma.complete);
    131}
    132
    133static void hash_dma_callback(void *data)
    134{
    135	struct hash_ctx *ctx = data;
    136
    137	complete(&ctx->device->dma.complete);
    138}
    139
    140static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
    141				 int len, enum dma_data_direction direction)
    142{
    143	struct dma_async_tx_descriptor *desc = NULL;
    144	struct dma_chan *channel = NULL;
    145
    146	if (direction != DMA_TO_DEVICE) {
    147		dev_err(ctx->device->dev, "%s: Invalid DMA direction\n",
    148			__func__);
    149		return -EFAULT;
    150	}
    151
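       	/*
       	 * Round the entry length up to the DMA word size so whole words
       	 * are transferred; the number of valid bits in the final word is
       	 * programmed separately via NBLW (see hash_dma_final()).
       	 */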
    152	sg->length = ALIGN(sg->length, HASH_DMA_ALIGN_SIZE);
    153
    154	channel = ctx->device->dma.chan_mem2hash;
    155	ctx->device->dma.sg = sg;
    156	ctx->device->dma.sg_len = dma_map_sg(channel->device->dev,
    157			ctx->device->dma.sg, ctx->device->dma.nents,
    158			direction);
    159
    160	if (!ctx->device->dma.sg_len) {
    161		dev_err(ctx->device->dev, "%s: Could not map the sg list (TO_DEVICE)\n",
    162			__func__);
    163		return -EFAULT;
    164	}
    165
    166	dev_dbg(ctx->device->dev, "%s: Setting up DMA for buffer (TO_DEVICE)\n",
    167		__func__);
    168	desc = dmaengine_prep_slave_sg(channel,
    169			ctx->device->dma.sg, ctx->device->dma.sg_len,
    170			DMA_MEM_TO_DEV, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
    171	if (!desc) {
    172		dev_err(ctx->device->dev,
    173			"%s: dmaengine_prep_slave_sg() failed!\n", __func__);
    174		return -EFAULT;
    175	}
    176
    177	desc->callback = hash_dma_callback;
    178	desc->callback_param = ctx;
    179
    180	dmaengine_submit(desc);
    181	dma_async_issue_pending(channel);
    182
    183	return 0;
    184}
    185
    186static void hash_dma_done(struct hash_ctx *ctx)
    187{
    188	struct dma_chan *chan;
    189
    190	chan = ctx->device->dma.chan_mem2hash;
    191	dmaengine_terminate_all(chan);
    192	dma_unmap_sg(chan->device->dev, ctx->device->dma.sg,
    193		     ctx->device->dma.nents, DMA_TO_DEVICE);
    194}
    195
    196static int hash_dma_write(struct hash_ctx *ctx,
    197			  struct scatterlist *sg, int len)
    198{
    199	int error = hash_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE);
    200	if (error) {
    201		dev_dbg(ctx->device->dev,
    202			"%s: hash_set_dma_transfer() failed\n", __func__);
    203		return error;
    204	}
    205
    206	return len;
    207}
    208
    209/**
    210 * get_empty_message_digest - Returns a pre-calculated digest for
    211 * the empty message.
    212 * @device_data:	Structure for the hash device.
    213 * @zero_hash:		Buffer to return the empty message digest.
    214 * @zero_hash_size:	Hash size of the empty message digest.
    215 * @zero_digest:	True if a zero-message digest was returned.
    216 */
    217static int get_empty_message_digest(
    218		struct hash_device_data *device_data,
    219		u8 *zero_hash, u32 *zero_hash_size, bool *zero_digest)
    220{
    221	int ret = 0;
    222	struct hash_ctx *ctx = device_data->current_ctx;
    223	*zero_digest = false;
    224
    225	/**
    226	 * Caller responsible for ctx != NULL.
    227	 */
    228
    229	if (HASH_OPER_MODE_HASH == ctx->config.oper_mode) {
    230		if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
    231			memcpy(zero_hash, &sha1_zero_message_hash[0],
    232			       SHA1_DIGEST_SIZE);
    233			*zero_hash_size = SHA1_DIGEST_SIZE;
    234			*zero_digest = true;
    235		} else if (HASH_ALGO_SHA256 ==
    236				ctx->config.algorithm) {
    237			memcpy(zero_hash, &sha256_zero_message_hash[0],
    238			       SHA256_DIGEST_SIZE);
    239			*zero_hash_size = SHA256_DIGEST_SIZE;
    240			*zero_digest = true;
    241		} else {
    242			dev_err(device_data->dev, "%s: Incorrect algorithm!\n",
    243				__func__);
    244			ret = -EINVAL;
    245			goto out;
    246		}
    247	} else if (HASH_OPER_MODE_HMAC == ctx->config.oper_mode) {
    248		if (!ctx->keylen) {
    249			if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
    250				memcpy(zero_hash, &zero_message_hmac_sha1[0],
    251				       SHA1_DIGEST_SIZE);
    252				*zero_hash_size = SHA1_DIGEST_SIZE;
    253				*zero_digest = true;
    254			} else if (HASH_ALGO_SHA256 == ctx->config.algorithm) {
    255				memcpy(zero_hash, &zero_message_hmac_sha256[0],
    256				       SHA256_DIGEST_SIZE);
    257				*zero_hash_size = SHA256_DIGEST_SIZE;
    258				*zero_digest = true;
    259			} else {
    260				dev_err(device_data->dev, "%s: Incorrect algorithm!\n",
    261					__func__);
    262				ret = -EINVAL;
    263				goto out;
    264			}
    265		} else {
    266			dev_dbg(device_data->dev,
    267				"%s: Continue hash calculation, since hmac key available\n",
    268				__func__);
    269		}
    270	}
    271out:
    272
    273	return ret;
    274}
    275
    276/**
    277 * hash_disable_power - Request to disable power and clock.
    278 * @device_data:	Structure for the hash device.
    279 * @save_device_state:	If true, saves the current hw state.
    280 *
    281 * This function requests that power (regulator) and clock be disabled;
    282 * it can also save the current hw state.
    283 */
    284static int hash_disable_power(struct hash_device_data *device_data,
    285			      bool save_device_state)
    286{
    287	int ret = 0;
    288	struct device *dev = device_data->dev;
    289
    290	spin_lock(&device_data->power_state_lock);
    291	if (!device_data->power_state)
    292		goto out;
    293
    294	if (save_device_state) {
    295		hash_save_state(device_data,
    296				&device_data->state);
    297		device_data->restore_dev_state = true;
    298	}
    299
    300	clk_disable(device_data->clk);
    301	ret = regulator_disable(device_data->regulator);
    302	if (ret)
    303		dev_err(dev, "%s: regulator_disable() failed!\n", __func__);
    304
    305	device_data->power_state = false;
    306
    307out:
    308	spin_unlock(&device_data->power_state_lock);
    309
    310	return ret;
    311}
    312
    313/**
    314 * hash_enable_power - Request to enable power and clock.
    315 * @device_data:		Structure for the hash device.
    316 * @restore_device_state:	If true, restores a previous saved hw state.
    317 *
    318 * This function requests that power (regulator) and clock be enabled;
    319 * it can also restore a previously saved hw state.
    320 */
    321static int hash_enable_power(struct hash_device_data *device_data,
    322			     bool restore_device_state)
    323{
    324	int ret = 0;
    325	struct device *dev = device_data->dev;
    326
    327	spin_lock(&device_data->power_state_lock);
    328	if (!device_data->power_state) {
    329		ret = regulator_enable(device_data->regulator);
    330		if (ret) {
    331			dev_err(dev, "%s: regulator_enable() failed!\n",
    332				__func__);
    333			goto out;
    334		}
    335		ret = clk_enable(device_data->clk);
    336		if (ret) {
    337			dev_err(dev, "%s: clk_enable() failed!\n", __func__);
    338			ret = regulator_disable(
    339					device_data->regulator);
    340			goto out;
    341		}
    342		device_data->power_state = true;
    343	}
    344
    345	if (device_data->restore_dev_state) {
    346		if (restore_device_state) {
    347			device_data->restore_dev_state = false;
    348			hash_resume_state(device_data, &device_data->state);
    349		}
    350	}
    351out:
    352	spin_unlock(&device_data->power_state_lock);
    353
    354	return ret;
    355}
    356
    357/**
    358 * hash_get_device_data - Checks for an available hash device and return it.
    359 * @ctx:		Structure for the hash context.
    360 * @device_data:	Structure for the hash device.
    361 *
    362 * This function checks for an available hash device and returns it to
    363 * the caller.
    364 * Note! The caller needs to release the device by calling up().
    365 */
    366static int hash_get_device_data(struct hash_ctx *ctx,
    367				struct hash_device_data **device_data)
    368{
    369	int			ret;
    370	struct klist_iter	device_iterator;
    371	struct klist_node	*device_node;
    372	struct hash_device_data *local_device_data = NULL;
    373
    374	/* Wait until a device is available */
    375	ret = down_interruptible(&driver_data.device_allocation);
    376	if (ret)
    377		return ret;  /* Interrupted */
    378
    379	/* Select a device */
    380	klist_iter_init(&driver_data.device_list, &device_iterator);
    381	device_node = klist_next(&device_iterator);
    382	while (device_node) {
    383		local_device_data = container_of(device_node,
    384					   struct hash_device_data, list_node);
    385		spin_lock(&local_device_data->ctx_lock);
    386		/* current_ctx allocates a device, NULL = unallocated */
    387		if (local_device_data->current_ctx) {
    388			device_node = klist_next(&device_iterator);
    389		} else {
    390			local_device_data->current_ctx = ctx;
    391			ctx->device = local_device_data;
    392			spin_unlock(&local_device_data->ctx_lock);
    393			break;
    394		}
    395		spin_unlock(&local_device_data->ctx_lock);
    396	}
    397	klist_iter_exit(&device_iterator);
    398
    399	if (!device_node) {
    400		/**
    401		 * No free device found.
    402		 * Since down_interruptible() succeeded above, this should
    403		 * not be able to happen.
    404		 * The number of available devices, tracked by
    405		 * device_allocation, therefore stays decremented by not
    406		 * calling up(&device_allocation).
    407		 */
    408		return -EBUSY;
    409	}
    410
    411	*device_data = local_device_data;
    412
    413	return 0;
    414}
    415
    416/**
    417 * hash_hw_write_key - Writes the key to the hardware registers.
    418 *
    419 * @device_data:	Structure for the hash device.
    420 * @key:		Key to be written.
    421 * @keylen:		The length of the key.
    422 *
    423 * Note! This function DOES NOT write to the NBLW register, even though
    424 * specified in the hw design spec. Either due to incorrect info in the
    425 * spec or due to a bug in the hw.
    426 */
    427static void hash_hw_write_key(struct hash_device_data *device_data,
    428			      const u8 *key, unsigned int keylen)
    429{
    430	u32 word = 0;
    431	int nwords = 1;
    432
    433	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
    434
    435	while (keylen >= 4) {
    436		u32 *key_word = (u32 *)key;
    437
    438		HASH_SET_DIN(key_word, nwords);
    439		keylen -= 4;
    440		key += 4;
    441	}
    442
    443	/* Take care of the remaining bytes in the last word */
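       	/* Bytes are packed LSB first: key byte i ends up at bits 8*i..8*i+7. */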
    444	if (keylen) {
    445		word = 0;
    446		while (keylen) {
    447			word |= (key[keylen - 1] << (8 * (keylen - 1)));
    448			keylen--;
    449		}
    450
    451		HASH_SET_DIN(&word, nwords);
    452	}
    453
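       	/*
       	 * Wait for the peripheral to become idle, then set DCAL so the
       	 * key written above is processed, and wait for that to complete
       	 * before any message data follows.
       	 */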
    454	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
    455		cpu_relax();
    456
    457	HASH_SET_DCAL;
    458
    459	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
    460		cpu_relax();
    461}
    462
    463/**
    464 * init_hash_hw - Initialise the hash hardware for a new calculation.
    465 * @device_data:	Structure for the hash device.
    466 * @ctx:		The hash context.
    467 *
    468 * This function will enable the bits needed to clear and start a new
    469 * calculation.
    470 */
    471static int init_hash_hw(struct hash_device_data *device_data,
    472			struct hash_ctx *ctx)
    473{
    474	int ret = 0;
    475
    476	ret = hash_setconfiguration(device_data, &ctx->config);
    477	if (ret) {
    478		dev_err(device_data->dev, "%s: hash_setconfiguration() failed!\n",
    479			__func__);
    480		return ret;
    481	}
    482
    483	hash_begin(device_data, ctx);
    484
    485	if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
    486		hash_hw_write_key(device_data, ctx->key, ctx->keylen);
    487
    488	return ret;
    489}
    490
    491/**
    492 * hash_get_nents - Return number of entries (nents) in scatterlist (sg).
    493 *
    494 * @sg:		Scatterlist.
    495 * @size:	Size in bytes.
    496 * @aligned:	True if sg data aligned to work in DMA mode.
    497 *
    498 */
    499static int hash_get_nents(struct scatterlist *sg, int size, bool *aligned)
    500{
    501	int nents = 0;
    502	bool aligned_data = true;
    503
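       	/*
       	 * Walk the list until 'size' bytes are covered. The data is
       	 * considered DMA friendly when every entry offset is word aligned
       	 * and at most the last entry has a length that is not a whole
       	 * number of words.
       	 */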
    504	while (size > 0 && sg) {
    505		nents++;
    506		size -= sg->length;
    507
    508		/* hash_set_dma_transfer will align last nent */
    509		if ((aligned && !IS_ALIGNED(sg->offset, HASH_DMA_ALIGN_SIZE)) ||
    510		    (!IS_ALIGNED(sg->length, HASH_DMA_ALIGN_SIZE) && size > 0))
    511			aligned_data = false;
    512
    513		sg = sg_next(sg);
    514	}
    515
    516	if (aligned)
    517		*aligned = aligned_data;
    518
    519	if (size != 0)
    520		return -EFAULT;
    521
    522	return nents;
    523}
    524
    525/**
    526 * hash_dma_valid_data - checks for dma valid sg data.
    527 * @sg:		Scatterlist.
    528 * @datasize:	Datasize in bytes.
    529 *
    530 * NOTE! This function checks whether the sg data is valid for DMA,
    531 * since DMA only accepts data sizes that are whole words.
    532 */
    533static bool hash_dma_valid_data(struct scatterlist *sg, int datasize)
    534{
    535	bool aligned;
    536
    537	/* Need to include at least one nent, else error */
    538	if (hash_get_nents(sg, datasize, &aligned) < 1)
    539		return false;
    540
    541	return aligned;
    542}
    543
    544/**
    545 * ux500_hash_init - Common hash init function for SHA1/SHA2 (SHA256).
    546 * @req: The hash request for the job.
    547 *
    548 * Initialize structures.
    549 */
    550static int ux500_hash_init(struct ahash_request *req)
    551{
    552	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
    553	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
    554	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
    555
    556	if (!ctx->key)
    557		ctx->keylen = 0;
    558
    559	memset(&req_ctx->state, 0, sizeof(struct hash_state));
    560	req_ctx->updated = 0;
    561	if (hash_mode == HASH_MODE_DMA) {
    562		if (req->nbytes < HASH_DMA_ALIGN_SIZE) {
    563			req_ctx->dma_mode = false; /* Don't use DMA */
    564
    565			pr_debug("%s: DMA mode, but direct to CPU mode for data size < %d\n",
    566				 __func__, HASH_DMA_ALIGN_SIZE);
    567		} else {
    568			if (req->nbytes >= HASH_DMA_PERFORMANCE_MIN_SIZE &&
    569			    hash_dma_valid_data(req->src, req->nbytes)) {
    570				req_ctx->dma_mode = true;
    571			} else {
    572				req_ctx->dma_mode = false;
    573				pr_debug("%s: DMA mode, but use CPU mode for datalength < %d or non-aligned data, except in last nent\n",
    574					 __func__,
    575					 HASH_DMA_PERFORMANCE_MIN_SIZE);
    576			}
    577		}
    578	}
    579	return 0;
    580}
    581
    582/**
    583 * hash_processblock - This function processes a single block of 512 bits (64
    584 *                     bytes), word aligned, starting at message.
    585 * @device_data:	Structure for the hash device.
    586 * @message:		Block (512 bits) of message to be written to
    587 *			the HASH hardware.
    588 * @length:		Message length
    589 *
    590 */
    591static void hash_processblock(struct hash_device_data *device_data,
    592			      const u32 *message, int length)
    593{
    594	int len = length / HASH_BYTES_PER_WORD;
    595	/*
    596	 * NBLW bits. Reset the number of bits in last word (NBLW).
    597	 */
    598	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
    599
    600	/*
    601	 * Write message data to the HASH_DIN register.
    602	 */
    603	HASH_SET_DIN(message, len);
    604}
    605
    606/**
    607 * hash_messagepad - Pads a message and writes the nblw bits.
    608 * @device_data:	Structure for the hash device.
    609 * @message:		Last word of a message.
    610 * @index_bytes:	The number of bytes in the last message block.
    611 *
    612 * This function manages the final part of the digest calculation, when less
    613 * than 512 bits (64 bytes) remain in the message. This means index_bytes < 64.
    614 *
    615 */
    616static void hash_messagepad(struct hash_device_data *device_data,
    617			    const u32 *message, u8 index_bytes)
    618{
    619	int nwords = 1;
    620
    621	/*
    622	 * Clear hash str register, only clear NBLW
    623	 * since DCAL will be reset by hardware.
    624	 */
    625	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
    626
    627	/* Main loop */
    628	while (index_bytes >= 4) {
    629		HASH_SET_DIN(message, nwords);
    630		index_bytes -= 4;
    631		message++;
    632	}
    633
    634	if (index_bytes)
    635		HASH_SET_DIN(message, nwords);
    636
    637	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
    638		cpu_relax();
    639
    640	/* num_of_bytes == 0 => NBLW <- 0 (32 bits valid in DATAIN) */
    641	HASH_SET_NBLW(index_bytes * 8);
    642	dev_dbg(device_data->dev, "%s: DIN=0x%08x NBLW=%lu\n",
    643		__func__, readl_relaxed(&device_data->base->din),
    644		readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK);
    645	HASH_SET_DCAL;
    646	dev_dbg(device_data->dev, "%s: after dcal -> DIN=0x%08x NBLW=%lu\n",
    647		__func__, readl_relaxed(&device_data->base->din),
    648		readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK);
    649
    650	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
    651		cpu_relax();
    652}
    653
    654/**
    655 * hash_incrementlength - Increments the length of the current message.
    656 * @ctx: Hash context
    657 * @incr: Length of message processed already
    658 *
    659 * Overflow cannot occur, because conditions for overflow are checked in
    660 * hash_hw_update.
    661 */
    662static void hash_incrementlength(struct hash_req_ctx *ctx, u32 incr)
    663{
    664	ctx->state.length.low_word += incr;
    665
    666	/* Check for wrap-around */
    667	if (ctx->state.length.low_word < incr)
    668		ctx->state.length.high_word++;
    669}
    670
    671/**
    672 * hash_setconfiguration - Sets the required configuration for the hash
    673 *                         hardware.
    674 * @device_data:	Structure for the hash device.
    675 * @config:		Pointer to a configuration structure.
    676 */
    677int hash_setconfiguration(struct hash_device_data *device_data,
    678			  struct hash_config *config)
    679{
    680	int ret = 0;
    681
    682	if (config->algorithm != HASH_ALGO_SHA1 &&
    683	    config->algorithm != HASH_ALGO_SHA256)
    684		return -EPERM;
    685
    686	/*
    687	 * DATAFORM bits. Set the data format according to config->data_format
    688	 * (8-bit data in this driver), i.e. how HASH_DIN input data is handled.
    689	 */
    690	HASH_SET_DATA_FORMAT(config->data_format);
    691
    692	/*
    693	 * ALGO bit. Set to 0b1 for SHA-1 and 0b0 for SHA-256
    694	 */
    695	switch (config->algorithm) {
    696	case HASH_ALGO_SHA1:
    697		HASH_SET_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
    698		break;
    699
    700	case HASH_ALGO_SHA256:
    701		HASH_CLEAR_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
    702		break;
    703
    704	default:
    705		dev_err(device_data->dev, "%s: Incorrect algorithm\n",
    706			__func__);
    707		return -EPERM;
    708	}
    709
    710	/*
    711	 * MODE bit. This bit selects between HASH or HMAC mode for the
    712	 * selected algorithm. 0b0 = HASH and 0b1 = HMAC.
    713	 */
    714	if (HASH_OPER_MODE_HASH == config->oper_mode)
    715		HASH_CLEAR_BITS(&device_data->base->cr,
    716				HASH_CR_MODE_MASK);
    717	else if (HASH_OPER_MODE_HMAC == config->oper_mode) {
    718		HASH_SET_BITS(&device_data->base->cr, HASH_CR_MODE_MASK);
    719		if (device_data->current_ctx->keylen > HASH_BLOCK_SIZE) {
    720			/* Truncate key to blocksize */
    721			dev_dbg(device_data->dev, "%s: LKEY set\n", __func__);
    722			HASH_SET_BITS(&device_data->base->cr,
    723				      HASH_CR_LKEY_MASK);
    724		} else {
    725			dev_dbg(device_data->dev, "%s: LKEY cleared\n",
    726				__func__);
    727			HASH_CLEAR_BITS(&device_data->base->cr,
    728					HASH_CR_LKEY_MASK);
    729		}
    730	} else {	/* Wrong hash mode */
    731		ret = -EPERM;
    732		dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
    733			__func__);
    734	}
    735	return ret;
    736}
    737
    738/**
    739 * hash_begin - This routine resets some globals and initializes the hash
    740 *              hardware.
    741 * @device_data:	Structure for the hash device.
    742 * @ctx:		Hash context.
    743 */
    744void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx)
    745{
    746	/* HW and SW initializations */
    747	/* Note: there is no need to initialize buffer and digest members */
    748
    749	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
    750		cpu_relax();
    751
    752	/*
    753	 * INIT bit. Set this bit to 0b1 to reset the HASH processor core and
    754	 * prepare the HASH accelerator to compute the message
    755	 * digest of a new message.
    756	 */
    757	HASH_INITIALIZE;
    758
    759	/*
    760	 * NBLW bits. Reset the number of bits in last word (NBLW).
    761	 */
    762	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
    763}
    764
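       /**
        * hash_process_data - Feed message data to the hash hardware.
        * @device_data:	Structure for the hash device.
        * @ctx:		The hash context.
        * @req_ctx:	The request context.
        * @msg_length:	Number of bytes available at @data_buffer.
        * @data_buffer:	The message data to be processed.
        * @buffer:	The request's partial-block buffer.
        * @index:	In/out: number of bytes currently held in @buffer.
        *
        * Data is collected in @buffer until a full 512-bit block is
        * available; complete blocks are written to the hardware, with the
        * device state resumed before and saved after each block so the
        * device can be shared between requests.
        */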
    765static int hash_process_data(struct hash_device_data *device_data,
    766			     struct hash_ctx *ctx, struct hash_req_ctx *req_ctx,
    767			     int msg_length, u8 *data_buffer, u8 *buffer,
    768			     u8 *index)
    769{
    770	int ret = 0;
    771	u32 count;
    772
    773	do {
    774		if ((*index + msg_length) < HASH_BLOCK_SIZE) {
    775			for (count = 0; count < msg_length; count++) {
    776				buffer[*index + count] =
    777					*(data_buffer + count);
    778			}
    779			*index += msg_length;
    780			msg_length = 0;
    781		} else {
    782			if (req_ctx->updated) {
    783				ret = hash_resume_state(device_data,
    784						&device_data->state);
    785				memmove(req_ctx->state.buffer,
    786					device_data->state.buffer,
    787					HASH_BLOCK_SIZE);
    788				if (ret) {
    789					dev_err(device_data->dev,
    790						"%s: hash_resume_state() failed!\n",
    791						__func__);
    792					goto out;
    793				}
    794			} else {
    795				ret = init_hash_hw(device_data, ctx);
    796				if (ret) {
    797					dev_err(device_data->dev,
    798						"%s: init_hash_hw() failed!\n",
    799						__func__);
    800					goto out;
    801				}
    802				req_ctx->updated = 1;
    803			}
    804			/*
    805			 * If 'data_buffer' is four byte aligned and
    806			 * local buffer does not have any data, we can
    807			 * write data directly from 'data_buffer' to
    808			 * HW peripheral, otherwise we first copy data
    809			 * to a local buffer
    810			 */
    811			if (IS_ALIGNED((unsigned long)data_buffer, 4) &&
    812			    (0 == *index))
    813				hash_processblock(device_data,
    814						  (const u32 *)data_buffer,
    815						  HASH_BLOCK_SIZE);
    816			else {
    817				for (count = 0;
    818				     count < (u32)(HASH_BLOCK_SIZE - *index);
    819				     count++) {
    820					buffer[*index + count] =
    821						*(data_buffer + count);
    822				}
    823				hash_processblock(device_data,
    824						  (const u32 *)buffer,
    825						  HASH_BLOCK_SIZE);
    826			}
    827			hash_incrementlength(req_ctx, HASH_BLOCK_SIZE);
    828			data_buffer += (HASH_BLOCK_SIZE - *index);
    829
    830			msg_length -= (HASH_BLOCK_SIZE - *index);
    831			*index = 0;
    832
    833			ret = hash_save_state(device_data,
    834					&device_data->state);
    835
    836			memmove(device_data->state.buffer,
    837				req_ctx->state.buffer,
    838				HASH_BLOCK_SIZE);
    839			if (ret) {
    840				dev_err(device_data->dev, "%s: hash_save_state() failed!\n",
    841					__func__);
    842				goto out;
    843			}
    844		}
    845	} while (msg_length != 0);
    846out:
    847
    848	return ret;
    849}
    850
    851/**
    852 * hash_dma_final - The hash dma final function for SHA1/SHA256.
    853 * @req:	The hash request for the job.
    854 */
    855static int hash_dma_final(struct ahash_request *req)
    856{
    857	int ret = 0;
    858	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
    859	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
    860	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
    861	struct hash_device_data *device_data;
    862	u8 digest[SHA256_DIGEST_SIZE];
    863	int bytes_written = 0;
    864
    865	ret = hash_get_device_data(ctx, &device_data);
    866	if (ret)
    867		return ret;
    868
    869	dev_dbg(device_data->dev, "%s: (ctx=0x%lx)!\n", __func__,
    870		(unsigned long)ctx);
    871
    872	if (req_ctx->updated) {
    873		ret = hash_resume_state(device_data, &device_data->state);
    874
    875		if (ret) {
    876			dev_err(device_data->dev, "%s: hash_resume_state() failed!\n",
    877				__func__);
    878			goto out;
    879		}
    880	} else {
    881		ret = hash_setconfiguration(device_data, &ctx->config);
    882		if (ret) {
    883			dev_err(device_data->dev,
    884				"%s: hash_setconfiguration() failed!\n",
    885				__func__);
    886			goto out;
    887		}
    888
    889		/* Enable DMA input */
    890		if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode) {
    891			HASH_CLEAR_BITS(&device_data->base->cr,
    892					HASH_CR_DMAE_MASK);
    893		} else {
    894			HASH_SET_BITS(&device_data->base->cr,
    895				      HASH_CR_DMAE_MASK);
    896			HASH_SET_BITS(&device_data->base->cr,
    897				      HASH_CR_PRIVN_MASK);
    898		}
    899
    900		HASH_INITIALIZE;
    901
    902		if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
    903			hash_hw_write_key(device_data, ctx->key, ctx->keylen);
    904
    905		/* Number of bits in last word = (nbytes * 8) % 32 */
    906		HASH_SET_NBLW((req->nbytes * 8) % 32);
    907		req_ctx->updated = 1;
    908	}
    909
    910	/* Store the nents in the dma struct. */
    911	ctx->device->dma.nents = hash_get_nents(req->src, req->nbytes, NULL);
    912	if (!ctx->device->dma.nents) {
    913		dev_err(device_data->dev, "%s: ctx->device->dma.nents = 0\n",
    914			__func__);
    915		ret = ctx->device->dma.nents;
    916		goto out;
    917	}
    918
    919	bytes_written = hash_dma_write(ctx, req->src, req->nbytes);
    920	if (bytes_written != req->nbytes) {
    921		dev_err(device_data->dev, "%s: hash_dma_write() failed!\n",
    922			__func__);
    923		ret = bytes_written;
    924		goto out;
    925	}
    926
    927	wait_for_completion(&ctx->device->dma.complete);
    928	hash_dma_done(ctx);
    929
    930	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
    931		cpu_relax();
    932
    933	if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
    934		unsigned int keylen = ctx->keylen;
    935		u8 *key = ctx->key;
    936
    937		dev_dbg(device_data->dev, "%s: keylen: %d\n",
    938			__func__, ctx->keylen);
    939		hash_hw_write_key(device_data, key, keylen);
    940	}
    941
    942	hash_get_digest(device_data, digest, ctx->config.algorithm);
    943	memcpy(req->result, digest, ctx->digestsize);
    944
    945out:
    946	release_hash_device(device_data);
    947
    948	/**
    949	 * Allocated in setkey, and only used in HMAC.
    950	 */
    951	kfree(ctx->key);
    952
    953	return ret;
    954}
    955
    956/**
    957 * hash_hw_final - The final hash calculation function
    958 * @req:	The hash request for the job.
    959 */
    960static int hash_hw_final(struct ahash_request *req)
    961{
    962	int ret = 0;
    963	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
    964	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
    965	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
    966	struct hash_device_data *device_data;
    967	u8 digest[SHA256_DIGEST_SIZE];
    968
    969	ret = hash_get_device_data(ctx, &device_data);
    970	if (ret)
    971		return ret;
    972
    973	dev_dbg(device_data->dev, "%s: (ctx=0x%lx)!\n", __func__,
    974		(unsigned long)ctx);
    975
    976	if (req_ctx->updated) {
    977		ret = hash_resume_state(device_data, &device_data->state);
    978
    979		if (ret) {
    980			dev_err(device_data->dev,
    981				"%s: hash_resume_state() failed!\n", __func__);
    982			goto out;
    983		}
    984	} else if (req->nbytes == 0 && ctx->keylen == 0) {
    985		u8 zero_hash[SHA256_DIGEST_SIZE];
    986		u32 zero_hash_size = 0;
    987		bool zero_digest = false;
    988		/**
    989		 * Use a pre-calculated empty message digest
    990		 * (workaround since hw return zeroes, hw bug!?)
    991		 */
    992		ret = get_empty_message_digest(device_data, &zero_hash[0],
    993				&zero_hash_size, &zero_digest);
    994		if (!ret && likely(zero_hash_size == ctx->digestsize) &&
    995		    zero_digest) {
    996			memcpy(req->result, &zero_hash[0], ctx->digestsize);
    997			goto out;
    998		} else if (!ret && !zero_digest) {
    999			dev_dbg(device_data->dev,
   1000				"%s: HMAC zero msg with key, continue...\n",
   1001				__func__);
   1002		} else {
   1003			dev_err(device_data->dev,
   1004				"%s: ret=%d, or wrong digest size? %s\n",
   1005				__func__, ret,
   1006				zero_hash_size == ctx->digestsize ?
   1007				"true" : "false");
   1008			/* Return error */
   1009			goto out;
   1010		}
   1011	} else if (req->nbytes == 0 && ctx->keylen > 0) {
   1012		ret = -EPERM;
   1013		dev_err(device_data->dev, "%s: Empty message with keylength > 0, NOT supported\n",
   1014			__func__);
   1015		goto out;
   1016	}
   1017
   1018	if (!req_ctx->updated) {
   1019		ret = init_hash_hw(device_data, ctx);
   1020		if (ret) {
   1021			dev_err(device_data->dev,
   1022				"%s: init_hash_hw() failed!\n", __func__);
   1023			goto out;
   1024		}
   1025	}
   1026
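       	/*
       	 * If partial-block data is still buffered, pad it and write it
       	 * out; otherwise just set DCAL to finalize the digest of the
       	 * blocks already processed.
       	 */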
   1027	if (req_ctx->state.index) {
   1028		hash_messagepad(device_data, req_ctx->state.buffer,
   1029				req_ctx->state.index);
   1030	} else {
   1031		HASH_SET_DCAL;
   1032		while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
   1033			cpu_relax();
   1034	}
   1035
   1036	if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
   1037		unsigned int keylen = ctx->keylen;
   1038		u8 *key = ctx->key;
   1039
   1040		dev_dbg(device_data->dev, "%s: keylen: %d\n",
   1041			__func__, ctx->keylen);
   1042		hash_hw_write_key(device_data, key, keylen);
   1043	}
   1044
   1045	hash_get_digest(device_data, digest, ctx->config.algorithm);
   1046	memcpy(req->result, digest, ctx->digestsize);
   1047
   1048out:
   1049	release_hash_device(device_data);
   1050
   1051	/**
   1052	 * Allocated in setkey, and only used in HMAC.
   1053	 */
   1054	kfree(ctx->key);
   1055
   1056	return ret;
   1057}
   1058
   1059/**
   1060 * hash_hw_update - Updates current HASH computation hashing another part of
   1061 *                  the message.
   1062 * @req:	The hash request containing the message to be hashed (caller
   1063 *		allocated).
   1064 */
   1065int hash_hw_update(struct ahash_request *req)
   1066{
   1067	int ret = 0;
   1068	u8 index = 0;
   1069	u8 *buffer;
   1070	struct hash_device_data *device_data;
   1071	u8 *data_buffer;
   1072	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
   1073	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
   1074	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
   1075	struct crypto_hash_walk walk;
   1076	int msg_length;
   1077
   1078	index = req_ctx->state.index;
   1079	buffer = (u8 *)req_ctx->state.buffer;
   1080
   1081	ret = hash_get_device_data(ctx, &device_data);
   1082	if (ret)
   1083		return ret;
   1084
   1085	msg_length = crypto_hash_walk_first(req, &walk);
   1086
   1087	/* Empty message ("") is correct indata */
   1088	if (msg_length == 0) {
   1089		ret = 0;
   1090		goto release_dev;
   1091	}
   1092
   1093	/* Check if ctx->state.length + msg_length overflows */
   1094
   1095	if (msg_length > (req_ctx->state.length.low_word + msg_length) &&
   1096	    HASH_HIGH_WORD_MAX_VAL == req_ctx->state.length.high_word) {
   1097		pr_err("%s: HASH_MSG_LENGTH_OVERFLOW!\n", __func__);
   1098		ret = crypto_hash_walk_done(&walk, -EPERM);
   1099		goto release_dev;
   1100	}
   1101
   1102	/* Main loop */
   1103	while (0 != msg_length) {
   1104		data_buffer = walk.data;
   1105		ret = hash_process_data(device_data, ctx, req_ctx, msg_length,
   1106				data_buffer, buffer, &index);
   1107
   1108		if (ret) {
   1109			dev_err(device_data->dev, "%s: hash_process_data() failed!\n",
   1110				__func__);
   1111			crypto_hash_walk_done(&walk, ret);
   1112			goto release_dev;
   1113		}
   1114
   1115		msg_length = crypto_hash_walk_done(&walk, 0);
   1116	}
   1117
   1118	req_ctx->state.index = index;
   1119	dev_dbg(device_data->dev, "%s: indata length=%d, bin=%d\n",
   1120		__func__, req_ctx->state.index, req_ctx->state.bit_index);
   1121
   1122release_dev:
   1123	release_hash_device(device_data);
   1124
   1125	return ret;
   1126}
   1127
   1128/**
   1129 * hash_resume_state - Function that resumes the state of a calculation.
   1130 * @device_data:	Pointer to the device structure.
   1131 * @device_state:	The state to be restored in the hash hardware
   1132 */
   1133int hash_resume_state(struct hash_device_data *device_data,
   1134		      const struct hash_state *device_state)
   1135{
   1136	u32 temp_cr;
   1137	s32 count;
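       	/*
       	 * Note: this local shadows the file-scope hash_mode (the CPU/DMA
       	 * module parameter); here it holds the HASH vs. HMAC operating
       	 * mode read back from the CR register below.
       	 */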
   1138	int hash_mode = HASH_OPER_MODE_HASH;
   1139
   1140	if (NULL == device_state) {
   1141		dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
   1142			__func__);
   1143		return -EPERM;
   1144	}
   1145
   1146	/* Check correctness of index and length members */
   1147	if (device_state->index > HASH_BLOCK_SIZE ||
   1148	    (device_state->length.low_word % HASH_BLOCK_SIZE) != 0) {
   1149		dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
   1150			__func__);
   1151		return -EPERM;
   1152	}
   1153
   1154	/*
   1155	 * INIT bit. Set this bit to 0b1 to reset the HASH processor core and
   1156	 * prepare the HASH accelerator to compute the message
   1157	 * digest of a new message.
   1158	 */
   1159	HASH_INITIALIZE;
   1160
   1161	temp_cr = device_state->temp_cr;
   1162	writel_relaxed(temp_cr & HASH_CR_RESUME_MASK, &device_data->base->cr);
   1163
   1164	if (readl(&device_data->base->cr) & HASH_CR_MODE_MASK)
   1165		hash_mode = HASH_OPER_MODE_HMAC;
   1166	else
   1167		hash_mode = HASH_OPER_MODE_HASH;
   1168
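       	/*
       	 * Context registers at index 36 and above are only restored in
       	 * HMAC mode; plain HASH mode skips them (mirrored in
       	 * hash_save_state()).
       	 */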
   1169	for (count = 0; count < HASH_CSR_COUNT; count++) {
   1170		if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
   1171			break;
   1172
   1173		writel_relaxed(device_state->csr[count],
   1174			       &device_data->base->csrx[count]);
   1175	}
   1176
   1177	writel_relaxed(device_state->csfull, &device_data->base->csfull);
   1178	writel_relaxed(device_state->csdatain, &device_data->base->csdatain);
   1179
   1180	writel_relaxed(device_state->str_reg, &device_data->base->str);
   1181	writel_relaxed(temp_cr, &device_data->base->cr);
   1182
   1183	return 0;
   1184}
   1185
   1186/**
   1187 * hash_save_state - Function that saves the state of hardware.
   1188 * @device_data:	Pointer to the device structure.
   1189 * @device_state:	The structure where the hardware state should be saved.
   1190 */
   1191int hash_save_state(struct hash_device_data *device_data,
   1192		    struct hash_state *device_state)
   1193{
   1194	u32 temp_cr;
   1195	u32 count;
   1196	int hash_mode = HASH_OPER_MODE_HASH;
   1197
   1198	if (NULL == device_state) {
   1199		dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
   1200			__func__);
   1201		return -ENOTSUPP;
   1202	}
   1203
   1204	/*
   1205	 * Make sure that there is no ongoing digest calculation in the
   1206	 * hardware before reading back the state.
   1207	 */
   1208	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
   1209		cpu_relax();
   1210
   1211	temp_cr = readl_relaxed(&device_data->base->cr);
   1212
   1213	device_state->str_reg = readl_relaxed(&device_data->base->str);
   1214
   1215	device_state->din_reg = readl_relaxed(&device_data->base->din);
   1216
   1217	if (readl(&device_data->base->cr) & HASH_CR_MODE_MASK)
   1218		hash_mode = HASH_OPER_MODE_HMAC;
   1219	else
   1220		hash_mode = HASH_OPER_MODE_HASH;
   1221
   1222	for (count = 0; count < HASH_CSR_COUNT; count++) {
   1223		if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
   1224			break;
   1225
   1226		device_state->csr[count] =
   1227			readl_relaxed(&device_data->base->csrx[count]);
   1228	}
   1229
   1230	device_state->csfull = readl_relaxed(&device_data->base->csfull);
   1231	device_state->csdatain = readl_relaxed(&device_data->base->csdatain);
   1232
   1233	device_state->temp_cr = temp_cr;
   1234
   1235	return 0;
   1236}
   1237
   1238/**
   1239 * hash_check_hw - This routine checks for peripheral Ids and PCell Ids.
   1240 * @device_data:	Structure for the hash device.
   1241 *
   1242 */
   1243int hash_check_hw(struct hash_device_data *device_data)
   1244{
   1245	/* Checking Peripheral Ids  */
   1246	if (HASH_P_ID0 == readl_relaxed(&device_data->base->periphid0) &&
   1247	    HASH_P_ID1 == readl_relaxed(&device_data->base->periphid1) &&
   1248	    HASH_P_ID2 == readl_relaxed(&device_data->base->periphid2) &&
   1249	    HASH_P_ID3 == readl_relaxed(&device_data->base->periphid3) &&
   1250	    HASH_CELL_ID0 == readl_relaxed(&device_data->base->cellid0) &&
   1251	    HASH_CELL_ID1 == readl_relaxed(&device_data->base->cellid1) &&
   1252	    HASH_CELL_ID2 == readl_relaxed(&device_data->base->cellid2) &&
   1253	    HASH_CELL_ID3 == readl_relaxed(&device_data->base->cellid3)) {
   1254		return 0;
   1255	}
   1256
   1257	dev_err(device_data->dev, "%s: HASH_UNSUPPORTED_HW!\n", __func__);
   1258	return -ENOTSUPP;
   1259}
   1260
   1261/**
   1262 * hash_get_digest - Gets the digest.
   1263 * @device_data:	Pointer to the device structure.
   1264 * @digest:		User allocated byte array for the calculated digest.
   1265 * @algorithm:		The algorithm in use.
   1266 */
   1267void hash_get_digest(struct hash_device_data *device_data,
   1268		     u8 *digest, int algorithm)
   1269{
   1270	u32 temp_hx_val, count;
   1271	int loop_ctr;
   1272
   1273	if (algorithm != HASH_ALGO_SHA1 && algorithm != HASH_ALGO_SHA256) {
   1274		dev_err(device_data->dev, "%s: Incorrect algorithm %d\n",
   1275			__func__, algorithm);
   1276		return;
   1277	}
   1278
   1279	if (algorithm == HASH_ALGO_SHA1)
   1280		loop_ctr = SHA1_DIGEST_SIZE / sizeof(u32);
   1281	else
   1282		loop_ctr = SHA256_DIGEST_SIZE / sizeof(u32);
   1283
   1284	dev_dbg(device_data->dev, "%s: digest array:(0x%lx)\n",
   1285		__func__, (unsigned long)digest);
   1286
   1287	/* Copy result into digest array */
   1288	for (count = 0; count < loop_ctr; count++) {
   1289		temp_hx_val = readl_relaxed(&device_data->base->hx[count]);
   1290		digest[count * 4] = (u8) ((temp_hx_val >> 24) & 0xFF);
   1291		digest[count * 4 + 1] = (u8) ((temp_hx_val >> 16) & 0xFF);
   1292		digest[count * 4 + 2] = (u8) ((temp_hx_val >> 8) & 0xFF);
   1293		digest[count * 4 + 3] = (u8) ((temp_hx_val >> 0) & 0xFF);
   1294	}
   1295}
   1296
   1297/**
   1298 * ahash_update - The hash update function for SHA1/SHA2 (SHA256).
   1299 * @req: The hash request for the job.
   1300 */
   1301static int ahash_update(struct ahash_request *req)
   1302{
   1303	int ret = 0;
   1304	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
   1305
   1306	if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode)
   1307		ret = hash_hw_update(req);
   1308	/* Skip update for DMA, all data will be passed to DMA in final */
   1309
   1310	if (ret) {
   1311		pr_err("%s: hash_hw_update() failed!\n", __func__);
   1312	}
   1313
   1314	return ret;
   1315}
   1316
   1317/**
   1318 * ahash_final - The hash final function for SHA1/SHA2 (SHA256).
   1319 * @req:	The hash request for the job.
   1320 */
   1321static int ahash_final(struct ahash_request *req)
   1322{
   1323	int ret = 0;
   1324	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
   1325
   1326	pr_debug("%s: data size: %d\n", __func__, req->nbytes);
   1327
   1328	if ((hash_mode == HASH_MODE_DMA) && req_ctx->dma_mode)
   1329		ret = hash_dma_final(req);
   1330	else
   1331		ret = hash_hw_final(req);
   1332
   1333	if (ret) {
   1334		pr_err("%s: hash_hw/dma_final() failed\n", __func__);
   1335	}
   1336
   1337	return ret;
   1338}
   1339
   1340static int hash_setkey(struct crypto_ahash *tfm,
   1341		       const u8 *key, unsigned int keylen, int alg)
   1342{
   1343	int ret = 0;
   1344	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
   1345
   1346	/**
   1347	 * Freed in final.
   1348	 */
   1349	ctx->key = kmemdup(key, keylen, GFP_KERNEL);
   1350	if (!ctx->key) {
   1351		pr_err("%s: Failed to allocate ctx->key for %d\n",
   1352		       __func__, alg);
   1353		return -ENOMEM;
   1354	}
   1355	ctx->keylen = keylen;
   1356
   1357	return ret;
   1358}
   1359
   1360static int ahash_sha1_init(struct ahash_request *req)
   1361{
   1362	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
   1363	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
   1364
   1365	ctx->config.data_format = HASH_DATA_8_BITS;
   1366	ctx->config.algorithm = HASH_ALGO_SHA1;
   1367	ctx->config.oper_mode = HASH_OPER_MODE_HASH;
   1368	ctx->digestsize = SHA1_DIGEST_SIZE;
   1369
   1370	return ux500_hash_init(req);
   1371}
   1372
   1373static int ahash_sha256_init(struct ahash_request *req)
   1374{
   1375	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
   1376	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
   1377
   1378	ctx->config.data_format = HASH_DATA_8_BITS;
   1379	ctx->config.algorithm = HASH_ALGO_SHA256;
   1380	ctx->config.oper_mode = HASH_OPER_MODE_HASH;
   1381	ctx->digestsize = SHA256_DIGEST_SIZE;
   1382
   1383	return ux500_hash_init(req);
   1384}
   1385
   1386static int ahash_sha1_digest(struct ahash_request *req)
   1387{
   1388	int ret2, ret1;
   1389
   1390	ret1 = ahash_sha1_init(req);
   1391	if (ret1)
   1392		goto out;
   1393
   1394	ret1 = ahash_update(req);
   1395	ret2 = ahash_final(req);
   1396
   1397out:
   1398	return ret1 ? ret1 : ret2;
   1399}
   1400
   1401static int ahash_sha256_digest(struct ahash_request *req)
   1402{
   1403	int ret2, ret1;
   1404
   1405	ret1 = ahash_sha256_init(req);
   1406	if (ret1)
   1407		goto out;
   1408
   1409	ret1 = ahash_update(req);
   1410	ret2 = ahash_final(req);
   1411
   1412out:
   1413	return ret1 ? ret1 : ret2;
   1414}
   1415
   1416static int ahash_noimport(struct ahash_request *req, const void *in)
   1417{
   1418	return -ENOSYS;
   1419}
   1420
   1421static int ahash_noexport(struct ahash_request *req, void *out)
   1422{
   1423	return -ENOSYS;
   1424}
   1425
   1426static int hmac_sha1_init(struct ahash_request *req)
   1427{
   1428	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
   1429	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
   1430
   1431	ctx->config.data_format	= HASH_DATA_8_BITS;
   1432	ctx->config.algorithm	= HASH_ALGO_SHA1;
   1433	ctx->config.oper_mode	= HASH_OPER_MODE_HMAC;
   1434	ctx->digestsize		= SHA1_DIGEST_SIZE;
   1435
   1436	return ux500_hash_init(req);
   1437}
   1438
   1439static int hmac_sha256_init(struct ahash_request *req)
   1440{
   1441	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
   1442	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
   1443
   1444	ctx->config.data_format	= HASH_DATA_8_BITS;
   1445	ctx->config.algorithm	= HASH_ALGO_SHA256;
   1446	ctx->config.oper_mode	= HASH_OPER_MODE_HMAC;
   1447	ctx->digestsize		= SHA256_DIGEST_SIZE;
   1448
   1449	return ux500_hash_init(req);
   1450}
   1451
   1452static int hmac_sha1_digest(struct ahash_request *req)
   1453{
   1454	int ret2, ret1;
   1455
   1456	ret1 = hmac_sha1_init(req);
   1457	if (ret1)
   1458		goto out;
   1459
   1460	ret1 = ahash_update(req);
   1461	ret2 = ahash_final(req);
   1462
   1463out:
   1464	return ret1 ? ret1 : ret2;
   1465}
   1466
   1467static int hmac_sha256_digest(struct ahash_request *req)
   1468{
   1469	int ret2, ret1;
   1470
   1471	ret1 = hmac_sha256_init(req);
   1472	if (ret1)
   1473		goto out;
   1474
   1475	ret1 = ahash_update(req);
   1476	ret2 = ahash_final(req);
   1477
   1478out:
   1479	return ret1 ? ret1 : ret2;
   1480}
   1481
   1482static int hmac_sha1_setkey(struct crypto_ahash *tfm,
   1483			    const u8 *key, unsigned int keylen)
   1484{
   1485	return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA1);
   1486}
   1487
   1488static int hmac_sha256_setkey(struct crypto_ahash *tfm,
   1489			      const u8 *key, unsigned int keylen)
   1490{
   1491	return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA256);
   1492}
   1493
   1494struct hash_algo_template {
   1495	struct hash_config conf;
   1496	struct ahash_alg hash;
   1497};
   1498
   1499static int hash_cra_init(struct crypto_tfm *tfm)
   1500{
   1501	struct hash_ctx *ctx = crypto_tfm_ctx(tfm);
   1502	struct crypto_alg *alg = tfm->__crt_alg;
   1503	struct hash_algo_template *hash_alg;
   1504
   1505	hash_alg = container_of(__crypto_ahash_alg(alg),
   1506			struct hash_algo_template,
   1507			hash);
   1508
   1509	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
   1510				 sizeof(struct hash_req_ctx));
   1511
   1512	ctx->config.data_format = HASH_DATA_8_BITS;
   1513	ctx->config.algorithm = hash_alg->conf.algorithm;
   1514	ctx->config.oper_mode = hash_alg->conf.oper_mode;
   1515
   1516	ctx->digestsize = hash_alg->hash.halg.digestsize;
   1517
   1518	return 0;
   1519}
   1520
   1521static struct hash_algo_template hash_algs[] = {
   1522	{
   1523		.conf.algorithm = HASH_ALGO_SHA1,
   1524		.conf.oper_mode = HASH_OPER_MODE_HASH,
   1525		.hash = {
   1526			.init = ux500_hash_init,
   1527			.update = ahash_update,
   1528			.final = ahash_final,
   1529			.digest = ahash_sha1_digest,
   1530			.export = ahash_noexport,
   1531			.import = ahash_noimport,
   1532			.halg.digestsize = SHA1_DIGEST_SIZE,
   1533			.halg.statesize = sizeof(struct hash_ctx),
   1534			.halg.base = {
   1535				.cra_name = "sha1",
   1536				.cra_driver_name = "sha1-ux500",
   1537				.cra_flags = CRYPTO_ALG_ASYNC,
   1538				.cra_blocksize = SHA1_BLOCK_SIZE,
   1539				.cra_ctxsize = sizeof(struct hash_ctx),
   1540				.cra_init = hash_cra_init,
   1541				.cra_module = THIS_MODULE,
   1542			}
   1543		}
   1544	},
   1545	{
   1546		.conf.algorithm	= HASH_ALGO_SHA256,
   1547		.conf.oper_mode	= HASH_OPER_MODE_HASH,
   1548		.hash = {
   1549			.init = ux500_hash_init,
   1550			.update	= ahash_update,
   1551			.final = ahash_final,
   1552			.digest = ahash_sha256_digest,
   1553			.export = ahash_noexport,
   1554			.import = ahash_noimport,
   1555			.halg.digestsize = SHA256_DIGEST_SIZE,
   1556			.halg.statesize = sizeof(struct hash_ctx),
   1557			.halg.base = {
   1558				.cra_name = "sha256",
   1559				.cra_driver_name = "sha256-ux500",
   1560				.cra_flags = CRYPTO_ALG_ASYNC,
   1561				.cra_blocksize = SHA256_BLOCK_SIZE,
   1562				.cra_ctxsize = sizeof(struct hash_ctx),
   1563				.cra_init = hash_cra_init,
   1564				.cra_module = THIS_MODULE,
   1565			}
   1566		}
   1567	},
   1568	{
   1569		.conf.algorithm = HASH_ALGO_SHA1,
   1570		.conf.oper_mode = HASH_OPER_MODE_HMAC,
   1571		.hash = {
   1572			.init = ux500_hash_init,
   1573			.update = ahash_update,
   1574			.final = ahash_final,
   1575			.digest = hmac_sha1_digest,
   1576			.setkey = hmac_sha1_setkey,
   1577			.export = ahash_noexport,
   1578			.import = ahash_noimport,
   1579			.halg.digestsize = SHA1_DIGEST_SIZE,
   1580			.halg.statesize = sizeof(struct hash_ctx),
   1581			.halg.base = {
   1582				.cra_name = "hmac(sha1)",
   1583				.cra_driver_name = "hmac-sha1-ux500",
   1584				.cra_flags = CRYPTO_ALG_ASYNC,
   1585				.cra_blocksize = SHA1_BLOCK_SIZE,
   1586				.cra_ctxsize = sizeof(struct hash_ctx),
   1587				.cra_init = hash_cra_init,
   1588				.cra_module = THIS_MODULE,
   1589			}
   1590		}
   1591	},
   1592	{
   1593		.conf.algorithm = HASH_ALGO_SHA256,
   1594		.conf.oper_mode = HASH_OPER_MODE_HMAC,
   1595		.hash = {
   1596			.init = ux500_hash_init,
   1597			.update = ahash_update,
   1598			.final = ahash_final,
   1599			.digest = hmac_sha256_digest,
   1600			.setkey = hmac_sha256_setkey,
   1601			.export = ahash_noexport,
   1602			.import = ahash_noimport,
   1603			.halg.digestsize = SHA256_DIGEST_SIZE,
   1604			.halg.statesize = sizeof(struct hash_ctx),
   1605			.halg.base = {
   1606				.cra_name = "hmac(sha256)",
   1607				.cra_driver_name = "hmac-sha256-ux500",
   1608				.cra_flags = CRYPTO_ALG_ASYNC,
   1609				.cra_blocksize = SHA256_BLOCK_SIZE,
   1610				.cra_ctxsize = sizeof(struct hash_ctx),
   1611				.cra_init = hash_cra_init,
   1612				.cra_module = THIS_MODULE,
   1613			}
   1614		}
   1615	}
   1616};
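       /*
        * Illustrative use of the algorithms above through the generic ahash
        * API (sketch only; error handling omitted):
        *
        *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
        *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
        *
        *	ahash_request_set_callback(req, 0, NULL, NULL);
        *	ahash_request_set_crypt(req, sg, digest, nbytes);
        *	crypto_ahash_digest(req);
        *
        *	ahash_request_free(req);
        *	crypto_free_ahash(tfm);
        */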
   1617
   1618static int ahash_algs_register_all(struct hash_device_data *device_data)
   1619{
   1620	int ret;
   1621	int i;
   1622	int count;
   1623
   1624	for (i = 0; i < ARRAY_SIZE(hash_algs); i++) {
   1625		ret = crypto_register_ahash(&hash_algs[i].hash);
   1626		if (ret) {
   1627			count = i;
   1628			dev_err(device_data->dev, "%s: alg registration failed\n",
   1629				hash_algs[i].hash.halg.base.cra_driver_name);
   1630			goto unreg;
   1631		}
   1632	}
   1633	return 0;
   1634unreg:
   1635	for (i = 0; i < count; i++)
   1636		crypto_unregister_ahash(&hash_algs[i].hash);
   1637	return ret;
   1638}
   1639
   1640static void ahash_algs_unregister_all(struct hash_device_data *device_data)
   1641{
   1642	int i;
   1643
   1644	for (i = 0; i < ARRAY_SIZE(hash_algs); i++)
   1645		crypto_unregister_ahash(&hash_algs[i].hash);
   1646}
   1647
   1648/**
   1649 * ux500_hash_probe - Function that probes the hash hardware.
   1650 * @pdev: The platform device.
   1651 */
   1652static int ux500_hash_probe(struct platform_device *pdev)
   1653{
   1654	int			ret = 0;
   1655	struct resource		*res = NULL;
   1656	struct hash_device_data *device_data;
   1657	struct device		*dev = &pdev->dev;
   1658
   1659	device_data = devm_kzalloc(dev, sizeof(*device_data), GFP_KERNEL);
   1660	if (!device_data) {
   1661		ret = -ENOMEM;
   1662		goto out;
   1663	}
   1664
   1665	device_data->dev = dev;
   1666	device_data->current_ctx = NULL;
   1667
   1668	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
   1669	if (!res) {
   1670		dev_dbg(dev, "%s: platform_get_resource() failed!\n", __func__);
   1671		ret = -ENODEV;
   1672		goto out;
   1673	}
   1674
   1675	device_data->phybase = res->start;
   1676	device_data->base = devm_ioremap_resource(dev, res);
   1677	if (IS_ERR(device_data->base)) {
   1678		ret = PTR_ERR(device_data->base);
   1679		goto out;
   1680	}
   1681	spin_lock_init(&device_data->ctx_lock);
   1682	spin_lock_init(&device_data->power_state_lock);
   1683
   1684	/* Enable power for HASH1 hardware block */
   1685	device_data->regulator = regulator_get(dev, "v-ape");
   1686	if (IS_ERR(device_data->regulator)) {
   1687		dev_err(dev, "%s: regulator_get() failed!\n", __func__);
   1688		ret = PTR_ERR(device_data->regulator);
   1689		device_data->regulator = NULL;
   1690		goto out;
   1691	}
   1692
   1693	/* Enable the clock for HASH1 hardware block */
   1694	device_data->clk = devm_clk_get(dev, NULL);
   1695	if (IS_ERR(device_data->clk)) {
   1696		dev_err(dev, "%s: clk_get() failed!\n", __func__);
   1697		ret = PTR_ERR(device_data->clk);
   1698		goto out_regulator;
   1699	}
   1700
   1701	ret = clk_prepare(device_data->clk);
   1702	if (ret) {
   1703		dev_err(dev, "%s: clk_prepare() failed!\n", __func__);
   1704		goto out_regulator;
   1705	}
   1706
   1707	/* Enable device power (and clock) */
   1708	ret = hash_enable_power(device_data, false);
   1709	if (ret) {
   1710		dev_err(dev, "%s: hash_enable_power() failed!\n", __func__);
   1711		goto out_clk_unprepare;
   1712	}
   1713
   1714	ret = hash_check_hw(device_data);
   1715	if (ret) {
   1716		dev_err(dev, "%s: hash_check_hw() failed!\n", __func__);
   1717		goto out_power;
   1718	}
   1719
   1720	if (hash_mode == HASH_MODE_DMA)
   1721		hash_dma_setup_channel(device_data, dev);
   1722
   1723	platform_set_drvdata(pdev, device_data);
   1724
   1725	/* Put the new device into the device list... */
   1726	klist_add_tail(&device_data->list_node, &driver_data.device_list);
   1727	/* ... and signal that a new device is available. */
   1728	up(&driver_data.device_allocation);
   1729
   1730	ret = ahash_algs_register_all(device_data);
   1731	if (ret) {
   1732		dev_err(dev, "%s: ahash_algs_register_all() failed!\n",
   1733			__func__);
   1734		goto out_power;
   1735	}
   1736
   1737	dev_info(dev, "successfully registered\n");
   1738	return 0;
   1739
   1740out_power:
   1741	hash_disable_power(device_data, false);
   1742
   1743out_clk_unprepare:
   1744	clk_unprepare(device_data->clk);
   1745
   1746out_regulator:
   1747	regulator_put(device_data->regulator);
   1748
   1749out:
   1750	return ret;
   1751}
   1752
   1753/**
   1754 * ux500_hash_remove - Function that removes the hash device from the platform.
   1755 * @pdev: The platform device.
   1756 */
   1757static int ux500_hash_remove(struct platform_device *pdev)
   1758{
   1759	struct hash_device_data *device_data;
   1760	struct device		*dev = &pdev->dev;
   1761
   1762	device_data = platform_get_drvdata(pdev);
   1763	if (!device_data) {
   1764		dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__);
   1765		return -ENOMEM;
   1766	}
   1767
   1768	/* Try to decrease the number of available devices. */
   1769	if (down_trylock(&driver_data.device_allocation))
   1770		return -EBUSY;
   1771
   1772	/* Check that the device is free */
   1773	spin_lock(&device_data->ctx_lock);
   1774	/* current_ctx allocates a device, NULL = unallocated */
   1775	if (device_data->current_ctx) {
   1776		/* The device is busy */
   1777		spin_unlock(&device_data->ctx_lock);
   1778		/* Return the device to the pool. */
   1779		up(&driver_data.device_allocation);
   1780		return -EBUSY;
   1781	}
   1782
   1783	spin_unlock(&device_data->ctx_lock);
   1784
   1785	/* Remove the device from the list */
   1786	if (klist_node_attached(&device_data->list_node))
   1787		klist_remove(&device_data->list_node);
   1788
   1789	/* If this was the last device, remove the services */
   1790	if (list_empty(&driver_data.device_list.k_list))
   1791		ahash_algs_unregister_all(device_data);
   1792
   1793	if (hash_disable_power(device_data, false))
   1794		dev_err(dev, "%s: hash_disable_power() failed\n",
   1795			__func__);
   1796
   1797	clk_unprepare(device_data->clk);
   1798	regulator_put(device_data->regulator);
   1799
   1800	return 0;
   1801}
   1802
   1803/**
    1804 * ux500_hash_shutdown - Function that shuts down the hash device.
   1805 * @pdev: The platform device
   1806 */
   1807static void ux500_hash_shutdown(struct platform_device *pdev)
   1808{
   1809	struct hash_device_data *device_data;
   1810
   1811	device_data = platform_get_drvdata(pdev);
   1812	if (!device_data) {
   1813		dev_err(&pdev->dev, "%s: platform_get_drvdata() failed!\n",
   1814			__func__);
   1815		return;
   1816	}
   1817
   1818	/* Check that the device is free */
   1819	spin_lock(&device_data->ctx_lock);
   1820	/* current_ctx allocates a device, NULL = unallocated */
   1821	if (!device_data->current_ctx) {
   1822		if (down_trylock(&driver_data.device_allocation))
    1823			dev_dbg(&pdev->dev, "%s: Hash still in use! Shutting down anyway...\n",
   1824				__func__);
    1825		/*
    1826		 * Allocate the device: set current_ctx to a non-NULL dummy
    1827		 * value so that no new context can claim the device while it
    1828		 * is shutting down.
    1829		 */
   1830		device_data->current_ctx++;
   1831	}
   1832	spin_unlock(&device_data->ctx_lock);
   1833
   1834	/* Remove the device from the list */
   1835	if (klist_node_attached(&device_data->list_node))
   1836		klist_remove(&device_data->list_node);
   1837
   1838	/* If this was the last device, remove the services */
   1839	if (list_empty(&driver_data.device_list.k_list))
   1840		ahash_algs_unregister_all(device_data);
   1841
   1842	if (hash_disable_power(device_data, false))
   1843		dev_err(&pdev->dev, "%s: hash_disable_power() failed\n",
   1844			__func__);
   1845}
   1846
   1847#ifdef CONFIG_PM_SLEEP
   1848/**
   1849 * ux500_hash_suspend - Function that suspends the hash device.
   1850 * @dev:	Device to suspend.
   1851 */
   1852static int ux500_hash_suspend(struct device *dev)
   1853{
   1854	int ret;
   1855	struct hash_device_data *device_data;
   1856	struct hash_ctx *temp_ctx = NULL;
   1857
   1858	device_data = dev_get_drvdata(dev);
   1859	if (!device_data) {
    1860		dev_err(dev, "%s: dev_get_drvdata() failed!\n", __func__);
   1861		return -ENOMEM;
   1862	}
   1863
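	/*
	 * If no context owns the device, reserve it by bumping the NULL
	 * current_ctx pointer to a dummy non-NULL value; ++temp_ctx below
	 * yields that same dummy value, so the comparison distinguishes an
	 * idle device (reserved here, in which case the allocation
	 * semaphore is also taken) from one owned by a real context.
	 */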
   1864	spin_lock(&device_data->ctx_lock);
   1865	if (!device_data->current_ctx)
   1866		device_data->current_ctx++;
   1867	spin_unlock(&device_data->ctx_lock);
   1868
   1869	if (device_data->current_ctx == ++temp_ctx) {
   1870		if (down_interruptible(&driver_data.device_allocation))
   1871			dev_dbg(dev, "%s: down_interruptible() failed\n",
   1872				__func__);
   1873		ret = hash_disable_power(device_data, false);
   1874
   1875	} else {
   1876		ret = hash_disable_power(device_data, true);
   1877	}
   1878
   1879	if (ret)
    1880		dev_err(dev, "%s: hash_disable_power() failed!\n", __func__);
   1881
   1882	return ret;
   1883}
   1884
   1885/**
    1886 * ux500_hash_resume - Function that resumes the hash device.
   1887 * @dev:	Device to resume.
   1888 */
   1889static int ux500_hash_resume(struct device *dev)
   1890{
   1891	int ret = 0;
   1892	struct hash_device_data *device_data;
   1893	struct hash_ctx *temp_ctx = NULL;
   1894
   1895	device_data = dev_get_drvdata(dev);
   1896	if (!device_data) {
    1897		dev_err(dev, "%s: dev_get_drvdata() failed!\n", __func__);
   1898		return -ENOMEM;
   1899	}
   1900
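	/*
	 * Undo the dummy reservation made in suspend: if current_ctx still
	 * holds the dummy value the device was idle, so clear it and return
	 * the device to the allocation pool; otherwise a real context owns
	 * the device and power is re-enabled for that context.
	 */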
   1901	spin_lock(&device_data->ctx_lock);
   1902	if (device_data->current_ctx == ++temp_ctx)
   1903		device_data->current_ctx = NULL;
   1904	spin_unlock(&device_data->ctx_lock);
   1905
   1906	if (!device_data->current_ctx)
   1907		up(&driver_data.device_allocation);
   1908	else
   1909		ret = hash_enable_power(device_data, true);
   1910
   1911	if (ret)
   1912		dev_err(dev, "%s: hash_enable_power() failed!\n", __func__);
   1913
   1914	return ret;
   1915}
   1916#endif
   1917
   1918static SIMPLE_DEV_PM_OPS(ux500_hash_pm, ux500_hash_suspend, ux500_hash_resume);
   1919
   1920static const struct of_device_id ux500_hash_match[] = {
   1921	{ .compatible = "stericsson,ux500-hash" },
   1922	{ },
   1923};
   1924MODULE_DEVICE_TABLE(of, ux500_hash_match);
   1925
   1926static struct platform_driver hash_driver = {
   1927	.probe  = ux500_hash_probe,
   1928	.remove = ux500_hash_remove,
   1929	.shutdown = ux500_hash_shutdown,
   1930	.driver = {
   1931		.name  = "hash1",
   1932		.of_match_table = ux500_hash_match,
   1933		.pm    = &ux500_hash_pm,
   1934	}
   1935};
   1936
   1937/**
   1938 * ux500_hash_mod_init - The kernel module init function.
   1939 */
   1940static int __init ux500_hash_mod_init(void)
   1941{
   1942	klist_init(&driver_data.device_list, NULL, NULL);
   1943	/* Initialize the semaphore to 0 devices (locked state) */
   1944	sema_init(&driver_data.device_allocation, 0);
   1945
   1946	return platform_driver_register(&hash_driver);
   1947}
   1948
   1949/**
   1950 * ux500_hash_mod_fini - The kernel module exit function.
   1951 */
   1952static void __exit ux500_hash_mod_fini(void)
   1953{
   1954	platform_driver_unregister(&hash_driver);
   1955}
   1956
   1957module_init(ux500_hash_mod_init);
   1958module_exit(ux500_hash_mod_fini);
   1959
   1960MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 HASH engine.");
   1961MODULE_LICENSE("GPL");
   1962
   1963MODULE_ALIAS_CRYPTO("sha1-all");
   1964MODULE_ALIAS_CRYPTO("sha256-all");
   1965MODULE_ALIAS_CRYPTO("hmac-sha1-all");
   1966MODULE_ALIAS_CRYPTO("hmac-sha256-all");
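
/*
 * Minimal sketch (not part of hash_core.c) of how a kernel client might
 * exercise the "hmac(sha256)" implementation registered above, using the
 * generic ahash API.  The helper name hmac_sha256_one_shot is illustrative;
 * whether this driver or a software implementation services the request
 * depends on algorithm priorities at runtime, and the data buffer is
 * assumed to be in linear kernel memory (sg_init_one() cannot map the
 * stack).
 */
#include <crypto/hash.h>
#include <linux/scatterlist.h>

static int hmac_sha256_one_shot(const u8 *key, unsigned int keylen,
				const void *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ahash_setkey(tfm, key, keylen);
	if (ret)
		goto out_free_tfm;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	sg_init_one(&sg, data, len);
	ahash_request_set_crypt(req, &sg, out, len);

	/* Wait synchronously for the (possibly asynchronous) digest. */
	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
out_free_tfm:
	crypto_free_ahash(tfm);
	return ret;
}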