cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

rk3288_crypto.c (10919B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Crypto acceleration support for Rockchip RK3288
 *
 * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
 *
 * Author: Zain Wang <zain.wang@rock-chips.com>
 *
 * Some ideas are from the marvell-cesa.c and s5p-sss.c drivers.
 */

#include "rk3288_crypto.h"
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/reset.h>

static int rk_crypto_enable_clk(struct rk_crypto_info *dev)
{
	int err;

	err = clk_prepare_enable(dev->sclk);
	if (err) {
		dev_err(dev->dev, "[%s:%d], Couldn't enable clock sclk\n",
			__func__, __LINE__);
		goto err_return;
	}
	err = clk_prepare_enable(dev->aclk);
	if (err) {
		dev_err(dev->dev, "[%s:%d], Couldn't enable clock aclk\n",
			__func__, __LINE__);
		goto err_aclk;
	}
	err = clk_prepare_enable(dev->hclk);
	if (err) {
		dev_err(dev->dev, "[%s:%d], Couldn't enable clock hclk\n",
			__func__, __LINE__);
		goto err_hclk;
	}
	err = clk_prepare_enable(dev->dmaclk);
	if (err) {
		dev_err(dev->dev, "[%s:%d], Couldn't enable clock dmaclk\n",
			__func__, __LINE__);
		goto err_dmaclk;
	}
	return err;
err_dmaclk:
	clk_disable_unprepare(dev->hclk);
err_hclk:
	clk_disable_unprepare(dev->aclk);
err_aclk:
	clk_disable_unprepare(dev->sclk);
err_return:
	return err;
}

static void rk_crypto_disable_clk(struct rk_crypto_info *dev)
{
	clk_disable_unprepare(dev->dmaclk);
	clk_disable_unprepare(dev->hclk);
	clk_disable_unprepare(dev->aclk);
	clk_disable_unprepare(dev->sclk);
}

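/*
 * The DMA engine requires word-aligned buffer offsets and lengths that
 * are a multiple of the algorithm's alignment mask; when a destination
 * scatterlist is given, both sides must qualify and their lengths must
 * match.
 */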
static int check_alignment(struct scatterlist *sg_src,
			   struct scatterlist *sg_dst,
			   int align_mask)
{
	int in, out, align;

	in = IS_ALIGNED((uint32_t)sg_src->offset, 4) &&
	     IS_ALIGNED((uint32_t)sg_src->length, align_mask);
	if (!sg_dst)
		return in;
	out = IS_ALIGNED((uint32_t)sg_dst->offset, 4) &&
	      IS_ALIGNED((uint32_t)sg_dst->length, align_mask);
	align = in && out;

	return (align && (sg_src->length == sg_dst->length));
}

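/*
 * Map the next chunk of a request for DMA. Aligned scatterlists are
 * mapped directly; otherwise up to one page is staged through the
 * bounce buffer at dev->addr_vir, which is then mapped as the DMA
 * source and, when there is output, as the destination as well.
 */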
static int rk_load_data(struct rk_crypto_info *dev,
			struct scatterlist *sg_src,
			struct scatterlist *sg_dst)
{
	unsigned int count;

	dev->aligned = dev->aligned ?
		check_alignment(sg_src, sg_dst, dev->align_size) :
		dev->aligned;
	if (dev->aligned) {
		count = min(dev->left_bytes, sg_src->length);
		dev->left_bytes -= count;

		if (!dma_map_sg(dev->dev, sg_src, 1, DMA_TO_DEVICE)) {
			dev_err(dev->dev, "[%s:%d] dma_map_sg(src) error\n",
				__func__, __LINE__);
			return -EINVAL;
		}
		dev->addr_in = sg_dma_address(sg_src);

		if (sg_dst) {
			if (!dma_map_sg(dev->dev, sg_dst, 1, DMA_FROM_DEVICE)) {
				dev_err(dev->dev,
					"[%s:%d] dma_map_sg(dst) error\n",
					__func__, __LINE__);
				dma_unmap_sg(dev->dev, sg_src, 1,
					     DMA_TO_DEVICE);
				return -EINVAL;
			}
			dev->addr_out = sg_dma_address(sg_dst);
		}
	} else {
		count = (dev->left_bytes > PAGE_SIZE) ?
			PAGE_SIZE : dev->left_bytes;

		if (!sg_pcopy_to_buffer(dev->first, dev->src_nents,
					dev->addr_vir, count,
					dev->total - dev->left_bytes)) {
			dev_err(dev->dev, "[%s:%d] pcopy err\n",
				__func__, __LINE__);
			return -EINVAL;
		}
		dev->left_bytes -= count;
		sg_init_one(&dev->sg_tmp, dev->addr_vir, count);
		if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1, DMA_TO_DEVICE)) {
			dev_err(dev->dev, "[%s:%d] dma_map_sg(sg_tmp) error\n",
				__func__, __LINE__);
			return -ENOMEM;
		}
		dev->addr_in = sg_dma_address(&dev->sg_tmp);

		if (sg_dst) {
			if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1,
					DMA_FROM_DEVICE)) {
				dev_err(dev->dev,
					"[%s:%d] dma_map_sg(sg_tmp) error\n",
					__func__, __LINE__);
				dma_unmap_sg(dev->dev, &dev->sg_tmp, 1,
					     DMA_TO_DEVICE);
				return -ENOMEM;
			}
			dev->addr_out = sg_dma_address(&dev->sg_tmp);
		}
	}
	dev->count = count;
	return 0;
}

static void rk_unload_data(struct rk_crypto_info *dev)
{
	struct scatterlist *sg_in, *sg_out;

	sg_in = dev->aligned ? dev->sg_src : &dev->sg_tmp;
	dma_unmap_sg(dev->dev, sg_in, 1, DMA_TO_DEVICE);

	if (dev->sg_dst) {
		sg_out = dev->aligned ? dev->sg_dst : &dev->sg_tmp;
		dma_unmap_sg(dev->dev, sg_out, 1, DMA_FROM_DEVICE);
	}
}

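/*
 * Acknowledge the interrupt by writing the status bits back to INTSTS,
 * latch DMA error bits (0x0a) into dev->err, and defer completion work
 * to the done tasklet.
 */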
static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id)
{
	struct rk_crypto_info *dev = platform_get_drvdata(dev_id);
	u32 interrupt_status;

	spin_lock(&dev->lock);
	interrupt_status = CRYPTO_READ(dev, RK_CRYPTO_INTSTS);
	CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, interrupt_status);

	if (interrupt_status & 0x0a) {
		dev_warn(dev->dev, "DMA Error\n");
		dev->err = -EFAULT;
	}
	tasklet_schedule(&dev->done_task);

	spin_unlock(&dev->lock);
	return IRQ_HANDLED;
}

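/*
 * Add a request to the software queue. The queue tasklet is only
 * kicked when the engine is idle; otherwise the request waits until
 * the current one completes.
 */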
static int rk_crypto_enqueue(struct rk_crypto_info *dev,
			     struct crypto_async_request *async_req)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev->lock, flags);
	ret = crypto_enqueue_request(&dev->queue, async_req);
	if (dev->busy) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return ret;
	}
	dev->busy = true;
	spin_unlock_irqrestore(&dev->lock, flags);
	tasklet_schedule(&dev->queue_task);

	return ret;
}

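/*
 * Queue tasklet: pull the next request off the queue, notify a
 * backlogged submitter that its request is now in progress, and start
 * the transfer; the done tasklet below advances it on each interrupt.
 */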
static void rk_crypto_queue_task_cb(unsigned long data)
{
	struct rk_crypto_info *dev = (struct rk_crypto_info *)data;
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	int err = 0;

	dev->err = 0;
	spin_lock_irqsave(&dev->lock, flags);
	backlog   = crypto_get_backlog(&dev->queue);
	async_req = crypto_dequeue_request(&dev->queue);

	if (!async_req) {
		dev->busy = false;
		spin_unlock_irqrestore(&dev->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (backlog) {
		backlog->complete(backlog, -EINPROGRESS);
		backlog = NULL;
	}

	dev->async_req = async_req;
	err = dev->start(dev);
	if (err)
		dev->complete(dev->async_req, err);
}

static void rk_crypto_done_task_cb(unsigned long data)
{
	struct rk_crypto_info *dev = (struct rk_crypto_info *)data;

	if (dev->err) {
		dev->complete(dev->async_req, dev->err);
		return;
	}

	dev->err = dev->update(dev);
	if (dev->err)
		dev->complete(dev->async_req, dev->err);
}

static struct rk_crypto_tmp *rk_cipher_algs[] = {
	&rk_ecb_aes_alg,
	&rk_cbc_aes_alg,
	&rk_ecb_des_alg,
	&rk_cbc_des_alg,
	&rk_ecb_des3_ede_alg,
	&rk_cbc_des3_ede_alg,
	&rk_ahash_sha1,
	&rk_ahash_sha256,
	&rk_ahash_md5,
};

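/*
 * Register every algorithm in rk_cipher_algs with the crypto API,
 * rolling back the already-registered entries if any registration
 * fails.
 */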
static int rk_crypto_register(struct rk_crypto_info *crypto_info)
{
	unsigned int i, k;
	int err = 0;

	for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
		rk_cipher_algs[i]->dev = crypto_info;
		if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
			err = crypto_register_skcipher(
					&rk_cipher_algs[i]->alg.skcipher);
		else
			err = crypto_register_ahash(
					&rk_cipher_algs[i]->alg.hash);
		if (err)
			goto err_cipher_algs;
	}
	return 0;

err_cipher_algs:
	for (k = 0; k < i; k++) {
		if (rk_cipher_algs[k]->type == ALG_TYPE_CIPHER)
			crypto_unregister_skcipher(&rk_cipher_algs[k]->alg.skcipher);
		else
			crypto_unregister_ahash(&rk_cipher_algs[k]->alg.hash);
	}
	return err;
}

static void rk_crypto_unregister(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
		if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
			crypto_unregister_skcipher(&rk_cipher_algs[i]->alg.skcipher);
		else
			crypto_unregister_ahash(&rk_cipher_algs[i]->alg.hash);
	}
}

static void rk_crypto_action(void *data)
{
	struct rk_crypto_info *crypto_info = data;

	reset_control_assert(crypto_info->rst);
}

static const struct of_device_id crypto_of_id_table[] = {
	{ .compatible = "rockchip,rk3288-crypto" },
	{}
};
MODULE_DEVICE_TABLE(of, crypto_of_id_table);

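/*
 * Probe: pulse the reset line, map the register window, look up the
 * four clocks and the interrupt, initialize the request queue and its
 * two tasklets, and finally register the algorithms.
 */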
static int rk_crypto_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rk_crypto_info *crypto_info;
	int err = 0;

	crypto_info = devm_kzalloc(&pdev->dev,
				   sizeof(*crypto_info), GFP_KERNEL);
	if (!crypto_info) {
		err = -ENOMEM;
		goto err_crypto;
	}

	crypto_info->rst = devm_reset_control_get(dev, "crypto-rst");
	if (IS_ERR(crypto_info->rst)) {
		err = PTR_ERR(crypto_info->rst);
		goto err_crypto;
	}

	reset_control_assert(crypto_info->rst);
	usleep_range(10, 20);
	reset_control_deassert(crypto_info->rst);

	err = devm_add_action_or_reset(dev, rk_crypto_action, crypto_info);
	if (err)
		goto err_crypto;

	spin_lock_init(&crypto_info->lock);

	crypto_info->reg = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(crypto_info->reg)) {
		err = PTR_ERR(crypto_info->reg);
		goto err_crypto;
	}

	crypto_info->aclk = devm_clk_get(&pdev->dev, "aclk");
	if (IS_ERR(crypto_info->aclk)) {
		err = PTR_ERR(crypto_info->aclk);
		goto err_crypto;
	}

	crypto_info->hclk = devm_clk_get(&pdev->dev, "hclk");
	if (IS_ERR(crypto_info->hclk)) {
		err = PTR_ERR(crypto_info->hclk);
		goto err_crypto;
	}

	crypto_info->sclk = devm_clk_get(&pdev->dev, "sclk");
	if (IS_ERR(crypto_info->sclk)) {
		err = PTR_ERR(crypto_info->sclk);
		goto err_crypto;
	}

	crypto_info->dmaclk = devm_clk_get(&pdev->dev, "apb_pclk");
	if (IS_ERR(crypto_info->dmaclk)) {
		err = PTR_ERR(crypto_info->dmaclk);
		goto err_crypto;
	}

	crypto_info->irq = platform_get_irq(pdev, 0);
	if (crypto_info->irq < 0) {
		dev_warn(dev, "control Interrupt is not available.\n");
		err = crypto_info->irq;
		goto err_crypto;
	}

	err = devm_request_irq(&pdev->dev, crypto_info->irq,
			       rk_crypto_irq_handle, IRQF_SHARED,
			       "rk-crypto", pdev);

	if (err) {
		dev_err(dev, "irq request failed.\n");
		goto err_crypto;
	}

	crypto_info->dev = &pdev->dev;
	platform_set_drvdata(pdev, crypto_info);

	tasklet_init(&crypto_info->queue_task,
		     rk_crypto_queue_task_cb, (unsigned long)crypto_info);
	tasklet_init(&crypto_info->done_task,
		     rk_crypto_done_task_cb, (unsigned long)crypto_info);
	crypto_init_queue(&crypto_info->queue, 50);

	crypto_info->enable_clk = rk_crypto_enable_clk;
	crypto_info->disable_clk = rk_crypto_disable_clk;
	crypto_info->load_data = rk_load_data;
	crypto_info->unload_data = rk_unload_data;
	crypto_info->enqueue = rk_crypto_enqueue;
	crypto_info->busy = false;

	err = rk_crypto_register(crypto_info);
	if (err) {
		dev_err(dev, "err in register alg\n");
		goto err_register_alg;
	}

	dev_info(dev, "Crypto Accelerator successfully registered\n");
	return 0;

err_register_alg:
	tasklet_kill(&crypto_info->queue_task);
	tasklet_kill(&crypto_info->done_task);
err_crypto:
	return err;
}

static int rk_crypto_remove(struct platform_device *pdev)
{
	struct rk_crypto_info *crypto_tmp = platform_get_drvdata(pdev);

	rk_crypto_unregister();
	tasklet_kill(&crypto_tmp->done_task);
	tasklet_kill(&crypto_tmp->queue_task);
	return 0;
}

static struct platform_driver crypto_driver = {
	.probe		= rk_crypto_probe,
	.remove		= rk_crypto_remove,
	.driver		= {
		.name	= "rk3288-crypto",
		.of_match_table	= crypto_of_id_table,
	},
};

module_platform_driver(crypto_driver);

MODULE_AUTHOR("Zain Wang <zain.wang@rock-chips.com>");
MODULE_DESCRIPTION("Support for Rockchip's cryptographic engine");
MODULE_LICENSE("GPL");
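
For context, the skcipher and ahash algorithms this driver registers are reachable from userspace through the kernel's AF_ALG socket interface. Below is a minimal, illustrative sketch (not part of this file) that encrypts one block with "ecb(aes)"; the all-zero key and buffer are placeholders, and whether the request actually lands on this engine depends on algorithm priorities at runtime.

/* af_alg_demo.c - hypothetical userspace sketch, not part of this driver */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

#ifndef SOL_ALG
#define SOL_ALG 279	/* not exposed by all libc headers */
#endif

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "skcipher",
		.salg_name   = "ecb(aes)",
	};
	unsigned char key[16] = { 0 };			/* demo key only */
	unsigned char buf[16] = { 0 };			/* one AES block */
	char cbuf[CMSG_SPACE(sizeof(__u32))] = { 0 };
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	struct msghdr msg = {
		.msg_control	= cbuf,
		.msg_controllen	= sizeof(cbuf),
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
	};
	struct cmsghdr *cmsg;
	int tfmfd, opfd;

	/* Bind a transform socket to the algorithm and set the key. */
	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	if (tfmfd < 0 || bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa)) ||
	    setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key)))
		return 1;

	/* Each accepted socket is one operation context. */
	opfd = accept(tfmfd, NULL, 0);
	if (opfd < 0)
		return 1;

	/* Request an encryption via an ALG_SET_OP control message. */
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type	 = ALG_SET_OP;
	cmsg->cmsg_len	 = CMSG_LEN(sizeof(__u32));
	*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

	if (sendmsg(opfd, &msg, 0) < 0 || read(opfd, buf, sizeof(buf)) < 0)
		return 1;

	printf("first ciphertext byte: %02x\n", buf[0]);
	close(opfd);
	close(tfmfd);
	return 0;
}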