cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

sun8i-ss-prng.c (4469B)


// SPDX-License-Identifier: GPL-2.0
/*
 * sun8i-ss-prng.c - hardware cryptographic offloader for
 * Allwinner A80/A83T SoC
 *
 * Copyright (C) 2015-2020 Corentin Labbe <clabbe@baylibre.com>
 *
 * This file handles the PRNG found in the SS
 *
 * You can find a link to the datasheet in Documentation/arm/sunxi.rst
 */
#include "sun8i-ss.h"
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <crypto/internal/rng.h>

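/*
 * For reference, the per-transform context used below is declared in
 * sun8i-ss.h; the sketch here is illustrative rather than a verbatim copy
 * and only shows the two fields this file touches:
 *
 *	struct sun8i_ss_rng_tfm_ctx {
 *		void *seed;		// DMA-able copy of the caller's seed
 *		unsigned int slen;	// length of that seed in bytes
 *	};
 */
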
/* Keep a local, DMA-able copy of the caller-provided seed. */
int sun8i_ss_prng_seed(struct crypto_rng *tfm, const u8 *seed,
		       unsigned int slen)
{
	struct sun8i_ss_rng_tfm_ctx *ctx = crypto_rng_ctx(tfm);

	if (ctx->seed && ctx->slen != slen) {
		kfree_sensitive(ctx->seed);
		ctx->slen = 0;
		ctx->seed = NULL;
	}
	if (!ctx->seed)
		ctx->seed = kmalloc(slen, GFP_KERNEL | GFP_DMA);
	if (!ctx->seed)
		return -ENOMEM;

	memcpy(ctx->seed, seed, slen);
	ctx->slen = slen;

	return 0;
}

/* Start each transform with an empty (unseeded) context. */
int sun8i_ss_prng_init(struct crypto_tfm *tfm)
{
	struct sun8i_ss_rng_tfm_ctx *ctx = crypto_tfm_ctx(tfm);

	memset(ctx, 0, sizeof(struct sun8i_ss_rng_tfm_ctx));
	return 0;
}

/* Wipe and free the stored seed when the transform is destroyed. */
void sun8i_ss_prng_exit(struct crypto_tfm *tfm)
{
	struct sun8i_ss_rng_tfm_ctx *ctx = crypto_tfm_ctx(tfm);

	kfree_sensitive(ctx->seed);
	ctx->seed = NULL;
	ctx->slen = 0;
}

/* Run the hardware PRNG, copy out dlen bytes and refresh the stored seed. */
int sun8i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
			   unsigned int slen, u8 *dst, unsigned int dlen)
{
	struct sun8i_ss_rng_tfm_ctx *ctx = crypto_rng_ctx(tfm);
	struct rng_alg *alg = crypto_rng_alg(tfm);
	struct sun8i_ss_alg_template *algt;
	struct sun8i_ss_dev *ss;
	dma_addr_t dma_iv, dma_dst;
	unsigned int todo;
	int err = 0;
	int flow;
	void *d;
	u32 v;

	algt = container_of(alg, struct sun8i_ss_alg_template, alg.rng);
	ss = algt->ss;

	if (ctx->slen == 0) {
		dev_err(ss->dev, "The PRNG is not seeded\n");
		return -EINVAL;
	}

	/* The SS does not give back an updated seed, so we need to get a new
	 * one: ask for an extra PRNG_SEED_SIZE bytes of data.
	 * We want dlen + seedsize rounded up to a multiple of PRNG_DATA_SIZE.
	 */
	todo = dlen + PRNG_SEED_SIZE + PRNG_DATA_SIZE;
	todo -= todo % PRNG_DATA_SIZE;
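	/* Worked example with illustrative sizes (the real PRNG_* constants
	 * live in sun8i-ss.h): if PRNG_DATA_SIZE were 20 and PRNG_SEED_SIZE
	 * were 22, a request for dlen = 32 bytes would give
	 * todo = 32 + 22 + 20 = 74, then 74 - (74 % 20) = 60, i.e. a whole
	 * number of PRNG_DATA_SIZE blocks large enough for both the caller's
	 * 32 bytes and a 22-byte replacement seed.
	 */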

	d = kzalloc(todo, GFP_KERNEL | GFP_DMA);
	if (!d)
		return -ENOMEM;

	flow = sun8i_ss_get_engine_number(ss);

#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	algt->stat_req++;
	algt->stat_bytes += todo;
#endif

	v = SS_ALG_PRNG | SS_PRNG_CONTINUE | SS_START;
	if (flow)
		v |= SS_FLOW1;
	else
		v |= SS_FLOW0;

	dma_iv = dma_map_single(ss->dev, ctx->seed, ctx->slen, DMA_TO_DEVICE);
	if (dma_mapping_error(ss->dev, dma_iv)) {
		dev_err(ss->dev, "Cannot DMA MAP IV\n");
		err = -EFAULT;
		goto err_free;
	}

	dma_dst = dma_map_single(ss->dev, d, todo, DMA_FROM_DEVICE);
	if (dma_mapping_error(ss->dev, dma_dst)) {
		dev_err(ss->dev, "Cannot DMA MAP DST\n");
		err = -EFAULT;
		goto err_iv;
	}

	err = pm_runtime_resume_and_get(ss->dev);
	if (err < 0)
		goto err_pm;
	err = 0;

	mutex_lock(&ss->mlock);
	writel(dma_iv, ss->base + SS_IV_ADR_REG);
	/* the PRNG acts badly (fails rngtest) without SS_KEY_ADR_REG set */
	writel(dma_iv, ss->base + SS_KEY_ADR_REG);
	writel(dma_dst, ss->base + SS_DST_ADR_REG);
	writel(todo / 4, ss->base + SS_LEN_ADR_REG); /* length in 32-bit words */

	reinit_completion(&ss->flows[flow].complete);
	ss->flows[flow].status = 0;
	/* Be sure all data is written before enabling the task */
	wmb();

	writel(v, ss->base + SS_CTL_REG);

	wait_for_completion_interruptible_timeout(&ss->flows[flow].complete,
						  msecs_to_jiffies(todo));
	if (ss->flows[flow].status == 0) {
		dev_err(ss->dev, "DMA timeout for PRNG (size=%u)\n", todo);
		err = -EFAULT;
	}
	/* Since cipher and hash go through the Linux crypto engine, and we
	 * have one crypto engine per flow, we know that they will issue only
	 * one request per flow at a time.
	 * Since the crypto engine waits for completion before submitting a
	 * new request, mlock could be released right after the final writel.
	 * But the crypto engine cannot handle crypto_rng, so we need to be
	 * sure nothing else will use our flow.
	 * The easiest way is to hold mlock until the hardware has finished
	 * our request.
	 * We could have used a per-flow lock, but that would add complexity.
	 * The drawback is that no request can be handled on the other flow
	 * while we hold it.
	 */
	mutex_unlock(&ss->mlock);

	pm_runtime_put(ss->dev);

err_pm:
	dma_unmap_single(ss->dev, dma_dst, todo, DMA_FROM_DEVICE);
err_iv:
	dma_unmap_single(ss->dev, dma_iv, ctx->slen, DMA_TO_DEVICE);

	if (!err) {
		memcpy(dst, d, dlen);
		/* Update the stored seed with the fresh one from the hardware */
		memcpy(ctx->seed, d + dlen, ctx->slen);
	}
err_free:
	kfree_sensitive(d);

	return err;
}
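
The functions above implement the callbacks of the kernel crypto API's rng_alg interface; callers never invoke them directly. As a rough, hypothetical illustration (assuming this driver registers its PRNG under the generic "stdrng" algorithm name and wins the priority-based algorithm selection), a kernel consumer would exercise the seed and generate paths roughly like this:

#include <crypto/rng.h>
#include <linux/err.h>

/* Hypothetical consumer sketch, not part of the driver. */
static int example_use_prng(void)
{
	struct crypto_rng *rng;
	u8 out[64];
	int ret;

	rng = crypto_alloc_rng("stdrng", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	/* With a NULL seed, the crypto core generates a random seed of the
	 * requested length; the driver receives it in sun8i_ss_prng_seed().
	 */
	ret = crypto_rng_reset(rng, NULL, crypto_rng_seedsize(rng));
	if (ret)
		goto out;

	/* Serviced by sun8i_ss_prng_generate() above. */
	ret = crypto_rng_get_bytes(rng, out, sizeof(out));
out:
	crypto_free_rng(rng);
	return ret;
}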