cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

scompress.c (6962B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Synchronous Compression operations
 *
 * Copyright 2015 LG Electronics Inc.
 * Copyright (c) 2016, Intel Corporation
 * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/crypto.h>
#include <linux/compiler.h>
#include <linux/vmalloc.h>
#include <crypto/algapi.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>
#include <linux/scatterlist.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/acompress.h>
#include <crypto/internal/scompress.h>
#include "internal.h"

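/*
 * Per-CPU scratch buffers used to linearize the source and destination
 * scatterlists. The lock serializes concurrent users that end up on the
 * same CPU's scratch area.
 */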
struct scomp_scratch {
	spinlock_t	lock;
	void		*src;
	void		*dst;
};

static DEFINE_PER_CPU(struct scomp_scratch, scomp_scratch) = {
	.lock = __SPIN_LOCK_UNLOCKED(scomp_scratch.lock),
};

static const struct crypto_type crypto_scomp_type;
static int scomp_scratch_users;
static DEFINE_MUTEX(scomp_lock);

#ifdef CONFIG_NET
static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_comp rscomp;

	memset(&rscomp, 0, sizeof(rscomp));

	strscpy(rscomp.type, "scomp", sizeof(rscomp.type));

	return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
		       sizeof(rscomp), &rscomp);
}
#else
static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_puts(m, "type         : scomp\n");
}

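/*
 * Release each CPU's src/dst scratch buffers. Also used on the
 * allocation error path; vfree(NULL) is a no-op, so a partially
 * allocated state is handled safely.
 */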
static void crypto_scomp_free_scratches(void)
{
	struct scomp_scratch *scratch;
	int i;

	for_each_possible_cpu(i) {
		scratch = per_cpu_ptr(&scomp_scratch, i);

		vfree(scratch->src);
		vfree(scratch->dst);
		scratch->src = NULL;
		scratch->dst = NULL;
	}
}

static int crypto_scomp_alloc_scratches(void)
{
	struct scomp_scratch *scratch;
	int i;

	for_each_possible_cpu(i) {
		void *mem;

		scratch = per_cpu_ptr(&scomp_scratch, i);

		mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
		if (!mem)
			goto error;
		scratch->src = mem;
		mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
		if (!mem)
			goto error;
		scratch->dst = mem;
	}
	return 0;
error:
	crypto_scomp_free_scratches();
	return -ENOMEM;
}

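/*
 * The first scomp tfm allocates the shared per-CPU scratch buffers;
 * scomp_scratch_users keeps them alive until the last tfm exits (see
 * crypto_exit_scomp_ops_async() below).
 */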
static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
{
	int ret = 0;

	mutex_lock(&scomp_lock);
	if (!scomp_scratch_users++)
		ret = crypto_scomp_alloc_scratches();
	mutex_unlock(&scomp_lock);

	return ret;
}

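/*
 * Serve an acomp request with a synchronous algorithm: copy the source
 * scatterlist into this CPU's scratch buffer, run the scomp
 * (de)compression (dir != 0 means compress), then copy the result to
 * the destination scatterlist, allocating one if the caller did not
 * provide it.
 */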
static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
	void **tfm_ctx = acomp_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void **ctx = acomp_request_ctx(req);
	struct scomp_scratch *scratch;
	int ret;

	if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE)
		return -EINVAL;

	if (req->dst && !req->dlen)
		return -EINVAL;

	if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
		req->dlen = SCOMP_SCRATCH_SIZE;

	scratch = raw_cpu_ptr(&scomp_scratch);
	spin_lock(&scratch->lock);

	scatterwalk_map_and_copy(scratch->src, req->src, 0, req->slen, 0);
	if (dir)
		ret = crypto_scomp_compress(scomp, scratch->src, req->slen,
					    scratch->dst, &req->dlen, *ctx);
	else
		ret = crypto_scomp_decompress(scomp, scratch->src, req->slen,
					      scratch->dst, &req->dlen, *ctx);
	if (!ret) {
		if (!req->dst) {
			req->dst = sgl_alloc(req->dlen, GFP_ATOMIC, NULL);
			if (!req->dst) {
				ret = -ENOMEM;
				goto out;
			}
		}
		scatterwalk_map_and_copy(scratch->dst, req->dst, 0, req->dlen,
					 1);
	}
out:
	spin_unlock(&scratch->lock);
	return ret;
}

static int scomp_acomp_compress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 1);
}

static int scomp_acomp_decompress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 0);
}

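/*
 * Tear down the scomp tfm backing an acomp tfm and drop the reference
 * on the scratch buffers.
 */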
static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);

	crypto_free_scomp(*ctx);

	mutex_lock(&scomp_lock);
	if (!--scomp_scratch_users)
		crypto_scomp_free_scratches();
	mutex_unlock(&scomp_lock);
}

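/*
 * Instantiate an acomp tfm on top of an scomp algorithm: create the
 * underlying synchronous tfm and wire the acomp entry points to the
 * scomp-based wrappers above.
 */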
int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	scomp = crypto_create_tfm(calg, &crypto_scomp_type);
	if (IS_ERR(scomp)) {
		crypto_mod_put(calg);
		return PTR_ERR(scomp);
	}

	*ctx = scomp;
	tfm->exit = crypto_exit_scomp_ops_async;

	crt->compress = scomp_acomp_compress;
	crt->decompress = scomp_acomp_decompress;
	crt->dst_free = sgl_free;
	crt->reqsize = sizeof(void *);

	return 0;
}

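/*
 * Allocate the algorithm's per-request context and attach it to the
 * acomp request; the request itself is freed and NULL returned on
 * failure.
 */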
struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req)
{
	struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
	struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void *ctx;

	ctx = crypto_scomp_alloc_ctx(scomp);
	if (IS_ERR(ctx)) {
		kfree(req);
		return NULL;
	}

	*req->__ctx = ctx;

	return req;
}

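/* Release the per-request context attached to an acomp request. */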
void crypto_acomp_scomp_free_ctx(struct acomp_req *req)
{
	struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
	struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void *ctx = *req->__ctx;

	if (ctx)
		crypto_scomp_free_ctx(scomp, ctx);
}

static const struct crypto_type crypto_scomp_type = {
	.extsize = crypto_alg_extsize,
	.init_tfm = crypto_scomp_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_scomp_show,
#endif
	.report = crypto_scomp_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_MASK,
	.type = CRYPTO_ALG_TYPE_SCOMPRESS,
	.tfmsize = offsetof(struct crypto_scomp, base),
};

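/*
 * Mark the algorithm as an scomp and hand it to the crypto core for
 * registration.
 */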
int crypto_register_scomp(struct scomp_alg *alg)
{
	struct crypto_alg *base = &alg->base;

	base->cra_type = &crypto_scomp_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_scomp);

void crypto_unregister_scomp(struct scomp_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomp);

int crypto_register_scomps(struct scomp_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_scomp(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_scomp(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_scomps);

void crypto_unregister_scomps(struct scomp_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_scomp(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomps);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Synchronous compression type");
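
For reference, an scomp backend is what crypto_register_scomp() above takes, and its callbacks are what scomp_acomp_comp_decomp() drives on the linearized scratch buffers. Below is a minimal, hypothetical sketch of such a driver, not part of this file: the mycomp_* names are invented for illustration, and the pass-through memcpy stands in for a real compression algorithm.

#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <crypto/internal/scompress.h>

/* Hypothetical per-request workspace; a real driver would size this
 * for its algorithm's needs. */
struct mycomp_ctx {
	u8 workspace[4096];
};

static void *mycomp_alloc_ctx(struct crypto_scomp *tfm)
{
	struct mycomp_ctx *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return ERR_PTR(-ENOMEM);
	return ctx;
}

static void mycomp_free_ctx(struct crypto_scomp *tfm, void *ctx)
{
	kfree(ctx);
}

static int mycomp_compress(struct crypto_scomp *tfm, const u8 *src,
			   unsigned int slen, u8 *dst, unsigned int *dlen,
			   void *ctx)
{
	/* Pass-through stand-in: a real driver compresses src into dst
	 * and writes the produced size to *dlen. */
	if (slen > *dlen)
		return -ENOSPC;
	memcpy(dst, src, slen);
	*dlen = slen;
	return 0;
}

static int mycomp_decompress(struct crypto_scomp *tfm, const u8 *src,
			     unsigned int slen, u8 *dst, unsigned int *dlen,
			     void *ctx)
{
	if (slen > *dlen)
		return -ENOSPC;
	memcpy(dst, src, slen);
	*dlen = slen;
	return 0;
}

static struct scomp_alg mycomp_alg = {
	.alloc_ctx	= mycomp_alloc_ctx,
	.free_ctx	= mycomp_free_ctx,
	.compress	= mycomp_compress,
	.decompress	= mycomp_decompress,
	.base		= {
		.cra_name	 = "mycomp",
		.cra_driver_name = "mycomp-generic",
		.cra_module	 = THIS_MODULE,
	},
};

static int __init mycomp_mod_init(void)
{
	return crypto_register_scomp(&mycomp_alg);
}

static void __exit mycomp_mod_exit(void)
{
	crypto_unregister_scomp(&mycomp_alg);
}

module_init(mycomp_mod_init);
module_exit(mycomp_mod_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Illustrative scomp registration sketch");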