cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

blk-crypto.c (12349B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */

/*
 * Refer to Documentation/block/inline-encryption.rst for detailed explanation.
 */

#define pr_fmt(fmt) "blk-crypto: " fmt

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-crypto-profile.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "blk-crypto-internal.h"

const struct blk_crypto_mode blk_crypto_modes[] = {
	[BLK_ENCRYPTION_MODE_AES_256_XTS] = {
		.name = "AES-256-XTS",
		.cipher_str = "xts(aes)",
		.keysize = 64,
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV] = {
		.name = "AES-128-CBC-ESSIV",
		.cipher_str = "essiv(cbc(aes),sha256)",
		.keysize = 16,
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_ADIANTUM] = {
		.name = "Adiantum",
		.cipher_str = "adiantum(xchacha12,aes)",
		.keysize = 32,
		.ivsize = 32,
	},
};

/*
 * This number needs to be at least (the number of threads doing IO
 * concurrently) * (maximum recursive depth of a bio), so that we don't
 * deadlock on crypt_ctx allocations. The default is chosen to be the same
 * as the default number of post read contexts in both EXT4 and F2FS.
 */
static int num_prealloc_crypt_ctxs = 128;

module_param(num_prealloc_crypt_ctxs, int, 0444);
MODULE_PARM_DESC(num_prealloc_crypt_ctxs,
		"Number of bio crypto contexts to preallocate");

static struct kmem_cache *bio_crypt_ctx_cache;
static mempool_t *bio_crypt_ctx_pool;

static int __init bio_crypt_ctx_init(void)
{
	size_t i;

	bio_crypt_ctx_cache = KMEM_CACHE(bio_crypt_ctx, 0);
	if (!bio_crypt_ctx_cache)
		goto out_no_mem;

	bio_crypt_ctx_pool = mempool_create_slab_pool(num_prealloc_crypt_ctxs,
						      bio_crypt_ctx_cache);
	if (!bio_crypt_ctx_pool)
		goto out_no_mem;

	/* This is assumed in various places. */
	BUILD_BUG_ON(BLK_ENCRYPTION_MODE_INVALID != 0);

	/* Sanity check that no algorithm exceeds the defined limits. */
	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++) {
		BUG_ON(blk_crypto_modes[i].keysize > BLK_CRYPTO_MAX_KEY_SIZE);
		BUG_ON(blk_crypto_modes[i].ivsize > BLK_CRYPTO_MAX_IV_SIZE);
	}

	return 0;
out_no_mem:
	panic("Failed to allocate mem for bio crypt ctxs\n");
}
subsys_initcall(bio_crypt_ctx_init);

void bio_crypt_set_ctx(struct bio *bio, const struct blk_crypto_key *key,
		       const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], gfp_t gfp_mask)
{
	struct bio_crypt_ctx *bc;

	/*
	 * The caller must use a gfp_mask that contains __GFP_DIRECT_RECLAIM so
	 * that the mempool_alloc() can't fail.
	 */
	WARN_ON_ONCE(!(gfp_mask & __GFP_DIRECT_RECLAIM));

	bc = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);

	bc->bc_key = key;
	memcpy(bc->bc_dun, dun, sizeof(bc->bc_dun));

	bio->bi_crypt_context = bc;
}
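
/*
 * Editor's note: the sketch below is illustrative only and is not part of
 * the original file.  It shows how a typical caller (e.g. a filesystem)
 * would attach an encryption context to a bio before submission; "key" and
 * "dun" are assumed to have been prepared elsewhere.
 */
static inline void example_attach_crypt_ctx(struct bio *bio,
					    const struct blk_crypto_key *key,
					    const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
	/* GFP_NOIO includes __GFP_DIRECT_RECLAIM, so the allocation cannot fail. */
	bio_crypt_set_ctx(bio, key, dun, GFP_NOIO);
}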

void __bio_crypt_free_ctx(struct bio *bio)
{
	mempool_free(bio->bi_crypt_context, bio_crypt_ctx_pool);
	bio->bi_crypt_context = NULL;
}

int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask)
{
	dst->bi_crypt_context = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
	if (!dst->bi_crypt_context)
		return -ENOMEM;
	*dst->bi_crypt_context = *src->bi_crypt_context;
	return 0;
}

/* Increments @dun by @inc, treating @dun as a multi-limb integer. */
void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
			     unsigned int inc)
{
	int i;

	for (i = 0; inc && i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
		dun[i] += inc;
		/*
		 * If the addition in this limb overflowed, then we need to
		 * carry 1 into the next limb. Else the carry is 0.
		 */
		if (dun[i] < inc)
			inc = 1;
		else
			inc = 0;
	}
}
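
/*
 * Editor's note: the sketch below is illustrative only and is not part of
 * the original file.  It demonstrates the multi-limb carry performed by
 * bio_crypt_dun_increment(): when the low 64-bit limb overflows, a carry
 * of 1 propagates into the next limb.
 */
static inline void example_dun_increment_carry(void)
{
	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE] = { U64_MAX };

	/* U64_MAX + 1 wraps limb 0 to 0 and carries 1 into limb 1. */
	bio_crypt_dun_increment(dun, 1);
	/* Now dun[0] == 0 and dun[1] == 1; any higher limbs are unchanged. */
}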

void __bio_crypt_advance(struct bio *bio, unsigned int bytes)
{
	struct bio_crypt_ctx *bc = bio->bi_crypt_context;

	bio_crypt_dun_increment(bc->bc_dun,
				bytes >> bc->bc_key->data_unit_size_bits);
}

/*
 * Returns true if @bc->bc_dun plus @bytes converted to data units is equal to
 * @next_dun, treating the DUNs as multi-limb integers.
 */
bool bio_crypt_dun_is_contiguous(const struct bio_crypt_ctx *bc,
				 unsigned int bytes,
				 const u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
	int i;
	unsigned int carry = bytes >> bc->bc_key->data_unit_size_bits;

	for (i = 0; i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
		if (bc->bc_dun[i] + carry != next_dun[i])
			return false;
		/*
		 * If the addition in this limb overflowed, then we need to
		 * carry 1 into the next limb. Else the carry is 0.
		 */
		if ((bc->bc_dun[i] + carry) < carry)
			carry = 1;
		else
			carry = 0;
	}

	/* If the DUN wrapped through 0, don't treat it as contiguous. */
	return carry == 0;
}
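
/*
 * Editor's note: the sketch below is illustrative only and is not part of
 * the original file.  E.g. with 4096-byte data units (data_unit_size_bits
 * == 12), a bio covering 8192 bytes advances the DUN by exactly 2, so the
 * DUN computed with bio_crypt_dun_increment() is contiguous with the
 * current one.
 */
static inline bool example_dun_contiguity(const struct bio_crypt_ctx *bc)
{
	u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];

	memcpy(next_dun, bc->bc_dun, sizeof(next_dun));
	bio_crypt_dun_increment(next_dun, 8192 >> bc->bc_key->data_unit_size_bits);

	/* True unless the DUN wrapped through zero. */
	return bio_crypt_dun_is_contiguous(bc, 8192, next_dun);
}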

/*
 * Checks that two bio crypt contexts are compatible - i.e. that
 * they are mergeable except for data_unit_num continuity.
 */
static bool bio_crypt_ctx_compatible(struct bio_crypt_ctx *bc1,
				     struct bio_crypt_ctx *bc2)
{
	if (!bc1)
		return !bc2;

	return bc2 && bc1->bc_key == bc2->bc_key;
}

bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio)
{
	return bio_crypt_ctx_compatible(rq->crypt_ctx, bio->bi_crypt_context);
}

/*
 * Checks that two bio crypt contexts are compatible, and also
 * that their data_unit_nums are continuous (and can hence be merged)
 * in the order @bc1 followed by @bc2.
 */
bool bio_crypt_ctx_mergeable(struct bio_crypt_ctx *bc1, unsigned int bc1_bytes,
			     struct bio_crypt_ctx *bc2)
{
	if (!bio_crypt_ctx_compatible(bc1, bc2))
		return false;

	return !bc1 || bio_crypt_dun_is_contiguous(bc1, bc1_bytes, bc2->bc_dun);
}

/* Check that all I/O segments are data unit aligned. */
static bool bio_crypt_check_alignment(struct bio *bio)
{
	const unsigned int data_unit_size =
		bio->bi_crypt_context->bc_key->crypto_cfg.data_unit_size;
	struct bvec_iter iter;
	struct bio_vec bv;

	bio_for_each_segment(bv, bio, iter) {
		if (!IS_ALIGNED(bv.bv_len | bv.bv_offset, data_unit_size))
			return false;
	}

	return true;
}

blk_status_t __blk_crypto_init_request(struct request *rq)
{
	return blk_crypto_get_keyslot(rq->q->crypto_profile,
				      rq->crypt_ctx->bc_key,
				      &rq->crypt_keyslot);
}

/**
 * __blk_crypto_free_request - Uninitialize the crypto fields of a request.
 *
 * @rq: The request whose crypto fields to uninitialize.
 *
 * Completely uninitializes the crypto fields of a request. If a keyslot has
 * been programmed into some inline encryption hardware, that keyslot is
 * released. The rq->crypt_ctx is also freed.
 */
void __blk_crypto_free_request(struct request *rq)
{
	blk_crypto_put_keyslot(rq->crypt_keyslot);
	mempool_free(rq->crypt_ctx, bio_crypt_ctx_pool);
	blk_crypto_rq_set_defaults(rq);
}

/**
 * __blk_crypto_bio_prep - Prepare bio for inline encryption
 *
 * @bio_ptr: pointer to original bio pointer
 *
 * If the bio crypt context provided for the bio is supported by the underlying
 * device's inline encryption hardware, do nothing.
 *
 * Otherwise, try to perform en/decryption for this bio by falling back to the
 * kernel crypto API. When the crypto API fallback is used for encryption,
 * blk-crypto may choose to split the bio into 2 - the first one that will
 * continue to be processed and the second one that will be resubmitted via
 * submit_bio_noacct. A bounce bio will be allocated to encrypt the contents
 * of the aforementioned "first one", and *bio_ptr will be updated to this
 * bounce bio.
 *
 * Caller must ensure bio has bio_crypt_ctx.
 *
 * Return: true on success; false on error (and bio->bi_status will be set
 *	   appropriately, and bio_endio() will have been called so bio
 *	   submission should abort).
 */
bool __blk_crypto_bio_prep(struct bio **bio_ptr)
{
	struct bio *bio = *bio_ptr;
	const struct blk_crypto_key *bc_key = bio->bi_crypt_context->bc_key;
	struct blk_crypto_profile *profile;

	/* Error if bio has no data. */
	if (WARN_ON_ONCE(!bio_has_data(bio))) {
		bio->bi_status = BLK_STS_IOERR;
		goto fail;
	}

	if (!bio_crypt_check_alignment(bio)) {
		bio->bi_status = BLK_STS_IOERR;
		goto fail;
	}

	/*
	 * Success if device supports the encryption context, or if we succeeded
	 * in falling back to the crypto API.
	 */
	profile = bdev_get_queue(bio->bi_bdev)->crypto_profile;
	if (__blk_crypto_cfg_supported(profile, &bc_key->crypto_cfg))
		return true;

	if (blk_crypto_fallback_bio_prep(bio_ptr))
		return true;
fail:
	bio_endio(*bio_ptr);
	return false;
}

int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
			     gfp_t gfp_mask)
{
	if (!rq->crypt_ctx) {
		rq->crypt_ctx = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
		if (!rq->crypt_ctx)
			return -ENOMEM;
	}
	*rq->crypt_ctx = *bio->bi_crypt_context;
	return 0;
}

/**
 * blk_crypto_init_key() - Prepare a key for use with blk-crypto
 * @blk_key: Pointer to the blk_crypto_key to initialize.
 * @raw_key: Pointer to the raw key. Must be the correct length for the chosen
 *	     @crypto_mode; see blk_crypto_modes[].
 * @crypto_mode: identifier for the encryption algorithm to use
 * @dun_bytes: number of bytes that will be used to specify the DUN when this
 *	       key is used
 * @data_unit_size: the data unit size to use for en/decryption
 *
 * Return: 0 on success, -errno on failure.  The caller is responsible for
 *	   zeroizing both blk_key and raw_key when done with them.
 */
int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key,
			enum blk_crypto_mode_num crypto_mode,
			unsigned int dun_bytes,
			unsigned int data_unit_size)
{
	const struct blk_crypto_mode *mode;

	memset(blk_key, 0, sizeof(*blk_key));

	if (crypto_mode >= ARRAY_SIZE(blk_crypto_modes))
		return -EINVAL;

	mode = &blk_crypto_modes[crypto_mode];
	if (mode->keysize == 0)
		return -EINVAL;

	if (dun_bytes == 0 || dun_bytes > mode->ivsize)
		return -EINVAL;

	if (!is_power_of_2(data_unit_size))
		return -EINVAL;

	blk_key->crypto_cfg.crypto_mode = crypto_mode;
	blk_key->crypto_cfg.dun_bytes = dun_bytes;
	blk_key->crypto_cfg.data_unit_size = data_unit_size;
	blk_key->data_unit_size_bits = ilog2(data_unit_size);
	blk_key->size = mode->keysize;
	memcpy(blk_key->raw, raw_key, mode->keysize);

	return 0;
}
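
/*
 * Editor's note: the sketch below is illustrative only and is not part of
 * the original file.  It prepares a key for AES-256-XTS with 4096-byte data
 * units and 8-byte DUNs; "raw_key" is assumed to point to 64 key bytes (the
 * keysize listed in blk_crypto_modes[] for this mode).
 */
static inline int example_init_xts_key(struct blk_crypto_key *blk_key,
				       const u8 *raw_key)
{
	return blk_crypto_init_key(blk_key, raw_key,
				   BLK_ENCRYPTION_MODE_AES_256_XTS,
				   8, 4096);
}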

/*
 * Check if bios with @cfg can be en/decrypted by blk-crypto (i.e. either the
 * request queue it's submitted to supports inline crypto, or the
 * blk-crypto-fallback is enabled and supports the cfg).
 */
bool blk_crypto_config_supported(struct request_queue *q,
				 const struct blk_crypto_config *cfg)
{
	return IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) ||
	       __blk_crypto_cfg_supported(q->crypto_profile, cfg);
}

/**
 * blk_crypto_start_using_key() - Start using a blk_crypto_key on a device
 * @key: A key to use on the device
 * @q: the request queue for the device
 *
 * Upper layers must call this function to ensure that either the hardware
 * supports the key's crypto settings, or the crypto API fallback has transforms
 * for the needed mode allocated and ready to go. This function may allocate
 * an skcipher, and *should not* be called from the data path, since that might
 * cause a deadlock.
 *
 * Return: 0 on success; -ENOPKG if the hardware doesn't support the key and
 *	   blk-crypto-fallback is either disabled or the needed algorithm
 *	   is disabled in the crypto API; or another -errno code.
 */
int blk_crypto_start_using_key(const struct blk_crypto_key *key,
			       struct request_queue *q)
{
	if (__blk_crypto_cfg_supported(q->crypto_profile, &key->crypto_cfg))
		return 0;
	return blk_crypto_fallback_start_using_mode(key->crypto_cfg.crypto_mode);
}

/**
 * blk_crypto_evict_key() - Evict a key from any inline encryption hardware
 *			    it may have been programmed into
 * @q: The request queue whose associated inline encryption hardware this key
 *     might have been programmed into
 * @key: The key to evict
 *
 * Upper layers (filesystems) must call this function to ensure that a key is
 * evicted from any hardware that it might have been programmed into.  The key
 * must not be in use by any in-flight IO when this function is called.
 *
 * Return: 0 on success or if the key wasn't in any keyslot; -errno on error.
 */
int blk_crypto_evict_key(struct request_queue *q,
			 const struct blk_crypto_key *key)
{
	if (__blk_crypto_cfg_supported(q->crypto_profile, &key->crypto_cfg))
		return __blk_crypto_evict_key(q->crypto_profile, key);

	/*
	 * If the request_queue didn't support the key, then blk-crypto-fallback
	 * may have been used, so try to evict the key from blk-crypto-fallback.
	 */
	return blk_crypto_fallback_evict_key(key);
}
EXPORT_SYMBOL_GPL(blk_crypto_evict_key);
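
/*
 * Editor's note: the sketch below is illustrative only and is not part of
 * the original file.  It shows the key lifecycle an upper layer such as a
 * filesystem would follow: initialize the key, declare intent to use it on
 * a queue, attach it to bios via bio_crypt_set_ctx(), and evict it once no
 * in-flight I/O uses it any more.  "q", "key" and "raw_key" are assumed to
 * be provided by the caller.
 */
static inline int example_key_lifecycle(struct request_queue *q,
					struct blk_crypto_key *key,
					const u8 *raw_key)
{
	int err;

	err = blk_crypto_init_key(key, raw_key,
				  BLK_ENCRYPTION_MODE_AES_256_XTS,
				  8, 4096);
	if (err)
		return err;

	err = blk_crypto_start_using_key(key, q);
	if (err)
		return err;

	/* ... submit bios carrying this key via bio_crypt_set_ctx() ... */

	return blk_crypto_evict_key(q, key);
}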