cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

block-rsv.c (16971B)


// SPDX-License-Identifier: GPL-2.0

#include "misc.h"
#include "ctree.h"
#include "block-rsv.h"
#include "space-info.h"
#include "transaction.h"
#include "block-group.h"
#include "disk-io.h"

/*
 * HOW DO BLOCK RESERVES WORK
 *
 *   Think of block_rsv's as buckets for logically grouped metadata
 *   reservations.  Each block_rsv has a ->size and a ->reserved.  ->size is
 *   how large we want our block rsv to be, ->reserved is how much space is
 *   currently reserved for this block reserve.
 *
 *   ->failfast exists for the truncate case, and is described below.
 *
 * NORMAL OPERATION
 *
 *   -> Reserve
 *     Entrance: btrfs_block_rsv_add, btrfs_block_rsv_refill
 *
 *     We call into btrfs_reserve_metadata_bytes() with our bytes, which is
 *     accounted for in space_info->bytes_may_use, and then add the bytes to
 *     ->reserved, and ->size in the case of btrfs_block_rsv_add.
 *
 *     ->size is an over-estimation of how much we may use for a particular
 *     operation.
 *
 *   -> Use
 *     Entrance: btrfs_use_block_rsv
 *
 *     When we do a btrfs_alloc_tree_block() we call into btrfs_use_block_rsv()
 *     to determine the appropriate block_rsv to use, and then verify that
 *     ->reserved has enough space for our tree block allocation.  Once
 *     successful we subtract fs_info->nodesize from ->reserved.
 *
 *   -> Finish
 *     Entrance: btrfs_block_rsv_release
 *
 *     We are finished with our operation, subtract our individual reservation
 *     from ->size, and then subtract ->size from ->reserved and free up the
 *     excess if there is any.
 *
 *     There is some logic here to refill the delayed refs rsv or the global rsv
 *     as needed, otherwise the excess is subtracted from
 *     space_info->bytes_may_use.
 *
 * TYPES OF BLOCK RESERVES
 *
 * BLOCK_RSV_TRANS, BLOCK_RSV_DELOPS, BLOCK_RSV_CHUNK
 *   These behave normally, as described above, just within the confines of the
 *   lifetime of their particular operation (transaction for the whole trans
 *   handle lifetime, for example).
 *
 * BLOCK_RSV_GLOBAL
 *   It is impossible to properly account for all the space that may be required
 *   to make our extent tree updates.  This block reserve acts as an overflow
 *   buffer in case our delayed refs reserve does not reserve enough space to
 *   update the extent tree.
 *
 *   We can steal from this in some cases as well, notably on evict() or
 *   truncate() in order to help users recover from ENOSPC conditions.
 *
 * BLOCK_RSV_DELALLOC
 *   The individual item sizes are determined by the per-inode size
 *   calculations, which are described with the delalloc code.  This is pretty
 *   straightforward; it's just that the calculation of ->size encodes a lot of
 *   different items, and thus it gets used when updating inodes, inserting file
 *   extents, and inserting checksums.
 *
 * BLOCK_RSV_DELREFS
 *   We keep a running tally of how many delayed refs we have on the system.
 *   We assume each one of these delayed refs is going to use a full
 *   reservation.  We use the transaction items and pre-reserve space for every
 *   operation, and use this reservation to refill any gap between ->size and
 *   ->reserved that may exist.
 *
 *   From there it's straightforward, removing a delayed ref means we remove its
 *   count from ->size and free up reservations as necessary.  Since this is
 *   the most dynamic block reserve in the system, we will try to refill this
 *   block reserve first with any excess returned by any other block reserve.
 *
 * BLOCK_RSV_EMPTY
 *   This is the fallback block reserve to make us try to reserve space if we
 *   don't have a specific bucket for this allocation.  It is mostly used for
 *   updating the device tree and such, since that is a separate pool we're
 *   content to just reserve space from the space_info on demand.
 *
 * BLOCK_RSV_TEMP
 *   This is used by things like truncate and iput.  We will temporarily
 *   allocate a block reserve, set it to some size, and then truncate bytes
 *   until we have no space left.  With ->failfast set we'll simply return
 *   ENOSPC from btrfs_use_block_rsv() to signal that we need to unwind and try
 *   to make a new reservation.  This is because these operations are
 *   unbounded, so we want to do as much work as we can, and then back off and
 *   re-reserve.
 */
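
/*
 * A minimal sketch of the Reserve -> Use -> Finish flow described above,
 * built from the helpers in this file.  It is compiled out and purely
 * illustrative: error handling is trimmed, restoring trans->block_rsv is
 * skipped, and a caller supplying @trans and @root is assumed.
 */
#if 0
static void example_block_rsv_flow(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *rsv;

	/* Reserve: size a temporary rsv for a single tree block. */
	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
	if (!rsv)
		return;
	if (btrfs_block_rsv_add(fs_info, rsv, fs_info->nodesize,
				BTRFS_RESERVE_FLUSH_ALL))
		goto out;

	/*
	 * Use: with the trans handle pointed at our rsv,
	 * btrfs_alloc_tree_block() -> btrfs_use_block_rsv() consumes
	 * ->reserved, fs_info->nodesize at a time.
	 */
	trans->block_rsv = rsv;
	/* ... allocate and modify tree blocks ... */

out:
	/*
	 * Finish: release whatever is left, refilling the delayed refs or
	 * global rsv before giving the excess back to the space_info.
	 */
	btrfs_free_block_rsv(fs_info, rsv);
}
#endif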
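/*
 * Release space from @block_rsv: shrink ->size by @num_bytes ((u64)-1 means
 * "everything"), hand any excess of ->reserved over the new ->size to @dest
 * if it has room, and drop whatever is left from the space_info's
 * bytes_may_use.  Returns the number of bytes released from @block_rsv
 * (before any refill of @dest); the qgroup bytes to release are reported via
 * @qgroup_to_release_ret.
 */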
static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
				    struct btrfs_block_rsv *block_rsv,
				    struct btrfs_block_rsv *dest, u64 num_bytes,
				    u64 *qgroup_to_release_ret)
{
	struct btrfs_space_info *space_info = block_rsv->space_info;
	u64 qgroup_to_release = 0;
	u64 ret;

	spin_lock(&block_rsv->lock);
	if (num_bytes == (u64)-1) {
		num_bytes = block_rsv->size;
		qgroup_to_release = block_rsv->qgroup_rsv_size;
	}
	block_rsv->size -= num_bytes;
	if (block_rsv->reserved >= block_rsv->size) {
		num_bytes = block_rsv->reserved - block_rsv->size;
		block_rsv->reserved = block_rsv->size;
		block_rsv->full = 1;
	} else {
		num_bytes = 0;
	}
	if (block_rsv->qgroup_rsv_reserved >= block_rsv->qgroup_rsv_size) {
		qgroup_to_release = block_rsv->qgroup_rsv_reserved -
				    block_rsv->qgroup_rsv_size;
		block_rsv->qgroup_rsv_reserved = block_rsv->qgroup_rsv_size;
	} else {
		qgroup_to_release = 0;
	}
	spin_unlock(&block_rsv->lock);

	ret = num_bytes;
	if (num_bytes > 0) {
		if (dest) {
			spin_lock(&dest->lock);
			if (!dest->full) {
				u64 bytes_to_add;

				bytes_to_add = dest->size - dest->reserved;
				bytes_to_add = min(num_bytes, bytes_to_add);
				dest->reserved += bytes_to_add;
				if (dest->reserved >= dest->size)
					dest->full = 1;
				num_bytes -= bytes_to_add;
			}
			spin_unlock(&dest->lock);
		}
		if (num_bytes)
			btrfs_space_info_free_bytes_may_use(fs_info,
							    space_info,
							    num_bytes);
	}
	if (qgroup_to_release_ret)
		*qgroup_to_release_ret = qgroup_to_release;
	return ret;
}

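/*
 * Move @num_bytes of reserved space from @src to @dst, failing with -ENOSPC
 * if @src does not have that much in ->reserved.  @dst->size only grows with
 * the migrated bytes when @update_size is true.
 */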
int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src,
			    struct btrfs_block_rsv *dst, u64 num_bytes,
			    bool update_size)
{
	int ret;

	ret = btrfs_block_rsv_use_bytes(src, num_bytes);
	if (ret)
		return ret;

	btrfs_block_rsv_add_bytes(dst, num_bytes, update_size);
	return 0;
}

void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
{
	memset(rsv, 0, sizeof(*rsv));
	spin_lock_init(&rsv->lock);
	rsv->type = type;
}

void btrfs_init_metadata_block_rsv(struct btrfs_fs_info *fs_info,
				   struct btrfs_block_rsv *rsv,
				   unsigned short type)
{
	btrfs_init_block_rsv(rsv, type);
	rsv->space_info = btrfs_find_space_info(fs_info,
					    BTRFS_BLOCK_GROUP_METADATA);
}

struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_fs_info *fs_info,
					      unsigned short type)
{
	struct btrfs_block_rsv *block_rsv;

	block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
	if (!block_rsv)
		return NULL;

	btrfs_init_metadata_block_rsv(fs_info, block_rsv, type);
	return block_rsv;
}

void btrfs_free_block_rsv(struct btrfs_fs_info *fs_info,
			  struct btrfs_block_rsv *rsv)
{
	if (!rsv)
		return;
	btrfs_block_rsv_release(fs_info, rsv, (u64)-1, NULL);
	kfree(rsv);
}

int btrfs_block_rsv_add(struct btrfs_fs_info *fs_info,
			struct btrfs_block_rsv *block_rsv, u64 num_bytes,
			enum btrfs_reserve_flush_enum flush)
{
	int ret;

	if (num_bytes == 0)
		return 0;

	ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv, num_bytes, flush);
	if (!ret)
		btrfs_block_rsv_add_bytes(block_rsv, num_bytes, true);

	return ret;
}

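/*
 * Check whether @block_rsv is at least @min_factor tenths full:
 * div_factor() scales ->size by @min_factor / 10, and we return 0 if
 * ->reserved covers that fraction, -ENOSPC otherwise.
 */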
int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_factor)
{
	u64 num_bytes = 0;
	int ret = -ENOSPC;

	if (!block_rsv)
		return 0;

	spin_lock(&block_rsv->lock);
	num_bytes = div_factor(block_rsv->size, min_factor);
	if (block_rsv->reserved >= num_bytes)
		ret = 0;
	spin_unlock(&block_rsv->lock);

	return ret;
}

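/*
 * Top up @block_rsv so that ->reserved is at least @min_reserved, reserving
 * the shortfall from the space_info with the given @flush mode.  Note that
 * this refills ->reserved without growing ->size.
 */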
int btrfs_block_rsv_refill(struct btrfs_fs_info *fs_info,
			   struct btrfs_block_rsv *block_rsv, u64 min_reserved,
			   enum btrfs_reserve_flush_enum flush)
{
	u64 num_bytes = 0;
	int ret = -ENOSPC;

	if (!block_rsv)
		return 0;

	spin_lock(&block_rsv->lock);
	num_bytes = min_reserved;
	if (block_rsv->reserved >= num_bytes)
		ret = 0;
	else
		num_bytes -= block_rsv->reserved;
	spin_unlock(&block_rsv->lock);

	if (!ret)
		return 0;

	ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv, num_bytes, flush);
	if (!ret) {
		btrfs_block_rsv_add_bytes(block_rsv, num_bytes, false);
		return 0;
	}

	return ret;
}

u64 btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
			    struct btrfs_block_rsv *block_rsv, u64 num_bytes,
			    u64 *qgroup_to_release)
{
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
	struct btrfs_block_rsv *target = NULL;

	/*
	 * If we are the delayed_rsv then push to the global rsv, otherwise dump
	 * into the delayed rsv if it is not full.
	 */
	if (block_rsv == delayed_rsv)
		target = global_rsv;
	else if (block_rsv != global_rsv && !delayed_rsv->full)
		target = delayed_rsv;

	if (target && block_rsv->space_info != target->space_info)
		target = NULL;

	return block_rsv_release_bytes(fs_info, block_rsv, target, num_bytes,
				       qgroup_to_release);
}

int btrfs_block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv, u64 num_bytes)
{
	int ret = -ENOSPC;

	spin_lock(&block_rsv->lock);
	if (block_rsv->reserved >= num_bytes) {
		block_rsv->reserved -= num_bytes;
		if (block_rsv->reserved < block_rsv->size)
			block_rsv->full = 0;
		ret = 0;
	}
	spin_unlock(&block_rsv->lock);
	return ret;
}

void btrfs_block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
			       u64 num_bytes, bool update_size)
{
	spin_lock(&block_rsv->lock);
	block_rsv->reserved += num_bytes;
	if (update_size)
		block_rsv->size += num_bytes;
	else if (block_rsv->reserved >= block_rsv->size)
		block_rsv->full = 1;
	spin_unlock(&block_rsv->lock);
}

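/*
 * Conditionally steal @num_bytes from the global rsv into @dest: the bytes
 * only move if doing so leaves the global rsv with at least @min_factor
 * tenths of its ->size still reserved.
 */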
int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
			     struct btrfs_block_rsv *dest, u64 num_bytes,
			     int min_factor)
{
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	u64 min_bytes;

	if (global_rsv->space_info != dest->space_info)
		return -ENOSPC;

	spin_lock(&global_rsv->lock);
	min_bytes = div_factor(global_rsv->size, min_factor);
	if (global_rsv->reserved < min_bytes + num_bytes) {
		spin_unlock(&global_rsv->lock);
		return -ENOSPC;
	}
	global_rsv->reserved -= num_bytes;
	if (global_rsv->reserved < global_rsv->size)
		global_rsv->full = 0;
	spin_unlock(&global_rsv->lock);

	btrfs_block_rsv_add_bytes(dest, num_bytes, true);
	return 0;
}

void btrfs_update_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
	struct btrfs_space_info *sinfo = block_rsv->space_info;
	struct btrfs_root *root, *tmp;
	u64 num_bytes = btrfs_root_used(&fs_info->tree_root->root_item);
	unsigned int min_items = 1;

	/*
	 * The global block rsv is based on the size of the extent tree, the
	 * checksum tree and the root tree.  If the fs is empty we want to set
	 * it to a minimal amount for safety.
	 *
	 * We also are going to need to modify the minimum of the tree root and
	 * any global roots we could touch.
	 */
	read_lock(&fs_info->global_root_lock);
	rbtree_postorder_for_each_entry_safe(root, tmp, &fs_info->global_root_tree,
					     rb_node) {
		if (root->root_key.objectid == BTRFS_EXTENT_TREE_OBJECTID ||
		    root->root_key.objectid == BTRFS_CSUM_TREE_OBJECTID ||
		    root->root_key.objectid == BTRFS_FREE_SPACE_TREE_OBJECTID) {
			num_bytes += btrfs_root_used(&root->root_item);
			min_items++;
		}
	}
	read_unlock(&fs_info->global_root_lock);

	/*
	 * But we also want to reserve enough space so we can do the fallback
	 * global reserve for an unlink, which is an additional 5 items (see the
	 * comment in __unlink_start_trans for what we're modifying).
	 *
	 * We also need space for the delayed ref updates from the unlink, so
	 * it's 10 in total: 5 for the actual operation and 5 for the delayed
	 * ref updates.
	 */
	min_items += 10;

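	/*
	 * Rough worked example (assuming btrfs_calc_insert_metadata_size()
	 * still charges nodesize * BTRFS_MAX_LEVEL * 2 per item): with a 16K
	 * nodesize and min_items == 13 (extent and csum global roots, no
	 * free space tree), the floor below is 16384 * 8 * 2 * 13, a little
	 * over 3MiB.
	 */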
	num_bytes = max_t(u64, num_bytes,
			  btrfs_calc_insert_metadata_size(fs_info, min_items));

	spin_lock(&sinfo->lock);
	spin_lock(&block_rsv->lock);

	block_rsv->size = min_t(u64, num_bytes, SZ_512M);

	if (block_rsv->reserved < block_rsv->size) {
		num_bytes = block_rsv->size - block_rsv->reserved;
		btrfs_space_info_update_bytes_may_use(fs_info, sinfo,
						      num_bytes);
		block_rsv->reserved = block_rsv->size;
	} else if (block_rsv->reserved > block_rsv->size) {
		num_bytes = block_rsv->reserved - block_rsv->size;
		btrfs_space_info_update_bytes_may_use(fs_info, sinfo,
						      -num_bytes);
		block_rsv->reserved = block_rsv->size;
		btrfs_try_granting_tickets(fs_info, sinfo);
	}

	if (block_rsv->reserved == block_rsv->size)
		block_rsv->full = 1;
	else
		block_rsv->full = 0;

	if (block_rsv->size >= sinfo->total_bytes)
		sinfo->force_alloc = CHUNK_ALLOC_FORCE;
	spin_unlock(&block_rsv->lock);
	spin_unlock(&sinfo->lock);
}

void btrfs_init_root_block_rsv(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	switch (root->root_key.objectid) {
	case BTRFS_CSUM_TREE_OBJECTID:
	case BTRFS_EXTENT_TREE_OBJECTID:
	case BTRFS_FREE_SPACE_TREE_OBJECTID:
		root->block_rsv = &fs_info->delayed_refs_rsv;
		break;
	case BTRFS_ROOT_TREE_OBJECTID:
	case BTRFS_DEV_TREE_OBJECTID:
	case BTRFS_QUOTA_TREE_OBJECTID:
		root->block_rsv = &fs_info->global_block_rsv;
		break;
	case BTRFS_CHUNK_TREE_OBJECTID:
		root->block_rsv = &fs_info->chunk_block_rsv;
		break;
	default:
		root->block_rsv = NULL;
		break;
	}
}

void btrfs_init_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *space_info;

	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
	fs_info->chunk_block_rsv.space_info = space_info;

	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
	fs_info->global_block_rsv.space_info = space_info;
	fs_info->trans_block_rsv.space_info = space_info;
	fs_info->empty_block_rsv.space_info = space_info;
	fs_info->delayed_block_rsv.space_info = space_info;
	fs_info->delayed_refs_rsv.space_info = space_info;

	btrfs_update_global_block_rsv(fs_info);
}

void btrfs_release_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	btrfs_block_rsv_release(fs_info, &fs_info->global_block_rsv, (u64)-1,
				NULL);
	WARN_ON(fs_info->trans_block_rsv.size > 0);
	WARN_ON(fs_info->trans_block_rsv.reserved > 0);
	WARN_ON(fs_info->chunk_block_rsv.size > 0);
	WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
	WARN_ON(fs_info->delayed_block_rsv.size > 0);
	WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
	WARN_ON(fs_info->delayed_refs_rsv.reserved > 0);
	WARN_ON(fs_info->delayed_refs_rsv.size > 0);
}

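/*
 * Pick the rsv a tree block allocation should be charged to: the
 * transaction's rsv for shareable (fs tree) roots, the uuid root, and csum
 * insertion into the csum tree; otherwise the root's own rsv if it has one,
 * with the empty rsv as the final fallback.
 */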
static struct btrfs_block_rsv *get_block_rsv(
					const struct btrfs_trans_handle *trans,
					const struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *block_rsv = NULL;

	if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) ||
	    (root == fs_info->uuid_root) ||
	    (trans->adding_csums &&
	     root->root_key.objectid == BTRFS_CSUM_TREE_OBJECTID))
		block_rsv = trans->block_rsv;

	if (!block_rsv)
		block_rsv = root->block_rsv;

	if (!block_rsv)
		block_rsv = &fs_info->empty_block_rsv;

	return block_rsv;
}

struct btrfs_block_rsv *btrfs_use_block_rsv(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    u32 blocksize)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	int ret;
	bool global_updated = false;

	block_rsv = get_block_rsv(trans, root);

	if (unlikely(block_rsv->size == 0))
		goto try_reserve;
again:
	ret = btrfs_block_rsv_use_bytes(block_rsv, blocksize);
	if (!ret)
		return block_rsv;

	if (block_rsv->failfast)
		return ERR_PTR(ret);

	if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
		global_updated = true;
		btrfs_update_global_block_rsv(fs_info);
		goto again;
	}

	/*
	 * The global reserve still exists to save us from ourselves, so don't
	 * warn_on if we are short on our delayed refs reserve.
	 */
	if (block_rsv->type != BTRFS_BLOCK_RSV_DELREFS &&
	    btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		static DEFINE_RATELIMIT_STATE(_rs,
				DEFAULT_RATELIMIT_INTERVAL * 10,
				/*DEFAULT_RATELIMIT_BURST*/ 1);
		if (__ratelimit(&_rs))
			WARN(1, KERN_DEBUG
				"BTRFS: block rsv %d returned %d\n",
				block_rsv->type, ret);
	}
try_reserve:
	ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv, blocksize,
					   BTRFS_RESERVE_NO_FLUSH);
	if (!ret)
		return block_rsv;
	/*
	 * If we couldn't reserve metadata bytes, try to use some from the
	 * global reserve if our space_info is the same as the global
	 * reserve's.
	 */
	if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
	    block_rsv->space_info == global_rsv->space_info) {
		ret = btrfs_block_rsv_use_bytes(global_rsv, blocksize);
		if (!ret)
			return global_rsv;
	}
	return ERR_PTR(ret);
}