cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

fastmap-wl.c (11850B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012 Linutronix GmbH
 * Copyright (c) 2014 sigma star gmbh
 * Author: Richard Weinberger <richard@nod.at>
 */

/**
 * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
 * @wrk: the work description object
 */
static void update_fastmap_work_fn(struct work_struct *wrk)
{
	struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);

	ubi_update_fastmap(ubi);
	spin_lock(&ubi->wl_lock);
	ubi->fm_work_scheduled = 0;
	spin_unlock(&ubi->wl_lock);
}

/**
 * find_anchor_wl_entry - find a wear-leveling entry to be used as anchor PEB.
 * @root: the RB-tree to look in
 */
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
{
	struct rb_node *p;
	struct ubi_wl_entry *e, *victim = NULL;
	int max_ec = UBI_MAX_ERASECOUNTER;

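	/*
	 * The fastmap anchor must reside within the first UBI_FM_MAX_START
	 * PEBs so that attach can locate it by scanning only the start of
	 * the device; pick the least worn candidate from that range.
	 */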
	ubi_rb_for_each_entry(p, e, root, u.rb) {
		if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
			victim = e;
			max_ec = e->ec;
		}
	}

	return victim;
}

static inline void return_unused_peb(struct ubi_device *ubi,
				     struct ubi_wl_entry *e)
{
	wl_tree_add(e, &ubi->free);
	ubi->free_count++;
}

/**
 * return_unused_pool_pebs - returns unused PEBs to the free tree.
 * @ubi: UBI device description object
 * @pool: fastmap pool description object
 */
static void return_unused_pool_pebs(struct ubi_device *ubi,
				    struct ubi_fm_pool *pool)
{
	int i;
	struct ubi_wl_entry *e;

	for (i = pool->used; i < pool->size; i++) {
		e = ubi->lookuptbl[pool->pebs[i]];
		return_unused_peb(ubi, e);
	}
}

/**
 * ubi_wl_get_fm_peb - find a physical eraseblock for fastmap use.
 * @ubi: UBI device description object
 * @anchor: If non-zero, the PEB will be used as anchor PEB by fastmap
 *
 * The function returns a free physical eraseblock (an anchor candidate if
 * @anchor is set, otherwise a mean wear-leveling entry) and removes it from
 * the wl subsystem.
 * Must be called with wl_lock held!
 */
struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
{
	struct ubi_wl_entry *e = NULL;

	if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
		goto out;

	if (anchor)
		e = find_anchor_wl_entry(&ubi->free);
	else
		e = find_mean_wl_entry(ubi, &ubi->free);

	if (!e)
		goto out;

	self_check_in_wl_tree(ubi, e, &ubi->free);

	/* Remove it from the free list; the wl subsystem no longer
	 * knows about this eraseblock. */
	rb_erase(&e->u.rb, &ubi->free);
	ubi->free_count--;
out:
	return e;
}

/*
 * has_enough_free_count - whether ubi has enough free pebs to fill fm pools
 * @ubi: UBI device description object
 * @is_wl_pool: whether UBI is filling the wear-leveling pool
 *
 * This helper checks whether there are enough free PEBs (after deducting
 * the PEBs needed by fastmap) to fill fm_pool and fm_wl_pool; the check
 * applies once at least one free PEB has been placed into fm_wl_pool.
 * For the wear-leveling pool, UBI also reserves free PEBs for bad-PEB
 * handling, because after new bad PEBs are produced there might not be
 * enough free PEBs left for user volumes.
 */
static bool has_enough_free_count(struct ubi_device *ubi, bool is_wl_pool)
{
	int fm_used = 0;	/* fastmap non-anchor PEBs */
	int beb_rsvd_pebs;

	if (!ubi->free.rb_node)
		return false;

	beb_rsvd_pebs = is_wl_pool ? ubi->beb_rsvd_pebs : 0;
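	/*
	 * A fastmap occupies fm_size bytes, i.e. fm_size / leb_size PEBs;
	 * the anchor PEB is accounted for separately, hence the "- 1".
	 */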
	if (ubi->fm_wl_pool.size > 0 && !(ubi->ro_mode || ubi->fm_disabled))
		fm_used = ubi->fm_size / ubi->leb_size - 1;

	return ubi->free_count - beb_rsvd_pebs > fm_used;
}

/**
 * ubi_refill_pools - refills all fastmap PEB pools.
 * @ubi: UBI device description object
 */
void ubi_refill_pools(struct ubi_device *ubi)
{
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	struct ubi_wl_entry *e;
	int enough;

	spin_lock(&ubi->wl_lock);

	return_unused_pool_pebs(ubi, wl_pool);
	return_unused_pool_pebs(ubi, pool);

	wl_pool->size = 0;
	pool->size = 0;

	if (ubi->fm_anchor) {
		wl_tree_add(ubi->fm_anchor, &ubi->free);
		ubi->free_count++;
	}

	/*
	 * All available PEBs are in ubi->free, now is the time to get
	 * the best anchor PEB.
	 */
	ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1);

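	/*
	 * Fill both pools in lock step: take one PEB for fm_pool and one
	 * for fm_wl_pool per iteration, until both pools are full or the
	 * free-space check fails.
	 */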
	for (;;) {
		enough = 0;
		if (pool->size < pool->max_size) {
			if (!has_enough_free_count(ubi, false))
				break;

			e = wl_get_wle(ubi);
			if (!e)
				break;

			pool->pebs[pool->size] = e->pnum;
			pool->size++;
		} else
			enough++;

		if (wl_pool->size < wl_pool->max_size) {
			if (!has_enough_free_count(ubi, true))
				break;

			e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
			self_check_in_wl_tree(ubi, e, &ubi->free);
			rb_erase(&e->u.rb, &ubi->free);
			ubi->free_count--;

			wl_pool->pebs[wl_pool->size] = e->pnum;
			wl_pool->size++;
		} else
			enough++;

		if (enough == 2)
			break;
	}

	wl_pool->used = 0;
	pool->used = 0;

	spin_unlock(&ubi->wl_lock);
}

/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution of
 * pending works. This may be needed if, for example, the background thread is
 * disabled. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int produce_free_peb(struct ubi_device *ubi)
{
	int err;

	while (!ubi->free.rb_node && ubi->works_count) {
		dbg_wl("do one work synchronously");
		err = do_work(ubi);

		if (err)
			return err;
	}

	return 0;
}

/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function returns a physical eraseblock in case of success and a
 * negative error code in case of failure.
 * Returns with ubi->fm_eba_sem held in read mode!
 */
int ubi_wl_get_peb(struct ubi_device *ubi)
{
	int ret, attempts = 0;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;

again:
	down_read(&ubi->fm_eba_sem);
	spin_lock(&ubi->wl_lock);

	/* We also check the WL pool here because at this point we can
	 * still refill the WL pool synchronously. */
	if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->fm_eba_sem);
		ret = ubi_update_fastmap(ubi);
		if (ret) {
			ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
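			/*
			 * The caller expects fm_eba_sem to be held in read
			 * mode even on failure, so re-take it before
			 * returning.
			 */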
			down_read(&ubi->fm_eba_sem);
			return -ENOSPC;
		}
		down_read(&ubi->fm_eba_sem);
		spin_lock(&ubi->wl_lock);
	}

	if (pool->used == pool->size) {
		spin_unlock(&ubi->wl_lock);
		attempts++;
		if (attempts == 10) {
			ubi_err(ubi, "Unable to get a free PEB from user WL pool");
			ret = -ENOSPC;
			goto out;
		}
		up_read(&ubi->fm_eba_sem);
		ret = produce_free_peb(ubi);
		if (ret < 0) {
			down_read(&ubi->fm_eba_sem);
			goto out;
		}
		goto again;
	}

	ubi_assert(pool->used < pool->size);
	ret = pool->pebs[pool->used++];
	prot_queue_add(ubi, ubi->lookuptbl[ret]);
	spin_unlock(&ubi->wl_lock);
out:
	return ret;
}

/**
 * next_peb_for_wl - returns next PEB to be used internally by the
 * WL sub-system.
 *
 * @ubi: UBI device description object
 */
static struct ubi_wl_entry *next_peb_for_wl(struct ubi_device *ubi)
{
	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
	int pnum;

	if (pool->used == pool->size)
		return NULL;

	pnum = pool->pebs[pool->used];
	return ubi->lookuptbl[pnum];
}

/**
 * need_wear_leveling - checks whether to trigger a wear-leveling work.
 * UBI fetches free PEBs from wl_pool, but we check free PEBs from both
 * 'wl_pool' and 'ubi->free', because a free PEB in the 'ubi->free' tree
 * may be moved into 'wl_pool' by ubi_refill_pools().
 *
 * @ubi: UBI device description object
 */
static bool need_wear_leveling(struct ubi_device *ubi)
{
	int ec;
	struct ubi_wl_entry *e;

	if (!ubi->used.rb_node)
		return false;

	e = next_peb_for_wl(ubi);
	if (!e) {
		if (!ubi->free.rb_node)
			return false;
		e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
		ec = e->ec;
	} else {
		ec = e->ec;
		if (ubi->free.rb_node) {
			e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
			ec = max(ec, e->ec);
		}
	}
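	/*
	 * The used tree is sorted by erase counter, so rb_first() yields the
	 * least worn used PEB; wear leveling is needed once the candidate's
	 * counter exceeds it by UBI_WL_THRESHOLD.
	 */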
	e = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);

	return ec - e->ec >= UBI_WL_THRESHOLD;
}

/* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
 *
 * @ubi: UBI device description object
 */
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
{
	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
	int pnum;

	ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem));

	if (pool->used == pool->size) {
		/* We cannot update the fastmap here because this
		 * function is called in atomic context.
		 * Let's fail here and refill/update it as soon as possible. */
		if (!ubi->fm_work_scheduled) {
			ubi->fm_work_scheduled = 1;
			schedule_work(&ubi->fm_work);
		}
		return NULL;
	}

	pnum = pool->pebs[pool->used++];
	return ubi->lookuptbl[pnum];
}

/**
 * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
 * @ubi: UBI device description object
 */
int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
{
	struct ubi_work *wrk;
	struct ubi_wl_entry *anchor;

	spin_lock(&ubi->wl_lock);

	/* Do we already have an anchor? */
	if (ubi->fm_anchor) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}

	/* See if we can find an anchor PEB on the list of free PEBs */
	anchor = ubi_wl_get_fm_peb(ubi, 1);
	if (anchor) {
		ubi->fm_anchor = anchor;
		spin_unlock(&ubi->wl_lock);
		return 0;
	}

	ubi->fm_do_produce_anchor = 1;
	/* No luck, trigger wear leveling to produce a new anchor PEB. */
	if (ubi->wl_scheduled) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}
	ubi->wl_scheduled = 1;
	spin_unlock(&ubi->wl_lock);

	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wrk) {
		spin_lock(&ubi->wl_lock);
		ubi->wl_scheduled = 0;
		spin_unlock(&ubi->wl_lock);
		return -ENOMEM;
	}

	wrk->func = &wear_leveling_worker;
	__schedule_ubi_work(ubi, wrk);
	return 0;
}

/**
 * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
 * sub-system.
 * see: ubi_wl_put_peb()
 *
 * @ubi: UBI device description object
 * @fm_e: physical eraseblock to return
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if this physical eraseblock has to be tortured
 */
int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
		      int lnum, int torture)
{
	struct ubi_wl_entry *e;
	int vol_id, pnum = fm_e->pnum;

	dbg_wl("PEB %d", pnum);

	ubi_assert(pnum >= 0);
	ubi_assert(pnum < ubi->peb_count);

	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];

	/* This can happen if we recovered from a fastmap for the very
	 * first time and are now writing a new one. In this case the wl
	 * system has never seen any PEB used by the original fastmap.
	 */
	if (!e) {
		e = fm_e;
		ubi_assert(e->ec >= 0);
		ubi->lookuptbl[pnum] = e;
	}

	spin_unlock(&ubi->wl_lock);

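	/* LEB 0 of a fastmap is the superblock (anchor) PEB, any other
	 * lnum denotes a fastmap data PEB. */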
	vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
	return schedule_erase(ubi, e, vol_id, lnum, torture, true);
}

/**
 * ubi_is_erase_work - checks whether a work is erase work.
 * @wrk: The work object to be checked
 */
int ubi_is_erase_work(struct ubi_work *wrk)
{
	return wrk->func == erase_worker;
}

static void ubi_fastmap_close(struct ubi_device *ubi)
{
	int i;

	return_unused_pool_pebs(ubi, &ubi->fm_pool);
	return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);

	if (ubi->fm_anchor) {
		return_unused_peb(ubi, ubi->fm_anchor);
		ubi->fm_anchor = NULL;
	}

	if (ubi->fm) {
		for (i = 0; i < ubi->fm->used_blocks; i++)
			kfree(ubi->fm->e[i]);
	}
	kfree(ubi->fm);
}

/**
 * may_reserve_for_fm - tests whether a PEB shall be reserved for fastmap.
 * See find_mean_wl_entry()
 *
 * @ubi: UBI device description object
 * @e: physical eraseblock to return
 * @root: RB tree to test against.
 */
static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
					       struct ubi_wl_entry *e,
					       struct rb_root *root)
{
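	/*
	 * If fastmap is enabled but no fastmap is attached yet, keep PEBs
	 * below UBI_FM_MAX_START free as potential anchors and hand out the
	 * root's successor in the RB-tree instead.
	 */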
	if (e && !ubi->fm_disabled && !ubi->fm &&
	    e->pnum < UBI_FM_MAX_START)
		e = rb_entry(rb_next(root->rb_node),
			     struct ubi_wl_entry, u.rb);

	return e;
}