cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

sysfs.c (30902B)


// SPDX-License-Identifier: GPL-2.0
/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "sysfs.h"
#include "btree.h"
#include "request.h"
#include "writeback.h"
#include "features.h"

#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/sched/clock.h>

extern bool bcache_is_reboot;

/* Default is 0 ("writethrough") */
static const char * const bch_cache_modes[] = {
	"writethrough",
	"writeback",
	"writearound",
	"none",
	NULL
};

static const char * const bch_reada_cache_policies[] = {
	"all",
	"meta-only",
	NULL
};

/* Default is 0 ("auto") */
static const char * const bch_stop_on_failure_modes[] = {
	"auto",
	"always",
	NULL
};

static const char * const cache_replacement_policies[] = {
	"lru",
	"fifo",
	"random",
	NULL
};

static const char * const error_actions[] = {
	"unregister",
	"panic",
	NULL
};

write_attribute(attach);
write_attribute(detach);
write_attribute(unregister);
write_attribute(stop);
write_attribute(clear_stats);
write_attribute(trigger_gc);
write_attribute(prune_cache);
write_attribute(flash_vol_create);

read_attribute(bucket_size);
read_attribute(block_size);
read_attribute(nbuckets);
read_attribute(tree_depth);
read_attribute(root_usage_percent);
read_attribute(priority_stats);
read_attribute(btree_cache_size);
read_attribute(btree_cache_max_chain);
read_attribute(cache_available_percent);
read_attribute(written);
read_attribute(btree_written);
read_attribute(metadata_written);
read_attribute(active_journal_entries);
read_attribute(backing_dev_name);
read_attribute(backing_dev_uuid);

sysfs_time_stats_attribute(btree_gc,	sec, ms);
sysfs_time_stats_attribute(btree_split, sec, us);
sysfs_time_stats_attribute(btree_sort,	ms,  us);
sysfs_time_stats_attribute(btree_read,	ms,  us);

read_attribute(btree_nodes);
read_attribute(btree_used_percent);
read_attribute(average_key_size);
read_attribute(dirty_data);
read_attribute(bset_tree_stats);
read_attribute(feature_compat);
read_attribute(feature_ro_compat);
read_attribute(feature_incompat);

read_attribute(state);
read_attribute(cache_read_races);
read_attribute(reclaim);
read_attribute(reclaimed_journal_buckets);
read_attribute(flush_write);
read_attribute(writeback_keys_done);
read_attribute(writeback_keys_failed);
read_attribute(io_errors);
read_attribute(congested);
read_attribute(cutoff_writeback);
read_attribute(cutoff_writeback_sync);
rw_attribute(congested_read_threshold_us);
rw_attribute(congested_write_threshold_us);

rw_attribute(sequential_cutoff);
rw_attribute(data_csum);
rw_attribute(cache_mode);
rw_attribute(readahead_cache_policy);
rw_attribute(stop_when_cache_set_failed);
rw_attribute(writeback_metadata);
rw_attribute(writeback_running);
rw_attribute(writeback_percent);
rw_attribute(writeback_delay);
rw_attribute(writeback_rate);
rw_attribute(writeback_consider_fragment);

rw_attribute(writeback_rate_update_seconds);
rw_attribute(writeback_rate_i_term_inverse);
rw_attribute(writeback_rate_p_term_inverse);
rw_attribute(writeback_rate_fp_term_low);
rw_attribute(writeback_rate_fp_term_mid);
rw_attribute(writeback_rate_fp_term_high);
rw_attribute(writeback_rate_minimum);
read_attribute(writeback_rate_debug);

read_attribute(stripe_size);
read_attribute(partial_stripes_expensive);

rw_attribute(synchronous);
rw_attribute(journal_delay_ms);
rw_attribute(io_disable);
rw_attribute(discard);
rw_attribute(running);
rw_attribute(label);
rw_attribute(errors);
rw_attribute(io_error_limit);
rw_attribute(io_error_halflife);
rw_attribute(verify);
rw_attribute(bypass_torture_test);
rw_attribute(key_merging_disabled);
rw_attribute(gc_always_rewrite);
rw_attribute(expensive_debug_checks);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
rw_attribute(idle_max_writeback_rate);
rw_attribute(gc_after_writeback);
rw_attribute(size);

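The three attribute helpers above each define a bare struct attribute whose mode encodes the permitted access. A sketch of the presumed expansion, paraphrasing the companion drivers/md/bcache/sysfs.h (not part of this file):

/* Presumed expansion of the attribute helpers in sysfs.h. */
#define __sysfs_attribute(_name, _mode)					\
	static struct attribute sysfs_##_name =				\
		{ .name = #_name, .mode = _mode }

#define write_attribute(n)	__sysfs_attribute(n, 0200)	/* -w- */
#define read_attribute(n)	__sysfs_attribute(n, 0444)	/* r-- */
#define rw_attribute(n)		__sysfs_attribute(n, 0644)	/* rw- */

So rw_attribute(size); defines sysfs_size, the object the show/store handlers below compare attr against.
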
static ssize_t bch_snprint_string_list(char *buf,
				       size_t size,
				       const char * const list[],
				       size_t selected)
{
	char *out = buf;
	size_t i;

	for (i = 0; list[i]; i++)
		out += scnprintf(out, buf + size - out,
				i == selected ? "[%s] " : "%s ", list[i]);

	out[-1] = '\n';
	return out - buf;
}

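bch_snprint_string_list() renders a NULL-terminated string table with the selected entry bracketed; reading cache_mode on a device in writeback mode, for example, produces:

	writethrough [writeback] writearound none

Note that the function assumes a non-empty list: out[-1] overwrites the final trailing space with the newline.
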
SHOW(__bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	char const *states[] = { "no cache", "clean", "dirty", "inconsistent" };
	int wb = dc->writeback_running;

#define var(stat)		(dc->stat)

	if (attr == &sysfs_cache_mode)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_cache_modes,
					       BDEV_CACHE_MODE(&dc->sb));

	if (attr == &sysfs_readahead_cache_policy)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					      bch_reada_cache_policies,
					      dc->cache_readahead_policy);

	if (attr == &sysfs_stop_when_cache_set_failed)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_stop_on_failure_modes,
					       dc->stop_when_cache_set_failed);


	sysfs_printf(data_csum,		"%i", dc->disk.data_csum);
	var_printf(verify,		"%i");
	var_printf(bypass_torture_test,	"%i");
	var_printf(writeback_metadata,	"%i");
	var_printf(writeback_running,	"%i");
	var_printf(writeback_consider_fragment,	"%i");
	var_print(writeback_delay);
	var_print(writeback_percent);
	sysfs_hprint(writeback_rate,
		     wb ? atomic_long_read(&dc->writeback_rate.rate) << 9 : 0);
	sysfs_printf(io_errors,		"%i", atomic_read(&dc->io_errors));
	sysfs_printf(io_error_limit,	"%i", dc->error_limit);
	sysfs_printf(io_disable,	"%i", dc->io_disable);
	var_print(writeback_rate_update_seconds);
	var_print(writeback_rate_i_term_inverse);
	var_print(writeback_rate_p_term_inverse);
	var_print(writeback_rate_fp_term_low);
	var_print(writeback_rate_fp_term_mid);
	var_print(writeback_rate_fp_term_high);
	var_print(writeback_rate_minimum);

	if (attr == &sysfs_writeback_rate_debug) {
		char rate[20];
		char dirty[20];
		char target[20];
		char proportional[20];
		char integral[20];
		char change[20];
		s64 next_io;

		/*
		 * Except for dirty and target, other values should
		 * be 0 if writeback is not running.
		 */
		bch_hprint(rate,
			   wb ? atomic_long_read(&dc->writeback_rate.rate) << 9
			      : 0);
		bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9);
		bch_hprint(target, dc->writeback_rate_target << 9);
		bch_hprint(proportional,
			   wb ? dc->writeback_rate_proportional << 9 : 0);
		bch_hprint(integral,
			   wb ? dc->writeback_rate_integral_scaled << 9 : 0);
		bch_hprint(change, wb ? dc->writeback_rate_change << 9 : 0);
		next_io = wb ? div64_s64(dc->writeback_rate.next - local_clock(),
					 NSEC_PER_MSEC) : 0;

		return sprintf(buf,
			       "rate:\t\t%s/sec\n"
			       "dirty:\t\t%s\n"
			       "target:\t\t%s\n"
			       "proportional:\t%s\n"
			       "integral:\t%s\n"
			       "change:\t\t%s/sec\n"
			       "next io:\t%llims\n",
			       rate, dirty, target, proportional,
			       integral, change, next_io);
	}

	sysfs_hprint(dirty_data,
		     bcache_dev_sectors_dirty(&dc->disk) << 9);

	sysfs_hprint(stripe_size,	 ((uint64_t)dc->disk.stripe_size) << 9);
	var_printf(partial_stripes_expensive,	"%u");

	var_hprint(sequential_cutoff);

	sysfs_print(running,		atomic_read(&dc->running));
	sysfs_print(state,		states[BDEV_STATE(&dc->sb)]);

	if (attr == &sysfs_label) {
		memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
		buf[SB_LABEL_SIZE] = '\0';	/* on-disk label need not be NUL terminated */
		strcat(buf, "\n");
		return strlen(buf);
	}

	if (attr == &sysfs_backing_dev_name) {
		snprintf(buf, BDEVNAME_SIZE + 1, "%pg", dc->bdev);
		strcat(buf, "\n");
		return strlen(buf);
	}

	if (attr == &sysfs_backing_dev_uuid) {
		/* convert binary uuid into 36-byte string plus '\0' */
		snprintf(buf, 36+1, "%pU", dc->sb.uuid);
		strcat(buf, "\n");
		return strlen(buf);
	}

#undef var
	return 0;
}
SHOW_LOCKED(bch_cached_dev)

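SHOW(), STORE() and their _LOCKED variants are macros from the same header: SHOW(fn) opens the definition of fn##_show(), and SHOW_LOCKED(fn) generates a wrapper that takes bch_register_lock around the double-underscore worker. A sketch of the presumed shape, abridged from sysfs.h:

#define SHOW(fn)							\
static ssize_t fn ## _show(struct kobject *kobj,			\
			   struct attribute *attr, char *buf)

#define SHOW_LOCKED(fn)							\
SHOW(fn)								\
{									\
	ssize_t ret;							\
	mutex_lock(&bch_register_lock);					\
	ret = __ ## fn ## _show(kobj, attr, buf);			\
	mutex_unlock(&bch_register_lock);				\
	return ret;							\
}

So SHOW_LOCKED(bch_cached_dev) above emits bch_cached_dev_show(), which forwards to the __bch_cached_dev_show() body defined by SHOW(__bch_cached_dev).
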
STORE(__cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	ssize_t v;
	struct cache_set *c;
	struct kobj_uevent_env *env;

	/* no user space access if system is rebooting */
	if (bcache_is_reboot)
		return -EBUSY;

#define d_strtoul(var)		sysfs_strtoul(var, dc->var)
#define d_strtoul_nonzero(var)	sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
#define d_strtoi_h(var)		sysfs_hatoi(var, dc->var)

	sysfs_strtoul(data_csum,	dc->disk.data_csum);
	d_strtoul(verify);
	sysfs_strtoul_bool(bypass_torture_test, dc->bypass_torture_test);
	sysfs_strtoul_bool(writeback_metadata, dc->writeback_metadata);
	sysfs_strtoul_bool(writeback_running, dc->writeback_running);
	sysfs_strtoul_bool(writeback_consider_fragment, dc->writeback_consider_fragment);
	sysfs_strtoul_clamp(writeback_delay, dc->writeback_delay, 0, UINT_MAX);

	sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent,
			    0, bch_cutoff_writeback);

	if (attr == &sysfs_writeback_rate) {
		ssize_t ret;
		long int v = atomic_long_read(&dc->writeback_rate.rate);

		ret = strtoul_safe_clamp(buf, v, 1, INT_MAX);

		if (!ret) {
			atomic_long_set(&dc->writeback_rate.rate, v);
			ret = size;
		}

		return ret;
	}

	sysfs_strtoul_clamp(writeback_rate_update_seconds,
			    dc->writeback_rate_update_seconds,
			    1, WRITEBACK_RATE_UPDATE_SECS_MAX);
	sysfs_strtoul_clamp(writeback_rate_i_term_inverse,
			    dc->writeback_rate_i_term_inverse,
			    1, UINT_MAX);
	sysfs_strtoul_clamp(writeback_rate_p_term_inverse,
			    dc->writeback_rate_p_term_inverse,
			    1, UINT_MAX);
	sysfs_strtoul_clamp(writeback_rate_fp_term_low,
			    dc->writeback_rate_fp_term_low,
			    1, dc->writeback_rate_fp_term_mid - 1);
	sysfs_strtoul_clamp(writeback_rate_fp_term_mid,
			    dc->writeback_rate_fp_term_mid,
			    dc->writeback_rate_fp_term_low + 1,
			    dc->writeback_rate_fp_term_high - 1);
	sysfs_strtoul_clamp(writeback_rate_fp_term_high,
			    dc->writeback_rate_fp_term_high,
			    dc->writeback_rate_fp_term_mid + 1, UINT_MAX);
	sysfs_strtoul_clamp(writeback_rate_minimum,
			    dc->writeback_rate_minimum,
			    1, UINT_MAX);

	sysfs_strtoul_clamp(io_error_limit, dc->error_limit, 0, INT_MAX);

	if (attr == &sysfs_io_disable) {
		int v = strtoul_or_return(buf);

		dc->io_disable = v ? 1 : 0;
	}

	sysfs_strtoul_clamp(sequential_cutoff,
			    dc->sequential_cutoff,
			    0, UINT_MAX);

	if (attr == &sysfs_clear_stats)
		bch_cache_accounting_clear(&dc->accounting);

	if (attr == &sysfs_running &&
	    strtoul_or_return(buf)) {
		v = bch_cached_dev_run(dc);
		if (v)
			return v;
	}

	if (attr == &sysfs_cache_mode) {
		v = __sysfs_match_string(bch_cache_modes, -1, buf);
		if (v < 0)
			return v;

		if ((unsigned int) v != BDEV_CACHE_MODE(&dc->sb)) {
			SET_BDEV_CACHE_MODE(&dc->sb, v);
			bch_write_bdev_super(dc, NULL);
		}
	}

	if (attr == &sysfs_readahead_cache_policy) {
		v = __sysfs_match_string(bch_reada_cache_policies, -1, buf);
		if (v < 0)
			return v;

		if ((unsigned int) v != dc->cache_readahead_policy)
			dc->cache_readahead_policy = v;
	}

	if (attr == &sysfs_stop_when_cache_set_failed) {
		v = __sysfs_match_string(bch_stop_on_failure_modes, -1, buf);
		if (v < 0)
			return v;

		dc->stop_when_cache_set_failed = v;
	}

	if (attr == &sysfs_label) {
		if (size > SB_LABEL_SIZE)
			return -EINVAL;
		memcpy(dc->sb.label, buf, size);
		if (size < SB_LABEL_SIZE)
			dc->sb.label[size] = '\0';
		if (size && dc->sb.label[size - 1] == '\n')
			dc->sb.label[size - 1] = '\0';
		bch_write_bdev_super(dc, NULL);
		if (dc->disk.c) {
			memcpy(dc->disk.c->uuids[dc->disk.id].label,
			       buf, SB_LABEL_SIZE);
			bch_uuid_write(dc->disk.c);
		}
		env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
		if (!env)
			return -ENOMEM;
		add_uevent_var(env, "DRIVER=bcache");
		add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid);
		add_uevent_var(env, "CACHED_LABEL=%s", buf);
		kobject_uevent_env(&disk_to_dev(dc->disk.disk)->kobj,
				   KOBJ_CHANGE,
				   env->envp);
		kfree(env);
	}

	if (attr == &sysfs_attach) {
		uint8_t		set_uuid[16];

		if (bch_parse_uuid(buf, set_uuid) < 16)
			return -EINVAL;

		v = -ENOENT;
		list_for_each_entry(c, &bch_cache_sets, list) {
			v = bch_cached_dev_attach(dc, c, set_uuid);
			if (!v)
				return size;
		}
		if (v == -ENOENT)
			pr_err("Can't attach %s: cache set not found\n", buf);
		return v;
	}

	if (attr == &sysfs_detach && dc->disk.c)
		bch_cached_dev_detach(dc);

	if (attr == &sysfs_stop)
		bcache_device_stop(&dc->disk);

	return size;
}

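The sysfs_strtoul*() helpers used throughout the store path are also macros that return from the enclosing function when their attribute matches, which is why __cached_dev_store() can list them back to back and simply fall through. A sketch of sysfs_strtoul_clamp() as presumably defined in sysfs.h:

#define sysfs_strtoul_clamp(file, var, min, max)			\
do {									\
	if (attr == &sysfs_ ## file) {					\
		unsigned long v = 0;					\
		ssize_t _r = strtoul_safe_clamp(buf, v, min, max);	\
		if (_r)							\
			return _r;					\
		var = v;						\
		return size;						\
	}								\
} while (0)
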
STORE(bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);

	/* no user space access if system is rebooting */
	if (bcache_is_reboot)
		return -EBUSY;

	mutex_lock(&bch_register_lock);
	size = __cached_dev_store(kobj, attr, buf, size);

	if (attr == &sysfs_writeback_running) {
		/* dc->writeback_running changed in __cached_dev_store() */
		if (IS_ERR_OR_NULL(dc->writeback_thread)) {
			/*
			 * reject setting it to 1 via sysfs if writeback
			 * kthread is not created yet.
			 */
			if (dc->writeback_running) {
				dc->writeback_running = false;
				pr_err("%s: failed to run non-existent writeback thread\n",
						dc->disk.disk->disk_name);
			}
		} else
			/*
			 * writeback kthread will check if dc->writeback_running
			 * is true or false.
			 */
			bch_writeback_queue(dc);
	}

	/*
	 * Only set BCACHE_DEV_WB_RUNNING when cached device attached to
	 * a cache set, otherwise it doesn't make sense.
	 */
	if (attr == &sysfs_writeback_percent)
		if ((dc->disk.c != NULL) &&
		    (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)))
			schedule_delayed_work(&dc->writeback_rate_update,
				      dc->writeback_rate_update_seconds * HZ);

	mutex_unlock(&bch_register_lock);
	return size;
}

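From userspace these handlers are reached with ordinary file I/O on the device's sysfs directory. A minimal standalone sketch (the bcache0 path is only an example; substitute the actual device name):

/* Hypothetical standalone tool: set writeback_percent to 10. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/block/bcache0/bcache/writeback_percent", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* The kernel routes this write into bch_cached_dev_store(). */
	if (write(fd, "10", 2) != 2)
		perror("write");
	close(fd);
	return 0;
}
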
static struct attribute *bch_cached_dev_attrs[] = {
	&sysfs_attach,
	&sysfs_detach,
	&sysfs_stop,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_cache_mode,
	&sysfs_readahead_cache_policy,
	&sysfs_stop_when_cache_set_failed,
	&sysfs_writeback_metadata,
	&sysfs_writeback_running,
	&sysfs_writeback_delay,
	&sysfs_writeback_percent,
	&sysfs_writeback_rate,
	&sysfs_writeback_consider_fragment,
	&sysfs_writeback_rate_update_seconds,
	&sysfs_writeback_rate_i_term_inverse,
	&sysfs_writeback_rate_p_term_inverse,
	&sysfs_writeback_rate_fp_term_low,
	&sysfs_writeback_rate_fp_term_mid,
	&sysfs_writeback_rate_fp_term_high,
	&sysfs_writeback_rate_minimum,
	&sysfs_writeback_rate_debug,
	&sysfs_io_errors,
	&sysfs_io_error_limit,
	&sysfs_io_disable,
	&sysfs_dirty_data,
	&sysfs_stripe_size,
	&sysfs_partial_stripes_expensive,
	&sysfs_sequential_cutoff,
	&sysfs_clear_stats,
	&sysfs_running,
	&sysfs_state,
	&sysfs_label,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_bypass_torture_test,
#endif
	&sysfs_backing_dev_name,
	&sysfs_backing_dev_uuid,
	NULL
};
ATTRIBUTE_GROUPS(bch_cached_dev);
KTYPE(bch_cached_dev);

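ATTRIBUTE_GROUPS() is the generic kernel helper that wraps bch_cached_dev_attrs into bch_cached_dev_groups; KTYPE() is bcache-local and ties the generated show/store pair plus the groups into a kobj_type. Roughly, as a presumed sketch of sysfs.h:

#define KTYPE(type)							\
struct kobj_type type ## _ktype = {					\
	.release	= type ## _release,				\
	.sysfs_ops	= &((const struct sysfs_ops) {			\
		.show	= type ## _show,				\
		.store	= type ## _store				\
	}),								\
	.default_groups	= type ## _groups				\
}
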
SHOW(bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_printf(data_csum,	"%i", d->data_csum);
	sysfs_hprint(size,	u->sectors << 9);

	if (attr == &sysfs_label) {
		memcpy(buf, u->label, SB_LABEL_SIZE);
		buf[SB_LABEL_SIZE] = '\0';	/* label need not be NUL terminated */
		strcat(buf, "\n");
		return strlen(buf);
	}

	return 0;
}

STORE(__bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	/* no user space access if system is rebooting */
	if (bcache_is_reboot)
		return -EBUSY;

	sysfs_strtoul(data_csum,	d->data_csum);

	if (attr == &sysfs_size) {
		uint64_t v;

		strtoi_h_or_return(buf, v);

		u->sectors = v >> 9;
		bch_uuid_write(d->c);
		set_capacity(d->disk, u->sectors);
	}

	if (attr == &sysfs_label) {
		memcpy(u->label, buf, SB_LABEL_SIZE);
		bch_uuid_write(d->c);
	}

	if (attr == &sysfs_unregister) {
		set_bit(BCACHE_DEV_DETACHING, &d->flags);
		bcache_device_stop(d);
	}

	return size;
}
STORE_LOCKED(bch_flash_dev)

static struct attribute *bch_flash_dev_attrs[] = {
	&sysfs_unregister,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_label,
	&sysfs_size,
	NULL
};
ATTRIBUTE_GROUPS(bch_flash_dev);
KTYPE(bch_flash_dev);

struct bset_stats_op {
	struct btree_op op;
	size_t nodes;
	struct bset_stats stats;
};

static int bch_btree_bset_stats(struct btree_op *b_op, struct btree *b)
{
	struct bset_stats_op *op = container_of(b_op, struct bset_stats_op, op);

	op->nodes++;
	bch_btree_keys_stats(&b->keys, &op->stats);

	return MAP_CONTINUE;
}

static int bch_bset_print_stats(struct cache_set *c, char *buf)
{
	struct bset_stats_op op;
	int ret;

	memset(&op, 0, sizeof(op));
	bch_btree_op_init(&op.op, -1);

	ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, bch_btree_bset_stats);
	if (ret < 0)
		return ret;

	return snprintf(buf, PAGE_SIZE,
			"btree nodes:		%zu\n"
			"written sets:		%zu\n"
			"unwritten sets:		%zu\n"
			"written key bytes:	%zu\n"
			"unwritten key bytes:	%zu\n"
			"floats:			%zu\n"
			"failed:			%zu\n",
			op.nodes,
			op.stats.sets_written, op.stats.sets_unwritten,
			op.stats.bytes_written, op.stats.bytes_unwritten,
			op.stats.floats, op.stats.failed);
}

static unsigned int bch_root_usage(struct cache_set *c)
{
	unsigned int bytes = 0;
	struct bkey *k;
	struct btree *b;
	struct btree_iter iter;

	goto lock_root;

	do {
		/*
		 * The root may have been replaced while we waited for
		 * the lock: drop it and retry until we hold the
		 * current root.
		 */
		rw_unlock(false, b);
lock_root:
		b = c->root;
		rw_lock(false, b, b->level);
	} while (b != c->root);

	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
		bytes += bkey_bytes(k);

	rw_unlock(false, b);

	return (bytes * 100) / btree_bytes(c);
}

static size_t bch_cache_size(struct cache_set *c)
{
	size_t ret = 0;
	struct btree *b;

	mutex_lock(&c->bucket_lock);
	list_for_each_entry(b, &c->btree_cache, list)
		ret += 1 << (b->keys.page_order + PAGE_SHIFT);

	mutex_unlock(&c->bucket_lock);
	return ret;
}

static unsigned int bch_cache_max_chain(struct cache_set *c)
{
	unsigned int ret = 0;
	struct hlist_head *h;

	mutex_lock(&c->bucket_lock);

	for (h = c->bucket_hash;
	     h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
	     h++) {
		unsigned int i = 0;
		struct hlist_node *p;

		hlist_for_each(p, h)
			i++;

		ret = max(ret, i);
	}

	mutex_unlock(&c->bucket_lock);
	return ret;
}

static unsigned int bch_btree_used(struct cache_set *c)
{
	return div64_u64(c->gc_stats.key_bytes * 100,
			 (c->gc_stats.nodes ?: 1) * btree_bytes(c));
}

static unsigned int bch_average_key_size(struct cache_set *c)
{
	return c->gc_stats.nkeys
		? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
		: 0;
}

SHOW(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	sysfs_print(synchronous,		CACHE_SYNC(&c->cache->sb));
	sysfs_print(journal_delay_ms,		c->journal_delay_ms);
	sysfs_hprint(bucket_size,		bucket_bytes(c->cache));
	sysfs_hprint(block_size,		block_bytes(c->cache));
	sysfs_print(tree_depth,			c->root->level);
	sysfs_print(root_usage_percent,		bch_root_usage(c));

	sysfs_hprint(btree_cache_size,		bch_cache_size(c));
	sysfs_print(btree_cache_max_chain,	bch_cache_max_chain(c));
	sysfs_print(cache_available_percent,	100 - c->gc_stats.in_use);

	sysfs_print_time_stats(&c->btree_gc_time,	btree_gc, sec, ms);
	sysfs_print_time_stats(&c->btree_split_time,	btree_split, sec, us);
	sysfs_print_time_stats(&c->sort.time,		btree_sort, ms, us);
	sysfs_print_time_stats(&c->btree_read_time,	btree_read, ms, us);

	sysfs_print(btree_used_percent,	bch_btree_used(c));
	sysfs_print(btree_nodes,	c->gc_stats.nodes);
	sysfs_hprint(average_key_size,	bch_average_key_size(c));

	sysfs_print(cache_read_races,
		    atomic_long_read(&c->cache_read_races));

	sysfs_print(reclaim,
		    atomic_long_read(&c->reclaim));

	sysfs_print(reclaimed_journal_buckets,
		    atomic_long_read(&c->reclaimed_journal_buckets));

	sysfs_print(flush_write,
		    atomic_long_read(&c->flush_write));

	sysfs_print(writeback_keys_done,
		    atomic_long_read(&c->writeback_keys_done));
	sysfs_print(writeback_keys_failed,
		    atomic_long_read(&c->writeback_keys_failed));

	if (attr == &sysfs_errors)
		return bch_snprint_string_list(buf, PAGE_SIZE, error_actions,
					       c->on_error);

	/* See count_io_errors for why 88 */
	sysfs_print(io_error_halflife,	c->error_decay * 88);
	sysfs_print(io_error_limit,	c->error_limit);

	sysfs_hprint(congested,
		     ((uint64_t) bch_get_congested(c)) << 9);
	sysfs_print(congested_read_threshold_us,
		    c->congested_read_threshold_us);
	sysfs_print(congested_write_threshold_us,
		    c->congested_write_threshold_us);

	sysfs_print(cutoff_writeback, bch_cutoff_writeback);
	sysfs_print(cutoff_writeback_sync, bch_cutoff_writeback_sync);

	sysfs_print(active_journal_entries,	fifo_used(&c->journal.pin));
	sysfs_printf(verify,			"%i", c->verify);
	sysfs_printf(key_merging_disabled,	"%i", c->key_merging_disabled);
	sysfs_printf(expensive_debug_checks,
		     "%i", c->expensive_debug_checks);
	sysfs_printf(gc_always_rewrite,		"%i", c->gc_always_rewrite);
	sysfs_printf(btree_shrinker_disabled,	"%i", c->shrinker_disabled);
	sysfs_printf(copy_gc_enabled,		"%i", c->copy_gc_enabled);
	sysfs_printf(idle_max_writeback_rate,	"%i",
		     c->idle_max_writeback_rate_enabled);
	sysfs_printf(gc_after_writeback,	"%i", c->gc_after_writeback);
	sysfs_printf(io_disable,		"%i",
		     test_bit(CACHE_SET_IO_DISABLE, &c->flags));

	if (attr == &sysfs_bset_tree_stats)
		return bch_bset_print_stats(c, buf);

	if (attr == &sysfs_feature_compat)
		return bch_print_cache_set_feature_compat(c, buf, PAGE_SIZE);
	if (attr == &sysfs_feature_ro_compat)
		return bch_print_cache_set_feature_ro_compat(c, buf, PAGE_SIZE);
	if (attr == &sysfs_feature_incompat)
		return bch_print_cache_set_feature_incompat(c, buf, PAGE_SIZE);

	return 0;
}
SHOW_LOCKED(bch_cache_set)

STORE(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);
	ssize_t v;

	/* no user space access if system is rebooting */
	if (bcache_is_reboot)
		return -EBUSY;

	if (attr == &sysfs_unregister)
		bch_cache_set_unregister(c);

	if (attr == &sysfs_stop)
		bch_cache_set_stop(c);

	if (attr == &sysfs_synchronous) {
		bool sync = strtoul_or_return(buf);

		if (sync != CACHE_SYNC(&c->cache->sb)) {
			SET_CACHE_SYNC(&c->cache->sb, sync);
			bcache_write_super(c);
		}
	}

	if (attr == &sysfs_flash_vol_create) {
		int r;
		uint64_t v;

		strtoi_h_or_return(buf, v);

		r = bch_flash_dev_create(c, v);
		if (r)
			return r;
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&c->writeback_keys_done,	0);
		atomic_long_set(&c->writeback_keys_failed,	0);

		memset(&c->gc_stats, 0, sizeof(struct gc_stat));
		bch_cache_accounting_clear(&c->accounting);
	}

	if (attr == &sysfs_trigger_gc)
		force_wake_up_gc(c);

	if (attr == &sysfs_prune_cache) {
		struct shrink_control sc;

		sc.gfp_mask = GFP_KERNEL;
		sc.nr_to_scan = strtoul_or_return(buf);
		c->shrink.scan_objects(&c->shrink, &sc);
	}

	sysfs_strtoul_clamp(congested_read_threshold_us,
			    c->congested_read_threshold_us,
			    0, UINT_MAX);
	sysfs_strtoul_clamp(congested_write_threshold_us,
			    c->congested_write_threshold_us,
			    0, UINT_MAX);

	if (attr == &sysfs_errors) {
		v = __sysfs_match_string(error_actions, -1, buf);
		if (v < 0)
			return v;

		c->on_error = v;
	}

	sysfs_strtoul_clamp(io_error_limit, c->error_limit, 0, UINT_MAX);

	/* See count_io_errors() for why 88 */
	if (attr == &sysfs_io_error_halflife) {
		unsigned long v = 0;
		ssize_t ret;

		ret = strtoul_safe_clamp(buf, v, 0, UINT_MAX);
		if (!ret) {
			c->error_decay = v / 88;
			return size;
		}
		return ret;
	}

	if (attr == &sysfs_io_disable) {
		v = strtoul_or_return(buf);
		if (v) {
			if (test_and_set_bit(CACHE_SET_IO_DISABLE,
					     &c->flags))
				pr_warn("CACHE_SET_IO_DISABLE already set\n");
		} else {
			if (!test_and_clear_bit(CACHE_SET_IO_DISABLE,
						&c->flags))
				pr_warn("CACHE_SET_IO_DISABLE already cleared\n");
		}
	}

	sysfs_strtoul_clamp(journal_delay_ms,
			    c->journal_delay_ms,
			    0, USHRT_MAX);
	sysfs_strtoul_bool(verify,		c->verify);
	sysfs_strtoul_bool(key_merging_disabled, c->key_merging_disabled);
	sysfs_strtoul(expensive_debug_checks,	c->expensive_debug_checks);
	sysfs_strtoul_bool(gc_always_rewrite,	c->gc_always_rewrite);
	sysfs_strtoul_bool(btree_shrinker_disabled, c->shrinker_disabled);
	sysfs_strtoul_bool(copy_gc_enabled,	c->copy_gc_enabled);
	sysfs_strtoul_bool(idle_max_writeback_rate,
			   c->idle_max_writeback_rate_enabled);

	/*
	 * write gc_after_writeback here may overwrite an already set
	 * BCH_DO_AUTO_GC, it doesn't matter because this flag will be
	 * set in next chance.
	 */
	sysfs_strtoul_clamp(gc_after_writeback, c->gc_after_writeback, 0, 1);

	return size;
}
STORE_LOCKED(bch_cache_set)

SHOW(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);

	return bch_cache_set_show(&c->kobj, attr, buf);
}

STORE(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);

	/* no user space access if system is rebooting */
	if (bcache_is_reboot)
		return -EBUSY;

	return bch_cache_set_store(&c->kobj, attr, buf, size);
}

static void bch_cache_set_internal_release(struct kobject *k)
{
}

static struct attribute *bch_cache_set_attrs[] = {
	&sysfs_unregister,
	&sysfs_stop,
	&sysfs_synchronous,
	&sysfs_journal_delay_ms,
	&sysfs_flash_vol_create,

	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_tree_depth,
	&sysfs_root_usage_percent,
	&sysfs_btree_cache_size,
	&sysfs_cache_available_percent,

	&sysfs_average_key_size,

	&sysfs_errors,
	&sysfs_io_error_limit,
	&sysfs_io_error_halflife,
	&sysfs_congested,
	&sysfs_congested_read_threshold_us,
	&sysfs_congested_write_threshold_us,
	&sysfs_clear_stats,
	NULL
};
ATTRIBUTE_GROUPS(bch_cache_set);
KTYPE(bch_cache_set);

static struct attribute *bch_cache_set_internal_attrs[] = {
	&sysfs_active_journal_entries,

	sysfs_time_stats_attribute_list(btree_gc, sec, ms)
	sysfs_time_stats_attribute_list(btree_split, sec, us)
	sysfs_time_stats_attribute_list(btree_sort, ms, us)
	sysfs_time_stats_attribute_list(btree_read, ms, us)

	&sysfs_btree_nodes,
	&sysfs_btree_used_percent,
	&sysfs_btree_cache_max_chain,

	&sysfs_bset_tree_stats,
	&sysfs_cache_read_races,
	&sysfs_reclaim,
	&sysfs_reclaimed_journal_buckets,
	&sysfs_flush_write,
	&sysfs_writeback_keys_done,
	&sysfs_writeback_keys_failed,

	&sysfs_trigger_gc,
	&sysfs_prune_cache,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_key_merging_disabled,
	&sysfs_expensive_debug_checks,
#endif
	&sysfs_gc_always_rewrite,
	&sysfs_btree_shrinker_disabled,
	&sysfs_copy_gc_enabled,
	&sysfs_idle_max_writeback_rate,
	&sysfs_gc_after_writeback,
	&sysfs_io_disable,
	&sysfs_cutoff_writeback,
	&sysfs_cutoff_writeback_sync,
	&sysfs_feature_compat,
	&sysfs_feature_ro_compat,
	&sysfs_feature_incompat,
	NULL
};
ATTRIBUTE_GROUPS(bch_cache_set_internal);
KTYPE(bch_cache_set_internal);

static int __bch_cache_cmp(const void *l, const void *r)
{
	cond_resched();	/* sorting nbuckets entries can take a while */
	return *((uint16_t *)r) - *((uint16_t *)l);	/* sort descending */
}

SHOW(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);

	sysfs_hprint(bucket_size,	bucket_bytes(ca));
	sysfs_hprint(block_size,	block_bytes(ca));
	sysfs_print(nbuckets,		ca->sb.nbuckets);
	sysfs_print(discard,		ca->discard);
	sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
	sysfs_hprint(btree_written,
		     atomic_long_read(&ca->btree_sectors_written) << 9);
	sysfs_hprint(metadata_written,
		     (atomic_long_read(&ca->meta_sectors_written) +
		      atomic_long_read(&ca->btree_sectors_written)) << 9);

	sysfs_print(io_errors,
		    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);

	if (attr == &sysfs_cache_replacement_policy)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       cache_replacement_policies,
					       CACHE_REPLACEMENT(&ca->sb));

	if (attr == &sysfs_priority_stats) {
		struct bucket *b;
		size_t n = ca->sb.nbuckets, i;
		size_t unused = 0, available = 0, dirty = 0, meta = 0;
		uint64_t sum = 0;
		/* Compute 31 quantiles */
		uint16_t q[31], *p, *cached;
		ssize_t ret;

		cached = p = vmalloc(array_size(sizeof(uint16_t),
						ca->sb.nbuckets));
		if (!p)
			return -ENOMEM;

		mutex_lock(&ca->set->bucket_lock);
		for_each_bucket(b, ca) {
			if (!GC_SECTORS_USED(b))
				unused++;
			if (GC_MARK(b) == GC_MARK_RECLAIMABLE)
				available++;
			if (GC_MARK(b) == GC_MARK_DIRTY)
				dirty++;
			if (GC_MARK(b) == GC_MARK_METADATA)
				meta++;
		}

		for (i = ca->sb.first_bucket; i < n; i++)
			p[i] = ca->buckets[i].prio;
		mutex_unlock(&ca->set->bucket_lock);

		sort(p, n, sizeof(uint16_t), __bch_cache_cmp, NULL);

		while (n &&
		       !cached[n - 1])
			--n;

		while (cached < p + n &&
		       *cached == BTREE_PRIO) {
			cached++;
			n--;
		}

		for (i = 0; i < n; i++)
			sum += INITIAL_PRIO - cached[i];

		if (n)
			do_div(sum, n);

		for (i = 0; i < ARRAY_SIZE(q); i++)
			q[i] = INITIAL_PRIO - cached[n * (i + 1) /
				(ARRAY_SIZE(q) + 1)];

		vfree(p);

		ret = scnprintf(buf, PAGE_SIZE,
				"Unused:		%zu%%\n"
				"Clean:		%zu%%\n"
				"Dirty:		%zu%%\n"
				"Metadata:	%zu%%\n"
				"Average:	%llu\n"
				"Sectors per Q:	%zu\n"
				"Quantiles:	[",
				unused * 100 / (size_t) ca->sb.nbuckets,
				available * 100 / (size_t) ca->sb.nbuckets,
				dirty * 100 / (size_t) ca->sb.nbuckets,
				meta * 100 / (size_t) ca->sb.nbuckets, sum,
				n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));

		for (i = 0; i < ARRAY_SIZE(q); i++)
			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
					 "%u ", q[i]);
		ret--;

		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n");

		return ret;
	}

	return 0;
}
SHOW_LOCKED(bch_cache)

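The quantile arithmetic in priority_stats is easy to misread because the array is sorted in descending order and INITIAL_PRIO - prio converts a priority into an "age". A self-contained model of the same computation (toy values; INITIAL_PRIO is 32768 in bcache):

/* Standalone model of the 31-quantile computation above. */
#include <stdint.h>
#include <stdio.h>

#define INITIAL_PRIO	32768
#define NQUANTILES	31

int main(void)
{
	/* Already sorted descending, zeros and BTREE_PRIO entries trimmed. */
	uint16_t cached[] = { 32000, 31000, 30000, 20000, 10000, 5000 };
	size_t n = sizeof(cached) / sizeof(cached[0]), i;
	uint16_t q[NQUANTILES];

	/* q[i] is the age at the (i + 1)/32 point of the distribution */
	for (i = 0; i < NQUANTILES; i++)
		q[i] = INITIAL_PRIO - cached[n * (i + 1) / (NQUANTILES + 1)];

	for (i = 0; i < NQUANTILES; i++)
		printf("%u ", q[i]);
	printf("\n");
	return 0;
}
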
STORE(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);
	ssize_t v;

	/* no user space access if system is rebooting */
	if (bcache_is_reboot)
		return -EBUSY;

	if (attr == &sysfs_discard) {
		bool v = strtoul_or_return(buf);

		if (bdev_max_discard_sectors(ca->bdev))
			ca->discard = v;

		if (v != CACHE_DISCARD(&ca->sb)) {
			SET_CACHE_DISCARD(&ca->sb, v);
			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_cache_replacement_policy) {
		v = __sysfs_match_string(cache_replacement_policies, -1, buf);
		if (v < 0)
			return v;

		if ((unsigned int) v != CACHE_REPLACEMENT(&ca->sb)) {
			mutex_lock(&ca->set->bucket_lock);
			SET_CACHE_REPLACEMENT(&ca->sb, v);
			mutex_unlock(&ca->set->bucket_lock);

			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&ca->sectors_written, 0);
		atomic_long_set(&ca->btree_sectors_written, 0);
		atomic_long_set(&ca->meta_sectors_written, 0);
		atomic_set(&ca->io_count, 0);
		atomic_set(&ca->io_errors, 0);
	}

	return size;
}
STORE_LOCKED(bch_cache)

static struct attribute *bch_cache_attrs[] = {
	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_nbuckets,
	&sysfs_priority_stats,
	&sysfs_discard,
	&sysfs_written,
	&sysfs_btree_written,
	&sysfs_metadata_written,
	&sysfs_io_errors,
	&sysfs_clear_stats,
	&sysfs_cache_replacement_policy,
	NULL
};
ATTRIBUTE_GROUPS(bch_cache);
KTYPE(bch_cache);
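
The four ktypes built in this file are consumed at registration time elsewhere in the driver (super.c); a hedged sketch of the usual pattern, not code from this file:

/* Hypothetical: bind a cache kobject to bch_cache_ktype at register time. */
static int example_register_cache_kobj(struct cache *ca, struct kobject *parent)
{
	return kobject_init_and_add(&ca->kobj, &bch_cache_ktype,
				    parent, "cache");
}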