cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

btt.c (43771B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * Block Translation Table
      4 * Copyright (c) 2014-2015, Intel Corporation.
      5 */
      6#include <linux/highmem.h>
      7#include <linux/debugfs.h>
      8#include <linux/blkdev.h>
      9#include <linux/pagemap.h>
     10#include <linux/module.h>
     11#include <linux/device.h>
     12#include <linux/mutex.h>
     13#include <linux/hdreg.h>
     14#include <linux/sizes.h>
     15#include <linux/ndctl.h>
     16#include <linux/fs.h>
     17#include <linux/nd.h>
     18#include <linux/backing-dev.h>
     19#include "btt.h"
     20#include "nd.h"
     21
     22enum log_ent_request {
     23	LOG_NEW_ENT = 0,
     24	LOG_OLD_ENT
     25};
     26
     27static struct device *to_dev(struct arena_info *arena)
     28{
     29	return &arena->nd_btt->dev;
     30}
     31
     32static u64 adjust_initial_offset(struct nd_btt *nd_btt, u64 offset)
     33{
     34	return offset + nd_btt->initial_offset;
     35}
     36
     37static int arena_read_bytes(struct arena_info *arena, resource_size_t offset,
     38		void *buf, size_t n, unsigned long flags)
     39{
     40	struct nd_btt *nd_btt = arena->nd_btt;
     41	struct nd_namespace_common *ndns = nd_btt->ndns;
     42
     43	/* arena offsets may be shifted from the base of the device */
     44	offset = adjust_initial_offset(nd_btt, offset);
     45	return nvdimm_read_bytes(ndns, offset, buf, n, flags);
     46}
     47
     48static int arena_write_bytes(struct arena_info *arena, resource_size_t offset,
     49		void *buf, size_t n, unsigned long flags)
     50{
     51	struct nd_btt *nd_btt = arena->nd_btt;
     52	struct nd_namespace_common *ndns = nd_btt->ndns;
     53
     54	/* arena offsets may be shifted from the base of the device */
     55	offset = adjust_initial_offset(nd_btt, offset);
     56	return nvdimm_write_bytes(ndns, offset, buf, n, flags);
     57}
     58
     59static int btt_info_write(struct arena_info *arena, struct btt_sb *super)
     60{
     61	int ret;
     62
     63	/*
     64	 * infooff and info2off should always be at least 512B aligned.
     65	 * We rely on that to make sure rw_bytes does error clearing
     66	 * correctly, so make sure that is the case.
     67	 */
     68	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->infooff, 512),
     69		"arena->infooff: %#llx is unaligned\n", arena->infooff);
     70	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->info2off, 512),
     71		"arena->info2off: %#llx is unaligned\n", arena->info2off);
     72
     73	ret = arena_write_bytes(arena, arena->info2off, super,
     74			sizeof(struct btt_sb), 0);
     75	if (ret)
     76		return ret;
     77
     78	return arena_write_bytes(arena, arena->infooff, super,
     79			sizeof(struct btt_sb), 0);
     80}
     81
     82static int btt_info_read(struct arena_info *arena, struct btt_sb *super)
     83{
     84	return arena_read_bytes(arena, arena->infooff, super,
     85			sizeof(struct btt_sb), 0);
     86}
     87
     88/*
     89 * 'raw' version of btt_map write
     90 * Assumptions:
     91 *   mapping is in little-endian
     92 *   mapping contains 'E' and 'Z' flags as desired
     93 */
     94static int __btt_map_write(struct arena_info *arena, u32 lba, __le32 mapping,
     95		unsigned long flags)
     96{
     97	u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);
     98
     99	if (unlikely(lba >= arena->external_nlba))
    100		dev_err_ratelimited(to_dev(arena),
    101			"%s: lba %#x out of range (max: %#x)\n",
    102			__func__, lba, arena->external_nlba);
    103	return arena_write_bytes(arena, ns_off, &mapping, MAP_ENT_SIZE, flags);
    104}
    105
    106static int btt_map_write(struct arena_info *arena, u32 lba, u32 mapping,
    107			u32 z_flag, u32 e_flag, unsigned long rwb_flags)
    108{
    109	u32 ze;
    110	__le32 mapping_le;
    111
    112	/*
    113	 * This 'mapping' is supposed to be just the LBA mapping, without
    114	 * any flags set, so strip the flag bits.
    115	 */
    116	mapping = ent_lba(mapping);
    117
    118	ze = (z_flag << 1) + e_flag;
    119	switch (ze) {
    120	case 0:
    121		/*
    122		 * We want to set neither of the Z or E flags, and
    123		 * in the actual layout, this means setting the bit
    124		 * positions of both to '1' to indicate a 'normal'
    125		 * map entry
    126		 */
    127		mapping |= MAP_ENT_NORMAL;
    128		break;
    129	case 1:
    130		mapping |= (1 << MAP_ERR_SHIFT);
    131		break;
    132	case 2:
    133		mapping |= (1 << MAP_TRIM_SHIFT);
    134		break;
    135	default:
    136		/*
    137		 * The case where Z and E are both sent in as '1' could be
    138		 * construed as a valid 'normal' case, but we decide not to,
    139		 * to avoid confusion
    140		 */
    141		dev_err_ratelimited(to_dev(arena),
    142			"Invalid use of Z and E flags\n");
    143		return -EIO;
    144	}
    145
    146	mapping_le = cpu_to_le32(mapping);
    147	return __btt_map_write(arena, lba, mapping_le, rwb_flags);
    148}
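
        /*
         * Summary of the resulting on-media map encoding (derived from the
         * switch above and from btt_map_read() below): the zero and error
         * flags occupy the two bits selected by MAP_TRIM_SHIFT and
         * MAP_ERR_SHIFT in btt.h, and the remaining bits hold the postmap LBA.
         *
         *   Z E  meaning
         *   0 0  never written; read back as an identity (premap) mapping
         *   0 1  media error recorded for this block
         *   1 0  block has been zeroed/trimmed
         *   1 1  'normal' mapped entry (MAP_ENT_NORMAL)
         */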
    149
    150static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping,
    151			int *trim, int *error, unsigned long rwb_flags)
    152{
    153	int ret;
    154	__le32 in;
    155	u32 raw_mapping, postmap, ze, z_flag, e_flag;
    156	u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);
    157
    158	if (unlikely(lba >= arena->external_nlba))
    159		dev_err_ratelimited(to_dev(arena),
    160			"%s: lba %#x out of range (max: %#x)\n",
    161			__func__, lba, arena->external_nlba);
    162
    163	ret = arena_read_bytes(arena, ns_off, &in, MAP_ENT_SIZE, rwb_flags);
    164	if (ret)
    165		return ret;
    166
    167	raw_mapping = le32_to_cpu(in);
    168
    169	z_flag = ent_z_flag(raw_mapping);
    170	e_flag = ent_e_flag(raw_mapping);
    171	ze = (z_flag << 1) + e_flag;
    172	postmap = ent_lba(raw_mapping);
    173
    174	/* Reuse the {z,e}_flag variables for *trim and *error */
    175	z_flag = 0;
    176	e_flag = 0;
    177
    178	switch (ze) {
    179	case 0:
    180		/* Initial state. Return postmap = premap */
    181		*mapping = lba;
    182		break;
    183	case 1:
    184		*mapping = postmap;
    185		e_flag = 1;
    186		break;
    187	case 2:
    188		*mapping = postmap;
    189		z_flag = 1;
    190		break;
    191	case 3:
    192		*mapping = postmap;
    193		break;
    194	default:
    195		return -EIO;
    196	}
    197
    198	if (trim)
    199		*trim = z_flag;
    200	if (error)
    201		*error = e_flag;
    202
    203	return ret;
    204}
    205
    206static int btt_log_group_read(struct arena_info *arena, u32 lane,
    207			struct log_group *log)
    208{
    209	return arena_read_bytes(arena,
    210			arena->logoff + (lane * LOG_GRP_SIZE), log,
    211			LOG_GRP_SIZE, 0);
    212}
    213
    214static struct dentry *debugfs_root;
    215
    216static void arena_debugfs_init(struct arena_info *a, struct dentry *parent,
    217				int idx)
    218{
    219	char dirname[32];
    220	struct dentry *d;
    221
    222	/* If for some reason, parent bttN was not created, exit */
    223	if (!parent)
    224		return;
    225
    226	snprintf(dirname, 32, "arena%d", idx);
    227	d = debugfs_create_dir(dirname, parent);
    228	if (IS_ERR_OR_NULL(d))
    229		return;
    230	a->debugfs_dir = d;
    231
    232	debugfs_create_x64("size", S_IRUGO, d, &a->size);
    233	debugfs_create_x64("external_lba_start", S_IRUGO, d,
    234				&a->external_lba_start);
    235	debugfs_create_x32("internal_nlba", S_IRUGO, d, &a->internal_nlba);
    236	debugfs_create_u32("internal_lbasize", S_IRUGO, d,
    237				&a->internal_lbasize);
    238	debugfs_create_x32("external_nlba", S_IRUGO, d, &a->external_nlba);
    239	debugfs_create_u32("external_lbasize", S_IRUGO, d,
    240				&a->external_lbasize);
    241	debugfs_create_u32("nfree", S_IRUGO, d, &a->nfree);
    242	debugfs_create_u16("version_major", S_IRUGO, d, &a->version_major);
    243	debugfs_create_u16("version_minor", S_IRUGO, d, &a->version_minor);
    244	debugfs_create_x64("nextoff", S_IRUGO, d, &a->nextoff);
    245	debugfs_create_x64("infooff", S_IRUGO, d, &a->infooff);
    246	debugfs_create_x64("dataoff", S_IRUGO, d, &a->dataoff);
    247	debugfs_create_x64("mapoff", S_IRUGO, d, &a->mapoff);
    248	debugfs_create_x64("logoff", S_IRUGO, d, &a->logoff);
    249	debugfs_create_x64("info2off", S_IRUGO, d, &a->info2off);
    250	debugfs_create_x32("flags", S_IRUGO, d, &a->flags);
    251	debugfs_create_u32("log_index_0", S_IRUGO, d, &a->log_index[0]);
    252	debugfs_create_u32("log_index_1", S_IRUGO, d, &a->log_index[1]);
    253}
    254
    255static void btt_debugfs_init(struct btt *btt)
    256{
    257	int i = 0;
    258	struct arena_info *arena;
    259
    260	btt->debugfs_dir = debugfs_create_dir(dev_name(&btt->nd_btt->dev),
    261						debugfs_root);
    262	if (IS_ERR_OR_NULL(btt->debugfs_dir))
    263		return;
    264
    265	list_for_each_entry(arena, &btt->arena_list, list) {
    266		arena_debugfs_init(arena, btt->debugfs_dir, i);
    267		i++;
    268	}
    269}
    270
    271static u32 log_seq(struct log_group *log, int log_idx)
    272{
    273	return le32_to_cpu(log->ent[log_idx].seq);
    274}
    275
    276/*
    277 * This function accepts two log entries, and uses the
    278 * sequence number to find the 'older' entry.
    279 * It also updates the sequence number in this old entry to
    280 * make it the 'new' one if the mark_flag is set.
    281 * Finally, it returns which of the entries was the older one.
    282 *
     283 * TODO: The logic feels a bit kludgey; make it better.
    284 */
    285static int btt_log_get_old(struct arena_info *a, struct log_group *log)
    286{
    287	int idx0 = a->log_index[0];
    288	int idx1 = a->log_index[1];
    289	int old;
    290
    291	/*
    292	 * the first ever time this is seen, the entry goes into [0]
    293	 * the next time, the following logic works out to put this
    294	 * (next) entry into [1]
    295	 */
    296	if (log_seq(log, idx0) == 0) {
    297		log->ent[idx0].seq = cpu_to_le32(1);
    298		return 0;
    299	}
    300
    301	if (log_seq(log, idx0) == log_seq(log, idx1))
    302		return -EINVAL;
    303	if (log_seq(log, idx0) + log_seq(log, idx1) > 5)
    304		return -EINVAL;
    305
    306	if (log_seq(log, idx0) < log_seq(log, idx1)) {
    307		if ((log_seq(log, idx1) - log_seq(log, idx0)) == 1)
    308			old = 0;
    309		else
    310			old = 1;
    311	} else {
    312		if ((log_seq(log, idx0) - log_seq(log, idx1)) == 1)
    313			old = 1;
    314		else
    315			old = 0;
    316	}
    317
    318	return old;
    319}
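
        /*
         * Example of the sequence number scheme handled above: seq values
         * cycle 1 -> 2 -> 3 -> 1 (btt_flog_write() below wraps 4 back to 1),
         * and 0 means the slot has never been written. For a valid lane the
         * two slots hold adjacent values in this cycle, e.g. (1,2), (2,3) or
         * (3,1); the slot one step behind is the 'old' one, so for (3,1) the
         * slot holding 3 is old because 3 wraps around to 1. Equal sequence
         * numbers, or a sum larger than any two valid values can produce
         * (the '> 5' check), are treated as corruption.
         */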
    320
    321/*
    322 * This function copies the desired (old/new) log entry into ent if
    323 * it is not NULL. It returns the sub-slot number (0 or 1)
    324 * where the desired log entry was found. Negative return values
    325 * indicate errors.
    326 */
    327static int btt_log_read(struct arena_info *arena, u32 lane,
    328			struct log_entry *ent, int old_flag)
    329{
    330	int ret;
    331	int old_ent, ret_ent;
    332	struct log_group log;
    333
    334	ret = btt_log_group_read(arena, lane, &log);
    335	if (ret)
    336		return -EIO;
    337
    338	old_ent = btt_log_get_old(arena, &log);
    339	if (old_ent < 0 || old_ent > 1) {
    340		dev_err(to_dev(arena),
    341				"log corruption (%d): lane %d seq [%d, %d]\n",
    342				old_ent, lane, log.ent[arena->log_index[0]].seq,
    343				log.ent[arena->log_index[1]].seq);
    344		/* TODO set error state? */
    345		return -EIO;
    346	}
    347
    348	ret_ent = (old_flag ? old_ent : (1 - old_ent));
    349
    350	if (ent != NULL)
    351		memcpy(ent, &log.ent[arena->log_index[ret_ent]], LOG_ENT_SIZE);
    352
    353	return ret_ent;
    354}
    355
    356/*
    357 * This function commits a log entry to media
    358 * It does _not_ prepare the freelist entry for the next write
    359 * btt_flog_write is the wrapper for updating the freelist elements
    360 */
    361static int __btt_log_write(struct arena_info *arena, u32 lane,
    362			u32 sub, struct log_entry *ent, unsigned long flags)
    363{
    364	int ret;
    365	u32 group_slot = arena->log_index[sub];
    366	unsigned int log_half = LOG_ENT_SIZE / 2;
    367	void *src = ent;
    368	u64 ns_off;
    369
    370	ns_off = arena->logoff + (lane * LOG_GRP_SIZE) +
    371		(group_slot * LOG_ENT_SIZE);
    372	/* split the 16B write into atomic, durable halves */
    373	ret = arena_write_bytes(arena, ns_off, src, log_half, flags);
    374	if (ret)
    375		return ret;
    376
    377	ns_off += log_half;
    378	src += log_half;
    379	return arena_write_bytes(arena, ns_off, src, log_half, flags);
    380}
    381
    382static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub,
    383			struct log_entry *ent)
    384{
    385	int ret;
    386
    387	ret = __btt_log_write(arena, lane, sub, ent, NVDIMM_IO_ATOMIC);
    388	if (ret)
    389		return ret;
    390
    391	/* prepare the next free entry */
    392	arena->freelist[lane].sub = 1 - arena->freelist[lane].sub;
    393	if (++(arena->freelist[lane].seq) == 4)
    394		arena->freelist[lane].seq = 1;
    395	if (ent_e_flag(le32_to_cpu(ent->old_map)))
    396		arena->freelist[lane].has_err = 1;
    397	arena->freelist[lane].block = ent_lba(le32_to_cpu(ent->old_map));
    398
    399	return ret;
    400}
    401
    402/*
    403 * This function initializes the BTT map to the initial state, which is
    404 * all-zeroes, and indicates an identity mapping
    405 */
    406static int btt_map_init(struct arena_info *arena)
    407{
    408	int ret = -EINVAL;
    409	void *zerobuf;
    410	size_t offset = 0;
    411	size_t chunk_size = SZ_2M;
    412	size_t mapsize = arena->logoff - arena->mapoff;
    413
    414	zerobuf = kzalloc(chunk_size, GFP_KERNEL);
    415	if (!zerobuf)
    416		return -ENOMEM;
    417
    418	/*
     419	 * mapoff should always be at least 512B aligned. We rely on that to
    420	 * make sure rw_bytes does error clearing correctly, so make sure that
    421	 * is the case.
    422	 */
    423	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->mapoff, 512),
    424		"arena->mapoff: %#llx is unaligned\n", arena->mapoff);
    425
    426	while (mapsize) {
    427		size_t size = min(mapsize, chunk_size);
    428
    429		dev_WARN_ONCE(to_dev(arena), size < 512,
    430			"chunk size: %#zx is unaligned\n", size);
    431		ret = arena_write_bytes(arena, arena->mapoff + offset, zerobuf,
    432				size, 0);
    433		if (ret)
    434			goto free;
    435
    436		offset += size;
    437		mapsize -= size;
    438		cond_resched();
    439	}
    440
    441 free:
    442	kfree(zerobuf);
    443	return ret;
    444}
    445
    446/*
    447 * This function initializes the BTT log with 'fake' entries pointing
    448 * to the initial reserved set of blocks as being free
    449 */
    450static int btt_log_init(struct arena_info *arena)
    451{
    452	size_t logsize = arena->info2off - arena->logoff;
    453	size_t chunk_size = SZ_4K, offset = 0;
    454	struct log_entry ent;
    455	void *zerobuf;
    456	int ret;
    457	u32 i;
    458
    459	zerobuf = kzalloc(chunk_size, GFP_KERNEL);
    460	if (!zerobuf)
    461		return -ENOMEM;
    462	/*
     463	 * logoff should always be at least 512B aligned. We rely on that to
    464	 * make sure rw_bytes does error clearing correctly, so make sure that
    465	 * is the case.
    466	 */
    467	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->logoff, 512),
    468		"arena->logoff: %#llx is unaligned\n", arena->logoff);
    469
    470	while (logsize) {
    471		size_t size = min(logsize, chunk_size);
    472
    473		dev_WARN_ONCE(to_dev(arena), size < 512,
    474			"chunk size: %#zx is unaligned\n", size);
    475		ret = arena_write_bytes(arena, arena->logoff + offset, zerobuf,
    476				size, 0);
    477		if (ret)
    478			goto free;
    479
    480		offset += size;
    481		logsize -= size;
    482		cond_resched();
    483	}
    484
    485	for (i = 0; i < arena->nfree; i++) {
    486		ent.lba = cpu_to_le32(i);
    487		ent.old_map = cpu_to_le32(arena->external_nlba + i);
    488		ent.new_map = cpu_to_le32(arena->external_nlba + i);
    489		ent.seq = cpu_to_le32(LOG_SEQ_INIT);
    490		ret = __btt_log_write(arena, i, 0, &ent, 0);
    491		if (ret)
    492			goto free;
    493	}
    494
    495 free:
    496	kfree(zerobuf);
    497	return ret;
    498}
    499
    500static u64 to_namespace_offset(struct arena_info *arena, u64 lba)
    501{
    502	return arena->dataoff + ((u64)lba * arena->internal_lbasize);
    503}
    504
    505static int arena_clear_freelist_error(struct arena_info *arena, u32 lane)
    506{
    507	int ret = 0;
    508
    509	if (arena->freelist[lane].has_err) {
    510		void *zero_page = page_address(ZERO_PAGE(0));
    511		u32 lba = arena->freelist[lane].block;
    512		u64 nsoff = to_namespace_offset(arena, lba);
    513		unsigned long len = arena->sector_size;
    514
    515		mutex_lock(&arena->err_lock);
    516
    517		while (len) {
    518			unsigned long chunk = min(len, PAGE_SIZE);
    519
    520			ret = arena_write_bytes(arena, nsoff, zero_page,
    521				chunk, 0);
    522			if (ret)
    523				break;
    524			len -= chunk;
    525			nsoff += chunk;
    526			if (len == 0)
    527				arena->freelist[lane].has_err = 0;
    528		}
    529		mutex_unlock(&arena->err_lock);
    530	}
    531	return ret;
    532}
    533
    534static int btt_freelist_init(struct arena_info *arena)
    535{
    536	int new, ret;
    537	struct log_entry log_new;
    538	u32 i, map_entry, log_oldmap, log_newmap;
    539
    540	arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry),
    541					GFP_KERNEL);
    542	if (!arena->freelist)
    543		return -ENOMEM;
    544
    545	for (i = 0; i < arena->nfree; i++) {
    546		new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT);
    547		if (new < 0)
    548			return new;
    549
    550		/* old and new map entries with any flags stripped out */
    551		log_oldmap = ent_lba(le32_to_cpu(log_new.old_map));
    552		log_newmap = ent_lba(le32_to_cpu(log_new.new_map));
    553
    554		/* sub points to the next one to be overwritten */
    555		arena->freelist[i].sub = 1 - new;
    556		arena->freelist[i].seq = nd_inc_seq(le32_to_cpu(log_new.seq));
    557		arena->freelist[i].block = log_oldmap;
    558
    559		/*
    560		 * FIXME: if error clearing fails during init, we want to make
    561		 * the BTT read-only
    562		 */
    563		if (ent_e_flag(le32_to_cpu(log_new.old_map)) &&
    564		    !ent_normal(le32_to_cpu(log_new.old_map))) {
    565			arena->freelist[i].has_err = 1;
    566			ret = arena_clear_freelist_error(arena, i);
    567			if (ret)
    568				dev_err_ratelimited(to_dev(arena),
    569					"Unable to clear known errors\n");
    570		}
    571
    572		/* This implies a newly created or untouched flog entry */
    573		if (log_oldmap == log_newmap)
    574			continue;
    575
    576		/* Check if map recovery is needed */
    577		ret = btt_map_read(arena, le32_to_cpu(log_new.lba), &map_entry,
    578				NULL, NULL, 0);
    579		if (ret)
    580			return ret;
    581
    582		/*
     583		 * The map_entry from btt_map_read is stripped of any flag bits,
    584		 * so use the stripped out versions from the log as well for
    585		 * testing whether recovery is needed. For restoration, use the
    586		 * 'raw' version of the log entries as that captured what we
    587		 * were going to write originally.
    588		 */
    589		if ((log_newmap != map_entry) && (log_oldmap == map_entry)) {
    590			/*
    591			 * Last transaction wrote the flog, but wasn't able
    592			 * to complete the map write. So fix up the map.
    593			 */
    594			ret = btt_map_write(arena, le32_to_cpu(log_new.lba),
    595					le32_to_cpu(log_new.new_map), 0, 0, 0);
    596			if (ret)
    597				return ret;
    598		}
    599	}
    600
    601	return 0;
    602}
    603
    604static bool ent_is_padding(struct log_entry *ent)
    605{
    606	return (ent->lba == 0) && (ent->old_map == 0) && (ent->new_map == 0)
    607		&& (ent->seq == 0);
    608}
    609
    610/*
    611 * Detecting valid log indices: We read a log group (see the comments in btt.h
    612 * for a description of a 'log_group' and its 'slots'), and iterate over its
    613 * four slots. We expect that a padding slot will be all-zeroes, and use this
    614 * to detect a padding slot vs. an actual entry.
    615 *
    616 * If a log_group is in the initial state, i.e. hasn't been used since the
    617 * creation of this BTT layout, it will have three of the four slots with
    618 * zeroes. We skip over these log_groups for the detection of log_index. If
    619 * all log_groups are in the initial state (i.e. the BTT has never been
    620 * written to), it is safe to assume the 'new format' of log entries in slots
    621 * (0, 1).
    622 */
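        /*
         * Only two slot layouts are recognized here (see the checks at the
         * end of this function): the current packed format with entries in
         * slots 0 and 1 and padding in slots 2 and 3, giving log_index
         * (0, 1), and the older, padded format with entries in slots 0 and 2,
         * giving (0, 2). The log_group description in btt.h has the on-media
         * details.
         */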
    623static int log_set_indices(struct arena_info *arena)
    624{
    625	bool idx_set = false, initial_state = true;
    626	int ret, log_index[2] = {-1, -1};
    627	u32 i, j, next_idx = 0;
    628	struct log_group log;
    629	u32 pad_count = 0;
    630
    631	for (i = 0; i < arena->nfree; i++) {
    632		ret = btt_log_group_read(arena, i, &log);
    633		if (ret < 0)
    634			return ret;
    635
    636		for (j = 0; j < 4; j++) {
    637			if (!idx_set) {
    638				if (ent_is_padding(&log.ent[j])) {
    639					pad_count++;
    640					continue;
    641				} else {
    642					/* Skip if index has been recorded */
    643					if ((next_idx == 1) &&
    644						(j == log_index[0]))
    645						continue;
    646					/* valid entry, record index */
    647					log_index[next_idx] = j;
    648					next_idx++;
    649				}
    650				if (next_idx == 2) {
    651					/* two valid entries found */
    652					idx_set = true;
    653				} else if (next_idx > 2) {
    654					/* too many valid indices */
    655					return -ENXIO;
    656				}
    657			} else {
    658				/*
    659				 * once the indices have been set, just verify
    660				 * that all subsequent log groups are either in
    661				 * their initial state or follow the same
    662				 * indices.
    663				 */
    664				if (j == log_index[0]) {
    665					/* entry must be 'valid' */
    666					if (ent_is_padding(&log.ent[j]))
    667						return -ENXIO;
    668				} else if (j == log_index[1]) {
    669					;
    670					/*
    671					 * log_index[1] can be padding if the
    672					 * lane never got used and it is still
    673					 * in the initial state (three 'padding'
    674					 * entries)
    675					 */
    676				} else {
    677					/* entry must be invalid (padding) */
    678					if (!ent_is_padding(&log.ent[j]))
    679						return -ENXIO;
    680				}
    681			}
    682		}
    683		/*
    684		 * If any of the log_groups have more than one valid,
     685		 * non-padding entry, then we are no longer in the
    686		 * initial_state
    687		 */
    688		if (pad_count < 3)
    689			initial_state = false;
    690		pad_count = 0;
    691	}
    692
    693	if (!initial_state && !idx_set)
    694		return -ENXIO;
    695
    696	/*
    697	 * If all the entries in the log were in the initial state,
    698	 * assume new padding scheme
    699	 */
    700	if (initial_state)
    701		log_index[1] = 1;
    702
    703	/*
    704	 * Only allow the known permutations of log/padding indices,
    705	 * i.e. (0, 1), and (0, 2)
    706	 */
    707	if ((log_index[0] == 0) && ((log_index[1] == 1) || (log_index[1] == 2)))
    708		; /* known index possibilities */
    709	else {
    710		dev_err(to_dev(arena), "Found an unknown padding scheme\n");
    711		return -ENXIO;
    712	}
    713
    714	arena->log_index[0] = log_index[0];
    715	arena->log_index[1] = log_index[1];
    716	dev_dbg(to_dev(arena), "log_index_0 = %d\n", log_index[0]);
    717	dev_dbg(to_dev(arena), "log_index_1 = %d\n", log_index[1]);
    718	return 0;
    719}
    720
    721static int btt_rtt_init(struct arena_info *arena)
    722{
    723	arena->rtt = kcalloc(arena->nfree, sizeof(u32), GFP_KERNEL);
    724	if (arena->rtt == NULL)
    725		return -ENOMEM;
    726
    727	return 0;
    728}
    729
    730static int btt_maplocks_init(struct arena_info *arena)
    731{
    732	u32 i;
    733
    734	arena->map_locks = kcalloc(arena->nfree, sizeof(struct aligned_lock),
    735				GFP_KERNEL);
    736	if (!arena->map_locks)
    737		return -ENOMEM;
    738
    739	for (i = 0; i < arena->nfree; i++)
    740		spin_lock_init(&arena->map_locks[i].lock);
    741
    742	return 0;
    743}
    744
    745static struct arena_info *alloc_arena(struct btt *btt, size_t size,
    746				size_t start, size_t arena_off)
    747{
    748	struct arena_info *arena;
    749	u64 logsize, mapsize, datasize;
    750	u64 available = size;
    751
    752	arena = kzalloc(sizeof(struct arena_info), GFP_KERNEL);
    753	if (!arena)
    754		return NULL;
    755	arena->nd_btt = btt->nd_btt;
    756	arena->sector_size = btt->sector_size;
    757	mutex_init(&arena->err_lock);
    758
    759	if (!size)
    760		return arena;
    761
    762	arena->size = size;
    763	arena->external_lba_start = start;
    764	arena->external_lbasize = btt->lbasize;
    765	arena->internal_lbasize = roundup(arena->external_lbasize,
    766					INT_LBASIZE_ALIGNMENT);
    767	arena->nfree = BTT_DEFAULT_NFREE;
    768	arena->version_major = btt->nd_btt->version_major;
    769	arena->version_minor = btt->nd_btt->version_minor;
    770
    771	if (available % BTT_PG_SIZE)
    772		available -= (available % BTT_PG_SIZE);
    773
    774	/* Two pages are reserved for the super block and its copy */
    775	available -= 2 * BTT_PG_SIZE;
    776
    777	/* The log takes a fixed amount of space based on nfree */
    778	logsize = roundup(arena->nfree * LOG_GRP_SIZE, BTT_PG_SIZE);
    779	available -= logsize;
    780
    781	/* Calculate optimal split between map and data area */
    782	arena->internal_nlba = div_u64(available - BTT_PG_SIZE,
    783			arena->internal_lbasize + MAP_ENT_SIZE);
    784	arena->external_nlba = arena->internal_nlba - arena->nfree;
    785
    786	mapsize = roundup((arena->external_nlba * MAP_ENT_SIZE), BTT_PG_SIZE);
    787	datasize = available - mapsize;
    788
    789	/* 'Absolute' values, relative to start of storage space */
    790	arena->infooff = arena_off;
    791	arena->dataoff = arena->infooff + BTT_PG_SIZE;
    792	arena->mapoff = arena->dataoff + datasize;
    793	arena->logoff = arena->mapoff + mapsize;
    794	arena->info2off = arena->logoff + logsize;
    795
    796	/* Default log indices are (0,1) */
    797	arena->log_index[0] = 0;
    798	arena->log_index[1] = 1;
    799	return arena;
    800}
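
        /*
         * The offsets computed above give a single arena the following
         * on-media layout, each region a multiple of BTT_PG_SIZE:
         *
         *   infooff   info block (one page)
         *   dataoff   data area (internal_nlba blocks of internal_lbasize)
         *   mapoff    map: one MAP_ENT_SIZE entry per external LBA
         *   logoff    log: one LOG_GRP_SIZE group per free block (nfree)
         *   info2off  backup copy of the info block
         */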
    801
    802static void free_arenas(struct btt *btt)
    803{
    804	struct arena_info *arena, *next;
    805
    806	list_for_each_entry_safe(arena, next, &btt->arena_list, list) {
    807		list_del(&arena->list);
    808		kfree(arena->rtt);
    809		kfree(arena->map_locks);
    810		kfree(arena->freelist);
    811		debugfs_remove_recursive(arena->debugfs_dir);
    812		kfree(arena);
    813	}
    814}
    815
    816/*
     817 * This function parses an existing, valid btt superblock and
    818 * populates the corresponding arena_info struct
    819 */
    820static void parse_arena_meta(struct arena_info *arena, struct btt_sb *super,
    821				u64 arena_off)
    822{
    823	arena->internal_nlba = le32_to_cpu(super->internal_nlba);
    824	arena->internal_lbasize = le32_to_cpu(super->internal_lbasize);
    825	arena->external_nlba = le32_to_cpu(super->external_nlba);
    826	arena->external_lbasize = le32_to_cpu(super->external_lbasize);
    827	arena->nfree = le32_to_cpu(super->nfree);
    828	arena->version_major = le16_to_cpu(super->version_major);
    829	arena->version_minor = le16_to_cpu(super->version_minor);
    830
    831	arena->nextoff = (super->nextoff == 0) ? 0 : (arena_off +
    832			le64_to_cpu(super->nextoff));
    833	arena->infooff = arena_off;
    834	arena->dataoff = arena_off + le64_to_cpu(super->dataoff);
    835	arena->mapoff = arena_off + le64_to_cpu(super->mapoff);
    836	arena->logoff = arena_off + le64_to_cpu(super->logoff);
    837	arena->info2off = arena_off + le64_to_cpu(super->info2off);
    838
    839	arena->size = (le64_to_cpu(super->nextoff) > 0)
    840		? (le64_to_cpu(super->nextoff))
    841		: (arena->info2off - arena->infooff + BTT_PG_SIZE);
    842
    843	arena->flags = le32_to_cpu(super->flags);
    844}
    845
    846static int discover_arenas(struct btt *btt)
    847{
    848	int ret = 0;
    849	struct arena_info *arena;
    850	struct btt_sb *super;
    851	size_t remaining = btt->rawsize;
    852	u64 cur_nlba = 0;
    853	size_t cur_off = 0;
    854	int num_arenas = 0;
    855
    856	super = kzalloc(sizeof(*super), GFP_KERNEL);
    857	if (!super)
    858		return -ENOMEM;
    859
    860	while (remaining) {
    861		/* Alloc memory for arena */
    862		arena = alloc_arena(btt, 0, 0, 0);
    863		if (!arena) {
    864			ret = -ENOMEM;
    865			goto out_super;
    866		}
    867
    868		arena->infooff = cur_off;
    869		ret = btt_info_read(arena, super);
    870		if (ret)
    871			goto out;
    872
    873		if (!nd_btt_arena_is_valid(btt->nd_btt, super)) {
    874			if (remaining == btt->rawsize) {
    875				btt->init_state = INIT_NOTFOUND;
    876				dev_info(to_dev(arena), "No existing arenas\n");
    877				goto out;
    878			} else {
    879				dev_err(to_dev(arena),
    880						"Found corrupted metadata!\n");
    881				ret = -ENODEV;
    882				goto out;
    883			}
    884		}
    885
    886		arena->external_lba_start = cur_nlba;
    887		parse_arena_meta(arena, super, cur_off);
    888
    889		ret = log_set_indices(arena);
    890		if (ret) {
    891			dev_err(to_dev(arena),
    892				"Unable to deduce log/padding indices\n");
    893			goto out;
    894		}
    895
    896		ret = btt_freelist_init(arena);
    897		if (ret)
    898			goto out;
    899
    900		ret = btt_rtt_init(arena);
    901		if (ret)
    902			goto out;
    903
    904		ret = btt_maplocks_init(arena);
    905		if (ret)
    906			goto out;
    907
    908		list_add_tail(&arena->list, &btt->arena_list);
    909
    910		remaining -= arena->size;
    911		cur_off += arena->size;
    912		cur_nlba += arena->external_nlba;
    913		num_arenas++;
    914
    915		if (arena->nextoff == 0)
    916			break;
    917	}
    918	btt->num_arenas = num_arenas;
    919	btt->nlba = cur_nlba;
    920	btt->init_state = INIT_READY;
    921
    922	kfree(super);
    923	return ret;
    924
    925 out:
    926	kfree(arena);
    927	free_arenas(btt);
    928 out_super:
    929	kfree(super);
    930	return ret;
    931}
    932
    933static int create_arenas(struct btt *btt)
    934{
    935	size_t remaining = btt->rawsize;
    936	size_t cur_off = 0;
    937
    938	while (remaining) {
    939		struct arena_info *arena;
    940		size_t arena_size = min_t(u64, ARENA_MAX_SIZE, remaining);
    941
    942		remaining -= arena_size;
    943		if (arena_size < ARENA_MIN_SIZE)
    944			break;
    945
    946		arena = alloc_arena(btt, arena_size, btt->nlba, cur_off);
    947		if (!arena) {
    948			free_arenas(btt);
    949			return -ENOMEM;
    950		}
    951		btt->nlba += arena->external_nlba;
    952		if (remaining >= ARENA_MIN_SIZE)
    953			arena->nextoff = arena->size;
    954		else
    955			arena->nextoff = 0;
    956		cur_off += arena_size;
    957		list_add_tail(&arena->list, &btt->arena_list);
    958	}
    959
    960	return 0;
    961}
    962
    963/*
    964 * This function completes arena initialization by writing
    965 * all the metadata.
    966 * It is only called for an uninitialized arena when a write
    967 * to that arena occurs for the first time.
    968 */
    969static int btt_arena_write_layout(struct arena_info *arena)
    970{
    971	int ret;
    972	u64 sum;
    973	struct btt_sb *super;
    974	struct nd_btt *nd_btt = arena->nd_btt;
    975	const uuid_t *parent_uuid = nd_dev_to_uuid(&nd_btt->ndns->dev);
    976
    977	ret = btt_map_init(arena);
    978	if (ret)
    979		return ret;
    980
    981	ret = btt_log_init(arena);
    982	if (ret)
    983		return ret;
    984
    985	super = kzalloc(sizeof(struct btt_sb), GFP_NOIO);
    986	if (!super)
    987		return -ENOMEM;
    988
    989	strncpy(super->signature, BTT_SIG, BTT_SIG_LEN);
    990	export_uuid(super->uuid, nd_btt->uuid);
    991	export_uuid(super->parent_uuid, parent_uuid);
    992	super->flags = cpu_to_le32(arena->flags);
    993	super->version_major = cpu_to_le16(arena->version_major);
    994	super->version_minor = cpu_to_le16(arena->version_minor);
    995	super->external_lbasize = cpu_to_le32(arena->external_lbasize);
    996	super->external_nlba = cpu_to_le32(arena->external_nlba);
    997	super->internal_lbasize = cpu_to_le32(arena->internal_lbasize);
    998	super->internal_nlba = cpu_to_le32(arena->internal_nlba);
    999	super->nfree = cpu_to_le32(arena->nfree);
   1000	super->infosize = cpu_to_le32(sizeof(struct btt_sb));
   1001	super->nextoff = cpu_to_le64(arena->nextoff);
   1002	/*
   1003	 * Subtract arena->infooff (arena start) so numbers are relative
   1004	 * to 'this' arena
   1005	 */
   1006	super->dataoff = cpu_to_le64(arena->dataoff - arena->infooff);
   1007	super->mapoff = cpu_to_le64(arena->mapoff - arena->infooff);
   1008	super->logoff = cpu_to_le64(arena->logoff - arena->infooff);
   1009	super->info2off = cpu_to_le64(arena->info2off - arena->infooff);
   1010
   1011	super->flags = 0;
   1012	sum = nd_sb_checksum((struct nd_gen_sb *) super);
   1013	super->checksum = cpu_to_le64(sum);
   1014
   1015	ret = btt_info_write(arena, super);
   1016
   1017	kfree(super);
   1018	return ret;
   1019}
   1020
   1021/*
   1022 * This function completes the initialization for the BTT namespace
   1023 * such that it is ready to accept IOs
   1024 */
   1025static int btt_meta_init(struct btt *btt)
   1026{
   1027	int ret = 0;
   1028	struct arena_info *arena;
   1029
   1030	mutex_lock(&btt->init_lock);
   1031	list_for_each_entry(arena, &btt->arena_list, list) {
   1032		ret = btt_arena_write_layout(arena);
   1033		if (ret)
   1034			goto unlock;
   1035
   1036		ret = btt_freelist_init(arena);
   1037		if (ret)
   1038			goto unlock;
   1039
   1040		ret = btt_rtt_init(arena);
   1041		if (ret)
   1042			goto unlock;
   1043
   1044		ret = btt_maplocks_init(arena);
   1045		if (ret)
   1046			goto unlock;
   1047	}
   1048
   1049	btt->init_state = INIT_READY;
   1050
   1051 unlock:
   1052	mutex_unlock(&btt->init_lock);
   1053	return ret;
   1054}
   1055
   1056static u32 btt_meta_size(struct btt *btt)
   1057{
   1058	return btt->lbasize - btt->sector_size;
   1059}
   1060
   1061/*
   1062 * This function calculates the arena in which the given LBA lies
   1063 * by doing a linear walk. This is acceptable since we expect only
   1064 * a few arenas. If we have backing devices that get much larger,
   1065 * we can construct a balanced binary tree of arenas at init time
   1066 * so that this range search becomes faster.
   1067 */
   1068static int lba_to_arena(struct btt *btt, sector_t sector, __u32 *premap,
   1069				struct arena_info **arena)
   1070{
   1071	struct arena_info *arena_list;
   1072	__u64 lba = div_u64(sector << SECTOR_SHIFT, btt->sector_size);
   1073
   1074	list_for_each_entry(arena_list, &btt->arena_list, list) {
   1075		if (lba < arena_list->external_nlba) {
   1076			*arena = arena_list;
   1077			*premap = lba;
   1078			return 0;
   1079		}
   1080		lba -= arena_list->external_nlba;
   1081	}
   1082
   1083	return -EIO;
   1084}
   1085
   1086/*
   1087 * The following (lock_map, unlock_map) are mostly just to improve
   1088 * readability, since they index into an array of locks
   1089 */
   1090static void lock_map(struct arena_info *arena, u32 premap)
   1091		__acquires(&arena->map_locks[idx].lock)
   1092{
   1093	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;
   1094
   1095	spin_lock(&arena->map_locks[idx].lock);
   1096}
   1097
   1098static void unlock_map(struct arena_info *arena, u32 premap)
   1099		__releases(&arena->map_locks[idx].lock)
   1100{
   1101	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;
   1102
   1103	spin_unlock(&arena->map_locks[idx].lock);
   1104}
   1105
   1106static int btt_data_read(struct arena_info *arena, struct page *page,
   1107			unsigned int off, u32 lba, u32 len)
   1108{
   1109	int ret;
   1110	u64 nsoff = to_namespace_offset(arena, lba);
   1111	void *mem = kmap_atomic(page);
   1112
   1113	ret = arena_read_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
   1114	kunmap_atomic(mem);
   1115
   1116	return ret;
   1117}
   1118
   1119static int btt_data_write(struct arena_info *arena, u32 lba,
   1120			struct page *page, unsigned int off, u32 len)
   1121{
   1122	int ret;
   1123	u64 nsoff = to_namespace_offset(arena, lba);
   1124	void *mem = kmap_atomic(page);
   1125
   1126	ret = arena_write_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
   1127	kunmap_atomic(mem);
   1128
   1129	return ret;
   1130}
   1131
   1132static void zero_fill_data(struct page *page, unsigned int off, u32 len)
   1133{
   1134	void *mem = kmap_atomic(page);
   1135
   1136	memset(mem + off, 0, len);
   1137	kunmap_atomic(mem);
   1138}
   1139
   1140#ifdef CONFIG_BLK_DEV_INTEGRITY
   1141static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
   1142			struct arena_info *arena, u32 postmap, int rw)
   1143{
   1144	unsigned int len = btt_meta_size(btt);
   1145	u64 meta_nsoff;
   1146	int ret = 0;
   1147
   1148	if (bip == NULL)
   1149		return 0;
   1150
   1151	meta_nsoff = to_namespace_offset(arena, postmap) + btt->sector_size;
   1152
   1153	while (len) {
   1154		unsigned int cur_len;
   1155		struct bio_vec bv;
   1156		void *mem;
   1157
   1158		bv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
   1159		/*
   1160		 * The 'bv' obtained from bvec_iter_bvec has its .bv_len and
   1161		 * .bv_offset already adjusted for iter->bi_bvec_done, and we
   1162		 * can use those directly
   1163		 */
   1164
   1165		cur_len = min(len, bv.bv_len);
   1166		mem = bvec_kmap_local(&bv);
   1167		if (rw)
   1168			ret = arena_write_bytes(arena, meta_nsoff, mem, cur_len,
   1169					NVDIMM_IO_ATOMIC);
   1170		else
   1171			ret = arena_read_bytes(arena, meta_nsoff, mem, cur_len,
   1172					NVDIMM_IO_ATOMIC);
   1173
   1174		kunmap_local(mem);
   1175		if (ret)
   1176			return ret;
   1177
   1178		len -= cur_len;
   1179		meta_nsoff += cur_len;
   1180		if (!bvec_iter_advance(bip->bip_vec, &bip->bip_iter, cur_len))
   1181			return -EIO;
   1182	}
   1183
   1184	return ret;
   1185}
   1186
   1187#else /* CONFIG_BLK_DEV_INTEGRITY */
   1188static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
   1189			struct arena_info *arena, u32 postmap, int rw)
   1190{
   1191	return 0;
   1192}
   1193#endif
   1194
   1195static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
   1196			struct page *page, unsigned int off, sector_t sector,
   1197			unsigned int len)
   1198{
   1199	int ret = 0;
   1200	int t_flag, e_flag;
   1201	struct arena_info *arena = NULL;
   1202	u32 lane = 0, premap, postmap;
   1203
   1204	while (len) {
   1205		u32 cur_len;
   1206
   1207		lane = nd_region_acquire_lane(btt->nd_region);
   1208
   1209		ret = lba_to_arena(btt, sector, &premap, &arena);
   1210		if (ret)
   1211			goto out_lane;
   1212
   1213		cur_len = min(btt->sector_size, len);
   1214
   1215		ret = btt_map_read(arena, premap, &postmap, &t_flag, &e_flag,
   1216				NVDIMM_IO_ATOMIC);
   1217		if (ret)
   1218			goto out_lane;
   1219
   1220		/*
   1221		 * We loop to make sure that the post map LBA didn't change
   1222		 * from under us between writing the RTT and doing the actual
   1223		 * read.
   1224		 */
   1225		while (1) {
   1226			u32 new_map;
   1227			int new_t, new_e;
   1228
   1229			if (t_flag) {
   1230				zero_fill_data(page, off, cur_len);
   1231				goto out_lane;
   1232			}
   1233
   1234			if (e_flag) {
   1235				ret = -EIO;
   1236				goto out_lane;
   1237			}
   1238
   1239			arena->rtt[lane] = RTT_VALID | postmap;
   1240			/*
   1241			 * Barrier to make sure this write is not reordered
   1242			 * to do the verification map_read before the RTT store
   1243			 */
   1244			barrier();
   1245
   1246			ret = btt_map_read(arena, premap, &new_map, &new_t,
   1247						&new_e, NVDIMM_IO_ATOMIC);
   1248			if (ret)
   1249				goto out_rtt;
   1250
   1251			if ((postmap == new_map) && (t_flag == new_t) &&
   1252					(e_flag == new_e))
   1253				break;
   1254
   1255			postmap = new_map;
   1256			t_flag = new_t;
   1257			e_flag = new_e;
   1258		}
   1259
   1260		ret = btt_data_read(arena, page, off, postmap, cur_len);
   1261		if (ret) {
   1262			/* Media error - set the e_flag */
   1263			if (btt_map_write(arena, premap, postmap, 0, 1, NVDIMM_IO_ATOMIC))
   1264				dev_warn_ratelimited(to_dev(arena),
   1265					"Error persistently tracking bad blocks at %#x\n",
   1266					premap);
   1267			goto out_rtt;
   1268		}
   1269
   1270		if (bip) {
   1271			ret = btt_rw_integrity(btt, bip, arena, postmap, READ);
   1272			if (ret)
   1273				goto out_rtt;
   1274		}
   1275
   1276		arena->rtt[lane] = RTT_INVALID;
   1277		nd_region_release_lane(btt->nd_region, lane);
   1278
   1279		len -= cur_len;
   1280		off += cur_len;
   1281		sector += btt->sector_size >> SECTOR_SHIFT;
   1282	}
   1283
   1284	return 0;
   1285
   1286 out_rtt:
   1287	arena->rtt[lane] = RTT_INVALID;
   1288 out_lane:
   1289	nd_region_release_lane(btt->nd_region, lane);
   1290	return ret;
   1291}
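
        /*
         * The rtt[] (read tracking table) entry published above is what
         * btt_write_pg() polls before reusing a free block: a reader stores
         * RTT_VALID | postmap for its lane, re-reads the map entry to confirm
         * the mapping did not change underneath it, and clears the entry once
         * the data (and integrity) read completes. This keeps a writer from
         * recycling a block while a read of it is still in flight.
         */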
   1292
   1293/*
   1294 * Normally, arena_{read,write}_bytes will take care of the initial offset
   1295 * adjustment, but in the case of btt_is_badblock, where we query is_bad_pmem,
   1296 * we need the final, raw namespace offset here
   1297 */
   1298static bool btt_is_badblock(struct btt *btt, struct arena_info *arena,
   1299		u32 postmap)
   1300{
   1301	u64 nsoff = adjust_initial_offset(arena->nd_btt,
   1302			to_namespace_offset(arena, postmap));
   1303	sector_t phys_sector = nsoff >> 9;
   1304
   1305	return is_bad_pmem(btt->phys_bb, phys_sector, arena->internal_lbasize);
   1306}
   1307
   1308static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
   1309			sector_t sector, struct page *page, unsigned int off,
   1310			unsigned int len)
   1311{
   1312	int ret = 0;
   1313	struct arena_info *arena = NULL;
   1314	u32 premap = 0, old_postmap, new_postmap, lane = 0, i;
   1315	struct log_entry log;
   1316	int sub;
   1317
   1318	while (len) {
   1319		u32 cur_len;
   1320		int e_flag;
   1321
   1322 retry:
   1323		lane = nd_region_acquire_lane(btt->nd_region);
   1324
   1325		ret = lba_to_arena(btt, sector, &premap, &arena);
   1326		if (ret)
   1327			goto out_lane;
   1328		cur_len = min(btt->sector_size, len);
   1329
   1330		if ((arena->flags & IB_FLAG_ERROR_MASK) != 0) {
   1331			ret = -EIO;
   1332			goto out_lane;
   1333		}
   1334
   1335		if (btt_is_badblock(btt, arena, arena->freelist[lane].block))
   1336			arena->freelist[lane].has_err = 1;
   1337
   1338		if (mutex_is_locked(&arena->err_lock)
   1339				|| arena->freelist[lane].has_err) {
   1340			nd_region_release_lane(btt->nd_region, lane);
   1341
   1342			ret = arena_clear_freelist_error(arena, lane);
   1343			if (ret)
   1344				return ret;
   1345
   1346			/* OK to acquire a different lane/free block */
   1347			goto retry;
   1348		}
   1349
   1350		new_postmap = arena->freelist[lane].block;
   1351
   1352		/* Wait if the new block is being read from */
   1353		for (i = 0; i < arena->nfree; i++)
   1354			while (arena->rtt[i] == (RTT_VALID | new_postmap))
   1355				cpu_relax();
   1356
   1357
   1358		if (new_postmap >= arena->internal_nlba) {
   1359			ret = -EIO;
   1360			goto out_lane;
   1361		}
   1362
   1363		ret = btt_data_write(arena, new_postmap, page, off, cur_len);
   1364		if (ret)
   1365			goto out_lane;
   1366
   1367		if (bip) {
   1368			ret = btt_rw_integrity(btt, bip, arena, new_postmap,
   1369						WRITE);
   1370			if (ret)
   1371				goto out_lane;
   1372		}
   1373
   1374		lock_map(arena, premap);
   1375		ret = btt_map_read(arena, premap, &old_postmap, NULL, &e_flag,
   1376				NVDIMM_IO_ATOMIC);
   1377		if (ret)
   1378			goto out_map;
   1379		if (old_postmap >= arena->internal_nlba) {
   1380			ret = -EIO;
   1381			goto out_map;
   1382		}
   1383		if (e_flag)
   1384			set_e_flag(old_postmap);
   1385
   1386		log.lba = cpu_to_le32(premap);
   1387		log.old_map = cpu_to_le32(old_postmap);
   1388		log.new_map = cpu_to_le32(new_postmap);
   1389		log.seq = cpu_to_le32(arena->freelist[lane].seq);
   1390		sub = arena->freelist[lane].sub;
   1391		ret = btt_flog_write(arena, lane, sub, &log);
   1392		if (ret)
   1393			goto out_map;
   1394
   1395		ret = btt_map_write(arena, premap, new_postmap, 0, 0,
   1396			NVDIMM_IO_ATOMIC);
   1397		if (ret)
   1398			goto out_map;
   1399
   1400		unlock_map(arena, premap);
   1401		nd_region_release_lane(btt->nd_region, lane);
   1402
   1403		if (e_flag) {
   1404			ret = arena_clear_freelist_error(arena, lane);
   1405			if (ret)
   1406				return ret;
   1407		}
   1408
   1409		len -= cur_len;
   1410		off += cur_len;
   1411		sector += btt->sector_size >> SECTOR_SHIFT;
   1412	}
   1413
   1414	return 0;
   1415
   1416 out_map:
   1417	unlock_map(arena, premap);
   1418 out_lane:
   1419	nd_region_release_lane(btt->nd_region, lane);
   1420	return ret;
   1421}
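
        /*
         * Note the ordering that gives btt_write_pg() single-sector power-fail
         * atomicity: the new data is written to a free block first, then the
         * flog entry recording the old->new transition is committed
         * (btt_flog_write), and only then is the map entry switched to the new
         * block. A crash before the flog commit leaves the old mapping intact;
         * a crash between the flog commit and the map update is detected by
         * btt_freelist_init() on the next init and the map write is completed.
         */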
   1422
   1423static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
   1424			struct page *page, unsigned int len, unsigned int off,
   1425			unsigned int op, sector_t sector)
   1426{
   1427	int ret;
   1428
   1429	if (!op_is_write(op)) {
   1430		ret = btt_read_pg(btt, bip, page, off, sector, len);
   1431		flush_dcache_page(page);
   1432	} else {
   1433		flush_dcache_page(page);
   1434		ret = btt_write_pg(btt, bip, sector, page, off, len);
   1435	}
   1436
   1437	return ret;
   1438}
   1439
   1440static void btt_submit_bio(struct bio *bio)
   1441{
   1442	struct bio_integrity_payload *bip = bio_integrity(bio);
   1443	struct btt *btt = bio->bi_bdev->bd_disk->private_data;
   1444	struct bvec_iter iter;
   1445	unsigned long start;
   1446	struct bio_vec bvec;
   1447	int err = 0;
   1448	bool do_acct;
   1449
   1450	if (!bio_integrity_prep(bio))
   1451		return;
   1452
   1453	do_acct = blk_queue_io_stat(bio->bi_bdev->bd_disk->queue);
   1454	if (do_acct)
   1455		start = bio_start_io_acct(bio);
   1456	bio_for_each_segment(bvec, bio, iter) {
   1457		unsigned int len = bvec.bv_len;
   1458
   1459		if (len > PAGE_SIZE || len < btt->sector_size ||
   1460				len % btt->sector_size) {
   1461			dev_err_ratelimited(&btt->nd_btt->dev,
   1462				"unaligned bio segment (len: %d)\n", len);
   1463			bio->bi_status = BLK_STS_IOERR;
   1464			break;
   1465		}
   1466
   1467		err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
   1468				  bio_op(bio), iter.bi_sector);
   1469		if (err) {
   1470			dev_err(&btt->nd_btt->dev,
   1471					"io error in %s sector %lld, len %d,\n",
   1472					(op_is_write(bio_op(bio))) ? "WRITE" :
   1473					"READ",
   1474					(unsigned long long) iter.bi_sector, len);
   1475			bio->bi_status = errno_to_blk_status(err);
   1476			break;
   1477		}
   1478	}
   1479	if (do_acct)
   1480		bio_end_io_acct(bio, start);
   1481
   1482	bio_endio(bio);
   1483}
   1484
   1485static int btt_rw_page(struct block_device *bdev, sector_t sector,
   1486		struct page *page, unsigned int op)
   1487{
   1488	struct btt *btt = bdev->bd_disk->private_data;
   1489	int rc;
   1490
   1491	rc = btt_do_bvec(btt, NULL, page, thp_size(page), 0, op, sector);
   1492	if (rc == 0)
   1493		page_endio(page, op_is_write(op), 0);
   1494
   1495	return rc;
   1496}
   1497
   1498
   1499static int btt_getgeo(struct block_device *bd, struct hd_geometry *geo)
   1500{
   1501	/* some standard values */
   1502	geo->heads = 1 << 6;
   1503	geo->sectors = 1 << 5;
   1504	geo->cylinders = get_capacity(bd->bd_disk) >> 11;
   1505	return 0;
   1506}
   1507
   1508static const struct block_device_operations btt_fops = {
   1509	.owner =		THIS_MODULE,
   1510	.submit_bio =		btt_submit_bio,
   1511	.rw_page =		btt_rw_page,
   1512	.getgeo =		btt_getgeo,
   1513};
   1514
   1515static int btt_blk_init(struct btt *btt)
   1516{
   1517	struct nd_btt *nd_btt = btt->nd_btt;
   1518	struct nd_namespace_common *ndns = nd_btt->ndns;
   1519	int rc = -ENOMEM;
   1520
   1521	btt->btt_disk = blk_alloc_disk(NUMA_NO_NODE);
   1522	if (!btt->btt_disk)
   1523		return -ENOMEM;
   1524
   1525	nvdimm_namespace_disk_name(ndns, btt->btt_disk->disk_name);
   1526	btt->btt_disk->first_minor = 0;
   1527	btt->btt_disk->fops = &btt_fops;
   1528	btt->btt_disk->private_data = btt;
   1529
   1530	blk_queue_logical_block_size(btt->btt_disk->queue, btt->sector_size);
   1531	blk_queue_max_hw_sectors(btt->btt_disk->queue, UINT_MAX);
   1532	blk_queue_flag_set(QUEUE_FLAG_NONROT, btt->btt_disk->queue);
   1533
   1534	if (btt_meta_size(btt)) {
   1535		rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt));
   1536		if (rc)
   1537			goto out_cleanup_disk;
   1538	}
   1539
   1540	set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
   1541	rc = device_add_disk(&btt->nd_btt->dev, btt->btt_disk, NULL);
   1542	if (rc)
   1543		goto out_cleanup_disk;
   1544
   1545	btt->nd_btt->size = btt->nlba * (u64)btt->sector_size;
   1546	nvdimm_check_and_set_ro(btt->btt_disk);
   1547
   1548	return 0;
   1549
   1550out_cleanup_disk:
   1551	blk_cleanup_disk(btt->btt_disk);
   1552	return rc;
   1553}
   1554
   1555static void btt_blk_cleanup(struct btt *btt)
   1556{
   1557	del_gendisk(btt->btt_disk);
   1558	blk_cleanup_disk(btt->btt_disk);
   1559}
   1560
   1561/**
   1562 * btt_init - initialize a block translation table for the given device
   1563 * @nd_btt:	device with BTT geometry and backing device info
   1564 * @rawsize:	raw size in bytes of the backing device
   1565 * @lbasize:	lba size of the backing device
   1566 * @uuid:	A uuid for the backing device - this is stored on media
   1567 * @maxlane:	maximum number of parallel requests the device can handle
   1568 *
   1569 * Initialize a Block Translation Table on a backing device to provide
   1570 * single sector power fail atomicity.
   1571 *
   1572 * Context:
   1573 * Might sleep.
   1574 *
   1575 * Returns:
   1576 * Pointer to a new struct btt on success, NULL on failure.
   1577 */
   1578static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize,
   1579			    u32 lbasize, uuid_t *uuid,
   1580			    struct nd_region *nd_region)
   1581{
   1582	int ret;
   1583	struct btt *btt;
   1584	struct nd_namespace_io *nsio;
   1585	struct device *dev = &nd_btt->dev;
   1586
   1587	btt = devm_kzalloc(dev, sizeof(struct btt), GFP_KERNEL);
   1588	if (!btt)
   1589		return NULL;
   1590
   1591	btt->nd_btt = nd_btt;
   1592	btt->rawsize = rawsize;
   1593	btt->lbasize = lbasize;
   1594	btt->sector_size = ((lbasize >= 4096) ? 4096 : 512);
   1595	INIT_LIST_HEAD(&btt->arena_list);
   1596	mutex_init(&btt->init_lock);
   1597	btt->nd_region = nd_region;
   1598	nsio = to_nd_namespace_io(&nd_btt->ndns->dev);
   1599	btt->phys_bb = &nsio->bb;
   1600
   1601	ret = discover_arenas(btt);
   1602	if (ret) {
   1603		dev_err(dev, "init: error in arena_discover: %d\n", ret);
   1604		return NULL;
   1605	}
   1606
   1607	if (btt->init_state != INIT_READY && nd_region->ro) {
   1608		dev_warn(dev, "%s is read-only, unable to init btt metadata\n",
   1609				dev_name(&nd_region->dev));
   1610		return NULL;
   1611	} else if (btt->init_state != INIT_READY) {
   1612		btt->num_arenas = (rawsize / ARENA_MAX_SIZE) +
   1613			((rawsize % ARENA_MAX_SIZE) ? 1 : 0);
   1614		dev_dbg(dev, "init: %d arenas for %llu rawsize\n",
   1615				btt->num_arenas, rawsize);
   1616
   1617		ret = create_arenas(btt);
   1618		if (ret) {
   1619			dev_info(dev, "init: create_arenas: %d\n", ret);
   1620			return NULL;
   1621		}
   1622
   1623		ret = btt_meta_init(btt);
   1624		if (ret) {
   1625			dev_err(dev, "init: error in meta_init: %d\n", ret);
   1626			return NULL;
   1627		}
   1628	}
   1629
   1630	ret = btt_blk_init(btt);
   1631	if (ret) {
   1632		dev_err(dev, "init: error in blk_init: %d\n", ret);
   1633		return NULL;
   1634	}
   1635
   1636	btt_debugfs_init(btt);
   1637
   1638	return btt;
   1639}
   1640
   1641/**
   1642 * btt_fini - de-initialize a BTT
   1643 * @btt:	the BTT handle that was generated by btt_init
   1644 *
   1645 * De-initialize a Block Translation Table on device removal
   1646 *
   1647 * Context:
   1648 * Might sleep.
   1649 */
   1650static void btt_fini(struct btt *btt)
   1651{
   1652	if (btt) {
   1653		btt_blk_cleanup(btt);
   1654		free_arenas(btt);
   1655		debugfs_remove_recursive(btt->debugfs_dir);
   1656	}
   1657}
   1658
   1659int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
   1660{
   1661	struct nd_btt *nd_btt = to_nd_btt(ndns->claim);
   1662	struct nd_region *nd_region;
   1663	struct btt_sb *btt_sb;
   1664	struct btt *btt;
   1665	size_t size, rawsize;
   1666	int rc;
   1667
   1668	if (!nd_btt->uuid || !nd_btt->ndns || !nd_btt->lbasize) {
   1669		dev_dbg(&nd_btt->dev, "incomplete btt configuration\n");
   1670		return -ENODEV;
   1671	}
   1672
   1673	btt_sb = devm_kzalloc(&nd_btt->dev, sizeof(*btt_sb), GFP_KERNEL);
   1674	if (!btt_sb)
   1675		return -ENOMEM;
   1676
   1677	size = nvdimm_namespace_capacity(ndns);
   1678	rc = devm_namespace_enable(&nd_btt->dev, ndns, size);
   1679	if (rc)
   1680		return rc;
   1681
   1682	/*
   1683	 * If this returns < 0, that is ok as it just means there wasn't
   1684	 * an existing BTT, and we're creating a new one. We still need to
   1685	 * call this as we need the version dependent fields in nd_btt to be
   1686	 * set correctly based on the holder class
   1687	 */
   1688	nd_btt_version(nd_btt, ndns, btt_sb);
   1689
   1690	rawsize = size - nd_btt->initial_offset;
   1691	if (rawsize < ARENA_MIN_SIZE) {
   1692		dev_dbg(&nd_btt->dev, "%s must be at least %ld bytes\n",
   1693				dev_name(&ndns->dev),
   1694				ARENA_MIN_SIZE + nd_btt->initial_offset);
   1695		return -ENXIO;
   1696	}
   1697	nd_region = to_nd_region(nd_btt->dev.parent);
   1698	btt = btt_init(nd_btt, rawsize, nd_btt->lbasize, nd_btt->uuid,
   1699		       nd_region);
   1700	if (!btt)
   1701		return -ENOMEM;
   1702	nd_btt->btt = btt;
   1703
   1704	return 0;
   1705}
   1706EXPORT_SYMBOL(nvdimm_namespace_attach_btt);
   1707
   1708int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt)
   1709{
   1710	struct btt *btt = nd_btt->btt;
   1711
   1712	btt_fini(btt);
   1713	nd_btt->btt = NULL;
   1714
   1715	return 0;
   1716}
   1717EXPORT_SYMBOL(nvdimm_namespace_detach_btt);
   1718
   1719static int __init nd_btt_init(void)
   1720{
   1721	int rc = 0;
   1722
   1723	debugfs_root = debugfs_create_dir("btt", NULL);
   1724	if (IS_ERR_OR_NULL(debugfs_root))
   1725		rc = -ENXIO;
   1726
   1727	return rc;
   1728}
   1729
   1730static void __exit nd_btt_exit(void)
   1731{
   1732	debugfs_remove_recursive(debugfs_root);
   1733}
   1734
   1735MODULE_ALIAS_ND_DEVICE(ND_DEVICE_BTT);
   1736MODULE_AUTHOR("Vishal Verma <vishal.l.verma@linux.intel.com>");
   1737MODULE_LICENSE("GPL v2");
   1738module_init(nd_btt_init);
   1739module_exit(nd_btt_exit);