cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

dat.c (13045B)


// SPDX-License-Identifier: GPL-2.0+
/*
 * NILFS disk address translation.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Koji Sato.
 */

#include <linux/types.h>
#include <linux/buffer_head.h>
#include <linux/string.h>
#include <linux/errno.h>
#include "nilfs.h"
#include "mdt.h"
#include "alloc.h"
#include "dat.h"


#define NILFS_CNO_MIN	((__u64)1)
#define NILFS_CNO_MAX	(~(__u64)0)

/**
 * struct nilfs_dat_info - on-memory private data of DAT file
 * @mi: on-memory private data of metadata file
 * @palloc_cache: persistent object allocator cache of DAT file
 * @shadow: shadow map of DAT file
 */
struct nilfs_dat_info {
	struct nilfs_mdt_info mi;
	struct nilfs_palloc_cache palloc_cache;
	struct nilfs_shadow_map shadow;
};

static inline struct nilfs_dat_info *NILFS_DAT_I(struct inode *dat)
{
	return (struct nilfs_dat_info *)NILFS_MDT(dat);
}

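/*
 * nilfs_dat_prepare_entry - read in the block holding a DAT entry
 *
 * Reads the entry block that contains entry number @req->pr_entry_nr
 * (creating it first when @create is nonzero) and stores its buffer
 * head in @req->pr_entry_bh.
 */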
static int nilfs_dat_prepare_entry(struct inode *dat,
				   struct nilfs_palloc_req *req, int create)
{
	return nilfs_palloc_get_entry_block(dat, req->pr_entry_nr,
					    create, &req->pr_entry_bh);
}

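/*
 * nilfs_dat_commit_entry - commit a change to a DAT entry
 *
 * Marks the entry block dirty, propagates the dirty state to the DAT
 * inode, and releases the buffer head taken at prepare time.
 */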
static void nilfs_dat_commit_entry(struct inode *dat,
				   struct nilfs_palloc_req *req)
{
	mark_buffer_dirty(req->pr_entry_bh);
	nilfs_mdt_mark_dirty(dat);
	brelse(req->pr_entry_bh);
}

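/* nilfs_dat_abort_entry - release the entry block without dirtying it */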
static void nilfs_dat_abort_entry(struct inode *dat,
				  struct nilfs_palloc_req *req)
{
	brelse(req->pr_entry_bh);
}

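/**
 * nilfs_dat_prepare_alloc - prepare to allocate a new DAT entry
 * @dat: DAT file inode
 * @req: allocation request
 *
 * Description: Reserves an unused entry in the DAT file and reads in
 * the entry block that will hold it. On failure, the reservation is
 * rolled back.
 */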
int nilfs_dat_prepare_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	int ret;

	ret = nilfs_palloc_prepare_alloc_entry(dat, req);
	if (ret < 0)
		return ret;

	ret = nilfs_dat_prepare_entry(dat, req, 1);
	if (ret < 0)
		nilfs_palloc_abort_alloc_entry(dat, req);

	return ret;
}

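/**
 * nilfs_dat_commit_alloc - finish allocating a DAT entry
 * @dat: DAT file inode
 * @req: allocation request
 *
 * Description: Initializes the new entry so that its lifetime spans
 * [NILFS_CNO_MIN, NILFS_CNO_MAX) with no disk block assigned yet, then
 * commits both the reservation and the entry block update.
 */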
void nilfs_dat_commit_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_end = cpu_to_le64(NILFS_CNO_MAX);
	entry->de_blocknr = cpu_to_le64(0);
	kunmap_atomic(kaddr);

	nilfs_palloc_commit_alloc_entry(dat, req);
	nilfs_dat_commit_entry(dat, req);
}

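/* nilfs_dat_abort_alloc - roll back a prepared DAT entry allocation */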
void nilfs_dat_abort_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	nilfs_dat_abort_entry(dat, req);
	nilfs_palloc_abort_alloc_entry(dat, req);
}

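/*
 * nilfs_dat_commit_free - finish freeing a DAT entry
 *
 * Empties the entry's lifetime ([NILFS_CNO_MIN, NILFS_CNO_MIN)), clears
 * its block number, and returns the entry to the allocator.
 */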
static void nilfs_dat_commit_free(struct inode *dat,
				  struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_end = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_blocknr = cpu_to_le64(0);
	kunmap_atomic(kaddr);

	nilfs_dat_commit_entry(dat, req);
	nilfs_palloc_commit_free_entry(dat, req);
}

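/**
 * nilfs_dat_prepare_start - prepare to assign a disk block to an entry
 * @dat: DAT file inode
 * @req: request with pr_entry_nr set to the target virtual block number
 *
 * Description: Reads in the block of an existing DAT entry; -ENOENT is
 * unexpected here and triggers a warning.
 */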
int nilfs_dat_prepare_start(struct inode *dat, struct nilfs_palloc_req *req)
{
	int ret;

	ret = nilfs_dat_prepare_entry(dat, req, 0);
	WARN_ON(ret == -ENOENT);
	return ret;
}

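/**
 * nilfs_dat_commit_start - commit assigning a disk block to an entry
 * @dat: DAT file inode
 * @req: prepared request
 * @blocknr: disk block number to assign
 *
 * Description: Starts the entry's lifetime at the current checkpoint
 * number and records @blocknr as its disk block.
 */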
void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req,
			    sector_t blocknr)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(nilfs_mdt_cno(dat));
	entry->de_blocknr = cpu_to_le64(blocknr);
	kunmap_atomic(kaddr);

	nilfs_dat_commit_entry(dat, req);
}

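/**
 * nilfs_dat_prepare_end - prepare to end the lifetime of a DAT entry
 * @dat: DAT file inode
 * @req: request with pr_entry_nr set to the target virtual block number
 *
 * Description: Reads in the entry block; if the entry has no disk block
 * assigned, also prepares to free the entry itself.
 */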
int nilfs_dat_prepare_end(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	sector_t blocknr;
	void *kaddr;
	int ret;

	ret = nilfs_dat_prepare_entry(dat, req, 0);
	if (ret < 0) {
		WARN_ON(ret == -ENOENT);
		return ret;
	}

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr);

	if (blocknr == 0) {
		ret = nilfs_palloc_prepare_free_entry(dat, req);
		if (ret < 0) {
			nilfs_dat_abort_entry(dat, req);
			return ret;
		}
	}

	return 0;
}

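/**
 * nilfs_dat_commit_end - commit ending the lifetime of a DAT entry
 * @dat: DAT file inode
 * @req: prepared request
 * @dead: nonzero to give the entry an empty lifetime so it dies at once
 *
 * Description: Closes the entry's lifetime at the current checkpoint
 * number (or at its own start when @dead), then frees the entry if no
 * disk block is assigned, or just commits the update otherwise.
 */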
void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req,
			  int dead)
{
	struct nilfs_dat_entry *entry;
	__u64 start, end;
	sector_t blocknr;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	end = start = le64_to_cpu(entry->de_start);
	if (!dead) {
		end = nilfs_mdt_cno(dat);
		WARN_ON(start > end);
	}
	entry->de_end = cpu_to_le64(end);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr);

	if (blocknr == 0)
		nilfs_dat_commit_free(dat, req);
	else
		nilfs_dat_commit_entry(dat, req);
}

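/*
 * nilfs_dat_abort_end - roll back a prepared lifetime-end operation,
 * cancelling any pending entry free and releasing the entry block.
 */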
void nilfs_dat_abort_end(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	__u64 start;
	sector_t blocknr;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	start = le64_to_cpu(entry->de_start);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr);

	if (start == nilfs_mdt_cno(dat) && blocknr == 0)
		nilfs_palloc_abort_free_entry(dat, req);
	nilfs_dat_abort_entry(dat, req);
}

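/**
 * nilfs_dat_prepare_update - prepare to relocate a virtual block number
 * @dat: DAT file inode
 * @oldreq: request for the entry being retired
 * @newreq: request for the entry that replaces it
 *
 * Description: Prepares both halves of an update: ending the lifetime
 * of the old entry and allocating a new one. If either half fails,
 * neither takes effect.
 */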
int nilfs_dat_prepare_update(struct inode *dat,
			     struct nilfs_palloc_req *oldreq,
			     struct nilfs_palloc_req *newreq)
{
	int ret;

	ret = nilfs_dat_prepare_end(dat, oldreq);
	if (!ret) {
		ret = nilfs_dat_prepare_alloc(dat, newreq);
		if (ret < 0)
			nilfs_dat_abort_end(dat, oldreq);
	}
	return ret;
}

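/* nilfs_dat_commit_update - commit both halves of a prepared update */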
void nilfs_dat_commit_update(struct inode *dat,
			     struct nilfs_palloc_req *oldreq,
			     struct nilfs_palloc_req *newreq, int dead)
{
	nilfs_dat_commit_end(dat, oldreq, dead);
	nilfs_dat_commit_alloc(dat, newreq);
}

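/* nilfs_dat_abort_update - roll back both halves of a prepared update */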
void nilfs_dat_abort_update(struct inode *dat,
			    struct nilfs_palloc_req *oldreq,
			    struct nilfs_palloc_req *newreq)
{
	nilfs_dat_abort_end(dat, oldreq);
	nilfs_dat_abort_alloc(dat, newreq);
}

/**
 * nilfs_dat_mark_dirty - mark the DAT entry of a virtual block number dirty
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 *
 * Description: nilfs_dat_mark_dirty() marks the entry block that holds the
 * DAT entry of @vblocknr dirty so that it will be written back to disk.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_dat_mark_dirty(struct inode *dat, __u64 vblocknr)
{
	struct nilfs_palloc_req req;
	int ret;

	req.pr_entry_nr = vblocknr;
	ret = nilfs_dat_prepare_entry(dat, &req, 0);
	if (ret == 0)
		nilfs_dat_commit_entry(dat, &req);
	return ret;
}

/**
 * nilfs_dat_freev - free virtual block numbers
 * @dat: DAT file inode
 * @vblocknrs: array of virtual block numbers
 * @nitems: number of virtual block numbers
 *
 * Description: nilfs_dat_freev() frees the virtual block numbers specified
 * by @vblocknrs and @nitems.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - One of the virtual block numbers has not been allocated.
 */
int nilfs_dat_freev(struct inode *dat, __u64 *vblocknrs, size_t nitems)
{
	return nilfs_palloc_freev(dat, vblocknrs, nitems);
}

/**
 * nilfs_dat_move - change a block number
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 * @blocknr: block number
 *
 * Description: nilfs_dat_move() changes the block number associated with
 * @vblocknr to @blocknr.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
{
	struct buffer_head *entry_bh;
	struct nilfs_dat_entry *entry;
	void *kaddr;
	int ret;

	ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
	if (ret < 0)
		return ret;

	/*
	 * The given disk block number (blocknr) is not yet written to
	 * the device at this point.
	 *
	 * To prevent nilfs_dat_translate() from returning the
	 * uncommitted block number, this makes a copy of the entry
	 * buffer and redirects nilfs_dat_translate() to the copy.
	 */
	if (!buffer_nilfs_redirected(entry_bh)) {
		ret = nilfs_mdt_freeze_buffer(dat, entry_bh);
		if (ret) {
			brelse(entry_bh);
			return ret;
		}
	}

	kaddr = kmap_atomic(entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
	if (unlikely(entry->de_blocknr == cpu_to_le64(0))) {
		nilfs_crit(dat->i_sb,
			   "%s: invalid vblocknr = %llu, [%llu, %llu)",
			   __func__, (unsigned long long)vblocknr,
			   (unsigned long long)le64_to_cpu(entry->de_start),
			   (unsigned long long)le64_to_cpu(entry->de_end));
		kunmap_atomic(kaddr);
		brelse(entry_bh);
		return -EINVAL;
	}
	WARN_ON(blocknr == 0);
	entry->de_blocknr = cpu_to_le64(blocknr);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(entry_bh);
	nilfs_mdt_mark_dirty(dat);

	brelse(entry_bh);

	return 0;
}

/**
 * nilfs_dat_translate - translate a virtual block number to a block number
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 * @blocknrp: pointer to a block number
 *
 * Description: nilfs_dat_translate() maps the virtual block number @vblocknr
 * to the corresponding block number.
 *
 * Return Value: On success, 0 is returned and the block number associated
 * with @vblocknr is stored in the place pointed by @blocknrp. On error, one
 * of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - A block number associated with @vblocknr does not exist.
 */
int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
{
	struct buffer_head *entry_bh, *bh;
	struct nilfs_dat_entry *entry;
	sector_t blocknr;
	void *kaddr;
	int ret;

	ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
	if (ret < 0)
		return ret;

	if (!nilfs_doing_gc() && buffer_nilfs_redirected(entry_bh)) {
		bh = nilfs_mdt_get_frozen_buffer(dat, entry_bh);
		if (bh) {
			WARN_ON(!buffer_uptodate(bh));
			brelse(entry_bh);
			entry_bh = bh;
		}
	}

	kaddr = kmap_atomic(entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
	blocknr = le64_to_cpu(entry->de_blocknr);
	if (blocknr == 0) {
		ret = -ENOENT;
		goto out;
	}
	*blocknrp = blocknr;

 out:
	kunmap_atomic(kaddr);
	brelse(entry_bh);
	return ret;
}

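/**
 * nilfs_dat_get_vinfo - get information on a group of virtual block numbers
 * @dat: DAT file inode
 * @buf: array of nilfs_vinfo structures with vi_vblocknr filled in
 * @visz: size of one nilfs_vinfo entry in @buf
 * @nvi: number of entries in @buf
 *
 * Description: Fills in the lifetime (vi_start, vi_end) and disk block
 * number (vi_blocknr) of each requested entry, batching lookups that
 * fall within the same entry block.
 */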
ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned int visz,
			    size_t nvi)
{
	struct buffer_head *entry_bh;
	struct nilfs_dat_entry *entry;
	struct nilfs_vinfo *vinfo = buf;
	__u64 first, last;
	void *kaddr;
	unsigned long entries_per_block = NILFS_MDT(dat)->mi_entries_per_block;
	int i, j, n, ret;

	for (i = 0; i < nvi; i += n) {
		ret = nilfs_palloc_get_entry_block(dat, vinfo->vi_vblocknr,
						   0, &entry_bh);
		if (ret < 0)
			return ret;
		kaddr = kmap_atomic(entry_bh->b_page);
		/* first and last virtual block numbers in this block */
		first = vinfo->vi_vblocknr;
		do_div(first, entries_per_block);
		first *= entries_per_block;
		last = first + entries_per_block - 1;
		for (j = i, n = 0;
		     j < nvi && vinfo->vi_vblocknr >= first &&
			     vinfo->vi_vblocknr <= last;
		     j++, n++, vinfo = (void *)vinfo + visz) {
			entry = nilfs_palloc_block_get_entry(
				dat, vinfo->vi_vblocknr, entry_bh, kaddr);
			vinfo->vi_start = le64_to_cpu(entry->de_start);
			vinfo->vi_end = le64_to_cpu(entry->de_end);
			vinfo->vi_blocknr = le64_to_cpu(entry->de_blocknr);
		}
		kunmap_atomic(kaddr);
		brelse(entry_bh);
	}

	return nvi;
}

/**
 * nilfs_dat_read - read or get dat inode
 * @sb: super block instance
 * @entry_size: size of a dat entry
 * @raw_inode: on-disk dat inode
 * @inodep: buffer to store the inode
 *
 * Return Value: On success, 0 is returned and the DAT inode is stored in
 * the place pointed by @inodep. On error, a negative error code is returned.
 */
int nilfs_dat_read(struct super_block *sb, size_t entry_size,
		   struct nilfs_inode *raw_inode, struct inode **inodep)
{
	static struct lock_class_key dat_lock_key;
	struct inode *dat;
	struct nilfs_dat_info *di;
	int err;

	if (entry_size > sb->s_blocksize) {
		nilfs_err(sb, "too large DAT entry size: %zu bytes",
			  entry_size);
		return -EINVAL;
	} else if (entry_size < NILFS_MIN_DAT_ENTRY_SIZE) {
		nilfs_err(sb, "too small DAT entry size: %zu bytes",
			  entry_size);
		return -EINVAL;
	}

	dat = nilfs_iget_locked(sb, NULL, NILFS_DAT_INO);
	if (unlikely(!dat))
		return -ENOMEM;
	if (!(dat->i_state & I_NEW))
		goto out;

	err = nilfs_mdt_init(dat, NILFS_MDT_GFP, sizeof(*di));
	if (err)
		goto failed;

	err = nilfs_palloc_init_blockgroup(dat, entry_size);
	if (err)
		goto failed;

	di = NILFS_DAT_I(dat);
	lockdep_set_class(&di->mi.mi_sem, &dat_lock_key);
	nilfs_palloc_setup_cache(dat, &di->palloc_cache);
	err = nilfs_mdt_setup_shadow_map(dat, &di->shadow);
	if (err)
		goto failed;

	err = nilfs_read_inode_common(dat, raw_inode);
	if (err)
		goto failed;

	unlock_new_inode(dat);
 out:
	*inodep = dat;
	return 0;
 failed:
	iget_failed(dat);
	return err;
}