cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack.
git clone https://git.sinitax.com/sinitax/cachepc-linux

jfs_metapage.c (18990B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *   Copyright (C) International Business Machines Corp., 2000-2005
 *   Portions Copyright (C) Christoph Hellwig, 2001-2002
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/seq_file.h>
#include <linux/writeback.h>
#include "jfs_incore.h"
#include "jfs_superblock.h"
#include "jfs_filsys.h"
#include "jfs_metapage.h"
#include "jfs_txnmgr.h"
#include "jfs_debug.h"

#ifdef CONFIG_JFS_STATISTICS
static struct {
	uint	pagealloc;	/* # of page allocations */
	uint	pagefree;	/* # of page frees */
	uint	lockwait;	/* # of sleeping lock_metapage() calls */
} mpStat;
#endif

#define metapage_locked(mp) test_bit(META_locked, &(mp)->flag)
#define trylock_metapage(mp) test_and_set_bit_lock(META_locked, &(mp)->flag)

static inline void unlock_metapage(struct metapage *mp)
{
	clear_bit_unlock(META_locked, &mp->flag);
	wake_up(&mp->wait);
}

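/*
 * Slow path for lock_metapage(): sleep until META_locked is released.
 * The page lock must be dropped while sleeping, since the current
 * holder needs it in release_metapage() to unlock the metapage.
 */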
static inline void __lock_metapage(struct metapage *mp)
{
	DECLARE_WAITQUEUE(wait, current);
	INCREMENT(mpStat.lockwait);
	add_wait_queue_exclusive(&mp->wait, &wait);
	do {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (metapage_locked(mp)) {
			unlock_page(mp->page);
			io_schedule();
			lock_page(mp->page);
		}
	} while (trylock_metapage(mp));
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&mp->wait, &wait);
}

/*
 * Must have mp->page locked
 */
static inline void lock_metapage(struct metapage *mp)
{
	if (trylock_metapage(mp))
		__lock_metapage(mp);
}

#define METAPOOL_MIN_PAGES 32
static struct kmem_cache *metapage_cache;
static mempool_t *metapage_mempool;

#define MPS_PER_PAGE (PAGE_SIZE >> L2PSIZE)

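/*
 * A metapage is PSIZE (4K) bytes, which may be smaller than the machine
 * page size.  When several metapages share one page, page_private()
 * holds a meta_anchor that tracks them along with the page's in-flight
 * I/O; otherwise page_private() points at the lone metapage directly.
 */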
#if MPS_PER_PAGE > 1

struct meta_anchor {
	int mp_count;
	atomic_t io_count;
	struct metapage *mp[MPS_PER_PAGE];
};
#define mp_anchor(page) ((struct meta_anchor *)page_private(page))

static inline struct metapage *page_to_mp(struct page *page, int offset)
{
	if (!PagePrivate(page))
		return NULL;
	return mp_anchor(page)->mp[offset >> L2PSIZE];
}

static inline int insert_metapage(struct page *page, struct metapage *mp)
{
	struct meta_anchor *a;
	int index;
	int l2mp_blocks;	/* log2 blocks per metapage */

	if (PagePrivate(page))
		a = mp_anchor(page);
	else {
		a = kzalloc(sizeof(struct meta_anchor), GFP_NOFS);
		if (!a)
			return -ENOMEM;
		set_page_private(page, (unsigned long)a);
		SetPagePrivate(page);
		kmap(page);
	}

	if (mp) {
		l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits;
		index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);
		a->mp_count++;
		a->mp[index] = mp;
	}

	return 0;
}

static inline void remove_metapage(struct page *page, struct metapage *mp)
{
	struct meta_anchor *a = mp_anchor(page);
	int l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits;
	int index;

	index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);

	BUG_ON(a->mp[index] != mp);

	a->mp[index] = NULL;
	if (--a->mp_count == 0) {
		kfree(a);
		set_page_private(page, 0);
		ClearPagePrivate(page);
		kunmap(page);
	}
}

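/*
 * io_count tracks bios in flight against the page; the handler passed
 * to dec_io() runs when the last of them completes.
 */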
static inline void inc_io(struct page *page)
{
	atomic_inc(&mp_anchor(page)->io_count);
}

static inline void dec_io(struct page *page, void (*handler) (struct page *))
{
	if (atomic_dec_and_test(&mp_anchor(page)->io_count))
		handler(page);
}

#else
static inline struct metapage *page_to_mp(struct page *page, int offset)
{
	return PagePrivate(page) ? (struct metapage *)page_private(page) : NULL;
}

static inline int insert_metapage(struct page *page, struct metapage *mp)
{
	if (mp) {
		set_page_private(page, (unsigned long)mp);
		SetPagePrivate(page);
		kmap(page);
	}
	return 0;
}

static inline void remove_metapage(struct page *page, struct metapage *mp)
{
	set_page_private(page, 0);
	ClearPagePrivate(page);
	kunmap(page);
}

#define inc_io(page) do {} while(0)
#define dec_io(page, handler) handler(page)

#endif

static inline struct metapage *alloc_metapage(gfp_t gfp_mask)
{
	struct metapage *mp = mempool_alloc(metapage_mempool, gfp_mask);

	if (mp) {
		mp->lid = 0;
		mp->lsn = 0;
		mp->data = NULL;
		mp->clsn = 0;
		mp->log = NULL;
		init_waitqueue_head(&mp->wait);
	}
	return mp;
}

static inline void free_metapage(struct metapage *mp)
{
	mempool_free(mp, metapage_mempool);
}

int __init metapage_init(void)
{
	/*
	 * Allocate the metapage structures
	 */
	metapage_cache = kmem_cache_create("jfs_mp", sizeof(struct metapage),
					   0, 0, NULL);
	if (metapage_cache == NULL)
		return -ENOMEM;

	metapage_mempool = mempool_create_slab_pool(METAPOOL_MIN_PAGES,
						    metapage_cache);

	if (metapage_mempool == NULL) {
		kmem_cache_destroy(metapage_cache);
		return -ENOMEM;
	}

	return 0;
}

void metapage_exit(void)
{
	mempool_destroy(metapage_mempool);
	kmem_cache_destroy(metapage_cache);
}

static inline void drop_metapage(struct page *page, struct metapage *mp)
{
	if (mp->count || mp->nohomeok || test_bit(META_dirty, &mp->flag) ||
	    test_bit(META_io, &mp->flag))
		return;
	remove_metapage(page, mp);
	INCREMENT(mpStat.pagefree);
	free_metapage(mp);
}

/*
 * Metapage address space operations
 */

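/*
 * Translate a logical block in the inode into an on-disk sector,
 * trimming *len to the length of the mapped extent.  Blocks in the
 * direct (block device) inode map one-to-one; a return value of 0
 * means the block is unmapped or lies past end of file.
 */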
static sector_t metapage_get_blocks(struct inode *inode, sector_t lblock,
				    int *len)
{
	int rc = 0;
	int xflag;
	s64 xaddr;
	sector_t file_blocks = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
			       inode->i_blkbits;

	if (lblock >= file_blocks)
		return 0;
	if (lblock + *len > file_blocks)
		*len = file_blocks - lblock;

	if (inode->i_ino) {
		rc = xtLookup(inode, (s64)lblock, *len, &xflag, &xaddr, len, 0);
		if ((rc == 0) && *len)
			lblock = (sector_t)xaddr;
		else
			lblock = 0;
	} /* else no mapping */

	return lblock;
}

static void last_read_complete(struct page *page)
{
	if (!PageError(page))
		SetPageUptodate(page);
	unlock_page(page);
}

static void metapage_read_end_io(struct bio *bio)
{
	struct page *page = bio->bi_private;

	if (bio->bi_status) {
		printk(KERN_ERR "metapage_read_end_io: I/O error\n");
		SetPageError(page);
	}

	dec_io(page, last_read_complete);
	bio_put(bio);
}

static void remove_from_logsync(struct metapage *mp)
{
	struct jfs_log *log = mp->log;
	unsigned long flags;
/*
 * This can race.  Recheck that log hasn't been set to null, and after
 * acquiring logsync lock, recheck lsn
 */
	if (!log)
		return;

	LOGSYNC_LOCK(log, flags);
	if (mp->lsn) {
		mp->log = NULL;
		mp->lsn = 0;
		mp->clsn = 0;
		log->count--;
		list_del(&mp->synclist);
	}
	LOGSYNC_UNLOCK(log, flags);
}

static void last_write_complete(struct page *page)
{
	struct metapage *mp;
	unsigned int offset;

	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
		mp = page_to_mp(page, offset);
		if (mp && test_bit(META_io, &mp->flag)) {
			if (mp->lsn)
				remove_from_logsync(mp);
			clear_bit(META_io, &mp->flag);
		}
		/*
		 * I'd like to call drop_metapage here, but I don't think it's
		 * safe unless I have the page locked
		 */
	}
	end_page_writeback(page);
}

static void metapage_write_end_io(struct bio *bio)
{
	struct page *page = bio->bi_private;

	BUG_ON(!PagePrivate(page));

	if (bio->bi_status) {
		printk(KERN_ERR "metapage_write_end_io: I/O error\n");
		SetPageError(page);
	}
	dec_io(page, last_write_complete);
	bio_put(bio);
}

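/*
 * Write back the dirty metapages in a page, coalescing metapages that
 * are contiguous both in memory and on disk into a single bio.  Pages
 * pinned by the journal (nohomeok) are redirtied instead of written,
 * after nudging the log if it is not already flushing.
 */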
static int metapage_writepage(struct page *page, struct writeback_control *wbc)
{
	struct bio *bio = NULL;
	int block_offset;	/* block offset of mp within page */
	struct inode *inode = page->mapping->host;
	int blocks_per_mp = JFS_SBI(inode->i_sb)->nbperpage;
	int len;
	int xlen;
	struct metapage *mp;
	int redirty = 0;
	sector_t lblock;
	int nr_underway = 0;
	sector_t pblock;
	sector_t next_block = 0;
	sector_t page_start;
	unsigned long bio_bytes = 0;
	unsigned long bio_offset = 0;
	int offset;
	int bad_blocks = 0;

	page_start = (sector_t)page->index <<
		     (PAGE_SHIFT - inode->i_blkbits);
	BUG_ON(!PageLocked(page));
	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
		mp = page_to_mp(page, offset);

		if (!mp || !test_bit(META_dirty, &mp->flag))
			continue;

		if (mp->nohomeok && !test_bit(META_forcewrite, &mp->flag)) {
			redirty = 1;
			/*
			 * Make sure this page isn't blocked indefinitely.
			 * If the journal isn't undergoing I/O, push it
			 */
			if (mp->log && !(mp->log->cflag & logGC_PAGEOUT))
				jfs_flush_journal(mp->log, 0);
			continue;
		}

		clear_bit(META_dirty, &mp->flag);
		set_bit(META_io, &mp->flag);
		block_offset = offset >> inode->i_blkbits;
		lblock = page_start + block_offset;
		if (bio) {
			if (xlen && lblock == next_block) {
				/* Contiguous, in memory & on disk */
				len = min(xlen, blocks_per_mp);
				xlen -= len;
				bio_bytes += len << inode->i_blkbits;
				continue;
			}
			/* Not contiguous */
			if (bio_add_page(bio, page, bio_bytes, bio_offset) <
			    bio_bytes)
				goto add_failed;
			/*
			 * Increment counter before submitting i/o to keep
			 * count from hitting zero before we're through
			 */
			inc_io(page);
			if (!bio->bi_iter.bi_size)
				goto dump_bio;
			submit_bio(bio);
			nr_underway++;
			bio = NULL;
		} else
			inc_io(page);
		xlen = (PAGE_SIZE - offset) >> inode->i_blkbits;
		pblock = metapage_get_blocks(inode, lblock, &xlen);
		if (!pblock) {
			printk(KERN_ERR "JFS: metapage_get_blocks failed\n");
			/*
			 * We already called inc_io(), but can't cancel it
			 * with dec_io() until we're done with the page
			 */
			bad_blocks++;
			continue;
		}
		len = min(xlen, (int)JFS_SBI(inode->i_sb)->nbperpage);

		bio = bio_alloc(inode->i_sb->s_bdev, 1, REQ_OP_WRITE, GFP_NOFS);
		bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9);
		bio->bi_end_io = metapage_write_end_io;
		bio->bi_private = page;

		/* Don't call bio_add_page yet, we may add to this vec */
		bio_offset = offset;
		bio_bytes = len << inode->i_blkbits;

		xlen -= len;
		next_block = lblock + len;
	}
	if (bio) {
		if (bio_add_page(bio, page, bio_bytes, bio_offset) < bio_bytes)
			goto add_failed;
		if (!bio->bi_iter.bi_size)
			goto dump_bio;

		submit_bio(bio);
		nr_underway++;
	}
	if (redirty)
		redirty_page_for_writepage(wbc, page);

	unlock_page(page);

	if (bad_blocks)
		goto err_out;

	if (nr_underway == 0)
		end_page_writeback(page);

	return 0;
add_failed:
	/* We should never reach here, since we're only adding one vec */
	printk(KERN_ERR "JFS: bio_add_page failed unexpectedly\n");
	goto skip;
dump_bio:
	print_hex_dump(KERN_ERR, "JFS: dump of bio: ", DUMP_PREFIX_ADDRESS, 16,
		       4, bio, sizeof(*bio), 0);
skip:
	bio_put(bio);
	unlock_page(page);
	dec_io(page, last_write_complete);
err_out:
	while (bad_blocks--)
		dec_io(page, last_write_complete);
	return -EIO;
}

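/*
 * Read a page of metadata, issuing one bio per contiguous on-disk
 * extent.  Unmapped blocks are skipped; io_count keeps the page locked
 * until the last bio completes.
 */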
static int metapage_read_folio(struct file *fp, struct folio *folio)
{
	struct page *page = &folio->page;
	struct inode *inode = page->mapping->host;
	struct bio *bio = NULL;
	int block_offset;
	int blocks_per_page = i_blocks_per_page(inode, page);
	sector_t page_start;	/* address of page in fs blocks */
	sector_t pblock;
	int xlen;
	unsigned int len;
	int offset;

	BUG_ON(!PageLocked(page));
	page_start = (sector_t)page->index <<
		     (PAGE_SHIFT - inode->i_blkbits);

	block_offset = 0;
	while (block_offset < blocks_per_page) {
		xlen = blocks_per_page - block_offset;
		pblock = metapage_get_blocks(inode, page_start + block_offset,
					     &xlen);
		if (pblock) {
			if (!PagePrivate(page))
				insert_metapage(page, NULL);
			inc_io(page);
			if (bio)
				submit_bio(bio);

			bio = bio_alloc(inode->i_sb->s_bdev, 1, REQ_OP_READ,
					GFP_NOFS);
			bio->bi_iter.bi_sector =
				pblock << (inode->i_blkbits - 9);
			bio->bi_end_io = metapage_read_end_io;
			bio->bi_private = page;
			len = xlen << inode->i_blkbits;
			offset = block_offset << inode->i_blkbits;
			if (bio_add_page(bio, page, len, offset) < len)
				goto add_failed;
			block_offset += xlen;
		} else
			block_offset++;
	}
	if (bio)
		submit_bio(bio);
	else
		unlock_page(page);

	return 0;

add_failed:
	printk(KERN_ERR "JFS: bio_add_page failed unexpectedly\n");
	bio_put(bio);
	dec_io(page, last_read_complete);
	return -EIO;
}

static bool metapage_release_folio(struct folio *folio, gfp_t gfp_mask)
{
	struct metapage *mp;
	bool ret = true;
	int offset;

	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
		mp = page_to_mp(&folio->page, offset);

		if (!mp)
			continue;

		jfs_info("metapage_release_folio: mp = 0x%p", mp);
		if (mp->count || mp->nohomeok ||
		    test_bit(META_dirty, &mp->flag)) {
			jfs_info("count = %ld, nohomeok = %d", mp->count,
				 mp->nohomeok);
			ret = false;
			continue;
		}
		if (mp->lsn)
			remove_from_logsync(mp);
		remove_metapage(&folio->page, mp);
		INCREMENT(mpStat.pagefree);
		free_metapage(mp);
	}
	return ret;
}

static void metapage_invalidate_folio(struct folio *folio, size_t offset,
				    size_t length)
{
	BUG_ON(offset || length < folio_size(folio));

	BUG_ON(folio_test_writeback(folio));

	metapage_release_folio(folio, 0);
}

const struct address_space_operations jfs_metapage_aops = {
	.read_folio	= metapage_read_folio,
	.writepage	= metapage_writepage,
	.release_folio	= metapage_release_folio,
	.invalidate_folio = metapage_invalidate_folio,
	.dirty_folio	= filemap_dirty_folio,
};

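/*
 * Look up (or create) the metapage covering lblock.  'absolute'
 * addresses the block device through the direct inode's mapping;
 * 'new' skips reading the metapage from disk and zeroes it instead.
 * Callers normally go through the read_metapage()/get_metapage()
 * wrappers in jfs_metapage.h.
 */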
struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
				unsigned int size, int absolute,
				unsigned long new)
{
	int l2BlocksPerPage;
	int l2bsize;
	struct address_space *mapping;
	struct metapage *mp = NULL;
	struct page *page;
	unsigned long page_index;
	unsigned long page_offset;

	jfs_info("__get_metapage: ino = %ld, lblock = 0x%lx, abs=%d",
		 inode->i_ino, lblock, absolute);

	l2bsize = inode->i_blkbits;
	l2BlocksPerPage = PAGE_SHIFT - l2bsize;
	page_index = lblock >> l2BlocksPerPage;
	page_offset = (lblock - (page_index << l2BlocksPerPage)) << l2bsize;
	if ((page_offset + size) > PAGE_SIZE) {
		jfs_err("MetaData crosses page boundary!!");
		jfs_err("lblock = %lx, size  = %d", lblock, size);
		dump_stack();
		return NULL;
	}
	if (absolute)
		mapping = JFS_SBI(inode->i_sb)->direct_inode->i_mapping;
	else {
		/*
		 * If an nfs client tries to read an inode that is larger
		 * than any existing inodes, we may try to read past the
		 * end of the inode map
		 */
		if ((lblock << inode->i_blkbits) >= inode->i_size)
			return NULL;
		mapping = inode->i_mapping;
	}

	if (new && (PSIZE == PAGE_SIZE)) {
		page = grab_cache_page(mapping, page_index);
		if (!page) {
			jfs_err("grab_cache_page failed!");
			return NULL;
		}
		SetPageUptodate(page);
	} else {
		page = read_mapping_page(mapping, page_index, NULL);
		if (IS_ERR(page) || !PageUptodate(page)) {
			jfs_err("read_mapping_page failed!");
			return NULL;
		}
		lock_page(page);
	}

	mp = page_to_mp(page, page_offset);
	if (mp) {
		if (mp->logical_size != size) {
			jfs_error(inode->i_sb,
				  "get_mp->logical_size != size\n");
			jfs_err("logical_size = %d, size = %d",
				mp->logical_size, size);
			dump_stack();
			goto unlock;
		}
		mp->count++;
		lock_metapage(mp);
		if (test_bit(META_discard, &mp->flag)) {
			if (!new) {
				jfs_error(inode->i_sb,
					  "using a discarded metapage\n");
				discard_metapage(mp);
				goto unlock;
			}
			clear_bit(META_discard, &mp->flag);
		}
	} else {
		INCREMENT(mpStat.pagealloc);
		mp = alloc_metapage(GFP_NOFS);
		if (!mp)
			goto unlock;
		mp->page = page;
		mp->sb = inode->i_sb;
		mp->flag = 0;
		mp->xflag = COMMIT_PAGE;
		mp->count = 1;
		mp->nohomeok = 0;
		mp->logical_size = size;
		mp->data = page_address(page) + page_offset;
		mp->index = lblock;
		if (unlikely(insert_metapage(page, mp))) {
			free_metapage(mp);
			goto unlock;
		}
		lock_metapage(mp);
	}

	if (new) {
		jfs_info("zeroing mp = 0x%p", mp);
		memset(mp->data, 0, PSIZE);
	}

	unlock_page(page);
	jfs_info("__get_metapage: returning = 0x%p data = 0x%p", mp, mp->data);
	return mp;

unlock:
	unlock_page(page);
	return NULL;
}

void grab_metapage(struct metapage * mp)
{
	jfs_info("grab_metapage: mp = 0x%p", mp);
	get_page(mp->page);
	lock_page(mp->page);
	mp->count++;
	lock_metapage(mp);
	unlock_page(mp->page);
}

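/*
 * Write a metapage to disk immediately, overriding the journal's
 * nohomeok hold: META_forcewrite lets metapage_writepage() write the
 * metapage even while it is still pinned.
 */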
void force_metapage(struct metapage *mp)
{
	struct page *page = mp->page;
	jfs_info("force_metapage: mp = 0x%p", mp);
	set_bit(META_forcewrite, &mp->flag);
	clear_bit(META_sync, &mp->flag);
	get_page(page);
	lock_page(page);
	set_page_dirty(page);
	if (write_one_page(page))
		jfs_error(mp->sb, "write_one_page() failed\n");
	clear_bit(META_forcewrite, &mp->flag);
	put_page(page);
}

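/*
 * hold_metapage() and put_metapage() bracket short-term access to a
 * metapage: hold takes only the page lock, and put either unlocks the
 * page (someone else still holds a reference) or takes a reference and
 * releases the metapage properly.
 */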
void hold_metapage(struct metapage *mp)
{
	lock_page(mp->page);
}

void put_metapage(struct metapage *mp)
{
	if (mp->count || mp->nohomeok) {
		/* Someone else will release this */
		unlock_page(mp->page);
		return;
	}
	get_page(mp->page);
	mp->count++;
	lock_metapage(mp);
	unlock_page(mp->page);
	release_metapage(mp);
}

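/*
 * Drop a reference to the metapage.  On the final release a dirty
 * metapage is handed to writeback (written synchronously if META_sync
 * is set); a clean one is removed from the log's synclist.
 * drop_metapage() then frees it unless it is still dirty, pinned, or
 * under I/O.
 */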
void release_metapage(struct metapage * mp)
{
	struct page *page = mp->page;
	jfs_info("release_metapage: mp = 0x%p, flag = 0x%lx", mp, mp->flag);

	BUG_ON(!page);

	lock_page(page);
	unlock_metapage(mp);

	assert(mp->count);
	if (--mp->count || mp->nohomeok) {
		unlock_page(page);
		put_page(page);
		return;
	}

	if (test_bit(META_dirty, &mp->flag)) {
		set_page_dirty(page);
		if (test_bit(META_sync, &mp->flag)) {
			clear_bit(META_sync, &mp->flag);
			if (write_one_page(page))
				jfs_error(mp->sb, "write_one_page() failed\n");
			lock_page(page); /* write_one_page unlocks the page */
		}
	} else if (mp->lsn)	/* discard_metapage doesn't remove it */
		remove_from_logsync(mp);

	/* Try to keep metapages from using up too much memory */
	drop_metapage(page, mp);

	unlock_page(page);
	put_page(page);
}

void __invalidate_metapages(struct inode *ip, s64 addr, int len)
{
	sector_t lblock;
	int l2BlocksPerPage = PAGE_SHIFT - ip->i_blkbits;
	int BlocksPerPage = 1 << l2BlocksPerPage;
	/* All callers are interested in block device's mapping */
	struct address_space *mapping =
		JFS_SBI(ip->i_sb)->direct_inode->i_mapping;
	struct metapage *mp;
	struct page *page;
	unsigned int offset;

	/*
	 * Mark metapages to discard.  They will eventually be
	 * released, but should not be written.
	 */
	for (lblock = addr & ~(BlocksPerPage - 1); lblock < addr + len;
	     lblock += BlocksPerPage) {
		page = find_lock_page(mapping, lblock >> l2BlocksPerPage);
		if (!page)
			continue;
		for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
			mp = page_to_mp(page, offset);
			if (!mp)
				continue;
			if (mp->index < addr)
				continue;
			if (mp->index >= addr + len)
				break;

			clear_bit(META_dirty, &mp->flag);
			set_bit(META_discard, &mp->flag);
			if (mp->lsn)
				remove_from_logsync(mp);
		}
		unlock_page(page);
		put_page(page);
	}
}

#ifdef CONFIG_JFS_STATISTICS
int jfs_mpstat_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m,
		       "JFS Metapage statistics\n"
		       "=======================\n"
		       "page allocations = %d\n"
		       "page frees = %d\n"
		       "lock waits = %d\n",
		       mpStat.pagealloc,
		       mpStat.pagefree,
		       mpStat.lockwait);
	return 0;
}
#endif