cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

bmap.c (65920B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
      4 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
      5 */
      6
      7#include <linux/spinlock.h>
      8#include <linux/completion.h>
      9#include <linux/buffer_head.h>
     10#include <linux/blkdev.h>
     11#include <linux/gfs2_ondisk.h>
     12#include <linux/crc32.h>
     13#include <linux/iomap.h>
     14#include <linux/ktime.h>
     15
     16#include "gfs2.h"
     17#include "incore.h"
     18#include "bmap.h"
     19#include "glock.h"
     20#include "inode.h"
     21#include "meta_io.h"
     22#include "quota.h"
     23#include "rgrp.h"
     24#include "log.h"
     25#include "super.h"
     26#include "trans.h"
     27#include "dir.h"
     28#include "util.h"
     29#include "aops.h"
     30#include "trace_gfs2.h"
     31
      32/* This doesn't need to be that large: the maximum number of 64-bit
      33 * pointers in a 4k block is 512, so __u16 is fine for that. It
      34 * saves stack space to keep it small.
     35 */
     36struct metapath {
     37	struct buffer_head *mp_bh[GFS2_MAX_META_HEIGHT];
     38	__u16 mp_list[GFS2_MAX_META_HEIGHT];
     39	int mp_fheight; /* find_metapath height */
     40	int mp_aheight; /* actual height (lookup height) */
     41};
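/*
 * Illustrative sketch (not part of the original file): with 4k blocks,
 * an indirect block holds at most 4096 / sizeof(__be64) = 512 pointers,
 * so every per-height index fits comfortably in a __u16.  A
 * compile-time check along these lines would express that:
 */
#if 0	/* sketch only */
static inline void metapath_bounds_sketch(void)
{
	BUILD_BUG_ON(4096 / sizeof(__be64) > U16_MAX);
}
#endif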
     42
     43static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length);
     44
     45/**
     46 * gfs2_unstuffer_page - unstuff a stuffed inode into a block cached by a page
     47 * @ip: the inode
     48 * @dibh: the dinode buffer
     49 * @block: the block number that was allocated
      50 * @page: The page; must be supplied by the caller
     51 *
     52 * Returns: errno
     53 */
     54
     55static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
     56			       u64 block, struct page *page)
     57{
     58	struct inode *inode = &ip->i_inode;
     59
     60	if (!PageUptodate(page)) {
     61		void *kaddr = kmap(page);
     62		u64 dsize = i_size_read(inode);
      63
     64		if (dsize > gfs2_max_stuffed_size(ip))
     65			dsize = gfs2_max_stuffed_size(ip);
     66
     67		memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
     68		memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
     69		kunmap(page);
     70
     71		SetPageUptodate(page);
     72	}
     73
     74	if (gfs2_is_jdata(ip)) {
     75		struct buffer_head *bh;
     76
     77		if (!page_has_buffers(page))
     78			create_empty_buffers(page, BIT(inode->i_blkbits),
     79					     BIT(BH_Uptodate));
     80
     81		bh = page_buffers(page);
     82		if (!buffer_mapped(bh))
     83			map_bh(bh, inode->i_sb, block);
     84
     85		set_buffer_uptodate(bh);
     86		gfs2_trans_add_data(ip->i_gl, bh);
     87	} else {
     88		set_page_dirty(page);
     89		gfs2_ordered_add_inode(ip);
     90	}
     91
     92	return 0;
     93}
     94
     95static int __gfs2_unstuff_inode(struct gfs2_inode *ip, struct page *page)
     96{
     97	struct buffer_head *bh, *dibh;
     98	struct gfs2_dinode *di;
     99	u64 block = 0;
    100	int isdir = gfs2_is_dir(ip);
    101	int error;
    102
    103	error = gfs2_meta_inode_buffer(ip, &dibh);
    104	if (error)
    105		return error;
    106
    107	if (i_size_read(&ip->i_inode)) {
    108		/* Get a free block, fill it with the stuffed data,
    109		   and write it out to disk */
    110
    111		unsigned int n = 1;
    112		error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
    113		if (error)
    114			goto out_brelse;
    115		if (isdir) {
    116			gfs2_trans_remove_revoke(GFS2_SB(&ip->i_inode), block, 1);
    117			error = gfs2_dir_get_new_buffer(ip, block, &bh);
    118			if (error)
    119				goto out_brelse;
    120			gfs2_buffer_copy_tail(bh, sizeof(struct gfs2_meta_header),
    121					      dibh, sizeof(struct gfs2_dinode));
    122			brelse(bh);
    123		} else {
    124			error = gfs2_unstuffer_page(ip, dibh, block, page);
    125			if (error)
    126				goto out_brelse;
    127		}
    128	}
    129
    130	/*  Set up the pointer to the new block  */
    131
    132	gfs2_trans_add_meta(ip->i_gl, dibh);
    133	di = (struct gfs2_dinode *)dibh->b_data;
    134	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
    135
    136	if (i_size_read(&ip->i_inode)) {
    137		*(__be64 *)(di + 1) = cpu_to_be64(block);
    138		gfs2_add_inode_blocks(&ip->i_inode, 1);
    139		di->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
    140	}
    141
    142	ip->i_height = 1;
    143	di->di_height = cpu_to_be16(1);
    144
    145out_brelse:
    146	brelse(dibh);
    147	return error;
    148}
    149
    150/**
    151 * gfs2_unstuff_dinode - Unstuff a dinode when the data has grown too big
    152 * @ip: The GFS2 inode to unstuff
    153 *
    154 * This routine unstuffs a dinode and returns it to a "normal" state such
    155 * that the height can be grown in the traditional way.
    156 *
    157 * Returns: errno
    158 */
    159
    160int gfs2_unstuff_dinode(struct gfs2_inode *ip)
    161{
    162	struct inode *inode = &ip->i_inode;
    163	struct page *page;
    164	int error;
    165
    166	down_write(&ip->i_rw_mutex);
    167	page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
    168	error = -ENOMEM;
    169	if (!page)
    170		goto out;
    171	error = __gfs2_unstuff_inode(ip, page);
    172	unlock_page(page);
    173	put_page(page);
    174out:
    175	up_write(&ip->i_rw_mutex);
    176	return error;
    177}
    178
    179/**
    180 * find_metapath - Find path through the metadata tree
    181 * @sdp: The superblock
    182 * @block: The disk block to look up
    183 * @mp: The metapath to return the result in
    184 * @height: The pre-calculated height of the metadata tree
    185 *
     186 *   This routine fills in a struct metapath structure that defines a path
     187 *   through the metadata tree to get to block @block.
    188 *
    189 *   Example:
    190 *   Given:  "ip" is a height 3 file, "offset" is 101342453, and this is a
    191 *   filesystem with a blocksize of 4096.
    192 *
    193 *   find_metapath() would return a struct metapath structure set to:
    194 *   mp_fheight = 3, mp_list[0] = 0, mp_list[1] = 48, and mp_list[2] = 165.
    195 *
    196 *   That means that in order to get to the block containing the byte at
    197 *   offset 101342453, we would load the indirect block pointed to by pointer
    198 *   0 in the dinode.  We would then load the indirect block pointed to by
    199 *   pointer 48 in that indirect block.  We would then load the data block
    200 *   pointed to by pointer 165 in that indirect block.
    201 *
    202 *             ----------------------------------------
    203 *             | Dinode |                             |
    204 *             |        |                            4|
    205 *             |        |0 1 2 3 4 5                 9|
    206 *             |        |                            6|
    207 *             ----------------------------------------
    208 *                       |
    209 *                       |
    210 *                       V
    211 *             ----------------------------------------
    212 *             | Indirect Block                       |
    213 *             |                                     5|
    214 *             |            4 4 4 4 4 5 5            1|
    215 *             |0           5 6 7 8 9 0 1            2|
    216 *             ----------------------------------------
    217 *                                |
    218 *                                |
    219 *                                V
    220 *             ----------------------------------------
    221 *             | Indirect Block                       |
    222 *             |                         1 1 1 1 1   5|
    223 *             |                         6 6 6 6 6   1|
    224 *             |0                        3 4 5 6 7   2|
    225 *             ----------------------------------------
    226 *                                           |
    227 *                                           |
    228 *                                           V
    229 *             ----------------------------------------
    230 *             | Data block containing offset         |
    231 *             |            101342453                 |
    232 *             |                                      |
    233 *             |                                      |
    234 *             ----------------------------------------
    235 *
    236 */
    237
    238static void find_metapath(const struct gfs2_sbd *sdp, u64 block,
    239			  struct metapath *mp, unsigned int height)
    240{
    241	unsigned int i;
    242
    243	mp->mp_fheight = height;
    244	for (i = height; i--;)
    245		mp->mp_list[i] = do_div(block, sdp->sd_inptrs);
    246}
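/*
 * Worked example (a sketch, not part of the original file), matching the
 * numbers in the comment above: with a 4096-byte blocksize and 512
 * pointers per indirect block, byte offset 101342453 is logical block
 * 101342453 >> 12 = 24741, and the repeated do_div() yields
 *
 *	24741 % 512 = 165		-> mp_list[2]
 *	(24741 / 512) % 512 = 48	-> mp_list[1]
 *	24741 / 512 / 512 = 0		-> mp_list[0]
 *
 * i.e. mp_list = { 0, 48, 165 } for a height 3 file, as illustrated.
 */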
    247
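/*
 * Reading note (a sketch, not part of the original file): when the tree
 * height is grown, __gfs2_iomap_alloc relocates the old top of the tree
 * under pointer 0 of the new root.  If the target block also lies under
 * pointer 0 (mp_list[0] == 0), the new branch shares that first indirect
 * block and only diverges from height 2 downwards; otherwise it diverges
 * directly below the root at height 1.  This is the "branch_start" used
 * when counting the indirect blocks needed.
 */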
    248static inline unsigned int metapath_branch_start(const struct metapath *mp)
    249{
    250	if (mp->mp_list[0] == 0)
    251		return 2;
    252	return 1;
    253}
    254
    255/**
    256 * metaptr1 - Return the first possible metadata pointer in a metapath buffer
    257 * @height: The metadata height (0 = dinode)
    258 * @mp: The metapath
    259 */
    260static inline __be64 *metaptr1(unsigned int height, const struct metapath *mp)
    261{
    262	struct buffer_head *bh = mp->mp_bh[height];
    263	if (height == 0)
    264		return ((__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)));
    265	return ((__be64 *)(bh->b_data + sizeof(struct gfs2_meta_header)));
    266}
    267
    268/**
    269 * metapointer - Return pointer to start of metadata in a buffer
    270 * @height: The metadata height (0 = dinode)
    271 * @mp: The metapath
    272 *
    273 * Return a pointer to the block number of the next height of the metadata
    274 * tree given a buffer containing the pointer to the current height of the
    275 * metadata tree.
    276 */
    277
    278static inline __be64 *metapointer(unsigned int height, const struct metapath *mp)
    279{
    280	__be64 *p = metaptr1(height, mp);
    281	return p + mp->mp_list[height];
    282}
    283
    284static inline const __be64 *metaend(unsigned int height, const struct metapath *mp)
    285{
    286	const struct buffer_head *bh = mp->mp_bh[height];
    287	return (const __be64 *)(bh->b_data + bh->b_size);
    288}
    289
    290static void clone_metapath(struct metapath *clone, struct metapath *mp)
    291{
    292	unsigned int hgt;
    293
    294	*clone = *mp;
    295	for (hgt = 0; hgt < mp->mp_aheight; hgt++)
    296		get_bh(clone->mp_bh[hgt]);
    297}
    298
    299static void gfs2_metapath_ra(struct gfs2_glock *gl, __be64 *start, __be64 *end)
    300{
    301	const __be64 *t;
    302
    303	for (t = start; t < end; t++) {
    304		struct buffer_head *rabh;
    305
    306		if (!*t)
    307			continue;
    308
    309		rabh = gfs2_getbuf(gl, be64_to_cpu(*t), CREATE);
    310		if (trylock_buffer(rabh)) {
    311			if (!buffer_uptodate(rabh)) {
    312				rabh->b_end_io = end_buffer_read_sync;
    313				submit_bh(REQ_OP_READ,
    314					  REQ_RAHEAD | REQ_META | REQ_PRIO,
    315					  rabh);
    316				continue;
    317			}
    318			unlock_buffer(rabh);
    319		}
    320		brelse(rabh);
    321	}
    322}
    323
    324static int __fillup_metapath(struct gfs2_inode *ip, struct metapath *mp,
    325			     unsigned int x, unsigned int h)
    326{
    327	for (; x < h; x++) {
    328		__be64 *ptr = metapointer(x, mp);
    329		u64 dblock = be64_to_cpu(*ptr);
    330		int ret;
    331
    332		if (!dblock)
    333			break;
    334		ret = gfs2_meta_buffer(ip, GFS2_METATYPE_IN, dblock, &mp->mp_bh[x + 1]);
    335		if (ret)
    336			return ret;
    337	}
    338	mp->mp_aheight = x + 1;
    339	return 0;
    340}
    341
    342/**
    343 * lookup_metapath - Walk the metadata tree to a specific point
    344 * @ip: The inode
    345 * @mp: The metapath
    346 *
    347 * Assumes that the inode's buffer has already been looked up and
    348 * hooked onto mp->mp_bh[0] and that the metapath has been initialised
    349 * by find_metapath().
    350 *
    351 * If this function encounters part of the tree which has not been
    352 * allocated, it returns the current height of the tree at the point
    353 * at which it found the unallocated block. Blocks which are found are
    354 * added to the mp->mp_bh[] list.
    355 *
    356 * Returns: error
    357 */
    358
    359static int lookup_metapath(struct gfs2_inode *ip, struct metapath *mp)
    360{
    361	return __fillup_metapath(ip, mp, 0, ip->i_height - 1);
    362}
    363
    364/**
    365 * fillup_metapath - fill up buffers for the metadata path to a specific height
    366 * @ip: The inode
    367 * @mp: The metapath
    368 * @h: The height to which it should be mapped
    369 *
    370 * Similar to lookup_metapath, but does lookups for a range of heights
    371 *
    372 * Returns: error or the number of buffers filled
    373 */
    374
    375static int fillup_metapath(struct gfs2_inode *ip, struct metapath *mp, int h)
    376{
    377	unsigned int x = 0;
    378	int ret;
    379
    380	if (h) {
    381		/* find the first buffer we need to look up. */
    382		for (x = h - 1; x > 0; x--) {
    383			if (mp->mp_bh[x])
    384				break;
    385		}
    386	}
    387	ret = __fillup_metapath(ip, mp, x, h);
    388	if (ret)
    389		return ret;
    390	return mp->mp_aheight - x - 1;
    391}
    392
    393static sector_t metapath_to_block(struct gfs2_sbd *sdp, struct metapath *mp)
    394{
    395	sector_t factor = 1, block = 0;
    396	int hgt;
    397
    398	for (hgt = mp->mp_fheight - 1; hgt >= 0; hgt--) {
    399		if (hgt < mp->mp_aheight)
    400			block += mp->mp_list[hgt] * factor;
    401		factor *= sdp->sd_inptrs;
    402	}
    403	return block;
    404}
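/*
 * Sketch (not part of the original file): metapath_to_block() inverts
 * find_metapath() for the allocated part of the path.  Reusing the
 * example above with 512 pointers per block:
 *
 *	0 * 512 * 512 + 48 * 512 + 165 = 24741
 *
 * which is the logical block that find_metapath() decomposed.
 */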
    405
    406static void release_metapath(struct metapath *mp)
    407{
    408	int i;
    409
    410	for (i = 0; i < GFS2_MAX_META_HEIGHT; i++) {
    411		if (mp->mp_bh[i] == NULL)
    412			break;
    413		brelse(mp->mp_bh[i]);
    414		mp->mp_bh[i] = NULL;
    415	}
    416}
    417
    418/**
    419 * gfs2_extent_length - Returns length of an extent of blocks
    420 * @bh: The metadata block
    421 * @ptr: Current position in @bh
    422 * @limit: Max extent length to return
    423 * @eob: Set to 1 if we hit "end of block"
    424 *
    425 * Returns: The length of the extent (minimum of one block)
    426 */
    427
    428static inline unsigned int gfs2_extent_length(struct buffer_head *bh, __be64 *ptr, size_t limit, int *eob)
    429{
    430	const __be64 *end = (__be64 *)(bh->b_data + bh->b_size);
    431	const __be64 *first = ptr;
    432	u64 d = be64_to_cpu(*ptr);
    433
    434	*eob = 0;
    435	do {
    436		ptr++;
    437		if (ptr >= end)
    438			break;
    439		d++;
     440	} while (be64_to_cpu(*ptr) == d);
    441	if (ptr >= end)
    442		*eob = 1;
    443	return ptr - first;
    444}
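/*
 * Standalone illustration (userspace sketch, not part of the original
 * file): gfs2_extent_length() counts how many on-disk pointers starting
 * at @ptr hold consecutive block numbers.  The same loop over a toy
 * array of big-endian values:
 */
#if 0	/* illustration only */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Pointers 100, 101, 102, then a gap: extent length is 3. */
	uint64_t ptrs[] = { htobe64(100), htobe64(101), htobe64(102),
			    htobe64(200) };
	uint64_t *p = ptrs, *end = ptrs + 4;
	uint64_t d = be64toh(*p);

	do {
		p++;
		if (p >= end)
			break;
		d++;
	} while (be64toh(*p) == d);
	printf("extent length: %td\n", p - ptrs);	/* prints 3 */
	return 0;
}
#endif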
    445
    446enum walker_status { WALK_STOP, WALK_FOLLOW, WALK_CONTINUE };
    447
    448/*
    449 * gfs2_metadata_walker - walk an indirect block
    450 * @mp: Metapath to indirect block
    451 * @ptrs: Number of pointers to look at
    452 *
    453 * When returning WALK_FOLLOW, the walker must update @mp to point at the right
    454 * indirect block to follow.
    455 */
    456typedef enum walker_status (*gfs2_metadata_walker)(struct metapath *mp,
    457						   unsigned int ptrs);
    458
    459/*
    460 * gfs2_walk_metadata - walk a tree of indirect blocks
    461 * @inode: The inode
    462 * @mp: Starting point of walk
    463 * @max_len: Maximum number of blocks to walk
    464 * @walker: Called during the walk
    465 *
    466 * Returns 1 if the walk was stopped by @walker, 0 if we went past @max_len or
    467 * past the end of metadata, and a negative error code otherwise.
    468 */
    469
    470static int gfs2_walk_metadata(struct inode *inode, struct metapath *mp,
    471		u64 max_len, gfs2_metadata_walker walker)
    472{
    473	struct gfs2_inode *ip = GFS2_I(inode);
    474	struct gfs2_sbd *sdp = GFS2_SB(inode);
    475	u64 factor = 1;
    476	unsigned int hgt;
    477	int ret;
    478
    479	/*
    480	 * The walk starts in the lowest allocated indirect block, which may be
    481	 * before the position indicated by @mp.  Adjust @max_len accordingly
    482	 * to avoid a short walk.
    483	 */
    484	for (hgt = mp->mp_fheight - 1; hgt >= mp->mp_aheight; hgt--) {
    485		max_len += mp->mp_list[hgt] * factor;
    486		mp->mp_list[hgt] = 0;
    487		factor *= sdp->sd_inptrs;
    488	}
    489
    490	for (;;) {
    491		u16 start = mp->mp_list[hgt];
    492		enum walker_status status;
    493		unsigned int ptrs;
    494		u64 len;
    495
    496		/* Walk indirect block. */
    497		ptrs = (hgt >= 1 ? sdp->sd_inptrs : sdp->sd_diptrs) - start;
    498		len = ptrs * factor;
    499		if (len > max_len)
    500			ptrs = DIV_ROUND_UP_ULL(max_len, factor);
    501		status = walker(mp, ptrs);
    502		switch (status) {
    503		case WALK_STOP:
    504			return 1;
    505		case WALK_FOLLOW:
    506			BUG_ON(mp->mp_aheight == mp->mp_fheight);
    507			ptrs = mp->mp_list[hgt] - start;
    508			len = ptrs * factor;
    509			break;
    510		case WALK_CONTINUE:
    511			break;
    512		}
    513		if (len >= max_len)
    514			break;
    515		max_len -= len;
    516		if (status == WALK_FOLLOW)
    517			goto fill_up_metapath;
    518
    519lower_metapath:
    520		/* Decrease height of metapath. */
    521		brelse(mp->mp_bh[hgt]);
    522		mp->mp_bh[hgt] = NULL;
    523		mp->mp_list[hgt] = 0;
    524		if (!hgt)
    525			break;
    526		hgt--;
    527		factor *= sdp->sd_inptrs;
    528
    529		/* Advance in metadata tree. */
    530		(mp->mp_list[hgt])++;
    531		if (hgt) {
    532			if (mp->mp_list[hgt] >= sdp->sd_inptrs)
    533				goto lower_metapath;
    534		} else {
    535			if (mp->mp_list[hgt] >= sdp->sd_diptrs)
    536				break;
    537		}
    538
    539fill_up_metapath:
    540		/* Increase height of metapath. */
    541		ret = fillup_metapath(ip, mp, ip->i_height - 1);
    542		if (ret < 0)
    543			return ret;
    544		hgt += ret;
    545		for (; ret; ret--)
    546			do_div(factor, sdp->sd_inptrs);
    547		mp->mp_aheight = hgt + 1;
    548	}
    549	return 0;
    550}
    551
    552static enum walker_status gfs2_hole_walker(struct metapath *mp,
    553					   unsigned int ptrs)
    554{
    555	const __be64 *start, *ptr, *end;
    556	unsigned int hgt;
    557
    558	hgt = mp->mp_aheight - 1;
    559	start = metapointer(hgt, mp);
    560	end = start + ptrs;
    561
    562	for (ptr = start; ptr < end; ptr++) {
    563		if (*ptr) {
    564			mp->mp_list[hgt] += ptr - start;
    565			if (mp->mp_aheight == mp->mp_fheight)
    566				return WALK_STOP;
    567			return WALK_FOLLOW;
    568		}
    569	}
    570	return WALK_CONTINUE;
    571}
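/*
 * Sketch (not part of the original file): gfs2_hole_walker() above is
 * the model for a walker callback.  A hypothetical walker that merely
 * counts allocated pointers at the current height could look like:
 */
#if 0	/* illustration only; "sketch_count" is a hypothetical accumulator */
static u64 sketch_count;

static enum walker_status gfs2_count_walker(struct metapath *mp,
					    unsigned int ptrs)
{
	const __be64 *start, *ptr, *end;
	unsigned int hgt = mp->mp_aheight - 1;

	start = metapointer(hgt, mp);
	end = start + ptrs;
	for (ptr = start; ptr < end; ptr++)
		if (*ptr)
			sketch_count++;
	return WALK_CONTINUE;	/* never descend, never stop early */
}
#endif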
    572
    573/**
    574 * gfs2_hole_size - figure out the size of a hole
    575 * @inode: The inode
    576 * @lblock: The logical starting block number
    577 * @len: How far to look (in blocks)
    578 * @mp: The metapath at lblock
    579 * @iomap: The iomap to store the hole size in
    580 *
    581 * This function modifies @mp.
    582 *
    583 * Returns: errno on error
    584 */
    585static int gfs2_hole_size(struct inode *inode, sector_t lblock, u64 len,
    586			  struct metapath *mp, struct iomap *iomap)
    587{
    588	struct metapath clone;
    589	u64 hole_size;
    590	int ret;
    591
    592	clone_metapath(&clone, mp);
    593	ret = gfs2_walk_metadata(inode, &clone, len, gfs2_hole_walker);
    594	if (ret < 0)
    595		goto out;
    596
    597	if (ret == 1)
    598		hole_size = metapath_to_block(GFS2_SB(inode), &clone) - lblock;
    599	else
    600		hole_size = len;
    601	iomap->length = hole_size << inode->i_blkbits;
    602	ret = 0;
    603
    604out:
    605	release_metapath(&clone);
    606	return ret;
    607}
    608
    609static inline void gfs2_indirect_init(struct metapath *mp,
    610				      struct gfs2_glock *gl, unsigned int i,
    611				      unsigned offset, u64 bn)
    612{
    613	__be64 *ptr = (__be64 *)(mp->mp_bh[i - 1]->b_data +
    614		       ((i > 1) ? sizeof(struct gfs2_meta_header) :
    615				 sizeof(struct gfs2_dinode)));
    616	BUG_ON(i < 1);
    617	BUG_ON(mp->mp_bh[i] != NULL);
    618	mp->mp_bh[i] = gfs2_meta_new(gl, bn);
    619	gfs2_trans_add_meta(gl, mp->mp_bh[i]);
    620	gfs2_metatype_set(mp->mp_bh[i], GFS2_METATYPE_IN, GFS2_FORMAT_IN);
    621	gfs2_buffer_clear_tail(mp->mp_bh[i], sizeof(struct gfs2_meta_header));
    622	ptr += offset;
    623	*ptr = cpu_to_be64(bn);
    624}
    625
    626enum alloc_state {
    627	ALLOC_DATA = 0,
    628	ALLOC_GROW_DEPTH = 1,
    629	ALLOC_GROW_HEIGHT = 2,
    630	/* ALLOC_UNSTUFF = 3,   TBD and rather complicated */
    631};
    632
    633/**
    634 * __gfs2_iomap_alloc - Build a metadata tree of the requested height
    635 * @inode: The GFS2 inode
    636 * @iomap: The iomap structure
    637 * @mp: The metapath, with proper height information calculated
    638 *
    639 * In this routine we may have to alloc:
    640 *   i) Indirect blocks to grow the metadata tree height
    641 *  ii) Indirect blocks to fill in lower part of the metadata tree
    642 * iii) Data blocks
    643 *
    644 * This function is called after __gfs2_iomap_get, which works out the
    645 * total number of blocks which we need via gfs2_alloc_size.
    646 *
    647 * We then do the actual allocation asking for an extent at a time (if
    648 * enough contiguous free blocks are available, there will only be one
     649 * allocation request per call) and use the state machine to initialise
    650 * the blocks in order.
    651 *
    652 * Right now, this function will allocate at most one indirect block
    653 * worth of data -- with a default block size of 4K, that's slightly
    654 * less than 2M.  If this limitation is ever removed to allow huge
    655 * allocations, we would probably still want to limit the iomap size we
    656 * return to avoid stalling other tasks during huge writes; the next
    657 * iomap iteration would then find the blocks already allocated.
    658 *
    659 * Returns: errno on error
    660 */
    661
    662static int __gfs2_iomap_alloc(struct inode *inode, struct iomap *iomap,
    663			      struct metapath *mp)
    664{
    665	struct gfs2_inode *ip = GFS2_I(inode);
    666	struct gfs2_sbd *sdp = GFS2_SB(inode);
    667	struct buffer_head *dibh = mp->mp_bh[0];
    668	u64 bn;
    669	unsigned n, i, blks, alloced = 0, iblks = 0, branch_start = 0;
    670	size_t dblks = iomap->length >> inode->i_blkbits;
    671	const unsigned end_of_metadata = mp->mp_fheight - 1;
    672	int ret;
    673	enum alloc_state state;
    674	__be64 *ptr;
    675	__be64 zero_bn = 0;
    676
    677	BUG_ON(mp->mp_aheight < 1);
    678	BUG_ON(dibh == NULL);
    679	BUG_ON(dblks < 1);
    680
    681	gfs2_trans_add_meta(ip->i_gl, dibh);
    682
    683	down_write(&ip->i_rw_mutex);
    684
    685	if (mp->mp_fheight == mp->mp_aheight) {
    686		/* Bottom indirect block exists */
    687		state = ALLOC_DATA;
    688	} else {
    689		/* Need to allocate indirect blocks */
    690		if (mp->mp_fheight == ip->i_height) {
    691			/* Writing into existing tree, extend tree down */
    692			iblks = mp->mp_fheight - mp->mp_aheight;
    693			state = ALLOC_GROW_DEPTH;
    694		} else {
    695			/* Building up tree height */
    696			state = ALLOC_GROW_HEIGHT;
    697			iblks = mp->mp_fheight - ip->i_height;
    698			branch_start = metapath_branch_start(mp);
    699			iblks += (mp->mp_fheight - branch_start);
    700		}
    701	}
    702
    703	/* start of the second part of the function (state machine) */
    704
    705	blks = dblks + iblks;
    706	i = mp->mp_aheight;
    707	do {
    708		n = blks - alloced;
    709		ret = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
    710		if (ret)
    711			goto out;
    712		alloced += n;
    713		if (state != ALLOC_DATA || gfs2_is_jdata(ip))
    714			gfs2_trans_remove_revoke(sdp, bn, n);
    715		switch (state) {
    716		/* Growing height of tree */
    717		case ALLOC_GROW_HEIGHT:
    718			if (i == 1) {
    719				ptr = (__be64 *)(dibh->b_data +
    720						 sizeof(struct gfs2_dinode));
    721				zero_bn = *ptr;
    722			}
    723			for (; i - 1 < mp->mp_fheight - ip->i_height && n > 0;
    724			     i++, n--)
    725				gfs2_indirect_init(mp, ip->i_gl, i, 0, bn++);
    726			if (i - 1 == mp->mp_fheight - ip->i_height) {
    727				i--;
    728				gfs2_buffer_copy_tail(mp->mp_bh[i],
    729						sizeof(struct gfs2_meta_header),
    730						dibh, sizeof(struct gfs2_dinode));
    731				gfs2_buffer_clear_tail(dibh,
    732						sizeof(struct gfs2_dinode) +
    733						sizeof(__be64));
    734				ptr = (__be64 *)(mp->mp_bh[i]->b_data +
    735					sizeof(struct gfs2_meta_header));
    736				*ptr = zero_bn;
    737				state = ALLOC_GROW_DEPTH;
     738				for (i = branch_start; i < mp->mp_fheight; i++) {
    739					if (mp->mp_bh[i] == NULL)
    740						break;
    741					brelse(mp->mp_bh[i]);
    742					mp->mp_bh[i] = NULL;
    743				}
    744				i = branch_start;
    745			}
    746			if (n == 0)
    747				break;
    748			fallthrough;	/* To branching from existing tree */
    749		case ALLOC_GROW_DEPTH:
    750			if (i > 1 && i < mp->mp_fheight)
    751				gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[i-1]);
    752			for (; i < mp->mp_fheight && n > 0; i++, n--)
    753				gfs2_indirect_init(mp, ip->i_gl, i,
    754						   mp->mp_list[i-1], bn++);
    755			if (i == mp->mp_fheight)
    756				state = ALLOC_DATA;
    757			if (n == 0)
    758				break;
    759			fallthrough;	/* To tree complete, adding data blocks */
    760		case ALLOC_DATA:
    761			BUG_ON(n > dblks);
    762			BUG_ON(mp->mp_bh[end_of_metadata] == NULL);
    763			gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[end_of_metadata]);
    764			dblks = n;
    765			ptr = metapointer(end_of_metadata, mp);
    766			iomap->addr = bn << inode->i_blkbits;
    767			iomap->flags |= IOMAP_F_MERGED | IOMAP_F_NEW;
    768			while (n-- > 0)
    769				*ptr++ = cpu_to_be64(bn++);
    770			break;
    771		}
    772	} while (iomap->addr == IOMAP_NULL_ADDR);
    773
    774	iomap->type = IOMAP_MAPPED;
    775	iomap->length = (u64)dblks << inode->i_blkbits;
    776	ip->i_height = mp->mp_fheight;
    777	gfs2_add_inode_blocks(&ip->i_inode, alloced);
    778	gfs2_dinode_out(ip, dibh->b_data);
    779out:
    780	up_write(&ip->i_rw_mutex);
    781	return ret;
    782}
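/*
 * Worked example (a sketch, not from the original file): suppose
 * ip->i_height is 2 and the write needs mp_fheight = 4 with
 * mp_aheight = 2.  The "else" branch above selects ALLOC_GROW_HEIGHT
 * with iblks = 4 - 2 = 2 new levels; if mp_list[0] == 0, branch_start
 * is 2 and iblks grows by another 4 - 2 = 2 blocks for the new branch.
 * The state machine then falls through ALLOC_GROW_HEIGHT ->
 * ALLOC_GROW_DEPTH -> ALLOC_DATA as each group of blocks is
 * initialised.
 */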
    783
    784#define IOMAP_F_GFS2_BOUNDARY IOMAP_F_PRIVATE
    785
    786/**
    787 * gfs2_alloc_size - Compute the maximum allocation size
    788 * @inode: The inode
    789 * @mp: The metapath
    790 * @size: Requested size in blocks
    791 *
    792 * Compute the maximum size of the next allocation at @mp.
    793 *
    794 * Returns: size in blocks
    795 */
    796static u64 gfs2_alloc_size(struct inode *inode, struct metapath *mp, u64 size)
    797{
    798	struct gfs2_inode *ip = GFS2_I(inode);
    799	struct gfs2_sbd *sdp = GFS2_SB(inode);
    800	const __be64 *first, *ptr, *end;
    801
    802	/*
    803	 * For writes to stuffed files, this function is called twice via
    804	 * __gfs2_iomap_get, before and after unstuffing. The size we return the
    805	 * first time needs to be large enough to get the reservation and
    806	 * allocation sizes right.  The size we return the second time must
    807	 * be exact or else __gfs2_iomap_alloc won't do the right thing.
    808	 */
    809
    810	if (gfs2_is_stuffed(ip) || mp->mp_fheight != mp->mp_aheight) {
    811		unsigned int maxsize = mp->mp_fheight > 1 ?
    812			sdp->sd_inptrs : sdp->sd_diptrs;
    813		maxsize -= mp->mp_list[mp->mp_fheight - 1];
    814		if (size > maxsize)
    815			size = maxsize;
    816		return size;
    817	}
    818
    819	first = metapointer(ip->i_height - 1, mp);
    820	end = metaend(ip->i_height - 1, mp);
    821	if (end - first > size)
    822		end = first + size;
    823	for (ptr = first; ptr < end; ptr++) {
    824		if (*ptr)
    825			break;
    826	}
    827	return ptr - first;
    828}
    829
    830/**
    831 * __gfs2_iomap_get - Map blocks from an inode to disk blocks
    832 * @inode: The inode
    833 * @pos: Starting position in bytes
    834 * @length: Length to map, in bytes
    835 * @flags: iomap flags
    836 * @iomap: The iomap structure
    837 * @mp: The metapath
    838 *
    839 * Returns: errno
    840 */
    841static int __gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
    842			    unsigned flags, struct iomap *iomap,
    843			    struct metapath *mp)
    844{
    845	struct gfs2_inode *ip = GFS2_I(inode);
    846	struct gfs2_sbd *sdp = GFS2_SB(inode);
    847	loff_t size = i_size_read(inode);
    848	__be64 *ptr;
    849	sector_t lblock;
    850	sector_t lblock_stop;
    851	int ret;
    852	int eob;
    853	u64 len;
    854	struct buffer_head *dibh = NULL, *bh;
    855	u8 height;
    856
    857	if (!length)
    858		return -EINVAL;
    859
    860	down_read(&ip->i_rw_mutex);
    861
    862	ret = gfs2_meta_inode_buffer(ip, &dibh);
    863	if (ret)
    864		goto unlock;
    865	mp->mp_bh[0] = dibh;
    866
    867	if (gfs2_is_stuffed(ip)) {
    868		if (flags & IOMAP_WRITE) {
    869			loff_t max_size = gfs2_max_stuffed_size(ip);
    870
    871			if (pos + length > max_size)
    872				goto unstuff;
    873			iomap->length = max_size;
    874		} else {
    875			if (pos >= size) {
    876				if (flags & IOMAP_REPORT) {
    877					ret = -ENOENT;
    878					goto unlock;
    879				} else {
    880					iomap->offset = pos;
    881					iomap->length = length;
    882					goto hole_found;
    883				}
    884			}
    885			iomap->length = size;
    886		}
    887		iomap->addr = (ip->i_no_addr << inode->i_blkbits) +
    888			      sizeof(struct gfs2_dinode);
    889		iomap->type = IOMAP_INLINE;
    890		iomap->inline_data = dibh->b_data + sizeof(struct gfs2_dinode);
    891		goto out;
    892	}
    893
    894unstuff:
    895	lblock = pos >> inode->i_blkbits;
    896	iomap->offset = lblock << inode->i_blkbits;
    897	lblock_stop = (pos + length - 1) >> inode->i_blkbits;
    898	len = lblock_stop - lblock + 1;
    899	iomap->length = len << inode->i_blkbits;
    900
    901	height = ip->i_height;
    902	while ((lblock + 1) * sdp->sd_sb.sb_bsize > sdp->sd_heightsize[height])
    903		height++;
    904	find_metapath(sdp, lblock, mp, height);
    905	if (height > ip->i_height || gfs2_is_stuffed(ip))
    906		goto do_alloc;
    907
    908	ret = lookup_metapath(ip, mp);
    909	if (ret)
    910		goto unlock;
    911
    912	if (mp->mp_aheight != ip->i_height)
    913		goto do_alloc;
    914
    915	ptr = metapointer(ip->i_height - 1, mp);
    916	if (*ptr == 0)
    917		goto do_alloc;
    918
    919	bh = mp->mp_bh[ip->i_height - 1];
    920	len = gfs2_extent_length(bh, ptr, len, &eob);
    921
    922	iomap->addr = be64_to_cpu(*ptr) << inode->i_blkbits;
    923	iomap->length = len << inode->i_blkbits;
    924	iomap->type = IOMAP_MAPPED;
    925	iomap->flags |= IOMAP_F_MERGED;
    926	if (eob)
    927		iomap->flags |= IOMAP_F_GFS2_BOUNDARY;
    928
    929out:
    930	iomap->bdev = inode->i_sb->s_bdev;
    931unlock:
    932	up_read(&ip->i_rw_mutex);
    933	return ret;
    934
    935do_alloc:
    936	if (flags & IOMAP_REPORT) {
    937		if (pos >= size)
    938			ret = -ENOENT;
    939		else if (height == ip->i_height)
    940			ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
    941		else
    942			iomap->length = size - iomap->offset;
    943	} else if (flags & IOMAP_WRITE) {
    944		u64 alloc_size;
    945
    946		if (flags & IOMAP_DIRECT)
    947			goto out;  /* (see gfs2_file_direct_write) */
    948
    949		len = gfs2_alloc_size(inode, mp, len);
    950		alloc_size = len << inode->i_blkbits;
    951		if (alloc_size < iomap->length)
    952			iomap->length = alloc_size;
    953	} else {
    954		if (pos < size && height == ip->i_height)
    955			ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
    956	}
    957hole_found:
    958	iomap->addr = IOMAP_NULL_ADDR;
    959	iomap->type = IOMAP_HOLE;
    960	goto out;
    961}
    962
    963static int gfs2_iomap_page_prepare(struct inode *inode, loff_t pos,
    964				   unsigned len)
    965{
    966	unsigned int blockmask = i_blocksize(inode) - 1;
    967	struct gfs2_sbd *sdp = GFS2_SB(inode);
    968	unsigned int blocks;
    969
    970	blocks = ((pos & blockmask) + len + blockmask) >> inode->i_blkbits;
    971	return gfs2_trans_begin(sdp, RES_DINODE + blocks, 0);
    972}
    973
    974static void gfs2_iomap_page_done(struct inode *inode, loff_t pos,
    975				 unsigned copied, struct page *page)
    976{
    977	struct gfs2_trans *tr = current->journal_info;
    978	struct gfs2_inode *ip = GFS2_I(inode);
    979	struct gfs2_sbd *sdp = GFS2_SB(inode);
    980
    981	if (page && !gfs2_is_stuffed(ip))
    982		gfs2_page_add_databufs(ip, page, offset_in_page(pos), copied);
    983
    984	if (tr->tr_num_buf_new)
    985		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
    986
    987	gfs2_trans_end(sdp);
    988}
    989
    990static const struct iomap_page_ops gfs2_iomap_page_ops = {
    991	.page_prepare = gfs2_iomap_page_prepare,
    992	.page_done = gfs2_iomap_page_done,
    993};
    994
    995static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
    996				  loff_t length, unsigned flags,
    997				  struct iomap *iomap,
    998				  struct metapath *mp)
    999{
   1000	struct gfs2_inode *ip = GFS2_I(inode);
   1001	struct gfs2_sbd *sdp = GFS2_SB(inode);
   1002	bool unstuff;
   1003	int ret;
   1004
   1005	unstuff = gfs2_is_stuffed(ip) &&
   1006		  pos + length > gfs2_max_stuffed_size(ip);
   1007
   1008	if (unstuff || iomap->type == IOMAP_HOLE) {
   1009		unsigned int data_blocks, ind_blocks;
   1010		struct gfs2_alloc_parms ap = {};
   1011		unsigned int rblocks;
   1012		struct gfs2_trans *tr;
   1013
   1014		gfs2_write_calc_reserv(ip, iomap->length, &data_blocks,
   1015				       &ind_blocks);
   1016		ap.target = data_blocks + ind_blocks;
   1017		ret = gfs2_quota_lock_check(ip, &ap);
   1018		if (ret)
   1019			return ret;
   1020
   1021		ret = gfs2_inplace_reserve(ip, &ap);
   1022		if (ret)
   1023			goto out_qunlock;
   1024
   1025		rblocks = RES_DINODE + ind_blocks;
   1026		if (gfs2_is_jdata(ip))
   1027			rblocks += data_blocks;
   1028		if (ind_blocks || data_blocks)
   1029			rblocks += RES_STATFS + RES_QUOTA;
   1030		if (inode == sdp->sd_rindex)
   1031			rblocks += 2 * RES_STATFS;
   1032		rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
   1033
   1034		ret = gfs2_trans_begin(sdp, rblocks,
   1035				       iomap->length >> inode->i_blkbits);
   1036		if (ret)
   1037			goto out_trans_fail;
   1038
   1039		if (unstuff) {
   1040			ret = gfs2_unstuff_dinode(ip);
   1041			if (ret)
   1042				goto out_trans_end;
   1043			release_metapath(mp);
   1044			ret = __gfs2_iomap_get(inode, iomap->offset,
   1045					       iomap->length, flags, iomap, mp);
   1046			if (ret)
   1047				goto out_trans_end;
   1048		}
   1049
   1050		if (iomap->type == IOMAP_HOLE) {
   1051			ret = __gfs2_iomap_alloc(inode, iomap, mp);
   1052			if (ret) {
   1053				gfs2_trans_end(sdp);
   1054				gfs2_inplace_release(ip);
   1055				punch_hole(ip, iomap->offset, iomap->length);
   1056				goto out_qunlock;
   1057			}
   1058		}
   1059
   1060		tr = current->journal_info;
   1061		if (tr->tr_num_buf_new)
   1062			__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
   1063
   1064		gfs2_trans_end(sdp);
   1065	}
   1066
   1067	if (gfs2_is_stuffed(ip) || gfs2_is_jdata(ip))
   1068		iomap->page_ops = &gfs2_iomap_page_ops;
   1069	return 0;
   1070
   1071out_trans_end:
   1072	gfs2_trans_end(sdp);
   1073out_trans_fail:
   1074	gfs2_inplace_release(ip);
   1075out_qunlock:
   1076	gfs2_quota_unlock(ip);
   1077	return ret;
   1078}
   1079
   1080static int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
   1081			    unsigned flags, struct iomap *iomap,
   1082			    struct iomap *srcmap)
   1083{
   1084	struct gfs2_inode *ip = GFS2_I(inode);
   1085	struct metapath mp = { .mp_aheight = 1, };
   1086	int ret;
   1087
   1088	if (gfs2_is_jdata(ip))
   1089		iomap->flags |= IOMAP_F_BUFFER_HEAD;
   1090
   1091	trace_gfs2_iomap_start(ip, pos, length, flags);
   1092	ret = __gfs2_iomap_get(inode, pos, length, flags, iomap, &mp);
   1093	if (ret)
   1094		goto out_unlock;
   1095
   1096	switch(flags & (IOMAP_WRITE | IOMAP_ZERO)) {
   1097	case IOMAP_WRITE:
   1098		if (flags & IOMAP_DIRECT) {
   1099			/*
   1100			 * Silently fall back to buffered I/O for stuffed files
   1101			 * or if we've got a hole (see gfs2_file_direct_write).
   1102			 */
   1103			if (iomap->type != IOMAP_MAPPED)
   1104				ret = -ENOTBLK;
   1105			goto out_unlock;
   1106		}
   1107		break;
   1108	case IOMAP_ZERO:
   1109		if (iomap->type == IOMAP_HOLE)
   1110			goto out_unlock;
   1111		break;
   1112	default:
   1113		goto out_unlock;
   1114	}
   1115
   1116	ret = gfs2_iomap_begin_write(inode, pos, length, flags, iomap, &mp);
   1117
   1118out_unlock:
   1119	release_metapath(&mp);
   1120	trace_gfs2_iomap_end(ip, iomap, ret);
   1121	return ret;
   1122}
   1123
   1124static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length,
   1125			  ssize_t written, unsigned flags, struct iomap *iomap)
   1126{
   1127	struct gfs2_inode *ip = GFS2_I(inode);
   1128	struct gfs2_sbd *sdp = GFS2_SB(inode);
   1129
   1130	switch (flags & (IOMAP_WRITE | IOMAP_ZERO)) {
   1131	case IOMAP_WRITE:
   1132		if (flags & IOMAP_DIRECT)
   1133			return 0;
   1134		break;
   1135	case IOMAP_ZERO:
    1136		if (iomap->type == IOMAP_HOLE)
    1137			return 0;
    1138		break;
    1139	default:
    1140		return 0;
   1141	}
   1142
   1143	if (!gfs2_is_stuffed(ip))
   1144		gfs2_ordered_add_inode(ip);
   1145
   1146	if (inode == sdp->sd_rindex)
   1147		adjust_fs_space(inode);
   1148
   1149	gfs2_inplace_release(ip);
   1150
   1151	if (ip->i_qadata && ip->i_qadata->qa_qd_num)
   1152		gfs2_quota_unlock(ip);
   1153
   1154	if (length != written && (iomap->flags & IOMAP_F_NEW)) {
   1155		/* Deallocate blocks that were just allocated. */
   1156		loff_t hstart = round_up(pos + written, i_blocksize(inode));
   1157		loff_t hend = iomap->offset + iomap->length;
   1158
   1159		if (hstart < hend) {
   1160			truncate_pagecache_range(inode, hstart, hend - 1);
   1161			punch_hole(ip, hstart, hend - hstart);
   1162		}
   1163	}
   1164
   1165	if (unlikely(!written))
   1166		return 0;
   1167
   1168	if (iomap->flags & IOMAP_F_SIZE_CHANGED)
   1169		mark_inode_dirty(inode);
   1170	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
   1171	return 0;
   1172}
   1173
   1174const struct iomap_ops gfs2_iomap_ops = {
   1175	.iomap_begin = gfs2_iomap_begin,
   1176	.iomap_end = gfs2_iomap_end,
   1177};
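/*
 * Usage sketch (not part of the original file): this table is what the
 * generic iomap code calls back into.  gfs2_block_zero_range() below
 * hands it to iomap_zero_range(); other callers pass it to the iomap
 * helpers in the same way, e.g. (illustrative call):
 *
 *	ret = iomap_fiemap(inode, fieinfo, start, len, &gfs2_iomap_ops);
 */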
   1178
   1179/**
   1180 * gfs2_block_map - Map one or more blocks of an inode to a disk block
   1181 * @inode: The inode
   1182 * @lblock: The logical block number
   1183 * @bh_map: The bh to be mapped
    1184 * @create: True if it's ok to alloc blocks to satisfy the request
   1185 *
   1186 * The size of the requested mapping is defined in bh_map->b_size.
   1187 *
   1188 * Clears buffer_mapped(bh_map) and leaves bh_map->b_size unchanged
   1189 * when @lblock is not mapped.  Sets buffer_mapped(bh_map) and
   1190 * bh_map->b_size to indicate the size of the mapping when @lblock and
   1191 * successive blocks are mapped, up to the requested size.
   1192 *
   1193 * Sets buffer_boundary() if a read of metadata will be required
   1194 * before the next block can be mapped. Sets buffer_new() if new
   1195 * blocks were allocated.
   1196 *
   1197 * Returns: errno
   1198 */
   1199
   1200int gfs2_block_map(struct inode *inode, sector_t lblock,
   1201		   struct buffer_head *bh_map, int create)
   1202{
   1203	struct gfs2_inode *ip = GFS2_I(inode);
   1204	loff_t pos = (loff_t)lblock << inode->i_blkbits;
   1205	loff_t length = bh_map->b_size;
   1206	struct iomap iomap = { };
   1207	int ret;
   1208
   1209	clear_buffer_mapped(bh_map);
   1210	clear_buffer_new(bh_map);
   1211	clear_buffer_boundary(bh_map);
   1212	trace_gfs2_bmap(ip, bh_map, lblock, create, 1);
   1213
   1214	if (!create)
   1215		ret = gfs2_iomap_get(inode, pos, length, &iomap);
   1216	else
   1217		ret = gfs2_iomap_alloc(inode, pos, length, &iomap);
   1218	if (ret)
   1219		goto out;
   1220
   1221	if (iomap.length > bh_map->b_size) {
   1222		iomap.length = bh_map->b_size;
   1223		iomap.flags &= ~IOMAP_F_GFS2_BOUNDARY;
   1224	}
   1225	if (iomap.addr != IOMAP_NULL_ADDR)
   1226		map_bh(bh_map, inode->i_sb, iomap.addr >> inode->i_blkbits);
   1227	bh_map->b_size = iomap.length;
   1228	if (iomap.flags & IOMAP_F_GFS2_BOUNDARY)
   1229		set_buffer_boundary(bh_map);
   1230	if (iomap.flags & IOMAP_F_NEW)
   1231		set_buffer_new(bh_map);
   1232
   1233out:
   1234	trace_gfs2_bmap(ip, bh_map, lblock, create, ret);
   1235	return ret;
   1236}
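/*
 * Usage sketch (not part of the original file), following the comment
 * above: the caller sets bh_map->b_size to the mapping size it wants
 * and checks the buffer flags afterwards.
 */
#if 0	/* illustration only */
static void gfs2_block_map_usage_sketch(struct inode *inode, sector_t lblock)
{
	struct buffer_head bh = { .b_size = i_blocksize(inode) };

	if (gfs2_block_map(inode, lblock, &bh, 0) == 0 && buffer_mapped(&bh))
		pr_info("lblock %llu -> dblock %llu (%zu bytes)\n",
			(unsigned long long)lblock,
			(unsigned long long)bh.b_blocknr, bh.b_size);
}
#endif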
   1237
   1238int gfs2_get_extent(struct inode *inode, u64 lblock, u64 *dblock,
   1239		    unsigned int *extlen)
   1240{
   1241	unsigned int blkbits = inode->i_blkbits;
   1242	struct iomap iomap = { };
   1243	unsigned int len;
   1244	int ret;
   1245
   1246	ret = gfs2_iomap_get(inode, lblock << blkbits, *extlen << blkbits,
   1247			     &iomap);
   1248	if (ret)
   1249		return ret;
   1250	if (iomap.type != IOMAP_MAPPED)
   1251		return -EIO;
   1252	*dblock = iomap.addr >> blkbits;
   1253	len = iomap.length >> blkbits;
   1254	if (len < *extlen)
   1255		*extlen = len;
   1256	return 0;
   1257}
   1258
   1259int gfs2_alloc_extent(struct inode *inode, u64 lblock, u64 *dblock,
   1260		      unsigned int *extlen, bool *new)
   1261{
   1262	unsigned int blkbits = inode->i_blkbits;
   1263	struct iomap iomap = { };
   1264	unsigned int len;
   1265	int ret;
   1266
   1267	ret = gfs2_iomap_alloc(inode, lblock << blkbits, *extlen << blkbits,
   1268			       &iomap);
   1269	if (ret)
   1270		return ret;
   1271	if (iomap.type != IOMAP_MAPPED)
   1272		return -EIO;
   1273	*dblock = iomap.addr >> blkbits;
   1274	len = iomap.length >> blkbits;
   1275	if (len < *extlen)
   1276		*extlen = len;
   1277	*new = iomap.flags & IOMAP_F_NEW;
   1278	return 0;
   1279}
   1280
   1281/*
   1282 * NOTE: Never call gfs2_block_zero_range with an open transaction because it
   1283 * uses iomap write to perform its actions, which begin their own transactions
   1284 * (iomap_begin, page_prepare, etc.)
   1285 */
   1286static int gfs2_block_zero_range(struct inode *inode, loff_t from,
   1287				 unsigned int length)
   1288{
   1289	BUG_ON(current->journal_info);
   1290	return iomap_zero_range(inode, from, length, NULL, &gfs2_iomap_ops);
   1291}
   1292
   1293#define GFS2_JTRUNC_REVOKES 8192
   1294
   1295/**
   1296 * gfs2_journaled_truncate - Wrapper for truncate_pagecache for jdata files
   1297 * @inode: The inode being truncated
   1298 * @oldsize: The original (larger) size
   1299 * @newsize: The new smaller size
   1300 *
   1301 * With jdata files, we have to journal a revoke for each block which is
   1302 * truncated. As a result, we need to split this into separate transactions
   1303 * if the number of pages being truncated gets too large.
   1304 */
   1305
   1306static int gfs2_journaled_truncate(struct inode *inode, u64 oldsize, u64 newsize)
   1307{
   1308	struct gfs2_sbd *sdp = GFS2_SB(inode);
   1309	u64 max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize;
   1310	u64 chunk;
   1311	int error;
   1312
   1313	while (oldsize != newsize) {
   1314		struct gfs2_trans *tr;
   1315		unsigned int offs;
   1316
   1317		chunk = oldsize - newsize;
   1318		if (chunk > max_chunk)
   1319			chunk = max_chunk;
   1320
   1321		offs = oldsize & ~PAGE_MASK;
   1322		if (offs && chunk > PAGE_SIZE)
   1323			chunk = offs + ((chunk - offs) & PAGE_MASK);
   1324
   1325		truncate_pagecache(inode, oldsize - chunk);
   1326		oldsize -= chunk;
   1327
   1328		tr = current->journal_info;
   1329		if (!test_bit(TR_TOUCHED, &tr->tr_flags))
   1330			continue;
   1331
   1332		gfs2_trans_end(sdp);
   1333		error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES);
   1334		if (error)
   1335			return error;
   1336	}
   1337
   1338	return 0;
   1339}
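/*
 * Worked example (a sketch, not from the original file), with 4k pages:
 *
 *	oldsize = 0x12345, newsize = 0x100
 *	chunk   = oldsize - newsize = 0x12245	(below max_chunk)
 *	offs    = oldsize & ~PAGE_MASK = 0x345
 *	chunk   = 0x345 + ((0x12245 - 0x345) & PAGE_MASK) = 0x11345
 *
 * so truncate_pagecache() is called with oldsize - chunk = 0x1000: each
 * intermediate step lands on a page boundary, and larger ranges are
 * additionally split at max_chunk so that every transaction stays
 * within the GFS2_JTRUNC_REVOKES revoke budget.
 */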
   1340
   1341static int trunc_start(struct inode *inode, u64 newsize)
   1342{
   1343	struct gfs2_inode *ip = GFS2_I(inode);
   1344	struct gfs2_sbd *sdp = GFS2_SB(inode);
   1345	struct buffer_head *dibh = NULL;
   1346	int journaled = gfs2_is_jdata(ip);
   1347	u64 oldsize = inode->i_size;
   1348	int error;
   1349
   1350	if (!gfs2_is_stuffed(ip)) {
   1351		unsigned int blocksize = i_blocksize(inode);
   1352		unsigned int offs = newsize & (blocksize - 1);
   1353		if (offs) {
   1354			error = gfs2_block_zero_range(inode, newsize,
   1355						      blocksize - offs);
   1356			if (error)
   1357				return error;
   1358		}
   1359	}
   1360	if (journaled)
   1361		error = gfs2_trans_begin(sdp, RES_DINODE + RES_JDATA, GFS2_JTRUNC_REVOKES);
   1362	else
   1363		error = gfs2_trans_begin(sdp, RES_DINODE, 0);
   1364	if (error)
   1365		return error;
   1366
   1367	error = gfs2_meta_inode_buffer(ip, &dibh);
   1368	if (error)
   1369		goto out;
   1370
   1371	gfs2_trans_add_meta(ip->i_gl, dibh);
   1372
   1373	if (gfs2_is_stuffed(ip))
   1374		gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + newsize);
   1375	else
   1376		ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG;
   1377
   1378	i_size_write(inode, newsize);
   1379	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
   1380	gfs2_dinode_out(ip, dibh->b_data);
   1381
   1382	if (journaled)
   1383		error = gfs2_journaled_truncate(inode, oldsize, newsize);
   1384	else
   1385		truncate_pagecache(inode, newsize);
   1386
   1387out:
   1388	brelse(dibh);
   1389	if (current->journal_info)
   1390		gfs2_trans_end(sdp);
   1391	return error;
   1392}
   1393
   1394int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
   1395		   struct iomap *iomap)
   1396{
   1397	struct metapath mp = { .mp_aheight = 1, };
   1398	int ret;
   1399
   1400	ret = __gfs2_iomap_get(inode, pos, length, 0, iomap, &mp);
   1401	release_metapath(&mp);
   1402	return ret;
   1403}
   1404
   1405int gfs2_iomap_alloc(struct inode *inode, loff_t pos, loff_t length,
   1406		     struct iomap *iomap)
   1407{
   1408	struct metapath mp = { .mp_aheight = 1, };
   1409	int ret;
   1410
   1411	ret = __gfs2_iomap_get(inode, pos, length, IOMAP_WRITE, iomap, &mp);
   1412	if (!ret && iomap->type == IOMAP_HOLE)
   1413		ret = __gfs2_iomap_alloc(inode, iomap, &mp);
   1414	release_metapath(&mp);
   1415	return ret;
   1416}
   1417
   1418/**
   1419 * sweep_bh_for_rgrps - find an rgrp in a meta buffer and free blocks therein
   1420 * @ip: inode
   1421 * @rd_gh: holder of resource group glock
   1422 * @bh: buffer head to sweep
   1423 * @start: starting point in bh
   1424 * @end: end point in bh
   1425 * @meta: true if bh points to metadata (rather than data)
   1426 * @btotal: place to keep count of total blocks freed
   1427 *
   1428 * We sweep a metadata buffer (provided by the metapath) for blocks we need to
   1429 * free, and free them all. However, we do it one rgrp at a time. If this
   1430 * block has references to multiple rgrps, we break it into individual
   1431 * transactions. This allows other processes to use the rgrps while we're
   1432 * focused on a single one, for better concurrency / performance.
   1433 * At every transaction boundary, we rewrite the inode into the journal.
   1434 * That way the bitmaps are kept consistent with the inode and we can recover
    1435 * if we're interrupted by power outages.
   1436 *
   1437 * Returns: 0, or return code if an error occurred.
   1438 *          *btotal has the total number of blocks freed
   1439 */
   1440static int sweep_bh_for_rgrps(struct gfs2_inode *ip, struct gfs2_holder *rd_gh,
   1441			      struct buffer_head *bh, __be64 *start, __be64 *end,
   1442			      bool meta, u32 *btotal)
   1443{
   1444	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
   1445	struct gfs2_rgrpd *rgd;
   1446	struct gfs2_trans *tr;
   1447	__be64 *p;
   1448	int blks_outside_rgrp;
   1449	u64 bn, bstart, isize_blks;
   1450	s64 blen; /* needs to be s64 or gfs2_add_inode_blocks breaks */
   1451	int ret = 0;
   1452	bool buf_in_tr = false; /* buffer was added to transaction */
   1453
   1454more_rgrps:
   1455	rgd = NULL;
   1456	if (gfs2_holder_initialized(rd_gh)) {
   1457		rgd = gfs2_glock2rgrp(rd_gh->gh_gl);
   1458		gfs2_assert_withdraw(sdp,
   1459			     gfs2_glock_is_locked_by_me(rd_gh->gh_gl));
   1460	}
   1461	blks_outside_rgrp = 0;
   1462	bstart = 0;
   1463	blen = 0;
   1464
   1465	for (p = start; p < end; p++) {
   1466		if (!*p)
   1467			continue;
   1468		bn = be64_to_cpu(*p);
   1469
   1470		if (rgd) {
   1471			if (!rgrp_contains_block(rgd, bn)) {
   1472				blks_outside_rgrp++;
   1473				continue;
   1474			}
   1475		} else {
   1476			rgd = gfs2_blk2rgrpd(sdp, bn, true);
   1477			if (unlikely(!rgd)) {
   1478				ret = -EIO;
   1479				goto out;
   1480			}
   1481			ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
   1482						 LM_FLAG_NODE_SCOPE, rd_gh);
   1483			if (ret)
   1484				goto out;
   1485
   1486			/* Must be done with the rgrp glock held: */
   1487			if (gfs2_rs_active(&ip->i_res) &&
   1488			    rgd == ip->i_res.rs_rgd)
   1489				gfs2_rs_deltree(&ip->i_res);
   1490		}
   1491
   1492		/* The size of our transactions will be unknown until we
   1493		   actually process all the metadata blocks that relate to
   1494		   the rgrp. So we estimate. We know it can't be more than
   1495		   the dinode's i_blocks and we don't want to exceed the
   1496		   journal flush threshold, sd_log_thresh2. */
   1497		if (current->journal_info == NULL) {
   1498			unsigned int jblocks_rqsted, revokes;
   1499
   1500			jblocks_rqsted = rgd->rd_length + RES_DINODE +
   1501				RES_INDIRECT;
   1502			isize_blks = gfs2_get_inode_blocks(&ip->i_inode);
   1503			if (isize_blks > atomic_read(&sdp->sd_log_thresh2))
   1504				jblocks_rqsted +=
   1505					atomic_read(&sdp->sd_log_thresh2);
   1506			else
   1507				jblocks_rqsted += isize_blks;
   1508			revokes = jblocks_rqsted;
   1509			if (meta)
   1510				revokes += end - start;
   1511			else if (ip->i_depth)
   1512				revokes += sdp->sd_inptrs;
   1513			ret = gfs2_trans_begin(sdp, jblocks_rqsted, revokes);
   1514			if (ret)
   1515				goto out_unlock;
   1516			down_write(&ip->i_rw_mutex);
   1517		}
   1518		/* check if we will exceed the transaction blocks requested */
   1519		tr = current->journal_info;
   1520		if (tr->tr_num_buf_new + RES_STATFS +
   1521		    RES_QUOTA >= atomic_read(&sdp->sd_log_thresh2)) {
   1522			/* We set blks_outside_rgrp to ensure the loop will
   1523			   be repeated for the same rgrp, but with a new
   1524			   transaction. */
   1525			blks_outside_rgrp++;
   1526			/* This next part is tricky. If the buffer was added
   1527			   to the transaction, we've already set some block
   1528			   pointers to 0, so we better follow through and free
   1529			   them, or we will introduce corruption (so break).
   1530			   This may be impossible, or at least rare, but I
   1531			   decided to cover the case regardless.
   1532
   1533			   If the buffer was not added to the transaction
   1534			   (this call), doing so would exceed our transaction
   1535			   size, so we need to end the transaction and start a
   1536			   new one (so goto). */
   1537
   1538			if (buf_in_tr)
   1539				break;
   1540			goto out_unlock;
   1541		}
   1542
   1543		gfs2_trans_add_meta(ip->i_gl, bh);
   1544		buf_in_tr = true;
   1545		*p = 0;
   1546		if (bstart + blen == bn) {
   1547			blen++;
   1548			continue;
   1549		}
   1550		if (bstart) {
   1551			__gfs2_free_blocks(ip, rgd, bstart, (u32)blen, meta);
   1552			(*btotal) += blen;
   1553			gfs2_add_inode_blocks(&ip->i_inode, -blen);
   1554		}
   1555		bstart = bn;
   1556		blen = 1;
   1557	}
   1558	if (bstart) {
   1559		__gfs2_free_blocks(ip, rgd, bstart, (u32)blen, meta);
   1560		(*btotal) += blen;
   1561		gfs2_add_inode_blocks(&ip->i_inode, -blen);
   1562	}
   1563out_unlock:
   1564	if (!ret && blks_outside_rgrp) { /* If buffer still has non-zero blocks
   1565					    outside the rgrp we just processed,
   1566					    do it all over again. */
   1567		if (current->journal_info) {
   1568			struct buffer_head *dibh;
   1569
   1570			ret = gfs2_meta_inode_buffer(ip, &dibh);
   1571			if (ret)
   1572				goto out;
   1573
   1574			/* Every transaction boundary, we rewrite the dinode
   1575			   to keep its di_blocks current in case of failure. */
   1576			ip->i_inode.i_mtime = ip->i_inode.i_ctime =
   1577				current_time(&ip->i_inode);
   1578			gfs2_trans_add_meta(ip->i_gl, dibh);
   1579			gfs2_dinode_out(ip, dibh->b_data);
   1580			brelse(dibh);
   1581			up_write(&ip->i_rw_mutex);
   1582			gfs2_trans_end(sdp);
   1583			buf_in_tr = false;
   1584		}
   1585		gfs2_glock_dq_uninit(rd_gh);
   1586		cond_resched();
   1587		goto more_rgrps;
   1588	}
   1589out:
   1590	return ret;
   1591}
   1592
   1593static bool mp_eq_to_hgt(struct metapath *mp, __u16 *list, unsigned int h)
   1594{
   1595	if (memcmp(mp->mp_list, list, h * sizeof(mp->mp_list[0])))
   1596		return false;
   1597	return true;
   1598}
   1599
   1600/**
   1601 * find_nonnull_ptr - find a non-null pointer given a metapath and height
   1602 * @sdp: The superblock
   1603 * @mp: starting metapath
   1604 * @h: desired height to search
   1605 * @end_list: See punch_hole().
   1606 * @end_aligned: See punch_hole().
   1607 *
   1608 * Assumes the metapath is valid (with buffers) out to height h.
   1609 * Returns: true if a non-null pointer was found in the metapath buffer
   1610 *          false if all remaining pointers are NULL in the buffer
   1611 */
   1612static bool find_nonnull_ptr(struct gfs2_sbd *sdp, struct metapath *mp,
   1613			     unsigned int h,
   1614			     __u16 *end_list, unsigned int end_aligned)
   1615{
   1616	struct buffer_head *bh = mp->mp_bh[h];
   1617	__be64 *first, *ptr, *end;
   1618
   1619	first = metaptr1(h, mp);
   1620	ptr = first + mp->mp_list[h];
   1621	end = (__be64 *)(bh->b_data + bh->b_size);
   1622	if (end_list && mp_eq_to_hgt(mp, end_list, h)) {
   1623		bool keep_end = h < end_aligned;
   1624		end = first + end_list[h] + keep_end;
   1625	}
   1626
   1627	while (ptr < end) {
   1628		if (*ptr) { /* if we have a non-null pointer */
   1629			mp->mp_list[h] = ptr - first;
   1630			h++;
   1631			if (h < GFS2_MAX_META_HEIGHT)
   1632				mp->mp_list[h] = 0;
   1633			return true;
   1634		}
   1635		ptr++;
   1636	}
   1637	return false;
   1638}
   1639
   1640enum dealloc_states {
   1641	DEALLOC_MP_FULL = 0,    /* Strip a metapath with all buffers read in */
   1642	DEALLOC_MP_LOWER = 1,   /* lower the metapath strip height */
   1643	DEALLOC_FILL_MP = 2,  /* Fill in the metapath to the given height. */
   1644	DEALLOC_DONE = 3,       /* process complete */
   1645};
   1646
   1647static inline void
   1648metapointer_range(struct metapath *mp, int height,
   1649		  __u16 *start_list, unsigned int start_aligned,
   1650		  __u16 *end_list, unsigned int end_aligned,
   1651		  __be64 **start, __be64 **end)
   1652{
   1653	struct buffer_head *bh = mp->mp_bh[height];
   1654	__be64 *first;
   1655
   1656	first = metaptr1(height, mp);
   1657	*start = first;
   1658	if (mp_eq_to_hgt(mp, start_list, height)) {
   1659		bool keep_start = height < start_aligned;
   1660		*start = first + start_list[height] + keep_start;
   1661	}
   1662	*end = (__be64 *)(bh->b_data + bh->b_size);
   1663	if (end_list && mp_eq_to_hgt(mp, end_list, height)) {
   1664		bool keep_end = height < end_aligned;
   1665		*end = first + end_list[height] + keep_end;
   1666	}
   1667}
   1668
   1669static inline bool walk_done(struct gfs2_sbd *sdp,
   1670			     struct metapath *mp, int height,
   1671			     __u16 *end_list, unsigned int end_aligned)
   1672{
   1673	__u16 end;
   1674
   1675	if (end_list) {
   1676		bool keep_end = height < end_aligned;
   1677		if (!mp_eq_to_hgt(mp, end_list, height))
   1678			return false;
   1679		end = end_list[height] + keep_end;
   1680	} else
   1681		end = (height > 0) ? sdp->sd_inptrs : sdp->sd_diptrs;
   1682	return mp->mp_list[height] >= end;
   1683}
   1684
   1685/**
   1686 * punch_hole - deallocate blocks in a file
   1687 * @ip: inode to truncate
   1688 * @offset: the start of the hole
   1689 * @length: the size of the hole (or 0 for truncate)
   1690 *
   1691 * Punch a hole into a file or truncate a file at a given position.  This
   1692 * function operates in whole blocks (@offset and @length are rounded
    1693 * accordingly); partially covered blocks must be zeroed by the caller.
   1694 *
   1695 * This function works from the bottom up, and from the right to the left. In
   1696 * other words, it strips off the highest layer (data) before stripping any of
   1697 * the metadata. Doing it this way is best in case the operation is interrupted
   1698 * by power failure, etc.  The dinode is rewritten in every transaction to
   1699 * guarantee integrity.
   1700 */
   1701static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
   1702{
   1703	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
   1704	u64 maxsize = sdp->sd_heightsize[ip->i_height];
   1705	struct metapath mp = {};
   1706	struct buffer_head *dibh, *bh;
   1707	struct gfs2_holder rd_gh;
   1708	unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
   1709	u64 lblock = (offset + (1 << bsize_shift) - 1) >> bsize_shift;
   1710	__u16 start_list[GFS2_MAX_META_HEIGHT];
   1711	__u16 __end_list[GFS2_MAX_META_HEIGHT], *end_list = NULL;
    1712	unsigned int start_aligned, end_aligned = 0;
   1713	unsigned int strip_h = ip->i_height - 1;
   1714	u32 btotal = 0;
   1715	int ret, state;
   1716	int mp_h; /* metapath buffers are read in to this height */
   1717	u64 prev_bnr = 0;
   1718	__be64 *start, *end;
   1719
   1720	if (offset >= maxsize) {
   1721		/*
    1722		 * The starting point lies beyond the allocated metadata;
    1723		 * there are no blocks to deallocate.
   1724		 */
   1725		return 0;
   1726	}
   1727
   1728	/*
   1729	 * The start position of the hole is defined by lblock, start_list, and
   1730	 * start_aligned.  The end position of the hole is defined by lend,
   1731	 * end_list, and end_aligned.
   1732	 *
   1733	 * start_aligned and end_aligned define down to which height the start
   1734	 * and end positions are aligned to the metadata tree (i.e., the
   1735	 * position is a multiple of the metadata granularity at the height
   1736	 * above).  This determines at which heights additional meta pointers
    1737		 * need to be preserved for the remaining data.
   1738	 */
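
	/*
	 * Editorial worked example, assuming 4k blocks (bsize_shift ==
	 * 12): offset == 0x6000 and length == 0x3000 give lblock == 6
	 * (start rounded up) and lend == 9 (end rounded down), so blocks
	 * 6..8 are deallocated.  With offset == 0x6001, lblock rounds up
	 * to 7 and the partially covered block 6 is left for the caller
	 * to zero, matching the rule in the comment above punch_hole().
	 */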
   1739
   1740	if (length) {
   1741		u64 end_offset = offset + length;
   1742		u64 lend;
   1743
   1744		/*
   1745		 * Clip the end at the maximum file size for the given height:
   1746		 * that's how far the metadata goes; files bigger than that
   1747		 * will have additional layers of indirection.
   1748		 */
   1749		if (end_offset > maxsize)
   1750			end_offset = maxsize;
   1751		lend = end_offset >> bsize_shift;
   1752
   1753		if (lblock >= lend)
   1754			return 0;
   1755
   1756		find_metapath(sdp, lend, &mp, ip->i_height);
   1757		end_list = __end_list;
   1758		memcpy(end_list, mp.mp_list, sizeof(mp.mp_list));
   1759
   1760		for (mp_h = ip->i_height - 1; mp_h > 0; mp_h--) {
   1761			if (end_list[mp_h])
   1762				break;
   1763		}
   1764		end_aligned = mp_h;
   1765	}
   1766
   1767	find_metapath(sdp, lblock, &mp, ip->i_height);
   1768	memcpy(start_list, mp.mp_list, sizeof(start_list));
   1769
   1770	for (mp_h = ip->i_height - 1; mp_h > 0; mp_h--) {
   1771		if (start_list[mp_h])
   1772			break;
   1773	}
   1774	start_aligned = mp_h;
   1775
   1776	ret = gfs2_meta_inode_buffer(ip, &dibh);
   1777	if (ret)
   1778		return ret;
   1779
   1780	mp.mp_bh[0] = dibh;
   1781	ret = lookup_metapath(ip, &mp);
   1782	if (ret)
   1783		goto out_metapath;
   1784
   1785	/* issue read-ahead on metadata */
   1786	for (mp_h = 0; mp_h < mp.mp_aheight - 1; mp_h++) {
   1787		metapointer_range(&mp, mp_h, start_list, start_aligned,
   1788				  end_list, end_aligned, &start, &end);
   1789		gfs2_metapath_ra(ip->i_gl, start, end);
   1790	}
   1791
   1792	if (mp.mp_aheight == ip->i_height)
   1793		state = DEALLOC_MP_FULL; /* We have a complete metapath */
   1794	else
   1795		state = DEALLOC_FILL_MP; /* deal with partial metapath */
   1796
   1797	ret = gfs2_rindex_update(sdp);
   1798	if (ret)
   1799		goto out_metapath;
   1800
   1801	ret = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
   1802	if (ret)
   1803		goto out_metapath;
   1804	gfs2_holder_mark_uninitialized(&rd_gh);
   1805
   1806	mp_h = strip_h;
   1807
   1808	while (state != DEALLOC_DONE) {
   1809		switch (state) {
   1810		/* Truncate a full metapath at the given strip height.
   1811		 * Note that strip_h == mp_h in order to be in this state. */
   1812		case DEALLOC_MP_FULL:
   1813			bh = mp.mp_bh[mp_h];
   1814			gfs2_assert_withdraw(sdp, bh);
   1815			if (gfs2_assert_withdraw(sdp,
   1816						 prev_bnr != bh->b_blocknr)) {
    1817			fs_emerg(sdp, "inode %llu, block:%llu, i_h:%u, "
    1818				 "s_h:%u, mp_h:%u\n",
   1819				       (unsigned long long)ip->i_no_addr,
   1820				       prev_bnr, ip->i_height, strip_h, mp_h);
   1821			}
   1822			prev_bnr = bh->b_blocknr;
   1823
   1824			if (gfs2_metatype_check(sdp, bh,
   1825						(mp_h ? GFS2_METATYPE_IN :
   1826							GFS2_METATYPE_DI))) {
   1827				ret = -EIO;
   1828				goto out;
   1829			}
   1830
   1831			/*
   1832			 * Below, passing end_aligned as 0 gives us the
   1833			 * metapointer range excluding the end point: the end
   1834			 * point is the first metapath we must not deallocate!
   1835			 */
   1836
   1837			metapointer_range(&mp, mp_h, start_list, start_aligned,
   1838					  end_list, 0 /* end_aligned */,
   1839					  &start, &end);
   1840			ret = sweep_bh_for_rgrps(ip, &rd_gh, mp.mp_bh[mp_h],
   1841						 start, end,
   1842						 mp_h != ip->i_height - 1,
   1843						 &btotal);
   1844
    1845			/* If we hit an error or have just swept the dinode
    1846			   buffer, exit. */
   1847			if (ret || !mp_h) {
   1848				state = DEALLOC_DONE;
   1849				break;
   1850			}
   1851			state = DEALLOC_MP_LOWER;
   1852			break;
   1853
   1854		/* lower the metapath strip height */
   1855		case DEALLOC_MP_LOWER:
   1856			/* We're done with the current buffer, so release it,
   1857			   unless it's the dinode buffer. Then back up to the
   1858			   previous pointer. */
   1859			if (mp_h) {
   1860				brelse(mp.mp_bh[mp_h]);
   1861				mp.mp_bh[mp_h] = NULL;
   1862			}
   1863			/* If we can't get any lower in height, we've stripped
   1864			   off all we can. Next step is to back up and start
   1865			   stripping the previous level of metadata. */
   1866			if (mp_h == 0) {
   1867				strip_h--;
   1868				memcpy(mp.mp_list, start_list, sizeof(start_list));
   1869				mp_h = strip_h;
   1870				state = DEALLOC_FILL_MP;
   1871				break;
   1872			}
   1873			mp.mp_list[mp_h] = 0;
   1874			mp_h--; /* search one metadata height down */
   1875			mp.mp_list[mp_h]++;
   1876			if (walk_done(sdp, &mp, mp_h, end_list, end_aligned))
   1877				break;
   1878			/* Here we've found a part of the metapath that is not
   1879			 * allocated. We need to search at that height for the
   1880			 * next non-null pointer. */
   1881			if (find_nonnull_ptr(sdp, &mp, mp_h, end_list, end_aligned)) {
   1882				state = DEALLOC_FILL_MP;
   1883				mp_h++;
   1884			}
   1885			/* No more non-null pointers at this height. Back up
   1886			   to the previous height and try again. */
   1887			break; /* loop around in the same state */
   1888
   1889		/* Fill the metapath with buffers to the given height. */
   1890		case DEALLOC_FILL_MP:
   1891			/* Fill the buffers out to the current height. */
   1892			ret = fillup_metapath(ip, &mp, mp_h);
   1893			if (ret < 0)
   1894				goto out;
   1895
   1896			/* On the first pass, issue read-ahead on metadata. */
   1897			if (mp.mp_aheight > 1 && strip_h == ip->i_height - 1) {
   1898				unsigned int height = mp.mp_aheight - 1;
   1899
   1900				/* No read-ahead for data blocks. */
   1901				if (mp.mp_aheight - 1 == strip_h)
   1902					height--;
   1903
   1904				for (; height >= mp.mp_aheight - ret; height--) {
   1905					metapointer_range(&mp, height,
   1906							  start_list, start_aligned,
   1907							  end_list, end_aligned,
   1908							  &start, &end);
   1909					gfs2_metapath_ra(ip->i_gl, start, end);
   1910				}
   1911			}
   1912
   1913			/* If buffers found for the entire strip height */
   1914			if (mp.mp_aheight - 1 == strip_h) {
   1915				state = DEALLOC_MP_FULL;
   1916				break;
   1917			}
   1918			if (mp.mp_aheight < ip->i_height) /* We have a partial height */
   1919				mp_h = mp.mp_aheight - 1;
   1920
   1921			/* If we find a non-null block pointer, crawl a bit
   1922			   higher up in the metapath and try again, otherwise
   1923			   we need to look lower for a new starting point. */
   1924			if (find_nonnull_ptr(sdp, &mp, mp_h, end_list, end_aligned))
   1925				mp_h++;
   1926			else
   1927				state = DEALLOC_MP_LOWER;
   1928			break;
   1929		}
   1930	}
   1931
   1932	if (btotal) {
   1933		if (current->journal_info == NULL) {
   1934			ret = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS +
   1935					       RES_QUOTA, 0);
   1936			if (ret)
   1937				goto out;
   1938			down_write(&ip->i_rw_mutex);
   1939		}
   1940		gfs2_statfs_change(sdp, 0, +btotal, 0);
   1941		gfs2_quota_change(ip, -(s64)btotal, ip->i_inode.i_uid,
   1942				  ip->i_inode.i_gid);
   1943		ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
   1944		gfs2_trans_add_meta(ip->i_gl, dibh);
   1945		gfs2_dinode_out(ip, dibh->b_data);
   1946		up_write(&ip->i_rw_mutex);
   1947		gfs2_trans_end(sdp);
   1948	}
   1949
   1950out:
   1951	if (gfs2_holder_initialized(&rd_gh))
   1952		gfs2_glock_dq_uninit(&rd_gh);
   1953	if (current->journal_info) {
   1954		up_write(&ip->i_rw_mutex);
   1955		gfs2_trans_end(sdp);
   1956		cond_resched();
   1957	}
   1958	gfs2_quota_unhold(ip);
   1959out_metapath:
   1960	release_metapath(&mp);
   1961	return ret;
   1962}
   1963
   1964static int trunc_end(struct gfs2_inode *ip)
   1965{
   1966	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
   1967	struct buffer_head *dibh;
   1968	int error;
   1969
   1970	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
   1971	if (error)
   1972		return error;
   1973
   1974	down_write(&ip->i_rw_mutex);
   1975
   1976	error = gfs2_meta_inode_buffer(ip, &dibh);
   1977	if (error)
   1978		goto out;
   1979
   1980	if (!i_size_read(&ip->i_inode)) {
   1981		ip->i_height = 0;
   1982		ip->i_goal = ip->i_no_addr;
   1983		gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
   1984		gfs2_ordered_del_inode(ip);
   1985	}
   1986	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
   1987	ip->i_diskflags &= ~GFS2_DIF_TRUNC_IN_PROG;
   1988
   1989	gfs2_trans_add_meta(ip->i_gl, dibh);
   1990	gfs2_dinode_out(ip, dibh->b_data);
   1991	brelse(dibh);
   1992
   1993out:
   1994	up_write(&ip->i_rw_mutex);
   1995	gfs2_trans_end(sdp);
   1996	return error;
   1997}
   1998
   1999/**
   2000 * do_shrink - make a file smaller
   2001 * @inode: the inode
   2002 * @newsize: the size to make the file
   2003 *
    2004 * Called with an exclusive lock on @inode. @newsize must
   2005 * be equal to or smaller than the current inode size.
   2006 *
   2007 * Returns: errno
   2008 */
   2009
   2010static int do_shrink(struct inode *inode, u64 newsize)
   2011{
   2012	struct gfs2_inode *ip = GFS2_I(inode);
   2013	int error;
   2014
   2015	error = trunc_start(inode, newsize);
   2016	if (error < 0)
   2017		return error;
   2018	if (gfs2_is_stuffed(ip))
   2019		return 0;
   2020
   2021	error = punch_hole(ip, newsize, 0);
   2022	if (error == 0)
   2023		error = trunc_end(ip);
   2024
   2025	return error;
   2026}
   2027
   2028void gfs2_trim_blocks(struct inode *inode)
   2029{
   2030	int ret;
   2031
   2032	ret = do_shrink(inode, inode->i_size);
   2033	WARN_ON(ret != 0);
   2034}
   2035
   2036/**
   2037 * do_grow - Touch and update inode size
   2038 * @inode: The inode
   2039 * @size: The new size
   2040 *
   2041 * This function updates the timestamps on the inode and
   2042 * may also increase the size of the inode. This function
   2043 * must not be called with @size any smaller than the current
   2044 * inode size.
   2045 *
   2046 * Although it is not strictly required to unstuff files here,
   2047 * earlier versions of GFS2 have a bug in the stuffed file reading
   2048 * code which will result in a buffer overrun if the size is larger
   2049 * than the max stuffed file size. In order to prevent this from
   2050 * occurring, such files are unstuffed, but in other cases we can
   2051 * just update the inode size directly.
   2052 *
   2053 * Returns: 0 on success, or -ve on error
   2054 */
   2055
   2056static int do_grow(struct inode *inode, u64 size)
   2057{
   2058	struct gfs2_inode *ip = GFS2_I(inode);
   2059	struct gfs2_sbd *sdp = GFS2_SB(inode);
   2060	struct gfs2_alloc_parms ap = { .target = 1, };
   2061	struct buffer_head *dibh;
   2062	int error;
   2063	int unstuff = 0;
   2064
   2065	if (gfs2_is_stuffed(ip) && size > gfs2_max_stuffed_size(ip)) {
   2066		error = gfs2_quota_lock_check(ip, &ap);
   2067		if (error)
   2068			return error;
   2069
   2070		error = gfs2_inplace_reserve(ip, &ap);
   2071		if (error)
   2072			goto do_grow_qunlock;
   2073		unstuff = 1;
   2074	}
   2075
   2076	error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT +
   2077				 (unstuff &&
   2078				  gfs2_is_jdata(ip) ? RES_JDATA : 0) +
   2079				 (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF ?
   2080				  0 : RES_QUOTA), 0);
   2081	if (error)
   2082		goto do_grow_release;
   2083
   2084	if (unstuff) {
   2085		error = gfs2_unstuff_dinode(ip);
   2086		if (error)
   2087			goto do_end_trans;
   2088	}
   2089
   2090	error = gfs2_meta_inode_buffer(ip, &dibh);
   2091	if (error)
   2092		goto do_end_trans;
   2093
   2094	truncate_setsize(inode, size);
   2095	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
   2096	gfs2_trans_add_meta(ip->i_gl, dibh);
   2097	gfs2_dinode_out(ip, dibh->b_data);
   2098	brelse(dibh);
   2099
   2100do_end_trans:
   2101	gfs2_trans_end(sdp);
   2102do_grow_release:
   2103	if (unstuff) {
   2104		gfs2_inplace_release(ip);
   2105do_grow_qunlock:
   2106		gfs2_quota_unlock(ip);
   2107	}
   2108	return error;
   2109}
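
/*
 * Editorial note: gfs2_max_stuffed_size() is the dinode block size minus
 * the on-disk dinode header, so with 4k blocks a stuffed file holds a
 * few kilobytes of data in the dinode block itself.  Growing a stuffed
 * file past that limit takes the unstuff path above (quota check, block
 * reservation, gfs2_unstuff_dinode() inside the transaction); growing
 * within the limit, or growing an already unstuffed file, only needs
 * the timestamp and size update at the end.
 */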
   2110
   2111/**
   2112 * gfs2_setattr_size - make a file a given size
   2113 * @inode: the inode
   2114 * @newsize: the size to make the file
   2115 *
   2116 * The file size can grow, shrink, or stay the same size. This
   2117 * is called holding i_rwsem and an exclusive glock on the inode
   2118 * in question.
   2119 *
   2120 * Returns: errno
   2121 */
   2122
   2123int gfs2_setattr_size(struct inode *inode, u64 newsize)
   2124{
   2125	struct gfs2_inode *ip = GFS2_I(inode);
   2126	int ret;
   2127
   2128	BUG_ON(!S_ISREG(inode->i_mode));
   2129
   2130	ret = inode_newsize_ok(inode, newsize);
   2131	if (ret)
   2132		return ret;
   2133
   2134	inode_dio_wait(inode);
   2135
   2136	ret = gfs2_qa_get(ip);
   2137	if (ret)
   2138		goto out;
   2139
   2140	if (newsize >= inode->i_size) {
   2141		ret = do_grow(inode, newsize);
   2142		goto out;
   2143	}
   2144
   2145	ret = do_shrink(inode, newsize);
   2146out:
   2147	gfs2_rs_delete(ip);
   2148	gfs2_qa_put(ip);
   2149	return ret;
   2150}
   2151
   2152int gfs2_truncatei_resume(struct gfs2_inode *ip)
   2153{
   2154	int error;
   2155	error = punch_hole(ip, i_size_read(&ip->i_inode), 0);
   2156	if (!error)
   2157		error = trunc_end(ip);
   2158	return error;
   2159}
   2160
   2161int gfs2_file_dealloc(struct gfs2_inode *ip)
   2162{
   2163	return punch_hole(ip, 0, 0);
   2164}
   2165
   2166/**
   2167 * gfs2_free_journal_extents - Free cached journal bmap info
   2168 * @jd: The journal
   2169 *
   2170 */
   2171
   2172void gfs2_free_journal_extents(struct gfs2_jdesc *jd)
   2173{
   2174	struct gfs2_journal_extent *jext;
   2175
    2176	while (!list_empty(&jd->extent_list)) {
   2177		jext = list_first_entry(&jd->extent_list, struct gfs2_journal_extent, list);
   2178		list_del(&jext->list);
   2179		kfree(jext);
   2180	}
   2181}
   2182
   2183/**
   2184 * gfs2_add_jextent - Add or merge a new extent to extent cache
   2185 * @jd: The journal descriptor
   2186 * @lblock: The logical block at start of new extent
   2187 * @dblock: The physical block at start of new extent
   2188 * @blocks: Size of extent in fs blocks
   2189 *
   2190 * Returns: 0 on success or -ENOMEM
   2191 */
   2192
   2193static int gfs2_add_jextent(struct gfs2_jdesc *jd, u64 lblock, u64 dblock, u64 blocks)
   2194{
   2195	struct gfs2_journal_extent *jext;
   2196
   2197	if (!list_empty(&jd->extent_list)) {
   2198		jext = list_last_entry(&jd->extent_list, struct gfs2_journal_extent, list);
   2199		if ((jext->dblock + jext->blocks) == dblock) {
   2200			jext->blocks += blocks;
   2201			return 0;
   2202		}
   2203	}
   2204
   2205	jext = kzalloc(sizeof(struct gfs2_journal_extent), GFP_NOFS);
   2206	if (jext == NULL)
   2207		return -ENOMEM;
   2208	jext->dblock = dblock;
   2209	jext->lblock = lblock;
   2210	jext->blocks = blocks;
   2211	list_add_tail(&jext->list, &jd->extent_list);
   2212	jd->nr_extents++;
   2213	return 0;
   2214}
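
/*
 * Editorial usage sketch with hypothetical block numbers: mapping a
 * physically contiguous journal produces a single extent.
 *
 *   gfs2_add_jextent(jd, 0, 1000, 8);  first extent {lblock 0, dblock 1000, 8}
 *   gfs2_add_jextent(jd, 8, 1008, 8);  1000 + 8 == 1008, so the extent
 *                                      merely grows to 16 blocks
 *   gfs2_add_jextent(jd, 16, 5000, 8); discontiguous, so a second extent
 *                                      is allocated and jd->nr_extents
 *                                      becomes 2
 */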
   2215
   2216/**
   2217 * gfs2_map_journal_extents - Cache journal bmap info
   2218 * @sdp: The super block
   2219 * @jd: The journal to map
   2220 *
   2221 * Create a reusable "extent" mapping from all logical
   2222 * blocks to all physical blocks for the given journal.  This will save
   2223 * us time when writing journal blocks.  Most journals will have only one
    2224 * extent that maps all their logical blocks.  That's because mkfs.gfs2
   2225 * arranges the journal blocks sequentially to maximize performance.
   2226 * So the extent would map the first block for the entire file length.
    2227 * However, gfs2_jadd can run while other file activity is in progress,
    2228 * so those journals may not be sequential.  Less likely is the case where
   2229 * the users created their own journals by mounting the metafs and
   2230 * laying it out.  But it's still possible.  These journals might have
   2231 * several extents.
   2232 *
   2233 * Returns: 0 on success, or error on failure
   2234 */
   2235
   2236int gfs2_map_journal_extents(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd)
   2237{
   2238	u64 lblock = 0;
   2239	u64 lblock_stop;
   2240	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
   2241	struct buffer_head bh;
   2242	unsigned int shift = sdp->sd_sb.sb_bsize_shift;
   2243	u64 size;
   2244	int rc;
   2245	ktime_t start, end;
   2246
   2247	start = ktime_get();
   2248	lblock_stop = i_size_read(jd->jd_inode) >> shift;
   2249	size = (lblock_stop - lblock) << shift;
   2250	jd->nr_extents = 0;
   2251	WARN_ON(!list_empty(&jd->extent_list));
   2252
   2253	do {
   2254		bh.b_state = 0;
   2255		bh.b_blocknr = 0;
   2256		bh.b_size = size;
   2257		rc = gfs2_block_map(jd->jd_inode, lblock, &bh, 0);
   2258		if (rc || !buffer_mapped(&bh))
   2259			goto fail;
   2260		rc = gfs2_add_jextent(jd, lblock, bh.b_blocknr, bh.b_size >> shift);
   2261		if (rc)
   2262			goto fail;
   2263		size -= bh.b_size;
   2264		lblock += (bh.b_size >> ip->i_inode.i_blkbits);
    2265	} while (size > 0);
   2266
   2267	end = ktime_get();
    2268	fs_info(sdp, "journal %u mapped with %u extents in %lldms\n", jd->jd_jid,
   2269		jd->nr_extents, ktime_ms_delta(end, start));
   2270	return 0;
   2271
   2272fail:
   2273	fs_warn(sdp, "error %d mapping journal %u at offset %llu (extent %u)\n",
   2274		rc, jd->jd_jid,
   2275		(unsigned long long)(i_size_read(jd->jd_inode) - size),
   2276		jd->nr_extents);
   2277	fs_warn(sdp, "bmap=%d lblock=%llu block=%llu, state=0x%08lx, size=%llu\n",
   2278		rc, (unsigned long long)lblock, (unsigned long long)bh.b_blocknr,
   2279		bh.b_state, (unsigned long long)bh.b_size);
   2280	gfs2_free_journal_extents(jd);
   2281	return rc;
   2282}
   2283
   2284/**
   2285 * gfs2_write_alloc_required - figure out if a write will require an allocation
   2286 * @ip: the file being written to
   2287 * @offset: the offset to write to
   2288 * @len: the number of bytes being written
   2289 *
   2290 * Returns: 1 if an alloc is required, 0 otherwise
   2291 */
   2292
   2293int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
   2294			      unsigned int len)
   2295{
   2296	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
   2297	struct buffer_head bh;
   2298	unsigned int shift;
   2299	u64 lblock, lblock_stop, size;
   2300	u64 end_of_file;
   2301
   2302	if (!len)
   2303		return 0;
   2304
   2305	if (gfs2_is_stuffed(ip)) {
   2306		if (offset + len > gfs2_max_stuffed_size(ip))
   2307			return 1;
   2308		return 0;
   2309	}
   2310
   2311	shift = sdp->sd_sb.sb_bsize_shift;
   2312	BUG_ON(gfs2_is_dir(ip));
   2313	end_of_file = (i_size_read(&ip->i_inode) + sdp->sd_sb.sb_bsize - 1) >> shift;
   2314	lblock = offset >> shift;
   2315	lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift;
   2316	if (lblock_stop > end_of_file && ip != GFS2_I(sdp->sd_rindex))
   2317		return 1;
   2318
   2319	size = (lblock_stop - lblock) << shift;
   2320	do {
   2321		bh.b_state = 0;
   2322		bh.b_size = size;
   2323		gfs2_block_map(&ip->i_inode, lblock, &bh, 0);
   2324		if (!buffer_mapped(&bh))
   2325			return 1;
   2326		size -= bh.b_size;
   2327		lblock += (bh.b_size >> ip->i_inode.i_blkbits);
    2328	} while (size > 0);
   2329
   2330	return 0;
   2331}
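
/*
 * Editorial example, assuming 4k blocks: for an unstuffed file with
 * i_size == 10000, end_of_file == (10000 + 4095) >> 12 == 3.  Writing
 * 100 bytes at offset 16384 gives lblock == 4 and lblock_stop == 5;
 * since 5 > 3 the write extends past the allocated range and 1 is
 * returned immediately.  A write within blocks 0..2 instead walks
 * gfs2_block_map() to check whether any of those blocks is a hole.
 */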
   2332
   2333static int stuffed_zero_range(struct inode *inode, loff_t offset, loff_t length)
   2334{
   2335	struct gfs2_inode *ip = GFS2_I(inode);
   2336	struct buffer_head *dibh;
   2337	int error;
   2338
   2339	if (offset >= inode->i_size)
   2340		return 0;
   2341	if (offset + length > inode->i_size)
   2342		length = inode->i_size - offset;
   2343
   2344	error = gfs2_meta_inode_buffer(ip, &dibh);
   2345	if (error)
   2346		return error;
   2347	gfs2_trans_add_meta(ip->i_gl, dibh);
   2348	memset(dibh->b_data + sizeof(struct gfs2_dinode) + offset, 0,
   2349	       length);
   2350	brelse(dibh);
   2351	return 0;
   2352}
   2353
   2354static int gfs2_journaled_truncate_range(struct inode *inode, loff_t offset,
   2355					 loff_t length)
   2356{
   2357	struct gfs2_sbd *sdp = GFS2_SB(inode);
   2358	loff_t max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize;
   2359	int error;
   2360
   2361	while (length) {
   2362		struct gfs2_trans *tr;
   2363		loff_t chunk;
   2364		unsigned int offs;
   2365
   2366		chunk = length;
   2367		if (chunk > max_chunk)
   2368			chunk = max_chunk;
   2369
   2370		offs = offset & ~PAGE_MASK;
   2371		if (offs && chunk > PAGE_SIZE)
   2372			chunk = offs + ((chunk - offs) & PAGE_MASK);
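
		/*
		 * Editorial note: each chunk truncates at most
		 * GFS2_JTRUNC_REVOKES blocks' worth of page cache before
		 * the transaction is ended and restarted below, which
		 * bounds the number of revokes per transaction.  Note
		 * that truncate_pagecache_range() takes an inclusive end
		 * offset, matching the offset + length - 1 call in
		 * __gfs2_punch_hole() below.
		 */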
   2373
    2374		truncate_pagecache_range(inode, offset, offset + chunk - 1);
   2375		offset += chunk;
   2376		length -= chunk;
   2377
   2378		tr = current->journal_info;
   2379		if (!test_bit(TR_TOUCHED, &tr->tr_flags))
   2380			continue;
   2381
   2382		gfs2_trans_end(sdp);
   2383		error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES);
   2384		if (error)
   2385			return error;
   2386	}
   2387	return 0;
   2388}
   2389
   2390int __gfs2_punch_hole(struct file *file, loff_t offset, loff_t length)
   2391{
   2392	struct inode *inode = file_inode(file);
   2393	struct gfs2_inode *ip = GFS2_I(inode);
   2394	struct gfs2_sbd *sdp = GFS2_SB(inode);
   2395	unsigned int blocksize = i_blocksize(inode);
   2396	loff_t start, end;
   2397	int error;
   2398
   2399	if (!gfs2_is_stuffed(ip)) {
   2400		unsigned int start_off, end_len;
   2401
   2402		start_off = offset & (blocksize - 1);
   2403		end_len = (offset + length) & (blocksize - 1);
   2404		if (start_off) {
   2405			unsigned int len = length;
   2406			if (length > blocksize - start_off)
   2407				len = blocksize - start_off;
   2408			error = gfs2_block_zero_range(inode, offset, len);
   2409			if (error)
   2410				goto out;
   2411			if (start_off + length < blocksize)
   2412				end_len = 0;
   2413		}
   2414		if (end_len) {
   2415			error = gfs2_block_zero_range(inode,
   2416				offset + length - end_len, end_len);
   2417			if (error)
   2418				goto out;
   2419		}
   2420	}
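
	/*
	 * Editorial example, assuming 4k blocks: offset == 1000 and
	 * length == 2000 fit inside block 0, so one
	 * gfs2_block_zero_range() call zeroes bytes 1000..2999 and
	 * end_len is reset to 0 (1000 + 2000 < 4096).  With offset ==
	 * 1000 and length == 8000, a head range (1000..4095) and a tail
	 * range (8192..8999) are zeroed, leaving only the fully covered
	 * block 1 for punch_hole() below.
	 */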
   2421
   2422	start = round_down(offset, blocksize);
   2423	end = round_up(offset + length, blocksize) - 1;
   2424	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
   2425	if (error)
   2426		return error;
   2427
   2428	if (gfs2_is_jdata(ip))
   2429		error = gfs2_trans_begin(sdp, RES_DINODE + 2 * RES_JDATA,
   2430					 GFS2_JTRUNC_REVOKES);
   2431	else
   2432		error = gfs2_trans_begin(sdp, RES_DINODE, 0);
   2433	if (error)
   2434		return error;
   2435
   2436	if (gfs2_is_stuffed(ip)) {
   2437		error = stuffed_zero_range(inode, offset, length);
   2438		if (error)
   2439			goto out;
   2440	}
   2441
   2442	if (gfs2_is_jdata(ip)) {
   2443		BUG_ON(!current->journal_info);
   2444		gfs2_journaled_truncate_range(inode, offset, length);
   2445	} else
   2446		truncate_pagecache_range(inode, offset, offset + length - 1);
   2447
   2448	file_update_time(file);
   2449	mark_inode_dirty(inode);
   2450
   2451	if (current->journal_info)
   2452		gfs2_trans_end(sdp);
   2453
   2454	if (!gfs2_is_stuffed(ip))
   2455		error = punch_hole(ip, offset, length);
   2456
   2457out:
   2458	if (current->journal_info)
   2459		gfs2_trans_end(sdp);
   2460	return error;
   2461}
   2462
   2463static int gfs2_map_blocks(struct iomap_writepage_ctx *wpc, struct inode *inode,
   2464		loff_t offset)
   2465{
   2466	int ret;
   2467
   2468	if (WARN_ON_ONCE(gfs2_is_stuffed(GFS2_I(inode))))
   2469		return -EIO;
   2470
   2471	if (offset >= wpc->iomap.offset &&
   2472	    offset < wpc->iomap.offset + wpc->iomap.length)
   2473		return 0;
   2474
   2475	memset(&wpc->iomap, 0, sizeof(wpc->iomap));
   2476	ret = gfs2_iomap_get(inode, offset, INT_MAX, &wpc->iomap);
   2477	return ret;
   2478}
   2479
   2480const struct iomap_writeback_ops gfs2_writeback_ops = {
   2481	.map_blocks		= gfs2_map_blocks,
   2482};