cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

glops.c (21806B)
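glops.c implements the per-type glock ("g-lock", GFS2's cluster-wide lock) operations: the sync, invalidate, demote, instantiate, dump, and callback hooks for inode, resource-group, freeze, iopen, flock, nondisk, quota, and journal locks.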


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>
#include <linux/security.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"
#include "dir.h"
#include "lops.h"

struct workqueue_struct *gfs2_freeze_wq;

extern struct workqueue_struct *gfs2_control_wq;

static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	fs_err(sdp,
	       "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
	       "state 0x%lx\n",
	       bh, (unsigned long long)bh->b_blocknr, bh->b_state,
	       bh->b_page->mapping, bh->b_page->flags);
	fs_err(sdp, "AIL glock %u:%llu mapping %p\n",
	       gl->gl_name.ln_type, gl->gl_name.ln_number,
	       gfs2_glock2aspace(gl));
	gfs2_lm(sdp, "AIL error\n");
	gfs2_withdraw_delayed(sdp);
}
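
/*
 * Background: the AIL ("active items list") holds buffers that have been
 * written to the journal but not yet written back to their in-place
 * locations.  Issuing a revoke for a buffer tells journal recovery not to
 * replay it, which is why the functions below can drop a glock's AIL
 * entries before the glock is demoted to another node.
 */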

/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: Number of buffers to revoke
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
			     unsigned int nr_revokes)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd, *tmp;
	struct buffer_head *bh;
	const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
		if (nr_revokes == 0)
			break;
		bh = bd->bd_bh;
		if (bh->b_state & b_state) {
			if (fsync)
				continue;
			gfs2_ail_error(gl, bh);
		}
		gfs2_trans_add_revoke(sdp, bd);
		nr_revokes--;
	}
	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
}


static int gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_trans tr;
	unsigned int revokes;
	int ret;

	revokes = atomic_read(&gl->gl_ail_count);

	if (!revokes) {
		bool have_revokes;
		bool log_in_flight;

		/*
		 * We have nothing on the AIL, but there could be revokes on
		 * the sdp revoke queue, in which case we still want to flush
		 * the log and wait for it to finish.
		 *
		 * If the sdp revoke list is empty too, we might still have an
		 * I/O outstanding for writing revokes, so we should wait for
		 * it before returning.
		 *
		 * If none of these conditions are true, our revokes are all
		 * flushed and we can return.
		 */
		gfs2_log_lock(sdp);
		have_revokes = !list_empty(&sdp->sd_log_revokes);
		log_in_flight = atomic_read(&sdp->sd_log_in_flight);
		gfs2_log_unlock(sdp);
		if (have_revokes)
			goto flush;
		if (log_in_flight)
			log_flush_wait(sdp);
		return 0;
	}

	memset(&tr, 0, sizeof(tr));
	set_bit(TR_ONSTACK, &tr.tr_flags);
	ret = __gfs2_trans_begin(&tr, sdp, 0, revokes, _RET_IP_);
	if (ret)
		goto flush;
	__gfs2_ail_flush(gl, false, revokes);
	gfs2_trans_end(sdp);

flush:
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_EMPTY_GL);
	return 0;
}

void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned int revokes = atomic_read(&gl->gl_ail_count);
	int ret;

	if (!revokes)
		return;

	ret = gfs2_trans_begin(sdp, 0, revokes);
	if (ret)
		return;
	__gfs2_ail_flush(gl, fsync, revokes);
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_FLUSH);
}

/**
 * gfs2_rgrp_metasync - sync out the metadata of a resource group
 * @gl: the glock protecting the resource group
 *
 */

static int gfs2_rgrp_metasync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *metamapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
	loff_t end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
	int error;

	filemap_fdatawrite_range(metamapping, start, end);
	error = filemap_fdatawait_range(metamapping, start, end);
	WARN_ON_ONCE(error && !gfs2_withdrawn(sdp));
	mapping_set_error(metamapping, error);
	if (error)
		gfs2_io_error(sdp);
	return error;
}

/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock.  We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to caller to demote/unlock the glock until I/O is complete.
 */

static int rgrp_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	int error;

	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return 0;
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_RGRP_GO_SYNC);
	error = gfs2_rgrp_metasync(gl);
	if (!error)
		error = gfs2_ail_empty_gl(gl);
	gfs2_free_clones(rgd);
	return error;
}

/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags:
 *
 * We never use LM_ST_DEFERRED with resource groups, so we should
 * always see the metadata flag set here.
 *
 */

static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
	loff_t end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;

	gfs2_rgrp_brelse(rgd);
	WARN_ON_ONCE(!(flags & DIO_METADATA));
	truncate_inode_pages_range(mapping, start, end);
}

static void gfs2_rgrp_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
			      const char *fs_id_buf)
{
	struct gfs2_rgrpd *rgd = gl->gl_object;

	if (rgd)
		gfs2_rgrp_dump(seq, rgd, fs_id_buf);
}

static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip;

	spin_lock(&gl->gl_lockref.lock);
	ip = gl->gl_object;
	if (ip)
		set_bit(GIF_GLOP_PENDING, &ip->i_flags);
	spin_unlock(&gl->gl_lockref.lock);
	return ip;
}

struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
{
	struct gfs2_rgrpd *rgd;

	spin_lock(&gl->gl_lockref.lock);
	rgd = gl->gl_object;
	spin_unlock(&gl->gl_lockref.lock);

	return rgd;
}

static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
{
	if (!ip)
		return;

	clear_bit_unlock(GIF_GLOP_PENDING, &ip->i_flags);
	wake_up_bit(&ip->i_flags, GIF_GLOP_PENDING);
}
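
/*
 * Note: gfs2_glock2inode() pairs with gfs2_clear_glop_pending() above.
 * GIF_GLOP_PENDING marks the inode as the target of an in-flight glock
 * operation, so code tearing the inode down can wait on that bit before
 * detaching the inode from its glock.
 */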

/**
 * gfs2_inode_metasync - sync out the metadata of an inode
 * @gl: the glock protecting the inode
 *
 */
int gfs2_inode_metasync(struct gfs2_glock *gl)
{
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	filemap_fdatawrite(metamapping);
	error = filemap_fdatawait(metamapping);
	if (error)
		gfs2_io_error(gl->gl_name.ln_sbd);
	return error;
}

/**
 * inode_go_sync - Sync the dirty metadata of an inode
 * @gl: the glock protecting the inode
 *
 */

static int inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);
	int isreg = ip && S_ISREG(ip->i_inode.i_mode);
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error = 0, ret;

	if (isreg) {
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		goto out;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_INODE_GO_SYNC);
	filemap_fdatawrite(metamapping);
	if (isreg) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	ret = gfs2_inode_metasync(gl);
	if (!error)
		error = ret;
	gfs2_ail_empty_gl(gl);
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_atomic();
	clear_bit(GLF_DIRTY, &gl->gl_flags);

out:
	gfs2_clear_glop_pending(ip);
	return error;
}
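
/*
 * Note the ordering in inode_go_sync(): gfs2_log_flush() runs before the
 * metadata mapping is written back, so journaled metadata reaches the
 * on-disk journal before it is written in place (write-ahead ordering).
 */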

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags:
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
		truncate_inode_pages(mapping, 0);
		if (ip) {
			set_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags);
			forget_all_cached_acls(&ip->i_inode);
			security_inode_invalidate_secctx(&ip->i_inode);
			gfs2_dir_hash_inval(ip);
		}
	}

	if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
		gfs2_log_flush(gl->gl_name.ln_sbd, NULL,
			       GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_INODE_GO_INVAL);
		gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
	}
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);

	gfs2_clear_glop_pending(ip);
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int inode_go_demote_ok(const struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
		return 0;

	return 1;
}

static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	const struct gfs2_dinode *str = buf;
	struct timespec64 atime;
	u16 height, depth;
	umode_t mode = be32_to_cpu(str->di_mode);
	bool is_new = ip->i_inode.i_state & I_NEW;

	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
		goto corrupt;
	if (unlikely(!is_new && inode_wrong_type(&ip->i_inode, mode)))
		goto corrupt;
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	ip->i_inode.i_mode = mode;
	if (is_new) {
		ip->i_inode.i_rdev = 0;
		switch (mode & S_IFMT) {
		case S_IFBLK:
		case S_IFCHR:
			ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
						   be32_to_cpu(str->di_minor));
			break;
		}
	}

	i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
	i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
	set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
	i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
	gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
	if (timespec64_compare(&ip->i_inode.i_atime, &atime) < 0)
		ip->i_inode.i_atime = atime;
	ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
	ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
	ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
	ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	ip->i_generation = be64_to_cpu(str->di_generation);

	ip->i_diskflags = be32_to_cpu(str->di_flags);
	ip->i_eattr = be64_to_cpu(str->di_eattr);
	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
	gfs2_set_inode_flags(&ip->i_inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > GFS2_MAX_META_HEIGHT))
		goto corrupt;
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
		goto corrupt;
	ip->i_depth = (u8)depth;
	ip->i_entries = be32_to_cpu(str->di_entries);

	if (S_ISREG(ip->i_inode.i_mode))
		gfs2_set_aops(&ip->i_inode);

	return 0;
corrupt:
	gfs2_consist_inode(ip);
	return -EIO;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	return error;
}

/**
 * inode_go_instantiate - read in an inode if necessary
 * @gh: The glock holder
 *
 * Returns: errno
 */

static int inode_go_instantiate(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip) /* no inode to populate - read it in later */
		goto out;

	error = gfs2_inode_refresh(ip);
	if (error)
		goto out;

	if (gh->gh_state != LM_ST_DEFERRED)
		inode_dio_wait(&ip->i_inode);

	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE)) {
		spin_lock(&sdp->sd_trunc_lock);
		if (list_empty(&ip->i_trunc_list))
			list_add(&ip->i_trunc_list, &sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		wake_up(&sdp->sd_quota_wait);
		error = 1;
	}

out:
	return error;
}

/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: The glock
 * @fs_id_buf: file system id (may be empty)
 *
 */

static void inode_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
			  const char *fs_id_buf)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct inode *inode = &ip->i_inode;
	unsigned long nrpages;

	if (ip == NULL)
		return;

	xa_lock_irq(&inode->i_data.i_pages);
	nrpages = inode->i_data.nrpages;
	xa_unlock_irq(&inode->i_data.i_pages);

	gfs2_print_dbg(seq, "%s I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu "
		       "p:%lu\n", fs_id_buf,
		  (unsigned long long)ip->i_no_formal_ino,
		  (unsigned long long)ip->i_no_addr,
		  IF2DT(ip->i_inode.i_mode), ip->i_flags,
		  (unsigned int)ip->i_diskflags,
		  (unsigned long long)i_size_read(inode), nrpages);
}

/**
 * freeze_go_sync - promote/demote the freeze glock
 * @gl: the glock
 */

static int freeze_go_sync(struct gfs2_glock *gl)
{
	int error = 0;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	/*
	 * We need to check gl_state == LM_ST_SHARED here and not gl_req ==
	 * LM_ST_EXCLUSIVE. That's because when any node does a freeze,
	 * all the nodes should have the freeze glock in SH mode and they all
	 * call do_xmote: One for EX and the others for UN. They ALL must
	 * freeze locally, and they ALL must queue freeze work. The freeze_work
	 * calls freeze_func, which tries to reacquire the freeze glock in SH,
	 * effectively waiting for the thaw on the node that holds it in EX.
	 * Once thawed, the work func acquires the freeze glock in
	 * SH and everybody goes back to thawed.
	 */
	if (gl->gl_state == LM_ST_SHARED && !gfs2_withdrawn(sdp) &&
	    !test_bit(SDF_NORECOVERY, &sdp->sd_flags)) {
		atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
		error = freeze_super(sdp->sd_vfs);
		if (error) {
			fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n",
				error);
			if (gfs2_withdrawn(sdp)) {
				atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
				return 0;
			}
			gfs2_assert_withdraw(sdp, 0);
		}
		queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
		if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
				       GFS2_LFC_FREEZE_GO_SYNC);
		else /* read-only mounts */
			atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
	}
	return 0;
}

/**
 * freeze_go_xmote_bh - After promoting/demoting the freeze glock
 * @gl: the glock
 */
static int freeze_go_xmote_bh(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
		if (gfs2_assert_withdraw_delayed(sdp, !error))
			return error;
		if (gfs2_assert_withdraw_delayed(sdp, head.lh_flags &
						 GFS2_LOG_HEAD_UNMOUNT))
			return -EIO;
		sdp->sd_log_sequence = head.lh_sequence + 1;
		gfs2_log_pointers_init(sdp, head.lh_blkno);
	}
	return 0;
}

/**
 * freeze_go_demote_ok
 * @gl: the glock
 *
 * Always returns 0
 */

static int freeze_go_demote_ok(const struct gfs2_glock *gl)
{
	return 0;
}

/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 *
 * gl_lockref.lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (!remote || sb_rdonly(sdp->sd_vfs))
		return;

	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
		gl->gl_lockref.count++;
		if (!queue_delayed_work(gfs2_delete_workqueue,
					&gl->gl_delete, 0))
			gl->gl_lockref.count--;
	}
}

static int iopen_go_demote_ok(const struct gfs2_glock *gl)
{
	return !gfs2_delete_work_queued(gl);
}

/**
 * inode_go_free - wake up anyone waiting for dlm's unlock ast to free it
 * @gl: glock being freed
 *
 * For now, this is only used for the journal inode glock. In withdraw
 * situations, we need to wait for the glock to be freed so that we know
 * other nodes may proceed with recovery / journal replay.
 */
static void inode_go_free(struct gfs2_glock *gl)
{
	/* Note that we cannot reference gl_object because it's already set
	 * to NULL by this point in its lifecycle. */
	if (!test_bit(GLF_FREEING, &gl->gl_flags))
		return;
	clear_bit_unlock(GLF_FREEING, &gl->gl_flags);
	wake_up_bit(&gl->gl_flags, GLF_FREEING);
}

/**
 * nondisk_go_callback - used to signal that a node has withdrawn
 * @gl: the nondisk glock
 * @remote: true if this came from a different cluster node
 *
 */
static void nondisk_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	/* Ignore the callback unless it's from another node, and it's the
	   live lock. */
	if (!remote || gl->gl_name.ln_number != GFS2_LIVE_LOCK)
		return;

	/* First order of business is to cancel the demote request. We don't
	 * really want to demote a nondisk glock. At best it's just to inform
	 * us of another node's withdraw. We'll keep it in SH mode. */
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);

	/* Ignore the unlock if we're withdrawn, unmounting, or in recovery. */
	if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) ||
	    test_bit(SDF_WITHDRAWN, &sdp->sd_flags) ||
	    test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags))
		return;

	/* We only care when a node wants us to unlock, because that means
	 * they want a journal recovered. */
	if (gl->gl_demote_state != LM_ST_UNLOCKED)
		return;

	if (sdp->sd_args.ar_spectator) {
		fs_warn(sdp, "Spectator node cannot recover journals.\n");
		return;
	}

	fs_warn(sdp, "Some node has withdrawn; checking for recovery.\n");
	set_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags);
	/*
	 * We can't call remote_withdraw or gfs2_recover_journal directly
	 * here because this is called from the glock unlock function and
	 * remote_withdraw needs to enqueue and dequeue the same "live" glock
	 * we were called from. So we queue it to the control work queue in
	 * lock_dlm.
	 */
	queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);
}
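
/*
 * Each glock type is described by one of the gfs2_glock_operations tables
 * below.  The hooks, roughly: go_sync writes out dirty data/metadata before
 * the lock is demoted; go_inval invalidates cached state after another node
 * may have changed it; go_demote_ok says whether an idle glock may be
 * demoted; go_instantiate reads in the protected object; go_dump prints
 * debug state; go_callback reacts to a remote demote request; go_xmote_bh
 * runs after a state change completes; go_free signals that the glock is
 * being freed.
 */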

const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_type = LM_TYPE_META,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_instantiate = inode_go_instantiate,
	.go_dump = inode_go_dump,
	.go_type = LM_TYPE_INODE,
	.go_flags = GLOF_ASPACE | GLOF_LRU | GLOF_LVB,
	.go_free = inode_go_free,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_sync = rgrp_go_sync,
	.go_inval = rgrp_go_inval,
	.go_instantiate = gfs2_rgrp_go_instantiate,
	.go_dump = gfs2_rgrp_go_dump,
	.go_type = LM_TYPE_RGRP,
	.go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_freeze_glops = {
	.go_sync = freeze_go_sync,
	.go_xmote_bh = freeze_go_xmote_bh,
	.go_demote_ok = freeze_go_demote_ok,
	.go_type = LM_TYPE_NONDISK,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
	.go_dump = inode_go_dump,
	.go_demote_ok = iopen_go_demote_ok,
	.go_flags = GLOF_LRU | GLOF_NONDISK,
	.go_subclass = 1,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
	.go_flags = GLOF_LRU | GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
	.go_flags = GLOF_NONDISK,
	.go_callback = nondisk_go_callback,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_type = LM_TYPE_QUOTA,
	.go_flags = GLOF_LVB | GLOF_LRU | GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META] = &gfs2_meta_glops,
	[LM_TYPE_INODE] = &gfs2_inode_glops,
	[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
	[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK] = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA] = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};

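/*
 * Usage sketch (an assumption drawn from callers elsewhere in fs/gfs2, e.g.
 * gfs2_glock_nq_num(), not from this file): a glock is created against one
 * of the tables above and then acquired through a holder:
 *
 *	struct gfs2_glock *gl;
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	error = gfs2_glock_get(sdp, blkno, &gfs2_inode_glops, CREATE, &gl);
 *	if (error)
 *		return error;
 *	error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &gh);
 *	gfs2_glock_put(gl);	(the holder keeps its own reference)
 *	if (error)
 *		return error;
 *	... critical section ...
 *	gfs2_glock_dq_uninit(&gh);
 */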