cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

glock.h (11189B)


/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 */

#ifndef __GLOCK_DOT_H__
#define __GLOCK_DOT_H__

#include <linux/sched.h>
#include <linux/parser.h>
#include "incore.h"
#include "util.h"

/* Options for hostdata parser */

enum {
	Opt_jid,
	Opt_id,
	Opt_first,
	Opt_nodir,
	Opt_err,
};

/*
 * lm_lockname types
 */

#define LM_TYPE_RESERVED	0x00
#define LM_TYPE_NONDISK		0x01
#define LM_TYPE_INODE		0x02
#define LM_TYPE_RGRP		0x03
#define LM_TYPE_META		0x04
#define LM_TYPE_IOPEN		0x05
#define LM_TYPE_FLOCK		0x06
#define LM_TYPE_PLOCK		0x07
#define LM_TYPE_QUOTA		0x08
#define LM_TYPE_JOURNAL		0x09

/*
 * lm_lock() states
 *
 * SHARED is compatible with SHARED, not with DEFERRED or EX.
 * DEFERRED is compatible with DEFERRED, not with SHARED or EX.
 */

#define LM_ST_UNLOCKED		0
#define LM_ST_EXCLUSIVE		1
#define LM_ST_DEFERRED		2
#define LM_ST_SHARED		3

/*
 * lm_lock() flags
 *
 * LM_FLAG_TRY
 * Don't wait to acquire the lock if it can't be granted immediately.
 *
 * LM_FLAG_TRY_1CB
 * Send one blocking callback if TRY is set and the lock is not granted.
 *
 * LM_FLAG_NOEXP
 * GFS2 sets this flag on lock requests it makes while doing journal recovery.
 * These special requests should not be blocked by the recovery the way
 * ordinary locks would be.
 *
 * LM_FLAG_ANY
 * A SHARED request may also be granted in DEFERRED, or a DEFERRED request may
 * also be granted in SHARED.  The preferred state is whichever is compatible
 * with other granted locks, or the specified state if no other locks exist.
 *
 * LM_FLAG_PRIORITY
 * Override fairness considerations.  Suppose a lock is held in a shared state
 * and there is a pending request for the deferred state.  A shared lock
 * request with the priority flag would be allowed to bypass the deferred
 * request and directly join the other shared lock.  A shared lock request
 * without the priority flag might be forced to wait until the deferred
 * requester had acquired and released the lock.
 *
 * LM_FLAG_NODE_SCOPE
 * This holder agrees to share the lock within this node. In other words,
 * the glock is held in EX mode according to DLM, but local holders on the
 * same node can share it.
 */

#define LM_FLAG_TRY		0x0001
#define LM_FLAG_TRY_1CB		0x0002
#define LM_FLAG_NOEXP		0x0004
#define LM_FLAG_ANY		0x0008
#define LM_FLAG_PRIORITY	0x0010
#define LM_FLAG_NODE_SCOPE	0x0020
#define GL_ASYNC		0x0040
#define GL_EXACT		0x0080
#define GL_SKIP			0x0100
#define GL_NOCACHE		0x0400

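/*
 * A minimal usage sketch of how a caller might combine these flags with the
 * holder helpers declared further down (gl is assumed to be a glock the
 * caller already holds a reference to).  With LM_FLAG_TRY the enqueue fails
 * with GLR_TRYFAILED instead of blocking, so the busy case can be handled
 * without waiting:
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	error = gfs2_glock_nq_init(gl, LM_ST_SHARED, LM_FLAG_TRY, &gh);
 *	if (error == GLR_TRYFAILED)
 *		return 0;
 *	if (error)
 *		return error;
 *	...
 *	gfs2_glock_dq_uninit(&gh);
 */
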
/*
 * lm_async_cb return flags
 *
 * LM_OUT_ST_MASK
 * Masks the lower two bits of lock state in the returned value.
 *
 * LM_OUT_CANCELED
 * The lock request was canceled.
 *
 */

#define LM_OUT_ST_MASK		0x00000003
#define LM_OUT_CANCELED		0x00000008
#define LM_OUT_ERROR		0x00000004

/*
 * lm_recovery_done() messages
 */

#define LM_RD_GAVEUP		308
#define LM_RD_SUCCESS		309

#define GLR_TRYFAILED		13

#define GL_GLOCK_MAX_HOLD        (long)(HZ / 5)
#define GL_GLOCK_DFT_HOLD        (long)(HZ / 5)
#define GL_GLOCK_MIN_HOLD        (long)(10)
#define GL_GLOCK_HOLD_INCR       (long)(HZ / 20)
#define GL_GLOCK_HOLD_DECR       (long)(HZ / 40)

struct lm_lockops {
	const char *lm_proto_name;
	int (*lm_mount) (struct gfs2_sbd *sdp, const char *table);
	void (*lm_first_done) (struct gfs2_sbd *sdp);
	void (*lm_recovery_result) (struct gfs2_sbd *sdp, unsigned int jid,
				    unsigned int result);
	void (*lm_unmount) (struct gfs2_sbd *sdp);
	void (*lm_withdraw) (struct gfs2_sbd *sdp);
	void (*lm_put_lock) (struct gfs2_glock *gl);
	int (*lm_lock) (struct gfs2_glock *gl, unsigned int req_state,
			unsigned int flags);
	void (*lm_cancel) (struct gfs2_glock *gl);
	const match_table_t *lm_tokens;
};
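
/*
 * Each lock module exports one of these operations tables; gfs2_dlm_ops
 * (declared further down) is the DLM-backed implementation.  A rough sketch
 * of what such a table looks like, with hypothetical callback names used
 * purely for illustration:
 *
 *	static const struct lm_lockops example_ops = {
 *		.lm_proto_name	= "lock_example",
 *		.lm_mount	= example_mount,
 *		.lm_unmount	= example_unmount,
 *		.lm_put_lock	= example_put_lock,
 *		.lm_lock	= example_lock,
 *		.lm_cancel	= example_cancel,
 *	};
 */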

struct gfs2_glock_aspace {
	struct gfs2_glock glock;
	struct address_space mapping;
};

extern struct workqueue_struct *gfs2_delete_workqueue;
static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;
	struct pid *pid;

	/* Look in glock's list of holders for one with current task as owner */
	spin_lock(&gl->gl_lockref.lock);
	pid = task_pid(current);
	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
			break;
		if (test_bit(HIF_MAY_DEMOTE, &gh->gh_iflags))
			continue;
		if (gh->gh_owner_pid == pid)
			goto out;
	}
	gh = NULL;
out:
	spin_unlock(&gl->gl_lockref.lock);

	return gh;
}

static inline int gfs2_glock_is_held_excl(struct gfs2_glock *gl)
{
	return gl->gl_state == LM_ST_EXCLUSIVE;
}

static inline int gfs2_glock_is_held_dfrd(struct gfs2_glock *gl)
{
	return gl->gl_state == LM_ST_DEFERRED;
}

static inline int gfs2_glock_is_held_shrd(struct gfs2_glock *gl)
{
	return gl->gl_state == LM_ST_SHARED;
}

static inline struct address_space *gfs2_glock2aspace(struct gfs2_glock *gl)
{
	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
		struct gfs2_glock_aspace *gla =
			container_of(gl, struct gfs2_glock_aspace, glock);
		return &gla->mapping;
	}
	return NULL;
}

extern int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
			  const struct gfs2_glock_operations *glops,
			  int create, struct gfs2_glock **glp);
extern void gfs2_glock_hold(struct gfs2_glock *gl);
extern void gfs2_glock_put(struct gfs2_glock *gl);
extern void gfs2_glock_queue_put(struct gfs2_glock *gl);

extern void __gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
			       u16 flags, struct gfs2_holder *gh,
			       unsigned long ip);
static inline void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
				    u16 flags, struct gfs2_holder *gh) {
	__gfs2_holder_init(gl, state, flags, gh, _RET_IP_);
}

extern void gfs2_holder_reinit(unsigned int state, u16 flags,
			       struct gfs2_holder *gh);
extern void gfs2_holder_uninit(struct gfs2_holder *gh);
extern int gfs2_glock_nq(struct gfs2_holder *gh);
extern int gfs2_glock_poll(struct gfs2_holder *gh);
extern int gfs2_instantiate(struct gfs2_holder *gh);
extern int gfs2_glock_wait(struct gfs2_holder *gh);
extern int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs);
extern void gfs2_glock_dq(struct gfs2_holder *gh);
extern void gfs2_glock_dq_wait(struct gfs2_holder *gh);
extern void gfs2_glock_dq_uninit(struct gfs2_holder *gh);
extern int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
			     const struct gfs2_glock_operations *glops,
			     unsigned int state, u16 flags,
			     struct gfs2_holder *gh);
extern int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs);
extern void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs);
extern void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl,
			    bool fsid);
#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) {		\
			gfs2_dump_glock(NULL, gl, true);	\
			BUG(); } } while(0)
#define gfs2_glock_assert_warn(gl, x) do { if (unlikely(!(x))) {	\
			gfs2_dump_glock(NULL, gl, true);		\
			gfs2_assert_warn((gl)->gl_name.ln_sbd, (x)); } } \
	while (0)
#define gfs2_glock_assert_withdraw(gl, x) do { if (unlikely(!(x))) {	\
			gfs2_dump_glock(NULL, gl, true);		\
			gfs2_assert_withdraw((gl)->gl_name.ln_sbd, (x)); } } \
	while (0)

extern __printf(2, 3)
void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...);

/**
 * gfs2_glock_nq_init - initialize a holder and enqueue it on a glock
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Returns: 0, GLR_*, or errno
 */

static inline int gfs2_glock_nq_init(struct gfs2_glock *gl,
				     unsigned int state, u16 flags,
				     struct gfs2_holder *gh)
{
	int error;

	__gfs2_holder_init(gl, state, flags, gh, _RET_IP_);

	error = gfs2_glock_nq(gh);
	if (error)
		gfs2_holder_uninit(gh);

	return error;
}
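
/*
 * A minimal sketch of the usual calling convention: gfs2_glock_nq_init()
 * pairs with gfs2_glock_dq_uninit() around the critical section, and since
 * the holder is already torn down when the enqueue fails, the error path
 * needs no extra cleanup:
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	error = gfs2_glock_nq_init(gl, LM_ST_EXCLUSIVE, 0, &gh);
 *	if (error)
 *		return error;
 *	...
 *	gfs2_glock_dq_uninit(&gh);
 */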

extern void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state);
extern void gfs2_glock_complete(struct gfs2_glock *gl, int ret);
extern bool gfs2_queue_delete_work(struct gfs2_glock *gl, unsigned long delay);
extern void gfs2_cancel_delete_work(struct gfs2_glock *gl);
extern bool gfs2_delete_work_queued(const struct gfs2_glock *gl);
extern void gfs2_flush_delete_work(struct gfs2_sbd *sdp);
extern void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
extern void gfs2_glock_finish_truncate(struct gfs2_inode *ip);
extern void gfs2_glock_thaw(struct gfs2_sbd *sdp);
extern void gfs2_glock_add_to_lru(struct gfs2_glock *gl);
extern void gfs2_glock_free(struct gfs2_glock *gl);

extern int __init gfs2_glock_init(void);
extern void gfs2_glock_exit(void);

extern void gfs2_create_debugfs_file(struct gfs2_sbd *sdp);
extern void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp);
extern void gfs2_register_debugfs(void);
extern void gfs2_unregister_debugfs(void);

extern const struct lm_lockops gfs2_dlm_ops;

static inline void gfs2_holder_mark_uninitialized(struct gfs2_holder *gh)
{
	gh->gh_gl = NULL;
}

static inline bool gfs2_holder_initialized(struct gfs2_holder *gh)
{
	return gh->gh_gl;
}

static inline bool gfs2_holder_queued(struct gfs2_holder *gh)
{
	return !list_empty(&gh->gh_list);
}
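
/*
 * These helpers make it safe to tear down a holder that may never have been
 * used.  A rough sketch of the pattern, assuming a path that only sometimes
 * takes the glock (need_lock is a stand-in condition for illustration):
 *
 *	struct gfs2_holder gh;
 *
 *	gfs2_holder_mark_uninitialized(&gh);
 *	if (need_lock) {
 *		error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &gh);
 *		if (error)
 *			goto fail;
 *	}
 *	...
 *	if (gfs2_holder_initialized(&gh))
 *		gfs2_glock_dq_uninit(&gh);
 */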

/**
 * glock_set_object - set the gl_object field of a glock
 * @gl: the glock
 * @object: the object
 */
static inline void glock_set_object(struct gfs2_glock *gl, void *object)
{
	spin_lock(&gl->gl_lockref.lock);
	if (gfs2_assert_warn(gl->gl_name.ln_sbd, gl->gl_object == NULL))
		gfs2_dump_glock(NULL, gl, true);
	gl->gl_object = object;
	spin_unlock(&gl->gl_lockref.lock);
}

/**
 * glock_clear_object - clear the gl_object field of a glock
 * @gl: the glock
 * @object: the object
 *
 * I'd love to similarly add this:
 *	else if (gfs2_assert_warn(gl->gl_sbd, gl->gl_object == object))
 *		gfs2_dump_glock(NULL, gl, true);
 * Unfortunately, that's not possible because as soon as gfs2_delete_inode
 * frees the block in the rgrp, another process can reassign it for an I_NEW
 * inode in gfs2_create_inode because that calls new_inode, not gfs2_iget.
 * That means gfs2_delete_inode may subsequently try to call this function
 * for a glock that's already pointing to a brand new inode. If we clear the
 * new inode's gl_object, we'll introduce metadata corruption. Function
 * gfs2_delete_inode calls clear_inode which calls gfs2_clear_inode which also
 * tries to clear gl_object, so it's more than just gfs2_delete_inode.
 *
 */
static inline void glock_clear_object(struct gfs2_glock *gl, void *object)
{
	spin_lock(&gl->gl_lockref.lock);
	if (gl->gl_object == object)
		gl->gl_object = NULL;
	spin_unlock(&gl->gl_lockref.lock);
}
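
/*
 * A rough sketch of how the two helpers are meant to be paired over the
 * lifetime of the attached object, assuming the usual inode case where
 * ip is a struct gfs2_inode and ip->i_gl is its inode glock:
 *
 *	glock_set_object(ip->i_gl, ip);
 *	...
 *	glock_clear_object(ip->i_gl, ip);
 *
 * Passing the object to glock_clear_object(), rather than clearing
 * unconditionally, is what guards against the reuse race described above.
 */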

static inline void gfs2_holder_allow_demote(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	spin_lock(&gl->gl_lockref.lock);
	set_bit(HIF_MAY_DEMOTE, &gh->gh_iflags);
	spin_unlock(&gl->gl_lockref.lock);
}

static inline void gfs2_holder_disallow_demote(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	spin_lock(&gl->gl_lockref.lock);
	clear_bit(HIF_MAY_DEMOTE, &gh->gh_iflags);
	spin_unlock(&gl->gl_lockref.lock);
}
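
/*
 * A sketch of how these bracket a region where the holder may be demoted,
 * for example around fault-prone copies to or from user space that could
 * otherwise deadlock on the same glock.  After disallowing demotion the
 * caller must check whether the holder is still queued.  Here
 * fault_prone_work() and retry_lock are stand-ins for illustration:
 *
 *	gfs2_holder_allow_demote(&gh);
 *	ret = fault_prone_work();
 *	gfs2_holder_disallow_demote(&gh);
 *	if (!gfs2_holder_queued(&gh))
 *		goto retry_lock;
 */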

extern void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation);
extern bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation);

#endif /* __GLOCK_DOT_H__ */