cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack.
git clone https://git.sinitax.com/sinitax/cachepc-linux

backing-dev.h (10547B)


/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/writeback.h>
#include <linux/backing-dev-defs.h>
#include <linux/slab.h>

static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi)
{
	kref_get(&bdi->refcnt);
	return bdi;
}

struct backing_dev_info *bdi_get_by_id(u64 id);
void bdi_put(struct backing_dev_info *bdi);

__printf(2, 3)
int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...);
__printf(2, 0)
int bdi_register_va(struct backing_dev_info *bdi, const char *fmt,
		    va_list args);
void bdi_set_owner(struct backing_dev_info *bdi, struct device *owner);
void bdi_unregister(struct backing_dev_info *bdi);

struct backing_dev_info *bdi_alloc(int node_id);

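/*
 * Example (illustrative sketch, not part of the original header): the
 * typical bdi lifecycle as a driver might drive it, pairing bdi_alloc()
 * with bdi_put() and bdi_register() with bdi_unregister().  The "myblk"
 * name and the minor variable are assumptions for illustration.
 *
 *	struct backing_dev_info *bdi;
 *	int err;
 *
 *	bdi = bdi_alloc(NUMA_NO_NODE);
 *	if (!bdi)
 *		return -ENOMEM;
 *	err = bdi_register(bdi, "myblk%d", minor);
 *	if (err) {
 *		bdi_put(bdi);
 *		return err;
 *	}
 *	...
 *	bdi_unregister(bdi);
 *	bdi_put(bdi);		(drops the reference from bdi_alloc())
 */
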
void wb_start_background_writeback(struct bdi_writeback *wb);
void wb_workfn(struct work_struct *work);
void wb_wakeup_delayed(struct bdi_writeback *wb);

void wb_wait_for_completion(struct wb_completion *done);

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;

extern struct workqueue_struct *bdi_wq;
extern struct workqueue_struct *bdi_async_bio_wq;

static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{
	return test_bit(WB_has_dirty_io, &wb->state);
}

static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	/*
	 * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are
	 * any dirty wbs.  See wb_update_write_bandwidth().
	 */
	return atomic_long_read(&bdi->tot_write_bandwidth);
}

static inline void wb_stat_mod(struct bdi_writeback *wb,
				 enum wb_stat_item item, s64 amount)
{
	percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH);
}

static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	wb_stat_mod(wb, item, 1);
}

static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	wb_stat_mod(wb, item, -1);
}

static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_read_positive(&wb->stat[item]);
}

static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_sum_positive(&wb->stat[item]);
}

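/*
 * Example (illustrative sketch, not part of the original header):
 * accounting a page entering and leaving writeback, then reading the
 * counter back.  "wb" is assumed to be a valid bdi_writeback; "limit"
 * and throttle() are placeholders for illustration.
 *
 *	inc_wb_stat(wb, WB_WRITEBACK);		writeback starts
 *	...
 *	dec_wb_stat(wb, WB_WRITEBACK);		writeback completes
 *
 *	if (wb_stat(wb, WB_WRITEBACK) > limit)	fast, approximate read
 *		throttle();
 *	nr = wb_stat_sum(wb, WB_WRITEBACK);	exact but more expensive
 */
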
extern void wb_writeout_inc(struct bdi_writeback *wb);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long wb_stat_error(void)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * WB_STAT_BATCH;
#else
	return 1;
#endif
}

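/*
 * Worked example (illustrative): on SMP, each CPU may hold up to
 * WB_STAT_BATCH uncommitted events in its local counter, so a plain
 * wb_stat() read can be off by up to nr_cpu_ids * WB_STAT_BATCH.
 * With 4 possible CPUs and a batch of 32 (illustrative value), the
 * error bound would be 4 * 32 = 128.
 */
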
int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);

/*
 * Flags in backing_dev_info::capabilities
 *
 * BDI_CAP_WRITEBACK:		Supports dirty page writeback, and dirty pages
 *				should contribute to accounting
 * BDI_CAP_WRITEBACK_ACCT:	Automatically account writeback pages
 * BDI_CAP_STRICTLIMIT:		Keep number of dirty pages below bdi threshold
 */
#define BDI_CAP_WRITEBACK		(1 << 0)
#define BDI_CAP_WRITEBACK_ACCT		(1 << 1)
#define BDI_CAP_STRICTLIMIT		(1 << 2)

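/*
 * Example (illustrative sketch, not part of the original header): a
 * device that supports writeback and accounts it automatically would
 * set both flags at init time:
 *
 *	bdi->capabilities = BDI_CAP_WRITEBACK | BDI_CAP_WRITEBACK_ACCT;
 *
 * mapping_can_writeback() below tests the BDI_CAP_WRITEBACK bit.
 */
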
extern struct backing_dev_info noop_backing_dev_info;

int bdi_init(struct backing_dev_info *bdi);

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @wb: bdi_writeback of interest
 *
 * Determine whether there is writeback waiting to be handled against a
 * bdi_writeback.
 */
static inline bool writeback_in_progress(struct bdi_writeback *wb)
{
	return test_bit(WB_writeback_running, &wb->state);
}

struct backing_dev_info *inode_to_bdi(struct inode *inode);

static inline bool mapping_can_writeback(struct address_space *mapping)
{
	return inode_to_bdi(mapping->host)->capabilities & BDI_CAP_WRITEBACK;
}

static inline int bdi_sched_wait(void *word)
{
	schedule();
	return 0;
}

#ifdef CONFIG_CGROUP_WRITEBACK

struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css);
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp);
void wb_memcg_offline(struct mem_cgroup *memcg);
void wb_blkcg_offline(struct cgroup_subsys_state *css);

/**
 * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
 * @inode: inode of interest
 *
 * Cgroup writeback requires support from the filesystem.  Also, both memcg and
 * iocg have to be on the default hierarchy.  Test whether all conditions are
 * met.
 *
 * Note that the test result may change dynamically on the same inode
 * depending on how memcg and iocg are configured.
 */
static inline bool inode_cgwb_enabled(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	return cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
		cgroup_subsys_on_dfl(io_cgrp_subsys) &&
		(bdi->capabilities & BDI_CAP_WRITEBACK) &&
		(inode->i_sb->s_iflags & SB_I_CGROUPWB);
}

/**
 * wb_find_current - find wb for %current on a bdi
 * @bdi: bdi of interest
 *
 * Find the wb of @bdi which matches both the memcg and blkcg of %current.
 * Must be called under rcu_read_lock() which protects the returned wb.
 * Returns NULL if not found.
 */
static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	struct cgroup_subsys_state *memcg_css;
	struct bdi_writeback *wb;

	memcg_css = task_css(current, memory_cgrp_id);
	if (!memcg_css->parent)
		return &bdi->wb;

	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);

	/*
	 * %current's blkcg equals the effective blkcg of its memcg.  No
	 * need to use the relatively expensive cgroup_get_e_css().
	 */
	if (likely(wb && wb->blkcg_css == task_css(current, io_cgrp_id)))
		return wb;
	return NULL;
}

/**
 * wb_get_create_current - get or create wb for %current on a bdi
 * @bdi: bdi of interest
 * @gfp: allocation mask
 *
 * Equivalent to wb_get_create() on %current's memcg.  This function is
 * called from a relatively hot path and optimizes the common cases using
 * wb_find_current().
 */
static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	struct bdi_writeback *wb;

	rcu_read_lock();
	wb = wb_find_current(bdi);
	if (wb && unlikely(!wb_tryget(wb)))
		wb = NULL;
	rcu_read_unlock();

	if (unlikely(!wb)) {
		struct cgroup_subsys_state *memcg_css;

		memcg_css = task_get_css(current, memory_cgrp_id);
		wb = wb_get_create(bdi, memcg_css, gfp);
		css_put(memcg_css);
	}
	return wb;
}

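/*
 * Example (illustrative sketch, not part of the original header): the
 * returned wb carries a reference (from wb_tryget() or wb_get_create()),
 * so the caller is expected to drop it with wb_put() when done.  The
 * GFP_KERNEL mask is an assumption for illustration.
 *
 *	wb = wb_get_create_current(bdi, GFP_KERNEL);
 *	if (wb) {
 *		... queue writeback work against wb ...
 *		wb_put(wb);
 *	}
 */
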
/**
 * inode_to_wb_is_valid - test whether an inode has a wb associated
 * @inode: inode of interest
 *
 * Returns %true if @inode has a wb associated.  May be called without any
 * locking.
 */
static inline bool inode_to_wb_is_valid(struct inode *inode)
{
	return inode->i_wb;
}

/**
 * inode_to_wb - determine the wb of an inode
 * @inode: inode of interest
 *
 * Returns the wb @inode is currently associated with.  The caller must be
 * holding either @inode->i_lock, the i_pages lock, or the
 * associated wb's list_lock.
 */
static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON_ONCE(debug_locks &&
		     (!lockdep_is_held(&inode->i_lock) &&
		      !lockdep_is_held(&inode->i_mapping->i_pages.xa_lock) &&
		      !lockdep_is_held(&inode->i_wb->list_lock)));
#endif
	return inode->i_wb;
}

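/*
 * Example (illustrative sketch, not part of the original header):
 * dereferencing the wb under one of the locks inode_to_wb() accepts,
 * here @inode->i_lock:
 *
 *	spin_lock(&inode->i_lock);
 *	wb = inode_to_wb(inode);
 *	... use wb while i_lock is held ...
 *	spin_unlock(&inode->i_lock);
 *
 * For lockless access, see unlocked_inode_to_wb_begin() below.
 */
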
static inline struct bdi_writeback *inode_to_wb_wbc(
				struct inode *inode,
				struct writeback_control *wbc)
{
	/*
	 * If wbc does not have inode attached, it means cgroup writeback was
	 * disabled when wbc started. Just use the default wb in that case.
	 */
	return wbc->wb ? wbc->wb : &inode_to_bdi(inode)->wb;
}

/**
 * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
 * @inode: target inode
 * @cookie: output param, to be passed to the end function
 *
 * The caller wants to access the wb associated with @inode but isn't
 * holding inode->i_lock, the i_pages lock or wb->list_lock.  This
 * function determines the wb associated with @inode and ensures that the
 * association doesn't change until the transaction is finished with
 * unlocked_inode_to_wb_end().
 *
 * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and
 * can't sleep during the transaction.  IRQs may or may not be disabled on
 * return.
 */
static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
	rcu_read_lock();

	/*
	 * Paired with store_release in inode_switch_wbs_work_fn() and
	 * ensures that we see the new wb if we see cleared I_WB_SWITCH.
	 */
	cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;

	if (unlikely(cookie->locked))
		xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags);

	/*
	 * Protected by either !I_WB_SWITCH + rcu_read_lock() or the i_pages
	 * lock.  inode_to_wb() will bark.  Deref directly.
	 */
	return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_end - end inode wb access transaction
 * @inode: target inode
 * @cookie: @cookie from unlocked_inode_to_wb_begin()
 */
static inline void unlocked_inode_to_wb_end(struct inode *inode,
					    struct wb_lock_cookie *cookie)
{
	if (unlikely(cookie->locked))
		xa_unlock_irqrestore(&inode->i_mapping->i_pages, cookie->flags);

	rcu_read_unlock();
}

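/*
 * Example (illustrative sketch, not part of the original header):
 * accessing the wb without holding any of the locks above, bracketed
 * by the begin/end helpers.  No sleeping is allowed inside the
 * transaction.
 *
 *	struct wb_lock_cookie cookie = {};
 *	struct bdi_writeback *wb;
 *
 *	wb = unlocked_inode_to_wb_begin(inode, &cookie);
 *	... read wb state, e.g. wb->avg_write_bandwidth ...
 *	unlocked_inode_to_wb_end(inode, &cookie);
 */
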
#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool inode_cgwb_enabled(struct inode *inode)
{
	return false;
}

static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	return &bdi->wb;
}

static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	return &bdi->wb;
}

static inline bool inode_to_wb_is_valid(struct inode *inode)
{
	return true;
}

static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
	return &inode_to_bdi(inode)->wb;
}

static inline struct bdi_writeback *inode_to_wb_wbc(
				struct inode *inode,
				struct writeback_control *wbc)
{
	return inode_to_wb(inode);
}

static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
	return inode_to_wb(inode);
}

static inline void unlocked_inode_to_wb_end(struct inode *inode,
					    struct wb_lock_cookie *cookie)
{
}

static inline void wb_memcg_offline(struct mem_cgroup *memcg)
{
}

static inline void wb_blkcg_offline(struct cgroup_subsys_state *css)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

const char *bdi_dev_name(struct backing_dev_info *bdi);

#endif	/* _LINUX_BACKING_DEV_H */