cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

freezer.h (9193B)


      1/* SPDX-License-Identifier: GPL-2.0 */
      2/* Freezer declarations */
      3
      4#ifndef FREEZER_H_INCLUDED
      5#define FREEZER_H_INCLUDED
      6
      7#include <linux/debug_locks.h>
      8#include <linux/sched.h>
      9#include <linux/wait.h>
     10#include <linux/atomic.h>
     11
     12#ifdef CONFIG_FREEZER
     13extern atomic_t system_freezing_cnt;	/* nr of freezing conds in effect */
     14extern bool pm_freezing;		/* PM freezing in effect */
     15extern bool pm_nosig_freezing;		/* PM nosig freezing in effect */
     16
     17/*
     18 * Timeout for stopping processes
     19 */
     20extern unsigned int freeze_timeout_msecs;
     21
     22/*
     23 * Check if a process has been frozen
     24 */
     25static inline bool frozen(struct task_struct *p)
     26{
     27	return p->flags & PF_FROZEN;
     28}
     29
     30extern bool freezing_slow_path(struct task_struct *p);
     31
     32/*
     33 * Check if there is a request to freeze a process
     34 */
     35static inline bool freezing(struct task_struct *p)
     36{
     37	if (likely(!atomic_read(&system_freezing_cnt)))
     38		return false;
     39	return freezing_slow_path(p);
     40}
     41
     42/* Takes and releases task alloc lock using task_lock() */
     43extern void __thaw_task(struct task_struct *t);
     44
     45extern bool __refrigerator(bool check_kthr_stop);
     46extern int freeze_processes(void);
     47extern int freeze_kernel_threads(void);
     48extern void thaw_processes(void);
     49extern void thaw_kernel_threads(void);
     50
     51/*
     52 * DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION
     53 * If try_to_freeze causes a lockdep warning it means the caller may deadlock
     54 */
     55static inline bool try_to_freeze_unsafe(void)
     56{
     57	might_sleep();
     58	if (likely(!freezing(current)))
     59		return false;
     60	return __refrigerator(false);
     61}
     62
     63static inline bool try_to_freeze(void)
     64{
     65	if (!(current->flags & PF_NOFREEZE))
     66		debug_check_no_locks_held();
     67	return try_to_freeze_unsafe();
     68}
     69
     70extern bool freeze_task(struct task_struct *p);
     71extern bool set_freezable(void);
     72
     73#ifdef CONFIG_CGROUP_FREEZER
     74extern bool cgroup_freezing(struct task_struct *task);
     75#else /* !CONFIG_CGROUP_FREEZER */
     76static inline bool cgroup_freezing(struct task_struct *task)
     77{
     78	return false;
     79}
     80#endif /* !CONFIG_CGROUP_FREEZER */
     81
     82/*
     83 * The PF_FREEZER_SKIP flag should be set by a vfork parent right before it
     84 * calls wait_for_completion(&vfork) and reset right after it returns from this
     85 * function.  Next, the parent should call try_to_freeze() to freeze itself
     86 * appropriately in case the child has exited before the freezing of tasks is
     87 * complete.  However, we don't want kernel threads to be frozen in unexpected
     88 * places, so we allow them to block freeze_processes() instead or to set
     89 * PF_NOFREEZE if needed. Fortunately, in the ____call_usermodehelper() case the
     90 * parent won't really block freeze_processes(), since ____call_usermodehelper()
     91 * (the child) does a little before exec/exit and it can't be frozen before
     92 * waking up the parent.
     93 */
     94
     95
     96/**
     97 * freezer_do_not_count - tell freezer to ignore %current
     98 *
     99 * Tell freezers to ignore the current task when determining whether the
    100 * target frozen state is reached.  IOW, the current task will be
    101 * considered frozen enough by freezers.
    102 *
    103 * The caller shouldn't do anything which isn't allowed for a frozen task
    104 * until freezer_cont() is called.  Usually, freezer[_do_not]_count() pair
    105 * wrap a scheduling operation and nothing much else.
    106 */
    107static inline void freezer_do_not_count(void)
    108{
    109	current->flags |= PF_FREEZER_SKIP;
    110}
    111
    112/**
    113 * freezer_count - tell freezer to stop ignoring %current
    114 *
    115 * Undo freezer_do_not_count().  It tells freezers that %current should be
    116 * considered again and tries to freeze if freezing condition is already in
    117 * effect.
    118 */
    119static inline void freezer_count(void)
    120{
    121	current->flags &= ~PF_FREEZER_SKIP;
    122	/*
    123	 * If freezing is in progress, the following paired with smp_mb()
    124	 * in freezer_should_skip() ensures that either we see %true
    125	 * freezing() or freezer_should_skip() sees !PF_FREEZER_SKIP.
    126	 */
    127	smp_mb();
    128	try_to_freeze();
    129}
    130
    131/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
    132static inline void freezer_count_unsafe(void)
    133{
    134	current->flags &= ~PF_FREEZER_SKIP;
    135	smp_mb();
    136	try_to_freeze_unsafe();
    137}
    138
    139/**
    140 * freezer_should_skip - whether to skip a task when determining frozen
    141 *			 state is reached
    142 * @p: task in quesion
    143 *
    144 * This function is used by freezers after establishing %true freezing() to
    145 * test whether a task should be skipped when determining the target frozen
    146 * state is reached.  IOW, if this function returns %true, @p is considered
    147 * frozen enough.
    148 */
    149static inline bool freezer_should_skip(struct task_struct *p)
    150{
    151	/*
    152	 * The following smp_mb() paired with the one in freezer_count()
    153	 * ensures that either freezer_count() sees %true freezing() or we
    154	 * see cleared %PF_FREEZER_SKIP and return %false.  This makes it
    155	 * impossible for a task to slip frozen state testing after
    156	 * clearing %PF_FREEZER_SKIP.
    157	 */
    158	smp_mb();
    159	return p->flags & PF_FREEZER_SKIP;
    160}
    161
    162/*
    163 * These functions are intended to be used whenever you want allow a sleeping
    164 * task to be frozen. Note that neither return any clear indication of
    165 * whether a freeze event happened while in this function.
    166 */
    167
    168/* Like schedule(), but should not block the freezer. */
    169static inline void freezable_schedule(void)
    170{
    171	freezer_do_not_count();
    172	schedule();
    173	freezer_count();
    174}
    175
    176/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
    177static inline void freezable_schedule_unsafe(void)
    178{
    179	freezer_do_not_count();
    180	schedule();
    181	freezer_count_unsafe();
    182}
    183
    184/*
    185 * Like schedule_timeout(), but should not block the freezer.  Do not
    186 * call this with locks held.
    187 */
    188static inline long freezable_schedule_timeout(long timeout)
    189{
    190	long __retval;
    191	freezer_do_not_count();
    192	__retval = schedule_timeout(timeout);
    193	freezer_count();
    194	return __retval;
    195}
    196
    197/*
    198 * Like schedule_timeout_interruptible(), but should not block the freezer.  Do not
    199 * call this with locks held.
    200 */
    201static inline long freezable_schedule_timeout_interruptible(long timeout)
    202{
    203	long __retval;
    204	freezer_do_not_count();
    205	__retval = schedule_timeout_interruptible(timeout);
    206	freezer_count();
    207	return __retval;
    208}
    209
    210/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
    211static inline long freezable_schedule_timeout_interruptible_unsafe(long timeout)
    212{
    213	long __retval;
    214
    215	freezer_do_not_count();
    216	__retval = schedule_timeout_interruptible(timeout);
    217	freezer_count_unsafe();
    218	return __retval;
    219}
    220
    221/* Like schedule_timeout_killable(), but should not block the freezer. */
    222static inline long freezable_schedule_timeout_killable(long timeout)
    223{
    224	long __retval;
    225	freezer_do_not_count();
    226	__retval = schedule_timeout_killable(timeout);
    227	freezer_count();
    228	return __retval;
    229}
    230
    231/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
    232static inline long freezable_schedule_timeout_killable_unsafe(long timeout)
    233{
    234	long __retval;
    235	freezer_do_not_count();
    236	__retval = schedule_timeout_killable(timeout);
    237	freezer_count_unsafe();
    238	return __retval;
    239}
    240
    241/*
    242 * Like schedule_hrtimeout_range(), but should not block the freezer.  Do not
    243 * call this with locks held.
    244 */
    245static inline int freezable_schedule_hrtimeout_range(ktime_t *expires,
    246		u64 delta, const enum hrtimer_mode mode)
    247{
    248	int __retval;
    249	freezer_do_not_count();
    250	__retval = schedule_hrtimeout_range(expires, delta, mode);
    251	freezer_count();
    252	return __retval;
    253}
    254
    255/*
    256 * Freezer-friendly wrappers around wait_event_interruptible(),
    257 * wait_event_killable() and wait_event_interruptible_timeout(), originally
    258 * defined in <linux/wait.h>
    259 */
    260
    261/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
    262#define wait_event_freezekillable_unsafe(wq, condition)			\
    263({									\
    264	int __retval;							\
    265	freezer_do_not_count();						\
    266	__retval = wait_event_killable(wq, (condition));		\
    267	freezer_count_unsafe();						\
    268	__retval;							\
    269})
    270
    271#else /* !CONFIG_FREEZER */
    272static inline bool frozen(struct task_struct *p) { return false; }
    273static inline bool freezing(struct task_struct *p) { return false; }
    274static inline void __thaw_task(struct task_struct *t) {}
    275
    276static inline bool __refrigerator(bool check_kthr_stop) { return false; }
    277static inline int freeze_processes(void) { return -ENOSYS; }
    278static inline int freeze_kernel_threads(void) { return -ENOSYS; }
    279static inline void thaw_processes(void) {}
    280static inline void thaw_kernel_threads(void) {}
    281
    282static inline bool try_to_freeze(void) { return false; }
    283
    284static inline void freezer_do_not_count(void) {}
    285static inline void freezer_count(void) {}
    286static inline int freezer_should_skip(struct task_struct *p) { return 0; }
    287static inline void set_freezable(void) {}
    288
    289#define freezable_schedule()  schedule()
    290
    291#define freezable_schedule_unsafe()  schedule()
    292
    293#define freezable_schedule_timeout(timeout)  schedule_timeout(timeout)
    294
    295#define freezable_schedule_timeout_interruptible(timeout)		\
    296	schedule_timeout_interruptible(timeout)
    297
    298#define freezable_schedule_timeout_interruptible_unsafe(timeout)	\
    299	schedule_timeout_interruptible(timeout)
    300
    301#define freezable_schedule_timeout_killable(timeout)			\
    302	schedule_timeout_killable(timeout)
    303
    304#define freezable_schedule_timeout_killable_unsafe(timeout)		\
    305	schedule_timeout_killable(timeout)
    306
    307#define freezable_schedule_hrtimeout_range(expires, delta, mode)	\
    308	schedule_hrtimeout_range(expires, delta, mode)
    309
    310#define wait_event_freezekillable_unsafe(wq, condition)			\
    311		wait_event_killable(wq, condition)
    312
    313#endif /* !CONFIG_FREEZER */
    314
    315#endif	/* FREEZER_H_INCLUDED */