cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

stats.h (9178B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _KERNEL_STATS_H
#define _KERNEL_STATS_H

#ifdef CONFIG_SCHEDSTATS

extern struct static_key_false sched_schedstats;

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_cpu_time += delta;
}

static inline void
rq_sched_info_dequeue(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}
#define   schedstat_enabled()		static_branch_unlikely(&sched_schedstats)
#define __schedstat_inc(var)		do { var++; } while (0)
#define   schedstat_inc(var)		do { if (schedstat_enabled()) { var++; } } while (0)
#define __schedstat_add(var, amt)	do { var += (amt); } while (0)
#define   schedstat_add(var, amt)	do { if (schedstat_enabled()) { var += (amt); } } while (0)
#define __schedstat_set(var, val)	do { var = (val); } while (0)
#define   schedstat_set(var, val)	do { if (schedstat_enabled()) { var = (val); } } while (0)
#define   schedstat_val(var)		(var)
#define   schedstat_val_or_zero(var)	((schedstat_enabled()) ? (var) : 0)

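/*
 * Illustrative sketch, not part of the original file: the plain wrappers
 * embed the schedstat_enabled() static-key test, while the double-underscore
 * variants update unconditionally and are meant for paths that have already
 * taken that branch. The helper below is hypothetical, loosely modeled on
 * what an update_stats_wait_end()-style caller might do.
 */
#if 0	/* example only */
static void example_account_wait(struct sched_statistics *stats, u64 delta)
{
	if (!schedstat_enabled())
		return;

	/* Inside the guarded path the unchecked variants avoid re-testing
	 * the static key for every field update. */
	__schedstat_set(stats->wait_max,
			max(schedstat_val(stats->wait_max), delta));
	__schedstat_inc(stats->wait_count);
	__schedstat_add(stats->wait_sum, delta);
}
#endif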
void __update_stats_wait_start(struct rq *rq, struct task_struct *p,
			       struct sched_statistics *stats);

void __update_stats_wait_end(struct rq *rq, struct task_struct *p,
			     struct sched_statistics *stats);
void __update_stats_enqueue_sleeper(struct rq *rq, struct task_struct *p,
				    struct sched_statistics *stats);

static inline void
check_schedstat_required(void)
{
	if (schedstat_enabled())
		return;

	/* Force schedstat enabled if a dependent tracepoint is active */
	if (trace_sched_stat_wait_enabled()    ||
	    trace_sched_stat_sleep_enabled()   ||
	    trace_sched_stat_iowait_enabled()  ||
	    trace_sched_stat_blocked_enabled() ||
	    trace_sched_stat_runtime_enabled())
		printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, stat_blocked and stat_runtime require the kernel parameter schedstats=enable or kernel.sched_schedstats=1\n");
}

#else /* !CONFIG_SCHEDSTATS: */

static inline void rq_sched_info_arrive  (struct rq *rq, unsigned long long delta) { }
static inline void rq_sched_info_dequeue(struct rq *rq, unsigned long long delta) { }
static inline void rq_sched_info_depart  (struct rq *rq, unsigned long long delta) { }
# define   schedstat_enabled()		0
# define __schedstat_inc(var)		do { } while (0)
# define   schedstat_inc(var)		do { } while (0)
# define __schedstat_add(var, amt)	do { } while (0)
# define   schedstat_add(var, amt)	do { } while (0)
# define __schedstat_set(var, val)	do { } while (0)
# define   schedstat_set(var, val)	do { } while (0)
# define   schedstat_val(var)		0
# define   schedstat_val_or_zero(var)	0

# define __update_stats_wait_start(rq, p, stats)       do { } while (0)
# define __update_stats_wait_end(rq, p, stats)         do { } while (0)
# define __update_stats_enqueue_sleeper(rq, p, stats)  do { } while (0)
# define check_schedstat_required()                    do { } while (0)

#endif /* CONFIG_SCHEDSTATS */

#ifdef CONFIG_FAIR_GROUP_SCHED
struct sched_entity_stats {
	struct sched_entity     se;
	struct sched_statistics stats;
} __no_randomize_layout;
#endif

static inline struct sched_statistics *
__schedstats_from_se(struct sched_entity *se)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
	if (!entity_is_task(se))
		return &container_of(se, struct sched_entity_stats, se)->stats;
#endif
	return &task_of(se)->stats;
}

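/*
 * Illustrative sketch, not part of the original file: with
 * CONFIG_FAIR_GROUP_SCHED a group scheduling entity is allocated as part of
 * a struct sched_entity_stats, so __schedstats_from_se() can recover the
 * statistics via container_of(); a task's entity keeps its statistics in the
 * task_struct instead. The allocation helper below is hypothetical.
 */
#if 0	/* example only */
static struct sched_entity *example_alloc_group_se(void)
{
	struct sched_entity_stats *ses;

	ses = kzalloc(sizeof(*ses), GFP_KERNEL);
	if (!ses)
		return NULL;

	/* &ses->se is passed around as an ordinary sched_entity; later,
	 * __schedstats_from_se(&ses->se) yields &ses->stats. */
	return &ses->se;
}
#endif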
#ifdef CONFIG_PSI
/*
 * PSI tracks state that persists across sleeps, such as iowaits and
 * memory stalls. As a result, it has to distinguish between sleeps,
 * where a task's runnable state changes, and requeues, where a task
 * and its state are being moved between CPUs and runqueues.
 */
static inline void psi_enqueue(struct task_struct *p, bool wakeup)
{
	int clear = 0, set = TSK_RUNNING;

	if (static_branch_likely(&psi_disabled))
		return;

	if (p->in_memstall)
		set |= TSK_MEMSTALL_RUNNING;

	if (!wakeup || p->sched_psi_wake_requeue) {
		if (p->in_memstall)
			set |= TSK_MEMSTALL;
		if (p->sched_psi_wake_requeue)
			p->sched_psi_wake_requeue = 0;
	} else {
		if (p->in_iowait)
			clear |= TSK_IOWAIT;
	}

	psi_task_change(p, clear, set);
}

static inline void psi_dequeue(struct task_struct *p, bool sleep)
{
	int clear = TSK_RUNNING;

	if (static_branch_likely(&psi_disabled))
		return;

	/*
	 * A voluntary sleep is a dequeue followed by a task switch. To
	 * avoid walking all ancestors twice, psi_task_switch() handles
	 * TSK_RUNNING and TSK_IOWAIT for us when it moves TSK_ONCPU.
	 * Do nothing here.
	 */
	if (sleep)
		return;

	if (p->in_memstall)
		clear |= (TSK_MEMSTALL | TSK_MEMSTALL_RUNNING);

	psi_task_change(p, clear, 0);
}

static inline void psi_ttwu_dequeue(struct task_struct *p)
{
	if (static_branch_likely(&psi_disabled))
		return;
	/*
	 * Is the task being migrated during a wakeup? Make sure to
	 * deregister its sleep-persistent psi states from the old
	 * queue, and let psi_enqueue() know it has to requeue.
	 */
	if (unlikely(p->in_iowait || p->in_memstall)) {
		struct rq_flags rf;
		struct rq *rq;
		int clear = 0;

		if (p->in_iowait)
			clear |= TSK_IOWAIT;
		if (p->in_memstall)
			clear |= TSK_MEMSTALL;

		rq = __task_rq_lock(p, &rf);
		psi_task_change(p, clear, 0);
		p->sched_psi_wake_requeue = 1;
		__task_rq_unlock(rq, &rf);
	}
}

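/*
 * Illustrative sketch, not part of the original file: psi_ttwu_dequeue()
 * is relevant when a wakeup migrates the task to another CPU, so its
 * sleep-persistent iowait/memstall state has to be pulled off the old
 * runqueue before the task is moved. The helper below is hypothetical and
 * only approximates what the wakeup path is expected to do.
 */
#if 0	/* example only */
static void example_wakeup_migrate(struct task_struct *p, int new_cpu)
{
	if (task_cpu(p) != new_cpu) {
		/* Clear PSI state on the old rq; psi_enqueue() will then
		 * re-establish it on the new rq via the requeue flag. */
		psi_ttwu_dequeue(p);
		set_task_cpu(p, new_cpu);
	}
}
#endif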
static inline void psi_sched_switch(struct task_struct *prev,
				    struct task_struct *next,
				    bool sleep)
{
	if (static_branch_likely(&psi_disabled))
		return;

	psi_task_switch(prev, next, sleep);
}

#else /* CONFIG_PSI */
static inline void psi_enqueue(struct task_struct *p, bool wakeup) {}
static inline void psi_dequeue(struct task_struct *p, bool sleep) {}
static inline void psi_ttwu_dequeue(struct task_struct *p) {}
static inline void psi_sched_switch(struct task_struct *prev,
				    struct task_struct *next,
				    bool sleep) {}
#endif /* CONFIG_PSI */

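/*
 * Illustrative sketch, not part of the original file: roughly how the core
 * enqueue/dequeue paths are expected to drive the PSI and sched_info hooks
 * in this header. The helper names are hypothetical and simplified.
 */
#if 0	/* example only */
static void example_enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
	sched_info_enqueue(rq, p);
	/* Tell PSI the task became runnable; a plain requeue is not a wakeup. */
	psi_enqueue(p, flags & ENQUEUE_WAKEUP);
}

static void example_dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
	sched_info_dequeue(rq, p);
	/* A voluntary sleep is finished off later by psi_task_switch(). */
	psi_dequeue(p, flags & DEQUEUE_SLEEP);
}
#endif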
#ifdef CONFIG_SCHED_INFO
/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a CPU. We call this routine
 * from dequeue_task() to account for possible rq->clock skew across CPUs: the
 * delta taken on each CPU would annul the skew.
 */
static inline void sched_info_dequeue(struct rq *rq, struct task_struct *t)
{
	unsigned long long delta = 0;

	if (!t->sched_info.last_queued)
		return;

	delta = rq_clock(rq) - t->sched_info.last_queued;
	t->sched_info.last_queued = 0;
	t->sched_info.run_delay += delta;

	rq_sched_info_dequeue(rq, delta);
}

/*
 * Called when a task finally hits the CPU.  We can now calculate how
 * long it was waiting to run.  We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
	unsigned long long now, delta = 0;

	if (!t->sched_info.last_queued)
		return;

	now = rq_clock(rq);
	delta = now - t->sched_info.last_queued;
	t->sched_info.last_queued = 0;
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(rq, delta);
}

/*
 * This function is only called from enqueue_task(), but it only updates
 * the timestamp if it is not already set.  It's assumed that
 * sched_info_dequeue() will clear that stamp when appropriate.
 */
static inline void sched_info_enqueue(struct rq *rq, struct task_struct *t)
{
	if (!t->sched_info.last_queued)
		t->sched_info.last_queued = rq_clock(rq);
}

/*
 * Called when a process ceases being the active-running process involuntarily
 * due, typically, to expiring its time slice (this may also be called when
 * switching to the idle task).  Now we can calculate how long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_enqueue() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
	unsigned long long delta = rq_clock(rq) - t->sched_info.last_arrival;

	rq_sched_info_depart(rq, delta);

	if (task_is_running(t))
		sched_info_enqueue(rq, t);
}

/*
 * Called when tasks are switched involuntarily due, typically, to expiring
 * their time slice.  (This may also be called when switching to or from
 * the idle task.)  We are only called when prev != next.
 */
static inline void
sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
	/*
	 * prev now departs the CPU.  It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(rq, prev);

	if (next != rq->idle)
		sched_info_arrive(rq, next);
}

#else /* !CONFIG_SCHED_INFO: */
# define sched_info_enqueue(rq, t)	do { } while (0)
# define sched_info_dequeue(rq, t)	do { } while (0)
# define sched_info_switch(rq, t, next)	do { } while (0)
#endif /* CONFIG_SCHED_INFO */
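/*
 * Illustrative sketch, not part of the original file: sched_info_switch()
 * and psi_sched_switch() are driven from the context-switch path once a
 * different task has been picked. The helper below is hypothetical; the
 * "sleep" argument follows the convention that a prev which is no longer
 * queued is going to sleep voluntarily.
 */
#if 0	/* example only */
static void example_switch_accounting(struct rq *rq,
				      struct task_struct *prev,
				      struct task_struct *next)
{
	/* Only meaningful when prev != next; idle is filtered inside. */
	sched_info_switch(rq, prev, next);
	psi_sched_switch(prev, next, !task_on_rq_queued(prev));
}
#endif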

#endif /* _KERNEL_STATS_H */