cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

stats.c (5620B)


// SPDX-License-Identifier: GPL-2.0
/*
 * /proc/schedstat implementation
 */

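/*
 * Wait-time bookkeeping: __update_stats_wait_start() stamps the rq
 * clock when a task starts waiting, and __update_stats_wait_end()
 * turns that stamp into a delta. Across a migration, wait_start
 * temporarily holds the wait time accumulated so far (see
 * __update_stats_wait_end() below), so the subtraction here rewinds
 * the fresh time stamp by that amount and the pre-migration wait is
 * folded into the next measured interval.
 */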
void __update_stats_wait_start(struct rq *rq, struct task_struct *p,
			       struct sched_statistics *stats)
{
	u64 wait_start, prev_wait_start;

	wait_start = rq_clock(rq);
	prev_wait_start = schedstat_val(stats->wait_start);

	if (p && likely(wait_start > prev_wait_start))
		wait_start -= prev_wait_start;

	__schedstat_set(stats->wait_start, wait_start);
}

void __update_stats_wait_end(struct rq *rq, struct task_struct *p,
			     struct sched_statistics *stats)
{
	u64 delta = rq_clock(rq) - schedstat_val(stats->wait_start);

	if (p) {
		if (task_on_rq_migrating(p)) {
			/*
			 * Preserve migrating task's wait time so wait_start
			 * time stamp can be adjusted to accumulate wait time
			 * prior to migration.
			 */
			__schedstat_set(stats->wait_start, delta);

			return;
		}

		trace_sched_stat_wait(p, delta);
	}

	__schedstat_set(stats->wait_max,
			max(schedstat_val(stats->wait_max), delta));
	__schedstat_inc(stats->wait_count);
	__schedstat_add(stats->wait_sum, delta);
	__schedstat_set(stats->wait_start, 0);
}

void __update_stats_enqueue_sleeper(struct rq *rq, struct task_struct *p,
				    struct sched_statistics *stats)
{
	u64 sleep_start, block_start;

	sleep_start = schedstat_val(stats->sleep_start);
	block_start = schedstat_val(stats->block_start);

	if (sleep_start) {
		u64 delta = rq_clock(rq) - sleep_start;

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > schedstat_val(stats->sleep_max)))
			__schedstat_set(stats->sleep_max, delta);

		__schedstat_set(stats->sleep_start, 0);
		__schedstat_add(stats->sum_sleep_runtime, delta);

		if (p) {
			account_scheduler_latency(p, delta >> 10, 1);
			trace_sched_stat_sleep(p, delta);
		}
	}

	if (block_start) {
		u64 delta = rq_clock(rq) - block_start;

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > schedstat_val(stats->block_max)))
			__schedstat_set(stats->block_max, delta);

		__schedstat_set(stats->block_start, 0);
		__schedstat_add(stats->sum_sleep_runtime, delta);
		__schedstat_add(stats->sum_block_runtime, delta);

		if (p) {
			if (p->in_iowait) {
				__schedstat_add(stats->iowait_sum, delta);
				__schedstat_inc(stats->iowait_count);
				trace_sched_stat_iowait(p, delta);
			}

			trace_sched_stat_blocked(p, delta);

			/*
			 * Blocking time is in units of nanosecs, so shift by
			 * 20 to get a milliseconds-range estimation of the
			 * amount of time that the task spent sleeping:
			 */
			if (unlikely(prof_on == SLEEP_PROFILING)) {
				profile_hits(SLEEP_PROFILING,
					     (void *)get_wchan(p),
					     delta >> 20);
			}
			account_scheduler_latency(p, delta >> 10, 0);
		}
	}
}
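
/*
 * Unit arithmetic for the shifts above: rq_clock() advances in
 * nanoseconds, so delta >> 10 divides by 1024 (a cheap approximation
 * of microseconds for account_scheduler_latency()) and delta >> 20
 * divides by ~1.05 million (the milliseconds-range estimate fed to
 * profile_hits()). E.g. a 5 ms block is 5,000,000 ns:
 * 5000000 >> 20 = 4 and 5000000 >> 10 = 4882.
 */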

/*
 * Current schedstat API version.
 *
 * Bump this up when changing the output format or the meaning of an existing
 * format, so that tools can adapt (or abort)
 */
#define SCHEDSTAT_VERSION 15
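
/*
 * For orientation, a sketch of the output show_schedstat() below
 * produces (field values invented): a "version 15" line, a "timestamp
 * <jiffies>" line, then one line per online CPU of the form
 *
 *	cpu<N> yld_count 0 sched_count sched_goidle ttwu_count
 *	       ttwu_local rq_cpu_time run_delay pcount
 *
 * and, with CONFIG_SMP, one "domain<N> <cpumask>" line per sched
 * domain carrying eight lb_* counters for each cpu_idle_type followed
 * by the twelve alb_*/sbe_*/sbf_*/ttwu_* counters.
 */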

static int show_schedstat(struct seq_file *seq, void *v)
{
	int cpu;

	if (v == (void *)1) {
		seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
		seq_printf(seq, "timestamp %lu\n", jiffies);
	} else {
		struct rq *rq;
#ifdef CONFIG_SMP
		struct sched_domain *sd;
		int dcount = 0;
#endif
		cpu = (unsigned long)(v - 2);
		rq = cpu_rq(cpu);

		/* runqueue-specific stats */
		seq_printf(seq,
		    "cpu%d %u 0 %u %u %u %u %llu %llu %lu",
		    cpu, rq->yld_count,
		    rq->sched_count, rq->sched_goidle,
		    rq->ttwu_count, rq->ttwu_local,
		    rq->rq_cpu_time,
		    rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);

		seq_printf(seq, "\n");

#ifdef CONFIG_SMP
		/* domain-specific stats */
		rcu_read_lock();
		for_each_domain(cpu, sd) {
			enum cpu_idle_type itype;

			seq_printf(seq, "domain%d %*pb", dcount++,
				   cpumask_pr_args(sched_domain_span(sd)));
			for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
					itype++) {
				seq_printf(seq, " %u %u %u %u %u %u %u %u",
				    sd->lb_count[itype],
				    sd->lb_balanced[itype],
				    sd->lb_failed[itype],
				    sd->lb_imbalance[itype],
				    sd->lb_gained[itype],
				    sd->lb_hot_gained[itype],
				    sd->lb_nobusyq[itype],
				    sd->lb_nobusyg[itype]);
			}
			seq_printf(seq,
				   " %u %u %u %u %u %u %u %u %u %u %u %u\n",
			    sd->alb_count, sd->alb_failed, sd->alb_pushed,
			    sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
			    sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
			    sd->ttwu_wake_remote, sd->ttwu_move_affine,
			    sd->ttwu_move_balance);
		}
		rcu_read_unlock();
#endif
	}
	return 0;
}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is cpu 0.
 * In a hotplugged system some CPUs, including cpu 0, may be missing so we have
 * to use cpumask_* to iterate over the CPUs.
 */
static void *schedstat_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);

	return NULL;
}
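
/*
 * Worked example of the mapping above, assuming only CPUs 0 and 2 are
 * online: *offset == 0 returns the header token (void *)1; *offset == 1
 * takes the cpumask_first() branch and returns (void *)2 for cpu 0;
 * *offset == 2 calls cpumask_next(0, ...), skips the offline cpu 1,
 * stores 3 back into *offset and returns (void *)4 for cpu 2; the next
 * call finds no further online CPU (n == nr_cpu_ids) and ends the
 * sequence by returning NULL.
 */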

static void *schedstat_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;

	return schedstat_start(file, offset);
}

static void schedstat_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations schedstat_sops = {
	.start = schedstat_start,
	.next  = schedstat_next,
	.stop  = schedstat_stop,
	.show  = show_schedstat,
};
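
/*
 * These four callbacks implement the standard seq_file iterator
 * protocol: start/next walk the header-then-CPUs sequence, show prints
 * one record, and stop has nothing to clean up. proc_create_seq()
 * below registers them as /proc/schedstat.
 */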

static int __init proc_schedstat_init(void)
{
	proc_create_seq("schedstat", 0, NULL, &schedstat_sops);
	return 0;
}
subsys_initcall(proc_schedstat_init);
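
A usage sketch (not part of stats.c): from userspace the whole interface is an ordinary readable proc file, so a minimal reader, assuming a kernel built with CONFIG_SCHEDSTATS so that /proc/schedstat exists, could look like this:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char line[1024];
	FILE *f = fopen("/proc/schedstat", "r");

	if (!f) {
		perror("fopen /proc/schedstat");
		return EXIT_FAILURE;
	}

	/* Lines are "version ...", "timestamp ...", "cpu<N> ..." and,
	 * on SMP kernels, "domain<N> ..." as formatted by show_schedstat(). */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);

	fclose(f);
	return EXIT_SUCCESS;
}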