cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

clock.h (2506B)


      1/* SPDX-License-Identifier: GPL-2.0 */
      2#ifndef _LINUX_SCHED_CLOCK_H
      3#define _LINUX_SCHED_CLOCK_H
      4
      5#include <linux/smp.h>
      6
      7/*
      8 * Do not use outside of architecture code which knows its limitations.
      9 *
     10 * sched_clock() has no promise of monotonicity or bounded drift between
     11 * CPUs, use (which you should not) requires disabling IRQs.
     12 *
     13 * Please use one of the three interfaces below.
     14 */
     15extern unsigned long long notrace sched_clock(void);
     16
     17/*
     18 * See the comment in kernel/sched/clock.c
     19 */
     20extern u64 running_clock(void);
     21extern u64 sched_clock_cpu(int cpu);
     22
     23
     24extern void sched_clock_init(void);
     25
     26#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
/*
 * No-op: without CONFIG_HAVE_UNSTABLE_SCHED_CLOCK there is no per-tick
 * clock state to maintain.
 */
static inline void sched_clock_tick(void)
{
}
     30
/* No-op: no stable-clock state exists to clear in this configuration. */
static inline void clear_sched_clock_stable(void)
{
}
     34
/* No-op: no unstable-clock bookkeeping is required on entering idle. */
static inline void sched_clock_idle_sleep_event(void)
{
}
     38
/* No-op: no unstable-clock bookkeeping is required on leaving idle. */
static inline void sched_clock_idle_wakeup_event(void)
{
}
     42
     43static inline u64 cpu_clock(int cpu)
     44{
     45	return sched_clock();
     46}
     47
     48static inline u64 local_clock(void)
     49{
     50	return sched_clock();
     51}
     52#else
     53extern int sched_clock_stable(void);
     54extern void clear_sched_clock_stable(void);
     55
     56/*
     57 * When sched_clock_stable(), __sched_clock_offset provides the offset
     58 * between local_clock() and sched_clock().
     59 */
     60extern u64 __sched_clock_offset;
     61
     62extern void sched_clock_tick(void);
     63extern void sched_clock_tick_stable(void);
     64extern void sched_clock_idle_sleep_event(void);
     65extern void sched_clock_idle_wakeup_event(void);
     66
     67/*
     68 * As outlined in clock.c, provides a fast, high resolution, nanosecond
     69 * time source that is monotonic per cpu argument and has bounded drift
     70 * between cpus.
     71 *
     72 * ######################### BIG FAT WARNING ##########################
     73 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
     74 * # go backwards !!                                                  #
     75 * ####################################################################
     76 */
     77static inline u64 cpu_clock(int cpu)
     78{
     79	return sched_clock_cpu(cpu);
     80}
     81
     82static inline u64 local_clock(void)
     83{
     84	return sched_clock_cpu(raw_smp_processor_id());
     85}
     86#endif
     87
     88#ifdef CONFIG_IRQ_TIME_ACCOUNTING
     89/*
     90 * An i/f to runtime opt-in for irq time accounting based off of sched_clock.
     91 * The reason for this explicit opt-in is not to have perf penalty with
     92 * slow sched_clocks.
     93 */
     94extern void enable_sched_clock_irqtime(void);
     95extern void disable_sched_clock_irqtime(void);
     96#else
/* Stubs: irq time accounting is compiled out without CONFIG_IRQ_TIME_ACCOUNTING. */
static inline void enable_sched_clock_irqtime(void) {}
static inline void disable_sched_clock_irqtime(void) {}
     99#endif
    100
    101#endif /* _LINUX_SCHED_CLOCK_H */