cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

vtime.h (4709B)


      1/* SPDX-License-Identifier: GPL-2.0 */
      2#ifndef _LINUX_KERNEL_VTIME_H
      3#define _LINUX_KERNEL_VTIME_H
      4
      5#include <linux/context_tracking_state.h>
      6#include <linux/sched.h>
      7
      8#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
      9#include <asm/vtime.h>
     10#endif
     11
     12/*
     13 * Common vtime APIs
     14 */
     15#ifdef CONFIG_VIRT_CPU_ACCOUNTING
     16extern void vtime_account_kernel(struct task_struct *tsk);
     17extern void vtime_account_idle(struct task_struct *tsk);
#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
     19
     20#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
     21extern void arch_vtime_task_switch(struct task_struct *tsk);
     22extern void vtime_user_enter(struct task_struct *tsk);
     23extern void vtime_user_exit(struct task_struct *tsk);
     24extern void vtime_guest_enter(struct task_struct *tsk);
     25extern void vtime_guest_exit(struct task_struct *tsk);
     26extern void vtime_init_idle(struct task_struct *tsk, int cpu);
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_GEN */
/* CONFIG_VIRT_CPU_ACCOUNTING_GEN disabled: all generic vtime hooks are no-ops. */
static inline void vtime_user_enter(struct task_struct *tsk) { }
static inline void vtime_user_exit(struct task_struct *tsk) { }
static inline void vtime_guest_enter(struct task_struct *tsk) { }
static inline void vtime_guest_exit(struct task_struct *tsk) { }
static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { }
     33#endif
     34
     35#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
     36extern void vtime_account_irq(struct task_struct *tsk, unsigned int offset);
     37extern void vtime_account_softirq(struct task_struct *tsk);
     38extern void vtime_account_hardirq(struct task_struct *tsk);
     39extern void vtime_flush(struct task_struct *tsk);
     40#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
/* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE disabled: IRQ/flush vtime hooks are no-ops. */
static inline void vtime_account_irq(struct task_struct *tsk, unsigned int offset) { }
static inline void vtime_account_softirq(struct task_struct *tsk) { }
static inline void vtime_account_hardirq(struct task_struct *tsk) { }
static inline void vtime_flush(struct task_struct *tsk) { }
     45#endif
     46
/*
 * vtime_accounting_enabled_this_cpu(), vtime_task_switch() and guest
 * enter/exit definitions/declarations
 */
     50#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE)
     51
/* Native accounting is unconditional: vtime is always enabled on this CPU. */
static inline bool vtime_accounting_enabled_this_cpu(void) { return true; }
extern void vtime_task_switch(struct task_struct *prev);
     54
static __always_inline void vtime_account_guest_enter(void)
{
	/*
	 * Flush the kernel time accumulated so far before flagging the
	 * task; note the order: accounting first, then set PF_VCPU so
	 * the flushed slice is attributed as kernel time, not guest.
	 */
	vtime_account_kernel(current);
	/* Mark this task as currently running a vCPU (guest context). */
	current->flags |= PF_VCPU;
}
     60
static __always_inline void vtime_account_guest_exit(void)
{
	/*
	 * Flush the elapsed slice while PF_VCPU is still set, then clear
	 * the flag — mirror image of vtime_account_guest_enter().
	 */
	vtime_account_kernel(current);
	current->flags &= ~PF_VCPU;
}
     66
     67#elif defined(CONFIG_VIRT_CPU_ACCOUNTING_GEN)
     68
     69/*
     70 * Checks if vtime is enabled on some CPU. Cputime readers want to be careful
     71 * in that case and compute the tickless cputime.
     72 * For now vtime state is tied to context tracking. We might want to decouple
     73 * those later if necessary.
     74 */
/* vtime is considered enabled wherever context tracking is enabled. */
static inline bool vtime_accounting_enabled(void)
{
	return context_tracking_enabled();
}
     79
/* Per-CPU variant: vtime state on @cpu follows its context tracking state. */
static inline bool vtime_accounting_enabled_cpu(int cpu)
{
	return context_tracking_enabled_cpu(cpu);
}
     84
/* Local-CPU variant: vtime state here follows this CPU's context tracking. */
static inline bool vtime_accounting_enabled_this_cpu(void)
{
	return context_tracking_enabled_this_cpu();
}
     89
     90extern void vtime_task_switch_generic(struct task_struct *prev);
     91
     92static inline void vtime_task_switch(struct task_struct *prev)
     93{
     94	if (vtime_accounting_enabled_this_cpu())
     95		vtime_task_switch_generic(prev);
     96}
     97
static __always_inline void vtime_account_guest_enter(void)
{
	/*
	 * When generic accounting is live on this CPU, take the full
	 * guest-enter path (which also sets PF_VCPU — presumably, since
	 * the exit path below only clears it in the else branch; the
	 * implementation lives in vtime_guest_enter()). Otherwise just
	 * record the guest-running state via the flag.
	 */
	if (vtime_accounting_enabled_this_cpu())
		vtime_guest_enter(current);
	else
		current->flags |= PF_VCPU;
}
    105
static __always_inline void vtime_account_guest_exit(void)
{
	/*
	 * Mirror of vtime_account_guest_enter(): full exit path when
	 * accounting is live on this CPU, otherwise just drop PF_VCPU.
	 */
	if (vtime_accounting_enabled_this_cpu())
		vtime_guest_exit(current);
	else
		current->flags &= ~PF_VCPU;
}
    113
    114#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
    115
/* No CONFIG_VIRT_CPU_ACCOUNTING: vtime is never enabled, task switch is a no-op. */
static inline bool vtime_accounting_enabled_this_cpu(void) { return false; }
static inline void vtime_task_switch(struct task_struct *prev) { }
    118
static __always_inline void vtime_account_guest_enter(void)
{
	/* No vtime accounting configured: only track the guest-running state. */
	current->flags |= PF_VCPU;
}
    123
static __always_inline void vtime_account_guest_exit(void)
{
	/* No vtime accounting configured: only clear the guest-running state. */
	current->flags &= ~PF_VCPU;
}
    128
    129#endif
    130
    131
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
extern void irqtime_account_irq(struct task_struct *tsk, unsigned int offset);
#else
/* IRQ time accounting disabled: stub out the hook. */
static inline void irqtime_account_irq(struct task_struct *tsk, unsigned int offset) { }
#endif
    137
/* Notify both vtime and IRQ-time accounting that softirq processing begins. */
static inline void account_softirq_enter(struct task_struct *tsk)
{
	vtime_account_irq(tsk, SOFTIRQ_OFFSET);
	irqtime_account_irq(tsk, SOFTIRQ_OFFSET);
}
    143
/*
 * Softirq done: account the softirq time just spent. The 0 offset passed
 * to irqtime_account_irq() contrasts with SOFTIRQ_OFFSET on entry —
 * presumably it denotes "leaving IRQ context"; confirm in its definition.
 */
static inline void account_softirq_exit(struct task_struct *tsk)
{
	vtime_account_softirq(tsk);
	irqtime_account_irq(tsk, 0);
}
    149
/* Notify both vtime and IRQ-time accounting that hardirq processing begins. */
static inline void account_hardirq_enter(struct task_struct *tsk)
{
	vtime_account_irq(tsk, HARDIRQ_OFFSET);
	irqtime_account_irq(tsk, HARDIRQ_OFFSET);
}
    155
/*
 * Hardirq done: account the hardirq time just spent; 0 offset on exit,
 * mirroring account_softirq_exit() above.
 */
static inline void account_hardirq_exit(struct task_struct *tsk)
{
	vtime_account_hardirq(tsk);
	irqtime_account_irq(tsk, 0);
}
    161
    162#endif /* _LINUX_KERNEL_VTIME_H */