cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

irqflags.h (2785B)


      1/* SPDX-License-Identifier: GPL-2.0 */
      2#ifndef _X86_IRQFLAGS_H_
      3#define _X86_IRQFLAGS_H_
      4
      5#include <asm/processor-flags.h>
      6
      7#ifndef __ASSEMBLY__
      8
      9#include <asm/nospec-branch.h>
     10
     11/* Provide __cpuidle; we can't safely include <linux/cpu.h> */
     12#define __cpuidle __section(".cpuidle.text")
     13
     14/*
     15 * Interrupt control:
     16 */
     17
/* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */
extern inline unsigned long native_save_fl(void);
/*
 * Return the raw EFLAGS register value.
 *
 * EFLAGS is read by pushing it onto the stack with "pushf" and popping
 * the saved copy into @flags; the "memory" clobber keeps the compiler
 * from reordering memory accesses across the read.
 */
extern __always_inline unsigned long native_save_fl(void)
{
	unsigned long flags;

	/*
	 * "=rm" is safe here, because "pop" adjusts the stack before
	 * it evaluates its effective address -- this is part of the
	 * documented behavior of the "pop" instruction.
	 */
	asm volatile("# __raw_save_flags\n\t"
		     "pushf ; pop %0"
		     : "=rm" (flags)
		     : /* no input */
		     : "memory");

	return flags;
}
     37
/*
 * Disable maskable interrupts on this CPU via "cli".  The "memory"
 * clobber acts as a compiler barrier so accesses are not moved across
 * the interrupt-state change.
 */
static __always_inline void native_irq_disable(void)
{
	asm volatile("cli": : :"memory");
}
     42
/*
 * Enable maskable interrupts on this CPU via "sti".  The "memory"
 * clobber acts as a compiler barrier, matching native_irq_disable().
 */
static __always_inline void native_irq_enable(void)
{
	asm volatile("sti": : :"memory");
}
     47
/*
 * Enable interrupts and halt in one asm statement.  Keeping "sti; hlt"
 * together means no interrupt can be serviced between the two
 * instructions (sti takes one instruction cycle to complete, see the
 * arch_safe_halt() comment below in this file), so a wakeup event
 * cannot be lost before hlt executes.
 */
static inline __cpuidle void native_safe_halt(void)
{
	/*
	 * NOTE(review): assumed to flush microarchitectural buffers as an
	 * MDS mitigation before idling, per its name -- confirm against
	 * its definition in <asm/nospec-branch.h>.
	 */
	mds_idle_clear_cpu_buffers();
	asm volatile("sti; hlt": : :"memory");
}
     53
/*
 * Halt the CPU without touching the interrupt flag: if interrupts are
 * disabled when this runs, only a non-maskable event resumes execution.
 */
static inline __cpuidle void native_halt(void)
{
	/* See the note in native_safe_halt(): assumed MDS buffer clear. */
	mds_idle_clear_cpu_buffers();
	asm volatile("hlt": : :"memory");
}
     59
     60#endif
     61
     62#ifdef CONFIG_PARAVIRT_XXL
     63#include <asm/paravirt.h>
     64#else
     65#ifndef __ASSEMBLY__
     66#include <linux/types.h>
     67
/* Non-paravirt build: read EFLAGS directly via native_save_fl(). */
static __always_inline unsigned long arch_local_save_flags(void)
{
	return native_save_fl();
}
     72
/* Non-paravirt build: disable interrupts directly with "cli". */
static __always_inline void arch_local_irq_disable(void)
{
	native_irq_disable();
}
     77
/* Non-paravirt build: enable interrupts directly with "sti". */
static __always_inline void arch_local_irq_enable(void)
{
	native_irq_enable();
}
     82
/*
 * Used in the idle loop; sti takes one instruction cycle
 * to complete:
 *
 * (Non-paravirt build: forwards to the native "sti; hlt" sequence.)
 */
static inline __cpuidle void arch_safe_halt(void)
{
	native_safe_halt();
}
     91
/*
 * Used when interrupts are already enabled or to
 * shutdown the processor:
 *
 * (Non-paravirt build: forwards to the native "hlt"; the interrupt
 * flag is left untouched.)
 */
static inline __cpuidle void halt(void)
{
	native_halt();
}
    100
    101/*
    102 * For spinlocks, etc:
    103 */
    104static __always_inline unsigned long arch_local_irq_save(void)
    105{
    106	unsigned long flags = arch_local_save_flags();
    107	arch_local_irq_disable();
    108	return flags;
    109}
    110#else
    111
    112#ifdef CONFIG_X86_64
    113#ifdef CONFIG_DEBUG_ENTRY
    114#define SAVE_FLAGS		pushfq; popq %rax
    115#endif
    116
    117#endif
    118
    119#endif /* __ASSEMBLY__ */
    120#endif /* CONFIG_PARAVIRT_XXL */
    121
    122#ifndef __ASSEMBLY__
    123static __always_inline int arch_irqs_disabled_flags(unsigned long flags)
    124{
    125	return !(flags & X86_EFLAGS_IF);
    126}
    127
    128static __always_inline int arch_irqs_disabled(void)
    129{
    130	unsigned long flags = arch_local_save_flags();
    131
    132	return arch_irqs_disabled_flags(flags);
    133}
    134
    135static __always_inline void arch_local_irq_restore(unsigned long flags)
    136{
    137	if (!arch_irqs_disabled_flags(flags))
    138		arch_local_irq_enable();
    139}
    140#endif /* !__ASSEMBLY__ */
    141
    142#endif