cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

hw_irq.h (12131B)


/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_HW_IRQ_H
#define _ASM_POWERPC_HW_IRQ_H

#ifdef __KERNEL__

#include <linux/errno.h>
#include <linux/compiler.h>
#include <asm/ptrace.h>
#include <asm/processor.h>

#ifdef CONFIG_PPC64

/*
 * PACA flags in paca->irq_happened.
 *
 * These bits are set when interrupts occur while soft-disabled
 * and allow a proper replay.
 *
 * PACA_IRQ_HARD_DIS is set whenever we hard disable. It is almost
 * always in sync with the MSR[EE] state, except:
 * - A window in interrupt entry, where hardware disables MSR[EE] and that
 *   must be "reconciled" with the soft mask state.
 * - NMI interrupts that hit in awkward places, until they fix the state.
 * - When local irqs are being enabled and state is being fixed up.
 * - When returning from an interrupt there are some windows where this
 *   can become out of sync, but gets fixed before the RFI or before
 *   executing the next user instruction (see arch/powerpc/kernel/interrupt.c).
 */
#define PACA_IRQ_HARD_DIS	0x01
#define PACA_IRQ_DBELL		0x02
#define PACA_IRQ_EE		0x04
#define PACA_IRQ_DEC		0x08 /* Or FIT */
#define PACA_IRQ_HMI		0x10
#define PACA_IRQ_PMI		0x20
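
/*
 * Illustrative sketch (editorial, not part of the original header): the
 * replay path tests and clears these bits individually once interrupts
 * are soft-enabled again, roughly:
 *
 *	if (local_paca->irq_happened & PACA_IRQ_DEC) {
 *		local_paca->irq_happened &= ~PACA_IRQ_DEC;
 *		... replay the decrementer interrupt ...
 *	}
 */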

/*
 * Some soft-masked interrupts must be hard masked until they are replayed
 * (e.g., because the soft-masked handler does not clear the exception).
 */
#ifdef CONFIG_PPC_BOOK3S
#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE|PACA_IRQ_PMI)
#else
#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE)
#endif

#endif /* CONFIG_PPC64 */

/*
 * flags for paca->irq_soft_mask
 */
#define IRQS_ENABLED		0
#define IRQS_DISABLED		1 /* local_irq_disable() interrupts */
#define IRQS_PMI_DISABLED	2
#define IRQS_ALL_DISABLED	(IRQS_DISABLED | IRQS_PMI_DISABLED)
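
/*
 * Editorial note: these are bit flags, so IRQS_ALL_DISABLED evaluates
 * to 0x3, and a nonzero paca->irq_soft_mask means at least one class
 * of interrupts is currently soft-masked.
 */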

#ifndef __ASSEMBLY__

static inline void __hard_irq_enable(void)
{
	if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
		wrtee(MSR_EE);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EIE);
	else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		__mtmsrd(MSR_EE | MSR_RI, 1);
	else
		mtmsr(mfmsr() | MSR_EE);
}

static inline void __hard_irq_disable(void)
{
	if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
		wrtee(0);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EID);
	else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		__mtmsrd(MSR_RI, 1);
	else
		mtmsr(mfmsr() & ~MSR_EE);
}

static inline void __hard_EE_RI_disable(void)
{
	if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
		wrtee(0);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_NRI);
	else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		__mtmsrd(0, 1);
	else
		mtmsr(mfmsr() & ~(MSR_EE | MSR_RI));
}

static inline void __hard_RI_enable(void)
{
	if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
		return;

	if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EID);
	else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		__mtmsrd(MSR_RI, 1);
	else
		mtmsr(mfmsr() | MSR_RI);
}

#ifdef CONFIG_PPC64
#include <asm/paca.h>

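/*
 * Editorial note: on 64-bit, GPR13 holds the per-CPU paca pointer,
 * which is why the accessors below load and store paca fields at an
 * offset from register 13.
 */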
static inline notrace unsigned long irq_soft_mask_return(void)
{
	unsigned long flags;

	asm volatile(
		"lbz %0,%1(13)"
		: "=r" (flags)
		: "i" (offsetof(struct paca_struct, irq_soft_mask)));

	return flags;
}

/*
 * The "memory" clobber acts as both a compiler barrier
 * for the critical section and as a clobber because
 * we changed paca->irq_soft_mask
 */
static inline notrace void irq_soft_mask_set(unsigned long mask)
{
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	/*
	 * The irq mask must always include the STD bit if any are set,
	 * and interrupts don't get replayed until the standard
	 * interrupt (local_irq_disable()) is unmasked.
	 *
	 * Other masks must only provide additional masking beyond
	 * the standard, and they are also not replayed until the
	 * standard interrupt becomes unmasked.
	 *
	 * This could be changed, but it will require partial
	 * unmasks to be replayed, among other things. For now, take
	 * the simple approach.
	 */
	WARN_ON(mask && !(mask & IRQS_DISABLED));
#endif

	asm volatile(
		"stb %0,%1(13)"
		:
		: "r" (mask),
		  "i" (offsetof(struct paca_struct, irq_soft_mask))
		: "memory");
}

static inline notrace unsigned long irq_soft_mask_set_return(unsigned long mask)
{
	unsigned long flags;

#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	WARN_ON(mask && !(mask & IRQS_DISABLED));
#endif

	asm volatile(
		"lbz %0,%1(13); stb %2,%1(13)"
		: "=&r" (flags)
		: "i" (offsetof(struct paca_struct, irq_soft_mask)),
		  "r" (mask)
		: "memory");

	return flags;
}

static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask)
{
	unsigned long flags, tmp;

	asm volatile(
		"lbz %0,%2(13); or %1,%0,%3; stb %1,%2(13)"
		: "=&r" (flags), "=r" (tmp)
		: "i" (offsetof(struct paca_struct, irq_soft_mask)),
		  "r" (mask)
		: "memory");

#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	WARN_ON((mask | flags) && !((mask | flags) & IRQS_DISABLED));
#endif

	return flags;
}

static inline unsigned long arch_local_save_flags(void)
{
	return irq_soft_mask_return();
}

static inline void arch_local_irq_disable(void)
{
	irq_soft_mask_set(IRQS_DISABLED);
}

extern void arch_local_irq_restore(unsigned long);

static inline void arch_local_irq_enable(void)
{
	arch_local_irq_restore(IRQS_ENABLED);
}

static inline unsigned long arch_local_irq_save(void)
{
	return irq_soft_mask_set_return(IRQS_DISABLED);
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return flags & IRQS_DISABLED;
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}
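
/*
 * Usage sketch (editorial, mirroring the generic irqflags pattern):
 *
 *	unsigned long flags;
 *
 *	flags = arch_local_irq_save();	// soft-disable, return old mask
 *	... critical section ...
 *	arch_local_irq_restore(flags);	// may replay pending interrupts
 */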

static inline void set_pmi_irq_pending(void)
{
	/*
	 * Invoked from PMU callback functions to set the PMI bit in the paca.
	 * This has to be called with irqs disabled (via hard_irq_disable()).
	 */
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		WARN_ON_ONCE(mfmsr() & MSR_EE);

	get_paca()->irq_happened |= PACA_IRQ_PMI;
}

static inline void clear_pmi_irq_pending(void)
{
	/*
	 * Invoked from PMU callback functions to clear the pending PMI bit
	 * in the paca.
	 */
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		WARN_ON_ONCE(mfmsr() & MSR_EE);

	get_paca()->irq_happened &= ~PACA_IRQ_PMI;
}

static inline bool pmi_irq_pending(void)
{
	/*
	 * Invoked from PMU callback functions to check if there is a pending
	 * PMI bit in the paca.
	 */
	if (get_paca()->irq_happened & PACA_IRQ_PMI)
		return true;

	return false;
}

#ifdef CONFIG_PPC_BOOK3S
/*
 * To support disabling and enabling of IRQs with PMI, a pair of new
 * macros, powerpc_local_irq_pmu_save() and powerpc_local_irq_pmu_restore(),
 * is added. These macros are implemented using the generic Linux
 * local_irq_* code from include/linux/irqflags.h.
 */
#define raw_local_irq_pmu_save(flags)					\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = irq_soft_mask_or_return(IRQS_DISABLED |	\
				IRQS_PMI_DISABLED);			\
	} while (0)

#define raw_local_irq_pmu_restore(flags)				\
	do {								\
		typecheck(unsigned long, flags);			\
		arch_local_irq_restore(flags);				\
	} while (0)

#ifdef CONFIG_TRACE_IRQFLAGS
#define powerpc_local_irq_pmu_save(flags)			\
	do {							\
		raw_local_irq_pmu_save(flags);			\
		if (!raw_irqs_disabled_flags(flags))		\
			trace_hardirqs_off();			\
	} while (0)
#define powerpc_local_irq_pmu_restore(flags)			\
	do {							\
		if (!raw_irqs_disabled_flags(flags))		\
			trace_hardirqs_on();			\
		raw_local_irq_pmu_restore(flags);		\
	} while (0)
#else
#define powerpc_local_irq_pmu_save(flags)			\
	do {							\
		raw_local_irq_pmu_save(flags);			\
	} while (0)
#define powerpc_local_irq_pmu_restore(flags)			\
	do {							\
		raw_local_irq_pmu_restore(flags);		\
	} while (0)
#endif  /* CONFIG_TRACE_IRQFLAGS */
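
/*
 * Usage sketch (editorial, assumed PMU-side caller):
 *
 *	unsigned long flags;
 *
 *	powerpc_local_irq_pmu_save(flags);
 *	... section that must not be interrupted, even by a PMI ...
 *	powerpc_local_irq_pmu_restore(flags);
 */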

#endif /* CONFIG_PPC_BOOK3S */

#define hard_irq_disable()	do {					\
	unsigned long flags;						\
	__hard_irq_disable();						\
	flags = irq_soft_mask_set_return(IRQS_ALL_DISABLED);		\
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;			\
	if (!arch_irqs_disabled_flags(flags)) {				\
		asm ("stdx %%r1, 0, %1 ;"				\
		     : "=m" (local_paca->saved_r1)			\
		     : "b" (&local_paca->saved_r1));			\
		trace_hardirqs_off();					\
	}								\
} while (0)
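
/*
 * Editorial note: unlike arch_local_irq_disable(), which only sets the
 * soft mask, hard_irq_disable() also clears MSR[EE] and records
 * PACA_IRQ_HARD_DIS so later code knows the hardware state has to be
 * restored on enable.
 */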

static inline bool __lazy_irq_pending(u8 irq_happened)
{
	return !!(irq_happened & ~PACA_IRQ_HARD_DIS);
}

/*
 * Check if a lazy IRQ is pending. Should be called with IRQs hard disabled.
 */
static inline bool lazy_irq_pending(void)
{
	return __lazy_irq_pending(get_paca()->irq_happened);
}

/*
 * Check if a lazy IRQ is pending, with no debugging checks.
 * Should be called with IRQs hard disabled.
 * For use in RI disabled code or other constrained situations.
 */
static inline bool lazy_irq_pending_nocheck(void)
{
	return __lazy_irq_pending(local_paca->irq_happened);
}

bool power_pmu_wants_prompt_pmi(void);

/*
 * This is called by asynchronous interrupts to check whether to
 * conditionally re-enable hard interrupts after having cleared
 * the source of the interrupt. They are kept disabled if there
 * is a different soft-masked interrupt pending that requires hard
 * masking.
 */
static inline bool should_hard_irq_enable(void)
{
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
	WARN_ON(mfmsr() & MSR_EE);
#endif
#ifdef CONFIG_PERF_EVENTS
	/*
	 * If the PMU is not running, there is not much reason to enable
	 * MSR[EE] in irq handlers because any interrupts would just be
	 * soft-masked.
	 *
	 * TODO: Add test for 64e
	 */
	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !power_pmu_wants_prompt_pmi())
		return false;

	if (get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)
		return false;

	return true;
#else
	return false;
#endif
}

/*
 * Do the hard enabling; only call this if should_hard_irq_enable() is true.
 */
static inline void do_hard_irq_enable(void)
{
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
	WARN_ON(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK);
	WARN_ON(mfmsr() & MSR_EE);
#endif
	/*
	 * This allows PMI interrupts (and watchdog soft-NMIs) through.
	 * There is no other reason to enable this way.
	 */
	get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
	__hard_irq_enable();
}
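
/*
 * Sketch of the expected caller pattern in an asynchronous interrupt
 * handler (editorial):
 *
 *	if (should_hard_irq_enable())
 *		do_hard_irq_enable();
 */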

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	return (regs->softe & IRQS_DISABLED);
}

extern bool prep_irq_for_idle(void);
extern bool prep_irq_for_idle_irqsoff(void);
extern void irq_set_pending_from_srr1(unsigned long srr1);

#define fini_irq_for_idle_irqsoff() trace_hardirqs_off();

extern void force_external_irq_replay(void);

static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val)
{
	regs->softe = val;
}
#else /* CONFIG_PPC64 */

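/*
 * Editorial note: on 32-bit there is no lazy/soft masking, so the
 * helpers below operate directly on MSR[EE] (or the Book E/8xx
 * equivalents) and the soft-mask hooks become no-ops.
 */
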
static inline notrace unsigned long irq_soft_mask_return(void)
{
	return 0;
}

static inline unsigned long arch_local_save_flags(void)
{
	return mfmsr();
}

static inline void arch_local_irq_restore(unsigned long flags)
{
	if (IS_ENABLED(CONFIG_BOOKE))
		wrtee(flags);
	else
		mtmsr(flags);
}

static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();

	if (IS_ENABLED(CONFIG_BOOKE))
		wrtee(0);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EID);
	else
		mtmsr(flags & ~MSR_EE);

	return flags;
}

static inline void arch_local_irq_disable(void)
{
	__hard_irq_disable();
}

static inline void arch_local_irq_enable(void)
{
	__hard_irq_enable();
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return (flags & MSR_EE) == 0;
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

#define hard_irq_disable()		arch_local_irq_disable()

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	return !(regs->msr & MSR_EE);
}

static __always_inline bool should_hard_irq_enable(void)
{
	return false;
}

static inline void do_hard_irq_enable(void)
{
	BUILD_BUG();
}

static inline void clear_pmi_irq_pending(void) { }
static inline void set_pmi_irq_pending(void) { }
static inline bool pmi_irq_pending(void) { return false; }

static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val)
{
}
#endif /* CONFIG_PPC64 */

#define ARCH_IRQ_INIT_FLAGS	IRQ_NOREQUEST

#endif  /* __ASSEMBLY__ */
#endif	/* __KERNEL__ */
#endif	/* _ASM_POWERPC_HW_IRQ_H */