cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

irq_stack.h (7597B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_IRQ_STACK_H
#define _ASM_X86_IRQ_STACK_H

#include <linux/ptrace.h>
#include <linux/objtool.h>

#include <asm/processor.h>

#ifdef CONFIG_X86_64

/*
 * Macro to inline switching to an interrupt stack and invoking function
 * calls from there. The following rules apply:
 *
 * - Ordering:
 *
 *   1. Write the stack pointer into the topmost place of the irq
 *	stack. This ensures that the various unwinders can link back to the
 *	original stack.
 *
 *   2. Switch the stack pointer to the top of the irq stack.
 *
 *   3. Invoke whatever needs to be done (@asm_call argument).
 *
 *   4. Pop the original stack pointer from the top of the irq stack,
 *	which brings it back to the original stack where it left off.
 *
 * - Function invocation:
 *
 *   To allow flexible usage of the macro, the actual function code including
 *   the store of the arguments in the call ABI registers is handed in via
 *   the @asm_call argument.
 *
 * - Local variables:
 *
 *   @tos:
 *	The @tos variable holds a pointer to the top of the irq stack and
 *	_must_ be allocated in a non-callee saved register as this is a
 *	restriction coming from objtool.
 *
 *	Note that (tos) is in both the input and output constraints to
 *	ensure that the compiler does not assume that R11 is left untouched
 *	in case this macro is used in some place where the per cpu interrupt
 *	stack pointer is used again afterwards.
 *
 * - Function arguments:
 *	The function argument(s), if any, have to be defined in register
 *	variables at the place where this is invoked. Storing the
 *	argument(s) in the proper register(s) is part of the @asm_call.
 *
 * - Constraints:
 *
 *   The constraints have to be done very carefully because the compiler
 *   does not know about the assembly call.
 *
 *   output:
 *     As documented above, the @tos variable is required to be in the
 *     output constraints to make the compiler aware that R11 cannot be
 *     reused after the asm() statement.
 *
 *     For builds with CONFIG_UNWINDER_FRAME_POINTER, ASM_CALL_CONSTRAINT is
 *     required as well as this prevents certain creative GCC variants from
 *     misplacing the ASM code.
 *
 *  input:
 *    - func:
 *	  Immediate, which tells the compiler that the function is referenced.
 *
 *    - tos:
 *	  Register. The actual register is defined by the variable declaration.
 *
 *    - function arguments:
 *	  The constraints are handed in via the 'argconstr' argument list. They
 *	  describe the register arguments which are used in @asm_call.
 *
 *  clobbers:
 *     Function calls can clobber anything except the callee-saved
 *     registers. Tell the compiler.
 */
#define call_on_stack(stack, func, asm_call, argconstr...)		\
{									\
	register void *tos asm("r11");					\
									\
	tos = ((void *)(stack));					\
									\
	asm_inline volatile(						\
	"movq	%%rsp, (%[tos])				\n"		\
	"movq	%[tos], %%rsp				\n"		\
									\
	asm_call							\
									\
	"popq	%%rsp					\n"		\
									\
	: "+r" (tos), ASM_CALL_CONSTRAINT				\
	: [__func] "i" (func), [tos] "r" (tos) argconstr		\
	: "cc", "rax", "rcx", "rdx", "rsi", "rdi", "r8", "r9", "r10",	\
	  "memory"							\
	);								\
}
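
/*
 * Illustrative expansion: the zero-argument use in
 * do_softirq_own_stack() below,
 *
 *	call_on_stack(__this_cpu_read(hardirq_stack_ptr),
 *		      __do_softirq, ASM_CALL_ARG0);
 *
 * boils down to roughly:
 *
 *	register void *tos asm("r11") = __this_cpu_read(hardirq_stack_ptr);
 *
 *	asm_inline volatile("movq	%%rsp, (%[tos])		\n"
 *			    "movq	%[tos], %%rsp		\n"
 *			    "call	%P[__func]		\n"
 *			    ASM_REACHABLE
 *			    "popq	%%rsp			\n"
 *			    : "+r" (tos), ASM_CALL_CONSTRAINT
 *			    : [__func] "i" (__do_softirq), [tos] "r" (tos)
 *			    : "cc", "rax", "rcx", "rdx", "rsi", "rdi",
 *			      "r8", "r9", "r10", "memory");
 */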

#define ASM_CALL_ARG0							\
	"call %P[__func]				\n"		\
	ASM_REACHABLE

#define ASM_CALL_ARG1							\
	"movq	%[arg1], %%rdi				\n"		\
	ASM_CALL_ARG0

#define ASM_CALL_ARG2							\
	"movq	%[arg2], %%rsi				\n"		\
	ASM_CALL_ARG1

#define ASM_CALL_ARG3							\
	"movq	%[arg3], %%rdx				\n"		\
	ASM_CALL_ARG2
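
/*
 * The ASM_CALL_ARGn macros nest, so for illustration ASM_CALL_ARG2
 * expands to:
 *
 *	"movq	%[arg2], %%rsi				\n"
 *	"movq	%[arg1], %%rdi				\n"
 *	"call %P[__func]				\n"
 *	ASM_REACHABLE
 *
 * i.e. the arguments are loaded into the x86-64 SysV ABI argument
 * registers before the call. The matching [argN] input constraints
 * are supplied by the caller through the argconstr list of
 * call_on_stack().
 */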

#define call_on_irqstack(func, asm_call, argconstr...)			\
	call_on_stack(__this_cpu_read(hardirq_stack_ptr),		\
		      func, asm_call, argconstr)

/* Macros to assert type correctness for run_*_on_irqstack macros */
#define assert_function_type(func, proto)				\
	static_assert(__builtin_types_compatible_p(typeof(&func), proto))

#define assert_arg_type(arg, proto)					\
	static_assert(__builtin_types_compatible_p(typeof(arg), proto))
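
/*
 * Minimal sketch of the asserts; handler() here is a hypothetical
 * function used only for illustration:
 *
 *	static void handler(struct pt_regs *regs);
 *
 *	assert_function_type(handler, void (*)(struct pt_regs *));
 *	assert_arg_type(regs, struct pt_regs *);
 *
 * A mismatched prototype, e.g. void (*)(u32), fails the
 * static_assert() at compile time.
 */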

/*
 * Macro to invoke system vector and device interrupt C handlers.
 */
#define call_on_irqstack_cond(func, regs, asm_call, constr, c_args...)	\
{									\
	/*								\
	 * Entries from user mode and interrupts which arrive while	\
	 * the irq stack is already in use do not switch stacks. When	\
	 * entering from user mode, the task stack is empty.		\
	 */								\
	if (user_mode(regs) || __this_cpu_read(hardirq_stack_inuse)) {	\
		irq_enter_rcu();					\
		func(c_args);						\
		irq_exit_rcu();						\
	} else {							\
		/*							\
		 * Mark the irq stack inuse _before_ and unmark _after_	\
		 * switching stacks. Interrupts are disabled in both	\
		 * places. Invoke the stack switch macro with the call	\
		 * sequence which matches the above direct invocation.	\
		 */							\
		__this_cpu_write(hardirq_stack_inuse, true);		\
		call_on_irqstack(func, asm_call, constr);		\
		__this_cpu_write(hardirq_stack_inuse, false);		\
	}								\
}

/*
 * Function call sequence for call_on_irqstack() for system vectors.
 *
 * Note that irq_enter_rcu() and irq_exit_rcu() do not use the input
 * mechanism because these functions are global and cannot be optimized out
 * when compiling a particular source file which uses one of these macros.
 *
 * The argument (regs) does not need to be pushed or stashed in a callee
 * saved register to be safe vs. the irq_enter_rcu() call because the
 * clobbers already prevent the compiler from storing it in a callee
 * clobbered register. As the compiler has to preserve @regs for the final
 * call to idtentry_exit() anyway, it's likely that it does not cause extra
 * effort for this asm magic.
 */
#define ASM_CALL_SYSVEC							\
	"call irq_enter_rcu				\n"		\
	ASM_CALL_ARG1							\
	"call irq_exit_rcu				\n"

#define SYSVEC_CONSTRAINTS	, [arg1] "r" (regs)

#define run_sysvec_on_irqstack_cond(func, regs)				\
{									\
	assert_function_type(func, void (*)(struct pt_regs *));		\
	assert_arg_type(regs, struct pt_regs *);			\
									\
	call_on_irqstack_cond(func, regs, ASM_CALL_SYSVEC,		\
			      SYSVEC_CONSTRAINTS, regs);		\
}
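
/*
 * Simplified sketch of a caller, modeled on DEFINE_IDTENTRY_SYSVEC in
 * asm/idtentry.h; sysvec_foo/__sysvec_foo stand in for a concrete
 * system vector handler pair:
 *
 *	__visible noinstr void sysvec_foo(struct pt_regs *regs)
 *	{
 *		irqentry_state_t state = irqentry_enter(regs);
 *
 *		instrumentation_begin();
 *		run_sysvec_on_irqstack_cond(__sysvec_foo, regs);
 *		instrumentation_end();
 *		irqentry_exit(regs, state);
 *	}
 */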

/*
 * As in ASM_CALL_SYSVEC above, the clobbers force the compiler to store
 * @regs and @vector in callee saved registers.
 */
#define ASM_CALL_IRQ							\
	"call irq_enter_rcu				\n"		\
	ASM_CALL_ARG2							\
	"call irq_exit_rcu				\n"

#define IRQ_CONSTRAINTS	, [arg1] "r" (regs), [arg2] "r" ((unsigned long)vector)

#define run_irq_on_irqstack_cond(func, regs, vector)			\
{									\
	assert_function_type(func, void (*)(struct pt_regs *, u32));	\
	assert_arg_type(regs, struct pt_regs *);			\
	assert_arg_type(vector, u32);					\
									\
	call_on_irqstack_cond(func, regs, ASM_CALL_IRQ,			\
			      IRQ_CONSTRAINTS, regs, vector);		\
}
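
/*
 * Simplified sketch of a caller, modeled on DEFINE_IDTENTRY_IRQ in
 * asm/idtentry.h; the vector number arrives in the error_code slot:
 *
 *	__visible noinstr void common_interrupt(struct pt_regs *regs,
 *						unsigned long error_code)
 *	{
 *		irqentry_state_t state = irqentry_enter(regs);
 *		u32 vector = (u32)(u8)error_code;
 *
 *		instrumentation_begin();
 *		run_irq_on_irqstack_cond(__common_interrupt, regs, vector);
 *		instrumentation_end();
 *		irqentry_exit(regs, state);
 *	}
 */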

#ifndef CONFIG_PREEMPT_RT
/*
 * Macro to invoke __do_softirq on the irq stack. This is only called from
 * task context when bottom halves are about to be reenabled and soft
 * interrupts are pending to be processed. The interrupt stack cannot be in
 * use here.
 */
#define do_softirq_own_stack()						\
{									\
	__this_cpu_write(hardirq_stack_inuse, true);			\
	call_on_irqstack(__do_softirq, ASM_CALL_ARG0);			\
	__this_cpu_write(hardirq_stack_inuse, false);			\
}
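
/*
 * Simplified sketch of the task-context caller, modeled on
 * do_softirq() in kernel/softirq.c:
 *
 *	asmlinkage __visible void do_softirq(void)
 *	{
 *		unsigned long flags;
 *
 *		if (in_interrupt())
 *			return;
 *
 *		local_irq_save(flags);
 *		if (local_softirq_pending())
 *			do_softirq_own_stack();
 *		local_irq_restore(flags);
 *	}
 */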

#endif /* !CONFIG_PREEMPT_RT */

#else /* CONFIG_X86_64 */
/* System vector handlers always run on the stack they interrupted. */
#define run_sysvec_on_irqstack_cond(func, regs)				\
{									\
	irq_enter_rcu();						\
	func(regs);							\
	irq_exit_rcu();							\
}

/* Switches to the irq stack within func() */
#define run_irq_on_irqstack_cond(func, regs, vector)			\
{									\
	irq_enter_rcu();						\
	func(regs, vector);						\
	irq_exit_rcu();							\
}

#endif /* !CONFIG_X86_64 */

#endif /* _ASM_X86_IRQ_STACK_H */