cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

hazards.h (8646B)


/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003, 04, 07 Ralf Baechle <ralf@linux-mips.org>
 * Copyright (C) MIPS Technologies, Inc.
 *   written by Ralf Baechle <ralf@linux-mips.org>
 */
#ifndef _ASM_HAZARDS_H
#define _ASM_HAZARDS_H

#include <linux/stringify.h>
#include <asm/compiler.h>

#define ___ssnop							\
	sll	$0, $0, 1

#define ___ehb								\
	sll	$0, $0, 3
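
/*
 * Note: both barriers are encoded as shifts of the zero register.
 * SSNOP ("sll $0, $0, 1") is the superscalar no-op, which must issue
 * alone; EHB ("sll $0, $0, 3") is the MIPS32r2 execution hazard
 * barrier.  Both fall inside the NOP encoding space, so pre-R2 CPUs
 * simply execute them as no-ops.
 */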
/*
 * TLB hazards
 */
#if (defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR5) || \
     defined(CONFIG_CPU_MIPSR6)) && \
    !defined(CONFIG_CPU_CAVIUM_OCTEON) && !defined(CONFIG_CPU_LOONGSON64)

/*
 * MIPSR2 defines ehb for hazard avoidance
 */

#define __mtc0_tlbw_hazard						\
	___ehb

#define __mtc0_tlbr_hazard						\
	___ehb

#define __tlbw_use_hazard						\
	___ehb

#define __tlb_read_hazard						\
	___ehb

#define __tlb_probe_hazard						\
	___ehb

#define __irq_enable_hazard						\
	___ehb

#define __irq_disable_hazard						\
	___ehb

#define __back_to_back_c0_hazard					\
	___ehb
     57/*
     58 * gcc has a tradition of misscompiling the previous construct using the
     59 * address of a label as argument to inline assembler.	Gas otoh has the
     60 * annoying difference between la and dla which are only usable for 32-bit
     61 * rsp. 64-bit code, so can't be used without conditional compilation.
     62 * The alternative is switching the assembler to 64-bit code which happens
     63 * to work right even for 32-bit code...
     64 */
#define instruction_hazard()						\
do {									\
	unsigned long tmp;						\
									\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set "MIPS_ISA_LEVEL"				\n"	\
	"	dla	%0, 1f					\n"	\
	"	jr.hb	%0					\n"	\
	"	.set	pop					\n"	\
	"1:							\n"	\
	: "=r" (tmp));							\
} while (0)
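
/*
 * jr.hb is the R2 jump-register-with-hazard-barrier: the jump to the
 * local label 1: clears both execution and instruction hazards before
 * the following instruction is fetched.
 */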

#elif (defined(CONFIG_CPU_MIPSR1) && !defined(CONFIG_MIPS_ALCHEMY)) || \
	defined(CONFIG_CPU_BMIPS)

/*
 * These are slightly complicated by the fact that we guarantee R1 kernels to
 * run fine on R2 processors.
 */
#define __mtc0_tlbw_hazard						\
	___ssnop;							\
	___ssnop;							\
	___ehb

#define __mtc0_tlbr_hazard						\
	___ssnop;							\
	___ssnop;							\
	___ehb

#define __tlbw_use_hazard						\
	___ssnop;							\
	___ssnop;							\
	___ssnop;							\
	___ehb

#define __tlb_read_hazard						\
	___ssnop;							\
	___ssnop;							\
	___ssnop;							\
	___ehb

#define __tlb_probe_hazard						\
	___ssnop;							\
	___ssnop;							\
	___ssnop;							\
	___ehb

#define __irq_enable_hazard						\
	___ssnop;							\
	___ssnop;							\
	___ssnop;							\
	___ehb

#define __irq_disable_hazard						\
	___ssnop;							\
	___ssnop;							\
	___ssnop;							\
	___ehb

#define __back_to_back_c0_hazard					\
	___ssnop;							\
	___ssnop;							\
	___ssnop;							\
	___ehb

/*
 * gcc has a tradition of miscompiling the previous construct when the
 * address of a label is used as an argument to inline assembler.  GAS,
 * on the other hand, has the annoying difference between la and dla,
 * which are usable only for 32-bit and 64-bit code respectively, so
 * neither can be used without conditional compilation.  The alternative
 * is switching the assembler to 64-bit code, which happens to work
 * right even for 32-bit code...
 */
#define __instruction_hazard()						\
do {									\
	unsigned long tmp;						\
									\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	mips64r2				\n"	\
	"	dla	%0, 1f					\n"	\
	"	jr.hb	%0					\n"	\
	"	.set	pop					\n"	\
	"1:							\n"	\
	: "=r" (tmp));							\
} while (0)

#define instruction_hazard()						\
do {									\
	if (cpu_has_mips_r2_r6)						\
		__instruction_hazard();					\
} while (0)

#elif defined(CONFIG_MIPS_ALCHEMY) || defined(CONFIG_CPU_CAVIUM_OCTEON) || \
	defined(CONFIG_CPU_LOONGSON2EF) || defined(CONFIG_CPU_LOONGSON64) || \
	defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_R5500)

/*
 * R10000 rocks - all hazards are handled in hardware, so this becomes a
 * no-brainer.
 */

#define __mtc0_tlbw_hazard

#define __mtc0_tlbr_hazard

#define __tlbw_use_hazard

#define __tlb_read_hazard

#define __tlb_probe_hazard

#define __irq_enable_hazard

#define __irq_disable_hazard

#define __back_to_back_c0_hazard

#define instruction_hazard() do { } while (0)

#elif defined(CONFIG_CPU_SB1)

/*
 * Mostly like the R4000, for historical reasons
 */
#define __mtc0_tlbw_hazard

#define __mtc0_tlbr_hazard

#define __tlbw_use_hazard

#define __tlb_read_hazard

#define __tlb_probe_hazard

#define __irq_enable_hazard

#define __irq_disable_hazard						\
	___ssnop;							\
	___ssnop;							\
	___ssnop

#define __back_to_back_c0_hazard

#define instruction_hazard() do { } while (0)

#else

/*
 * Finally the catchall case for all other processors, including R4000,
 * R4400, R4600, R4700, R5000, RM7000, NEC VR41xx etc.
 *
 * The taken branch will result in a two cycle penalty for the two killed
 * instructions on R4000 / R4400.  Other processors only have a single
 * cycle hazard, so this is a nice trick to get optimal code for a range
 * of processors.
 */
#define __mtc0_tlbw_hazard						\
	nop;								\
	nop

#define __mtc0_tlbr_hazard						\
	nop;								\
	nop

#define __tlbw_use_hazard						\
	nop;								\
	nop;								\
	nop

#define __tlb_read_hazard						\
	nop;								\
	nop;								\
	nop

#define __tlb_probe_hazard						\
	nop;								\
	nop;								\
	nop

#define __irq_enable_hazard						\
	___ssnop;							\
	___ssnop;							\
	___ssnop

#define __irq_disable_hazard						\
	nop;								\
	nop;								\
	nop

#define __back_to_back_c0_hazard					\
	___ssnop;							\
	___ssnop;							\
	___ssnop

#define instruction_hazard() do { } while (0)

#endif

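/*
 * A minimal usage sketch (illustrative, not part of the original
 * header): after patching kernel text, the caches are first made
 * coherent and the pipeline is then forced to refetch, so that stale
 * pre-modification instructions cannot execute.
 * flush_icache_range() is assumed from <asm/cacheflush.h>:
 *
 *	flush_icache_range(start, end);
 *	instruction_hazard();		// jr.hb refetch on R2+,
 *					// no-op where hardware copes
 */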

/* FPU hazards */

#if defined(CONFIG_CPU_SB1)

#define __enable_fpu_hazard						\
	.set	push;							\
	.set	mips64;							\
	.set	noreorder;						\
	___ssnop;							\
	bnezl	$0, .+4;						\
	___ssnop;							\
	.set	pop

#define __disable_fpu_hazard

#elif defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR5) || \
      defined(CONFIG_CPU_MIPSR6)

#define __enable_fpu_hazard						\
	___ehb

#define __disable_fpu_hazard						\
	___ehb

#else

#define __enable_fpu_hazard						\
	nop;								\
	nop;								\
	nop;								\
	nop

#define __disable_fpu_hazard						\
	___ehb

#endif

#ifdef __ASSEMBLY__

#define _ssnop ___ssnop
#define _ehb ___ehb
#define mtc0_tlbw_hazard __mtc0_tlbw_hazard
#define mtc0_tlbr_hazard __mtc0_tlbr_hazard
#define tlbw_use_hazard __tlbw_use_hazard
#define tlb_read_hazard __tlb_read_hazard
#define tlb_probe_hazard __tlb_probe_hazard
#define irq_enable_hazard __irq_enable_hazard
#define irq_disable_hazard __irq_disable_hazard
#define back_to_back_c0_hazard __back_to_back_c0_hazard
#define enable_fpu_hazard __enable_fpu_hazard
#define disable_fpu_hazard __disable_fpu_hazard

#else

#define _ssnop()							\
do {									\
	__asm__ __volatile__(						\
	__stringify(___ssnop)						\
	);								\
} while (0)

#define _ehb()								\
do {									\
	__asm__ __volatile__(						\
	__stringify(___ehb)						\
	);								\
} while (0)

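/*
 * __stringify() from <linux/stringify.h> expands its argument before
 * stringizing, so _ssnop() above emits asm("sll $0, $0, 1") and _ehb()
 * emits asm("sll $0, $0, 3").  The same trick turns each of the bare
 * hazard macros below into a C-callable barrier.
 */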

#define mtc0_tlbw_hazard()						\
do {									\
	__asm__ __volatile__(						\
	__stringify(__mtc0_tlbw_hazard)					\
	);								\
} while (0)


#define mtc0_tlbr_hazard()						\
do {									\
	__asm__ __volatile__(						\
	__stringify(__mtc0_tlbr_hazard)					\
	);								\
} while (0)


#define tlbw_use_hazard()						\
do {									\
	__asm__ __volatile__(						\
	__stringify(__tlbw_use_hazard)					\
	);								\
} while (0)

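/*
 * A minimal, self-contained sketch (illustrative only; the function
 * name is invented for this example, and the 32-bit mtc0 form is used
 * for brevity): the canonical ordering around a TLB write.  The mtc0
 * and tlbwi are open-coded so the example depends only on this header.
 */
static inline void example_tlb_write(unsigned long entryhi)
{
	__asm__ __volatile__(
	"	mtc0	%0, $10		# CP0 EntryHi\n"
	: : "r" (entryhi));
	mtc0_tlbw_hazard();	/* let the mtc0 settle before tlbwi */
	__asm__ __volatile__("tlbwi");
	tlbw_use_hazard();	/* and the tlbwi before the TLB is used */
}
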

#define tlb_read_hazard()						\
do {									\
	__asm__ __volatile__(						\
	__stringify(__tlb_read_hazard)					\
	);								\
} while (0)


#define tlb_probe_hazard()						\
do {									\
	__asm__ __volatile__(						\
	__stringify(__tlb_probe_hazard)					\
	);								\
} while (0)


#define irq_enable_hazard()						\
do {									\
	__asm__ __volatile__(						\
	__stringify(__irq_enable_hazard)				\
	);								\
} while (0)


#define irq_disable_hazard()						\
do {									\
	__asm__ __volatile__(						\
	__stringify(__irq_disable_hazard)				\
	);								\
} while (0)


#define back_to_back_c0_hazard()					\
do {									\
	__asm__ __volatile__(						\
	__stringify(__back_to_back_c0_hazard)				\
	);								\
} while (0)

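/*
 * Typical use (sketch; write_c0_compare()/read_c0_count() are the
 * usual <asm/mipsregs.h> helpers, assumed here): two consecutive CP0
 * accesses need the barrier in between, e.g. when reprogramming the
 * count/compare timer:
 *
 *	write_c0_compare(next);		// mtc0
 *	back_to_back_c0_hazard();
 *	count = read_c0_count();	// mfc0 now sees settled state
 */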

#define enable_fpu_hazard()						\
do {									\
	__asm__ __volatile__(						\
	__stringify(__enable_fpu_hazard)				\
	);								\
} while (0)


#define disable_fpu_hazard()						\
do {									\
	__asm__ __volatile__(						\
	__stringify(__disable_fpu_hazard)				\
	);								\
} while (0)

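/*
 * Sketch of the intended pairing (ST0_CU1 and the set/clear_c0_status()
 * helpers from <asm/mipsregs.h> are assumed here): the barriers bracket
 * flipping coprocessor 1 access in the Status register.
 *
 *	set_c0_status(ST0_CU1);		// grant FPU access
 *	enable_fpu_hazard();		// FPU ops are legal after this
 *	...
 *	clear_c0_status(ST0_CU1);	// revoke FPU access
 *	disable_fpu_hazard();
 */
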
/*
 * MIPS R2 instruction hazard barrier.  Needs to be called as a subroutine.
 */
extern void mips_ihb(void);
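
/*
 * mips_ihb() is the out-of-line counterpart to instruction_hazard()
 * for callers that need the barrier behind a function call; it is
 * typically implemented as a bare jr.hb back to the caller, so the
 * hazard is cleared by the time the call returns.  Its definition
 * lives outside this header.
 */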

#endif /* __ASSEMBLY__ */

#endif /* _ASM_HAZARDS_H */