cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

patch.c (6539B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Instruction-patching support.
 *
 * Copyright (C) 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/init.h>
#include <linux/string.h>

#include <asm/patch.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/unistd.h>

/*
 * This was adapted from code written by Tony Luck:
 *
 * The 64-bit value in a "movl reg=value" is scattered between the two words of the bundle
 * like this:
 *
 * 6  6         5         4         3         2         1
 * 3210987654321098765432109876543210987654321098765432109876543210
 * ABBBBBBBBBBBBBBBBBBBBBBBCCCCCCCCCCCCCCCCCCDEEEEEFFFFFFFFFGGGGGGG
 *
 * CCCCCCCCCCCCCCCCCCxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
 * xxxxAFFFFFFFFFEEEEEDxGGGGGGGxxxxxxxxxxxxxBBBBBBBBBBBBBBBBBBBBBBB
 */
static u64
get_imm64 (u64 insn_addr)
{
	u64 *p = (u64 *) (insn_addr & -16);	/* mask out slot number */

	return ( (p[1] & 0x0800000000000000UL) << 4)  | /*A*/
		((p[1] & 0x00000000007fffffUL) << 40) | /*B*/
		((p[0] & 0xffffc00000000000UL) >> 24) | /*C*/
		((p[1] & 0x0000100000000000UL) >> 23) | /*D*/
		((p[1] & 0x0003e00000000000UL) >> 29) | /*E*/
		((p[1] & 0x07fc000000000000UL) >> 43) | /*F*/
		((p[1] & 0x000007f000000000UL) >> 36);  /*G*/
}

/* Patch instruction with "val" where "mask" has 1 bits. */
void
ia64_patch (u64 insn_addr, u64 mask, u64 val)
{
	u64 m0, m1, v0, v1, b0, b1, *b = (u64 *) (insn_addr & -16);
#	define insn_mask ((1UL << 41) - 1)
	unsigned long shift;

	b0 = b[0]; b1 = b[1];
	shift = 5 + 41 * (insn_addr % 16); /* 5 bits of template, then 3 x 41-bit instructions */
	if (shift >= 64) {
		m1 = mask << (shift - 64);
		v1 = val << (shift - 64);
	} else {
		m0 = mask << shift; m1 = mask >> (64 - shift);
		v0 = val  << shift; v1 = val >> (64 - shift);
		b[0] = (b0 & ~m0) | (v0 & m0);
	}
	b[1] = (b1 & ~m1) | (v1 & m1);
}
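
/*
 * Illustrative sketch, not part of the original file: a minimal use
 * of ia64_patch().  The mask/val pair is expressed relative to the
 * 41-bit instruction; here it is the 7-bit imm7b field (instruction
 * bits 13..19) of a hypothetical "adds" in slot 0 (insn_addr % 16
 * == 0, so shift == 5 and the field stays in the first bundle word).
 */
static void __init __maybe_unused
example_patch_imm7 (u64 bundle_addr, u64 imm7)
{
	ia64_patch(bundle_addr, 0x7fUL << 13, (imm7 & 0x7fUL) << 13);
}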

void
ia64_patch_imm64 (u64 insn_addr, u64 val)
{
	/* The assembler may generate an offset pointing to either slot 1
	   or slot 2 for a long (2-slot) instruction, occupying slots 1
	   and 2.  */
	insn_addr &= -16UL;
	ia64_patch(insn_addr + 2,
		   0x01fffefe000UL, (  ((val & 0x8000000000000000UL) >> 27) /* bit 63 -> 36 */
				     | ((val & 0x0000000000200000UL) <<  0) /* bit 21 -> 21 */
				     | ((val & 0x00000000001f0000UL) <<  6) /* bit 16 -> 22 */
				     | ((val & 0x000000000000ff80UL) << 20) /* bit  7 -> 27 */
				     | ((val & 0x000000000000007fUL) << 13) /* bit  0 -> 13 */));
	ia64_patch(insn_addr + 1, 0x1ffffffffffUL, val >> 22);
}
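
/*
 * Illustrative sketch, not part of the original file: get_imm64()
 * above is the inverse of ia64_patch_imm64(), so patching a movl
 * bundle and reading it back should round-trip.  "insn_addr" is
 * hypothetical and must point at a real "movl reg=value" bundle.
 */
static int __init __maybe_unused
check_movl_roundtrip (u64 insn_addr, u64 val)
{
	ia64_patch_imm64(insn_addr, val);
	return get_imm64(insn_addr) == val;
}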

void
ia64_patch_imm60 (u64 insn_addr, u64 val)
{
	/* The assembler may generate an offset pointing to either slot 1
	   or slot 2 for a long (2-slot) instruction, occupying slots 1
	   and 2.  */
	insn_addr &= -16UL;
	ia64_patch(insn_addr + 2,
		   0x011ffffe000UL, (  ((val & 0x0800000000000000UL) >> 23) /* bit 59 -> 36 */
				     | ((val & 0x00000000000fffffUL) << 13) /* bit  0 -> 13 */));
	ia64_patch(insn_addr + 1, 0x1fffffffffcUL, val >> 18);
}
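
/*
 * Illustrative sketch, not part of the original file: the 60-bit
 * immediate of a long branch ("brl") is a displacement counted in
 * 16-byte bundles from the branch's own bundle, so retargeting one
 * comes down to the arithmetic below (this mirrors the in-tree use
 * in patch_brl_fsys_bubble_down() further down).
 */
static void __init __maybe_unused
example_retarget_brl (u64 brl_addr, u64 target)
{
	/* assumes, like the caller below, a non-negative,
	   bundle-aligned displacement */
	ia64_patch_imm60(brl_addr, (target - (brl_addr & -16)) / 16);
}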

/*
 * We sometimes need to load the physical address of a kernel
 * object.  Often we can convert the virtual address to physical
 * at execution time, but sometimes (either for performance reasons
 * or during error recovery) we cannot do this.  Patch the marked
 * bundles to load the physical address.
 */
void __init
ia64_patch_vtop (unsigned long start, unsigned long end)
{
	s32 *offp = (s32 *) start;
	u64 ip;

	while (offp < (s32 *) end) {
		ip = (u64) offp + *offp;

		/* replace virtual address with corresponding physical address: */
		ia64_patch_imm64(ip, ia64_tpa(get_imm64(ip)));
		ia64_fc((void *) ip);
		++offp;
	}
	ia64_sync_i();
	ia64_srlz_i();
}
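
/*
 * Illustrative sketch, not part of the original file: every
 * patchlist walked in this file is an array of self-relative 32-bit
 * offsets, each pointing at a tagged bundle, so the loops above and
 * below all share the shape sketched here.  "apply" is a
 * hypothetical callback.
 */
static void __init __maybe_unused
walk_patchlist (unsigned long start, unsigned long end,
		void (*apply)(u64 ip))
{
	s32 *offp = (s32 *) start;

	while (offp < (s32 *) end) {
		(*apply)((u64) offp + *offp);
		++offp;
	}
}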

/*
 * Disable the RSE workaround by turning the conditional branch
 * that we tagged in each place the workaround was used into an
 * unconditional branch.
 */
void __init
ia64_patch_rse (unsigned long start, unsigned long end)
{
	s32 *offp = (s32 *) start;
	u64 ip, *b;

	while (offp < (s32 *) end) {
		ip = (u64) offp + *offp;

		b = (u64 *)(ip & -16);
		b[1] &= ~0xf800000L;	/* clear predicate bits of the slot 2 branch */
		ia64_fc((void *) ip);
		++offp;
	}
	ia64_sync_i();
	ia64_srlz_i();
}

void __init
ia64_patch_mckinley_e9 (unsigned long start, unsigned long end)
{
	static int first_time = 1;
	int need_workaround;
	s32 *offp = (s32 *) start;
	u64 *wp;

	need_workaround = (local_cpu_data->family == 0x1f && local_cpu_data->model == 0);

	if (first_time) {
		first_time = 0;
		if (need_workaround)
			printk(KERN_INFO "Leaving McKinley Errata 9 workaround enabled\n");
	}
	if (need_workaround)
		return;

	while (offp < (s32 *) end) {
		wp = (u64 *) ia64_imva((char *) offp + *offp);
		wp[0] = 0x0000000100000011UL; /* nop.m 0; nop.i 0; br.ret.sptk.many b6 */
		wp[1] = 0x0084006880000200UL;
		wp[2] = 0x0000000100000000UL; /* nop.m 0; nop.i 0; nop.i 0 */
		wp[3] = 0x0004000000000200UL;
		ia64_fc(wp); ia64_fc(wp + 2);
		++offp;
	}
	ia64_sync_i();
	ia64_srlz_i();
}

static void __init
patch_fsyscall_table (unsigned long start, unsigned long end)
{
	extern unsigned long fsyscall_table[NR_syscalls];
	s32 *offp = (s32 *) start;
	u64 ip;

	while (offp < (s32 *) end) {
		ip = (u64) ia64_imva((char *) offp + *offp);
		ia64_patch_imm64(ip, (u64) fsyscall_table);
		ia64_fc((void *) ip);
		++offp;
	}
	ia64_sync_i();
	ia64_srlz_i();
}

static void __init
patch_brl_fsys_bubble_down (unsigned long start, unsigned long end)
{
	extern char fsys_bubble_down[];
	s32 *offp = (s32 *) start;
	u64 ip;

	while (offp < (s32 *) end) {
		ip = (u64) offp + *offp;
		ia64_patch_imm60((u64) ia64_imva((void *) ip),
				 (u64) (fsys_bubble_down - (ip & -16)) / 16);
		ia64_fc((void *) ip);
		++offp;
	}
	ia64_sync_i();
	ia64_srlz_i();
}

void __init
ia64_patch_gate (void)
{
#	define START(name)	((unsigned long) __start_gate_##name##_patchlist)
#	define END(name)	((unsigned long) __end_gate_##name##_patchlist)

	patch_fsyscall_table(START(fsyscall), END(fsyscall));
	patch_brl_fsys_bubble_down(START(brl_fsys_bubble_down), END(brl_fsys_bubble_down));
	ia64_patch_vtop(START(vtop), END(vtop));
	ia64_patch_mckinley_e9(START(mckinley_e9), END(mckinley_e9));
}

void ia64_patch_phys_stack_reg(unsigned long val)
{
	s32 *offp = (s32 *) __start___phys_stack_reg_patchlist;
	s32 *end = (s32 *) __end___phys_stack_reg_patchlist;
	u64 ip, mask, imm;

	/* see instruction format A4: adds r1 = imm13, r3 */
	mask = (0x3fUL << 27) | (0x7f << 13);
	imm = (((val >> 7) & 0x3f) << 27) | (val & 0x7f) << 13;

	while (offp < end) {
		ip = (u64) offp + *offp;
		ia64_patch(ip, mask, imm);
		ia64_fc((void *)ip);
		++offp;
	}
	ia64_sync_i();
	ia64_srlz_i();
}
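
/*
 * Illustrative note, not part of the original file: in the A4
 * encoding the immediate is split across two fields, imm7b in
 * instruction bits 13..19 and imm6d in bits 27..32.  The mask/imm
 * pair built above covers exactly those fields, so "val" must fit
 * in 13 bits.
 */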