cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

relocate_kernel_64.S (6317B)


      1/* SPDX-License-Identifier: GPL-2.0-only */
      2/*
      3 * relocate_kernel.S - put the kernel image in place to boot
      4 * Copyright (C) 2002-2005 Eric Biederman  <ebiederm@xmission.com>
      5 */
      6
      7#include <linux/linkage.h>
      8#include <asm/page_types.h>
      9#include <asm/kexec.h>
     10#include <asm/processor-flags.h>
     11#include <asm/pgtable_types.h>
     12#include <asm/nospec-branch.h>
     13#include <asm/unwind_hints.h>
     14
     15/*
     16 * Must be relocatable PIC code callable as a C function
     17 */
     18
     19#define PTR(x) (x << 3)
     20#define PAGE_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
     21
     22/*
     23 * control_page + KEXEC_CONTROL_CODE_MAX_SIZE
     24 * ~ control_page + PAGE_SIZE are used as data storage and stack for
     25 * jumping back
     26 */
     27#define DATA(offset)		(KEXEC_CONTROL_CODE_MAX_SIZE+(offset))
     28
     29/* Minimal CPU state */
     30#define RSP			DATA(0x0)
     31#define CR0			DATA(0x8)
     32#define CR3			DATA(0x10)
     33#define CR4			DATA(0x18)
     34
     35/* other data */
     36#define CP_PA_TABLE_PAGE	DATA(0x20)
     37#define CP_PA_SWAP_PAGE		DATA(0x28)
     38#define CP_PA_BACKUP_PAGES_MAP	DATA(0x30)
     39
     40	.text
     41	.align PAGE_SIZE
     42	.code64
      43SYM_CODE_START_NOALIGN(relocate_kernel)
      44	UNWIND_HINT_EMPTY
      45	ANNOTATE_NOENDBR
      46	/*
      47	 * %rdi indirection_page    - head of the page list walked by swap_pages
      48	 * %rsi page_list           - address array, indexed via the PTR() macro
      49	 * %rdx start address       - entry point of the new kernel image
      50	 * %rcx preserve_context    - nonzero => arrange to jump back afterwards
      51	 * %r8  host_mem_enc_active - nonzero => flush caches before copying
      52	 */
      53
      54	/* Save the CPU context, used for jumping back */
      55	pushq %rbx
      56	pushq %rbp
      57	pushq %r12
      58	pushq %r13
      59	pushq %r14
      60	pushq %r15
      61	pushf
      62
	/*
	 * Stash the stack pointer and control registers in the DATA() area
	 * of the control page; virtual_mapped restores them on the way back.
	 */
      63	movq	PTR(VA_CONTROL_PAGE)(%rsi), %r11
      64	movq	%rsp, RSP(%r11)
      65	movq	%cr0, %rax
      66	movq	%rax, CR0(%r11)
      67	movq	%cr3, %rax
      68	movq	%rax, CR3(%r11)
      69	movq	%cr4, %rax
      70	movq	%rax, CR4(%r11)
      71
      72	/* Save CR4 (still in %rax). Required to enable the right paging mode later. */
      73	movq	%rax, %r13
      74
      75	/* zero out flags, and disable interrupts */
      76	pushq $0
      77	popfq
      78
      79	/* Save SME active flag */
      80	movq	%r8, %r12
      81
      82	/*
      83	 * get physical address of control page now
      84	 * this is impossible after page table switch
      85	 */
      86	movq	PTR(PA_CONTROL_PAGE)(%rsi), %r8
      87
      88	/* get physical address of page table now too */
      89	movq	PTR(PA_TABLE_PAGE)(%rsi), %r9
      90
      91	/* get physical address of swap page now */
      92	movq	PTR(PA_SWAP_PAGE)(%rsi), %r10
      93
      94	/* save some information for jumping back */
      95	movq	%r9, CP_PA_TABLE_PAGE(%r11)
      96	movq	%r10, CP_PA_SWAP_PAGE(%r11)
      97	movq	%rdi, CP_PA_BACKUP_PAGES_MAP(%r11)
      98
      99	/* Switch to the identity mapped page tables */
     100	movq	%r9, %cr3
     101
     102	/* setup a new stack at the end of the physical control page */
     103	lea	PAGE_SIZE(%r8), %rsp
     104
     105	/* jump to identity mapped page */
     106	addq	$(identity_mapped - relocate_kernel), %r8
     107	pushq	%r8
	/*
	 * push + RET acts as an indirect jump into the identity-mapped copy
	 * of identity_mapped without using an indirect branch instruction.
	 */
     108	RET
     109SYM_CODE_END(relocate_kernel)
    110
     111SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
     112	UNWIND_HINT_EMPTY
	/*
	 * Runs from the identity-mapped control page.  Registers inherited
	 * from relocate_kernel: %rdx = start address, %rcx = preserve_context,
	 * %r8 = PA of control page, %r9 = PA of page table, %r10 = PA of swap
	 * page, %r12 = SME flag, %r13 = saved CR4.
	 */
     113	/* set return address to 0 if not preserving context */
     114	pushq	$0
     115	/* store the start address on the stack */
     116	pushq   %rdx
     117
     118	/*
     119	 * Clear X86_CR4_CET (if it was set) such that we can clear CR0_WP
     120	 * below.
     121	 */
     122	movq	%cr4, %rax
     123	andq	$~(X86_CR4_CET), %rax
     124	movq	%rax, %cr4
     125
     126	/*
     127	 * Set cr0 to a known state:
     128	 *  - Paging enabled
     129	 *  - Alignment check disabled
     130	 *  - Write protect disabled
     131	 *  - No task switch
     132	 *  - Don't do FP software emulation.
     133	 *  - Protected mode enabled
     134	 */
     135	movq	%cr0, %rax
     136	andq	$~(X86_CR0_AM | X86_CR0_WP | X86_CR0_TS | X86_CR0_EM), %rax
     137	orl	$(X86_CR0_PG | X86_CR0_PE), %eax
     138	movq	%rax, %cr0
     139
     140	/*
     141	 * Set cr4 to a known state:
     142	 *  - physical address extension enabled
     143	 *  - 5-level paging, if it was enabled before
     144	 */
     145	movl	$X86_CR4_PAE, %eax
	/* %r13 = CR4 value saved in relocate_kernel */
     146	testq	$X86_CR4_LA57, %r13
     147	jz	1f
     148	orl	$X86_CR4_LA57, %eax
     1491:
     150	movq	%rax, %cr4
     151
	/* flush any prefetched instructions after the paging-mode change */
     152	jmp 1f
     1531:
     154
     155	/* Flush the TLB (needed?) */
     156	movq	%r9, %cr3
     157
     158	/*
     159	 * If SME is active, there could be old encrypted cache line
     160	 * entries that will conflict with the now unencrypted memory
     161	 * used by kexec. Flush the caches before copying the kernel.
     162	 */
     163	testq	%r12, %r12
     164	jz 1f
     165	wbinvd
     1661:
     167
	/* preserve preserve_context; %rcx is clobbered inside swap_pages */
     168	movq	%rcx, %r11
     169	call	swap_pages
     170
     171	/*
     172	 * To be certain of avoiding problems with self-modifying code
     173	 * I need to execute a serializing instruction here.
     174	 * So I flush the TLB by reloading %cr3 here, it's handy,
     175	 * and not processor dependent.
     176	 */
     177	movq	%cr3, %rax
     178	movq	%rax, %cr3
     179
     180	/*
     181	 * set all of the registers to known values
     182	 * leave %rsp alone
     183	 */
     184
	/*
	 * preserve_context == 0: scrub all registers, then RET pops the start
	 * address pushed at entry and enters the new kernel image.
	 */
     185	testq	%r11, %r11
     186	jnz 1f
     187	xorl	%eax, %eax
     188	xorl	%ebx, %ebx
     189	xorl    %ecx, %ecx
     190	xorl    %edx, %edx
     191	xorl    %esi, %esi
     192	xorl    %edi, %edi
     193	xorl    %ebp, %ebp
     194	xorl	%r8d, %r8d
     195	xorl	%r9d, %r9d
     196	xorl	%r10d, %r10d
     197	xorl	%r11d, %r11d
     198	xorl	%r12d, %r12d
     199	xorl	%r13d, %r13d
     200	xorl	%r14d, %r14d
     201	xorl	%r15d, %r15d
     202
     203	RET
     204
	/* preserve_context != 0: call the new image and expect it to return */
     2051:
     206	popq	%rdx
     207	leaq	PAGE_SIZE(%r10), %rsp
     208	ANNOTATE_RETPOLINE_SAFE
     209	call	*%rdx
     210
     211	/* get the re-entry point of the peer system */
     212	movq	0(%rsp), %rbp
	/*
	 * The CP_* values were stored at DATA() offsets from the control
	 * page; relocate_kernel's own address is used as that base here.
	 */
     213	leaq	relocate_kernel(%rip), %r8
     214	movq	CP_PA_SWAP_PAGE(%r8), %r10
     215	movq	CP_PA_BACKUP_PAGES_MAP(%r8), %rdi
     216	movq	CP_PA_TABLE_PAGE(%r8), %rax
     217	movq	%rax, %cr3
     218	lea	PAGE_SIZE(%r8), %rsp
	/* undo the copy so the original kernel's pages are intact again */
     219	call	swap_pages
     220	movq	$virtual_mapped, %rax
     221	pushq	%rax
     222	RET
     223SYM_CODE_END(identity_mapped)
    224
     225SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
     226	UNWIND_HINT_EMPTY
     227	ANNOTATE_NOENDBR // RET target, above
	/*
	 * Jump-back path: restore the stack and control registers that
	 * relocate_kernel saved.  %r8 (set by identity_mapped) is the base
	 * for the RSP/CR0/CR3/CR4 data slots; %rbp holds the peer system's
	 * re-entry point, returned to the caller in %rax.
	 */
     228	movq	RSP(%r8), %rsp
     229	movq	CR4(%r8), %rax
     230	movq	%rax, %cr4
     231	movq	CR3(%r8), %rax
	/* fetch saved CR0 last: this reuses %r8, destroying the base pointer */
     232	movq	CR0(%r8), %r8
     233	movq	%rax, %cr3
     234	movq	%r8, %cr0
     235	movq	%rbp, %rax
     236
	/* pop the context pushed at the top of relocate_kernel (reverse order) */
     237	popf
     238	popq	%r15
     239	popq	%r14
     240	popq	%r13
     241	popq	%r12
     242	popq	%rbp
     243	popq	%rbx
     244	RET
     245SYM_CODE_END(virtual_mapped)
    246
    247	/* Do the copies */
     248SYM_CODE_START_LOCAL_NOALIGN(swap_pages)
     249	UNWIND_HINT_EMPTY
	/*
	 * Walk the indirection page list (%rdi) and exchange each source page
	 * with the current destination page, bouncing through the swap page
	 * in %r10.  Each list entry is a page address with flag bits in the
	 * low bits: 0x1 = destination, 0x2 = indirection, 0x4 = done,
	 * 0x8 = source.  Clobbers %rax, %rbx, %rcx, %rdx, %rsi, %rdi.
	 */
     250	movq	%rdi, %rcx 	/* Put the page_list in %rcx */
     251	xorl	%edi, %edi
     252	xorl	%esi, %esi
     253	jmp	1f
     254
     2550:	/* top, read another word for the indirection page */
     256
     257	movq	(%rbx), %rcx
     258	addq	$8,	%rbx
     2591:
     260	testb	$0x1,	%cl   /* is it a destination page? */
     261	jz	2f
     262	movq	%rcx,	%rdi
     263	andq	$0xfffffffffffff000, %rdi
     264	jmp	0b
     2652:
     266	testb	$0x2,	%cl   /* is it an indirection page? */
     267	jz	2f
     268	movq	%rcx,   %rbx
     269	andq	$0xfffffffffffff000, %rbx
     270	jmp	0b
     2712:
     272	testb	$0x4,	%cl   /* is it the done indicator? */
     273	jz	2f
     274	jmp	3f
     2752:
     276	testb	$0x8,	%cl   /* is it the source indicator? */
     277	jz	0b	      /* Ignore it otherwise */
     278	movq	%rcx,   %rsi  /* For every source page do a copy */
     279	andq	$0xfffffffffffff000, %rsi
     280
	/* %rdx = destination page, %rax = source page (rep movsq advances
	 * %rsi/%rdi, so the originals are kept here) */
     281	movq	%rdi, %rdx
     282	movq	%rsi, %rax
     283
	/* source -> swap page (512 qwords = one 4K page) */
     284	movq	%r10, %rdi
     285	movl	$512, %ecx
     286	rep ; movsq
     287
	/* destination -> source */
     288	movq	%rax, %rdi
     289	movq	%rdx, %rsi
     290	movl	$512, %ecx
     291	rep ; movsq
     292
	/* swap page (old source contents) -> destination */
     293	movq	%rdx, %rdi
     294	movq	%r10, %rsi
     295	movl	$512, %ecx
     296	rep ; movsq
     297
     298	lea	PAGE_SIZE(%rax), %rsi
     299	jmp	0b
     3003:
     301	RET
     302SYM_CODE_END(swap_pages)
    303
    304	.globl kexec_control_code_size
    305.set kexec_control_code_size, . - relocate_kernel