cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

head.S (3978B)


/* SPDX-License-Identifier: GPL-2.0 */

/*
 * Copyright (C) 2016, Oracle and/or its affiliates. All rights reserved.
 */

	.code32
	.text
#define _pa(x)          ((x) - __START_KERNEL_map)

#include <linux/elfnote.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/asm.h>
#include <asm/boot.h>
#include <asm/processor-flags.h>
#include <asm/msr.h>
#include <asm/nospec-branch.h>
#include <xen/interface/elfnote.h>

	__HEAD

/*
 * Entry point for PVH guests.
 *
 * Xen ABI specifies the following register state when we come here:
 *
 * - `ebx`: contains the physical memory address where the loader has placed
 *          the boot start info structure.
 * - `cr0`: bit 0 (PE) must be set. All the other writeable bits are cleared.
 * - `cr4`: all bits are cleared.
 * - `cs `: must be a 32-bit read/execute code segment with a base of `0`
 *          and a limit of `0xFFFFFFFF`. The selector value is unspecified.
 * - `ds`, `es`: must be a 32-bit read/write data segment with a base of
 *               `0` and a limit of `0xFFFFFFFF`. The selector values are all
 *               unspecified.
 * - `tr`: must be a 32-bit TSS (active) with a base of `0` and a limit
 *         of `0x67`.
 * - `eflags`: bit 17 (VM) must be cleared. Bit 9 (IF) must be cleared.
 *             Bit 8 (TF) must be cleared. Other bits are all unspecified.
 *
 * All other processor registers and flag bits are unspecified. The OS is in
 * charge of setting up its own stack, GDT and IDT.
 */

#define PVH_GDT_ENTRY_CS	1
#define PVH_GDT_ENTRY_DS	2
#define PVH_CS_SEL		(PVH_GDT_ENTRY_CS * 8)
#define PVH_DS_SEL		(PVH_GDT_ENTRY_DS * 8)

SYM_CODE_START_LOCAL(pvh_start_xen)
	UNWIND_HINT_EMPTY
	cld

	lgdt (_pa(gdt))

	mov $PVH_DS_SEL,%eax
	mov %eax,%ds
	mov %eax,%es
	mov %eax,%ss

	/* Stash hvm_start_info. */
	mov $_pa(pvh_start_info), %edi
	mov %ebx, %esi
	mov _pa(pvh_start_info_sz), %ecx
	shr $2,%ecx
	rep
	movsl

	mov $_pa(early_stack_end), %esp

	/* Enable PAE mode. */
	mov %cr4, %eax
	orl $X86_CR4_PAE, %eax
	mov %eax, %cr4

#ifdef CONFIG_X86_64
	/* Enable Long mode. */
	mov $MSR_EFER, %ecx
	rdmsr
	btsl $_EFER_LME, %eax
	wrmsr

	/* Enable pre-constructed page tables. */
	mov $_pa(init_top_pgt), %eax
	mov %eax, %cr3
	mov $(X86_CR0_PG | X86_CR0_PE), %eax
	mov %eax, %cr0

	/* Jump to 64-bit mode. */
	ljmp $PVH_CS_SEL, $_pa(1f)

	/* 64-bit entry point. */
	.code64
1:
	/* Set base address in stack canary descriptor. */
	mov $MSR_GS_BASE,%ecx
	mov $_pa(canary), %eax
	xor %edx, %edx
	wrmsr

	call xen_prepare_pvh

	/* startup_64 expects boot_params in %rsi. */
	mov $_pa(pvh_bootparams), %rsi
	mov $_pa(startup_64), %rax
	ANNOTATE_RETPOLINE_SAFE
	jmp *%rax

#else /* CONFIG_X86_64 */

	call mk_early_pgtbl_32

	mov $_pa(initial_page_table), %eax
	mov %eax, %cr3

	mov %cr0, %eax
	or $(X86_CR0_PG | X86_CR0_PE), %eax
	mov %eax, %cr0

	ljmp $PVH_CS_SEL, $1f
1:
	call xen_prepare_pvh
	mov $_pa(pvh_bootparams), %esi

	/* startup_32 doesn't expect paging and PAE to be on. */
	ljmp $PVH_CS_SEL, $_pa(2f)
2:
	mov %cr0, %eax
	and $~X86_CR0_PG, %eax
	mov %eax, %cr0
	mov %cr4, %eax
	and $~X86_CR4_PAE, %eax
	mov %eax, %cr4

	ljmp $PVH_CS_SEL, $_pa(startup_32)
#endif
SYM_CODE_END(pvh_start_xen)

	.section ".init.data","aw"
	.balign 8
SYM_DATA_START_LOCAL(gdt)
	.word gdt_end - gdt_start
	.long _pa(gdt_start)
	.word 0
SYM_DATA_END(gdt)
SYM_DATA_START_LOCAL(gdt_start)
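	/*
	 * Flat 4 GiB code/data segments. The low byte of the GDT_ENTRY flags
	 * value is the access byte (0x9a: present, DPL0, execute/read code;
	 * 0x92: present, DPL0, read/write data); the high nibble selects 4 KiB
	 * granularity plus L=1 (64-bit code, 0xa0) or D/B=1 (32-bit, 0xc0).
	 */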
	.quad 0x0000000000000000            /* NULL descriptor */
#ifdef CONFIG_X86_64
	.quad GDT_ENTRY(0xa09a, 0, 0xfffff) /* PVH_CS_SEL */
#else
	.quad GDT_ENTRY(0xc09a, 0, 0xfffff) /* PVH_CS_SEL */
#endif
	.quad GDT_ENTRY(0xc092, 0, 0xfffff) /* PVH_DS_SEL */
SYM_DATA_END_LABEL(gdt_start, SYM_L_LOCAL, gdt_end)

	.balign 16
SYM_DATA_LOCAL(canary, .fill 48, 1, 0)

SYM_DATA_START_LOCAL(early_stack)
	.fill BOOT_STACK_SIZE, 1, 0
SYM_DATA_END_LABEL(early_stack, SYM_L_LOCAL, early_stack_end)

	ELFNOTE(Xen, XEN_ELFNOTE_PHYS32_ENTRY,
	             _ASM_PTR (pvh_start_xen - __START_KERNEL_map))
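
For reference, the structure stashed by the copy loop in pvh_start_xen (from the address Xen passes in %ebx into pvh_start_info) follows the Xen PVH start-of-day ABI, struct hvm_start_info from the Xen public headers. A minimal C sketch of that layout, reproduced from the public ABI description rather than from this tree, looks roughly like this:

	#include <stdint.h>

	/* Sketch of the Xen PVH boot information block; see
	 * xen/include/public/arch-x86/hvm/start_info.h for the authoritative
	 * definition. */
	struct hvm_start_info {
		uint32_t magic;          /* 0x336ec578: "xEn3" with the 0x80 bit of 'E' set */
		uint32_t version;        /* structure version (0 or 1) */
		uint32_t flags;          /* SIF_xxx flags */
		uint32_t nr_modules;     /* number of boot modules passed in */
		uint64_t modlist_paddr;  /* physical address of the module list */
		uint64_t cmdline_paddr;  /* physical address of the command line */
		uint64_t rsdp_paddr;     /* physical address of the ACPI RSDP */
		/* The fields below are only present from version 1 onwards. */
		uint64_t memmap_paddr;   /* physical address of the memory map */
		uint32_t memmap_entries; /* number of memory map entries */
		uint32_t reserved;       /* must be zero */
	};

The shr $2 / rep movsl pair copies pvh_start_info_sz bytes of this block, one 32-bit word at a time, into the kernel's own pvh_start_info before the boot stack and page tables are set up.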