cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

mem_encrypt_boot.S (4316B)


/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */

#include <linux/linkage.h>
#include <linux/pgtable.h>
#include <asm/page.h>
#include <asm/processor-flags.h>
#include <asm/msr-index.h>
#include <asm/nospec-branch.h>

	.text
	.code64
SYM_FUNC_START(sme_encrypt_execute)

	/*
	 * Entry parameters:
	 *   RDI - virtual address for the encrypted mapping
	 *   RSI - virtual address for the decrypted mapping
	 *   RDX - length to encrypt
	 *   RCX - virtual address of the encryption workarea, including:
	 *     - stack page (PAGE_SIZE)
	 *     - encryption routine page (PAGE_SIZE)
	 *     - intermediate copy buffer (PMD_PAGE_SIZE)
	 *    R8 - physical address of the pagetables to use for encryption
	 */

	push	%rbp
	movq	%rsp, %rbp		/* RBP now has original stack pointer */

	/* Set up a one page stack in the non-encrypted memory area */
	movq	%rcx, %rax		/* Workarea stack page */
	leaq	PAGE_SIZE(%rax), %rsp	/* Set new stack pointer */
	addq	$PAGE_SIZE, %rax	/* Workarea encryption routine */

	push	%r12
	movq	%rdi, %r10		/* Encrypted area */
	movq	%rsi, %r11		/* Decrypted area */
	movq	%rdx, %r12		/* Area length */

	/* Copy encryption routine into the workarea */
	movq	%rax, %rdi				/* Workarea encryption routine */
	leaq	__enc_copy(%rip), %rsi			/* Encryption routine */
	movq	$(.L__enc_copy_end - __enc_copy), %rcx	/* Encryption routine length */
	rep	movsb

	/* Setup registers for call */
	movq	%r10, %rdi		/* Encrypted area */
	movq	%r11, %rsi		/* Decrypted area */
	movq	%r8, %rdx		/* Pagetables used for encryption */
	movq	%r12, %rcx		/* Area length */
	movq	%rax, %r8		/* Workarea encryption routine */
	addq	$PAGE_SIZE, %r8		/* Workarea intermediate copy buffer */

	ANNOTATE_RETPOLINE_SAFE
	call	*%rax			/* Call the encryption routine */

	pop	%r12

	movq	%rbp, %rsp		/* Restore original stack pointer */
	pop	%rbp

	RET
SYM_FUNC_END(sme_encrypt_execute)

SYM_FUNC_START(__enc_copy)
/*
 * Routine used to encrypt memory in place.
 *   This routine must be run outside of the kernel proper since
 *   the kernel will be encrypted during the process. So this
 *   routine is defined here and then copied to an area outside
 *   of the kernel where it will remain and run decrypted
 *   during execution.
 *
 *   On entry the registers must be:
 *     RDI - virtual address for the encrypted mapping
 *     RSI - virtual address for the decrypted mapping
 *     RDX - address of the pagetables to use for encryption
 *     RCX - length of area
 *      R8 - intermediate copy buffer
 *
 *     RAX - points to this routine
 *
 * The area will be encrypted by copying from the non-encrypted
 * memory space to an intermediate buffer and then copying from the
 * intermediate buffer back to the encrypted memory space. The physical
 * addresses of the two mappings are the same which results in the area
 * being encrypted "in place".
 */
	/* Enable the new page tables */
	mov	%rdx, %cr3

	/* Flush any global TLBs */
	mov	%cr4, %rdx
	andq	$~X86_CR4_PGE, %rdx
	mov	%rdx, %cr4
	orq	$X86_CR4_PGE, %rdx
	mov	%rdx, %cr4

	push	%r15
	push	%r12

	movq	%rcx, %r9		/* Save area length */
	movq	%rdi, %r10		/* Save encrypted area address */
	movq	%rsi, %r11		/* Save decrypted area address */

	/* Set the PAT register PA5 entry to write-protect */
	movl	$MSR_IA32_CR_PAT, %ecx
	rdmsr
	mov	%rdx, %r15		/* Save original PAT value */
	andl	$0xffff00ff, %edx	/* Clear PA5 */
	orl	$0x00000500, %edx	/* Set PA5 to WP */
	wrmsr

	wbinvd				/* Invalidate any cache entries */

	/* Copy/encrypt up to 2MB at a time */
	movq	$PMD_PAGE_SIZE, %r12
1:
	cmpq	%r12, %r9
	jnb	2f
	movq	%r9, %r12

2:
	movq	%r11, %rsi		/* Source - decrypted area */
	movq	%r8, %rdi		/* Dest   - intermediate copy buffer */
	movq	%r12, %rcx
	rep	movsb

	movq	%r8, %rsi		/* Source - intermediate copy buffer */
	movq	%r10, %rdi		/* Dest   - encrypted area */
	movq	%r12, %rcx
	rep	movsb

	addq	%r12, %r11
	addq	%r12, %r10
	subq	%r12, %r9		/* Kernel length decrement */
	jnz	1b			/* Kernel length not zero? */

	/* Restore PAT register */
	movl	$MSR_IA32_CR_PAT, %ecx
	rdmsr
	mov	%r15, %rdx		/* Restore original PAT value */
	wrmsr

	pop	%r12
	pop	%r15

	RET
.L__enc_copy_end:
SYM_FUNC_END(__enc_copy)
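
For orientation, the entry-parameter comment at the top of sme_encrypt_execute maps onto a plain C declaration. The sketch below is illustrative only: the parameter names are inferred from the register comments in this file rather than copied from a kernel header, so treat them as assumptions.

/*
 * Hedged sketch: C-level view of sme_encrypt_execute's register interface.
 * Parameter names are assumptions derived from the comments above.
 */
void sme_encrypt_execute(unsigned long encrypted_vaddr,		/* RDI */
			 unsigned long decrypted_vaddr,		/* RSI */
			 unsigned long len,			/* RDX */
			 unsigned long workarea_vaddr,		/* RCX */
			 unsigned long pgtable_paddr);		/* R8  */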
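
The __enc_copy comment describes the in-place scheme: each chunk is read through the decrypted mapping into the workarea's intermediate buffer, then written back through the encrypted mapping of the same physical pages, at most PMD_PAGE_SIZE (2 MB) per iteration. Below is a freestanding C sketch of that loop, for illustration only; enc_copy_sketch and its parameters are hypothetical and not kernel code.

#include <string.h>

#define CHUNK (2UL * 1024 * 1024)	/* stands in for PMD_PAGE_SIZE (2 MB) */

/* Mirrors __enc_copy's main loop: two mappings of the same physical memory */
static void enc_copy_sketch(unsigned char *enc, unsigned char *dec,
			    unsigned long len, unsigned char *buf)
{
	while (len) {
		unsigned long n = len < CHUNK ? len : CHUNK;

		memcpy(buf, dec, n);	/* decrypted mapping -> intermediate buffer */
		memcpy(enc, buf, n);	/* intermediate buffer -> encrypted mapping */

		dec += n;
		enc += n;
		len -= n;
	}
}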