cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

sev_verify_cbit.S (2480B)


/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *	sev_verify_cbit.S - Code for verification of the C-bit position reported
 *			    by the Hypervisor when running with SEV enabled.
 *
 *	Copyright (c) 2020  Joerg Roedel (jroedel@suse.de)
 *
 * sev_verify_cbit() is called before switching to a new long-mode page-table
 * at boot.
 *
 * It verifies that the C-bit position is correct by writing a random value to
 * an encrypted memory location while still on the current page-table. It then
 * switches to the new page-table and verifies that the memory content is
 * unchanged, before switching back to the current page-table. If the check
 * succeeded, the function returns; if it failed, the code invalidates the
 * stack pointer and goes into a hlt loop. The stack pointer is invalidated to
 * make sure no interrupt or exception can get the CPU out of the hlt loop.
 *
 * The new page-table pointer is expected in %rdi (first parameter).
 */
SYM_FUNC_START(sev_verify_cbit)
#ifdef CONFIG_AMD_MEM_ENCRYPT
	/* First check if a C-bit was detected */
	movq	sme_me_mask(%rip), %rsi
	testq	%rsi, %rsi
	jz	3f

	/* sme_me_mask != 0 could mean SME or SEV - Check also for SEV */
	movq	sev_status(%rip), %rsi
	testq	%rsi, %rsi
	jz	3f

	/* Save CR4 in %rsi */
	movq	%cr4, %rsi

	/* Disable Global Pages */
	movq	%rsi, %rdx
	andq	$(~X86_CR4_PGE), %rdx
	movq	%rdx, %cr4

	/*
	 * Verified that running under SEV - now get a random value using
	 * RDRAND. This instruction is mandatory when running as an SEV guest.
	 *
	 * Don't bail out of the loop if RDRAND returns errors. It is better to
	 * prevent forward progress than to work with a non-random value here.
	 */
1:	rdrand	%rdx
	jnc	1b

	/* Store value to memory and keep it in %rdx */
	movq	%rdx, sev_check_data(%rip)

	/* Backup current %cr3 value to restore it later */
	movq	%cr3, %rcx

	/* Switch to new %cr3 - This might unmap the stack */
	movq	%rdi, %cr3

	/*
	 * Compare value in %rdx with memory location. If C-bit is incorrect
	 * this would read the encrypted data and make the check fail.
	 */
	cmpq	%rdx, sev_check_data(%rip)

	/* Restore old %cr3 */
	movq	%rcx, %cr3

	/* Restore previous CR4 */
	movq	%rsi, %cr4

	/* Check CMPQ result */
	je	3f

	/*
	 * The check failed. To prevent any forward progress and to block ROP
	 * attacks, invalidate the stack pointer and go into a hlt loop.
	 */
	xorq	%rsp, %rsp
	subq	$0x1000, %rsp
2:	hlt
	jmp 2b
3:
#endif
	/* Return page-table pointer */
	movq	%rdi, %rax
	RET
SYM_FUNC_END(sev_verify_cbit)
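
For orientation, the same control flow can be sketched in C. Everything below
is illustrative only: the extern helpers and globals are hypothetical
stand-ins for privileged instructions and kernel symbols, not real kernel API,
and the real routine has to stay in assembly because the stack may be unmapped
while the new page-table is active, so no calls or register spills are
possible at that point.

/*
 * Hedged C sketch of sev_verify_cbit() - illustrative only.
 */
extern unsigned long read_cr3(void);            /* hypothetical */
extern unsigned long read_cr4(void);            /* hypothetical */
extern void write_cr3(unsigned long val);       /* hypothetical */
extern void write_cr4(unsigned long val);       /* hypothetical */
extern int rdrand(unsigned long *out);          /* hypothetical: 0 = CF clear */
extern void halt(void);                         /* hypothetical hlt wrapper */
extern unsigned long sme_me_mask, sev_status;
extern volatile unsigned long sev_check_data;

#define X86_CR4_PGE (1UL << 7)                  /* CR4.PGE is bit 7 */

unsigned long sev_verify_cbit(unsigned long new_cr3)
{
	unsigned long old_cr3, old_cr4, val;
	int ok;

	/* Nothing to check unless SME and SEV are actually enabled */
	if (!sme_me_mask || !sev_status)
		return new_cr3;

	/*
	 * Clear CR4.PGE: writing CR3 does not flush global TLB entries, so a
	 * stale global mapping could otherwise satisfy the re-read below and
	 * mask a wrong C-bit in the new page-table.
	 */
	old_cr4 = read_cr4();
	write_cr4(old_cr4 & ~X86_CR4_PGE);

	/* Retry RDRAND until it signals success; a predictable value
	 * would weaken the check */
	while (!rdrand(&val))
		;

	sev_check_data = val;		/* write via the current page-table */

	old_cr3 = read_cr3();
	write_cr3(new_cr3);		/* may unmap the stack */
	ok = (sev_check_data == val);	/* re-read via the new page-table */
	write_cr3(old_cr3);
	write_cr4(old_cr4);

	if (!ok) {
		/* Wrong C-bit position: never return. The asm version also
		 * invalidates %rsp so no interrupt or exception can get the
		 * CPU out of the loop. */
		for (;;)
			halt();
	}

	return new_cr3;
}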