cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

sev.c (10789B)


// SPDX-License-Identifier: GPL-2.0
/*
 * AMD Encrypted Register State Support
 *
 * Author: Joerg Roedel <jroedel@suse.de>
 */

/*
 * misc.h needs to be first because it knows how to include the other kernel
 * headers in the pre-decompression code in a way that does not break
 * compilation.
 */
#include "misc.h"

#include <asm/pgtable_types.h>
#include <asm/sev.h>
#include <asm/trapnr.h>
#include <asm/trap_pf.h>
#include <asm/msr-index.h>
#include <asm/fpu/xcr.h>
#include <asm/ptrace.h>
#include <asm/svm.h>
#include <asm/cpuid.h>

#include "error.h"
#include "../msr.h"

struct ghcb boot_ghcb_page __aligned(PAGE_SIZE);
struct ghcb *boot_ghcb;

/*
 * Copy a version of this function here - insn-eval.c can't be used in
 * pre-decompression code.
 */
static bool insn_has_rep_prefix(struct insn *insn)
{
	insn_byte_t p;
	int i;

	insn_get_prefixes(insn);

	for_each_insn_prefix(insn, i, p) {
		if (p == 0xf2 || p == 0xf3)
			return true;
	}

	return false;
}

/*
 * Only a dummy for insn_get_seg_base() - early boot code is 64-bit only and
 * doesn't use segments.
 */
static unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx)
{
	return 0UL;
}

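/*
 * The GHCB MSR (MSR_AMD64_SEV_ES_GHCB) doubles as the transport for the
 * MSR-based GHCB protocol: request and response values are written to and
 * read from the MSR directly, with a VMGEXIT in between.
 */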
static inline u64 sev_es_rd_ghcb_msr(void)
{
	struct msr m;

	boot_rdmsr(MSR_AMD64_SEV_ES_GHCB, &m);

	return m.q;
}

static inline void sev_es_wr_ghcb_msr(u64 val)
{
	struct msr m;

	m.q = val;
	boot_wrmsr(MSR_AMD64_SEV_ES_GHCB, &m);
}

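/*
 * Fetch and decode the instruction which triggered the #VC exception. The
 * boot stage runs identity-mapped, so the bytes at regs->ip can be read
 * with a plain memcpy.
 */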
static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt)
{
	char buffer[MAX_INSN_SIZE];
	int ret;

	memcpy(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE);

	ret = insn_decode(&ctxt->insn, buffer, MAX_INSN_SIZE, INSN_MODE_64);
	if (ret < 0)
		return ES_DECODE_FAILED;

	return ES_OK;
}

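/*
 * Memory access helpers for the shared #VC handling code. The
 * pre-decompression code has no userspace and runs identity-mapped, so a
 * plain memcpy is sufficient here, unlike in the runtime kernel's handlers.
 */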
static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
				   void *dst, char *buf, size_t size)
{
	memcpy(dst, buf, size);

	return ES_OK;
}

static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
				  void *src, char *buf, size_t size)
{
	memcpy(buf, src, size);

	return ES_OK;
}

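/*
 * Stub out __init and __pa() so the kernel code included below builds in
 * the pre-decompression environment: there are no init sections here, and
 * with identity-mapped memory a virtual address is its physical address.
 */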
#undef __init
#undef __pa
#define __init
#define __pa(x)	((unsigned long)(x))

#define __BOOT_COMPRESSED

/* Basic instruction decoding support needed */
#include "../../lib/inat.c"
#include "../../lib/insn.c"

/* Include code for early handlers */
#include "../../kernel/sev-shared.c"

static inline bool sev_snp_enabled(void)
{
	return sev_status & MSR_AMD64_SEV_SNP_ENABLED;
}

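/*
 * Change the RMP state of a single 4K page via the MSR-based Page State
 * Change (PSC) protocol: the GFN and requested operation are encoded into
 * the GHCB MSR, and the hypervisor's response is read back from the same
 * MSR after the VMGEXIT.
 */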
static void __page_state_change(unsigned long paddr, enum psc_op op)
{
	u64 val;

	if (!sev_snp_enabled())
		return;

	/*
	 * If private -> shared then invalidate the page before requesting the
	 * state change in the RMP table.
	 */
	if (op == SNP_PAGE_STATE_SHARED && pvalidate(paddr, RMP_PG_SIZE_4K, 0))
		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);

	/* Issue VMGEXIT to change the page state in RMP table. */
	sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op));
	VMGEXIT();

	/* Read the response of the VMGEXIT. */
	val = sev_es_rd_ghcb_msr();
	if ((GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP) || GHCB_MSR_PSC_RESP_VAL(val))
		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);

	/*
	 * Now that page state is changed in the RMP table, validate it so that it is
	 * consistent with the RMP entry.
	 */
	if (op == SNP_PAGE_STATE_PRIVATE && pvalidate(paddr, RMP_PG_SIZE_4K, 1))
		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
}

void snp_set_page_private(unsigned long paddr)
{
	__page_state_change(paddr, SNP_PAGE_STATE_PRIVATE);
}

void snp_set_page_shared(unsigned long paddr)
{
	__page_state_change(paddr, SNP_PAGE_STATE_SHARED);
}

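/*
 * Set up the boot GHCB. The page must be mapped shared (decrypted) because
 * it is used for communication with the hypervisor, which cannot read
 * guest-encrypted memory.
 */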
static bool early_setup_ghcb(void)
{
	if (set_page_decrypted((unsigned long)&boot_ghcb_page))
		return false;

	/* Page is now mapped decrypted, clear it */
	memset(&boot_ghcb_page, 0, sizeof(boot_ghcb_page));

	boot_ghcb = &boot_ghcb_page;

	/* Initialize lookup tables for the instruction decoder */
	inat_init_tables();

	/* SNP guests require that the GHCB GPA be registered */
	if (sev_snp_enabled())
		snp_register_ghcb_early(__pa(&boot_ghcb_page));

	return true;
}

void sev_es_shutdown_ghcb(void)
{
	if (!boot_ghcb)
		return;

	if (!sev_es_check_cpu_features())
		error("SEV-ES CPU Features missing.");

	/*
	 * GHCB Page must be flushed from the cache and mapped encrypted again.
	 * Otherwise the running kernel will see strange cache effects when
	 * trying to use that page.
	 */
	if (set_page_encrypted((unsigned long)&boot_ghcb_page))
		error("Can't map GHCB page encrypted");

	/*
	 * GHCB page is mapped encrypted again and flushed from the cache.
	 * Mark it non-present now to catch bugs when #VC exceptions trigger
	 * after this point.
	 */
	if (set_page_non_present((unsigned long)&boot_ghcb_page))
		error("Can't unmap GHCB page");
}

bool sev_es_check_ghcb_fault(unsigned long address)
{
	/* Check whether the fault was on the GHCB page */
	return ((address & PAGE_MASK) == (unsigned long)&boot_ghcb_page);
}

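/*
 * #VC exception handler for the decompression stage. Only the intercepts
 * the decompression code can trigger are handled here: RDTSC(P), port I/O
 * and CPUID. Anything else is unsupported and terminates the guest.
 */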
void do_boot_stage2_vc(struct pt_regs *regs, unsigned long exit_code)
{
	struct es_em_ctxt ctxt;
	enum es_result result;

	if (!boot_ghcb && !early_setup_ghcb())
		sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);

	vc_ghcb_invalidate(boot_ghcb);
	result = vc_init_em_ctxt(&ctxt, regs, exit_code);
	if (result != ES_OK)
		goto finish;

	switch (exit_code) {
	case SVM_EXIT_RDTSC:
	case SVM_EXIT_RDTSCP:
		result = vc_handle_rdtsc(boot_ghcb, &ctxt, exit_code);
		break;
	case SVM_EXIT_IOIO:
		result = vc_handle_ioio(boot_ghcb, &ctxt);
		break;
	case SVM_EXIT_CPUID:
		result = vc_handle_cpuid(boot_ghcb, &ctxt);
		break;
	default:
		result = ES_UNSUPPORTED;
		break;
	}

finish:
	if (result == ES_OK)
		vc_finish_insn(&ctxt);
	else if (result != ES_RETRY)
		sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
}

static void enforce_vmpl0(void)
{
	u64 attrs;
	int err;

	/*
	 * RMPADJUST modifies RMP permissions of a lesser-privileged (numerically
	 * higher) privilege level. Here, clear the VMPL1 permission mask of the
	 * GHCB page. If the guest is not running at VMPL0, this will fail.
	 *
	 * If the guest is running at VMPL0, it will succeed. Even if that operation
	 * modifies permission bits, it is still ok to do so currently because Linux
	 * SNP guests are supported only on VMPL0, so changes to VMPL1-or-higher
	 * permission masks are a don't-care.
	 */
	attrs = 1;
	if (rmpadjust((unsigned long)&boot_ghcb_page, RMP_PG_SIZE_4K, attrs))
		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_NOT_VMPL0);
}

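/*
 * Detect SEV support: check the CPUID 0x8000001f feature leaf and the SEV
 * status MSR, negotiate the GHCB protocol for SEV-ES/SNP guests, and
 * derive the encryption mask from the C-bit position reported in CPUID
 * Fn8000_001F[EBX].
 */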
void sev_enable(struct boot_params *bp)
{
	unsigned int eax, ebx, ecx, edx;
	struct msr m;
	bool snp;

	/*
	 * Setup/preliminary detection of SNP. This will be sanity-checked
	 * against CPUID/MSR values later.
	 */
	snp = snp_init(bp);

	/* Check for the SME/SEV support leaf */
	eax = 0x80000000;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	if (eax < 0x8000001f)
		return;

	/*
	 * Check for the SME/SEV feature:
	 *   CPUID Fn8000_001F[EAX]
	 *   - Bit 0 - Secure Memory Encryption support
	 *   - Bit 1 - Secure Encrypted Virtualization support
	 *   CPUID Fn8000_001F[EBX]
	 *   - Bits 5:0 - Pagetable bit position used to indicate encryption
	 */
	eax = 0x8000001f;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	/* Check whether SEV is supported */
	if (!(eax & BIT(1))) {
		if (snp)
			error("SEV-SNP support indicated by CC blob, but not CPUID.");
		return;
	}

	/* Set the SME mask if this is an SEV guest. */
	boot_rdmsr(MSR_AMD64_SEV, &m);
	sev_status = m.q;
	if (!(sev_status & MSR_AMD64_SEV_ENABLED))
		return;

	/* Negotiate the GHCB protocol version. */
	if (sev_status & MSR_AMD64_SEV_ES_ENABLED) {
		if (!sev_es_negotiate_protocol())
			sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_PROT_UNSUPPORTED);
	}

	/*
	 * SNP is supported in v2 of the GHCB spec which mandates support for HV
	 * features.
	 */
	if (sev_status & MSR_AMD64_SEV_SNP_ENABLED) {
		if (!(get_hv_features() & GHCB_HV_FT_SNP))
			sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);

		enforce_vmpl0();
	}

	if (snp && !(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
		error("SEV-SNP support indicated by CC blob, but not SEV status MSR.");

	sme_me_mask = BIT_ULL(ebx & 0x3f);
}

/* Search for Confidential Computing blob in the EFI config table. */
static struct cc_blob_sev_info *find_cc_blob_efi(struct boot_params *bp)
{
	unsigned long cfg_table_pa;
	unsigned int cfg_table_len;
	int ret;

	ret = efi_get_conf_table(bp, &cfg_table_pa, &cfg_table_len);
	if (ret)
		return NULL;

	return (struct cc_blob_sev_info *)efi_find_vendor_table(bp, cfg_table_pa,
								cfg_table_len,
								EFI_CC_BLOB_GUID);
}

/*
 * Initial set up of SNP relies on information provided by the
 * Confidential Computing blob, which can be passed to the boot kernel
 * by firmware/bootloader in the following ways:
 *
 * - via an entry in the EFI config table
 * - via a setup_data structure, as defined by the Linux Boot Protocol
 *
 * Scan for the blob in that order.
 */
static struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp)
{
	struct cc_blob_sev_info *cc_info;

	cc_info = find_cc_blob_efi(bp);
	if (cc_info)
		goto found_cc_info;

	cc_info = find_cc_blob_setup_data(bp);
	if (!cc_info)
		return NULL;

found_cc_info:
	if (cc_info->magic != CC_BLOB_SEV_HDR_MAGIC)
		sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);

	return cc_info;
}

/*
 * Indicate SNP based on presence of SNP-specific CC blob. Subsequent checks
 * will verify the SNP CPUID/MSR bits.
 */
bool snp_init(struct boot_params *bp)
{
	struct cc_blob_sev_info *cc_info;

	if (!bp)
		return false;

	cc_info = find_cc_blob(bp);
	if (!cc_info)
		return false;

	/*
	 * If a SNP-specific Confidential Computing blob is present, then
	 * firmware/bootloader have indicated SNP support. Verifying this
	 * involves CPUID checks which will be more reliable if the SNP
	 * CPUID table is used. See comments over snp_setup_cpuid_table() for
	 * more details.
	 */
	setup_cpuid_table(cc_info);

	/*
	 * Pass run-time kernel a pointer to CC info via boot_params so EFI
	 * config table doesn't need to be searched again during early startup
	 * phase.
	 */
	bp->cc_blob_address = (u32)(unsigned long)cc_info;

	return true;
}

void sev_prep_identity_maps(unsigned long top_level_pgt)
{
	/*
	 * The Confidential Computing blob is used very early in the uncompressed
	 * kernel to find the in-memory CPUID table to handle CPUID
	 * instructions. Make sure an identity-mapping exists so it can be
	 * accessed after switchover.
	 */
	if (sev_snp_enabled()) {
		unsigned long cc_info_pa = boot_params->cc_blob_address;
		struct cc_blob_sev_info *cc_info;

		kernel_add_identity_map(cc_info_pa, cc_info_pa + sizeof(*cc_info));

		cc_info = (struct cc_blob_sev_info *)cc_info_pa;
		kernel_add_identity_map(cc_info->cpuid_phys, cc_info->cpuid_phys + cc_info->cpuid_len);
	}

	sev_verify_cbit(top_level_pgt);
}