cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

sev.h (7810B)


      1/* SPDX-License-Identifier: GPL-2.0 */
      2/*
      3 * AMD Encrypted Register State Support
      4 *
      5 * Author: Joerg Roedel <jroedel@suse.de>
      6 */
      7
      8#ifndef __ASM_ENCRYPTED_STATE_H
      9#define __ASM_ENCRYPTED_STATE_H
     10
     11#include <linux/types.h>
     12#include <linux/sev.h>
     13#include <asm/insn.h>
     14#include <asm/sev-common.h>
     15#include <asm/bootparam.h>
     16
/* Range of GHCB protocol versions this guest implementation supports */
#define GHCB_PROTOCOL_MIN	1ULL
#define GHCB_PROTOCOL_MAX	2ULL
#define GHCB_DEFAULT_USAGE	0ULL

/* Exit to the hypervisor; "rep; vmmcall" is the VMGEXIT instruction encoding */
#define	VMGEXIT()			{ asm volatile("rep; vmmcall\n\r"); }
     22
/* Result codes for the #VC instruction-emulation helpers */
enum es_result {
	ES_OK,			/* All good */
	ES_UNSUPPORTED,		/* Requested operation not supported */
	ES_VMM_ERROR,		/* Unexpected state from the VMM */
	ES_DECODE_FAILED,	/* Instruction decoding failed */
	ES_EXCEPTION,		/* Instruction caused exception */
	ES_RETRY,		/* Retry instruction emulation */
};
     31
/* Exception details recorded when instruction emulation faults */
struct es_fault_info {
	unsigned long vector;		/* exception vector */
	unsigned long error_code;	/* exception error code */
	unsigned long cr2;		/* CR2 value (faulting address) */
};
     37
struct pt_regs;

/* ES instruction emulation context */
struct es_em_ctxt {
	struct pt_regs *regs;		/* saved register state */
	struct insn insn;		/* decoded instruction */
	struct es_fault_info fi;	/* fault info, filled on failure */
};
     46
/*
 * AMD SEV Confidential computing blob structure. The structure is
 * defined in OVMF UEFI firmware header:
 * https://github.com/tianocore/edk2/blob/master/OvmfPkg/Include/Guid/ConfidentialComputingSevSnpBlob.h
 */
#define CC_BLOB_SEV_HDR_MAGIC	0x45444d41	/* "AMDE" in little-endian */
struct cc_blob_sev_info {
	u32 magic;		/* expected: CC_BLOB_SEV_HDR_MAGIC */
	u16 version;
	u16 reserved;
	u64 secrets_phys;	/* physical address of the secrets page */
	u32 secrets_len;	/* length of the secrets page */
	u32 rsvd1;
	u64 cpuid_phys;		/* physical address of the CPUID page */
	u32 cpuid_len;		/* length of the CPUID page */
	u32 rsvd2;
} __packed;
     64
/* #VC handler variant used when no GHCB is available yet (early boot) */
void do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code);
     66
     67static inline u64 lower_bits(u64 val, unsigned int bits)
     68{
     69	u64 mask = (1ULL << bits) - 1;
     70
     71	return (val & mask);
     72}
     73
struct real_mode_header;
enum stack_type;
struct ghcb;

/* Early IDT entry points for #VC handler */
extern void vc_no_ghcb(void);
extern void vc_boot_ghcb(void);
extern bool handle_vc_boot_ghcb(struct pt_regs *regs);

/* Software defined (when rFlags.CF = 1) */
#define PVALIDATE_FAIL_NOUPDATE		255

/* RMP page size */
#define RMP_PG_SIZE_4K			0
#define RMP_PG_SIZE_2M			1
/* Translate between the RMP and x86 page-size encodings (4K <-> 2M only) */
#define RMP_TO_X86_PG_LEVEL(level)	(((level) == RMP_PG_SIZE_4K) ? PG_LEVEL_4K : PG_LEVEL_2M)
#define X86_TO_RMP_PG_LEVEL(level)	(((level) == PG_LEVEL_4K) ? RMP_PG_SIZE_4K : RMP_PG_SIZE_2M)
     91
/*
 * The RMP entry format is not architectural. The format is defined in PPR
 * Family 19h Model 01h, Rev B1 processor.
 */
struct __packed rmpentry {
	union {
		struct {
			u64	assigned	: 1,
				pagesize	: 1,	/* RMP_PG_SIZE_* encoding */
				immutable	: 1,
				rsvd1		: 9,
				gpa		: 39,	/* guest physical address */
				asid		: 10,
				vmsa		: 1,
				validated	: 1,
				rsvd2		: 1;
		} info;
		u64 low;	/* the bitfields above as one raw word */
	};
	u64 high;	/* second 8 bytes of the entry; layout not used here */
};
    113
/* Accessors for the rmpentry bitfields above */
#define rmpentry_assigned(x)	((x)->info.assigned)
#define rmpentry_pagesize(x)	((x)->info.pagesize)
#define rmpentry_vmsa(x)	((x)->info.vmsa)
#define rmpentry_asid(x)	((x)->info.asid)
#define rmpentry_validated(x)	((x)->info.validated)
#define rmpentry_gpa(x)		((unsigned long)(x)->info.gpa)
#define rmpentry_immutable(x)	((x)->info.immutable)

/* RMPADJUST attribute bit: target page is a VMSA page */
#define RMPADJUST_VMSA_PAGE_BIT		BIT(16)
    123
/* SNP Guest message request */
struct snp_req_data {
	unsigned long req_gpa;		/* GPA of the request page */
	unsigned long resp_gpa;		/* GPA of the response page */
	unsigned long data_gpa;		/* GPA of the data pages */
	unsigned int data_npages;	/* number of data pages */
};

struct sev_guest_platform_data {
	u64 secrets_gpa;		/* GPA of the SNP secrets page */
};
    135
/*
 * The secrets page contains 96-bytes of reserved field that can be used by
 * the guest OS. The guest OS uses the area to save the message sequence
 * number for each VMPCK.
 *
 * See the GHCB spec section Secret page layout for the format for this area.
 */
struct secrets_os_area {
	u32 msg_seqno_0;	/* message sequence number for VMPCK0 */
	u32 msg_seqno_1;	/* message sequence number for VMPCK1 */
	u32 msg_seqno_2;	/* message sequence number for VMPCK2 */
	u32 msg_seqno_3;	/* message sequence number for VMPCK3 */
	u64 ap_jump_table_pa;	/* physical address of the AP jump table */
	u8 rsvd[40];
	u8 guest_usage[32];	/* free for guest OS use */
} __packed;
    152
#define VMPCK_KEY_LEN		32	/* bytes per VMPCK */

/* See the SNP spec version 0.9 for secrets page format */
struct snp_secrets_page_layout {
	u32 version;
	u32 imien	: 1,
	    rsvd1	: 31;
	u32 fms;
	u32 rsvd2;
	u8 gosvw[16];
	u8 vmpck0[VMPCK_KEY_LEN];
	u8 vmpck1[VMPCK_KEY_LEN];
	u8 vmpck2[VMPCK_KEY_LEN];
	u8 vmpck3[VMPCK_KEY_LEN];
	struct secrets_os_area os_area;	/* guest-OS writable area */
	u8 rsvd3[3840];			/* pads the structure to 4096 bytes */
} __packed;
    170
/* RMP update request; field names mirror the rmpentry bitfields */
struct rmpupdate {
	u64 gpa;
	u8 assigned;
	u8 pagesize;	/* RMP_PG_SIZE_* */
	u8 immutable;
	u8 rsvd;
	u32 asid;
} __packed;
    179
#ifdef CONFIG_AMD_MEM_ENCRYPT
/* Static key patched on when SEV-ES support is enabled at runtime */
extern struct static_key_false sev_es_enable_key;
extern void __sev_es_ist_enter(struct pt_regs *regs);
extern void __sev_es_ist_exit(void);
/* IST enter/exit wrappers: no-ops unless the SEV-ES static key is enabled */
static __always_inline void sev_es_ist_enter(struct pt_regs *regs)
{
	if (static_branch_unlikely(&sev_es_enable_key))
		__sev_es_ist_enter(regs);
}
static __always_inline void sev_es_ist_exit(void)
{
	if (static_branch_unlikely(&sev_es_enable_key))
		__sev_es_ist_exit();
}
extern int sev_es_setup_ap_jump_table(struct real_mode_header *rmh);
extern void __sev_es_nmi_complete(void);
/* Signal NMI completion; no-op unless the SEV-ES static key is enabled */
static __always_inline void sev_es_nmi_complete(void)
{
	if (static_branch_unlikely(&sev_es_enable_key))
		__sev_es_nmi_complete();
}
extern int __init sev_es_efi_map_ghcbs(pgd_t *pgd);
/* Issue a GHCB-based hypervisor call; exit code/info describe the request */
extern enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb,
					  bool set_ghcb_msr,
					  struct es_em_ctxt *ctxt,
					  u64 exit_code, u64 exit_info_1,
					  u64 exit_info_2);
/*
 * rmpadjust() - Execute the RMPADJUST instruction.
 * @vaddr:     virtual address of the target page (passed in RAX)
 * @rmp_psize: RMP page size, RMP_PG_SIZE_* (passed in RCX)
 * @attrs:     attributes to apply, e.g. RMPADJUST_VMSA_PAGE_BIT (passed in RDX)
 *
 * Returns the instruction's result code as left in RAX.
 */
static inline int rmpadjust(unsigned long vaddr, bool rmp_psize, unsigned long attrs)
{
	int rc;

	/* "rmpadjust" mnemonic support in binutils 2.36 and newer */
	asm volatile(".byte 0xF3,0x0F,0x01,0xFE\n\t"
		     : "=a"(rc)
		     : "a"(vaddr), "c"(rmp_psize), "d"(attrs)
		     : "memory", "cc");

	return rc;
}
/*
 * pvalidate() - Execute the PVALIDATE instruction.
 * @vaddr:     virtual address of the target page (passed in RAX)
 * @rmp_psize: RMP page size, RMP_PG_SIZE_* (passed in RCX)
 * @validate:  true to validate, false to rescind validation (passed in RDX)
 *
 * Returns the instruction's RAX result code, or the software-defined
 * PVALIDATE_FAIL_NOUPDATE when the instruction set rFlags.CF, i.e. no
 * RMP change was performed.
 */
static inline int pvalidate(unsigned long vaddr, bool rmp_psize, bool validate)
{
	bool no_rmpupdate;
	int rc;

	/* "pvalidate" mnemonic support in binutils 2.36 and newer */
	asm volatile(".byte 0xF2, 0x0F, 0x01, 0xFF\n\t"
		     CC_SET(c)
		     : CC_OUT(c) (no_rmpupdate), "=a"(rc)
		     : "a"(vaddr), "c"(rmp_psize), "d"(validate)
		     : "memory", "cc");

	if (no_rmpupdate)
		return PVALIDATE_FAIL_NOUPDATE;

	return rc;
}
void setup_ghcb(void);
/* Early-boot page-state transitions between private (encrypted) and shared */
void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
					 unsigned int npages);
void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
					unsigned int npages);
void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op);
/* Runtime private <-> shared page-state transitions */
void snp_set_memory_shared(unsigned long vaddr, unsigned int npages);
void snp_set_memory_private(unsigned long vaddr, unsigned int npages);
void snp_set_wakeup_secondary_cpu(void);
bool snp_init(struct boot_params *bp);
void snp_abort(void);
/* Issue an SNP guest request; firmware error reported through @fw_err */
int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, unsigned long *fw_err);
#else	/* !CONFIG_AMD_MEM_ENCRYPT */
/* Stubs so callers compile when SEV support is configured out */
static inline void sev_es_ist_enter(struct pt_regs *regs) { }
static inline void sev_es_ist_exit(void) { }
static inline int sev_es_setup_ap_jump_table(struct real_mode_header *rmh) { return 0; }
static inline void sev_es_nmi_complete(void) { }
static inline int sev_es_efi_map_ghcbs(pgd_t *pgd) { return 0; }
static inline int pvalidate(unsigned long vaddr, bool rmp_psize, bool validate) { return 0; }
static inline int rmpadjust(unsigned long vaddr, bool rmp_psize, unsigned long attrs) { return 0; }
static inline void setup_ghcb(void) { }
static inline void __init
early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr, unsigned int npages) { }
static inline void __init
early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr, unsigned int npages) { }
static inline void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op) { }
static inline void snp_set_memory_shared(unsigned long vaddr, unsigned int npages) { }
static inline void snp_set_memory_private(unsigned long vaddr, unsigned int npages) { }
static inline void snp_set_wakeup_secondary_cpu(void) { }
static inline bool snp_init(struct boot_params *bp) { return false; }
static inline void snp_abort(void) { }
static inline int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input,
					  unsigned long *fw_err)
{
	return -ENOTTY;
}
#endif	/* CONFIG_AMD_MEM_ENCRYPT */
    273
    274#endif