cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

svm.h (14526B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __SVM_H
#define __SVM_H

#include <uapi/asm/svm.h>
#include <uapi/asm/kvm.h>

/*
 * 32-bit intercept words in the VMCB Control Area, starting
 * at Byte offset 000h.
 */

enum intercept_words {
	INTERCEPT_CR = 0,
	INTERCEPT_DR,
	INTERCEPT_EXCEPTION,
	INTERCEPT_WORD3,
	INTERCEPT_WORD4,
	INTERCEPT_WORD5,
	MAX_INTERCEPT,
};

enum {
	/* Byte offset 000h (word 0) */
	INTERCEPT_CR0_READ = 0,
	INTERCEPT_CR3_READ = 3,
	INTERCEPT_CR4_READ = 4,
	INTERCEPT_CR8_READ = 8,
	INTERCEPT_CR0_WRITE = 16,
	INTERCEPT_CR3_WRITE = 16 + 3,
	INTERCEPT_CR4_WRITE = 16 + 4,
	INTERCEPT_CR8_WRITE = 16 + 8,
	/* Byte offset 004h (word 1) */
	INTERCEPT_DR0_READ = 32,
	INTERCEPT_DR1_READ,
	INTERCEPT_DR2_READ,
	INTERCEPT_DR3_READ,
	INTERCEPT_DR4_READ,
	INTERCEPT_DR5_READ,
	INTERCEPT_DR6_READ,
	INTERCEPT_DR7_READ,
	INTERCEPT_DR0_WRITE = 48,
	INTERCEPT_DR1_WRITE,
	INTERCEPT_DR2_WRITE,
	INTERCEPT_DR3_WRITE,
	INTERCEPT_DR4_WRITE,
	INTERCEPT_DR5_WRITE,
	INTERCEPT_DR6_WRITE,
	INTERCEPT_DR7_WRITE,
	/* Byte offset 008h (word 2) */
	INTERCEPT_EXCEPTION_OFFSET = 64,
	/* Byte offset 00Ch (word 3) */
	INTERCEPT_INTR = 96,
	INTERCEPT_NMI,
	INTERCEPT_SMI,
	INTERCEPT_INIT,
	INTERCEPT_VINTR,
	INTERCEPT_SELECTIVE_CR0,
	INTERCEPT_STORE_IDTR,
	INTERCEPT_STORE_GDTR,
	INTERCEPT_STORE_LDTR,
	INTERCEPT_STORE_TR,
	INTERCEPT_LOAD_IDTR,
	INTERCEPT_LOAD_GDTR,
	INTERCEPT_LOAD_LDTR,
	INTERCEPT_LOAD_TR,
	INTERCEPT_RDTSC,
	INTERCEPT_RDPMC,
	INTERCEPT_PUSHF,
	INTERCEPT_POPF,
	INTERCEPT_CPUID,
	INTERCEPT_RSM,
	INTERCEPT_IRET,
	INTERCEPT_INTn,
	INTERCEPT_INVD,
	INTERCEPT_PAUSE,
	INTERCEPT_HLT,
	INTERCEPT_INVLPG,
	INTERCEPT_INVLPGA,
	INTERCEPT_IOIO_PROT,
	INTERCEPT_MSR_PROT,
	INTERCEPT_TASK_SWITCH,
	INTERCEPT_FERR_FREEZE,
	INTERCEPT_SHUTDOWN,
	/* Byte offset 010h (word 4) */
	INTERCEPT_VMRUN = 128,
	INTERCEPT_VMMCALL,
	INTERCEPT_VMLOAD,
	INTERCEPT_VMSAVE,
	INTERCEPT_STGI,
	INTERCEPT_CLGI,
	INTERCEPT_SKINIT,
	INTERCEPT_RDTSCP,
	INTERCEPT_ICEBP,
	INTERCEPT_WBINVD,
	INTERCEPT_MONITOR,
	INTERCEPT_MWAIT,
	INTERCEPT_MWAIT_COND,
	INTERCEPT_XSETBV,
	INTERCEPT_RDPRU,
	TRAP_EFER_WRITE,
	TRAP_CR0_WRITE,
	TRAP_CR1_WRITE,
	TRAP_CR2_WRITE,
	TRAP_CR3_WRITE,
	TRAP_CR4_WRITE,
	TRAP_CR5_WRITE,
	TRAP_CR6_WRITE,
	TRAP_CR7_WRITE,
	TRAP_CR8_WRITE,
	/* Byte offset 014h (word 5) */
	INTERCEPT_INVLPGB = 160,
	INTERCEPT_INVLPGB_ILLEGAL,
	INTERCEPT_INVPCID,
	INTERCEPT_MCOMMIT,
	INTERCEPT_TLBSYNC,
};


struct __attribute__ ((__packed__)) vmcb_control_area {
	u32 intercepts[MAX_INTERCEPT];
	u32 reserved_1[15 - MAX_INTERCEPT];
	u16 pause_filter_thresh;
	u16 pause_filter_count;
	u64 iopm_base_pa;
	u64 msrpm_base_pa;
	u64 tsc_offset;
	u32 asid;
	u8 tlb_ctl;
	u8 reserved_2[3];
	u32 int_ctl;
	u32 int_vector;
	u32 int_state;
	u8 reserved_3[4];
	u32 exit_code;
	u32 exit_code_hi;
	u64 exit_info_1;
	u64 exit_info_2;
	u32 exit_int_info;
	u32 exit_int_info_err;
	u64 nested_ctl;
	u64 avic_vapic_bar;
	u64 ghcb_gpa;
	u32 event_inj;
	u32 event_inj_err;
	u64 nested_cr3;
	u64 virt_ext;
	u32 clean;
	u32 reserved_5;
	u64 next_rip;
	u8 insn_len;
	u8 insn_bytes[15];
	u64 avic_backing_page;	/* Offset 0xe0 */
	u8 reserved_6[8];	/* Offset 0xe8 */
	u64 avic_logical_id;	/* Offset 0xf0 */
	u64 avic_physical_id;	/* Offset 0xf8 */
	u8 reserved_7[8];
	u64 vmsa_pa;		/* Used for an SEV-ES guest */
	u8 reserved_8[720];
	/*
	 * Offset 0x3e0, 32 bytes reserved
	 * for use by hypervisor/software.
	 */
	u8 reserved_sw[32];
};
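
/*
 * Illustrative sketch, not part of the upstream header: each INTERCEPT_* /
 * TRAP_* value above is a flat bit index into intercepts[], so the word is
 * bit / 32 and the position within it is bit % 32.  INTERCEPT_CPUID
 * (96 + 18 = 114), for instance, lands in intercepts[INTERCEPT_WORD3],
 * bit 18.  KVM's own helpers (vmcb_set_intercept() and friends in
 * arch/x86/kvm/svm/svm.h) do the equivalent of the made-up example_* helpers
 * below.
 */
static inline void example_vmcb_set_intercept(struct vmcb_control_area *control,
					      u32 bit)
{
	/* Set one intercept bit: word index is bit / 32, bit index is bit % 32. */
	control->intercepts[bit / 32] |= 1U << (bit % 32);
}

static inline bool example_vmcb_is_intercept(const struct vmcb_control_area *control,
					     u32 bit)
{
	/* Test the same flat index, e.g. example_vmcb_is_intercept(c, INTERCEPT_CPUID). */
	return control->intercepts[bit / 32] & (1U << (bit % 32));
}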


#define TLB_CONTROL_DO_NOTHING 0
#define TLB_CONTROL_FLUSH_ALL_ASID 1
#define TLB_CONTROL_FLUSH_ASID 3
#define TLB_CONTROL_FLUSH_ASID_LOCAL 7
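
/*
 * Illustrative note, not part of the upstream header: tlb_ctl is consumed on
 * the next VMRUN.  Flushing only the current guest's TLB entries, for
 * example, amounts to
 *
 *	control->tlb_ctl = TLB_CONTROL_FLUSH_ASID;
 *
 * while TLB_CONTROL_FLUSH_ALL_ASID drops entries for every ASID.
 */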

#define V_TPR_MASK 0x0f

#define V_IRQ_SHIFT 8
#define V_IRQ_MASK (1 << V_IRQ_SHIFT)

#define V_GIF_SHIFT 9
#define V_GIF_MASK (1 << V_GIF_SHIFT)

#define V_INTR_PRIO_SHIFT 16
#define V_INTR_PRIO_MASK (0x0f << V_INTR_PRIO_SHIFT)

#define V_IGN_TPR_SHIFT 20
#define V_IGN_TPR_MASK (1 << V_IGN_TPR_SHIFT)

#define V_IRQ_INJECTION_BITS_MASK (V_IRQ_MASK | V_INTR_PRIO_MASK | V_IGN_TPR_MASK)

#define V_INTR_MASKING_SHIFT 24
#define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT)

#define V_GIF_ENABLE_SHIFT 25
#define V_GIF_ENABLE_MASK (1 << V_GIF_ENABLE_SHIFT)

#define AVIC_ENABLE_SHIFT 31
#define AVIC_ENABLE_MASK (1 << AVIC_ENABLE_SHIFT)
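
/*
 * Illustrative sketch, not part of the upstream header: the V_* fields above
 * live in int_ctl.  Requesting a virtual interrupt roughly means programming
 * a vector and priority and setting V_IRQ; the made-up helper below shows the
 * shape of it (KVM's real V_IRQ handling lives in arch/x86/kvm/svm/svm.c).
 */
static inline void example_request_vintr(struct vmcb_control_area *control,
					 u8 vector, u8 prio)
{
	control->int_vector = vector;
	/* Replace any previously programmed injection bits. */
	control->int_ctl &= ~V_IRQ_INJECTION_BITS_MASK;
	control->int_ctl |= V_IRQ_MASK | V_IGN_TPR_MASK |
			    (((u32)prio & 0xf) << V_INTR_PRIO_SHIFT);
}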

#define LBR_CTL_ENABLE_MASK BIT_ULL(0)
#define VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK BIT_ULL(1)

#define SVM_INTERRUPT_SHADOW_MASK	BIT_ULL(0)
#define SVM_GUEST_INTERRUPT_MASK	BIT_ULL(1)

#define SVM_IOIO_STR_SHIFT 2
#define SVM_IOIO_REP_SHIFT 3
#define SVM_IOIO_SIZE_SHIFT 4
#define SVM_IOIO_ASIZE_SHIFT 7

#define SVM_IOIO_TYPE_MASK 1
#define SVM_IOIO_STR_MASK (1 << SVM_IOIO_STR_SHIFT)
#define SVM_IOIO_REP_MASK (1 << SVM_IOIO_REP_SHIFT)
#define SVM_IOIO_SIZE_MASK (7 << SVM_IOIO_SIZE_SHIFT)
#define SVM_IOIO_ASIZE_MASK (7 << SVM_IOIO_ASIZE_SHIFT)
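
/*
 * Illustrative sketch, not part of the upstream header: for an IOIO intercept
 * these masks decode exit_info_1, with the port number in bits 31:16.  The
 * made-up helper below mirrors what KVM's io_interception() extracts.
 */
static inline void example_decode_ioio(u64 exit_info_1, u16 *port,
				       unsigned int *size, bool *in, bool *string)
{
	*port   = exit_info_1 >> 16;				/* bits 31:16 */
	*size   = (exit_info_1 & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
	*in     = exit_info_1 & SVM_IOIO_TYPE_MASK;		/* 1 = IN, 0 = OUT */
	*string = exit_info_1 & SVM_IOIO_STR_MASK;		/* INS/OUTS */
}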

#define SVM_VM_CR_VALID_MASK	0x001fULL
#define SVM_VM_CR_SVM_LOCK_MASK 0x0008ULL
#define SVM_VM_CR_SVM_DIS_MASK  0x0010ULL

#define SVM_NESTED_CTL_NP_ENABLE	BIT(0)
#define SVM_NESTED_CTL_SEV_ENABLE	BIT(1)
#define SVM_NESTED_CTL_SEV_ES_ENABLE	BIT(2)
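
/*
 * Illustrative sketch, not part of the upstream header: nested paging and the
 * SEV modes are switched on through nested_ctl, with the NPT root placed in
 * nested_cr3.  The made-up helper below only shows where the bits go; the
 * npt_root_pa value is whatever the caller allocated.
 */
static inline void example_enable_npt(struct vmcb_control_area *control,
				      u64 npt_root_pa)
{
	control->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE;
	control->nested_cr3  = npt_root_pa;
}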


#define SVM_TSC_RATIO_RSVD	0xffffff0000000000ULL
#define SVM_TSC_RATIO_MIN	0x0000000000000001ULL
#define SVM_TSC_RATIO_MAX	0x000000ffffffffffULL
#define SVM_TSC_RATIO_DEFAULT	0x0100000000ULL
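
/*
 * Illustrative note, not part of the upstream header: the TSC ratio is an
 * 8.32 fixed-point multiplier (integer part in bits 39:32, fraction in bits
 * 31:0), so SVM_TSC_RATIO_DEFAULT is 1.0.  Scaling a host TSC value by it is
 * essentially
 *
 *	guest_tsc = mul_u64_u64_shr(host_tsc, ratio, 32) + tsc_offset;
 *
 * which is what KVM's TSC-scaling path boils down to.
 */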


/* AVIC */
#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK	(0xFFULL)
#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT			31
#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK		(1 << 31)

#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK	GENMASK_ULL(11, 0)
#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK	(0xFFFFFFFFFFULL << 12)
#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK		(1ULL << 62)
#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK		(1ULL << 63)
#define AVIC_PHYSICAL_ID_TABLE_SIZE_MASK		(0xFFULL)

#define AVIC_DOORBELL_PHYSICAL_ID_MASK			GENMASK_ULL(11, 0)

#define VMCB_AVIC_APIC_BAR_MASK				0xFFFFFFFFFF000ULL

#define AVIC_UNACCEL_ACCESS_WRITE_MASK		1
#define AVIC_UNACCEL_ACCESS_OFFSET_MASK		0xFF0
#define AVIC_UNACCEL_ACCESS_VECTOR_MASK		0xFFFFFFFF
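
/*
 * Illustrative sketch, not part of the upstream header: a physical APIC ID
 * table entry combines the vCPU's backing page address with the host CPU it
 * currently runs on.  The made-up helper below shows how the masks above
 * compose such an entry, roughly as KVM's AVIC code does.
 */
static inline u64 example_avic_physid_entry(u64 backing_page_pa,
					    u32 host_cpu_id, bool running)
{
	u64 entry;

	entry  = backing_page_pa & AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK;
	entry |= host_cpu_id & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
	entry |= AVIC_PHYSICAL_ID_ENTRY_VALID_MASK;
	if (running)
		entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;

	return entry;
}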

enum avic_ipi_failure_cause {
	AVIC_IPI_FAILURE_INVALID_INT_TYPE,
	AVIC_IPI_FAILURE_TARGET_NOT_RUNNING,
	AVIC_IPI_FAILURE_INVALID_TARGET,
	AVIC_IPI_FAILURE_INVALID_BACKING_PAGE,
};


/*
 * 0xff is broadcast, so the max index allowed for physical APIC ID
 * table is 0xfe.  APIC IDs above 0xff are reserved.
 */
#define AVIC_MAX_PHYSICAL_ID_COUNT	0xff

#define AVIC_HPA_MASK	~((0xFFFULL << 52) | 0xFFF)
#define VMCB_AVIC_APIC_BAR_MASK		0xFFFFFFFFFF000ULL

#define SVM_SEV_FEAT_SNP_ACTIVE			BIT(0)
#define SVM_SEV_FEAT_RESTRICTED_INJECTION	BIT(3)
#define SVM_SEV_FEAT_ALTERNATE_INJECTION	BIT(4)
#define SVM_SEV_FEAT_INT_INJ_MODES		\
	(SVM_SEV_FEAT_RESTRICTED_INJECTION |	\
	 SVM_SEV_FEAT_ALTERNATE_INJECTION)

struct vmcb_seg {
	u16 selector;
	u16 attrib;
	u32 limit;
	u64 base;
} __packed;

/* Save area definition for legacy and SEV-MEM guests */
struct vmcb_save_area {
	struct vmcb_seg es;
	struct vmcb_seg cs;
	struct vmcb_seg ss;
	struct vmcb_seg ds;
	struct vmcb_seg fs;
	struct vmcb_seg gs;
	struct vmcb_seg gdtr;
	struct vmcb_seg ldtr;
	struct vmcb_seg idtr;
	struct vmcb_seg tr;
	u8 reserved_1[42];
	u8 vmpl;
	u8 cpl;
	u8 reserved_2[4];
	u64 efer;
	u8 reserved_3[112];
	u64 cr4;
	u64 cr3;
	u64 cr0;
	u64 dr7;
	u64 dr6;
	u64 rflags;
	u64 rip;
	u8 reserved_4[88];
	u64 rsp;
	u64 s_cet;
	u64 ssp;
	u64 isst_addr;
	u64 rax;
	u64 star;
	u64 lstar;
	u64 cstar;
	u64 sfmask;
	u64 kernel_gs_base;
	u64 sysenter_cs;
	u64 sysenter_esp;
	u64 sysenter_eip;
	u64 cr2;
	u8 reserved_5[32];
	u64 g_pat;
	u64 dbgctl;
	u64 br_from;
	u64 br_to;
	u64 last_excp_from;
	u64 last_excp_to;
	u8 reserved_6[72];
	u32 spec_ctrl;		/* Guest version of SPEC_CTRL at 0x2E0 */
} __packed;

/* Save area definition for SEV-ES and SEV-SNP guests */
struct sev_es_save_area {
	struct vmcb_seg es;
	struct vmcb_seg cs;
	struct vmcb_seg ss;
	struct vmcb_seg ds;
	struct vmcb_seg fs;
	struct vmcb_seg gs;
	struct vmcb_seg gdtr;
	struct vmcb_seg ldtr;
	struct vmcb_seg idtr;
	struct vmcb_seg tr;
	u64 vmpl0_ssp;
	u64 vmpl1_ssp;
	u64 vmpl2_ssp;
	u64 vmpl3_ssp;
	u64 u_cet;
	u8 reserved_1[2];
	u8 vmpl;
	u8 cpl;
	u8 reserved_2[4];
	u64 efer;
	u8 reserved_3[104];
	u64 xss;
	u64 cr4;
	u64 cr3;
	u64 cr0;
	u64 dr7;
	u64 dr6;
	u64 rflags;
	u64 rip;
	u64 dr0;
	u64 dr1;
	u64 dr2;
	u64 dr3;
	u64 dr0_addr_mask;
	u64 dr1_addr_mask;
	u64 dr2_addr_mask;
	u64 dr3_addr_mask;
	u8 reserved_4[24];
	u64 rsp;
	u64 s_cet;
	u64 ssp;
	u64 isst_addr;
	u64 rax;
	u64 star;
	u64 lstar;
	u64 cstar;
	u64 sfmask;
	u64 kernel_gs_base;
	u64 sysenter_cs;
	u64 sysenter_esp;
	u64 sysenter_eip;
	u64 cr2;
	u8 reserved_5[32];
	u64 g_pat;
	u64 dbgctl;
	u64 br_from;
	u64 br_to;
	u64 last_excp_from;
	u64 last_excp_to;
	u8 reserved_7[80];
	u32 pkru;
	u8 reserved_8[20];
	u64 reserved_9;		/* rax already available at 0x01f8 */
	u64 rcx;
	u64 rdx;
	u64 rbx;
	u64 reserved_10;	/* rsp already available at 0x01d8 */
	u64 rbp;
	u64 rsi;
	u64 rdi;
	u64 r8;
	u64 r9;
	u64 r10;
	u64 r11;
	u64 r12;
	u64 r13;
	u64 r14;
	u64 r15;
	u8 reserved_11[16];
	u64 guest_exit_info_1;
	u64 guest_exit_info_2;
	u64 guest_exit_int_info;
	u64 guest_nrip;
	u64 sev_features;
	u64 vintr_ctrl;
	u64 guest_exit_code;
	u64 virtual_tom;
	u64 tlb_id;
	u64 pcpu_id;
	u64 event_inj;
	u64 xcr0;
	u8 reserved_12[16];

	/* Floating point area */
	u64 x87_dp;
	u32 mxcsr;
	u16 x87_ftw;
	u16 x87_fsw;
	u16 x87_fcw;
	u16 x87_fop;
	u16 x87_ds;
	u16 x87_cs;
	u64 x87_rip;
	u8 fpreg_x87[80];
	u8 fpreg_xmm[256];
	u8 fpreg_ymm[256];
} __packed;
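
/*
 * Illustrative sketch, not part of the upstream header: for SEV-ES/SEV-SNP
 * guests this structure is the VMSA referenced by vmsa_pa in the control
 * area, and the SVM_SEV_FEAT_* flags defined earlier are bits of its
 * sev_features field, e.g. (made-up helper):
 */
static inline bool example_vmsa_snp_active(const struct sev_es_save_area *vmsa)
{
	return vmsa->sev_features & SVM_SEV_FEAT_SNP_ACTIVE;
}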

struct ghcb_save_area {
	u8 reserved_1[203];
	u8 cpl;
	u8 reserved_2[116];
	u64 xss;
	u8 reserved_3[24];
	u64 dr7;
	u8 reserved_4[16];
	u64 rip;
	u8 reserved_5[88];
	u64 rsp;
	u8 reserved_6[24];
	u64 rax;
	u8 reserved_7[264];
	u64 rcx;
	u64 rdx;
	u64 rbx;
	u8 reserved_8[8];
	u64 rbp;
	u64 rsi;
	u64 rdi;
	u64 r8;
	u64 r9;
	u64 r10;
	u64 r11;
	u64 r12;
	u64 r13;
	u64 r14;
	u64 r15;
	u8 reserved_9[16];
	u64 sw_exit_code;
	u64 sw_exit_info_1;
	u64 sw_exit_info_2;
	u64 sw_scratch;
	u8 reserved_10[56];
	u64 xcr0;
	u8 valid_bitmap[16];
	u64 x87_state_gpa;
} __packed;

#define GHCB_SHARED_BUF_SIZE	2032

struct ghcb {
	struct ghcb_save_area save;
	u8 reserved_save[2048 - sizeof(struct ghcb_save_area)];

	u8 shared_buffer[GHCB_SHARED_BUF_SIZE];

	u8 reserved_1[10];
	u16 protocol_version;	/* negotiated SEV-ES/GHCB protocol version */
	u32 ghcb_usage;
} __packed;


#define EXPECTED_VMCB_SAVE_AREA_SIZE		740
#define EXPECTED_GHCB_SAVE_AREA_SIZE		1032
#define EXPECTED_SEV_ES_SAVE_AREA_SIZE		1648
#define EXPECTED_VMCB_CONTROL_AREA_SIZE		1024
#define EXPECTED_GHCB_SIZE			PAGE_SIZE

static inline void __unused_size_checks(void)
{
	BUILD_BUG_ON(sizeof(struct vmcb_save_area)	!= EXPECTED_VMCB_SAVE_AREA_SIZE);
	BUILD_BUG_ON(sizeof(struct ghcb_save_area)	!= EXPECTED_GHCB_SAVE_AREA_SIZE);
	BUILD_BUG_ON(sizeof(struct sev_es_save_area)	!= EXPECTED_SEV_ES_SAVE_AREA_SIZE);
	BUILD_BUG_ON(sizeof(struct vmcb_control_area)	!= EXPECTED_VMCB_CONTROL_AREA_SIZE);
	BUILD_BUG_ON(sizeof(struct ghcb)		!= EXPECTED_GHCB_SIZE);
}

struct vmcb {
	struct vmcb_control_area control;
	struct vmcb_save_area save;
} __packed;

#define SVM_CPUID_FUNC 0x8000000a

#define SVM_VM_CR_SVM_DISABLE 4

#define SVM_SELECTOR_S_SHIFT 4
#define SVM_SELECTOR_DPL_SHIFT 5
#define SVM_SELECTOR_P_SHIFT 7
#define SVM_SELECTOR_AVL_SHIFT 8
#define SVM_SELECTOR_L_SHIFT 9
#define SVM_SELECTOR_DB_SHIFT 10
#define SVM_SELECTOR_G_SHIFT 11

#define SVM_SELECTOR_TYPE_MASK (0xf)
#define SVM_SELECTOR_S_MASK (1 << SVM_SELECTOR_S_SHIFT)
#define SVM_SELECTOR_DPL_MASK (3 << SVM_SELECTOR_DPL_SHIFT)
#define SVM_SELECTOR_P_MASK (1 << SVM_SELECTOR_P_SHIFT)
#define SVM_SELECTOR_AVL_MASK (1 << SVM_SELECTOR_AVL_SHIFT)
#define SVM_SELECTOR_L_MASK (1 << SVM_SELECTOR_L_SHIFT)
#define SVM_SELECTOR_DB_MASK (1 << SVM_SELECTOR_DB_SHIFT)
#define SVM_SELECTOR_G_MASK (1 << SVM_SELECTOR_G_SHIFT)

#define SVM_SELECTOR_WRITE_MASK (1 << 1)
#define SVM_SELECTOR_READ_MASK SVM_SELECTOR_WRITE_MASK
#define SVM_SELECTOR_CODE_MASK (1 << 3)
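
/*
 * Illustrative sketch, not part of the upstream header: the selector masks
 * above apply to the packed 12-bit attrib field of struct vmcb_seg.  The
 * made-up helpers below pull out the DPL and check for a present code
 * segment.
 */
static inline u8 example_seg_dpl(const struct vmcb_seg *seg)
{
	return (seg->attrib & SVM_SELECTOR_DPL_MASK) >> SVM_SELECTOR_DPL_SHIFT;
}

static inline bool example_seg_is_code(const struct vmcb_seg *seg)
{
	return (seg->attrib & SVM_SELECTOR_P_MASK) &&
	       (seg->attrib & SVM_SELECTOR_CODE_MASK);
}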

#define SVM_EVTINJ_VEC_MASK 0xff

#define SVM_EVTINJ_TYPE_SHIFT 8
#define SVM_EVTINJ_TYPE_MASK (7 << SVM_EVTINJ_TYPE_SHIFT)

#define SVM_EVTINJ_TYPE_INTR (0 << SVM_EVTINJ_TYPE_SHIFT)
#define SVM_EVTINJ_TYPE_NMI (2 << SVM_EVTINJ_TYPE_SHIFT)
#define SVM_EVTINJ_TYPE_EXEPT (3 << SVM_EVTINJ_TYPE_SHIFT)
#define SVM_EVTINJ_TYPE_SOFT (4 << SVM_EVTINJ_TYPE_SHIFT)

#define SVM_EVTINJ_VALID (1 << 31)
#define SVM_EVTINJ_VALID_ERR (1 << 11)
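
/*
 * Illustrative sketch, not part of the upstream header: event_inj combines a
 * vector, a type and the valid bits, with any error code going into
 * event_inj_err.  The made-up helper below shows the composition for an
 * exception that carries an error code, which is roughly how KVM queues one.
 */
static inline void example_inject_exception(struct vmcb_control_area *control,
					    u8 vector, u32 error_code)
{
	control->event_inj = vector | SVM_EVTINJ_TYPE_EXEPT |
			     SVM_EVTINJ_VALID | SVM_EVTINJ_VALID_ERR;
	control->event_inj_err = error_code;
}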

#define SVM_EXITINTINFO_VEC_MASK SVM_EVTINJ_VEC_MASK
#define SVM_EXITINTINFO_TYPE_MASK SVM_EVTINJ_TYPE_MASK

#define	SVM_EXITINTINFO_TYPE_INTR SVM_EVTINJ_TYPE_INTR
#define	SVM_EXITINTINFO_TYPE_NMI SVM_EVTINJ_TYPE_NMI
#define	SVM_EXITINTINFO_TYPE_EXEPT SVM_EVTINJ_TYPE_EXEPT
#define	SVM_EXITINTINFO_TYPE_SOFT SVM_EVTINJ_TYPE_SOFT

#define SVM_EXITINTINFO_VALID SVM_EVTINJ_VALID
#define SVM_EXITINTINFO_VALID_ERR SVM_EVTINJ_VALID_ERR

#define SVM_EXITINFOSHIFT_TS_REASON_IRET 36
#define SVM_EXITINFOSHIFT_TS_REASON_JMP 38
#define SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE 44

#define SVM_EXITINFO_REG_MASK 0x0F

#define SVM_CR0_SELECTIVE_MASK (X86_CR0_TS | X86_CR0_MP)

/* GHCB Accessor functions */

#define GHCB_BITMAP_IDX(field)							\
	(offsetof(struct ghcb_save_area, field) / sizeof(u64))

#define DEFINE_GHCB_ACCESSORS(field)						\
	static __always_inline bool ghcb_##field##_is_valid(const struct ghcb *ghcb) \
	{									\
		return test_bit(GHCB_BITMAP_IDX(field),				\
				(unsigned long *)&ghcb->save.valid_bitmap);	\
	}									\
										\
	static __always_inline u64 ghcb_get_##field(struct ghcb *ghcb)		\
	{									\
		return ghcb->save.field;					\
	}									\
										\
	static __always_inline u64 ghcb_get_##field##_if_valid(struct ghcb *ghcb) \
	{									\
		return ghcb_##field##_is_valid(ghcb) ? ghcb->save.field : 0;	\
	}									\
										\
	static __always_inline void ghcb_set_##field(struct ghcb *ghcb, u64 value) \
	{									\
		__set_bit(GHCB_BITMAP_IDX(field),				\
			  (unsigned long *)&ghcb->save.valid_bitmap);		\
		ghcb->save.field = value;					\
	}

DEFINE_GHCB_ACCESSORS(cpl)
DEFINE_GHCB_ACCESSORS(rip)
DEFINE_GHCB_ACCESSORS(rsp)
DEFINE_GHCB_ACCESSORS(rax)
DEFINE_GHCB_ACCESSORS(rcx)
DEFINE_GHCB_ACCESSORS(rdx)
DEFINE_GHCB_ACCESSORS(rbx)
DEFINE_GHCB_ACCESSORS(rbp)
DEFINE_GHCB_ACCESSORS(rsi)
DEFINE_GHCB_ACCESSORS(rdi)
DEFINE_GHCB_ACCESSORS(r8)
DEFINE_GHCB_ACCESSORS(r9)
DEFINE_GHCB_ACCESSORS(r10)
DEFINE_GHCB_ACCESSORS(r11)
DEFINE_GHCB_ACCESSORS(r12)
DEFINE_GHCB_ACCESSORS(r13)
DEFINE_GHCB_ACCESSORS(r14)
DEFINE_GHCB_ACCESSORS(r15)
DEFINE_GHCB_ACCESSORS(sw_exit_code)
DEFINE_GHCB_ACCESSORS(sw_exit_info_1)
DEFINE_GHCB_ACCESSORS(sw_exit_info_2)
DEFINE_GHCB_ACCESSORS(sw_scratch)
DEFINE_GHCB_ACCESSORS(xcr0)
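
/*
 * Illustrative note, not part of the upstream header: the generated accessors
 * keep valid_bitmap in sync with the register fields, so a GHCB exit handler
 * can test which registers the guest actually exposed before consuming them
 * and publish results the same way, e.g.:
 *
 *	if (ghcb_rax_is_valid(ghcb))
 *		rax = ghcb_get_rax(ghcb);
 *	...
 *	ghcb_set_rax(ghcb, result);
 *	ghcb_set_sw_exit_info_1(ghcb, 0);
 */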

#endif