cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

kvm.h (11761B)


/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _ASM_X86_KVM_H
#define _ASM_X86_KVM_H

/*
 * KVM x86 specific structures and definitions
 *
 */

#include <linux/types.h>
#include <linux/ioctl.h>

#define KVM_PIO_PAGE_OFFSET 1
#define KVM_COALESCED_MMIO_PAGE_OFFSET 2
#define KVM_DIRTY_LOG_PAGE_OFFSET 64

#define DE_VECTOR 0
#define DB_VECTOR 1
#define BP_VECTOR 3
#define OF_VECTOR 4
#define BR_VECTOR 5
#define UD_VECTOR 6
#define NM_VECTOR 7
#define DF_VECTOR 8
#define TS_VECTOR 10
#define NP_VECTOR 11
#define SS_VECTOR 12
#define GP_VECTOR 13
#define PF_VECTOR 14
#define MF_VECTOR 16
#define AC_VECTOR 17
#define MC_VECTOR 18
#define XM_VECTOR 19
#define VE_VECTOR 20

/* Select x86 specific features in <linux/kvm.h> */
#define __KVM_HAVE_PIT
#define __KVM_HAVE_IOAPIC
#define __KVM_HAVE_IRQ_LINE
#define __KVM_HAVE_MSI
#define __KVM_HAVE_USER_NMI
#define __KVM_HAVE_GUEST_DEBUG
#define __KVM_HAVE_MSIX
#define __KVM_HAVE_MCE
#define __KVM_HAVE_PIT_STATE2
#define __KVM_HAVE_XEN_HVM
#define __KVM_HAVE_VCPU_EVENTS
#define __KVM_HAVE_DEBUGREGS
#define __KVM_HAVE_XSAVE
#define __KVM_HAVE_XCRS
#define __KVM_HAVE_READONLY_MEM

/* Architectural interrupt line count. */
#define KVM_NR_INTERRUPTS 256

struct kvm_memory_alias {
	__u32 slot;  /* this has a different namespace than memory slots */
	__u32 flags;
	__u64 guest_phys_addr;
	__u64 memory_size;
	__u64 target_phys_addr;
};

/* for KVM_GET_IRQCHIP and KVM_SET_IRQCHIP */
struct kvm_pic_state {
	__u8 last_irr;	/* edge detection */
	__u8 irr;		/* interrupt request register */
	__u8 imr;		/* interrupt mask register */
	__u8 isr;		/* interrupt service register */
	__u8 priority_add;	/* highest irq priority */
	__u8 irq_base;
	__u8 read_reg_select;
	__u8 poll;
	__u8 special_mask;
	__u8 init_state;
	__u8 auto_eoi;
	__u8 rotate_on_auto_eoi;
	__u8 special_fully_nested_mode;
	__u8 init4;		/* true if 4 byte init */
	__u8 elcr;		/* PIIX edge/trigger selection */
	__u8 elcr_mask;
};

#define KVM_IOAPIC_NUM_PINS  24
struct kvm_ioapic_state {
	__u64 base_address;
	__u32 ioregsel;
	__u32 id;
	__u32 irr;
	__u32 pad;
	union {
		__u64 bits;
		struct {
			__u8 vector;
			__u8 delivery_mode:3;
			__u8 dest_mode:1;
			__u8 delivery_status:1;
			__u8 polarity:1;
			__u8 remote_irr:1;
			__u8 trig_mode:1;
			__u8 mask:1;
			__u8 reserve:7;
			__u8 reserved[4];
			__u8 dest_id;
		} fields;
	} redirtbl[KVM_IOAPIC_NUM_PINS];
};

#define KVM_IRQCHIP_PIC_MASTER   0
#define KVM_IRQCHIP_PIC_SLAVE    1
#define KVM_IRQCHIP_IOAPIC       2
#define KVM_NR_IRQCHIPS          3

#define KVM_RUN_X86_SMM		 (1 << 0)
#define KVM_RUN_X86_BUS_LOCK     (1 << 1)

/* for KVM_GET_REGS and KVM_SET_REGS */
struct kvm_regs {
	/* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
	__u64 rax, rbx, rcx, rdx;
	__u64 rsi, rdi, rsp, rbp;
	__u64 r8,  r9,  r10, r11;
	__u64 r12, r13, r14, r15;
	__u64 rip, rflags;
};

/* for KVM_GET_LAPIC and KVM_SET_LAPIC */
#define KVM_APIC_REG_SIZE 0x400
struct kvm_lapic_state {
	char regs[KVM_APIC_REG_SIZE];
};

struct kvm_segment {
	__u64 base;
	__u32 limit;
	__u16 selector;
	__u8  type;
	__u8  present, dpl, db, s, l, g, avl;
	__u8  unusable;
	__u8  padding;
};

struct kvm_dtable {
	__u64 base;
	__u16 limit;
	__u16 padding[3];
};


/* for KVM_GET_SREGS and KVM_SET_SREGS */
struct kvm_sregs {
	/* out (KVM_GET_SREGS) / in (KVM_SET_SREGS) */
	struct kvm_segment cs, ds, es, fs, gs, ss;
	struct kvm_segment tr, ldt;
	struct kvm_dtable gdt, idt;
	__u64 cr0, cr2, cr3, cr4, cr8;
	__u64 efer;
	__u64 apic_base;
	__u64 interrupt_bitmap[(KVM_NR_INTERRUPTS + 63) / 64];
};

struct kvm_sregs2 {
	/* out (KVM_GET_SREGS2) / in (KVM_SET_SREGS2) */
	struct kvm_segment cs, ds, es, fs, gs, ss;
	struct kvm_segment tr, ldt;
	struct kvm_dtable gdt, idt;
	__u64 cr0, cr2, cr3, cr4, cr8;
	__u64 efer;
	__u64 apic_base;
	__u64 flags;
	__u64 pdptrs[4];
};
#define KVM_SREGS2_FLAGS_PDPTRS_VALID 1

/* for KVM_GET_FPU and KVM_SET_FPU */
struct kvm_fpu {
	__u8  fpr[8][16];
	__u16 fcw;
	__u16 fsw;
	__u8  ftwx;  /* in fxsave format */
	__u8  pad1;
	__u16 last_opcode;
	__u64 last_ip;
	__u64 last_dp;
	__u8  xmm[16][16];
	__u32 mxcsr;
	__u32 pad2;
};

struct kvm_msr_entry {
	__u32 index;
	__u32 reserved;
	__u64 data;
};

/* for KVM_GET_MSRS and KVM_SET_MSRS */
struct kvm_msrs {
	__u32 nmsrs; /* number of msrs in entries */
	__u32 pad;

	struct kvm_msr_entry entries[0];
};

/* for KVM_GET_MSR_INDEX_LIST */
struct kvm_msr_list {
	__u32 nmsrs; /* number of msrs in entries */
	__u32 indices[0];
};

/* Maximum size of any access bitmap in bytes */
#define KVM_MSR_FILTER_MAX_BITMAP_SIZE 0x600

/* for KVM_X86_SET_MSR_FILTER */
struct kvm_msr_filter_range {
#define KVM_MSR_FILTER_READ  (1 << 0)
#define KVM_MSR_FILTER_WRITE (1 << 1)
	__u32 flags;
	__u32 nmsrs; /* number of msrs in bitmap */
	__u32 base;  /* MSR index the bitmap starts at */
	__u8 *bitmap; /* a 1 bit allows the operations in flags, 0 denies */
};

#define KVM_MSR_FILTER_MAX_RANGES 16
struct kvm_msr_filter {
#define KVM_MSR_FILTER_DEFAULT_ALLOW (0 << 0)
#define KVM_MSR_FILTER_DEFAULT_DENY  (1 << 0)
	__u32 flags;
	struct kvm_msr_filter_range ranges[KVM_MSR_FILTER_MAX_RANGES];
};

struct kvm_cpuid_entry {
	__u32 function;
	__u32 eax;
	__u32 ebx;
	__u32 ecx;
	__u32 edx;
	__u32 padding;
};

/* for KVM_SET_CPUID */
struct kvm_cpuid {
	__u32 nent;
	__u32 padding;
	struct kvm_cpuid_entry entries[0];
};

struct kvm_cpuid_entry2 {
	__u32 function;
	__u32 index;
	__u32 flags;
	__u32 eax;
	__u32 ebx;
	__u32 ecx;
	__u32 edx;
	__u32 padding[3];
};

#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX		(1 << 0)
#define KVM_CPUID_FLAG_STATEFUL_FUNC		(1 << 1)
#define KVM_CPUID_FLAG_STATE_READ_NEXT		(1 << 2)

/* for KVM_SET_CPUID2 */
struct kvm_cpuid2 {
	__u32 nent;
	__u32 padding;
	struct kvm_cpuid_entry2 entries[0];
};

/* for KVM_GET_PIT and KVM_SET_PIT */
struct kvm_pit_channel_state {
	__u32 count; /* can be 65536 */
	__u16 latched_count;
	__u8 count_latched;
	__u8 status_latched;
	__u8 status;
	__u8 read_state;
	__u8 write_state;
	__u8 write_latch;
	__u8 rw_mode;
	__u8 mode;
	__u8 bcd;
	__u8 gate;
	__s64 count_load_time;
};

struct kvm_debug_exit_arch {
	__u32 exception;
	__u32 pad;
	__u64 pc;
	__u64 dr6;
	__u64 dr7;
};

#define KVM_GUESTDBG_USE_SW_BP		0x00010000
#define KVM_GUESTDBG_USE_HW_BP		0x00020000
#define KVM_GUESTDBG_INJECT_DB		0x00040000
#define KVM_GUESTDBG_INJECT_BP		0x00080000
#define KVM_GUESTDBG_BLOCKIRQ		0x00100000

/* for KVM_SET_GUEST_DEBUG */
struct kvm_guest_debug_arch {
	__u64 debugreg[8];
};

struct kvm_pit_state {
	struct kvm_pit_channel_state channels[3];
};

#define KVM_PIT_FLAGS_HPET_LEGACY  0x00000001

struct kvm_pit_state2 {
	struct kvm_pit_channel_state channels[3];
	__u32 flags;
	__u32 reserved[9];
};

struct kvm_reinject_control {
	__u8 pit_reinject;
	__u8 reserved[31];
};

/* When set in flags, include corresponding fields on KVM_SET_VCPU_EVENTS */
#define KVM_VCPUEVENT_VALID_NMI_PENDING	0x00000001
#define KVM_VCPUEVENT_VALID_SIPI_VECTOR	0x00000002
#define KVM_VCPUEVENT_VALID_SHADOW	0x00000004
#define KVM_VCPUEVENT_VALID_SMM		0x00000008
#define KVM_VCPUEVENT_VALID_PAYLOAD	0x00000010

/* Interrupt shadow states */
#define KVM_X86_SHADOW_INT_MOV_SS	0x01
#define KVM_X86_SHADOW_INT_STI		0x02

/* for KVM_GET/SET_VCPU_EVENTS */
struct kvm_vcpu_events {
	struct {
		__u8 injected;
		__u8 nr;
		__u8 has_error_code;
		__u8 pending;
		__u32 error_code;
	} exception;
	struct {
		__u8 injected;
		__u8 nr;
		__u8 soft;
		__u8 shadow;
	} interrupt;
	struct {
		__u8 injected;
		__u8 pending;
		__u8 masked;
		__u8 pad;
	} nmi;
	__u32 sipi_vector;
	__u32 flags;
	struct {
		__u8 smm;
		__u8 pending;
		__u8 smm_inside_nmi;
		__u8 latched_init;
	} smi;
	__u8 reserved[27];
	__u8 exception_has_payload;
	__u64 exception_payload;
};

/* for KVM_GET/SET_DEBUGREGS */
struct kvm_debugregs {
	__u64 db[4];
	__u64 dr6;
	__u64 dr7;
	__u64 flags;
	__u64 reserved[9];
};

/* for KVM_CAP_XSAVE and KVM_CAP_XSAVE2 */
struct kvm_xsave {
	/*
	 * KVM_GET_XSAVE2 and KVM_SET_XSAVE write and read as many bytes
	 * as are returned by KVM_CHECK_EXTENSION(KVM_CAP_XSAVE2)
	 * respectively, when invoked on the vm file descriptor.
	 *
	 * The size value returned by KVM_CHECK_EXTENSION(KVM_CAP_XSAVE2)
	 * will always be at least 4096. Currently, it is only greater
	 * than 4096 if a dynamic feature has been enabled with
	 * ``arch_prctl()``, but this may change in the future.
	 *
	 * The offsets of the state save areas in struct kvm_xsave follow
	 * the contents of CPUID leaf 0xD on the host.
	 */
	__u32 region[1024];
	__u32 extra[0];
};

#define KVM_MAX_XCRS	16

struct kvm_xcr {
	__u32 xcr;
	__u32 reserved;
	__u64 value;
};

struct kvm_xcrs {
	__u32 nr_xcrs;
	__u32 flags;
	struct kvm_xcr xcrs[KVM_MAX_XCRS];
	__u64 padding[16];
};

#define KVM_SYNC_X86_REGS      (1UL << 0)
#define KVM_SYNC_X86_SREGS     (1UL << 1)
#define KVM_SYNC_X86_EVENTS    (1UL << 2)

#define KVM_SYNC_X86_VALID_FIELDS \
	(KVM_SYNC_X86_REGS| \
	 KVM_SYNC_X86_SREGS| \
	 KVM_SYNC_X86_EVENTS)

/* kvm_sync_regs struct included by kvm_run struct */
struct kvm_sync_regs {
	/* Members of this structure are potentially malicious.
	 * Care must be taken by code reading, esp. interpreting,
	 * data fields from them inside KVM to prevent TOCTOU and
	 * double-fetch types of vulnerabilities.
	 */
	struct kvm_regs regs;
	struct kvm_sregs sregs;
	struct kvm_vcpu_events events;
};

#define KVM_X86_QUIRK_LINT0_REENABLED		(1 << 0)
#define KVM_X86_QUIRK_CD_NW_CLEARED		(1 << 1)
#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE		(1 << 2)
#define KVM_X86_QUIRK_OUT_7E_INC_RIP		(1 << 3)
#define KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT	(1 << 4)
#define KVM_X86_QUIRK_FIX_HYPERCALL_INSN	(1 << 5)

#define KVM_STATE_NESTED_FORMAT_VMX	0
#define KVM_STATE_NESTED_FORMAT_SVM	1

#define KVM_STATE_NESTED_GUEST_MODE	0x00000001
#define KVM_STATE_NESTED_RUN_PENDING	0x00000002
#define KVM_STATE_NESTED_EVMCS		0x00000004
#define KVM_STATE_NESTED_MTF_PENDING	0x00000008
#define KVM_STATE_NESTED_GIF_SET	0x00000100

#define KVM_STATE_NESTED_SMM_GUEST_MODE	0x00000001
#define KVM_STATE_NESTED_SMM_VMXON	0x00000002

#define KVM_STATE_NESTED_VMX_VMCS_SIZE	0x1000

#define KVM_STATE_NESTED_SVM_VMCB_SIZE	0x1000

#define KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE	0x00000001

/* attributes for system fd (group 0) */
#define KVM_X86_XCOMP_GUEST_SUPP	0

struct kvm_vmx_nested_state_data {
	__u8 vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
	__u8 shadow_vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
};

struct kvm_vmx_nested_state_hdr {
	__u64 vmxon_pa;
	__u64 vmcs12_pa;

	struct {
		__u16 flags;
	} smm;

	__u16 pad;

	__u32 flags;
	__u64 preemption_timer_deadline;
};

struct kvm_svm_nested_state_data {
	/* Save area only used if KVM_STATE_NESTED_RUN_PENDING.  */
	__u8 vmcb12[KVM_STATE_NESTED_SVM_VMCB_SIZE];
};

struct kvm_svm_nested_state_hdr {
	__u64 vmcb_pa;
};

/* for KVM_CAP_NESTED_STATE */
struct kvm_nested_state {
	__u16 flags;
	__u16 format;
	__u32 size;

	union {
		struct kvm_vmx_nested_state_hdr vmx;
		struct kvm_svm_nested_state_hdr svm;

		/* Pad the header to 128 bytes.  */
		__u8 pad[120];
	} hdr;

	/*
	 * Define data region as 0 bytes to preserve backwards compatibility
	 * with the old definition of kvm_nested_state in order to avoid
	 * changing the KVM_{GET,SET}_NESTED_STATE ioctl values.
	 */
	union {
		struct kvm_vmx_nested_state_data vmx[0];
		struct kvm_svm_nested_state_data svm[0];
	} data;
};

/* for KVM_CAP_PMU_EVENT_FILTER */
struct kvm_pmu_event_filter {
	__u32 action;
	__u32 nevents;
	__u32 fixed_counter_bitmap;
	__u32 flags;
	__u32 pad[4];
	__u64 events[0];
};

#define KVM_PMU_EVENT_ALLOW 0
#define KVM_PMU_EVENT_DENY 1

/* for KVM_{GET,SET,HAS}_DEVICE_ATTR */
#define KVM_VCPU_TSC_CTRL 0 /* control group for the timestamp counter (TSC) */
#define   KVM_VCPU_TSC_OFFSET 0 /* attribute for the TSC offset */

#endif /* _ASM_X86_KVM_H */
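
The comment inside struct kvm_xsave above explains that the buffer passed to
KVM_GET_XSAVE2 must be sized from KVM_CHECK_EXTENSION(KVM_CAP_XSAVE2) on the
VM file descriptor. The sketch below is not part of this repository and only
illustrates that size negotiation; it assumes the caller has already created
the VM and vCPU file descriptors in the usual way, and error handling is
trimmed.

#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Illustrative helper, not from this tree: fetch a vCPU's XSAVE state. */
static struct kvm_xsave *get_xsave(int vm_fd, int vcpu_fd)
{
	struct kvm_xsave *xsave;
	int size;

	/* The VM fd reports the required buffer size (at least 4096 bytes). */
	size = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_XSAVE2);
	if (size < (int)sizeof(struct kvm_xsave))
		size = sizeof(struct kvm_xsave); /* never below the legacy 4096-byte area */

	xsave = calloc(1, size);
	if (!xsave)
		return NULL;

	/* KVM_GET_XSAVE2 may write up to 'size' bytes, spilling into extra[]. */
	if (ioctl(vcpu_fd, KVM_GET_XSAVE2, xsave) < 0) {
		perror("KVM_GET_XSAVE2");
		free(xsave);
		return NULL;
	}

	return xsave;
}

Per the same comment, KVM_SET_XSAVE reads as many bytes as the capability
check reported, so a buffer obtained this way can be written back unchanged.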
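
The bitmap convention documented in struct kvm_msr_filter_range (a set bit
allows the operations selected in flags, a clear bit denies them) is shown in
another sketch below, which is likewise illustrative and not part of this
repository. It installs a filter that denies guest writes to an arbitrary,
caller-chosen block of 64 MSRs while leaving all other MSRs on the
default-allow policy; whether a denied access raises #GP in the guest or
exits to userspace depends on how KVM_CAP_X86_USER_SPACE_MSR has been
configured.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Illustrative helper, not from this tree: deny writes to 64 MSRs. */
static int deny_msr_writes(int vm_fd, __u32 msr_base)
{
	/* One bit per MSR, 1 = allow, 0 = deny; all-zero denies the range. */
	static __u8 deny_bitmap[8];	/* covers 64 MSRs */
	struct kvm_msr_filter filter;

	memset(&filter, 0, sizeof(filter));
	filter.flags = KVM_MSR_FILTER_DEFAULT_ALLOW;	/* unlisted MSRs stay allowed */

	filter.ranges[0].flags = KVM_MSR_FILTER_WRITE;	/* filter writes only */
	filter.ranges[0].base = msr_base;		/* first MSR index of the range */
	filter.ranges[0].nmsrs = 64;			/* number of bits in deny_bitmap */
	filter.ranges[0].bitmap = deny_bitmap;

	/* KVM_X86_SET_MSR_FILTER is a VM ioctl declared in <linux/kvm.h>. */
	return ioctl(vm_fd, KVM_X86_SET_MSR_FILTER, &filter);
}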