cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

svm.h (20717B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#ifndef __SVM_SVM_H
#define __SVM_SVM_H

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bits.h>

#include <asm/svm.h>
#include <asm/sev-common.h>

#include "kvm_cache_regs.h"

#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)

#define	IOPM_SIZE PAGE_SIZE * 3
#define	MSRPM_SIZE PAGE_SIZE * 2

#define MAX_DIRECT_ACCESS_MSRS	21
#define MSRPM_OFFSETS	16
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;
extern int vgif;
extern bool intercept_smi;

/*
 * Clean bits in VMCB.
 * VMCB_ALL_CLEAN_MASK might also need to
 * be updated if this enum is modified.
 */
enum {
	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
			    pause filter count */
	VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
	VMCB_ASID,	 /* ASID */
	VMCB_INTR,	 /* int_ctl, int_vector */
	VMCB_NPT,        /* npt_en, nCR3, gPAT */
	VMCB_CR,	 /* CR0, CR3, CR4, EFER */
	VMCB_DR,         /* DR6, DR7 */
	VMCB_DT,         /* GDT, IDT */
	VMCB_SEG,        /* CS, DS, SS, ES, CPL */
	VMCB_CR2,        /* CR2 only */
	VMCB_LBR,        /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_AVIC,       /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
			  * AVIC PHYSICAL_TABLE pointer,
			  * AVIC LOGICAL_TABLE pointer
			  */
	VMCB_SW = 31,    /* Reserved for hypervisor/software use */
};

#define VMCB_ALL_CLEAN_MASK (					\
	(1U << VMCB_INTERCEPTS) | (1U << VMCB_PERM_MAP) |	\
	(1U << VMCB_ASID) | (1U << VMCB_INTR) |			\
	(1U << VMCB_NPT) | (1U << VMCB_CR) | (1U << VMCB_DR) |	\
	(1U << VMCB_DT) | (1U << VMCB_SEG) | (1U << VMCB_CR2) |	\
	(1U << VMCB_LBR) | (1U << VMCB_AVIC) |			\
	(1U << VMCB_SW))

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))

/* Supported init feature flags */
#define SEV_SNP_SUPPORTED_FLAGS		0x0

struct kvm_sev_info {
	bool active;		/* SEV enabled guest */
	bool es_active;		/* SEV-ES enabled guest */
	bool snp_active;	/* SEV-SNP enabled guest */

	unsigned int asid;	/* ASID used for this guest */
	unsigned int handle;	/* SEV firmware handle */
	int fd;			/* SEV device fd */

	unsigned long pages_locked; /* Number of pages locked */
	struct list_head regions_list;  /* List of registered regions */

	u64 ap_jump_table;	/* SEV-ES AP Jump Table address */

	struct kvm *enc_context_owner; /* Owner of copied encryption context */
	struct list_head mirror_vms; /* List of VMs mirroring */
	struct list_head mirror_entry; /* Use as a list entry of mirrors */
	struct misc_cg *misc_cg; /* For misc cgroup accounting */
	atomic_t migration_in_progress;

	u64 snp_init_flags;
	void *snp_context;      /* SNP guest context page */
	spinlock_t psc_lock;
	void *snp_certs_data;
	struct mutex guest_req_lock;

	u64 sev_features;	/* Features set at VMSA creation */
};

struct kvm_svm {
	struct kvm kvm;

	/* Struct members for AVIC */
	u32 avic_vm_id;
	struct page *avic_logical_id_table_page;
	struct page *avic_physical_id_table_page;
	struct hlist_node hnode;

	struct kvm_sev_info sev_info;
};

struct kvm_vcpu;

struct kvm_vmcb_info {
	struct vmcb *ptr;
	unsigned long pa;
	int cpu;
	uint64_t asid_generation;
};

struct vmcb_save_area_cached {
	u64 efer;
	u64 cr4;
	u64 cr3;
	u64 cr0;
	u64 dr7;
	u64 dr6;
};

struct vmcb_ctrl_area_cached {
	u32 intercepts[MAX_INTERCEPT];
	u16 pause_filter_thresh;
	u16 pause_filter_count;
	u64 iopm_base_pa;
	u64 msrpm_base_pa;
	u64 tsc_offset;
	u32 asid;
	u8 tlb_ctl;
	u32 int_ctl;
	u32 int_vector;
	u32 int_state;
	u32 exit_code;
	u32 exit_code_hi;
	u64 exit_info_1;
	u64 exit_info_2;
	u32 exit_int_info;
	u32 exit_int_info_err;
	u64 nested_ctl;
	u32 event_inj;
	u32 event_inj_err;
	u64 nested_cr3;
	u64 virt_ext;
	u32 clean;
	u8 reserved_sw[32];
};

struct svm_nested_state {
	struct kvm_vmcb_info vmcb02;
	u64 hsave_msr;
	u64 vm_cr_msr;
	u64 vmcb12_gpa;
	u64 last_vmcb12_gpa;

	/* These are the merged vectors */
	u32 *msrpm;

	/* A VMRUN has started but has not yet been performed, so
	 * we cannot inject a nested vmexit yet.  */
	bool nested_run_pending;

	/* cache for control fields of the guest */
	struct vmcb_ctrl_area_cached ctl;

	/*
	 * Note: this struct is not kept up-to-date while L2 runs; it is only
	 * valid within nested_svm_vmrun.
	 */
	struct vmcb_save_area_cached save;

	bool initialized;

	/*
	 * Indicates whether MSR bitmap for L2 needs to be rebuilt due to
	 * changes in MSR bitmap for L1 or switching to a different L2. Note,
	 * this flag can only be used reliably in conjunction with a paravirt L1
	 * which informs L0 whether any changes to MSR bitmap for L2 were done
	 * on its side.
	 */
	bool force_msr_bitmap_recalc;
};

struct vcpu_sev_es_state {
	/* SEV-ES support */
	struct sev_es_save_area *vmsa;
	hpa_t vmsa_pa;
	bool ghcb_in_use;
	bool received_first_sipi;
	unsigned int ap_reset_hold_type;

	/* SEV-ES scratch area support */
	void *ghcb_sa;
	u32 ghcb_sa_len;
	u64 ghcb_sa_gpa;
	u32 ghcb_sa_alloc_len;
	bool ghcb_sa_sync;
	bool ghcb_sa_contained;
	u32 ghcb_sa_offset;

	/*
	 * SEV-ES support to hold the sw_exit_info return values to be
	 * sync'ed to the GHCB when mapped.
	 */
	u64 ghcb_sw_exit_info_1;
	u64 ghcb_sw_exit_info_2;

	u64 ghcb_registered_gpa;

	struct mutex snp_vmsa_mutex;
	gpa_t snp_vmsa_gpa;
	bool snp_ap_create;
};

struct vcpu_svm {
	struct kvm_vcpu vcpu;
	/* vmcb always points at current_vmcb->ptr, it's purely a shorthand. */
	struct vmcb *vmcb;
	struct kvm_vmcb_info vmcb01;
	struct kvm_vmcb_info *current_vmcb;
	struct svm_cpu_data *svm_data;
	u32 asid;
	u32 sysenter_esp_hi;
	u32 sysenter_eip_hi;
	uint64_t tsc_aux;

	u64 msr_decfg;

	u64 next_rip;

	u64 spec_ctrl;

	u64 tsc_ratio_msr;
	/*
	 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
	 * translated into the appropriate L2_CFG bits on the host to
	 * perform speculative control.
	 */
	u64 virt_spec_ctrl;

	u32 *msrpm;

	ulong nmi_iret_rip;

	struct svm_nested_state nested;

	bool nmi_singlestep;
	u64 nmi_singlestep_guest_rflags;

	unsigned int3_injected;
	unsigned long int3_rip;

	/* optional nested SVM features that are enabled for this guest  */
	bool nrips_enabled                : 1;
	bool tsc_scaling_enabled          : 1;
	bool v_vmload_vmsave_enabled      : 1;
	bool lbrv_enabled                 : 1;
	bool pause_filter_enabled         : 1;
	bool pause_threshold_enabled      : 1;
	bool vgif_enabled                 : 1;

	u32 ldr_reg;
	u32 dfr_reg;
	struct page *avic_backing_page;
	u64 *avic_physical_id_cache;

	/*
	 * Per-vcpu list of struct amd_svm_iommu_ir:
	 * This is used mainly to store interrupt remapping information used
	 * when updating the vcpu affinity. This avoids the need to scan for
	 * IRTE and try to match ga_tag in the IOMMU driver.
	 */
	struct list_head ir_list;
	spinlock_t ir_list_lock;

	/* Save desired MSR intercept (read: pass-through) state */
	struct {
		DECLARE_BITMAP(read, MAX_DIRECT_ACCESS_MSRS);
		DECLARE_BITMAP(write, MAX_DIRECT_ACCESS_MSRS);
	} shadow_msr_intercept;

	struct vcpu_sev_es_state sev_es;

	bool guest_state_loaded;
};

struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	u32 min_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
	struct vmcb *current_vmcb;

	/* index = sev_asid, value = vmcb pointer */
	struct vmcb **sev_vmcbs;
};

DECLARE_PER_CPU(struct svm_cpu_data *, svm_data);

void recalc_intercepts(struct vcpu_svm *svm);

static __always_inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_svm, kvm);
}

static __always_inline bool sev_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->active;
#else
	return false;
#endif
}

static __always_inline bool sev_es_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->es_active && !WARN_ON_ONCE(!sev->active);
#else
	return false;
#endif
}

static inline bool sev_snp_guest(struct kvm *kvm)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev_es_guest(kvm) && sev->snp_active;
}
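
/*
 * Illustrative sketch, not part of the original header: the three predicates
 * above form a hierarchy (SNP implies ES implies SEV), so feature-dependent
 * code can test from most to least specific, e.g.:
 *
 *	if (sev_snp_guest(kvm))
 *		;	// SEV-SNP specific handling
 *	else if (sev_es_guest(kvm))
 *		;	// SEV-ES handling
 *	else if (sev_guest(kvm))
 *		;	// plain SEV handling
 */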

static inline bool ghcb_gpa_is_registered(struct vcpu_svm *svm, u64 val)
{
	return svm->sev_es.ghcb_registered_gpa == val;
}

static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = VMCB_ALL_CLEAN_MASK
			       & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}

static inline bool vmcb_is_dirty(struct vmcb *vmcb, int bit)
{
	return !test_bit(bit, (unsigned long *)&vmcb->control.clean);
}
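
/*
 * Usage sketch for the clean-bit helpers above (illustrative only; the
 * pairing of save.cr4 with VMCB_CR is this example's assumption):
 *
 *	svm->vmcb->save.cr4 = cr4;
 *	vmcb_mark_dirty(svm->vmcb, VMCB_CR);
 *
 * Clearing a clean bit tells the CPU it must reload that register group from
 * the VMCB on the next VMRUN instead of relying on its cached copy.
 */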

static __always_inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

/*
 * Only the PDPTRs are loaded on demand into the shadow MMU.  All other
 * fields are synchronized on VM-Exit, because accessing the VMCB is cheap.
 *
 * CR3 might be out of date in the VMCB but it is not marked dirty; instead,
 * KVM_REQ_LOAD_MMU_PGD is always requested when the cached vcpu->arch.cr3
 * is changed.  svm_load_mmu_pgd() then syncs the new CR3 value into the VMCB.
 */
#define SVM_REGS_LAZY_LOAD_SET	(1 << VCPU_EXREG_PDPTR)
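
/*
 * Hedged sketch of the flow described in the comment above (shadow-paging
 * case without NPT; the statements are an illustration, not a copy of
 * svm.c):
 *
 *	vcpu->arch.cr3 = new_cr3;
 *	kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
 *	// later, when the request is serviced, svm_load_mmu_pgd() roughly does:
 *	svm->vmcb->save.cr3 = new_cr3;
 *	vmcb_mark_dirty(svm->vmcb, VMCB_CR);
 */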

static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__set_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void vmcb_clr_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__clear_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb12_is_intercept(struct vmcb_ctrl_area_cached *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}
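
/*
 * Illustrative only: the intercept words are treated as one flat bitmap, so
 * enabling a single intercept boils down to one call, e.g.:
 *
 *	vmcb_set_intercept(&svm->vmcb01.ptr->control, INTERCEPT_CPUID);
 *
 * INTERCEPT_CPUID is one of the intercept bit indices from <asm/svm.h>.
 * Callers that may run with a nested guest active should prefer the
 * svm_set_intercept() wrapper below so recalc_intercepts() re-merges the
 * L1 and L2 intercept sets.
 */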

static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	if (!sev_es_guest(svm->vcpu.kvm)) {
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE);
	}

	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);

	recalc_intercepts(svm);
}

static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb->control.intercepts[INTERCEPT_DR] = 0;

	/* DR7 access must remain intercepted for an SEV-ES guest */
	if (sev_es_guest(svm->vcpu.kvm)) {
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);
	}

	recalc_intercepts(svm);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_clr_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void svm_set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_set_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline void svm_clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_clr_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
{
	return vmcb_is_intercept(&svm->vmcb->control, bit);
}

static inline bool nested_vgif_enabled(struct vcpu_svm *svm)
{
	return svm->vgif_enabled && (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK);
}

static inline struct vmcb *get_vgif_vmcb(struct vcpu_svm *svm)
{
	if (!vgif)
		return NULL;

	if (is_guest_mode(&svm->vcpu) && !nested_vgif_enabled(svm))
		return svm->nested.vmcb02.ptr;
	else
		return svm->vmcb01.ptr;
}

static inline void enable_gif(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vgif_vmcb(svm);

	if (vmcb)
		vmcb->control.int_ctl |= V_GIF_MASK;
	else
		svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vgif_vmcb(svm);

	if (vmcb)
		vmcb->control.int_ctl &= ~V_GIF_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vgif_vmcb(svm);

	if (vmcb)
		return !!(vmcb->control.int_ctl & V_GIF_MASK);
	else
		return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}
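
/*
 * Illustrative note, not from the original source: with hardware vGIF the
 * guest's global interrupt flag lives in int_ctl of the VMCB returned by
 * get_vgif_vmcb(); without vGIF it is emulated in vcpu->arch.hflags.
 * Either way, callers only need the helpers, e.g.:
 *
 *	if (!gif_set(svm))
 *		return;	// interrupts are globally masked for this vCPU
 */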

static inline bool nested_npt_enabled(struct vcpu_svm *svm)
{
	return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
}

/* svm.c */
#define MSR_INVALID				0xffffffffU

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

extern bool dump_invalid_vmcb;

u32 svm_msrpm_offset(u32 msr);
u32 *svm_vcpu_alloc_msrpm(void);
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
void svm_vcpu_free_msrpm(u32 *msrpm);
void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
void svm_update_lbrv(struct kvm_vcpu *vcpu);

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void disable_nmi_singlestep(struct vcpu_svm *svm);
bool svm_smi_blocked(struct kvm_vcpu *vcpu);
bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
void svm_set_gif(struct vcpu_svm *svm, bool value);
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
			  int read, int write);
void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
				     int trig_mode, int vec);
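
/*
 * Hedged usage sketch for set_msr_interception() above; the read/write
 * arguments follow the "read: pass-through" convention noted for
 * shadow_msr_intercept in struct vcpu_svm, and the specific MSR is only an
 * example:
 *
 *	// allow the guest direct (non-intercepted) access to SPEC_CTRL
 *	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
 */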

/* nested.c */

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */

static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return is_guest_mode(vcpu) && (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
}

static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);
}

static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);
}

static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
}

int enter_svm_guest_mode(struct kvm_vcpu *vcpu,
			 u64 vmcb_gpa, struct vmcb *vmcb12, bool from_vmrun);
void svm_leave_nested(struct kvm_vcpu *vcpu);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct kvm_vcpu *vcpu);
void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
			  struct vmcb_save_area *from_save);
void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
int nested_svm_vmexit(struct vcpu_svm *svm);

static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)
{
	svm->vmcb->control.exit_code   = exit_code;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;
	return nested_svm_vmexit(svm);
}
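
/*
 * Illustrative only: a synthetic nested exit with no exit qualification can
 * be reflected to L1 with a single call, e.g.:
 *
 *	nested_svm_simple_vmexit(svm, SVM_EXIT_INTR);
 *
 * which fills in the exit code, zeroes exit_info_1/2 and forwards the exit
 * through nested_svm_vmexit(). SVM_EXIT_INTR is one of the SVM exit codes
 * from <uapi/asm/svm.h>.
 */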

int nested_svm_exit_handled(struct vcpu_svm *svm);
int nested_svm_check_permissions(struct kvm_vcpu *vcpu);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
			       bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu);
void __svm_write_tsc_multiplier(u64 multiplier);
void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
				       struct vmcb_control_area *control);
void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
				    struct vmcb_save_area *save);
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm);
void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm);
void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb);

static inline void svm_set_ghcb_sw_exit_info_1(struct kvm_vcpu *vcpu, u64 val)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->sev_es.ghcb_sw_exit_info_1 = val;
}

static inline void svm_set_ghcb_sw_exit_info_2(struct kvm_vcpu *vcpu, u64 val)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->sev_es.ghcb_sw_exit_info_2 = val;
}
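
/*
 * Illustrative only: a VMGEXIT handler typically reports its result through
 * these two fields, e.g.:
 *
 *	svm_set_ghcb_sw_exit_info_1(vcpu, 0);	// success
 *	svm_set_ghcb_sw_exit_info_2(vcpu, 0);
 *
 * The values are synced into the guest's GHCB when it is next mapped (see
 * the ghcb_sw_exit_info_* comment in struct vcpu_sev_es_state above).
 */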

extern struct kvm_x86_nested_ops svm_nested_ops;

/* avic.c */

int avic_ga_log_notifier(u32 ga_tag);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
void avic_init_vmcb(struct vcpu_svm *svm, struct vmcb *vmcb);
int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu);
int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu);
int avic_init_vcpu(struct vcpu_svm *svm);
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void avic_vcpu_put(struct kvm_vcpu *vcpu);
void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu);
void avic_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
bool avic_check_apicv_inhibit_reasons(enum kvm_apicv_inhibit reason);
void avic_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
void avic_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
bool avic_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu);
int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
			uint32_t guest_irq, bool set);
void avic_vcpu_blocking(struct kvm_vcpu *vcpu);
void avic_vcpu_unblocking(struct kvm_vcpu *vcpu);
void avic_ring_doorbell(struct kvm_vcpu *vcpu);
unsigned long avic_vcpu_get_apicv_inhibit_reasons(struct kvm_vcpu *vcpu);

/* sev.c */

#define GHCB_VERSION_MAX	2ULL
#define GHCB_VERSION_MIN	1ULL

#define GHCB_HV_FT_SUPPORTED	(GHCB_HV_FT_SNP | GHCB_HV_FT_SNP_AP_CREATION)

extern unsigned int max_sev_asid;

void sev_vm_destroy(struct kvm *kvm);
int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp);
int sev_mem_enc_register_region(struct kvm *kvm,
				struct kvm_enc_region *range);
int sev_mem_enc_unregister_region(struct kvm *kvm,
				  struct kvm_enc_region *range);
int sev_vm_copy_enc_context_from(struct kvm *kvm, unsigned int source_fd);
int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd);
void pre_sev_run(struct vcpu_svm *svm, int cpu);
void __init sev_set_cpu_caps(void);
void __init sev_hardware_setup(void);
void sev_hardware_unsetup(void);
int sev_cpu_init(struct svm_cpu_data *sd);
void sev_init_vmcb(struct vcpu_svm *svm);
void sev_free_vcpu(struct kvm_vcpu *vcpu);
int sev_handle_vmgexit(struct kvm_vcpu *vcpu);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
void sev_es_vcpu_reset(struct vcpu_svm *svm);
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
void sev_es_prepare_switch_to_guest(struct sev_es_save_area *hostsa);
void sev_es_unmap_ghcb(struct vcpu_svm *svm);
struct page *snp_safe_alloc_page(struct kvm_vcpu *vcpu);
void sev_rmp_page_level_adjust(struct kvm *kvm, kvm_pfn_t pfn, int *level);
int sev_post_map_gfn(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn);
void sev_post_unmap_gfn(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn);
void handle_rmp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code);
void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu);
int sev_snp_update_protected_guest_state(struct kvm_vcpu *vcpu);

/* vmenter.S */

void __svm_sev_es_vcpu_run(unsigned long vmcb_pa);
void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);

#endif