cachepc-qemu

Fork of AMDESE/qemu with changes for the cachepc side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu

hvf.c (21344B)


/* Copyright 2008 IBM Corporation
 *           2008 Red Hat, Inc.
 * Copyright 2011 Intel Corporation
 * Copyright 2016 Veertu, Inc.
 * Copyright 2017 The Android Open Source Project
 *
 * QEMU Hypervisor.framework support
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * This file contains code under public domain from the hvdos project:
 * https://github.com/mist64/hvdos
 *
 * Parts Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/error-report.h"

#include "sysemu/hvf.h"
#include "sysemu/hvf_int.h"
#include "sysemu/runstate.h"
#include "sysemu/cpus.h"
#include "hvf-i386.h"
#include "vmcs.h"
#include "vmx.h"
#include "x86.h"
#include "x86_descr.h"
#include "x86_mmu.h"
#include "x86_decode.h"
#include "x86_emu.h"
#include "x86_task.h"
#include "x86hvf.h"

#include <Hypervisor/hv.h>
#include <Hypervisor/hv_vmx.h>
#include <sys/sysctl.h>

#include "hw/i386/apic_internal.h"
#include "qemu/main-loop.h"
#include "qemu/accel.h"
#include "target/i386/cpu.h"

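/*
 * Push the emulated APIC's task priority into the vCPU's TPR shadow and
 * set the TPR threshold, so that a VM exit occurs once the guest lowers
 * its TPR enough for a pending interrupt to become deliverable.
 */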
void vmx_update_tpr(CPUState *cpu)
{
    /* TODO: integrate APIC handling */
    X86CPU *x86_cpu = X86_CPU(cpu);
    int tpr = cpu_get_apic_tpr(x86_cpu->apic_state) << 4;
    int irr = apic_get_highest_priority_irr(x86_cpu->apic_state);

    wreg(cpu->hvf->fd, HV_X86_TPR, tpr);
    if (irr == -1) {
        wvmcs(cpu->hvf->fd, VMCS_TPR_THRESHOLD, 0);
    } else {
        wvmcs(cpu->hvf->fd, VMCS_TPR_THRESHOLD, (irr > tpr) ? tpr >> 4 :
              irr >> 4);
    }
}

static void update_apic_tpr(CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    int tpr = rreg(cpu->hvf->fd, HV_X86_TPR) >> 4;
    cpu_set_apic_tpr(x86_cpu->apic_state, tpr);
}

#define VECTORING_INFO_VECTOR_MASK     0xff

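/*
 * Forward a (possibly repeated) port I/O access to QEMU's I/O address
 * space, one element of 'size' bytes per iteration.
 */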
void hvf_handle_io(CPUArchState *env, uint16_t port, void *buffer,
                   int direction, int size, int count)
{
    int i;
    uint8_t *ptr = buffer;

    for (i = 0; i < count; i++) {
        address_space_rw(&address_space_io, port, MEMTXATTRS_UNSPECIFIED,
                         ptr, size, direction);
        ptr += size;
    }
}

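/*
 * Classify an EPT violation: returns true when the access must be handled
 * by the instruction emulator (an unmapped region, or a region that is not
 * plain RAM/ROMD), false when it is an ordinary RAM fault. Writes to slots
 * with dirty logging enabled are recorded and the slot is re-opened for
 * writing.
 */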
static bool ept_emulation_fault(hvf_slot *slot, uint64_t gpa, uint64_t ept_qual)
{
    int read, write;

    /* EPT fault on an instruction fetch doesn't make sense here */
    if (ept_qual & EPT_VIOLATION_INST_FETCH) {
        return false;
    }

    /* EPT fault must be a read fault or a write fault */
    read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;
    write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;
    if ((read | write) == 0) {
        return false;
    }

    if (write && slot) {
        if (slot->flags & HVF_SLOT_LOG) {
            memory_region_set_dirty(slot->region, gpa - slot->start, 1);
            hv_vm_protect((hv_gpaddr_t)slot->start, (size_t)slot->size,
                          HV_MEMORY_READ | HV_MEMORY_WRITE);
        }
    }

    /*
     * The EPT violation must have been caused by accessing a
     * guest-physical address that is a translation of a guest-linear
     * address.
     */
    if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||
        (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {
        return false;
    }

    if (!slot) {
        return true;
    }
    if (!memory_region_is_ram(slot->region) &&
        !(read && memory_region_is_romd(slot->region))) {
        return true;
    }
    return false;
}

void hvf_arch_vcpu_destroy(CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    g_free(env->hvf_mmio_buf);
}

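/*
 * Fill in the guest TSC frequency from the host's machdep.tsc.frequency
 * sysctl, unless the user already provided one.
 */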
static void init_tsc_freq(CPUX86State *env)
{
    size_t length;
    uint64_t tsc_freq;

    if (env->tsc_khz != 0) {
        return;
    }

    length = sizeof(uint64_t);
    if (sysctlbyname("machdep.tsc.frequency", &tsc_freq, &length, NULL, 0)) {
        return;
    }
    env->tsc_khz = tsc_freq / 1000;  /* Hz to KHz */
}

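/* Derive the APIC bus frequency from the host's hw.busfrequency sysctl. */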
static void init_apic_bus_freq(CPUX86State *env)
{
    size_t length;
    uint64_t bus_freq;

    if (env->apic_bus_freq != 0) {
        return;
    }

    length = sizeof(uint64_t);
    if (sysctlbyname("hw.busfrequency", &bus_freq, &length, NULL, 0)) {
        return;
    }
    env->apic_bus_freq = bus_freq;
}

static inline bool tsc_is_known(CPUX86State *env)
{
    return env->tsc_khz != 0;
}

static inline bool apic_bus_freq_is_known(CPUX86State *env)
{
    return env->apic_bus_freq != 0;
}

void hvf_kick_vcpu_thread(CPUState *cpu)
{
    cpus_kick_thread(cpu);
}

int hvf_arch_init(void)
{
    return 0;
}

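/*
 * Per-vCPU setup: initialize the instruction emulator and decoder, read
 * the VMX capability MSRs, program the VMCS execution controls, and hand
 * the syscall/TSC MSRs below over to the hypervisor for native handling.
 */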
int hvf_arch_init_vcpu(CPUState *cpu)
{
    X86CPU *x86cpu = X86_CPU(cpu);
    CPUX86State *env = &x86cpu->env;

    init_emu();
    init_decoder();

    hvf_state->hvf_caps = g_new0(struct hvf_vcpu_caps, 1);
    env->hvf_mmio_buf = g_new(char, 4096);

    if (x86cpu->vmware_cpuid_freq) {
        init_tsc_freq(env);
        init_apic_bus_freq(env);

        if (!tsc_is_known(env) || !apic_bus_freq_is_known(env)) {
            error_report("vmware-cpuid-freq: feature couldn't be enabled");
        }
    }

    if (hv_vmx_read_capability(HV_VMX_CAP_PINBASED,
        &hvf_state->hvf_caps->vmx_cap_pinbased)) {
        abort();
    }
    if (hv_vmx_read_capability(HV_VMX_CAP_PROCBASED,
        &hvf_state->hvf_caps->vmx_cap_procbased)) {
        abort();
    }
    if (hv_vmx_read_capability(HV_VMX_CAP_PROCBASED2,
        &hvf_state->hvf_caps->vmx_cap_procbased2)) {
        abort();
    }
    if (hv_vmx_read_capability(HV_VMX_CAP_ENTRY,
        &hvf_state->hvf_caps->vmx_cap_entry)) {
        abort();
    }

    /* set VMCS control fields */
    wvmcs(cpu->hvf->fd, VMCS_PIN_BASED_CTLS,
          cap2ctrl(hvf_state->hvf_caps->vmx_cap_pinbased,
          VMCS_PIN_BASED_CTLS_EXTINT |
          VMCS_PIN_BASED_CTLS_NMI |
          VMCS_PIN_BASED_CTLS_VNMI));
    wvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS,
          cap2ctrl(hvf_state->hvf_caps->vmx_cap_procbased,
          VMCS_PRI_PROC_BASED_CTLS_HLT |
          VMCS_PRI_PROC_BASED_CTLS_MWAIT |
          VMCS_PRI_PROC_BASED_CTLS_TSC_OFFSET |
          VMCS_PRI_PROC_BASED_CTLS_TPR_SHADOW) |
          VMCS_PRI_PROC_BASED_CTLS_SEC_CONTROL);
    wvmcs(cpu->hvf->fd, VMCS_SEC_PROC_BASED_CTLS,
          cap2ctrl(hvf_state->hvf_caps->vmx_cap_procbased2,
                   VMCS_PRI_PROC_BASED2_CTLS_APIC_ACCESSES));

    wvmcs(cpu->hvf->fd, VMCS_ENTRY_CTLS,
          cap2ctrl(hvf_state->hvf_caps->vmx_cap_entry, 0));
    wvmcs(cpu->hvf->fd, VMCS_EXCEPTION_BITMAP, 0); /* Double fault */

    wvmcs(cpu->hvf->fd, VMCS_TPR_THRESHOLD, 0);

    x86cpu->env.xsave_buf_len = 4096;
    x86cpu->env.xsave_buf = qemu_memalign(4096, x86cpu->env.xsave_buf_len);

    /*
     * The allocated storage must be large enough for all of the
     * possible XSAVE state components.
     */
    assert(hvf_get_supported_cpuid(0xd, 0, R_ECX) <= x86cpu->env.xsave_buf_len);

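    /*
     * Let Hypervisor.framework handle these MSRs natively instead of
     * taking a VM exit on every access.
     */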
    hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_STAR, 1);
    hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_LSTAR, 1);
    hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_CSTAR, 1);
    hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_FMASK, 1);
    hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_FSBASE, 1);
    hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_GSBASE, 1);
    hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_KERNELGSBASE, 1);
    hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_TSC_AUX, 1);
    hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_IA32_TSC, 1);
    hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_IA32_SYSENTER_CS, 1);
    hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_IA32_SYSENTER_EIP, 1);
    hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_IA32_SYSENTER_ESP, 1);

    return 0;
}

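/*
 * After a VM exit, capture any event that was being delivered (from the
 * IDT-vectoring info field) along with the NMI/interrupt shadow state, so
 * the event can be re-injected on the next VM entry.
 */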
static void hvf_store_events(CPUState *cpu, uint32_t ins_len, uint64_t idtvec_info)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    env->exception_nr = -1;
    env->exception_pending = 0;
    env->exception_injected = 0;
    env->interrupt_injected = -1;
    env->nmi_injected = false;
    env->ins_len = 0;
    env->has_error_code = false;
    if (idtvec_info & VMCS_IDT_VEC_VALID) {
        switch (idtvec_info & VMCS_IDT_VEC_TYPE) {
        case VMCS_IDT_VEC_HWINTR:
        case VMCS_IDT_VEC_SWINTR:
            env->interrupt_injected = idtvec_info & VMCS_IDT_VEC_VECNUM;
            break;
        case VMCS_IDT_VEC_NMI:
            env->nmi_injected = true;
            break;
        case VMCS_IDT_VEC_HWEXCEPTION:
        case VMCS_IDT_VEC_SWEXCEPTION:
            env->exception_nr = idtvec_info & VMCS_IDT_VEC_VECNUM;
            env->exception_injected = 1;
            break;
        case VMCS_IDT_VEC_PRIV_SWEXCEPTION:
        default:
            abort();
        }
        if ((idtvec_info & VMCS_IDT_VEC_TYPE) == VMCS_IDT_VEC_SWEXCEPTION ||
            (idtvec_info & VMCS_IDT_VEC_TYPE) == VMCS_IDT_VEC_SWINTR) {
            env->ins_len = ins_len;
        }
        if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
            env->has_error_code = true;
            env->error_code = rvmcs(cpu->hvf->fd, VMCS_IDT_VECTORING_ERROR);
        }
    }
    if ((rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY) &
        VMCS_INTERRUPTIBILITY_NMI_BLOCKING)) {
        env->hflags2 |= HF2_NMI_MASK;
    } else {
        env->hflags2 &= ~HF2_NMI_MASK;
    }
    if (rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY) &
         (VMCS_INTERRUPTIBILITY_STI_BLOCKING |
         VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)) {
        env->hflags |= HF_INHIBIT_IRQ_MASK;
    } else {
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    }
}

static void hvf_cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                              uint32_t *eax, uint32_t *ebx,
                              uint32_t *ecx, uint32_t *edx)
{
    /*
     * A wrapper around cpu_x86_cpuid that adds leaves 0x40000000 and
     * 0x40000010; leaves 0x40000001-0x4000000F are filled with zeros.
     * This provides vmware-cpuid-freq support to hvf.
     *
     * Note: leaf 0x40000000 does not expose HVF,
     * leaving the hypervisor signature empty.
     */

    if (index < 0x40000000 || index > 0x40000010 ||
        !tsc_is_known(env) || !apic_bus_freq_is_known(env)) {

        cpu_x86_cpuid(env, index, count, eax, ebx, ecx, edx);
        return;
    }

    switch (index) {
    case 0x40000000:
        *eax = 0x40000010;    /* Max available cpuid leaf */
        *ebx = 0;             /* Leave signature empty */
        *ecx = 0;
        *edx = 0;
        break;
    case 0x40000010:
        *eax = env->tsc_khz;
        *ebx = env->apic_bus_freq / 1000; /* Hz to KHz */
        *ecx = 0;
        *edx = 0;
        break;
    default:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}

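/*
 * Main vCPU run loop: flush dirty register state, inject pending events,
 * enter the guest with hv_vcpu_run(), then dispatch on the exit reason.
 * Loops until a handler produces a nonzero EXCP_* code for the caller.
 */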
int hvf_vcpu_exec(CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;
    int ret = 0;
    uint64_t rip = 0;

    if (hvf_process_events(cpu)) {
        return EXCP_HLT;
    }

    do {
        if (cpu->vcpu_dirty) {
            hvf_put_registers(cpu);
            cpu->vcpu_dirty = false;
        }

        if (hvf_inject_interrupts(cpu)) {
            return EXCP_INTERRUPT;
        }
        vmx_update_tpr(cpu);

        qemu_mutex_unlock_iothread();
        if (!cpu_is_bsp(X86_CPU(cpu)) && cpu->halted) {
            qemu_mutex_lock_iothread();
            return EXCP_HLT;
        }

        hv_return_t r = hv_vcpu_run(cpu->hvf->fd);
        assert_hvf_ok(r);

        /* handle VMEXIT */
        uint64_t exit_reason = rvmcs(cpu->hvf->fd, VMCS_EXIT_REASON);
        uint64_t exit_qual = rvmcs(cpu->hvf->fd, VMCS_EXIT_QUALIFICATION);
        uint32_t ins_len = (uint32_t)rvmcs(cpu->hvf->fd,
                                           VMCS_EXIT_INSTRUCTION_LENGTH);

        uint64_t idtvec_info = rvmcs(cpu->hvf->fd, VMCS_IDT_VECTORING_INFO);

        hvf_store_events(cpu, ins_len, idtvec_info);
        rip = rreg(cpu->hvf->fd, HV_X86_RIP);
        env->eflags = rreg(cpu->hvf->fd, HV_X86_RFLAGS);

        qemu_mutex_lock_iothread();

        update_apic_tpr(cpu);
        current_cpu = cpu;

        ret = 0;
        switch (exit_reason) {
        case EXIT_REASON_HLT: {
            macvm_set_rip(cpu, rip + ins_len);
            if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
                (env->eflags & IF_MASK))
                && !(cpu->interrupt_request & CPU_INTERRUPT_NMI) &&
                !(idtvec_info & VMCS_IDT_VEC_VALID)) {
                cpu->halted = 1;
                ret = EXCP_HLT;
                break;
            }
            ret = EXCP_INTERRUPT;
            break;
        }
        case EXIT_REASON_MWAIT: {
            ret = EXCP_INTERRUPT;
            break;
        }
        /* Need to check if MMIO or unmapped fault */
        case EXIT_REASON_EPT_FAULT:
        {
            hvf_slot *slot;
            uint64_t gpa = rvmcs(cpu->hvf->fd, VMCS_GUEST_PHYSICAL_ADDRESS);

            if (((idtvec_info & VMCS_IDT_VEC_VALID) == 0) &&
                ((exit_qual & EXIT_QUAL_NMIUDTI) != 0)) {
                vmx_set_nmi_blocking(cpu);
            }

            slot = hvf_find_overlap_slot(gpa, 1);
            /* mmio */
            if (ept_emulation_fault(slot, gpa, exit_qual)) {
                struct x86_decode decode;

                load_regs(cpu);
                decode_instruction(env, &decode);
                exec_instruction(env, &decode);
                store_regs(cpu);
                break;
            }
            break;
        }
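        /*
         * Port I/O: the exit qualification encodes the access size
         * (bits 2:0), direction (bit 3), string operation (bit 4) and
         * port number (bits 31:16). Non-string IN/OUT are fast-pathed
         * below; string variants go through the instruction emulator.
         */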
        case EXIT_REASON_INOUT:
        {
            uint32_t in = (exit_qual & 8) != 0;
            uint32_t size = (exit_qual & 7) + 1;
            uint32_t string = (exit_qual & 16) != 0;
            uint32_t port = exit_qual >> 16;
            /*uint32_t rep = (exit_qual & 0x20) != 0;*/

            if (!string && in) {
                uint64_t val = 0;
                load_regs(cpu);
                hvf_handle_io(env, port, &val, 0, size, 1);
                if (size == 1) {
                    AL(env) = val;
                } else if (size == 2) {
                    AX(env) = val;
                } else if (size == 4) {
                    RAX(env) = (uint32_t)val;
                } else {
                    RAX(env) = (uint64_t)val;
                }
                env->eip += ins_len;
                store_regs(cpu);
                break;
            } else if (!string && !in) {
                RAX(env) = rreg(cpu->hvf->fd, HV_X86_RAX);
                hvf_handle_io(env, port, &RAX(env), 1, size, 1);
                macvm_set_rip(cpu, rip + ins_len);
                break;
            }
            struct x86_decode decode;

            load_regs(cpu);
            decode_instruction(env, &decode);
            assert(ins_len == decode.len);
            exec_instruction(env, &decode);
            store_regs(cpu);

            break;
        }
        case EXIT_REASON_CPUID: {
            uint32_t rax = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RAX);
            uint32_t rbx = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RBX);
            uint32_t rcx = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RCX);
            uint32_t rdx = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RDX);

            if (rax == 1) {
                /* CPUID1.ecx.OSXSAVE needs to know CR4 */
                env->cr[4] = rvmcs(cpu->hvf->fd, VMCS_GUEST_CR4);
            }
            hvf_cpu_x86_cpuid(env, rax, rcx, &rax, &rbx, &rcx, &rdx);

            wreg(cpu->hvf->fd, HV_X86_RAX, rax);
            wreg(cpu->hvf->fd, HV_X86_RBX, rbx);
            wreg(cpu->hvf->fd, HV_X86_RCX, rcx);
            wreg(cpu->hvf->fd, HV_X86_RDX, rdx);

            macvm_set_rip(cpu, rip + ins_len);
            break;
        }
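        /*
         * XSETBV: only XCR0 (ecx == 0) is handled; other indices are
         * silently skipped. Bit 0 (x87 state) is forced on when writing
         * XCR0, as the architecture requires it to always be set.
         */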
        case EXIT_REASON_XSETBV: {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUX86State *env = &x86_cpu->env;
            uint32_t eax = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RAX);
            uint32_t ecx = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RCX);
            uint32_t edx = (uint32_t)rreg(cpu->hvf->fd, HV_X86_RDX);

            if (ecx) {
                macvm_set_rip(cpu, rip + ins_len);
                break;
            }
            env->xcr0 = ((uint64_t)edx << 32) | eax;
            wreg(cpu->hvf->fd, HV_X86_XCR0, env->xcr0 | 1);
            macvm_set_rip(cpu, rip + ins_len);
            break;
        }
        case EXIT_REASON_INTR_WINDOW:
            vmx_clear_int_window_exiting(cpu);
            ret = EXCP_INTERRUPT;
            break;
        case EXIT_REASON_NMI_WINDOW:
            vmx_clear_nmi_window_exiting(cpu);
            ret = EXCP_INTERRUPT;
            break;
        case EXIT_REASON_EXT_INTR:
            /* force exit and allow io handling */
            ret = EXCP_INTERRUPT;
            break;
        case EXIT_REASON_RDMSR:
        case EXIT_REASON_WRMSR:
        {
            load_regs(cpu);
            if (exit_reason == EXIT_REASON_RDMSR) {
                simulate_rdmsr(cpu);
            } else {
                simulate_wrmsr(cpu);
            }
            env->eip += ins_len;
            store_regs(cpu);
            break;
        }
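        /*
         * Control-register access: the exit qualification encodes the CR
         * number (bits 3:0), the GPR operand (bits 11:8) and the access
         * type (bit 4 set for MOV from CR). The handler supports CR0, CR4
         * and CR8 and aborts on anything else.
         */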
        case EXIT_REASON_CR_ACCESS: {
            int cr;
            int reg;

            load_regs(cpu);
            cr = exit_qual & 15;
            reg = (exit_qual >> 8) & 15;

            switch (cr) {
            case 0x0: {
                macvm_set_cr0(cpu->hvf->fd, RRX(env, reg));
                break;
            }
            case 4: {
                macvm_set_cr4(cpu->hvf->fd, RRX(env, reg));
                break;
            }
            case 8: {
                X86CPU *x86_cpu = X86_CPU(cpu);
                if (exit_qual & 0x10) {
                    RRX(env, reg) = cpu_get_apic_tpr(x86_cpu->apic_state);
                } else {
                    int tpr = RRX(env, reg);
                    cpu_set_apic_tpr(x86_cpu->apic_state, tpr);
                    ret = EXCP_INTERRUPT;
                }
                break;
            }
            default:
                error_report("Unrecognized CR %d", cr);
                abort();
            }
            env->eip += ins_len;
            store_regs(cpu);
            break;
        }
        case EXIT_REASON_APIC_ACCESS: { /* TODO */
            struct x86_decode decode;

            load_regs(cpu);
            decode_instruction(env, &decode);
            exec_instruction(env, &decode);
            store_regs(cpu);
            break;
        }
        case EXIT_REASON_TPR: {
            ret = 1;
            break;
        }
        case EXIT_REASON_TASK_SWITCH: {
            uint64_t vinfo = rvmcs(cpu->hvf->fd, VMCS_IDT_VECTORING_INFO);
            x68_segment_selector sel = {.sel = exit_qual & 0xffff};
            vmx_handle_task_switch(cpu, sel, (exit_qual >> 30) & 0x3,
                                   vinfo & VMCS_INTR_VALID,
                                   vinfo & VECTORING_INFO_VECTOR_MASK,
                                   vinfo & VMCS_INTR_T_MASK);
            break;
        }
        case EXIT_REASON_TRIPLE_FAULT: {
            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            ret = EXCP_INTERRUPT;
            break;
        }
        case EXIT_REASON_RDPMC:
            wreg(cpu->hvf->fd, HV_X86_RAX, 0);
            wreg(cpu->hvf->fd, HV_X86_RDX, 0);
            macvm_set_rip(cpu, rip + ins_len);
            break;
        case VMX_REASON_VMCALL:
            env->exception_nr = EXCP0D_GPF;
            env->exception_injected = 1;
            env->has_error_code = true;
            env->error_code = 0;
            break;
        default:
            error_report("%llx: unhandled exit %llx", rip, exit_reason);
        }
    } while (ret == 0);

    return ret;
}