cachepc-qemu

Fork of AMDESE/qemu with changes for the cachepc side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu

x86_task.c (6450B)


// This software is licensed under the terms of the GNU General Public
// License version 2, as published by the Free Software Foundation, and
// may be copied, distributed, and modified under those terms.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.
#include "qemu/osdep.h"
#include "panic.h"
#include "qemu-common.h"
#include "qemu/error-report.h"

#include "sysemu/hvf.h"
#include "hvf-i386.h"
#include "vmcs.h"
#include "vmx.h"
#include "x86.h"
#include "x86_descr.h"
#include "x86_mmu.h"
#include "x86_decode.h"
#include "x86_emu.h"
#include "x86_task.h"
#include "x86hvf.h"

#include <Hypervisor/hv.h>
#include <Hypervisor/hv_vmx.h>

#include "hw/i386/apic_internal.h"
#include "qemu/main-loop.h"
#include "qemu/accel.h"
#include "target/i386/cpu.h"

// TODO: taskswitch handling
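/*
 * Write the outgoing task's volatile state (EIP, EFLAGS, general
 * purpose registers and segment selectors) into a 32-bit TSS image.
 */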
static void save_state_to_tss32(CPUState *cpu, struct x86_tss_segment32 *tss)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    /* CR3 and ldt selector are not saved intentionally */
    tss->eip = (uint32_t)env->eip;
    tss->eflags = (uint32_t)env->eflags;
    tss->eax = EAX(env);
    tss->ecx = ECX(env);
    tss->edx = EDX(env);
    tss->ebx = EBX(env);
    tss->esp = ESP(env);
    tss->ebp = EBP(env);
    tss->esi = ESI(env);
    tss->edi = EDI(env);

    tss->es = vmx_read_segment_selector(cpu, R_ES).sel;
    tss->cs = vmx_read_segment_selector(cpu, R_CS).sel;
    tss->ss = vmx_read_segment_selector(cpu, R_SS).sel;
    tss->ds = vmx_read_segment_selector(cpu, R_DS).sel;
    tss->fs = vmx_read_segment_selector(cpu, R_FS).sel;
    tss->gs = vmx_read_segment_selector(cpu, R_GS).sel;
}

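/*
 * Load the incoming task's state from a 32-bit TSS image: CR3 goes
 * straight into the VMCS, EIP/EFLAGS and the general purpose registers
 * into the emulated CPU state, and LDTR plus the segment selectors into
 * their VMX segment slots. Bit 1 of EFLAGS is architecturally reserved
 * and always reads as 1, hence the "| 2".
 */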
static void load_state_from_tss32(CPUState *cpu, struct x86_tss_segment32 *tss)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    wvmcs(cpu->hvf->fd, VMCS_GUEST_CR3, tss->cr3);

    env->eip = tss->eip;
    env->eflags = tss->eflags | 2;

    /* General purpose registers */
    RAX(env) = tss->eax;
    RCX(env) = tss->ecx;
    RDX(env) = tss->edx;
    RBX(env) = tss->ebx;
    RSP(env) = tss->esp;
    RBP(env) = tss->ebp;
    RSI(env) = tss->esi;
    RDI(env) = tss->edi;

    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ldt}}, R_LDTR);
    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->es}}, R_ES);
    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->cs}}, R_CS);
    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ss}}, R_SS);
    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ds}}, R_DS);
    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->fs}}, R_FS);
    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->gs}}, R_GS);
}

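/*
 * Core 32-bit task switch: write the dynamic fields (eip up to, but not
 * including, the ldt selector) of the outgoing task back into its TSS,
 * read the incoming TSS, record the back link to the old task when one
 * is required (old_tss_sel != 0xffff), and finally load the new state.
 */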
static int task_switch_32(CPUState *cpu, x68_segment_selector tss_sel, x68_segment_selector old_tss_sel,
                          uint64_t old_tss_base, struct x86_segment_descriptor *new_desc)
{
    struct x86_tss_segment32 tss_seg;
    uint32_t new_tss_base = x86_segment_base(new_desc);
    uint32_t eip_offset = offsetof(struct x86_tss_segment32, eip);
    uint32_t ldt_sel_offset = offsetof(struct x86_tss_segment32, ldt);

    vmx_read_mem(cpu, &tss_seg, old_tss_base, sizeof(tss_seg));
    save_state_to_tss32(cpu, &tss_seg);

    vmx_write_mem(cpu, old_tss_base + eip_offset, &tss_seg.eip, ldt_sel_offset - eip_offset);
    vmx_read_mem(cpu, &tss_seg, new_tss_base, sizeof(tss_seg));

    if (old_tss_sel.sel != 0xffff) {
        tss_seg.prev_tss = old_tss_sel.sel;

        vmx_write_mem(cpu, new_tss_base, &tss_seg.prev_tss, sizeof(tss_seg.prev_tss));
    }
    load_state_from_tss32(cpu, &tss_seg);
    return 0;
}

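/*
 * VM-exit handler for a guest task switch. Switches that were not
 * triggered through a hardware interrupt, exception or NMI gate are not
 * emulated here: the triggering instruction is simply skipped by
 * advancing RIP past it.
 */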
void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel, int reason, bool gate_valid, uint8_t gate, uint64_t gate_type)
{
    uint64_t rip = rreg(cpu->hvf->fd, HV_X86_RIP);
    if (!gate_valid || (gate_type != VMCS_INTR_T_HWEXCEPTION &&
                        gate_type != VMCS_INTR_T_HWINTR &&
                        gate_type != VMCS_INTR_T_NMI)) {
        int ins_len = rvmcs(cpu->hvf->fd, VMCS_EXIT_INSTRUCTION_LENGTH);
        macvm_set_rip(cpu, rip + ins_len);
        return;
    }

    load_regs(cpu);

    struct x86_segment_descriptor curr_tss_desc, next_tss_desc;
    int ret;
    x68_segment_selector old_tss_sel = vmx_read_segment_selector(cpu, R_TR);
    uint64_t old_tss_base = vmx_read_segment_base(cpu, R_TR);
    uint32_t desc_limit;
    struct x86_call_gate task_gate_desc;
    struct vmx_segment vmx_seg;

    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    x86_read_segment_descriptor(cpu, &next_tss_desc, tss_sel);
    x86_read_segment_descriptor(cpu, &curr_tss_desc, old_tss_sel);

    if (reason == TSR_IDT_GATE && gate_valid) {
        int dpl;

        ret = x86_read_call_gate(cpu, &task_gate_desc, gate);

        dpl = task_gate_desc.dpl;
        x68_segment_selector cs = vmx_read_segment_selector(cpu, R_CS);
        if (tss_sel.rpl > dpl || cs.rpl > dpl) {
            /* TODO: privilege check failed; a #GP should be raised here */
        }
    }

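    /* A 32-bit TSS must be at least 0x68 bytes, a 16-bit TSS at least
     * 0x2c; a descriptor that is not present or too small would raise
     * #TS, which is not emulated here. */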
    desc_limit = x86_segment_limit(&next_tss_desc);
    if (!next_tss_desc.p || ((desc_limit < 0x67 && (next_tss_desc.type & 8)) || desc_limit < 0x2b)) {
        VM_PANIC("emulate_ts");
    }

    if (reason == TSR_IRET || reason == TSR_JMP) {
        curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
        x86_write_segment_descriptor(cpu, &curr_tss_desc, old_tss_sel);
    }

    if (reason == TSR_IRET)
        env->eflags &= ~NT_MASK;

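    /* A selector of 0xffff marks "no back link": only CALL and gate
     * initiated switches record the outgoing TSS in prev_tss. */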
    if (reason != TSR_CALL && reason != TSR_IDT_GATE)
        old_tss_sel.sel = 0xffff;

    if (reason != TSR_IRET) {
        next_tss_desc.type |= (1 << 1); /* set busy flag */
        x86_write_segment_descriptor(cpu, &next_tss_desc, tss_sel);
    }

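    /* Type bit 3 distinguishes a 32-bit TSS (set) from a 16-bit TSS
     * (clear); only the 32-bit variant is implemented. */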
    if (next_tss_desc.type & 8)
        ret = task_switch_32(cpu, tss_sel, old_tss_sel, old_tss_base, &next_tss_desc);
    else
        //ret = task_switch_16(cpu, tss_sel, old_tss_sel, old_tss_base, &next_tss_desc);
        VM_PANIC("task_switch_16");

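    /* Architecturally, CR0.TS is set on every task switch so that the
     * first FPU/SSE use in the new task raises #NM; then TR is loaded
     * with the new TSS descriptor. */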
    macvm_set_cr0(cpu->hvf->fd, rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0) | CR0_TS);
    x86_segment_descriptor_to_vmx(cpu, tss_sel, &next_tss_desc, &vmx_seg);
    vmx_write_segment_descriptor(cpu, &vmx_seg, R_TR);

    store_regs(cpu);

    hv_vcpu_invalidate_tlb(cpu->hvf->fd);
    hv_vcpu_flush(cpu->hvf->fd);
}