cachepc-qemu

Fork of AMDESE/qemu with changes for the cachepc side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu

control-target.c (4393B)


/*
 * Interface for configuring and controlling the state of tracing events.
 *
 * Copyright (C) 2014-2017 Lluís Vilanova <vilanova@ac.upc.edu>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "trace/trace-root.h"
#include "trace/control.h"

void trace_event_set_state_dynamic_init(TraceEvent *ev, bool state)
{
    bool state_pre;
    assert(trace_event_get_state_static(ev));
    /*
     * We ignore the "vcpu" property here, since no vCPUs have been created
     * yet. Then dstate can only be 1 or 0.
     */
    state_pre = *ev->dstate;
    if (state_pre != state) {
        if (state) {
            trace_events_enabled_count++;
            *ev->dstate = 1;
        } else {
            trace_events_enabled_count--;
            *ev->dstate = 0;
        }
    }
}
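
/*
 * Illustrative sketch (not part of the upstream file): one way an early-boot
 * caller could use trace_event_set_state_dynamic_init() to enable a named
 * event before any vCPU exists. The helper name is hypothetical, and
 * trace_event_get_name() is assumed to be provided by the trace API in
 * "trace/control.h"; only the functions defined above are known from this
 * file itself.
 *
 *     static void example_enable_event_early(const char *name, bool enable)
 *     {
 *         TraceEventIter iter;
 *         TraceEvent *ev;
 *
 *         trace_event_iter_init_all(&iter);
 *         while ((ev = trace_event_iter_next(&iter)) != NULL) {
 *             if (trace_event_get_state_static(ev) &&
 *                 strcmp(trace_event_get_name(ev), name) == 0) {
 *                 trace_event_set_state_dynamic_init(ev, enable);
 *             }
 *         }
 *     }
 */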

void trace_event_set_state_dynamic(TraceEvent *ev, bool state)
{
    CPUState *vcpu;
    assert(trace_event_get_state_static(ev));
    if (trace_event_is_vcpu(ev) && likely(first_cpu != NULL)) {
        CPU_FOREACH(vcpu) {
            trace_event_set_vcpu_state_dynamic(vcpu, ev, state);
        }
    } else {
        /*
         * Without the "vcpu" property, dstate can only be 1 or 0. With it, we
         * haven't instantiated any vCPU yet, so we will set a global state
         * instead, and trace_init_vcpu will reconcile it afterwards.
         */
        bool state_pre = *ev->dstate;
        if (state_pre != state) {
            if (state) {
                trace_events_enabled_count++;
                *ev->dstate = 1;
            } else {
                trace_events_enabled_count--;
                *ev->dstate = 0;
            }
        }
    }
}
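
/*
 * Illustrative sketch (not part of the upstream file): toggling an event at
 * runtime goes through trace_event_set_state_dynamic(), which fans out to
 * every existing vCPU for "vcpu" events and otherwise falls back to the
 * global dstate. The helper name is hypothetical; 'ev' is assumed to have
 * been looked up elsewhere.
 *
 *     static void example_toggle_event(TraceEvent *ev, bool enable)
 *     {
 *         if (trace_event_get_state_static(ev)) {
 *             trace_event_set_state_dynamic(ev, enable);
 *         }
 *     }
 */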
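
/*
 * Commit the delayed per-vCPU trace state: copy trace_dstate_delayed into the
 * live trace_dstate bitmap and clear the vCPU's TB jump cache so subsequently
 * executed code is looked up (and, if necessary, retranslated) with the new
 * per-vCPU dstate.
 */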
static void trace_event_synchronize_vcpu_state_dynamic(
    CPUState *vcpu, run_on_cpu_data ignored)
{
    bitmap_copy(vcpu->trace_dstate, vcpu->trace_dstate_delayed,
                CPU_TRACE_DSTATE_MAX_EVENTS);
    cpu_tb_jmp_cache_clear(vcpu);
}

void trace_event_set_vcpu_state_dynamic(CPUState *vcpu,
                                        TraceEvent *ev, bool state)
{
    uint32_t vcpu_id;
    bool state_pre;
    assert(trace_event_get_state_static(ev));
    assert(trace_event_is_vcpu(ev));
    vcpu_id = trace_event_get_vcpu_id(ev);
    state_pre = test_bit(vcpu_id, vcpu->trace_dstate);
    if (state_pre != state) {
        if (state) {
            trace_events_enabled_count++;
            set_bit(vcpu_id, vcpu->trace_dstate_delayed);
            (*ev->dstate)++;
        } else {
            trace_events_enabled_count--;
            clear_bit(vcpu_id, vcpu->trace_dstate_delayed);
            (*ev->dstate)--;
        }
        if (vcpu->created) {
            /*
             * Delay changes until next TB; we want all TBs to be built from a
             * single set of dstate values to ensure consistency of generated
             * tracing code.
             */
            async_run_on_cpu(vcpu, trace_event_synchronize_vcpu_state_dynamic,
                             RUN_ON_CPU_NULL);
        } else {
            trace_event_synchronize_vcpu_state_dynamic(vcpu, RUN_ON_CPU_NULL);
        }
    }
}
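
/*
 * Illustrative sketch (not part of the upstream file): enabling a "vcpu"
 * event on a single vCPU only, which is what distinguishes this entry point
 * from trace_event_set_state_dynamic() above. The helper name is
 * hypothetical.
 *
 *     static void example_enable_on_one_vcpu(CPUState *cpu, TraceEvent *ev)
 *     {
 *         if (trace_event_get_state_static(ev) && trace_event_is_vcpu(ev)) {
 *             trace_event_set_vcpu_state_dynamic(cpu, ev, true);
 *         }
 *     }
 */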
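
/*
 * Helpers to detect whether the vCPU currently being initialized is the first
 * one: adding_first_cpu1() walks the global CPU list and returns true while
 * it contains at most one entry, and adding_first_cpu() wraps that walk with
 * the CPU list lock.
 */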
static bool adding_first_cpu1(void)
{
    CPUState *cpu;
    size_t count = 0;
    CPU_FOREACH(cpu) {
        count++;
        if (count > 1) {
            return false;
        }
    }
    return true;
}

static bool adding_first_cpu(void)
{
    bool res;
    cpu_list_lock();
    res = adding_first_cpu1();
    cpu_list_unlock();
    return res;
}
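
/*
 * Reconcile early-init trace state with a newly added vCPU: for every "vcpu"
 * event that was enabled before any vCPU existed, the first vCPU to arrive
 * drops the provisional global enablement and re-enables the event through
 * the per-vCPU path; subsequent vCPUs simply enable it for themselves.
 * Finally, the guest_cpu_enter trace event is emitted for the new vCPU.
 * (Typically invoked while a vCPU is being realized.)
 */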
void trace_init_vcpu(CPUState *vcpu)
{
    TraceEventIter iter;
    TraceEvent *ev;
    trace_event_iter_init_all(&iter);
    while ((ev = trace_event_iter_next(&iter)) != NULL) {
        if (trace_event_is_vcpu(ev) &&
            trace_event_get_state_static(ev) &&
            trace_event_get_state_dynamic(ev)) {
            if (adding_first_cpu()) {
                /* check preconditions */
                assert(*ev->dstate == 1);
                /* disable early-init state ... */
                *ev->dstate = 0;
                trace_events_enabled_count--;
                /* ... and properly re-enable */
                trace_event_set_vcpu_state_dynamic(vcpu, ev, true);
            } else {
                trace_event_set_vcpu_state_dynamic(vcpu, ev, true);
            }
        }
    }
    trace_guest_cpu_enter(vcpu);
}
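
/*
 * Illustrative sketch (not part of the upstream file): dumping the dynamic
 * state of all events, using the same iterator API as trace_init_vcpu()
 * above. The helper name is hypothetical; trace_event_get_name() is assumed
 * from the trace API.
 *
 *     static void example_dump_trace_state(void)
 *     {
 *         TraceEventIter iter;
 *         TraceEvent *ev;
 *
 *         trace_event_iter_init_all(&iter);
 *         while ((ev = trace_event_iter_next(&iter)) != NULL) {
 *             if (trace_event_get_state_static(ev)) {
 *                 printf("%s: %s\n", trace_event_get_name(ev),
 *                        trace_event_get_state_dynamic(ev) ? "on" : "off");
 *             }
 *         }
 *     }
 */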