cachepc-qemu

Fork of AMDESE/qemu with changes for the cachepc side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu

tcg-accel-ops-rr.c (8417B)


/*
 * QEMU TCG Single Threaded vCPUs implementation
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2014 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "sysemu/tcg.h"
#include "sysemu/replay.h"
#include "qemu/main-loop.h"
#include "qemu/guest-random.h"
#include "exec/exec-all.h"

#include "tcg-accel-ops.h"
#include "tcg-accel-ops-rr.h"
#include "tcg-accel-ops-icount.h"

/* Kick all RR vCPUs */
void rr_kick_vcpu_thread(CPUState *unused)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_exit(cpu);
    }
}

/*
 * TCG vCPU kick timer
 *
 * The kick timer is responsible for moving single threaded vCPU
 * emulation on to the next vCPU. If more than one vCPU is running, a
 * timer event will force a cpu->exit so the next vCPU can get
 * scheduled.
 *
 * The timer is removed while all vCPUs are idle and restarted once
 * any of them becomes runnable again.
 */

static QEMUTimer *rr_kick_vcpu_timer;
static CPUState *rr_current_cpu;

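/* Deadline on the virtual clock for the next scheduling kick. */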
static inline int64_t rr_next_kick_time(void)
{
    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + TCG_KICK_PERIOD;
}

/* Kick the currently round-robin scheduled vCPU on to the next one */
static void rr_kick_next_cpu(void)
{
    CPUState *cpu;
    do {
        cpu = qatomic_mb_read(&rr_current_cpu);
        if (cpu) {
            cpu_exit(cpu);
        }
    } while (cpu != qatomic_mb_read(&rr_current_cpu));
}

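/* Kick-timer callback: re-arm the timer and kick the vCPU that is running. */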
static void rr_kick_thread(void *opaque)
{
    timer_mod(rr_kick_vcpu_timer, rr_next_kick_time());
    rr_kick_next_cpu();
}

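/*
 * Create the kick timer on demand (only needed when more than one vCPU
 * exists) and arm it if it is not already pending.
 */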
static void rr_start_kick_timer(void)
{
    if (!rr_kick_vcpu_timer && CPU_NEXT(first_cpu)) {
        rr_kick_vcpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                           rr_kick_thread, NULL);
    }
    if (rr_kick_vcpu_timer && !timer_pending(rr_kick_vcpu_timer)) {
        timer_mod(rr_kick_vcpu_timer, rr_next_kick_time());
    }
}

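/* Cancel a pending kick, e.g. while all vCPUs are idle. */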
static void rr_stop_kick_timer(void)
{
    if (rr_kick_vcpu_timer && timer_pending(rr_kick_vcpu_timer)) {
        timer_del(rr_kick_vcpu_timer);
    }
}

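/*
 * Sleep while every vCPU is idle, then restart the kick timer and run
 * the common I/O event handling for each vCPU.
 */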
static void rr_wait_io_event(void)
{
    CPUState *cpu;

    while (all_cpu_threads_idle()) {
        rr_stop_kick_timer();
        qemu_cond_wait_iothread(first_cpu->halt_cond);
    }

    rr_start_kick_timer();

    CPU_FOREACH(cpu) {
        qemu_wait_io_event_common(cpu);
    }
}

/*
 * Destroy any remaining vCPUs which have been unplugged and have
 * finished running
 */
static void rr_deal_with_unplugged_cpus(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->unplug && !cpu_can_run(cpu)) {
            tcg_cpus_destroy(cpu);
            break;
        }
    }
}

/*
 * In the single-threaded case each vCPU is simulated in turn. If
 * there is more than a single vCPU we create a simple timer to kick
 * the vCPU and ensure we don't get stuck in a tight loop in one vCPU.
 * This is done explicitly rather than relying on side-effects
 * elsewhere.
 */

static void *rr_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    assert(tcg_enabled());
    rcu_register_thread();
    tcg_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);

    cpu->thread_id = qemu_get_thread_id();
    cpu->can_do_io = 1;
    cpu_thread_signal_created(cpu);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);

    /* wait for initial kick-off after machine start */
    while (first_cpu->stopped) {
        qemu_cond_wait_iothread(first_cpu->halt_cond);

        /* process any pending work */
        CPU_FOREACH(cpu) {
            current_cpu = cpu;
            qemu_wait_io_event_common(cpu);
        }
    }

    rr_start_kick_timer();

    cpu = first_cpu;

    /* process any pending work */
    cpu->exit_request = 1;

    while (1) {
        qemu_mutex_unlock_iothread();
        replay_mutex_lock();
        qemu_mutex_lock_iothread();

        if (icount_enabled()) {
            /* Account partial waits to QEMU_CLOCK_VIRTUAL.  */
            icount_account_warp_timer();
            /*
             * Run the timers here.  This is much more efficient than
             * waking up the I/O thread and waiting for completion.
             */
            icount_handle_deadline();
        }

        replay_mutex_unlock();

        if (!cpu) {
            cpu = first_cpu;
        }

        while (cpu && cpu_work_list_empty(cpu) && !cpu->exit_request) {

            qatomic_mb_set(&rr_current_cpu, cpu);
            current_cpu = cpu;

            qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
                              (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);

            if (cpu_can_run(cpu)) {
                int r;

                qemu_mutex_unlock_iothread();
                if (icount_enabled()) {
                    icount_prepare_for_run(cpu);
                }
                r = tcg_cpus_exec(cpu);
                if (icount_enabled()) {
                    icount_process_data(cpu);
                }
                qemu_mutex_lock_iothread();

                if (r == EXCP_DEBUG) {
                    cpu_handle_guest_debug(cpu);
                    break;
                } else if (r == EXCP_ATOMIC) {
                    qemu_mutex_unlock_iothread();
                    cpu_exec_step_atomic(cpu);
                    qemu_mutex_lock_iothread();
                    break;
                }
            } else if (cpu->stop) {
                if (cpu->unplug) {
                    cpu = CPU_NEXT(cpu);
                }
                break;
            }

            cpu = CPU_NEXT(cpu);
        } /* while (cpu && !cpu->exit_request).. */

        /* Does not need qatomic_mb_set because a spurious wakeup is okay.  */
        qatomic_set(&rr_current_cpu, NULL);

        if (cpu && cpu->exit_request) {
            qatomic_mb_set(&cpu->exit_request, 0);
        }

        if (icount_enabled() && all_cpu_threads_idle()) {
            /*
             * When all cpus are sleeping (e.g. in WFI), to avoid a deadlock
             * in the main_loop, wake it up in order to start the warp timer.
             */
            qemu_notify_event();
        }

        rr_wait_io_event();
        rr_deal_with_unplugged_cpus();
    }

    rcu_unregister_thread();
    return NULL;
}

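/*
 * The first vCPU to start creates the single shared round-robin TCG
 * thread; every later vCPU simply attaches to it.
 */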
void rr_start_vcpu_thread(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];
    static QemuCond *single_tcg_halt_cond;
    static QemuThread *single_tcg_cpu_thread;

    g_assert(tcg_enabled());
    tcg_cpu_init_cflags(cpu, false);

    if (!single_tcg_cpu_thread) {
        cpu->thread = g_malloc0(sizeof(QemuThread));
        cpu->halt_cond = g_malloc0(sizeof(QemuCond));
        qemu_cond_init(cpu->halt_cond);

        /* share a single thread for all cpus with TCG */
        snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "ALL CPUs/TCG");
        qemu_thread_create(cpu->thread, thread_name,
                           rr_cpu_thread_fn,
                           cpu, QEMU_THREAD_JOINABLE);

        single_tcg_halt_cond = cpu->halt_cond;
        single_tcg_cpu_thread = cpu->thread;
#ifdef _WIN32
        cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
    } else {
        /* we share the thread */
        cpu->thread = single_tcg_cpu_thread;
        cpu->halt_cond = single_tcg_halt_cond;
        cpu->thread_id = first_cpu->thread_id;
        cpu->can_do_io = 1;
        cpu->created = true;
    }
}