cachepc-qemu

Fork of AMDESE/qemu with changes for the cachepc side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu

accounting.c (8166B)


/*
 * QEMU System Emulator block accounting
 *
 * Copyright (c) 2011 Christoph Hellwig
 * Copyright (c) 2015 Igalia, S.L.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "block/accounting.h"
#include "block/block_int.h"
#include "qemu/timer.h"
#include "sysemu/qtest.h"

static QEMUClockType clock_type = QEMU_CLOCK_REALTIME;
static const int qtest_latency_ns = NANOSECONDS_PER_SECOND / 1000;

void block_acct_init(BlockAcctStats *stats)
{
    qemu_mutex_init(&stats->lock);
    if (qtest_enabled()) {
        clock_type = QEMU_CLOCK_VIRTUAL;
    }
}

void block_acct_setup(BlockAcctStats *stats, bool account_invalid,
                      bool account_failed)
{
    stats->account_invalid = account_invalid;
    stats->account_failed = account_failed;
}

void block_acct_cleanup(BlockAcctStats *stats)
{
    BlockAcctTimedStats *s, *next;
    QSLIST_FOREACH_SAFE(s, &stats->intervals, entries, next) {
        g_free(s);
    }
    qemu_mutex_destroy(&stats->lock);
}

void block_acct_add_interval(BlockAcctStats *stats, unsigned interval_length)
{
    BlockAcctTimedStats *s;
    unsigned i;

    s = g_new0(BlockAcctTimedStats, 1);
    s->interval_length = interval_length;
    s->stats = stats;
    qemu_mutex_lock(&stats->lock);
    QSLIST_INSERT_HEAD(&stats->intervals, s, entries);

    for (i = 0; i < BLOCK_MAX_IOTYPE; i++) {
        timed_average_init(&s->latency[i], clock_type,
                           (uint64_t) interval_length * NANOSECONDS_PER_SECOND);
    }
    qemu_mutex_unlock(&stats->lock);
}

BlockAcctTimedStats *block_acct_interval_next(BlockAcctStats *stats,
                                              BlockAcctTimedStats *s)
{
    if (s == NULL) {
        return QSLIST_FIRST(&stats->intervals);
    } else {
        return QSLIST_NEXT(s, entries);
    }
}

void block_acct_start(BlockAcctStats *stats, BlockAcctCookie *cookie,
                      int64_t bytes, enum BlockAcctType type)
{
    assert(type < BLOCK_MAX_IOTYPE);

    cookie->bytes = bytes;
    cookie->start_time_ns = qemu_clock_get_ns(clock_type);
    cookie->type = type;
}

/* block_latency_histogram_compare_func:
 * Compare @key with interval [@it[0], @it[1]).
 * Return: -1 if @key < @it[0]
 *          0 if @key in [@it[0], @it[1])
 *         +1 if @key >= @it[1]
 */
static int block_latency_histogram_compare_func(const void *key, const void *it)
{
    uint64_t k = *(uint64_t *)key;
    uint64_t a = ((uint64_t *)it)[0];
    uint64_t b = ((uint64_t *)it)[1];

    return k < a ? -1 : (k < b ? 0 : 1);
}

static void block_latency_histogram_account(BlockLatencyHistogram *hist,
                                            int64_t latency_ns)
{
    uint64_t *pos;

    if (hist->bins == NULL) {
        /* histogram disabled */
        return;
    }

    if (latency_ns < hist->boundaries[0]) {
        hist->bins[0]++;
        return;
    }

    if (latency_ns >= hist->boundaries[hist->nbins - 2]) {
        hist->bins[hist->nbins - 1]++;
        return;
    }

    pos = bsearch(&latency_ns, hist->boundaries, hist->nbins - 2,
                  sizeof(hist->boundaries[0]),
                  block_latency_histogram_compare_func);
    assert(pos != NULL);

    hist->bins[pos - hist->boundaries + 1]++;
}

int block_latency_histogram_set(BlockAcctStats *stats, enum BlockAcctType type,
                                uint64List *boundaries)
{
    BlockLatencyHistogram *hist = &stats->latency_histogram[type];
    uint64List *entry;
    uint64_t *ptr;
    uint64_t prev = 0;
    int new_nbins = 1;

    for (entry = boundaries; entry; entry = entry->next) {
        if (entry->value <= prev) {
            return -EINVAL;
        }
        new_nbins++;
        prev = entry->value;
    }

    hist->nbins = new_nbins;
    g_free(hist->boundaries);
    hist->boundaries = g_new(uint64_t, hist->nbins - 1);
    for (entry = boundaries, ptr = hist->boundaries; entry;
         entry = entry->next, ptr++)
    {
        *ptr = entry->value;
    }

    g_free(hist->bins);
    hist->bins = g_new0(uint64_t, hist->nbins);

    return 0;
}
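
/*
 * Illustration (editorial note, not part of the upstream file): passing the
 * ascending boundaries 10, 50 and 100 to block_latency_histogram_set() yields
 * nbins == 4, and block_latency_histogram_account() then counts a latency
 * into bins[0] for [0, 10), bins[1] for [10, 50), bins[2] for [50, 100) and
 * bins[3] for [100, +inf) nanoseconds: the first and last bins come from the
 * explicit range checks, the interior ones from bsearch() over adjacent
 * boundary pairs.
 */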

void block_latency_histograms_clear(BlockAcctStats *stats)
{
    int i;

    for (i = 0; i < BLOCK_MAX_IOTYPE; i++) {
        BlockLatencyHistogram *hist = &stats->latency_histogram[i];
        g_free(hist->bins);
        g_free(hist->boundaries);
        memset(hist, 0, sizeof(*hist));
    }
}

static void block_account_one_io(BlockAcctStats *stats, BlockAcctCookie *cookie,
                                 bool failed)
{
    BlockAcctTimedStats *s;
    int64_t time_ns = qemu_clock_get_ns(clock_type);
    int64_t latency_ns = time_ns - cookie->start_time_ns;

    if (qtest_enabled()) {
        latency_ns = qtest_latency_ns;
    }

    assert(cookie->type < BLOCK_MAX_IOTYPE);

    if (cookie->type == BLOCK_ACCT_NONE) {
        return;
    }

    WITH_QEMU_LOCK_GUARD(&stats->lock) {
        if (failed) {
            stats->failed_ops[cookie->type]++;
        } else {
            stats->nr_bytes[cookie->type] += cookie->bytes;
            stats->nr_ops[cookie->type]++;
        }

        block_latency_histogram_account(&stats->latency_histogram[cookie->type],
                                        latency_ns);

        if (!failed || stats->account_failed) {
            stats->total_time_ns[cookie->type] += latency_ns;
            stats->last_access_time_ns = time_ns;

            QSLIST_FOREACH(s, &stats->intervals, entries) {
                timed_average_account(&s->latency[cookie->type], latency_ns);
            }
        }
    }

    cookie->type = BLOCK_ACCT_NONE;
}

void block_acct_done(BlockAcctStats *stats, BlockAcctCookie *cookie)
{
    block_account_one_io(stats, cookie, false);
}

void block_acct_failed(BlockAcctStats *stats, BlockAcctCookie *cookie)
{
    block_account_one_io(stats, cookie, true);
}

void block_acct_invalid(BlockAcctStats *stats, enum BlockAcctType type)
{
    assert(type < BLOCK_MAX_IOTYPE);

    /* block_account_one_io() updates total_time_ns[], but this one does
     * not.  The reason is that invalid requests are accounted during their
     * submission, therefore there's no actual I/O involved.
     */
    qemu_mutex_lock(&stats->lock);
    stats->invalid_ops[type]++;

    if (stats->account_invalid) {
        stats->last_access_time_ns = qemu_clock_get_ns(clock_type);
    }
    qemu_mutex_unlock(&stats->lock);
}

void block_acct_merge_done(BlockAcctStats *stats, enum BlockAcctType type,
                           int num_requests)
{
    assert(type < BLOCK_MAX_IOTYPE);

    qemu_mutex_lock(&stats->lock);
    stats->merged[type] += num_requests;
    qemu_mutex_unlock(&stats->lock);
}

int64_t block_acct_idle_time_ns(BlockAcctStats *stats)
{
    return qemu_clock_get_ns(clock_type) - stats->last_access_time_ns;
}

double block_acct_queue_depth(BlockAcctTimedStats *stats,
                              enum BlockAcctType type)
{
    uint64_t sum, elapsed;

    assert(type < BLOCK_MAX_IOTYPE);

    qemu_mutex_lock(&stats->stats->lock);
    sum = timed_average_sum(&stats->latency[type], &elapsed);
    qemu_mutex_unlock(&stats->stats->lock);

    return (double) sum / elapsed;
}
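
accounting.c only does the bookkeeping; callers wrap each request in a start/done pair. Below is a minimal sketch of that call pattern, assuming a BlockAcctStats that has already been set up with block_acct_init(); the names do_accounted_read() and perform_io() are hypothetical stand-ins for whatever actually issues the I/O (in QEMU the stats normally live inside a BlockBackend).

#include "qemu/osdep.h"
#include "block/accounting.h"

/* Hypothetical stand-in for the code that actually performs the I/O. */
static int perform_io(void *buf, int64_t nbytes)
{
    (void)buf;
    (void)nbytes;
    return 0;
}

/* Sketch of the expected call pattern around a single read request. */
static int do_accounted_read(BlockAcctStats *stats, void *buf, int64_t nbytes)
{
    BlockAcctCookie cookie;
    int ret;

    /* Records byte count, start timestamp and I/O type in the cookie. */
    block_acct_start(stats, &cookie, nbytes, BLOCK_ACCT_READ);

    ret = perform_io(buf, nbytes);

    if (ret < 0) {
        /* Bumps failed_ops[]; time is only accumulated if account_failed is set. */
        block_acct_failed(stats, &cookie);
    } else {
        /* Adds bytes, op count and latency for BLOCK_ACCT_READ. */
        block_acct_done(stats, &cookie);
    }
    return ret;
}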