cachepc-qemu

Fork of AMDESE/qemu with changes for the cachepc side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu

linux-aio.c (13082B)


/*
 * Linux native AIO support.
 *
 * Copyright (C) 2009 IBM, Corp.
 * Copyright (C) 2009 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "block/aio.h"
#include "qemu/queue.h"
#include "block/block.h"
#include "block/raw-aio.h"
#include "qemu/event_notifier.h"
#include "qemu/coroutine.h"
#include "qapi/error.h"

#include <libaio.h>

/*
 * Queue size (per-device).
 *
 * XXX: eventually we need to communicate this to the guest and/or make it
 *      tunable by the guest.  If we get more outstanding requests at a time
 *      than this we will get EAGAIN from io_submit which is communicated to
 *      the guest as an I/O error.
 */
#define MAX_EVENTS 1024

/* Maximum number of requests in a batch. (default value) */
#define DEFAULT_MAX_BATCH 32

struct qemu_laiocb {
    Coroutine *co;
    LinuxAioState *ctx;
    struct iocb iocb;
    ssize_t ret;
    size_t nbytes;
    QEMUIOVector *qiov;
    bool is_read;
    QSIMPLEQ_ENTRY(qemu_laiocb) next;
};

typedef struct {
    int plugged;
    unsigned int in_queue;
    unsigned int in_flight;
    bool blocked;
    QSIMPLEQ_HEAD(, qemu_laiocb) pending;
} LaioQueue;

struct LinuxAioState {
    AioContext *aio_context;

    io_context_t ctx;
    EventNotifier e;

    /* I/O queue for batched submission.  Protected by the AioContext lock. */
    LaioQueue io_q;

    /* I/O completion processing.  Only runs in I/O thread.  */
    QEMUBH *completion_bh;
    int event_idx;
    int event_max;
};

static void ioq_submit(LinuxAioState *s);

static inline ssize_t io_event_ret(struct io_event *ev)
{
    return (ssize_t)(((uint64_t)ev->res2 << 32) | ev->res);
}
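
/*
 * Illustrative example (not from the original source): a request that
 * completed 4096 bytes comes back with res == 4096 and res2 == 0, so
 * io_event_ret() returns 4096; a failed request carries a negative errno
 * in the same fields and comes back out as, e.g., -EIO.
 */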

/*
 * Completes an AIO request.
 */
static void qemu_laio_process_completion(struct qemu_laiocb *laiocb)
{
    int ret;

    ret = laiocb->ret;
    if (ret != -ECANCELED) {
        if (ret == laiocb->nbytes) {
            ret = 0;
        } else if (ret >= 0) {
            /* Short reads mean EOF, pad with zeros. */
            if (laiocb->is_read) {
                qemu_iovec_memset(laiocb->qiov, ret, 0,
                    laiocb->qiov->size - ret);
            } else {
                ret = -ENOSPC;
            }
        }
    }

    laiocb->ret = ret;

    /*
     * If the coroutine is already entered it must be in ioq_submit() and
     * will notice laiocb->ret has been filled in when it eventually runs
     * later.  Coroutines cannot be entered recursively so avoid doing
     * that!
     */
    if (!qemu_coroutine_entered(laiocb->co)) {
        aio_co_wake(laiocb->co);
    }
}
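
/*
 * Worked example (added for illustration): an 8 KiB read request
 * (nbytes == qiov->size == 8192) that hits end-of-file after 1024 bytes
 * arrives here with laiocb->ret == 1024; qemu_iovec_memset() above then
 * zero-fills bytes 1024..8191 of the qiov.  A short write is reported as
 * -ENOSPC instead.
 */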

/**
 * aio_ring buffer which is shared between userspace and kernel.
 *
 * This is copied from linux/fs/aio.c; a common header does not exist,
 * but AIO has existed for ages so we assume the ABI is stable.
 */
struct aio_ring {
    unsigned    id;    /* kernel internal index number */
    unsigned    nr;    /* number of io_events */
    unsigned    head;  /* Written to by userland or by kernel. */
    unsigned    tail;

    unsigned    magic;
    unsigned    compat_features;
    unsigned    incompat_features;
    unsigned    header_length;  /* size of aio_ring */

    struct io_event io_events[];
};
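
/*
 * Note (assumption inferred from the casts below, not stated in the
 * original): the io_context_t handle returned by io_setup() is the
 * userspace address of this kernel-mapped ring, which is what lets
 * io_getevents_peek()/io_getevents_commit() read and advance the ring
 * directly instead of calling io_getevents().
 */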

/**
 * io_getevents_peek:
 * @ctx: AIO context
 * @events: pointer to the events array, output value
 *
 * Returns the number of completed events and sets a pointer
 * to the events array.  This function does not update the internal
 * ring buffer, only reads head and tail.  When @events has been
 * processed, io_getevents_commit() must be called.
 */
static inline unsigned int io_getevents_peek(io_context_t ctx,
                                             struct io_event **events)
{
    struct aio_ring *ring = (struct aio_ring *)ctx;
    unsigned int head = ring->head, tail = ring->tail;
    unsigned int nr;

    nr = tail >= head ? tail - head : ring->nr - head;
    *events = ring->io_events + head;
    /* To avoid speculative loads of s->events[i] before observing tail.
       Paired with smp_wmb() inside linux/fs/aio.c: aio_complete(). */
    smp_rmb();

    return nr;
}
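
/*
 * Worked example (added for illustration): with ring->nr == 128,
 * head == 120 and tail == 5 there are 13 completed events, but they wrap
 * around the end of the ring, so this call only exposes the 8 entries at
 * indices 120..127.  After io_getevents_commit(ctx, 8) the head becomes 0
 * and a second peek returns the remaining 5, which is why callers loop
 * (see qemu_laio_process_completions() below).
 */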

/**
 * io_getevents_commit:
 * @ctx: AIO context
 * @nr: the number of events by which the head should be advanced
 *
 * Advances the head of the ring buffer.
 */
static inline void io_getevents_commit(io_context_t ctx, unsigned int nr)
{
    struct aio_ring *ring = (struct aio_ring *)ctx;

    if (nr) {
        ring->head = (ring->head + nr) % ring->nr;
    }
}

/**
 * io_getevents_advance_and_peek:
 * @ctx: AIO context
 * @events: pointer to the events array, output value
 * @nr: the number of events by which the head should be advanced
 *
 * Advances the head of the ring buffer and returns the number of elements left.
 */
static inline unsigned int
io_getevents_advance_and_peek(io_context_t ctx,
                              struct io_event **events,
                              unsigned int nr)
{
    io_getevents_commit(ctx, nr);
    return io_getevents_peek(ctx, events);
}

/**
 * qemu_laio_process_completions:
 * @s: AIO state
 *
 * Fetches completed I/O requests and invokes their callbacks.
 *
 * The function is somewhat tricky because it supports nested event loops, for
 * example when a request callback invokes aio_poll().  In order to do this,
 * indices are kept in LinuxAioState.  The function schedules the completion
 * BH so it can be called again from a nested event loop.  When there are no
 * events left to complete, the BH is canceled.
 */
static void qemu_laio_process_completions(LinuxAioState *s)
{
    struct io_event *events;

    /* Reschedule so nested event loops see currently pending completions */
    qemu_bh_schedule(s->completion_bh);

    while ((s->event_max = io_getevents_advance_and_peek(s->ctx, &events,
                                                         s->event_idx))) {
        for (s->event_idx = 0; s->event_idx < s->event_max; ) {
            struct iocb *iocb = events[s->event_idx].obj;
            struct qemu_laiocb *laiocb =
                container_of(iocb, struct qemu_laiocb, iocb);

            laiocb->ret = io_event_ret(&events[s->event_idx]);

            /* Change counters one-by-one because we can be nested. */
            s->io_q.in_flight--;
            s->event_idx++;
            qemu_laio_process_completion(laiocb);
        }
    }

    qemu_bh_cancel(s->completion_bh);

    /* If we are nested we have to notify the level above that we are done
     * by setting event_max to zero; the upper level will then jump out of
     * its own `for` loop.  If we are the last level, all counters have
     * dropped to zero. */
    s->event_max = 0;
    s->event_idx = 0;
}
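
/*
 * Illustrative nesting scenario (added commentary): suppose ten events are
 * pending and the coroutine woken for event 3 ends up in aio_poll().  The
 * completion BH scheduled above re-enters this function, which first commits
 * the events the outer call already consumed (tracked in s->event_idx),
 * drains whatever is left, and finally zeroes event_max and event_idx.  When
 * control returns to the outer invocation, its `for` condition fails and its
 * `while` loop simply re-peeks an emptied ring, so no event is processed
 * twice.
 */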

static void qemu_laio_process_completions_and_submit(LinuxAioState *s)
{
    aio_context_acquire(s->aio_context);
    qemu_laio_process_completions(s);

    if (!s->io_q.plugged && !QSIMPLEQ_EMPTY(&s->io_q.pending)) {
        ioq_submit(s);
    }
    aio_context_release(s->aio_context);
}

/* Bottom half: runs the completion processing scheduled by
 * qemu_laio_process_completions() for nested event loops. */
static void qemu_laio_completion_bh(void *opaque)
{
    LinuxAioState *s = opaque;

    qemu_laio_process_completions_and_submit(s);
}

/* EventNotifier handler: the kernel signalled s->e via the iocb's eventfd. */
static void qemu_laio_completion_cb(EventNotifier *e)
{
    LinuxAioState *s = container_of(e, LinuxAioState, e);

    if (event_notifier_test_and_clear(&s->e)) {
        qemu_laio_process_completions_and_submit(s);
    }
}

/* AioContext poll handler: look at the completion ring directly and report
 * whether any progress was made. */
static bool qemu_laio_poll_cb(void *opaque)
{
    EventNotifier *e = opaque;
    LinuxAioState *s = container_of(e, LinuxAioState, e);
    struct io_event *events;

    if (!io_getevents_peek(s->ctx, &events)) {
        return false;
    }

    qemu_laio_process_completions_and_submit(s);
    return true;
}

static void ioq_init(LaioQueue *io_q)
{
    QSIMPLEQ_INIT(&io_q->pending);
    io_q->plugged = 0;
    io_q->in_queue = 0;
    io_q->in_flight = 0;
    io_q->blocked = false;
}

static void ioq_submit(LinuxAioState *s)
{
    int ret, len;
    struct qemu_laiocb *aiocb;
    struct iocb *iocbs[MAX_EVENTS];
    QSIMPLEQ_HEAD(, qemu_laiocb) completed;

    do {
        if (s->io_q.in_flight >= MAX_EVENTS) {
            break;
        }
        len = 0;
        QSIMPLEQ_FOREACH(aiocb, &s->io_q.pending, next) {
            iocbs[len++] = &aiocb->iocb;
            if (s->io_q.in_flight + len >= MAX_EVENTS) {
                break;
            }
        }

        ret = io_submit(s->ctx, len, iocbs);
        if (ret == -EAGAIN) {
            break;
        }
        if (ret < 0) {
            /* Fail the first request, retry the rest */
            aiocb = QSIMPLEQ_FIRST(&s->io_q.pending);
            QSIMPLEQ_REMOVE_HEAD(&s->io_q.pending, next);
            s->io_q.in_queue--;
            aiocb->ret = ret;
            qemu_laio_process_completion(aiocb);
            continue;
        }

        s->io_q.in_flight += ret;
        s->io_q.in_queue  -= ret;
        aiocb = container_of(iocbs[ret - 1], struct qemu_laiocb, iocb);
        QSIMPLEQ_SPLIT_AFTER(&s->io_q.pending, aiocb, next, &completed);
    } while (ret == len && !QSIMPLEQ_EMPTY(&s->io_q.pending));
    s->io_q.blocked = (s->io_q.in_queue > 0);

    if (s->io_q.in_flight) {
        /* We can try to complete something right away if there are
         * still requests in flight. */
        qemu_laio_process_completions(s);
        /*
         * Even if we have completed everything (in_flight == 0), the queue
         * can still hold pending requests (in_queue > 0).  We do not attempt
         * to repeat the submission here to avoid an I/O hang.  The reason is
         * simple: s->e is still set and the completion callback will be
         * called shortly, and all pending requests will be submitted from
         * there.
         */
    }
}
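
/*
 * Worked example (added for illustration): with 1000 requests already in
 * flight and 100 queued, the inner loop above collects only 24 iocbs so
 * that in_flight + len stops at MAX_EVENTS.  If io_submit() accepts all 24,
 * the next iteration breaks on the in_flight >= MAX_EVENTS check, the
 * remaining 76 requests stay queued with io_q.blocked set, and they are
 * submitted later from the completion path
 * (qemu_laio_process_completions_and_submit()).
 */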

void laio_io_plug(BlockDriverState *bs, LinuxAioState *s)
{
    s->io_q.plugged++;
}

void laio_io_unplug(BlockDriverState *bs, LinuxAioState *s)
{
    assert(s->io_q.plugged);
    if (--s->io_q.plugged == 0 &&
        !s->io_q.blocked && !QSIMPLEQ_EMPTY(&s->io_q.pending)) {
        ioq_submit(s);
    }
}
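
/*
 * Usage note (sketch of the expected caller pattern, not a call site in
 * this file): device emulation brackets a burst of requests with
 * blk_io_plug()/blk_io_unplug(), which reach laio_io_plug()/laio_io_unplug()
 * through the block layer.  While plugged > 0, laio_do_submit() below only
 * queues iocbs (unless max_batch is reached), and the final unplug flushes
 * everything with a single io_submit().
 */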

static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset,
                          int type)
{
    LinuxAioState *s = laiocb->ctx;
    struct iocb *iocbs = &laiocb->iocb;
    QEMUIOVector *qiov = laiocb->qiov;
    int64_t max_batch = s->aio_context->aio_max_batch ?: DEFAULT_MAX_BATCH;

    /* limit the batch to the number of available events */
    max_batch = MIN_NON_ZERO(MAX_EVENTS - s->io_q.in_flight, max_batch);

    switch (type) {
    case QEMU_AIO_WRITE:
        io_prep_pwritev(iocbs, fd, qiov->iov, qiov->niov, offset);
        break;
    case QEMU_AIO_READ:
        io_prep_preadv(iocbs, fd, qiov->iov, qiov->niov, offset);
        break;
    /* Currently the Linux kernel does not support other operations */
    default:
        fprintf(stderr, "%s: invalid AIO request type 0x%x.\n",
                        __func__, type);
        return -EIO;
    }
    io_set_eventfd(&laiocb->iocb, event_notifier_get_fd(&s->e));

    QSIMPLEQ_INSERT_TAIL(&s->io_q.pending, laiocb, next);
    s->io_q.in_queue++;
    if (!s->io_q.blocked &&
        (!s->io_q.plugged ||
         s->io_q.in_queue >= max_batch)) {
        ioq_submit(s);
    }

    return 0;
}
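
/*
 * Worked example (added for illustration): with aio_max_batch unset the
 * batch limit is DEFAULT_MAX_BATCH (32).  If 1016 requests are already in
 * flight, MIN_NON_ZERO(1024 - 1016, 32) lowers it to 8, so even a plugged
 * queue is flushed once 8 requests are pending (provided io_q.blocked is
 * not set), rather than letting io_submit() run into the MAX_EVENTS
 * ceiling.
 */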

int coroutine_fn laio_co_submit(BlockDriverState *bs, LinuxAioState *s, int fd,
                                uint64_t offset, QEMUIOVector *qiov, int type)
{
    int ret;
    struct qemu_laiocb laiocb = {
        .co         = qemu_coroutine_self(),
        .nbytes     = qiov->size,
        .ctx        = s,
        .ret        = -EINPROGRESS,
        .is_read    = (type == QEMU_AIO_READ),
        .qiov       = qiov,
    };

    ret = laio_do_submit(fd, &laiocb, offset, type);
    if (ret < 0) {
        return ret;
    }

    if (laiocb.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }
    return laiocb.ret;
}
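
/*
 * Caller sketch (illustrative; the helper below is hypothetical, though in
 * QEMU the real call comes from the file-posix driver when aio=native is
 * selected):
 *
 *     static int coroutine_fn example_read(BlockDriverState *bs,
 *                                          LinuxAioState *aio, int fd,
 *                                          uint64_t offset,
 *                                          QEMUIOVector *qiov)
 *     {
 *         return laio_co_submit(bs, aio, fd, offset, qiov, QEMU_AIO_READ);
 *     }
 *
 * The calling coroutine yields inside laio_co_submit() and is re-entered
 * from qemu_laio_process_completion() once the kernel reports the result.
 */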

void laio_detach_aio_context(LinuxAioState *s, AioContext *old_context)
{
    aio_set_event_notifier(old_context, &s->e, false, NULL, NULL);
    qemu_bh_delete(s->completion_bh);
    s->aio_context = NULL;
}

void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context)
{
    s->aio_context = new_context;
    s->completion_bh = aio_bh_new(new_context, qemu_laio_completion_bh, s);
    aio_set_event_notifier(new_context, &s->e, false,
                           qemu_laio_completion_cb,
                           qemu_laio_poll_cb);
}

LinuxAioState *laio_init(Error **errp)
{
    int rc;
    LinuxAioState *s;

    s = g_malloc0(sizeof(*s));
    rc = event_notifier_init(&s->e, false);
    if (rc < 0) {
        error_setg_errno(errp, -rc, "failed to initialize event notifier");
        goto out_free_state;
    }

    rc = io_setup(MAX_EVENTS, &s->ctx);
    if (rc < 0) {
        error_setg_errno(errp, -rc, "failed to create linux AIO context");
        goto out_close_efd;
    }

    ioq_init(&s->io_q);

    return s;

out_close_efd:
    event_notifier_cleanup(&s->e);
out_free_state:
    g_free(s);
    return NULL;
}
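
/*
 * Lifecycle sketch (illustrative, based on how the functions in this file
 * fit together): a LinuxAioState is created once per AioContext and wired
 * up roughly like this:
 *
 *     LinuxAioState *s = laio_init(&err);
 *     laio_attach_aio_context(s, ctx);
 *     ... laio_co_submit() calls ...
 *     laio_detach_aio_context(s, ctx);
 *     laio_cleanup(s);
 */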

void laio_cleanup(LinuxAioState *s)
{
    event_notifier_cleanup(&s->e);

    if (io_destroy(s->ctx) != 0) {
        fprintf(stderr, "%s: destroy AIO context %p failed\n",
                        __func__, &s->ctx);
    }
    g_free(s);
}