cachepc-qemu

Fork of AMDESE/qemu with changes for the cachepc side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu

aio-win32.c (12810B)


/*
 * QEMU aio implementation
 *
 * Copyright IBM Corp., 2008
 * Copyright Red Hat Inc., 2012
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Paolo Bonzini     <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "block/block.h"
#include "qemu/main-loop.h"
#include "qemu/queue.h"
#include "qemu/sockets.h"
#include "qapi/error.h"
#include "qemu/rcu_queue.h"

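/*
 * One registered handler.  A node either wraps a socket (io_read/io_write
 * callbacks, readiness detected via select() and WSAEventSelect) or an
 * EventNotifier (io_notify callback, waited on with WaitForMultipleObjects).
 */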
struct AioHandler {
    EventNotifier *e;
    IOHandler *io_read;
    IOHandler *io_write;
    EventNotifierHandler *io_notify;
    GPollFD pfd;
    int deleted;
    void *opaque;
    bool is_external;
    QLIST_ENTRY(AioHandler) node;
};

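/*
 * Detach a handler from the GSource and free it.  If the handler list is
 * currently being walked (the list_lock count is non-zero), only mark the
 * node as deleted; it is reaped later in aio_dispatch_handlers().
 */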
static void aio_remove_fd_handler(AioContext *ctx, AioHandler *node)
{
    /*
     * If the GSource is in the process of being destroyed then
     * g_source_remove_poll() causes an assertion failure.  Skip
     * removal in that case, because glib cleans up its state during
     * destruction anyway.
     */
    if (!g_source_is_destroyed(&ctx->source)) {
        g_source_remove_poll(&ctx->source, &node->pfd);
    }

    /* If aio_poll is in progress, just mark the node as deleted */
    if (qemu_lockcnt_count(&ctx->list_lock)) {
        node->deleted = 1;
        node->pfd.revents = 0;
    } else {
        /* Otherwise, delete it for real.  We can't just mark it as
         * deleted because deleted nodes are only cleaned up after
         * releasing the list_lock.
         */
        QLIST_REMOVE(node, node);
        g_free(node);
    }
}

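/*
 * Register, update or (when io_read and io_write are both NULL) remove the
 * callbacks for a socket.  Network events on the socket are routed to the
 * context's EventNotifier handle via WSAEventSelect() so that aio_poll()
 * wakes up when the socket becomes ready.
 */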
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        bool is_external,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        void *opaque)
{
    /* fd is a SOCKET in our case */
    AioHandler *old_node;
    AioHandler *node = NULL;

    qemu_lockcnt_lock(&ctx->list_lock);
    QLIST_FOREACH(old_node, &ctx->aio_handlers, node) {
        if (old_node->pfd.fd == fd && !old_node->deleted) {
            break;
        }
    }

    if (io_read || io_write) {
        HANDLE event;
        long bitmask = 0;

        /* Alloc and insert if it's not already there */
        node = g_new0(AioHandler, 1);
        node->pfd.fd = fd;

        node->pfd.events = 0;
        /* node was just zero-allocated, so test the new callbacks directly */
        if (io_read) {
            node->pfd.events |= G_IO_IN;
        }
        if (io_write) {
            node->pfd.events |= G_IO_OUT;
        }

        node->e = &ctx->notifier;

        /* Update handler with latest information */
        node->opaque = opaque;
        node->io_read = io_read;
        node->io_write = io_write;
        node->is_external = is_external;

        if (io_read) {
            bitmask |= FD_READ | FD_ACCEPT | FD_CLOSE;
        }

        if (io_write) {
            bitmask |= FD_WRITE | FD_CONNECT;
        }

        QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node);
        event = event_notifier_get_handle(&ctx->notifier);
        WSAEventSelect(node->pfd.fd, event, bitmask);
    }
    if (old_node) {
        aio_remove_fd_handler(ctx, old_node);
    }

    qemu_lockcnt_unlock(&ctx->list_lock);
    aio_notify(ctx);
}

void aio_set_fd_poll(AioContext *ctx, int fd,
                     IOHandler *io_poll_begin,
                     IOHandler *io_poll_end)
{
    /* Not implemented */
}

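/*
 * Register or remove the callback for an EventNotifier.  The notifier's
 * Windows event handle is added to the GSource with g_source_add_poll()
 * and is waited on directly by aio_poll().
 */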
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *e,
                            bool is_external,
                            EventNotifierHandler *io_notify,
                            AioPollFn *io_poll)
{
    AioHandler *node;

    qemu_lockcnt_lock(&ctx->list_lock);
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->e == e && !node->deleted) {
            break;
        }
    }

    /* Are we deleting the fd handler? */
    if (!io_notify) {
        if (node) {
            aio_remove_fd_handler(ctx, node);
        }
    } else {
        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = g_new0(AioHandler, 1);
            node->e = e;
            node->pfd.fd = (uintptr_t)event_notifier_get_handle(e);
            node->pfd.events = G_IO_IN;
            node->is_external = is_external;
            QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node);

            g_source_add_poll(&ctx->source, &node->pfd);
        }
        /* Update handler with latest information */
        node->io_notify = io_notify;
    }

    qemu_lockcnt_unlock(&ctx->list_lock);
    aio_notify(ctx);
}

void aio_set_event_notifier_poll(AioContext *ctx,
                                 EventNotifier *notifier,
                                 EventNotifierHandler *io_poll_begin,
                                 EventNotifierHandler *io_poll_end)
{
    /* Not implemented */
}

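/*
 * Probe all registered sockets with a zero-timeout select() and record the
 * result in each node's pfd.revents.  Returns true if any socket already
 * has a pending read or write event.
 */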
bool aio_prepare(AioContext *ctx)
{
    static struct timeval tv0;
    AioHandler *node;
    bool have_select_revents = false;
    fd_set rfds, wfds;

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    qemu_lockcnt_inc(&ctx->list_lock);

    /* fill fd sets */
    FD_ZERO(&rfds);
    FD_ZERO(&wfds);
    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        if (node->io_read) {
            FD_SET ((SOCKET)node->pfd.fd, &rfds);
        }
        if (node->io_write) {
            FD_SET ((SOCKET)node->pfd.fd, &wfds);
        }
    }

    if (select(0, &rfds, &wfds, NULL, &tv0) > 0) {
        QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
            node->pfd.revents = 0;
            if (FD_ISSET(node->pfd.fd, &rfds)) {
                node->pfd.revents |= G_IO_IN;
                have_select_revents = true;
            }

            if (FD_ISSET(node->pfd.fd, &wfds)) {
                node->pfd.revents |= G_IO_OUT;
                have_select_revents = true;
            }
        }
    }

    qemu_lockcnt_dec(&ctx->list_lock);
    return have_select_revents;
}

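/*
 * Return true if any handler has an event recorded in pfd.revents that its
 * registered callback would consume, i.e. dispatching would make progress.
 */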
bool aio_pending(AioContext *ctx)
{
    AioHandler *node;
    bool result = false;

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    qemu_lockcnt_inc(&ctx->list_lock);
    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        if (node->pfd.revents && node->io_notify) {
            result = true;
            break;
        }

        if ((node->pfd.revents & G_IO_IN) && node->io_read) {
            result = true;
            break;
        }
        if ((node->pfd.revents & G_IO_OUT) && node->io_write) {
            result = true;
            break;
        }
    }

    qemu_lockcnt_dec(&ctx->list_lock);
    return result;
}

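/*
 * Walk the handler list once, invoking io_notify for the signalled event
 * handle and io_read/io_write for sockets whose revents were set by
 * aio_prepare().  Nodes marked deleted are freed here once no other walker
 * holds the list.  Returns true if any work was done (the context's own
 * notifier does not count as progress).
 */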
static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
{
    AioHandler *node;
    bool progress = false;
    AioHandler *tmp;

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    QLIST_FOREACH_SAFE_RCU(node, &ctx->aio_handlers, node, tmp) {
        int revents = node->pfd.revents;

        if (!node->deleted &&
            (revents || event_notifier_get_handle(node->e) == event) &&
            node->io_notify) {
            node->pfd.revents = 0;
            node->io_notify(node->e);

            /* aio_notify() does not count as progress */
            if (node->e != &ctx->notifier) {
                progress = true;
            }
        }

        if (!node->deleted &&
            (node->io_read || node->io_write)) {
            node->pfd.revents = 0;
            if ((revents & G_IO_IN) && node->io_read) {
                node->io_read(node->opaque);
                progress = true;
            }
            if ((revents & G_IO_OUT) && node->io_write) {
                node->io_write(node->opaque);
                progress = true;
            }

            /* if the next select() will return an event, we have progressed */
            if (event == event_notifier_get_handle(&ctx->notifier)) {
                WSANETWORKEVENTS ev;
                WSAEnumNetworkEvents(node->pfd.fd, event, &ev);
                if (ev.lNetworkEvents) {
                    progress = true;
                }
            }
        }

        if (node->deleted) {
            if (qemu_lockcnt_dec_if_lock(&ctx->list_lock)) {
                QLIST_REMOVE(node, node);
                g_free(node);
                qemu_lockcnt_inc_and_unlock(&ctx->list_lock);
            }
        }
    }

    return progress;
}

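/*
 * Run pending bottom halves, any handlers with recorded events, and
 * expired timers, without blocking.
 */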
void aio_dispatch(AioContext *ctx)
{
    qemu_lockcnt_inc(&ctx->list_lock);
    aio_bh_poll(ctx);
    aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE);
    qemu_lockcnt_dec(&ctx->list_lock);
    timerlistgroup_run_timers(&ctx->tlg);
}

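/*
 * Main event loop iteration: optionally block in WaitForMultipleObjects()
 * on the registered event handles, then dispatch bottom halves, ready
 * handlers and timers.  Only one signalled handle is returned per wait,
 * so the loop keeps iterating with a zero timeout until nothing is
 * pending.  Returns true if any progress was made.
 */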
bool aio_poll(AioContext *ctx, bool blocking)
{
    AioHandler *node;
    HANDLE events[MAXIMUM_WAIT_OBJECTS + 1];
    bool progress, have_select_revents, first;
    int count;
    int timeout;

    /*
     * There cannot be two concurrent aio_poll calls for the same AioContext (or
     * an aio_poll concurrent with a GSource prepare/check/dispatch callback).
     * We rely on this below to avoid slow locked accesses to ctx->notify_me.
     *
     * aio_poll() may only be called in the AioContext's thread. iohandler_ctx
     * is special in that it runs in the main thread, but that thread's context
     * is qemu_aio_context.
     */
    assert(in_aio_context_home_thread(ctx == iohandler_get_aio_context() ?
                                      qemu_get_aio_context() : ctx));
    progress = false;

    /* aio_notify can avoid the expensive event_notifier_set if
     * everything (file descriptors, bottom halves, timers) will
     * be re-evaluated before the next blocking poll().  This is
     * already true when aio_poll is called with blocking == false;
     * if blocking == true, it is only true after poll() returns,
     * so disable the optimization now.
     */
    if (blocking) {
        qatomic_set(&ctx->notify_me, qatomic_read(&ctx->notify_me) + 2);
        /*
         * Write ctx->notify_me before computing the timeout
         * (reading bottom half flags, etc.).  Pairs with
         * smp_mb in aio_notify().
         */
        smp_mb();
    }

    qemu_lockcnt_inc(&ctx->list_lock);
    have_select_revents = aio_prepare(ctx);

    /* fill fd sets */
    count = 0;
    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        if (!node->deleted && node->io_notify
            && aio_node_check(ctx, node->is_external)) {
            events[count++] = event_notifier_get_handle(node->e);
        }
    }

    first = true;

    /* ctx->notifier is always registered.  */
    assert(count > 0);

    /* Multiple iterations, all of them non-blocking except the first,
     * may be necessary to process all pending events.  After the first
     * WaitForMultipleObjects call ctx->notify_me will be decremented.
     */
    do {
        HANDLE event;
        int ret;

        timeout = blocking && !have_select_revents
            ? qemu_timeout_ns_to_ms(aio_compute_timeout(ctx)) : 0;
        ret = WaitForMultipleObjects(count, events, FALSE, timeout);
        if (blocking) {
            assert(first);
            qatomic_store_release(&ctx->notify_me,
                                  qatomic_read(&ctx->notify_me) - 2);
            aio_notify_accept(ctx);
        }

        if (first) {
            progress |= aio_bh_poll(ctx);
            first = false;
        }

        /* if we have any signaled events, dispatch event */
        event = NULL;
        if ((DWORD) (ret - WAIT_OBJECT_0) < count) {
            event = events[ret - WAIT_OBJECT_0];
            events[ret - WAIT_OBJECT_0] = events[--count];
        } else if (!have_select_revents) {
            break;
        }

        have_select_revents = false;
        blocking = false;

        progress |= aio_dispatch_handlers(ctx, event);
    } while (count > 0);

    qemu_lockcnt_dec(&ctx->list_lock);

    progress |= timerlistgroup_run_timers(&ctx->tlg);
    return progress;
}

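/*
 * The remaining AioContext hooks are stubs: the Windows backend has no
 * per-context setup or teardown and does not implement adaptive polling
 * or request batching.
 */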
void aio_context_setup(AioContext *ctx)
{
}

void aio_context_destroy(AioContext *ctx)
{
}

void aio_context_use_g_source(AioContext *ctx)
{
}

void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
                                 int64_t grow, int64_t shrink, Error **errp)
{
    if (max_ns) {
        error_setg(errp, "AioContext polling is not implemented on Windows");
    }
}

void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch,
                                Error **errp)
{
}