cachepc-qemu

Fork of AMDESE/qemu with changes for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu
Log | Files | Refs | Submodules | LICENSE | sfeed.txt

vhost_net.c (13182B)


      1/*
      2 * vhost-net support
      3 *
      4 * Copyright Red Hat, Inc. 2010
      5 *
      6 * Authors:
      7 *  Michael S. Tsirkin <mst@redhat.com>
      8 *
      9 * This work is licensed under the terms of the GNU GPL, version 2.  See
     10 * the COPYING file in the top-level directory.
     11 *
     12 * Contributions after 2012-01-13 are licensed under the terms of the
     13 * GNU GPL, version 2 or (at your option) any later version.
     14 */
     15
     16#include "qemu/osdep.h"
     17#include "net/net.h"
     18#include "net/tap.h"
     19#include "net/vhost-user.h"
     20#include "net/vhost-vdpa.h"
     21
     22#include "standard-headers/linux/vhost_types.h"
     23#include "hw/virtio/virtio-net.h"
     24#include "net/vhost_net.h"
     25#include "qapi/error.h"
     26#include "qemu/error-report.h"
     27#include "qemu/main-loop.h"
     28
     29#include <sys/socket.h>
     30#include <net/if.h>
     31#include <netinet/in.h>
     32
     33
     34#include "standard-headers/linux/virtio_ring.h"
     35#include "hw/virtio/vhost.h"
     36#include "hw/virtio/virtio-bus.h"
     37
     38
/*
 * Features supported by host kernel (tap / vhost-net backend).
 * Only bits listed here may be negotiated; the table is scanned up to
 * the VHOST_INVALID_FEATURE_BIT terminator.
 */
static const int kernel_feature_bits[] = {
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_RING_F_INDIRECT_DESC,
    VIRTIO_RING_F_EVENT_IDX,
    VIRTIO_NET_F_MRG_RXBUF,
    VIRTIO_F_VERSION_1,
    VIRTIO_NET_F_MTU,
    VIRTIO_F_IOMMU_PLATFORM,
    VIRTIO_F_RING_PACKED,
    VIRTIO_NET_F_HASH_REPORT,
    VHOST_INVALID_FEATURE_BIT /* terminator, must stay last */
};
     52
/*
 * Features supported by others (vhost-user backends).
 * Only bits listed here may be negotiated; the table is scanned up to
 * the VHOST_INVALID_FEATURE_BIT terminator.
 */
static const int user_feature_bits[] = {
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_RING_F_INDIRECT_DESC,
    VIRTIO_RING_F_EVENT_IDX,

    VIRTIO_F_ANY_LAYOUT,
    VIRTIO_F_VERSION_1,
    VIRTIO_NET_F_CSUM,
    VIRTIO_NET_F_GUEST_CSUM,
    VIRTIO_NET_F_GSO,
    VIRTIO_NET_F_GUEST_TSO4,
    VIRTIO_NET_F_GUEST_TSO6,
    VIRTIO_NET_F_GUEST_ECN,
    VIRTIO_NET_F_GUEST_UFO,
    VIRTIO_NET_F_HOST_TSO4,
    VIRTIO_NET_F_HOST_TSO6,
    VIRTIO_NET_F_HOST_ECN,
    VIRTIO_NET_F_HOST_UFO,
    VIRTIO_NET_F_MRG_RXBUF,
    VIRTIO_NET_F_MTU,
    VIRTIO_F_IOMMU_PLATFORM,
    VIRTIO_F_RING_PACKED,
    VIRTIO_NET_F_RSS,
    VIRTIO_NET_F_HASH_REPORT,

    /* This bit implies RARP isn't sent by QEMU out of band */
    VIRTIO_NET_F_GUEST_ANNOUNCE,

    VIRTIO_NET_F_MQ,

    VHOST_INVALID_FEATURE_BIT /* terminator, must stay last */
};
     86
     87static const int *vhost_net_get_feature_bits(struct vhost_net *net)
     88{
     89    const int *feature_bits = 0;
     90
     91    switch (net->nc->info->type) {
     92    case NET_CLIENT_DRIVER_TAP:
     93        feature_bits = kernel_feature_bits;
     94        break;
     95    case NET_CLIENT_DRIVER_VHOST_USER:
     96        feature_bits = user_feature_bits;
     97        break;
     98#ifdef CONFIG_VHOST_NET_VDPA
     99    case NET_CLIENT_DRIVER_VHOST_VDPA:
    100        feature_bits = vdpa_feature_bits;
    101        break;
    102#endif
    103    default:
    104        error_report("Feature bits not defined for this type: %d",
    105                net->nc->info->type);
    106        break;
    107    }
    108
    109    return feature_bits;
    110}
    111
    112uint64_t vhost_net_get_features(struct vhost_net *net, uint64_t features)
    113{
    114    return vhost_get_features(&net->dev, vhost_net_get_feature_bits(net),
    115            features);
    116}
    117int vhost_net_get_config(struct vhost_net *net,  uint8_t *config,
    118                         uint32_t config_len)
    119{
    120    return vhost_dev_get_config(&net->dev, config, config_len, NULL);
    121}
    122int vhost_net_set_config(struct vhost_net *net, const uint8_t *data,
    123                         uint32_t offset, uint32_t size, uint32_t flags)
    124{
    125    return vhost_dev_set_config(&net->dev, data, offset, size, flags);
    126}
    127
/*
 * Record the guest-acked feature set.  Seed acked_features with the
 * backend's own required bits first (e.g. VHOST_NET_F_VIRTIO_NET_HDR
 * set in vhost_net_init()) so they survive, then OR in the valid
 * guest-acked bits via the per-backend feature table.
 */
void vhost_net_ack_features(struct vhost_net *net, uint64_t features)
{
    net->dev.acked_features = net->dev.backend_features;
    vhost_ack_features(&net->dev, vhost_net_get_feature_bits(net), features);
}
    133
    134uint64_t vhost_net_get_max_queues(VHostNetState *net)
    135{
    136    return net->dev.max_queues;
    137}
    138
    139uint64_t vhost_net_get_acked_features(VHostNetState *net)
    140{
    141    return net->dev.acked_features;
    142}
    143
    144static int vhost_net_get_fd(NetClientState *backend)
    145{
    146    switch (backend->info->type) {
    147    case NET_CLIENT_DRIVER_TAP:
    148        return tap_get_fd(backend);
    149    default:
    150        fprintf(stderr, "vhost-net requires tap backend\n");
    151        return -ENOSYS;
    152    }
    153}
    154
/*
 * Allocate and initialise a vhost_net for @options->net_backend.
 *
 * Kernel (tap) backends: the tap fd becomes the vhost backend fd, and
 * VHOST_NET_F_VIRTIO_NET_HDR is forced when the tap device cannot
 * supply a vnet header itself.  Other backends (user/vdpa): no fd,
 * and vq_index is derived from the client's queue pair index.
 *
 * Returns the new vhost_net, or NULL on failure (the allocation is
 * freed on the failure path).
 */
struct vhost_net *vhost_net_init(VhostNetOptions *options)
{
    int r;
    bool backend_kernel = options->backend_type == VHOST_BACKEND_TYPE_KERNEL;
    struct vhost_net *net = g_new0(struct vhost_net, 1);
    uint64_t features = 0;
    Error *local_err = NULL;

    if (!options->net_backend) {
        fprintf(stderr, "vhost-net requires net backend to be setup\n");
        goto fail;
    }
    net->nc = options->net_backend;
    net->dev.nvqs = options->nvqs;

    net->dev.max_queues = 1;
    net->dev.vqs = net->vqs;

    if (backend_kernel) {
        r = vhost_net_get_fd(options->net_backend);
        if (r < 0) {
            goto fail;
        }
        /* If tap has no vnet header support, vhost must strip/insert
         * the virtio-net header itself. */
        net->dev.backend_features = qemu_has_vnet_hdr(options->net_backend)
            ? 0 : (1ULL << VHOST_NET_F_VIRTIO_NET_HDR);
        net->backend = r;
        net->dev.protocol_features = 0;
    } else {
        net->dev.backend_features = 0;
        net->dev.protocol_features = 0;
        net->backend = -1;

        /* vhost-user needs vq_index to initiate a specific queue pair */
        net->dev.vq_index = net->nc->queue_index * net->dev.nvqs;
    }

    r = vhost_dev_init(&net->dev, options->opaque,
                       options->backend_type, options->busyloop_timeout,
                       &local_err);
    if (r < 0) {
        error_report_err(local_err);
        goto fail;
    }
    if (backend_kernel) {
        /* Without big-enough vnet headers, mergeable rx buffers
         * cannot be offered. */
        if (!qemu_has_vnet_hdr_len(options->net_backend,
                               sizeof(struct virtio_net_hdr_mrg_rxbuf))) {
            net->dev.features &= ~(1ULL << VIRTIO_NET_F_MRG_RXBUF);
        }
        /* Every backend-required bit must be offered by vhost. */
        if (~net->dev.features & net->dev.backend_features) {
            fprintf(stderr, "vhost lacks feature mask %" PRIu64
                   " for backend\n",
                   (uint64_t)(~net->dev.features & net->dev.backend_features));
            goto fail;
        }
    }

    /* Set sane init value. Override when guest acks. */
#ifdef CONFIG_VHOST_NET_USER
    if (net->nc->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
        features = vhost_user_get_acked_features(net->nc);
        if (~net->dev.features & features) {
            fprintf(stderr, "vhost lacks feature mask %" PRIu64
                    " for backend\n",
                    (uint64_t)(~net->dev.features & features));
            goto fail;
        }
    }
#endif

    vhost_net_ack_features(net, features);

    return net;

fail:
    /* NOTE(review): this path also runs when vhost_dev_init() was never
     * reached; it relies on g_new0() having zeroed net->dev so that
     * vhost_dev_cleanup() is safe on an uninitialised dev — confirm. */
    vhost_dev_cleanup(&net->dev);
    g_free(net);
    return NULL;
}
    233
    234static void vhost_net_set_vq_index(struct vhost_net *net, int vq_index)
    235{
    236    net->dev.vq_index = vq_index;
    237}
    238
/*
 * Start vhost for one queue pair: enable host notifiers, start the
 * vhost device, stop QEMU-side polling of the backend, and (tap only)
 * attach the tap fd as backend for each enabled queue.
 *
 * Returns 0 on success or a negative error; on failure every step
 * completed so far is undone in reverse order.
 */
static int vhost_net_start_one(struct vhost_net *net,
                               VirtIODevice *dev)
{
    struct vhost_vring_file file = { };
    int r;

    r = vhost_dev_enable_notifiers(&net->dev, dev);
    if (r < 0) {
        goto fail_notifiers;
    }

    r = vhost_dev_start(&net->dev, dev);
    if (r < 0) {
        goto fail_start;
    }

    /* vhost owns the backend now; stop QEMU's own polling of it. */
    if (net->nc->info->poll) {
        net->nc->info->poll(net->nc, false);
    }

    if (net->nc->info->type == NET_CLIENT_DRIVER_TAP) {
        qemu_set_fd_handler(net->backend, NULL, NULL, NULL);
        file.fd = net->backend;
        for (file.index = 0; file.index < net->dev.nvqs; ++file.index) {
            if (!virtio_queue_enabled(dev, net->dev.vq_index +
                                      file.index)) {
                /* Queue might not be ready for start */
                continue;
            }
            r = vhost_net_set_backend(&net->dev, &file);
            if (r < 0) {
                r = -errno;
                goto fail;
            }
        }
    }
    return 0;
fail:
    /* fd == -1 detaches the backend from each queue attached above. */
    file.fd = -1;
    if (net->nc->info->type == NET_CLIENT_DRIVER_TAP) {
        while (file.index-- > 0) {
            if (!virtio_queue_enabled(dev, net->dev.vq_index +
                                      file.index)) {
                /* Queue might not be ready for start */
                continue;
            }
            int r = vhost_net_set_backend(&net->dev, &file);
            assert(r >= 0);
        }
    }
    /* Hand polling back to QEMU before tearing vhost down. */
    if (net->nc->info->poll) {
        net->nc->info->poll(net->nc, true);
    }
    vhost_dev_stop(&net->dev, dev);
fail_start:
    vhost_dev_disable_notifiers(&net->dev, dev);
fail_notifiers:
    return r;
}
    298
    299static void vhost_net_stop_one(struct vhost_net *net,
    300                               VirtIODevice *dev)
    301{
    302    struct vhost_vring_file file = { .fd = -1 };
    303
    304    if (net->nc->info->type == NET_CLIENT_DRIVER_TAP) {
    305        for (file.index = 0; file.index < net->dev.nvqs; ++file.index) {
    306            int r = vhost_net_set_backend(&net->dev, &file);
    307            assert(r >= 0);
    308        }
    309    }
    310    if (net->nc->info->poll) {
    311        net->nc->info->poll(net->nc, true);
    312    }
    313    vhost_dev_stop(&net->dev, dev);
    314    vhost_dev_disable_notifiers(&net->dev, dev);
    315}
    316
/*
 * Start vhost for all @total_queues queue pairs of @dev.
 *
 * Queue pair i starts at vq_index i * 2 (one RX and one TX queue per
 * pair, hence the "* 2" throughout).  Guest notifiers are bound
 * first, then each pair is started; on any failure the pairs already
 * started are stopped and the notifiers unbound.
 *
 * Returns 0 on success or a negative error.
 */
int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
                    int total_queues)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    struct vhost_net *net;
    int r, e, i;
    NetClientState *peer;

    if (!k->set_guest_notifiers) {
        error_report("binding does not support guest notifiers");
        return -ENOSYS;
    }

    for (i = 0; i < total_queues; i++) {

        peer = qemu_get_peer(ncs, i);
        net = get_vhost_net(peer);
        vhost_net_set_vq_index(net, i * 2);

        /* Suppress the masking guest notifiers on vhost user
         * because vhost user doesn't interrupt masking/unmasking
         * properly.
         */
        if (net->nc->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
            dev->use_guest_notifier_mask = false;
        }
     }

    r = k->set_guest_notifiers(qbus->parent, total_queues * 2, true);
    if (r < 0) {
        error_report("Error binding guest notifier: %d", -r);
        goto err;
    }

    for (i = 0; i < total_queues; i++) {
        peer = qemu_get_peer(ncs, i);
        r = vhost_net_start_one(get_vhost_net(peer), dev);

        if (r < 0) {
            goto err_start;
        }

        if (peer->vring_enable) {
            /* restore vring enable state */
            r = vhost_set_vring_enable(peer, peer->vring_enable);

            if (r < 0) {
                goto err_start;
            }
        }
    }

    return 0;

err_start:
    /* Unwind: stop every queue pair started before the failure. */
    while (--i >= 0) {
        peer = qemu_get_peer(ncs , i);
        vhost_net_stop_one(get_vhost_net(peer), dev);
    }
    e = k->set_guest_notifiers(qbus->parent, total_queues * 2, false);
    if (e < 0) {
        fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", e);
        fflush(stderr);
    }
err:
    return r;
}
    386
/*
 * Stop vhost on all @total_queues queue pairs of @dev and unbind the
 * guest notifiers (2 per pair).  Notifier-cleanup failure is fatal:
 * the message is printed and then the assert aborts.
 */
void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs,
                    int total_queues)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    int i, r;

    for (i = 0; i < total_queues; i++) {
        vhost_net_stop_one(get_vhost_net(ncs[i].peer), dev);
    }

    r = k->set_guest_notifiers(qbus->parent, total_queues * 2, false);
    if (r < 0) {
        fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r);
        fflush(stderr);
    }
    /* Print + flush above so the reason is visible before aborting. */
    assert(r >= 0);
}
    406
    407void vhost_net_cleanup(struct vhost_net *net)
    408{
    409    vhost_dev_cleanup(&net->dev);
    410}
    411
    412int vhost_net_notify_migration_done(struct vhost_net *net, char* mac_addr)
    413{
    414    const VhostOps *vhost_ops = net->dev.vhost_ops;
    415
    416    assert(vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
    417    assert(vhost_ops->vhost_migration_done);
    418
    419    return vhost_ops->vhost_migration_done(&net->dev, mac_addr);
    420}
    421
    422bool vhost_net_virtqueue_pending(VHostNetState *net, int idx)
    423{
    424    return vhost_virtqueue_pending(&net->dev, idx);
    425}
    426
    427void vhost_net_virtqueue_mask(VHostNetState *net, VirtIODevice *dev,
    428                              int idx, bool mask)
    429{
    430    vhost_virtqueue_mask(&net->dev, dev, idx, mask);
    431}
    432
    433VHostNetState *get_vhost_net(NetClientState *nc)
    434{
    435    VHostNetState *vhost_net = 0;
    436
    437    if (!nc) {
    438        return 0;
    439    }
    440
    441    switch (nc->info->type) {
    442    case NET_CLIENT_DRIVER_TAP:
    443        vhost_net = tap_get_vhost_net(nc);
    444        break;
    445#ifdef CONFIG_VHOST_NET_USER
    446    case NET_CLIENT_DRIVER_VHOST_USER:
    447        vhost_net = vhost_user_get_vhost_net(nc);
    448        assert(vhost_net);
    449        break;
    450#endif
    451#ifdef CONFIG_VHOST_NET_VDPA
    452    case NET_CLIENT_DRIVER_VHOST_VDPA:
    453        vhost_net = vhost_vdpa_get_vhost_net(nc);
    454        assert(vhost_net);
    455        break;
    456#endif
    457    default:
    458        break;
    459    }
    460
    461    return vhost_net;
    462}
    463
    464int vhost_set_vring_enable(NetClientState *nc, int enable)
    465{
    466    VHostNetState *net = get_vhost_net(nc);
    467    const VhostOps *vhost_ops = net->dev.vhost_ops;
    468
    469    nc->vring_enable = enable;
    470
    471    if (vhost_ops && vhost_ops->vhost_set_vring_enable) {
    472        return vhost_ops->vhost_set_vring_enable(&net->dev, enable);
    473    }
    474
    475    return 0;
    476}
    477
    478int vhost_net_set_mtu(struct vhost_net *net, uint16_t mtu)
    479{
    480    const VhostOps *vhost_ops = net->dev.vhost_ops;
    481
    482    if (!vhost_ops->vhost_net_set_mtu) {
    483        return 0;
    484    }
    485
    486    return vhost_ops->vhost_net_set_mtu(&net->dev, mtu);
    487}