cachepc-qemu

Fork of AMDESE/qemu with changes for the cachepc side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu

vhost-backend.c (12613B)


/*
 * vhost-backend
 *
 * Copyright (c) 2013 Virtual Open Systems Sarl.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-backend.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "standard-headers/linux/vhost_types.h"

#include "hw/virtio/vhost-vdpa.h"
#ifdef CONFIG_VHOST_KERNEL
#include <linux/vhost.h>
#include <sys/ioctl.h>

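/*
 * In-kernel vhost backend: dev->opaque stores the vhost device file
 * descriptor, and each backend operation below is a thin ioctl() wrapper
 * around it.
 */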
static int vhost_kernel_call(struct vhost_dev *dev, unsigned long int request,
                             void *arg)
{
    int fd = (uintptr_t) dev->opaque;
    int ret;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_KERNEL);

    ret = ioctl(fd, request, arg);
    return ret < 0 ? -errno : ret;
}

static int vhost_kernel_init(struct vhost_dev *dev, void *opaque, Error **errp)
{
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_KERNEL);

    dev->opaque = opaque;

    return 0;
}

static int vhost_kernel_cleanup(struct vhost_dev *dev)
{
    int fd = (uintptr_t) dev->opaque;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_KERNEL);

    return close(fd);
}

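/*
 * Memory-region limit for VHOST_SET_MEM_TABLE, taken from the vhost module's
 * max_mem_regions parameter when available, with 64 as the fallback.
 */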
static int vhost_kernel_memslots_limit(struct vhost_dev *dev)
{
    int limit = 64;
    char *s;

    if (g_file_get_contents("/sys/module/vhost/parameters/max_mem_regions",
                            &s, NULL, NULL)) {
        uint64_t val;

        /* clear errno so the check below does not trip on a stale value */
        errno = 0;
        val = g_ascii_strtoull(s, NULL, 10);
        if (!((val == G_MAXUINT64 || !val) && errno)) {
            g_free(s);
            return val;
        }
        error_report("ignoring invalid max_mem_regions value in vhost module:"
                     " %s", s);
    }
    g_free(s);
    return limit;
}

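/*
 * The callbacks below forward each VhostOps request to the corresponding
 * VHOST_* ioctl on the backend fd.
 */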
static int vhost_kernel_net_set_backend(struct vhost_dev *dev,
                                        struct vhost_vring_file *file)
{
    return vhost_kernel_call(dev, VHOST_NET_SET_BACKEND, file);
}

static int vhost_kernel_scsi_set_endpoint(struct vhost_dev *dev,
                                          struct vhost_scsi_target *target)
{
    return vhost_kernel_call(dev, VHOST_SCSI_SET_ENDPOINT, target);
}

static int vhost_kernel_scsi_clear_endpoint(struct vhost_dev *dev,
                                            struct vhost_scsi_target *target)
{
    return vhost_kernel_call(dev, VHOST_SCSI_CLEAR_ENDPOINT, target);
}

static int vhost_kernel_scsi_get_abi_version(struct vhost_dev *dev, int *version)
{
    return vhost_kernel_call(dev, VHOST_SCSI_GET_ABI_VERSION, version);
}

static int vhost_kernel_set_log_base(struct vhost_dev *dev, uint64_t base,
                                     struct vhost_log *log)
{
    return vhost_kernel_call(dev, VHOST_SET_LOG_BASE, &base);
}

static int vhost_kernel_set_mem_table(struct vhost_dev *dev,
                                      struct vhost_memory *mem)
{
    return vhost_kernel_call(dev, VHOST_SET_MEM_TABLE, mem);
}

static int vhost_kernel_set_vring_addr(struct vhost_dev *dev,
                                       struct vhost_vring_addr *addr)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_ADDR, addr);
}

static int vhost_kernel_set_vring_endian(struct vhost_dev *dev,
                                         struct vhost_vring_state *ring)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_ENDIAN, ring);
}

static int vhost_kernel_set_vring_num(struct vhost_dev *dev,
                                      struct vhost_vring_state *ring)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_NUM, ring);
}

static int vhost_kernel_set_vring_base(struct vhost_dev *dev,
                                       struct vhost_vring_state *ring)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_BASE, ring);
}

static int vhost_kernel_get_vring_base(struct vhost_dev *dev,
                                       struct vhost_vring_state *ring)
{
    return vhost_kernel_call(dev, VHOST_GET_VRING_BASE, ring);
}

static int vhost_kernel_set_vring_kick(struct vhost_dev *dev,
                                       struct vhost_vring_file *file)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_KICK, file);
}

static int vhost_kernel_set_vring_call(struct vhost_dev *dev,
                                       struct vhost_vring_file *file)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_CALL, file);
}

static int vhost_kernel_set_vring_busyloop_timeout(struct vhost_dev *dev,
                                                   struct vhost_vring_state *s)
{
    return vhost_kernel_call(dev, VHOST_SET_VRING_BUSYLOOP_TIMEOUT, s);
}

static int vhost_kernel_set_features(struct vhost_dev *dev,
                                     uint64_t features)
{
    return vhost_kernel_call(dev, VHOST_SET_FEATURES, &features);
}

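/*
 * Negotiate backend features: only IOTLB_MSG_V2 is requested, and whatever
 * the kernel acks is recorded in dev->backend_cap.  Errors are ignored
 * because backend features are optional.
 */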
static int vhost_kernel_set_backend_cap(struct vhost_dev *dev)
{
    uint64_t features;
    uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2;
    int r;

    if (vhost_kernel_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) {
        return 0;
    }

    features &= f;
    r = vhost_kernel_call(dev, VHOST_SET_BACKEND_FEATURES,
                              &features);
    if (r) {
        return 0;
    }

    dev->backend_cap = features;

    return 0;
}

static int vhost_kernel_get_features(struct vhost_dev *dev,
                                     uint64_t *features)
{
    return vhost_kernel_call(dev, VHOST_GET_FEATURES, features);
}

static int vhost_kernel_set_owner(struct vhost_dev *dev)
{
    return vhost_kernel_call(dev, VHOST_SET_OWNER, NULL);
}

static int vhost_kernel_reset_device(struct vhost_dev *dev)
{
    return vhost_kernel_call(dev, VHOST_RESET_OWNER, NULL);
}

static int vhost_kernel_get_vq_index(struct vhost_dev *dev, int idx)
{
    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

    return idx - dev->vq_index;
}

#ifdef CONFIG_VHOST_VSOCK
static int vhost_kernel_vsock_set_guest_cid(struct vhost_dev *dev,
                                            uint64_t guest_cid)
{
    return vhost_kernel_call(dev, VHOST_VSOCK_SET_GUEST_CID, &guest_cid);
}

static int vhost_kernel_vsock_set_running(struct vhost_dev *dev, int start)
{
    return vhost_kernel_call(dev, VHOST_VSOCK_SET_RUNNING, &start);
}
#endif /* CONFIG_VHOST_VSOCK */

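/*
 * Main-loop read handler for the backend fd: drain IOTLB messages sent by
 * the kernel and pass them to vhost_backend_handle_iotlb_msg() below.  The
 * message layout depends on whether IOTLB_MSG_V2 was negotiated.
 */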
static void vhost_kernel_iotlb_read(void *opaque)
{
    struct vhost_dev *dev = opaque;
    ssize_t len;

    if (dev->backend_cap &
        (0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)) {
        struct vhost_msg_v2 msg;

        while ((len = read((uintptr_t)dev->opaque, &msg, sizeof msg)) > 0) {
            if (len < sizeof msg) {
                error_report("Wrong vhost message len: %d", (int)len);
                break;
            }
            if (msg.type != VHOST_IOTLB_MSG_V2) {
                error_report("Unknown vhost iotlb message type");
                break;
            }

            vhost_backend_handle_iotlb_msg(dev, &msg.iotlb);
        }
    } else {
        struct vhost_msg msg;

        while ((len = read((uintptr_t)dev->opaque, &msg, sizeof msg)) > 0) {
            if (len < sizeof msg) {
                error_report("Wrong vhost message len: %d", (int)len);
                break;
            }
            if (msg.type != VHOST_IOTLB_MSG) {
                error_report("Unknown vhost iotlb message type");
                break;
            }

            vhost_backend_handle_iotlb_msg(dev, &msg.iotlb);
        }
    }
}

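/*
 * Push an IOTLB update or invalidation to the kernel by writing a vhost_msg
 * (or vhost_msg_v2) to the backend fd.
 */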
static int vhost_kernel_send_device_iotlb_msg(struct vhost_dev *dev,
                                              struct vhost_iotlb_msg *imsg)
{
    if (dev->backend_cap & (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)) {
        struct vhost_msg_v2 msg = {};

        msg.type = VHOST_IOTLB_MSG_V2;
        msg.iotlb = *imsg;

        if (write((uintptr_t)dev->opaque, &msg, sizeof msg) != sizeof msg) {
            error_report("Failed to update device iotlb");
            return -EFAULT;
        }
    } else {
        struct vhost_msg msg = {};

        msg.type = VHOST_IOTLB_MSG;
        msg.iotlb = *imsg;

        if (write((uintptr_t)dev->opaque, &msg, sizeof msg) != sizeof msg) {
            error_report("Failed to update device iotlb");
            return -EFAULT;
        }
    }

    return 0;
}

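/*
 * Install or remove vhost_kernel_iotlb_read() as the read handler on the
 * backend fd.
 */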
static void vhost_kernel_set_iotlb_callback(struct vhost_dev *dev,
                                            int enabled)
{
    if (enabled)
        qemu_set_fd_handler((uintptr_t)dev->opaque,
                            vhost_kernel_iotlb_read, NULL, dev);
    else
        qemu_set_fd_handler((uintptr_t)dev->opaque, NULL, NULL, NULL);
}

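/* Dispatch table for the in-kernel vhost backend. */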
const VhostOps kernel_ops = {
        .backend_type = VHOST_BACKEND_TYPE_KERNEL,
        .vhost_backend_init = vhost_kernel_init,
        .vhost_backend_cleanup = vhost_kernel_cleanup,
        .vhost_backend_memslots_limit = vhost_kernel_memslots_limit,
        .vhost_net_set_backend = vhost_kernel_net_set_backend,
        .vhost_scsi_set_endpoint = vhost_kernel_scsi_set_endpoint,
        .vhost_scsi_clear_endpoint = vhost_kernel_scsi_clear_endpoint,
        .vhost_scsi_get_abi_version = vhost_kernel_scsi_get_abi_version,
        .vhost_set_log_base = vhost_kernel_set_log_base,
        .vhost_set_mem_table = vhost_kernel_set_mem_table,
        .vhost_set_vring_addr = vhost_kernel_set_vring_addr,
        .vhost_set_vring_endian = vhost_kernel_set_vring_endian,
        .vhost_set_vring_num = vhost_kernel_set_vring_num,
        .vhost_set_vring_base = vhost_kernel_set_vring_base,
        .vhost_get_vring_base = vhost_kernel_get_vring_base,
        .vhost_set_vring_kick = vhost_kernel_set_vring_kick,
        .vhost_set_vring_call = vhost_kernel_set_vring_call,
        .vhost_set_vring_busyloop_timeout =
                                vhost_kernel_set_vring_busyloop_timeout,
        .vhost_set_features = vhost_kernel_set_features,
        .vhost_get_features = vhost_kernel_get_features,
        .vhost_set_backend_cap = vhost_kernel_set_backend_cap,
        .vhost_set_owner = vhost_kernel_set_owner,
        .vhost_reset_device = vhost_kernel_reset_device,
        .vhost_get_vq_index = vhost_kernel_get_vq_index,
#ifdef CONFIG_VHOST_VSOCK
        .vhost_vsock_set_guest_cid = vhost_kernel_vsock_set_guest_cid,
        .vhost_vsock_set_running = vhost_kernel_vsock_set_running,
#endif /* CONFIG_VHOST_VSOCK */
        .vhost_set_iotlb_callback = vhost_kernel_set_iotlb_callback,
        .vhost_send_device_iotlb_msg = vhost_kernel_send_device_iotlb_msg,
};
#endif

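/*
 * Backend-independent helpers used by the vhost core to keep the device
 * IOTLB in sync via the backend's vhost_send_device_iotlb_msg callback.
 */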
int vhost_backend_update_device_iotlb(struct vhost_dev *dev,
                                             uint64_t iova, uint64_t uaddr,
                                             uint64_t len,
                                             IOMMUAccessFlags perm)
{
    struct vhost_iotlb_msg imsg;

    imsg.iova = iova;
    imsg.uaddr = uaddr;
    imsg.size = len;
    imsg.type = VHOST_IOTLB_UPDATE;

    switch (perm) {
    case IOMMU_RO:
        imsg.perm = VHOST_ACCESS_RO;
        break;
    case IOMMU_WO:
        imsg.perm = VHOST_ACCESS_WO;
        break;
    case IOMMU_RW:
        imsg.perm = VHOST_ACCESS_RW;
        break;
    default:
        return -EINVAL;
    }

    if (dev->vhost_ops && dev->vhost_ops->vhost_send_device_iotlb_msg)
        return dev->vhost_ops->vhost_send_device_iotlb_msg(dev, &imsg);

    return -ENODEV;
}

int vhost_backend_invalidate_device_iotlb(struct vhost_dev *dev,
                                                 uint64_t iova, uint64_t len)
{
    struct vhost_iotlb_msg imsg;

    imsg.iova = iova;
    imsg.size = len;
    imsg.type = VHOST_IOTLB_INVALIDATE;

    if (dev->vhost_ops && dev->vhost_ops->vhost_send_device_iotlb_msg)
        return dev->vhost_ops->vhost_send_device_iotlb_msg(dev, &imsg);

    return -ENODEV;
}

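/*
 * Common handler for IOTLB messages received from the backend.  Only
 * translation misses and access failures are valid here; updates and
 * invalidations are rejected as unexpected.
 */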
int vhost_backend_handle_iotlb_msg(struct vhost_dev *dev,
                                          struct vhost_iotlb_msg *imsg)
{
    int ret = 0;

    if (unlikely(!dev->vdev)) {
        error_report("Unexpected IOTLB message when virtio device is stopped");
        return -EINVAL;
    }

    switch (imsg->type) {
    case VHOST_IOTLB_MISS:
        ret = vhost_device_iotlb_miss(dev, imsg->iova,
                                      imsg->perm != VHOST_ACCESS_RO);
        break;
    case VHOST_IOTLB_ACCESS_FAIL:
        /* FIXME: report device iotlb error */
        error_report("Access failure IOTLB message type not supported");
        ret = -ENOTSUP;
        break;
    case VHOST_IOTLB_UPDATE:
    case VHOST_IOTLB_INVALIDATE:
    default:
        error_report("Unexpected IOTLB message type");
        ret = -EINVAL;
        break;
    }

    return ret;
}