cachepc-qemu

Fork of AMDESE/qemu with changes for the cachepc side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu

vhost-user-gpu.c (17576B)


/*
 * vhost-user GPU Device
 *
 * Copyright Red Hat, Inc. 2018
 *
 * Authors:
 *     Marc-André Lureau <marcandre.lureau@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "hw/qdev-properties.h"
#include "hw/virtio/virtio-gpu.h"
#include "chardev/char-fe.h"
#include "qapi/error.h"
#include "migration/blocker.h"

typedef enum VhostUserGpuRequest {
    VHOST_USER_GPU_NONE = 0,
    VHOST_USER_GPU_GET_PROTOCOL_FEATURES,
    VHOST_USER_GPU_SET_PROTOCOL_FEATURES,
    VHOST_USER_GPU_GET_DISPLAY_INFO,
    VHOST_USER_GPU_CURSOR_POS,
    VHOST_USER_GPU_CURSOR_POS_HIDE,
    VHOST_USER_GPU_CURSOR_UPDATE,
    VHOST_USER_GPU_SCANOUT,
    VHOST_USER_GPU_UPDATE,
    VHOST_USER_GPU_DMABUF_SCANOUT,
    VHOST_USER_GPU_DMABUF_UPDATE,
} VhostUserGpuRequest;

typedef struct VhostUserGpuDisplayInfoReply {
    struct virtio_gpu_resp_display_info info;
} VhostUserGpuDisplayInfoReply;

typedef struct VhostUserGpuCursorPos {
    uint32_t scanout_id;
    uint32_t x;
    uint32_t y;
} QEMU_PACKED VhostUserGpuCursorPos;

typedef struct VhostUserGpuCursorUpdate {
    VhostUserGpuCursorPos pos;
    uint32_t hot_x;
    uint32_t hot_y;
    uint32_t data[64 * 64];
} QEMU_PACKED VhostUserGpuCursorUpdate;

typedef struct VhostUserGpuScanout {
    uint32_t scanout_id;
    uint32_t width;
    uint32_t height;
} QEMU_PACKED VhostUserGpuScanout;

typedef struct VhostUserGpuUpdate {
    uint32_t scanout_id;
    uint32_t x;
    uint32_t y;
    uint32_t width;
    uint32_t height;
    uint8_t data[];
} QEMU_PACKED VhostUserGpuUpdate;

typedef struct VhostUserGpuDMABUFScanout {
    uint32_t scanout_id;
    uint32_t x;
    uint32_t y;
    uint32_t width;
    uint32_t height;
    uint32_t fd_width;
    uint32_t fd_height;
    uint32_t fd_stride;
    uint32_t fd_flags;
    int fd_drm_fourcc;
} QEMU_PACKED VhostUserGpuDMABUFScanout;

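/*
 * Message exchanged with the vhost-user-gpu backend over its dedicated
 * socket: a fixed header (request, flags, size) followed by a
 * request-specific payload.
 */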
typedef struct VhostUserGpuMsg {
    uint32_t request; /* VhostUserGpuRequest */
    uint32_t flags;
    uint32_t size; /* the following payload size */
    union {
        VhostUserGpuCursorPos cursor_pos;
        VhostUserGpuCursorUpdate cursor_update;
        VhostUserGpuScanout scanout;
        VhostUserGpuUpdate update;
        VhostUserGpuDMABUFScanout dmabuf_scanout;
        struct virtio_gpu_resp_display_info display_info;
        uint64_t u64;
    } payload;
} QEMU_PACKED VhostUserGpuMsg;

static VhostUserGpuMsg m __attribute__ ((unused));
#define VHOST_USER_GPU_HDR_SIZE \
    (sizeof(m.request) + sizeof(m.size) + sizeof(m.flags))

#define VHOST_USER_GPU_MSG_FLAG_REPLY 0x4

static void vhost_user_gpu_update_blocked(VhostUserGPU *g, bool blocked);

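/*
 * Apply a cursor request from the backend to the addressed scanout:
 * CURSOR_UPDATE installs a new 64x64 cursor image, then the cursor is
 * moved (or hidden for CURSOR_POS_HIDE).
 */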
static void
vhost_user_gpu_handle_cursor(VhostUserGPU *g, VhostUserGpuMsg *msg)
{
    VhostUserGpuCursorPos *pos = &msg->payload.cursor_pos;
    struct virtio_gpu_scanout *s;

    if (pos->scanout_id >= g->parent_obj.conf.max_outputs) {
        return;
    }
    s = &g->parent_obj.scanout[pos->scanout_id];

    if (msg->request == VHOST_USER_GPU_CURSOR_UPDATE) {
        VhostUserGpuCursorUpdate *up = &msg->payload.cursor_update;
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = up->hot_x;
        s->current_cursor->hot_y = up->hot_y;

        memcpy(s->current_cursor->data, up->data,
               64 * 64 * sizeof(uint32_t));

        dpy_cursor_define(s->con, s->current_cursor);
    }

    dpy_mouse_set(s->con, pos->x, pos->y,
                  msg->request != VHOST_USER_GPU_CURSOR_POS_HIDE);
}

static void
vhost_user_gpu_send_msg(VhostUserGPU *g, const VhostUserGpuMsg *msg)
{
    qemu_chr_fe_write(&g->vhost_chr, (uint8_t *)msg,
                      VHOST_USER_GPU_HDR_SIZE + msg->size);
}

static void
vhost_user_gpu_unblock(VhostUserGPU *g)
{
    VhostUserGpuMsg msg = {
        .request = VHOST_USER_GPU_DMABUF_UPDATE,
        .flags = VHOST_USER_GPU_MSG_FLAG_REPLY,
    };

    vhost_user_gpu_send_msg(g, &msg);
}

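/*
 * Handle the display-related backend requests: protocol feature
 * negotiation, display info queries, scanout setup and (dmabuf) updates.
 */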
static void
vhost_user_gpu_handle_display(VhostUserGPU *g, VhostUserGpuMsg *msg)
{
    QemuConsole *con = NULL;
    struct virtio_gpu_scanout *s;

    switch (msg->request) {
    case VHOST_USER_GPU_GET_PROTOCOL_FEATURES: {
        VhostUserGpuMsg reply = {
            .request = msg->request,
            .flags = VHOST_USER_GPU_MSG_FLAG_REPLY,
            .size = sizeof(uint64_t),
        };

        vhost_user_gpu_send_msg(g, &reply);
        break;
    }
    case VHOST_USER_GPU_SET_PROTOCOL_FEATURES: {
        break;
    }
    case VHOST_USER_GPU_GET_DISPLAY_INFO: {
        struct virtio_gpu_resp_display_info display_info = { {} };
        VhostUserGpuMsg reply = {
            .request = msg->request,
            .flags = VHOST_USER_GPU_MSG_FLAG_REPLY,
            .size = sizeof(struct virtio_gpu_resp_display_info),
        };

        display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
        virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
        memcpy(&reply.payload.display_info, &display_info,
               sizeof(display_info));
        vhost_user_gpu_send_msg(g, &reply);
        break;
    }
    case VHOST_USER_GPU_SCANOUT: {
        VhostUserGpuScanout *m = &msg->payload.scanout;

        if (m->scanout_id >= g->parent_obj.conf.max_outputs) {
            return;
        }

        g->parent_obj.enable = 1;
        s = &g->parent_obj.scanout[m->scanout_id];
        con = s->con;

        if (m->width == 0) {
            dpy_gfx_replace_surface(con, NULL);
        } else {
            s->ds = qemu_create_displaysurface(m->width, m->height);
            /* replace surface on next update */
        }

        break;
    }
    case VHOST_USER_GPU_DMABUF_SCANOUT: {
        VhostUserGpuDMABUFScanout *m = &msg->payload.dmabuf_scanout;
        int fd = qemu_chr_fe_get_msgfd(&g->vhost_chr);
        QemuDmaBuf *dmabuf;

        if (m->scanout_id >= g->parent_obj.conf.max_outputs) {
            error_report("invalid scanout: %d", m->scanout_id);
            if (fd >= 0) {
                close(fd);
            }
            break;
        }

        g->parent_obj.enable = 1;
        con = g->parent_obj.scanout[m->scanout_id].con;
        dmabuf = &g->dmabuf[m->scanout_id];
        if (dmabuf->fd >= 0) {
            close(dmabuf->fd);
            dmabuf->fd = -1;
        }
        dpy_gl_release_dmabuf(con, dmabuf);
        if (fd == -1) {
            dpy_gl_scanout_disable(con);
            break;
        }
        *dmabuf = (QemuDmaBuf) {
            .fd = fd,
            .width = m->fd_width,
            .height = m->fd_height,
            .stride = m->fd_stride,
            .fourcc = m->fd_drm_fourcc,
            .y0_top = m->fd_flags & VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP,
        };
        dpy_gl_scanout_dmabuf(con, dmabuf);
        break;
    }
    case VHOST_USER_GPU_DMABUF_UPDATE: {
        VhostUserGpuUpdate *m = &msg->payload.update;

        if (m->scanout_id >= g->parent_obj.conf.max_outputs ||
            !g->parent_obj.scanout[m->scanout_id].con) {
            error_report("invalid scanout update: %d", m->scanout_id);
            vhost_user_gpu_unblock(g);
            break;
        }

        con = g->parent_obj.scanout[m->scanout_id].con;
        if (!console_has_gl(con)) {
            error_report("console doesn't support GL!");
            vhost_user_gpu_unblock(g);
            break;
        }
        dpy_gl_update(con, m->x, m->y, m->width, m->height);
        g->backend_blocked = true;
        break;
    }
    case VHOST_USER_GPU_UPDATE: {
        VhostUserGpuUpdate *m = &msg->payload.update;

        if (m->scanout_id >= g->parent_obj.conf.max_outputs) {
            break;
        }
        s = &g->parent_obj.scanout[m->scanout_id];
        con = s->con;
        pixman_image_t *image =
            pixman_image_create_bits(PIXMAN_x8r8g8b8,
                                     m->width,
                                     m->height,
                                     (uint32_t *)m->data,
                                     m->width * 4);

        pixman_image_composite(PIXMAN_OP_SRC,
                               image, NULL, s->ds->image,
                               0, 0, 0, 0, m->x, m->y, m->width, m->height);

        pixman_image_unref(image);
        if (qemu_console_surface(con) != s->ds) {
            dpy_gfx_replace_surface(con, s->ds);
        } else {
            dpy_gfx_update(con, m->x, m->y, m->width, m->height);
        }
        break;
    }
    default:
        g_warning("unhandled message %d %d", msg->request, msg->size);
    }

    if (con && qemu_console_is_gl_blocked(con)) {
        vhost_user_gpu_update_blocked(g, true);
    }
}

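/*
 * Read a single message from the backend socket: the three header words
 * first, then a payload of the advertised size, and dispatch it to the
 * cursor or display handler.
 */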
static void
vhost_user_gpu_chr_read(void *opaque)
{
    VhostUserGPU *g = opaque;
    VhostUserGpuMsg *msg = NULL;
    VhostUserGpuRequest request;
    uint32_t size, flags;
    int r;

    r = qemu_chr_fe_read_all(&g->vhost_chr,
                             (uint8_t *)&request, sizeof(uint32_t));
    if (r != sizeof(uint32_t)) {
        error_report("failed to read msg header: %d, %d", r, errno);
        goto end;
    }

    r = qemu_chr_fe_read_all(&g->vhost_chr,
                             (uint8_t *)&flags, sizeof(uint32_t));
    if (r != sizeof(uint32_t)) {
        error_report("failed to read msg flags");
        goto end;
    }

    r = qemu_chr_fe_read_all(&g->vhost_chr,
                             (uint8_t *)&size, sizeof(uint32_t));
    if (r != sizeof(uint32_t)) {
        error_report("failed to read msg size");
        goto end;
    }

    msg = g_malloc(VHOST_USER_GPU_HDR_SIZE + size);

    r = qemu_chr_fe_read_all(&g->vhost_chr,
                             (uint8_t *)&msg->payload, size);
    if (r != size) {
        error_report("failed to read msg payload %d != %d", r, size);
        goto end;
    }

    msg->request = request;
    msg->flags = flags;
    msg->size = size;

    if (request == VHOST_USER_GPU_CURSOR_UPDATE ||
        request == VHOST_USER_GPU_CURSOR_POS ||
        request == VHOST_USER_GPU_CURSOR_POS_HIDE) {
        vhost_user_gpu_handle_cursor(g, msg);
    } else {
        vhost_user_gpu_handle_display(g, msg);
    }

end:
    g_free(msg);
}

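/*
 * Install or remove the read handler on the backend socket; while blocked,
 * no further backend messages are processed.
 */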
static void
vhost_user_gpu_update_blocked(VhostUserGPU *g, bool blocked)
{
    qemu_set_fd_handler(g->vhost_gpu_fd,
                        blocked ? NULL : vhost_user_gpu_chr_read, NULL, g);
}

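/*
 * Called once the display has flushed a GL update: acknowledge a pending
 * DMABUF_UPDATE, if any, and resume reading from the backend.
 */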
static void
vhost_user_gpu_gl_flushed(VirtIOGPUBase *b)
{
    VhostUserGPU *g = VHOST_USER_GPU(b);

    if (g->backend_blocked) {
        vhost_user_gpu_unblock(VHOST_USER_GPU(g));
        g->backend_blocked = false;
    }

    vhost_user_gpu_update_blocked(VHOST_USER_GPU(g), false);
}

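/*
 * Create a socketpair for the GPU protocol: one end is wrapped in a local
 * socket chardev and read by this device, the other is handed to the
 * vhost-user backend with vhost_user_gpu_set_socket().
 */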
static bool
vhost_user_gpu_do_set_socket(VhostUserGPU *g, Error **errp)
{
    Chardev *chr;
    int sv[2];

    if (socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) {
        error_setg_errno(errp, errno, "socketpair() failed");
        return false;
    }

    chr = CHARDEV(object_new(TYPE_CHARDEV_SOCKET));
    if (!chr || qemu_chr_add_client(chr, sv[0]) == -1) {
        error_setg(errp, "Failed to make socket chardev");
        goto err;
    }
    if (!qemu_chr_fe_init(&g->vhost_chr, chr, errp)) {
        goto err;
    }
    if (vhost_user_gpu_set_socket(&g->vhost->dev, sv[1]) < 0) {
        error_setg(errp, "Failed to set vhost-user-gpu socket");
        qemu_chr_fe_deinit(&g->vhost_chr, false);
        goto err;
    }

    g->vhost_gpu_fd = sv[0];
    vhost_user_gpu_update_blocked(g, false);
    close(sv[1]);
    return true;

err:
    close(sv[0]);
    close(sv[1]);
    if (chr) {
        object_unref(OBJECT(chr));
    }
    return false;
}

static void
vhost_user_gpu_get_config(VirtIODevice *vdev, uint8_t *config_data)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(vdev);
    struct virtio_gpu_config *vgconfig =
        (struct virtio_gpu_config *)config_data;
    Error *local_err = NULL;
    int ret;

    memset(config_data, 0, sizeof(struct virtio_gpu_config));

    ret = vhost_dev_get_config(&g->vhost->dev,
                               config_data, sizeof(struct virtio_gpu_config),
                               &local_err);
    if (ret) {
        error_report_err(local_err);
        return;
    }

    /* those fields are managed by qemu */
    vgconfig->num_scanouts = b->virtio_config.num_scanouts;
    vgconfig->events_read = b->virtio_config.events_read;
    vgconfig->events_clear = b->virtio_config.events_clear;
}

static void
vhost_user_gpu_set_config(VirtIODevice *vdev,
                          const uint8_t *config_data)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(vdev);
    const struct virtio_gpu_config *vgconfig =
        (const struct virtio_gpu_config *)config_data;
    int ret;

    if (vgconfig->events_clear) {
        b->virtio_config.events_read &= ~vgconfig->events_clear;
    }

    ret = vhost_dev_set_config(&g->vhost->dev, config_data,
                               0, sizeof(struct virtio_gpu_config),
                               VHOST_SET_CONFIG_TYPE_MASTER);
    if (ret) {
        error_report("vhost-user-gpu: set device config space failed");
        return;
    }
}

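/*
 * Set up the GPU protocol socket and start the backend once the guest
 * driver is ready; tear both down otherwise.
 */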
static void
vhost_user_gpu_set_status(VirtIODevice *vdev, uint8_t val)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);
    Error *err = NULL;

    if (val & VIRTIO_CONFIG_S_DRIVER_OK && vdev->vm_running) {
        if (!vhost_user_gpu_do_set_socket(g, &err)) {
            error_report_err(err);
            return;
        }
        vhost_user_backend_start(g->vhost);
    } else {
        /* unblock any wait and stop processing */
        if (g->vhost_gpu_fd != -1) {
            vhost_user_gpu_update_blocked(g, true);
            qemu_chr_fe_deinit(&g->vhost_chr, true);
            g->vhost_gpu_fd = -1;
        }
        vhost_user_backend_stop(g->vhost);
    }
}

static bool
vhost_user_gpu_guest_notifier_pending(VirtIODevice *vdev, int idx)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);

    return vhost_virtqueue_pending(&g->vhost->dev, idx);
}

static void
vhost_user_gpu_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);

    vhost_virtqueue_mask(&g->vhost->dev, vdev, idx, mask);
}

static void
vhost_user_gpu_instance_init(Object *obj)
{
    VhostUserGPU *g = VHOST_USER_GPU(obj);

    g->vhost = VHOST_USER_BACKEND(object_new(TYPE_VHOST_USER_BACKEND));
    object_property_add_alias(obj, "chardev",
                              OBJECT(g->vhost), "chardev");
}

static void
vhost_user_gpu_instance_finalize(Object *obj)
{
    VhostUserGPU *g = VHOST_USER_GPU(obj);

    object_unref(OBJECT(g->vhost));
}

static void
vhost_user_gpu_reset(VirtIODevice *vdev)
{
    VhostUserGPU *g = VHOST_USER_GPU(vdev);

    virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));

    vhost_user_backend_stop(g->vhost);
}

static int
vhost_user_gpu_config_change(struct vhost_dev *dev)
{
    error_report("vhost-user-gpu: unhandled backend config change");
    return -1;
}

static const VhostDevConfigOps config_ops = {
    .vhost_dev_config_notifier = vhost_user_gpu_config_change,
};

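/*
 * Realize: initialize the vhost-user backend with two virtqueues (control
 * and cursor) and derive the virtio-gpu feature flags from what the
 * backend advertises.
 */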
static void
vhost_user_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VhostUserGPU *g = VHOST_USER_GPU(qdev);
    VirtIODevice *vdev = VIRTIO_DEVICE(g);

    vhost_dev_set_config_notifier(&g->vhost->dev, &config_ops);
    if (vhost_user_backend_dev_init(g->vhost, vdev, 2, errp) < 0) {
        return;
    }

    /* existing backend may send DMABUF, so let's add that requirement */
    g->parent_obj.conf.flags |= 1 << VIRTIO_GPU_FLAG_DMABUF_ENABLED;
    if (virtio_has_feature(g->vhost->dev.features, VIRTIO_GPU_F_VIRGL)) {
        g->parent_obj.conf.flags |= 1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED;
    }
    if (virtio_has_feature(g->vhost->dev.features, VIRTIO_GPU_F_EDID)) {
        g->parent_obj.conf.flags |= 1 << VIRTIO_GPU_FLAG_EDID_ENABLED;
    } else {
        error_report("EDID requested but the backend doesn't support it.");
        g->parent_obj.conf.flags &= ~(1 << VIRTIO_GPU_FLAG_EDID_ENABLED);
    }

    if (!virtio_gpu_base_device_realize(qdev, NULL, NULL, errp)) {
        return;
    }

    g->vhost_gpu_fd = -1;
}

static Property vhost_user_gpu_properties[] = {
    VIRTIO_GPU_BASE_PROPERTIES(VhostUserGPU, parent_obj.conf),
    DEFINE_PROP_END_OF_LIST(),
};

static void
vhost_user_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    VirtIOGPUBaseClass *vgc = VIRTIO_GPU_BASE_CLASS(klass);

    vgc->gl_flushed = vhost_user_gpu_gl_flushed;

    vdc->realize = vhost_user_gpu_device_realize;
    vdc->reset = vhost_user_gpu_reset;
    vdc->set_status   = vhost_user_gpu_set_status;
    vdc->guest_notifier_mask = vhost_user_gpu_guest_notifier_mask;
    vdc->guest_notifier_pending = vhost_user_gpu_guest_notifier_pending;
    vdc->get_config = vhost_user_gpu_get_config;
    vdc->set_config = vhost_user_gpu_set_config;

    device_class_set_props(dc, vhost_user_gpu_properties);
}

static const TypeInfo vhost_user_gpu_info = {
    .name = TYPE_VHOST_USER_GPU,
    .parent = TYPE_VIRTIO_GPU_BASE,
    .instance_size = sizeof(VhostUserGPU),
    .instance_init = vhost_user_gpu_instance_init,
    .instance_finalize = vhost_user_gpu_instance_finalize,
    .class_init = vhost_user_gpu_class_init,
};
module_obj(TYPE_VHOST_USER_GPU);

static void vhost_user_gpu_register_types(void)
{
    type_register_static(&vhost_user_gpu_info);
}

type_init(vhost_user_gpu_register_types)