cachepc-qemu

Fork of AMDESE/qemu with changes for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu

virtio-gpu-udmabuf.c (5777B)


/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu-common.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "trace.h"
#include "exec/ramblock.h"
#include "sysemu/hostmem.h"
#include <sys/ioctl.h>
#include <fcntl.h>
#include <linux/memfd.h>
#include "qemu/memfd.h"
#include "standard-headers/linux/udmabuf.h"

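/*
 * Combine the scattered guest pages backing a resource into a single
 * dma-buf: translate each iov entry back to its RAMBlock memfd and
 * offset, then hand the list to the host udmabuf driver via the
 * UDMABUF_CREATE_LIST ioctl. On any failure res->dmabuf_fd is left
 * invalid (negative).
 */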
static void virtio_gpu_create_udmabuf(struct virtio_gpu_simple_resource *res)
{
    struct udmabuf_create_list *list;
    RAMBlock *rb;
    ram_addr_t offset;
    int udmabuf, i;

    udmabuf = udmabuf_fd();
    if (udmabuf < 0) {
        return;
    }

    list = g_malloc0(sizeof(struct udmabuf_create_list) +
                     sizeof(struct udmabuf_create_item) * res->iov_cnt);

    for (i = 0; i < res->iov_cnt; i++) {
        rcu_read_lock();
        rb = qemu_ram_block_from_host(res->iov[i].iov_base, false, &offset);
        rcu_read_unlock();

        if (!rb || rb->fd < 0) {
            g_free(list);
            return;
        }

        list->list[i].memfd  = rb->fd;
        list->list[i].offset = offset;
        list->list[i].size   = res->iov[i].iov_len;
    }

    list->count = res->iov_cnt;
    list->flags = UDMABUF_FLAGS_CLOEXEC;

    res->dmabuf_fd = ioctl(udmabuf, UDMABUF_CREATE_LIST, list);
    if (res->dmabuf_fd < 0) {
        warn_report("%s: UDMABUF_CREATE_LIST: %s", __func__,
                    strerror(errno));
    }
    g_free(list);
}

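/*
 * Map the dma-buf into QEMU's address space so the scattered guest
 * pages can be accessed through one linear, read-only view.
 */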
static void virtio_gpu_remap_udmabuf(struct virtio_gpu_simple_resource *res)
{
    res->remapped = mmap(NULL, res->blob_size, PROT_READ,
                         MAP_SHARED, res->dmabuf_fd, 0);
    if (res->remapped == MAP_FAILED) {
        warn_report("%s: dmabuf mmap failed: %s", __func__,
                    strerror(errno));
        res->remapped = NULL;
    }
}

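/* Undo virtio_gpu_remap_udmabuf() and virtio_gpu_create_udmabuf(). */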
static void virtio_gpu_destroy_udmabuf(struct virtio_gpu_simple_resource *res)
{
    if (res->remapped) {
        munmap(res->remapped, res->blob_size);
        res->remapped = NULL;
    }
    if (res->dmabuf_fd >= 0) {
        close(res->dmabuf_fd);
        res->dmabuf_fd = -1;
    }
}

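/*
 * object_child_foreach() callback: probes each memory backend's fd
 * with F_GET_SEALS and sets *memfd_backend when seals are present,
 * i.e. the backend is a sealable memfd usable as a udmabuf source.
 * Always returns 0 so iteration visits every child.
 */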
static int find_memory_backend_type(Object *obj, void *opaque)
{
    bool *memfd_backend = opaque;
    int ret;

    if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) {
        HostMemoryBackend *backend = MEMORY_BACKEND(obj);
        RAMBlock *rb = backend->mr.ram_block;

        if (rb && rb->fd > 0) {
            ret = fcntl(rb->fd, F_GET_SEALS);
            if (ret > 0) {
                *memfd_backend = true;
            }
        }
    }

    return 0;
}

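/*
 * udmabuf export is only possible if the host provides /dev/udmabuf
 * and guest RAM lives in a memfd-backed memory backend.
 */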
bool virtio_gpu_have_udmabuf(void)
{
    Object *memdev_root;
    int udmabuf;
    bool memfd_backend = false;

    udmabuf = udmabuf_fd();
    if (udmabuf < 0) {
        return false;
    }

    memdev_root = object_resolve_path("/objects", NULL);
    object_child_foreach(memdev_root, find_memory_backend_type, &memfd_backend);

    return memfd_backend;
}

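/*
 * Give the resource a linear host mapping: a single-entry iov can be
 * used directly, otherwise build a udmabuf over the scattered pages
 * and mmap it. On failure res->blob is left NULL.
 */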
void virtio_gpu_init_udmabuf(struct virtio_gpu_simple_resource *res)
{
    void *pdata = NULL;

    res->dmabuf_fd = -1;
    if (res->iov_cnt == 1) {
        pdata = res->iov[0].iov_base;
    } else {
        virtio_gpu_create_udmabuf(res);
        if (res->dmabuf_fd < 0) {
            return;
        }
        virtio_gpu_remap_udmabuf(res);
        if (!res->remapped) {
            return;
        }
        pdata = res->remapped;
    }

    res->blob = pdata;
}

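/* Tear down the udmabuf mapping, if one was created. */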
void virtio_gpu_fini_udmabuf(struct virtio_gpu_simple_resource *res)
{
    if (res->remapped) {
        virtio_gpu_destroy_udmabuf(res);
    }
}

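/* Release a dmabuf back to the display layer and free its wrapper. */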
static void virtio_gpu_free_dmabuf(VirtIOGPU *g, VGPUDMABuf *dmabuf)
{
    struct virtio_gpu_scanout *scanout;

    scanout = &g->parent_obj.scanout[dmabuf->scanout_id];
    dpy_gl_release_dmabuf(scanout->con, &dmabuf->buf);
    QTAILQ_REMOVE(&g->dmabuf.bufs, dmabuf, next);
    g_free(dmabuf);
}

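/*
 * Wrap the resource's dma-buf fd together with the framebuffer
 * geometry into a VGPUDMABuf that the display layer can scan out.
 */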
static VGPUDMABuf
*virtio_gpu_create_dmabuf(VirtIOGPU *g,
                          uint32_t scanout_id,
                          struct virtio_gpu_simple_resource *res,
                          struct virtio_gpu_framebuffer *fb)
{
    VGPUDMABuf *dmabuf;

    if (res->dmabuf_fd < 0) {
        return NULL;
    }

    dmabuf = g_new0(VGPUDMABuf, 1);
    dmabuf->buf.width = fb->width;
    dmabuf->buf.height = fb->height;
    dmabuf->buf.stride = fb->stride;
    dmabuf->buf.fourcc = qemu_pixman_to_drm_format(fb->format);
    dmabuf->buf.fd = res->dmabuf_fd;
    dmabuf->buf.allow_fences = true;

    dmabuf->scanout_id = scanout_id;
    QTAILQ_INSERT_HEAD(&g->dmabuf.bufs, dmabuf, next);

    return dmabuf;
}

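/*
 * Switch a scanout to a new dma-buf primary plane: resize the console,
 * hand the new buffer to the display, then release the old primary.
 */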
int virtio_gpu_update_dmabuf(VirtIOGPU *g,
                             uint32_t scanout_id,
                             struct virtio_gpu_simple_resource *res,
                             struct virtio_gpu_framebuffer *fb)
{
    struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id];
    VGPUDMABuf *new_primary, *old_primary = NULL;

    new_primary = virtio_gpu_create_dmabuf(g, scanout_id, res, fb);
    if (!new_primary) {
        return -EINVAL;
    }

    if (g->dmabuf.primary) {
        old_primary = g->dmabuf.primary;
    }

    g->dmabuf.primary = new_primary;
    qemu_console_resize(scanout->con,
                        new_primary->buf.width,
                        new_primary->buf.height);
    dpy_gl_scanout_dmabuf(scanout->con, &new_primary->buf);

    if (old_primary) {
        virtio_gpu_free_dmabuf(g, old_primary);
    }

    return 0;
}