cachepc-qemu

Fork of AMDESE/qemu with changes for cachepc side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu

virtio-mmio.c (27748B)


/*
 * Virtio MMIO bindings
 *
 * Copyright (c) 2011 Linaro Limited
 *
 * Author:
 *  Peter Maydell <peter.maydell@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "standard-headers/linux/virtio_mmio.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "hw/sysbus.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "qemu/host-utils.h"
#include "qemu/module.h"
#include "sysemu/kvm.h"
#include "sysemu/replay.h"
#include "hw/virtio/virtio-mmio.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "trace.h"

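/*
 * ioeventfd support: when enabled, a host eventfd is bound to 4-byte
 * writes of the queue index to the QUEUE_NOTIFY register, so guest
 * notifications are delivered without going through virtio_mmio_write().
 */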
static bool virtio_mmio_ioeventfd_enabled(DeviceState *d)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);

    return (proxy->flags & VIRTIO_IOMMIO_FLAG_USE_IOEVENTFD) != 0;
}

static int virtio_mmio_ioeventfd_assign(DeviceState *d,
                                        EventNotifier *notifier,
                                        int n, bool assign)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);

    if (assign) {
        memory_region_add_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUE_NOTIFY, 4,
                                  true, n, notifier);
    } else {
        memory_region_del_eventfd(&proxy->iomem, VIRTIO_MMIO_QUEUE_NOTIFY, 4,
                                  true, n, notifier);
    }
    return 0;
}

static void virtio_mmio_start_ioeventfd(VirtIOMMIOProxy *proxy)
{
    virtio_bus_start_ioeventfd(&proxy->bus);
}

static void virtio_mmio_stop_ioeventfd(VirtIOMMIOProxy *proxy)
{
    virtio_bus_stop_ioeventfd(&proxy->bus);
}

static void virtio_mmio_soft_reset(VirtIOMMIOProxy *proxy)
{
    int i;

    if (proxy->legacy) {
        return;
    }

    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        proxy->vqs[i].enabled = 0;
    }
}

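/*
 * Read handler for the transport's MMIO window. Offsets at or above
 * VIRTIO_MMIO_CONFIG are the device-specific config space; everything
 * below is a 4-byte-wide control register, with the legacy and modern
 * register layouts selected by proxy->legacy.
 */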
static uint64_t virtio_mmio_read(void *opaque, hwaddr offset, unsigned size)
{
    VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    trace_virtio_mmio_read(offset);

    if (!vdev) {
        /* If no backend is present, we treat most registers as
         * read-as-zero, except for the magic number, version and
         * vendor ID. This is not strictly sanctioned by the virtio
         * spec, but it allows us to provide transports with no backend
         * plugged in which don't confuse Linux's virtio code: the
         * probe won't complain about the bad magic number, but the
         * device ID of zero means no backend will claim it.
         */
        switch (offset) {
        case VIRTIO_MMIO_MAGIC_VALUE:
            return VIRT_MAGIC;
        case VIRTIO_MMIO_VERSION:
            if (proxy->legacy) {
                return VIRT_VERSION_LEGACY;
            } else {
                return VIRT_VERSION;
            }
        case VIRTIO_MMIO_VENDOR_ID:
            return VIRT_VENDOR;
        default:
            return 0;
        }
    }

    if (offset >= VIRTIO_MMIO_CONFIG) {
        offset -= VIRTIO_MMIO_CONFIG;
        if (proxy->legacy) {
            switch (size) {
            case 1:
                return virtio_config_readb(vdev, offset);
            case 2:
                return virtio_config_readw(vdev, offset);
            case 4:
                return virtio_config_readl(vdev, offset);
            default:
                abort();
            }
        } else {
            switch (size) {
            case 1:
                return virtio_config_modern_readb(vdev, offset);
            case 2:
                return virtio_config_modern_readw(vdev, offset);
            case 4:
                return virtio_config_modern_readl(vdev, offset);
            default:
                abort();
            }
        }
    }
    if (size != 4) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: wrong size access to register!\n",
                      __func__);
        return 0;
    }
    switch (offset) {
    case VIRTIO_MMIO_MAGIC_VALUE:
        return VIRT_MAGIC;
    case VIRTIO_MMIO_VERSION:
        if (proxy->legacy) {
            return VIRT_VERSION_LEGACY;
        } else {
            return VIRT_VERSION;
        }
    case VIRTIO_MMIO_DEVICE_ID:
        return vdev->device_id;
    case VIRTIO_MMIO_VENDOR_ID:
        return VIRT_VENDOR;
    case VIRTIO_MMIO_DEVICE_FEATURES:
        if (proxy->legacy) {
            if (proxy->host_features_sel) {
                return 0;
            } else {
                return vdev->host_features;
            }
        } else {
            VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
            return (vdev->host_features & ~vdc->legacy_features)
                >> (32 * proxy->host_features_sel);
        }
    case VIRTIO_MMIO_QUEUE_NUM_MAX:
        if (!virtio_queue_get_num(vdev, vdev->queue_sel)) {
            return 0;
        }
        return VIRTQUEUE_MAX_SIZE;
    case VIRTIO_MMIO_QUEUE_PFN:
        if (!proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: read from legacy register (0x%"
                          HWADDR_PRIx ") in non-legacy mode\n",
                          __func__, offset);
            return 0;
        }
        return virtio_queue_get_addr(vdev, vdev->queue_sel)
            >> proxy->guest_page_shift;
    case VIRTIO_MMIO_QUEUE_READY:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: read from non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return 0;
        }
        return proxy->vqs[vdev->queue_sel].enabled;
    case VIRTIO_MMIO_INTERRUPT_STATUS:
        return qatomic_read(&vdev->isr);
    case VIRTIO_MMIO_STATUS:
        return vdev->status;
    case VIRTIO_MMIO_CONFIG_GENERATION:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: read from non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return 0;
        }
        return vdev->generation;
    case VIRTIO_MMIO_SHM_LEN_LOW:
    case VIRTIO_MMIO_SHM_LEN_HIGH:
        /*
         * VIRTIO_MMIO_SHM_SEL is unimplemented; per the Linux driver,
         * a region length of -1 means the shared memory region does
         * not exist.
         */
        return -1;
    case VIRTIO_MMIO_DEVICE_FEATURES_SEL:
    case VIRTIO_MMIO_DRIVER_FEATURES:
    case VIRTIO_MMIO_DRIVER_FEATURES_SEL:
    case VIRTIO_MMIO_GUEST_PAGE_SIZE:
    case VIRTIO_MMIO_QUEUE_SEL:
    case VIRTIO_MMIO_QUEUE_NUM:
    case VIRTIO_MMIO_QUEUE_ALIGN:
    case VIRTIO_MMIO_QUEUE_NOTIFY:
    case VIRTIO_MMIO_INTERRUPT_ACK:
    case VIRTIO_MMIO_QUEUE_DESC_LOW:
    case VIRTIO_MMIO_QUEUE_DESC_HIGH:
    case VIRTIO_MMIO_QUEUE_AVAIL_LOW:
    case VIRTIO_MMIO_QUEUE_AVAIL_HIGH:
    case VIRTIO_MMIO_QUEUE_USED_LOW:
    case VIRTIO_MMIO_QUEUE_USED_HIGH:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: read of write-only register (0x%" HWADDR_PRIx ")\n",
                      __func__, offset);
        return 0;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: bad register offset (0x%" HWADDR_PRIx ")\n",
                      __func__, offset);
        return 0;
    }
    return 0;
}

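/*
 * Write handler, mirroring virtio_mmio_read(). In modern mode the 64-bit
 * queue addresses arrive as separate LOW/HIGH halves and are only pushed
 * to the VirtIODevice when the guest writes QUEUE_READY.
 */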
static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value,
                              unsigned size)
{
    VirtIOMMIOProxy *proxy = (VirtIOMMIOProxy *)opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    trace_virtio_mmio_write_offset(offset, value);

    if (!vdev) {
        /* If no backend is present, we just make all registers
         * write-ignored. This allows us to provide transports with
         * no backend plugged in.
         */
        return;
    }

    if (offset >= VIRTIO_MMIO_CONFIG) {
        offset -= VIRTIO_MMIO_CONFIG;
        if (proxy->legacy) {
            switch (size) {
            case 1:
                virtio_config_writeb(vdev, offset, value);
                break;
            case 2:
                virtio_config_writew(vdev, offset, value);
                break;
            case 4:
                virtio_config_writel(vdev, offset, value);
                break;
            default:
                abort();
            }
            return;
        } else {
            switch (size) {
            case 1:
                virtio_config_modern_writeb(vdev, offset, value);
                break;
            case 2:
                virtio_config_modern_writew(vdev, offset, value);
                break;
            case 4:
                virtio_config_modern_writel(vdev, offset, value);
                break;
            default:
                abort();
            }
            return;
        }
    }
    if (size != 4) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: wrong size access to register!\n",
                      __func__);
        return;
    }
    switch (offset) {
    case VIRTIO_MMIO_DEVICE_FEATURES_SEL:
        if (value) {
            proxy->host_features_sel = 1;
        } else {
            proxy->host_features_sel = 0;
        }
        break;
    case VIRTIO_MMIO_DRIVER_FEATURES:
        if (proxy->legacy) {
            if (proxy->guest_features_sel) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "%s: attempt to write guest features with "
                              "guest_features_sel > 0 in legacy mode\n",
                              __func__);
            } else {
                virtio_set_features(vdev, value);
            }
        } else {
            proxy->guest_features[proxy->guest_features_sel] = value;
        }
        break;
    case VIRTIO_MMIO_DRIVER_FEATURES_SEL:
        if (value) {
            proxy->guest_features_sel = 1;
        } else {
            proxy->guest_features_sel = 0;
        }
        break;
    case VIRTIO_MMIO_GUEST_PAGE_SIZE:
        if (!proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to legacy register (0x%"
                          HWADDR_PRIx ") in non-legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->guest_page_shift = ctz32(value);
        if (proxy->guest_page_shift > 31) {
            proxy->guest_page_shift = 0;
        }
        trace_virtio_mmio_guest_page(value, proxy->guest_page_shift);
        break;
    case VIRTIO_MMIO_QUEUE_SEL:
        if (value < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = value;
        }
        break;
    case VIRTIO_MMIO_QUEUE_NUM:
        trace_virtio_mmio_queue_write(value, VIRTQUEUE_MAX_SIZE);
        virtio_queue_set_num(vdev, vdev->queue_sel, value);

        if (proxy->legacy) {
            virtio_queue_update_rings(vdev, vdev->queue_sel);
        } else {
            proxy->vqs[vdev->queue_sel].num = value;
        }
        break;
    case VIRTIO_MMIO_QUEUE_ALIGN:
        if (!proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to legacy register (0x%"
                          HWADDR_PRIx ") in non-legacy mode\n",
                          __func__, offset);
            return;
        }
        virtio_queue_set_align(vdev, vdev->queue_sel, value);
        break;
    case VIRTIO_MMIO_QUEUE_PFN:
        if (!proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to legacy register (0x%"
                          HWADDR_PRIx ") in non-legacy mode\n",
                          __func__, offset);
            return;
        }
        if (value == 0) {
            virtio_reset(vdev);
        } else {
            virtio_queue_set_addr(vdev, vdev->queue_sel,
                                  value << proxy->guest_page_shift);
        }
        break;
    case VIRTIO_MMIO_QUEUE_READY:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        if (value) {
            virtio_queue_set_num(vdev, vdev->queue_sel,
                                 proxy->vqs[vdev->queue_sel].num);
            virtio_queue_set_rings(vdev, vdev->queue_sel,
                ((uint64_t)proxy->vqs[vdev->queue_sel].desc[1]) << 32 |
                proxy->vqs[vdev->queue_sel].desc[0],
                ((uint64_t)proxy->vqs[vdev->queue_sel].avail[1]) << 32 |
                proxy->vqs[vdev->queue_sel].avail[0],
                ((uint64_t)proxy->vqs[vdev->queue_sel].used[1]) << 32 |
                proxy->vqs[vdev->queue_sel].used[0]);
            proxy->vqs[vdev->queue_sel].enabled = 1;
        } else {
            proxy->vqs[vdev->queue_sel].enabled = 0;
        }
        break;
    case VIRTIO_MMIO_QUEUE_NOTIFY:
        if (value < VIRTIO_QUEUE_MAX) {
            virtio_queue_notify(vdev, value);
        }
        break;
    case VIRTIO_MMIO_INTERRUPT_ACK:
        qatomic_and(&vdev->isr, ~value);
        virtio_update_irq(vdev);
        break;
    case VIRTIO_MMIO_STATUS:
        if (!(value & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_mmio_stop_ioeventfd(proxy);
        }

        if (!proxy->legacy && (value & VIRTIO_CONFIG_S_FEATURES_OK)) {
            virtio_set_features(vdev,
                                ((uint64_t)proxy->guest_features[1]) << 32 |
                                proxy->guest_features[0]);
        }

        virtio_set_status(vdev, value & 0xff);

        if (value & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_mmio_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_reset(vdev);
            virtio_mmio_soft_reset(proxy);
        }
        break;
    case VIRTIO_MMIO_QUEUE_DESC_LOW:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].desc[0] = value;
        break;
    case VIRTIO_MMIO_QUEUE_DESC_HIGH:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].desc[1] = value;
        break;
    case VIRTIO_MMIO_QUEUE_AVAIL_LOW:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].avail[0] = value;
        break;
    case VIRTIO_MMIO_QUEUE_AVAIL_HIGH:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].avail[1] = value;
        break;
    case VIRTIO_MMIO_QUEUE_USED_LOW:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].used[0] = value;
        break;
    case VIRTIO_MMIO_QUEUE_USED_HIGH:
        if (proxy->legacy) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: write to non-legacy register (0x%"
                          HWADDR_PRIx ") in legacy mode\n",
                          __func__, offset);
            return;
        }
        proxy->vqs[vdev->queue_sel].used[1] = value;
        break;
    case VIRTIO_MMIO_MAGIC_VALUE:
    case VIRTIO_MMIO_VERSION:
    case VIRTIO_MMIO_DEVICE_ID:
    case VIRTIO_MMIO_VENDOR_ID:
    case VIRTIO_MMIO_DEVICE_FEATURES:
    case VIRTIO_MMIO_QUEUE_NUM_MAX:
    case VIRTIO_MMIO_INTERRUPT_STATUS:
    case VIRTIO_MMIO_CONFIG_GENERATION:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: write to read-only register (0x%" HWADDR_PRIx ")\n",
                      __func__, offset);
        break;

    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: bad register offset (0x%" HWADDR_PRIx ")\n",
                      __func__, offset);
    }
}

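/*
 * The legacy transport uses the guest's native endianness for its
 * registers, whereas virtio 1.0 mandates little-endian; realize picks
 * the matching MemoryRegionOps below.
 */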
static const MemoryRegionOps virtio_legacy_mem_ops = {
    .read = virtio_mmio_read,
    .write = virtio_mmio_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps virtio_mem_ops = {
    .read = virtio_mmio_read,
    .write = virtio_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void virtio_mmio_update_irq(DeviceState *opaque, uint16_t vector)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    int level;

    if (!vdev) {
        return;
    }
    level = (qatomic_read(&vdev->isr) != 0);
    trace_virtio_mmio_setting_irq(level);
    qemu_set_irq(proxy->irq, level);
}

static int virtio_mmio_load_config(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    proxy->host_features_sel = qemu_get_be32(f);
    proxy->guest_features_sel = qemu_get_be32(f);
    proxy->guest_page_shift = qemu_get_be32(f);
    return 0;
}

static void virtio_mmio_save_config(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    qemu_put_be32(f, proxy->host_features_sel);
    qemu_put_be32(f, proxy->guest_features_sel);
    qemu_put_be32(f, proxy->guest_page_shift);
}

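/*
 * Extra migration state for the modern transport: the 64-bit driver
 * feature word and the per-queue shadow registers travel in a
 * "virtio_mmio/state" subsection via the *_extra_state callbacks.
 */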
static const VMStateDescription vmstate_virtio_mmio_queue_state = {
    .name = "virtio_mmio/queue_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(num, VirtIOMMIOQueue),
        VMSTATE_BOOL(enabled, VirtIOMMIOQueue),
        VMSTATE_UINT32_ARRAY(desc, VirtIOMMIOQueue, 2),
        VMSTATE_UINT32_ARRAY(avail, VirtIOMMIOQueue, 2),
        VMSTATE_UINT32_ARRAY(used, VirtIOMMIOQueue, 2),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_mmio_state_sub = {
    .name = "virtio_mmio/state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(guest_features, VirtIOMMIOProxy, 2),
        VMSTATE_STRUCT_ARRAY(vqs, VirtIOMMIOProxy, VIRTIO_QUEUE_MAX, 0,
                             vmstate_virtio_mmio_queue_state,
                             VirtIOMMIOQueue),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_virtio_mmio = {
    .name = "virtio_mmio",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_virtio_mmio_state_sub,
        NULL
    }
};

static void virtio_mmio_save_extra_state(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    vmstate_save_state(f, &vmstate_virtio_mmio, proxy, NULL);
}

static int virtio_mmio_load_extra_state(DeviceState *opaque, QEMUFile *f)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    return vmstate_load_state(f, &vmstate_virtio_mmio, proxy, 1);
}

static bool virtio_mmio_has_extra_state(DeviceState *opaque)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque);

    return !proxy->legacy;
}

static void virtio_mmio_reset(DeviceState *d)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    int i;

    virtio_mmio_stop_ioeventfd(proxy);
    virtio_bus_reset(&proxy->bus);
    proxy->host_features_sel = 0;
    proxy->guest_features_sel = 0;
    proxy->guest_page_shift = 0;

    if (!proxy->legacy) {
        proxy->guest_features[0] = proxy->guest_features[1] = 0;

        for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
            proxy->vqs[i].enabled = 0;
            proxy->vqs[i].num = 0;
            proxy->vqs[i].desc[0] = proxy->vqs[i].desc[1] = 0;
            proxy->vqs[i].avail[0] = proxy->vqs[i].avail[1] = 0;
            proxy->vqs[i].used[0] = proxy->vqs[i].used[1] = 0;
        }
    }
}

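/*
 * Guest notifier setup: each virtqueue gets an EventNotifier whose fd
 * handler raises the interrupt; irqfd is not used here (with_irqfd is
 * always false), so injection goes through qemu_set_irq().
 */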
static int virtio_mmio_set_guest_notifier(DeviceState *d, int n, bool assign,
                                          bool with_irqfd)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    if (assign) {
        int r = event_notifier_init(notifier, 0);
        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
    } else {
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    }

    if (vdc->guest_notifier_mask && vdev->use_guest_notifier_mask) {
        vdc->guest_notifier_mask(vdev, n, !assign);
    }

    return 0;
}

static int virtio_mmio_set_guest_notifiers(DeviceState *d, int nvqs,
                                           bool assign)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    /* TODO: need to check if kvm-arm supports irqfd */
    bool with_irqfd = false;
    int r, n;

    nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX);

    for (n = 0; n < nvqs; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }

        r = virtio_mmio_set_guest_notifier(d, n, assign, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }

    return 0;

assign_error:
    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
    assert(assign);
    while (--n >= 0) {
        virtio_mmio_set_guest_notifier(d, n, !assign, false);
    }
    return r;
}

static void virtio_mmio_pre_plugged(DeviceState *d, Error **errp)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (!proxy->legacy) {
        virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
    }
}

/* virtio-mmio device */

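/*
 * "force-legacy" defaults to true, so the legacy register layout is
 * exposed unless a board opts in to the modern (virtio 1.0) transport;
 * "ioeventfd" toggles VIRTIO_IOMMIO_FLAG_USE_IOEVENTFD.
 */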
static Property virtio_mmio_properties[] = {
    DEFINE_PROP_BOOL("format_transport_address", VirtIOMMIOProxy,
                     format_transport_address, true),
    DEFINE_PROP_BOOL("force-legacy", VirtIOMMIOProxy, legacy, true),
    DEFINE_PROP_BIT("ioeventfd", VirtIOMMIOProxy, flags,
                    VIRTIO_IOMMIO_FLAG_USE_IOEVENTFD_BIT, true),
    DEFINE_PROP_END_OF_LIST(),
};

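/*
 * Realize: create the virtio-mmio bus, one IRQ line and a 0x200-byte
 * MMIO region; ioeventfd is disabled when KVM lacks eventfd support or
 * when running under record/replay.
 */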
static void virtio_mmio_realizefn(DeviceState *d, Error **errp)
{
    VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d);
    SysBusDevice *sbd = SYS_BUS_DEVICE(d);

    qbus_init(&proxy->bus, sizeof(proxy->bus), TYPE_VIRTIO_MMIO_BUS, d, NULL);
    sysbus_init_irq(sbd, &proxy->irq);

    if (!kvm_eventfds_enabled()) {
        proxy->flags &= ~VIRTIO_IOMMIO_FLAG_USE_IOEVENTFD;
    }

    /* fd-based ioevents can't be synchronized in record/replay */
    if (replay_mode != REPLAY_MODE_NONE) {
        proxy->flags &= ~VIRTIO_IOMMIO_FLAG_USE_IOEVENTFD;
    }

    if (proxy->legacy) {
        memory_region_init_io(&proxy->iomem, OBJECT(d),
                              &virtio_legacy_mem_ops, proxy,
                              TYPE_VIRTIO_MMIO, 0x200);
    } else {
        memory_region_init_io(&proxy->iomem, OBJECT(d),
                              &virtio_mem_ops, proxy,
                              TYPE_VIRTIO_MMIO, 0x200);
    }
    sysbus_init_mmio(sbd, &proxy->iomem);
}

static void virtio_mmio_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = virtio_mmio_realizefn;
    dc->reset = virtio_mmio_reset;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    device_class_set_props(dc, virtio_mmio_properties);
}

static const TypeInfo virtio_mmio_info = {
    .name          = TYPE_VIRTIO_MMIO,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(VirtIOMMIOProxy),
    .class_init    = virtio_mmio_class_init,
};

/* virtio-mmio-bus. */

static char *virtio_mmio_bus_get_dev_path(DeviceState *dev)
{
    BusState *virtio_mmio_bus;
    VirtIOMMIOProxy *virtio_mmio_proxy;
    char *proxy_path;
    char *path;
    MemoryRegionSection section;

    virtio_mmio_bus = qdev_get_parent_bus(dev);
    virtio_mmio_proxy = VIRTIO_MMIO(virtio_mmio_bus->parent);
    proxy_path = qdev_get_dev_path(DEVICE(virtio_mmio_proxy));

    /*
     * If @format_transport_address is false, then we just perform the same as
     * virtio_bus_get_dev_path(): we delegate the address formatting for the
     * device on the virtio-mmio bus to the bus that the virtio-mmio proxy
     * (i.e., the device that implements the virtio-mmio bus) resides on. In
     * this case the base address of the virtio-mmio transport will be
     * invisible.
     */
    if (!virtio_mmio_proxy->format_transport_address) {
        return proxy_path;
    }

    /* Otherwise, we append the base address of the transport. */
    section = memory_region_find(&virtio_mmio_proxy->iomem, 0, 0x200);
    assert(section.mr);

    if (proxy_path) {
        path = g_strdup_printf("%s/virtio-mmio@" TARGET_FMT_plx, proxy_path,
                               section.offset_within_address_space);
    } else {
        path = g_strdup_printf("virtio-mmio@" TARGET_FMT_plx,
                               section.offset_within_address_space);
    }
    memory_region_unref(section.mr);

    g_free(proxy_path);
    return path;
}

static void virtio_mmio_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *bus_class = BUS_CLASS(klass);
    VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);

    k->notify = virtio_mmio_update_irq;
    k->save_config = virtio_mmio_save_config;
    k->load_config = virtio_mmio_load_config;
    k->save_extra_state = virtio_mmio_save_extra_state;
    k->load_extra_state = virtio_mmio_load_extra_state;
    k->has_extra_state = virtio_mmio_has_extra_state;
    k->set_guest_notifiers = virtio_mmio_set_guest_notifiers;
    k->ioeventfd_enabled = virtio_mmio_ioeventfd_enabled;
    k->ioeventfd_assign = virtio_mmio_ioeventfd_assign;
    k->pre_plugged = virtio_mmio_pre_plugged;
    k->has_variable_vring_alignment = true;
    bus_class->max_dev = 1;
    bus_class->get_dev_path = virtio_mmio_bus_get_dev_path;
}

static const TypeInfo virtio_mmio_bus_info = {
    .name          = TYPE_VIRTIO_MMIO_BUS,
    .parent        = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioBusState),
    .class_init    = virtio_mmio_bus_class_init,
};

static void virtio_mmio_register_types(void)
{
    type_register_static(&virtio_mmio_bus_info);
    type_register_static(&virtio_mmio_info);
}

type_init(virtio_mmio_register_types)