cachepc-qemu

Fork of AMDESE/qemu with changes for cachepc side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu

generic_fuzz.c (31797B)


      1/*
      2 * Generic Virtual-Device Fuzzing Target
      3 *
      4 * Copyright Red Hat Inc., 2020
      5 *
      6 * Authors:
      7 *  Alexander Bulekov   <alxndr@bu.edu>
      8 *
      9 * This work is licensed under the terms of the GNU GPL, version 2 or later.
     10 * See the COPYING file in the top-level directory.
     11 */
     12
     13#include "qemu/osdep.h"
     14
     15#include <wordexp.h>
     16
     17#include "hw/core/cpu.h"
     18#include "tests/qtest/libqos/libqtest.h"
     19#include "tests/qtest/libqos/pci-pc.h"
     20#include "fuzz.h"
     21#include "fork_fuzz.h"
     22#include "string.h"
     23#include "exec/memory.h"
     24#include "exec/ramblock.h"
     25#include "hw/qdev-core.h"
     26#include "hw/pci/pci.h"
     27#include "hw/boards.h"
     28#include "generic_fuzz_configs.h"
     29#include "hw/mem/sparse-mem.h"
     30
     31/*
     32 * SEPARATOR is used to separate "operations" in the fuzz input
     33 */
     34#define SEPARATOR "FUZZ"
     35
     36enum cmds {
     37    OP_IN,
     38    OP_OUT,
     39    OP_READ,
     40    OP_WRITE,
     41    OP_PCI_READ,
     42    OP_PCI_WRITE,
     43    OP_DISABLE_PCI,
     44    OP_ADD_DMA_PATTERN,
     45    OP_CLEAR_DMA_PATTERNS,
     46    OP_CLOCK_STEP,
     47};
     48
     49#define DEFAULT_TIMEOUT_US 100000
     50#define USEC_IN_SEC 1000000
     51
     52#define MAX_DMA_FILL_SIZE 0x10000
     53
     54#define PCI_HOST_BRIDGE_CFG 0xcf8
     55#define PCI_HOST_BRIDGE_DATA 0xcfc
     56
     57typedef struct {
     58    ram_addr_t addr;
     59    ram_addr_t size; /* The number of bytes until the end of the I/O region */
     60} address_range;
     61
     62static useconds_t timeout = DEFAULT_TIMEOUT_US;
     63
     64static bool qtest_log_enabled;
     65
     66MemoryRegion *sparse_mem_mr;
     67
     68/*
     69 * A pattern used to populate a DMA region or perform a memwrite. This is
     70 * useful for e.g. populating tables of unique addresses.
     71 * Example: {.index = 1, .stride = 2, .len = 3, .data = "\x00\x01\x02"}
     72 * Renders as: 00 01 02   00 03 02   00 05 02   00 07 02 ...
     73 */
     74typedef struct {
     75    uint8_t index;      /* Index of a byte to increment by stride */
     76    uint8_t stride;     /* Increment each index'th byte by this amount */
     77    size_t len;
     78    const uint8_t *data;
     79} pattern;
     80
     81/* Avoid filling the same DMA region between MMIO/PIO commands? */
     82static bool avoid_double_fetches;
     83
     84static QTestState *qts_global; /* Need a global for the DMA callback */
     85
     86/*
     87 * List of memory regions that are children of QOM objects specified by the
     88 * user for fuzzing.
     89 */
     90static GHashTable *fuzzable_memoryregions;
     91static GPtrArray *fuzzable_pci_devices;
     92
     93struct get_io_cb_info {
     94    int index;
     95    int found;
     96    address_range result;
     97};
     98
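       /*
        * flatview_for_each_range() callback: skip the first info->index
        * MemoryRegions that were matched as fuzzable, then record the next
        * match and stop the iteration by returning true.
        */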
     99static bool get_io_address_cb(Int128 start, Int128 size,
    100                              const MemoryRegion *mr,
    101                              hwaddr offset_in_region,
    102                              void *opaque)
    103{
    104    struct get_io_cb_info *info = opaque;
    105    if (g_hash_table_lookup(fuzzable_memoryregions, mr)) {
    106        if (info->index == 0) {
    107            info->result.addr = (ram_addr_t)start;
    108            info->result.size = (ram_addr_t)size;
    109            info->found = 1;
    110            return true;
    111        }
    112        info->index--;
    113    }
    114    return false;
    115}
    116
    117/*
    118 * List of dma regions populated since the last fuzzing command. Used to ensure
    119 * that we only write to each DMA address once, to avoid race conditions when
    120 * building reproducers.
    121 */
    122static GArray *dma_regions;
    123
    124static GArray *dma_patterns;
    125static int dma_pattern_index;
    126static bool pci_disabled;
    127
    128/*
    129 * Allocate a block of memory and populate it with a pattern.
    130 */
    131static void *pattern_alloc(pattern p, size_t len)
    132{
    133    int i;
    134    uint8_t *buf = g_malloc(len);
    135    uint8_t sum = 0;
    136
    137    for (i = 0; i < len; ++i) {
    138        buf[i] = p.data[i % p.len];
    139        if ((i % p.len) == p.index) {
    140            buf[i] += sum;
    141            sum += p.stride;
    142        }
    143    }
    144    return buf;
    145}
    146
    147static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
    148{
    149    unsigned access_size_max = mr->ops->valid.max_access_size;
    150
    151    /*
    152     * Regions are assumed to support 1-4 byte accesses unless
    153     * otherwise specified.
    154     */
    155    if (access_size_max == 0) {
    156        access_size_max = 4;
    157    }
    158
    159    /* Bound the maximum access by the alignment of the address.  */
    160    if (!mr->ops->impl.unaligned) {
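               /*
                * addr & -addr isolates the lowest set bit of addr, i.e. the
                * largest power-of-two alignment that the address satisfies.
                */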
    161        unsigned align_size_max = addr & -addr;
    162        if (align_size_max != 0 && align_size_max < access_size_max) {
    163            access_size_max = align_size_max;
    164        }
    165    }
    166
    167    /* Don't attempt accesses larger than the maximum.  */
    168    if (l > access_size_max) {
    169        l = access_size_max;
    170    }
    171    l = pow2floor(l);
    172
    173    return l;
    174}
    175
    176/*
    177 * Callback for functions that perform DMA reads from guest memory. Confirm
    178 * that the region has not already been populated since the last loop in
    179 * generic_fuzz(), avoiding potential race conditions, which we don't have
    180 * a good way of reproducing right now.
    181 */
    182void fuzz_dma_read_cb(size_t addr, size_t len, MemoryRegion *mr)
    183{
    184    /* Are we in the generic-fuzzer or are we using another fuzz-target? */
    185    if (!qts_global) {
    186        return;
    187    }
    188
    189    /*
    190     * Return immediately if:
    191     * - We have no DMA patterns defined
    192     * - The length of the DMA read request is zero
    193     * - The DMA read is hitting an MR other than the machine's main RAM
    194     *   or our sparse-memory device
    195     */
    196    if (dma_patterns->len == 0
    197        || len == 0
    198        || (mr != current_machine->ram && mr != sparse_mem_mr)) {
    199        return;
    200    }
    201
    202    /*
    203     * If we overlap with any existing dma_regions, split the range and only
    204     * populate the non-overlapping parts.
    205     */
    206    address_range region;
    207    bool double_fetch = false;
    208    for (int i = 0;
    209         i < dma_regions->len && (avoid_double_fetches || qtest_log_enabled);
    210         ++i) {
    211        region = g_array_index(dma_regions, address_range, i);
    212        if (addr < region.addr + region.size && addr + len > region.addr) {
    213            double_fetch = true;
    214            if (addr < region.addr
    215                && avoid_double_fetches) {
    216                fuzz_dma_read_cb(addr, region.addr - addr, mr);
    217            }
    218            if (addr + len > region.addr + region.size
    219                && avoid_double_fetches) {
    220                fuzz_dma_read_cb(region.addr + region.size,
    221                        addr + len - (region.addr + region.size), mr);
    222            }
    223            return;
    224        }
    225    }
    226
    227    /* Cap the length of the DMA access to something reasonable */
    228    len = MIN(len, MAX_DMA_FILL_SIZE);
    229
    230    address_range ar = {addr, len};
    231    g_array_append_val(dma_regions, ar);
    232    pattern p = g_array_index(dma_patterns, pattern, dma_pattern_index);
    233    void *buf_base = pattern_alloc(p, ar.size);
    234    void *buf = buf_base;
    235    hwaddr l, addr1;
    236    MemoryRegion *mr1;
    237    while (len > 0) {
    238        l = len;
    239        mr1 = address_space_translate(first_cpu->as,
    240                                      addr, &addr1, &l, true,
    241                                      MEMTXATTRS_UNSPECIFIED);
    242
    243        /*
    244         *  If mr1 isn't RAM, address_space_translate doesn't update l. Use
    245         *  memory_access_size to identify the number of bytes that it is safe
    246         *  to write without accidentally writing to another MemoryRegion.
    247         */
    248        if (!memory_region_is_ram(mr1)) {
    249            l = memory_access_size(mr1, l, addr1);
    250        }
    251        if (memory_region_is_ram(mr1) ||
    252            memory_region_is_romd(mr1) ||
    253            mr1 == sparse_mem_mr) {
    254            /* ROM/RAM case */
    255            if (qtest_log_enabled) {
    256                /*
    257                 * With QTEST_LOG, use a normal, slow QTest memwrite. Prefix the log
    258                 * that will be written by qtest.c with a DMA tag, so we can reorder
    259                 * the resulting QTest trace so the DMA fills precede the last PIO/MMIO
    260                 * command.
    261                 */
    262                fprintf(stderr, "[DMA] ");
    263                if (double_fetch) {
    264                    fprintf(stderr, "[DOUBLE-FETCH] ");
    265                }
    266                fflush(stderr);
    267            }
    268            qtest_memwrite(qts_global, addr, buf, l);
    269        }
    270        len -= l;
    271        buf += l;
    272        addr += l;
    273
    274    }
    275    g_free(buf_base);
    276
    277    /* Increment the index of the pattern for the next DMA access */
    278    dma_pattern_index = (dma_pattern_index + 1) % dma_patterns->len;
    279}
    280
    281/*
    282 * Here we want to convert a fuzzer-provided [io-region-index, offset] to
    283 * a physical address. To do this, we iterate over all of the matched
    284 * MemoryRegions, checking whether each one lies within the particular I/O
    285 * space. Return the absolute address of the offset within the index'th
    286 * region that is a subregion of the io_space, along with the distance
    287 * until the end of that memory region.
    288 */
    289static bool get_io_address(address_range *result, AddressSpace *as,
    290                            uint8_t index,
    291                            uint32_t offset) {
    292    FlatView *view;
    293    view = as->current_map;
    294    g_assert(view);
    295    struct get_io_cb_info cb_info = {};
    296
    297    cb_info.index = index;
    298
    299    /*
    300     * Loop around the FlatView until we match "index" number of
    301     * fuzzable_memoryregions, or until we know that there are no matching
    302     * memory_regions.
    303     */
    304    do {
    305        flatview_for_each_range(view, get_io_address_cb, &cb_info);
    306    } while (cb_info.index != index && !cb_info.found);
    307
    308    *result = cb_info.result;
    309    if (result->size) {
    310        offset = offset % result->size;
    311        result->addr += offset;
    312        result->size -= offset;
    313    }
    314    return cb_info.found;
    315}
    316
    317static bool get_pio_address(address_range *result,
    318                            uint8_t index, uint16_t offset)
    319{
    320    /*
    321     * PIO BARs can be set past the maximum port address (0xFFFF). Thus, result
    322     * can contain an addr that extends past the PIO space. When we pass this
    323     * address to qtest_in/qtest_out, it is cast to a uint16_t, so we might end
    324     * up fuzzing a completely different MemoryRegion/Device. Therefore, check
    325     * that the address here is within the PIO space limits.
    326     */
    327    bool found = get_io_address(result, &address_space_io, index, offset);
    328    return result->addr <= 0xFFFF ? found : false;
    329}
    330
    331static bool get_mmio_address(address_range *result,
    332                             uint8_t index, uint32_t offset)
    333{
    334    return get_io_address(result, &address_space_memory, index, offset);
    335}
    336
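       /*
        * The PIO/MMIO ops below unpack their operands directly from the raw
        * fuzz input: a size selector, an index used to pick one of the
        * matched I/O regions, and an offset within that region (the out/write
        * variants also carry a value). Commands too short to fill the operand
        * struct are silently ignored.
        */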
    337static void op_in(QTestState *s, const unsigned char * data, size_t len)
    338{
    339    enum Sizes {Byte, Word, Long, end_sizes};
    340    struct {
    341        uint8_t size;
    342        uint8_t base;
    343        uint16_t offset;
    344    } a;
    345    address_range abs;
    346
    347    if (len < sizeof(a)) {
    348        return;
    349    }
    350    memcpy(&a, data, sizeof(a));
    351    if (get_pio_address(&abs, a.base, a.offset) == 0) {
    352        return;
    353    }
    354
    355    switch (a.size %= end_sizes) {
    356    case Byte:
    357        qtest_inb(s, abs.addr);
    358        break;
    359    case Word:
    360        if (abs.size >= 2) {
    361            qtest_inw(s, abs.addr);
    362        }
    363        break;
    364    case Long:
    365        if (abs.size >= 4) {
    366            qtest_inl(s, abs.addr);
    367        }
    368        break;
    369    }
    370}
    371
    372static void op_out(QTestState *s, const unsigned char * data, size_t len)
    373{
    374    enum Sizes {Byte, Word, Long, end_sizes};
    375    struct {
    376        uint8_t size;
    377        uint8_t base;
    378        uint16_t offset;
    379        uint32_t value;
    380    } a;
    381    address_range abs;
    382
    383    if (len < sizeof(a)) {
    384        return;
    385    }
    386    memcpy(&a, data, sizeof(a));
    387
    388    if (get_pio_address(&abs, a.base, a.offset) == 0) {
    389        return;
    390    }
    391
    392    switch (a.size %= end_sizes) {
    393    case Byte:
    394        qtest_outb(s, abs.addr, a.value & 0xFF);
    395        break;
    396    case Word:
    397        if (abs.size >= 2) {
    398            qtest_outw(s, abs.addr, a.value & 0xFFFF);
    399        }
    400        break;
    401    case Long:
    402        if (abs.size >= 4) {
    403            qtest_outl(s, abs.addr, a.value);
    404        }
    405        break;
    406    }
    407}
    408
    409static void op_read(QTestState *s, const unsigned char * data, size_t len)
    410{
    411    enum Sizes {Byte, Word, Long, Quad, end_sizes};
    412    struct {
    413        uint8_t size;
    414        uint8_t base;
    415        uint32_t offset;
    416    } a;
    417    address_range abs;
    418
    419    if (len < sizeof(a)) {
    420        return;
    421    }
    422    memcpy(&a, data, sizeof(a));
    423
    424    if (get_mmio_address(&abs, a.base, a.offset) == 0) {
    425        return;
    426    }
    427
    428    switch (a.size %= end_sizes) {
    429    case Byte:
    430        qtest_readb(s, abs.addr);
    431        break;
    432    case Word:
    433        if (abs.size >= 2) {
    434            qtest_readw(s, abs.addr);
    435        }
    436        break;
    437    case Long:
    438        if (abs.size >= 4) {
    439            qtest_readl(s, abs.addr);
    440        }
    441        break;
    442    case Quad:
    443        if (abs.size >= 8) {
    444            qtest_readq(s, abs.addr);
    445        }
    446        break;
    447    }
    448}
    449
    450static void op_write(QTestState *s, const unsigned char * data, size_t len)
    451{
    452    enum Sizes {Byte, Word, Long, Quad, end_sizes};
    453    struct {
    454        uint8_t size;
    455        uint8_t base;
    456        uint32_t offset;
    457        uint64_t value;
    458    } a;
    459    address_range abs;
    460
    461    if (len < sizeof(a)) {
    462        return;
    463    }
    464    memcpy(&a, data, sizeof(a));
    465
    466    if (get_mmio_address(&abs, a.base, a.offset) == 0) {
    467        return;
    468    }
    469
    470    switch (a.size %= end_sizes) {
    471    case Byte:
    472        qtest_writeb(s, abs.addr, a.value & 0xFF);
    473        break;
    474    case Word:
    475        if (abs.size >= 2) {
    476            qtest_writew(s, abs.addr, a.value & 0xFFFF);
    477        }
    478        break;
    479    case Long:
    480        if (abs.size >= 4) {
    481            qtest_writel(s, abs.addr, a.value & 0xFFFFFFFF);
    482        }
    483        break;
    484    case Quad:
    485        if (abs.size >= 8) {
    486            qtest_writeq(s, abs.addr, a.value);
    487        }
    488        break;
    489    }
    490}
    491
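       /*
        * PCI config-space accesses use the standard PC mechanism: write a
        * config address (enable bit 31, the device's devfn and a register
        * offset) to port 0xcf8, then access the data port at 0xcfc.
        */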
    492static void op_pci_read(QTestState *s, const unsigned char * data, size_t len)
    493{
    494    enum Sizes {Byte, Word, Long, end_sizes};
    495    struct {
    496        uint8_t size;
    497        uint8_t base;
    498        uint8_t offset;
    499    } a;
    500    if (len < sizeof(a) || fuzzable_pci_devices->len == 0 || pci_disabled) {
    501        return;
    502    }
    503    memcpy(&a, data, sizeof(a));
    504    PCIDevice *dev = g_ptr_array_index(fuzzable_pci_devices,
    505                                  a.base % fuzzable_pci_devices->len);
    506    int devfn = dev->devfn;
    507    qtest_outl(s, PCI_HOST_BRIDGE_CFG, (1U << 31) | (devfn << 8) | a.offset);
    508    switch (a.size %= end_sizes) {
    509    case Byte:
    510        qtest_inb(s, PCI_HOST_BRIDGE_DATA);
    511        break;
    512    case Word:
    513        qtest_inw(s, PCI_HOST_BRIDGE_DATA);
    514        break;
    515    case Long:
    516        qtest_inl(s, PCI_HOST_BRIDGE_DATA);
    517        break;
    518    }
    519}
    520
    521static void op_pci_write(QTestState *s, const unsigned char * data, size_t len)
    522{
    523    enum Sizes {Byte, Word, Long, end_sizes};
    524    struct {
    525        uint8_t size;
    526        uint8_t base;
    527        uint8_t offset;
    528        uint32_t value;
    529    } a;
    530    if (len < sizeof(a) || fuzzable_pci_devices->len == 0 || pci_disabled) {
    531        return;
    532    }
    533    memcpy(&a, data, sizeof(a));
    534    PCIDevice *dev = g_ptr_array_index(fuzzable_pci_devices,
    535                                  a.base % fuzzable_pci_devices->len);
    536    int devfn = dev->devfn;
    537    qtest_outl(s, PCI_HOST_BRIDGE_CFG, (1U << 31) | (devfn << 8) | a.offset);
    538    switch (a.size %= end_sizes) {
    539    case Byte:
    540        qtest_outb(s, PCI_HOST_BRIDGE_DATA, a.value & 0xFF);
    541        break;
    542    case Word:
    543        qtest_outw(s, PCI_HOST_BRIDGE_DATA, a.value & 0xFFFF);
    544        break;
    545    case Long:
    546        qtest_outl(s, PCI_HOST_BRIDGE_DATA, a.value & 0xFFFFFFFF);
    547        break;
    548    }
    549}
    550
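       /*
        * Operand layout: two header bytes (index and stride) followed by the
        * pattern data itself; everything between the header and the next
        * SEPARATOR becomes the repeating byte pattern.
        */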
    551static void op_add_dma_pattern(QTestState *s,
    552                               const unsigned char *data, size_t len)
    553{
    554    struct {
    555        /*
    556         * index and stride can be used to increment the index-th byte of the
    557         * pattern by the value stride, for each loop of the pattern.
    558         */
    559        uint8_t index;
    560        uint8_t stride;
    561    } a;
    562
    563    if (len < sizeof(a) + 1) {
    564        return;
    565    }
    566    memcpy(&a, data, sizeof(a));
    567    pattern p = {a.index, a.stride, len - sizeof(a), data + sizeof(a)};
    568    p.index = a.index % p.len;
    569    g_array_append_val(dma_patterns, p);
    570    return;
    571}
    572
    573static void op_clear_dma_patterns(QTestState *s,
    574                                  const unsigned char *data, size_t len)
    575{
    576    g_array_set_size(dma_patterns, 0);
    577    dma_pattern_index = 0;
    578}
    579
    580static void op_clock_step(QTestState *s, const unsigned char *data, size_t len)
    581{
    582    qtest_clock_step_next(s);
    583}
    584
    585static void op_disable_pci(QTestState *s, const unsigned char *data, size_t len)
    586{
    587    pci_disabled = true;
    588}
    589
    590static void handle_timeout(int sig)
    591{
    592    if (qtest_log_enabled) {
    593        fprintf(stderr, "[Timeout]\n");
    594        fflush(stderr);
    595    }
    596
    597    /*
    598     * If there is a crash, libfuzzer/ASAN forks a child to run an
    599     * "llvm-symbolizer" process for printing out a pretty stacktrace. It
    600     * communicates with this child using a pipe.  If we timeout+Exit, while
    601     * libfuzzer is still communicating with the llvm-symbolizer child, we will
    602     * be left with an orphan llvm-symbolizer process. Sometimes, this appears
    603     * to lead to a deadlock in the forkserver. Use waitpid to check if there
    604     * are any waitable children. If so, exit out of the signal-handler, and
    605     * let libfuzzer finish communicating with the child, and exit, on its own.
    606     */
    607    if (waitpid(-1, NULL, WNOHANG) == 0) {
    608        return;
    609    }
    610
    611    _Exit(0);
    612}
    613
    614/*
    615 * Here, we interpret random bytes from the fuzzer, as a sequence of commands.
    616 * Some commands can be variable-width, so we use a separator, SEPARATOR, to
    617 * specify the boundaries between commands. SEPARATOR is used to separate
    618 * "operations" in the fuzz input. Why use a separator, instead of just using
    619 * the operations' length to identify operation boundaries?
    620 *   1. This is a simple way to support variable-length operations
    621 *   2. This adds "stability" to the input.
    622 *      For example take the input "AbBcgDefg", where there is no separator and
    623 *      Opcodes are capitalized.
    624 *      Simply by removing the first byte, we end up with a very different
    625 *      sequence:
    626 *      BbcGdefg...
    627 *      By adding a separator, we avoid this problem:
    628 *      Ab SEP Bcg SEP Defg -> B SEP Bcg SEP Defg
    629 *      Since B uses two additional bytes as operands, the first "B" will be
    630 *      ignored. The fuzzer actively tries to reduce inputs, so such unused
    631 *      bytes are likely to be pruned, eventually.
    632 *
    633 *  SEPARATOR is trivial for the fuzzer to discover when using ASan. Optionally,
    634 *  SEPARATOR can be manually specified as a dictionary value (see libfuzzer's
    635 *  -dict), though this should not be necessary.
    636 *
    637 * As a result, the stream of bytes is converted into a sequence of commands.
    638 * In a simplified example where SEPARATOR is 0xFF:
    639 * 00 01 02 FF 03 04 05 06 FF 01 FF ...
    640 * becomes this sequence of commands:
    641 * 00 01 02    -> op00 (0102)   -> in (0102, 2)
    642 * 03 04 05 06 -> op03 (040506) -> write (040506, 3)
    643 * 01          -> op01 (-,0)    -> out (-,0)
    644 * ...
    645 *
    646 * Note here that it is the job of the individual opcode functions to check
    647 * that enough data was provided. E.g. for the last command, out (-,0), op_out
    648 * needs to notice that not enough data was provided to select an
    649 * address/value for the operation, and simply return.
    650 */
    651static void generic_fuzz(QTestState *s, const unsigned char *Data, size_t Size)
    652{
    653    void (*ops[]) (QTestState *s, const unsigned char* , size_t) = {
    654        [OP_IN]                 = op_in,
    655        [OP_OUT]                = op_out,
    656        [OP_READ]               = op_read,
    657        [OP_WRITE]              = op_write,
    658        [OP_PCI_READ]           = op_pci_read,
    659        [OP_PCI_WRITE]          = op_pci_write,
    660        [OP_DISABLE_PCI]        = op_disable_pci,
    661        [OP_ADD_DMA_PATTERN]    = op_add_dma_pattern,
    662        [OP_CLEAR_DMA_PATTERNS] = op_clear_dma_patterns,
    663        [OP_CLOCK_STEP]         = op_clock_step,
    664    };
    665    const unsigned char *cmd = Data;
    666    const unsigned char *nextcmd;
    667    size_t cmd_len;
    668    uint8_t op;
    669
    670    if (fork() == 0) {
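               /*
                * Run each input in a forked child so that whatever device and
                * guest state it mutates is discarded when the child exits;
                * the parent only flushes pending events and waits.
                */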
    671        struct sigaction sact;
    672        struct itimerval timer;
    673        sigset_t set;
    674        /*
    675         * Sometimes the fuzzer will find inputs that take quite a long time to
    676         * process. Often, these inputs do not result in new coverage.
    677         * Even if such inputs might be interesting, they can slow down the
    678         * fuzzer overall. Set a timeout for each command to avoid hurting
    679         * performance too much.
    680         */
    681        if (timeout) {
    682
    683            sigemptyset(&sact.sa_mask);
    684            sact.sa_flags   = SA_NODEFER;
    685            sact.sa_handler = handle_timeout;
    686            sigaction(SIGALRM, &sact, NULL);
    687
    688            sigemptyset(&set);
    689            sigaddset(&set, SIGALRM);
    690            pthread_sigmask(SIG_UNBLOCK, &set, NULL);
    691
    692            memset(&timer, 0, sizeof(timer));
    693            timer.it_value.tv_sec = timeout / USEC_IN_SEC;
    694            timer.it_value.tv_usec = timeout % USEC_IN_SEC;
    695        }
    696
    697        op_clear_dma_patterns(s, NULL, 0);
    698        pci_disabled = false;
    699
    700        while (cmd && Size) {
    701            /* Reset the timeout, each time we run a new command */
    702            if (timeout) {
    703                setitimer(ITIMER_REAL, &timer, NULL);
    704            }
    705
    706            /* Get the length until the next command or end of input */
    707            nextcmd = memmem(cmd, Size, SEPARATOR, strlen(SEPARATOR));
    708            cmd_len = nextcmd ? nextcmd - cmd : Size;
    709
    710            if (cmd_len > 0) {
    711                /* Interpret the first byte of the command as an opcode */
    712                op = *cmd % (sizeof(ops) / sizeof((ops)[0]));
    713                ops[op](s, cmd + 1, cmd_len - 1);
    714
    715                /* Run the main loop */
    716                flush_events(s);
    717            }
    718            /* Advance to the next command */
    719            cmd = nextcmd ? nextcmd + sizeof(SEPARATOR) - 1 : nextcmd;
    720            Size = Size - (cmd_len + sizeof(SEPARATOR) - 1);
    721            g_array_set_size(dma_regions, 0);
    722        }
    723        _Exit(0);
    724    } else {
    725        flush_events(s);
    726        wait(0);
    727    }
    728}
    729
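       /*
        * Example invocation (binary name, arguments and object pattern are
        * illustrative only):
        *   QEMU_FUZZ_ARGS='-machine q35 -device e1000e' \
        *   QEMU_FUZZ_OBJECTS='e1000e' \
        *   ./qemu-fuzz-i386 --fuzz-target=generic-fuzz
        */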
    730static void usage(void)
    731{
    732    printf("Please specify the following environment variables:\n");
    733    printf("QEMU_FUZZ_ARGS= the command line arguments passed to qemu\n");
    734    printf("QEMU_FUZZ_OBJECTS= "
    735            "a space separated list of QOM type names for objects to fuzz\n");
    736    printf("Optionally: QEMU_AVOID_DOUBLE_FETCH= "
    737            "Try to avoid racy DMA double fetch bugs? %d by default\n",
    738            avoid_double_fetches);
    739    printf("Optionally: QEMU_FUZZ_TIMEOUT= Specify a custom timeout (us). "
    740            "0 to disable. %d by default\n", timeout);
    741    exit(0);
    742}
    743
    744static int locate_fuzz_memory_regions(Object *child, void *opaque)
    745{
    746    const char *name;
    747    MemoryRegion *mr;
    748    if (object_dynamic_cast(child, TYPE_MEMORY_REGION)) {
    749        mr = MEMORY_REGION(child);
    750        if ((memory_region_is_ram(mr) ||
    751            memory_region_is_ram_device(mr) ||
    752            memory_region_is_rom(mr)) == false) {
    753            name = object_get_canonical_path_component(child);
    754            /*
    755             * We don't want duplicate pointers to the same MemoryRegion; the
    756             * hash table is keyed on the pointer, so re-inserting one is a no-op.
    757             */
    758            g_hash_table_insert(fuzzable_memoryregions, mr, (gpointer)true);
    759        }
    760    }
    761    return 0;
    762}
    763
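       /*
        * Walk the QOM tree: any object whose lower-cased type name matches
        * the user-supplied glob pattern has its child MemoryRegions collected,
        * and matching PCI devices are remembered so their BARs can be mapped
        * and their config space fuzzed. MemoryRegions can also be matched
        * directly by their canonical path component.
        */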
    764static int locate_fuzz_objects(Object *child, void *opaque)
    765{
    766    GString *type_name;
    767    GString *path_name;
    768    char *pattern = opaque;
    769
    770    type_name = g_string_new(object_get_typename(child));
    771    g_string_ascii_down(type_name);
    772    if (g_pattern_match_simple(pattern, type_name->str)) {
    773        /* Find and save ptrs to any child MemoryRegions */
    774        object_child_foreach_recursive(child, locate_fuzz_memory_regions, NULL);
    775
    776        /*
    777         * We matched an object. If it's a PCI device, store a pointer to it so
    778         * we can map BARs and fuzz its config space.
    779         */
    780        if (object_dynamic_cast(OBJECT(child), TYPE_PCI_DEVICE)) {
    781            /*
    782             * Don't want duplicate pointers to the same PCIDevice, so remove
    783             * copies of the pointer, before adding it.
    784             */
    785            g_ptr_array_remove_fast(fuzzable_pci_devices, PCI_DEVICE(child));
    786            g_ptr_array_add(fuzzable_pci_devices, PCI_DEVICE(child));
    787        }
    788    } else if (object_dynamic_cast(OBJECT(child), TYPE_MEMORY_REGION)) {
    789        path_name = g_string_new(object_get_canonical_path_component(child));
    790        g_string_ascii_down(path_name);
    791        if (g_pattern_match_simple(pattern, path_name->str)) {
    792            MemoryRegion *mr;
    793            mr = MEMORY_REGION(child);
    794            if ((memory_region_is_ram(mr) ||
    795                 memory_region_is_ram_device(mr) ||
    796                 memory_region_is_rom(mr)) == false) {
    797                g_hash_table_insert(fuzzable_memoryregions, mr, (gpointer)true);
    798            }
    799        }
    800        g_string_free(path_name, true);
    801    }
    802    g_string_free(type_name, true);
    803    return 0;
    804}
    805
    806
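       /*
        * For each matched PCI device, map every BAR that has a non-zero size
        * and enable the device so that its I/O and memory regions respond to
        * the fuzzer's accesses.
        */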
    807static void pci_enum(gpointer pcidev, gpointer bus)
    808{
    809    PCIDevice *dev = pcidev;
    810    QPCIDevice *qdev;
    811    int i;
    812
    813    qdev = qpci_device_find(bus, dev->devfn);
    814    g_assert(qdev != NULL);
    815    for (i = 0; i < 6; i++) {
    816        if (dev->io_regions[i].size) {
    817            qpci_iomap(qdev, i, NULL);
    818        }
    819    }
    820    qpci_device_enable(qdev);
    821    g_free(qdev);
    822}
    823
    824static void generic_pre_fuzz(QTestState *s)
    825{
    826    GHashTableIter iter;
    827    MemoryRegion *mr;
    828    QPCIBus *pcibus;
    829    char **result;
    830    GString *name_pattern;
    831
    832    if (!getenv("QEMU_FUZZ_OBJECTS")) {
    833        usage();
    834    }
    835    if (getenv("QTEST_LOG")) {
    836        qtest_log_enabled = 1;
    837    }
    838    if (getenv("QEMU_AVOID_DOUBLE_FETCH")) {
    839        avoid_double_fetches = 1;
    840    }
    841    if (getenv("QEMU_FUZZ_TIMEOUT")) {
    842        timeout = g_ascii_strtoll(getenv("QEMU_FUZZ_TIMEOUT"), NULL, 0);
    843    }
    844    qts_global = s;
    845
    846    /*
    847     * Create a special device that we can use to back DMA buffers at very
    848     * high memory addresses
    849     */
    850    sparse_mem_mr = sparse_mem_init(0, UINT64_MAX);
    851
    852    dma_regions = g_array_new(false, false, sizeof(address_range));
    853    dma_patterns = g_array_new(false, false, sizeof(pattern));
    854
    855    fuzzable_memoryregions = g_hash_table_new(NULL, NULL);
    856    fuzzable_pci_devices   = g_ptr_array_new();
    857
    858    result = g_strsplit(getenv("QEMU_FUZZ_OBJECTS"), " ", -1);
    859    for (int i = 0; result[i] != NULL; i++) {
    860        name_pattern = g_string_new(result[i]);
    861        /*
    862         * Make the pattern lowercase. We do the same for all the MemoryRegion
    863         * and Type names so the configs are case-insensitive.
    864         */
    865        g_string_ascii_down(name_pattern);
    866        printf("Matching objects by name %s\n", result[i]);
    867        object_child_foreach_recursive(qdev_get_machine(),
    868                                    locate_fuzz_objects,
    869                                    name_pattern->str);
    870        g_string_free(name_pattern, true);
    871    }
    872    g_strfreev(result);
    873    printf("This process will try to fuzz the following MemoryRegions:\n");
    874
    875    g_hash_table_iter_init(&iter, fuzzable_memoryregions);
    876    while (g_hash_table_iter_next(&iter, (gpointer)&mr, NULL)) {
    877        printf("  * %s (size 0x%" PRIx64 ")\n",
    878               object_get_canonical_path_component(&(mr->parent_obj)),
    879               memory_region_size(mr));
    880    }
    881
    882    if (!g_hash_table_size(fuzzable_memoryregions)) {
    883        printf("No fuzzable memory regions found...\n");
    884        exit(1);
    885    }
    886
    887    pcibus = qpci_new_pc(s, NULL);
    888    g_ptr_array_foreach(fuzzable_pci_devices, pci_enum, pcibus);
    889    qpci_free_pc(pcibus);
    890
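           /*
            * Set up the shared-memory coverage counters used by the fork()-based
            * fuzzing loop (see fork_fuzz.h), so coverage collected in child
            * processes is visible to libFuzzer in the parent.
            */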
    891    counter_shm_init();
    892}
    893
    894/*
    895 * When libfuzzer gives us two inputs to combine, return a new input with the
    896 * following structure:
    897 *
    898 * Input 1 (data1)
    899 * SEPARATOR
    900 * Clear out the DMA Patterns
    901 * SEPARATOR
    902 * Disable the pci_read/write instructions
    903 * SEPARATOR
    904 * Input 2 (data2)
    905 *
    906 * The idea is to collate the core behaviors of the two inputs.
    907 * For example:
    908 * Input 1: maps a device's BARs, sets up three DMA patterns, and triggers
    909 *          device functionality A
    910 * Input 2: maps a device's BARs, sets up one DMA pattern, and triggers device
    911 *          functionality B
    912 *
    913 * This function attempts to produce an input that:
    914 * Output: maps a device's BARs, sets up three DMA patterns, triggers
    915 *          device functionality A, replaces the DMA patterns with a single
    916 *          pattern, and triggers device functionality B.
    917 */
    918static size_t generic_fuzz_crossover(const uint8_t *data1, size_t size1, const
    919                                     uint8_t *data2, size_t size2, uint8_t *out,
    920                                     size_t max_out_size, unsigned int seed)
    921{
    922    size_t copy_len = 0, size = 0;
    923
    924    /* Check that we have enough space for data1 and at least part of data2 */
    925    if (max_out_size <= size1 + strlen(SEPARATOR) * 3 + 2) {
    926        return 0;
    927    }
    928
    929    /* Copy in the first input */
    930    copy_len = size1;
    931    memcpy(out + size, data1, copy_len);
    932    size += copy_len;
    933    max_out_size -= copy_len;
    934
    935    /* Append a separator */
    936    copy_len = strlen(SEPARATOR);
    937    memcpy(out + size, SEPARATOR, copy_len);
    938    size += copy_len;
    939    max_out_size -= copy_len;
    940
    941    /* Clear out the DMA Patterns */
    942    copy_len = 1;
    943    if (copy_len) {
    944        out[size] = OP_CLEAR_DMA_PATTERNS;
    945    }
    946    size += copy_len;
    947    max_out_size -= copy_len;
    948
    949    /* Append a separator */
    950    copy_len = strlen(SEPARATOR);
    951    memcpy(out + size, SEPARATOR, copy_len);
    952    size += copy_len;
    953    max_out_size -= copy_len;
    954
    955    /* Disable PCI ops. Assume data1 took care of setting up PCI */
    956    copy_len = 1;
    957    if (copy_len) {
    958        out[size] = OP_DISABLE_PCI;
    959    }
    960    size += copy_len;
    961    max_out_size -= copy_len;
    962
    963    /* Append a separator */
    964    copy_len = strlen(SEPARATOR);
    965    memcpy(out + size, SEPARATOR, copy_len);
    966    size += copy_len;
    967    max_out_size -= copy_len;
    968
    969    /* Copy over the second input */
    970    copy_len = MIN(size2, max_out_size);
    971    memcpy(out + size, data2, copy_len);
    972    size += copy_len;
    973    max_out_size -= copy_len;
    974
    975    return size;
    976}
    977
    978
    979static GString *generic_fuzz_cmdline(FuzzTarget *t)
    980{
    981    GString *cmd_line = g_string_new(TARGET_NAME);
    982    if (!getenv("QEMU_FUZZ_ARGS")) {
    983        usage();
    984    }
    985    g_string_append_printf(cmd_line, " -display none \
    986                                      -machine accel=qtest, \
    987                                      -m 512M %s ", getenv("QEMU_FUZZ_ARGS"));
    988    return cmd_line;
    989}
    990
    991static GString *generic_fuzz_predefined_config_cmdline(FuzzTarget *t)
    992{
    993    gchar *args;
    994    const generic_fuzz_config *config;
    995    g_assert(t->opaque);
    996
    997    config = t->opaque;
    998    setenv("QEMU_AVOID_DOUBLE_FETCH", "1", 1);
    999    if (config->argfunc) {
   1000        args = config->argfunc();
   1001        setenv("QEMU_FUZZ_ARGS", args, 1);
   1002        g_free(args);
   1003    } else {
   1004        g_assert_nonnull(config->args);
   1005        setenv("QEMU_FUZZ_ARGS", config->args, 1);
   1006    }
   1007    setenv("QEMU_FUZZ_OBJECTS", config->objects, 1);
   1008    return generic_fuzz_cmdline(t);
   1009}
   1010
   1011static void register_generic_fuzz_targets(void)
   1012{
   1013    fuzz_add_target(&(FuzzTarget){
   1014            .name = "generic-fuzz",
   1015            .description = "Fuzz based on any qemu command-line args. ",
   1016            .get_init_cmdline = generic_fuzz_cmdline,
   1017            .pre_fuzz = generic_pre_fuzz,
   1018            .fuzz = generic_fuzz,
   1019            .crossover = generic_fuzz_crossover
   1020    });
   1021
   1022    GString *name;
   1023    const generic_fuzz_config *config;
   1024
   1025    for (int i = 0;
   1026         i < sizeof(predefined_configs) / sizeof(generic_fuzz_config);
   1027         i++) {
   1028        config = predefined_configs + i;
   1029        name = g_string_new("generic-fuzz");
   1030        g_string_append_printf(name, "-%s", config->name);
   1031        fuzz_add_target(&(FuzzTarget){
   1032                .name = name->str,
   1033                .description = "Predefined generic-fuzz config.",
   1034                .get_init_cmdline = generic_fuzz_predefined_config_cmdline,
   1035                .pre_fuzz = generic_pre_fuzz,
   1036                .fuzz = generic_fuzz,
   1037                .crossover = generic_fuzz_crossover,
   1038                .opaque = (void *)config
   1039        });
   1040    }
   1041}
   1042
   1043fuzz_target_init(register_generic_fuzz_targets);