cachepc-qemu

Fork of AMDESE/qemu with changes for the cachepc side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu

model.c (9218B)


/* Coverity Scan model
 *
 * Copyright (C) 2014 Red Hat, Inc.
 *
 * Authors:
 *  Markus Armbruster <armbru@redhat.com>
 *  Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or, at your
 * option, any later version.  See the COPYING file in the top-level directory.
 */


/*
 * This is the source code for our Coverity user model file.  The
 * purpose of user models is to increase scanning accuracy by explaining
 * code Coverity can't see (out of tree libraries) or doesn't
 * sufficiently understand.  Better accuracy means both fewer false
 * positives and more true defects.  Memory leaks in particular.
 *
 * - A model file can't import any header files.  Some built-in primitives are
 *   available but not wchar_t, NULL etc.
 * - Modeling doesn't need full structs and typedefs. Rudimentary structs
 *   and similar types are sufficient.
 * - An uninitialized local variable signifies that the variable could be
 *   any value.
 *
 * The model file must be uploaded by an admin in the analysis settings of
 * http://scan.coverity.com/projects/378
 */
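
/*
 * Illustrative sketch only, not part of the model proper: a model for a
 * hypothetical out-of-tree helper (the name is made up).  Leaving the
 * local uninitialized tells Coverity the returned value is unconstrained.
 */
#if 0
int example_read_status(void)
{
    int status;     /* uninitialized: Coverity treats this as "any value" */
    return status;
}
#endif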

#define NULL ((void *)0)

typedef unsigned char uint8_t;
typedef char int8_t;
typedef unsigned int uint32_t;
typedef int int32_t;
typedef long ssize_t;
typedef unsigned long long uint64_t;
typedef long long int64_t;
typedef _Bool bool;

typedef struct va_list_str *va_list;

/* exec.c */

typedef struct AddressSpace AddressSpace;
typedef struct MemoryRegionCache MemoryRegionCache;
typedef uint64_t hwaddr;
typedef uint32_t MemTxResult;
typedef struct MemTxAttrs {} MemTxAttrs;

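/*
 * Helpers for the address_space_*() models below.  __bufwrite() models a
 * call that fills all len bytes of buf: touching buf[0] and buf[len-1]
 * lets Coverity's overrun checker see accesses at both ends of the buffer,
 * and __coverity_writeall__() marks the whole buffer as written.
 * __bufread() models a call that only reads the buffer, again touching the
 * first and last byte so undersized buffers get flagged.
 */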
static void __bufwrite(uint8_t *buf, ssize_t len)
{
    int first, last;
    __coverity_negative_sink__(len);
    if (len == 0) return;
    buf[0] = first;
    buf[len-1] = last;
    __coverity_writeall__(buf);
}

static void __bufread(uint8_t *buf, ssize_t len)
{
    __coverity_negative_sink__(len);
    if (len == 0) return;
    int first = buf[0];
    int last = buf[len-1];
}

MemTxResult address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
                                      MemTxAttrs attrs,
                                      void *buf, int len)
{
    MemTxResult result;
    // TODO: investigate impact of treating reads as producing
    // tainted data, with __coverity_tainted_data_argument__(buf).
    __bufwrite(buf, len);
    return result;
}

MemTxResult address_space_write_cached(MemoryRegionCache *cache, hwaddr addr,
                                MemTxAttrs attrs,
                                const void *buf, int len)
{
    MemTxResult result;
    __bufread(buf, len);
    return result;
}

MemTxResult address_space_rw_cached(MemoryRegionCache *cache, hwaddr addr,
                                    MemTxAttrs attrs,
                                    void *buf, int len, bool is_write)
{
    if (is_write) {
        return address_space_write_cached(cache, addr, attrs, buf, len);
    } else {
        return address_space_read_cached(cache, addr, attrs, buf, len);
    }
}

MemTxResult address_space_read(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs,
                               void *buf, int len)
{
    MemTxResult result;
    // TODO: investigate impact of treating reads as producing
    // tainted data, with __coverity_tainted_data_argument__(buf).
    __bufwrite(buf, len);
    return result;
}

MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
                                MemTxAttrs attrs,
                                const void *buf, int len)
{
    MemTxResult result;
    __bufread(buf, len);
    return result;
}

MemTxResult address_space_rw(AddressSpace *as, hwaddr addr,
                             MemTxAttrs attrs,
                             void *buf, int len, bool is_write)
{
    if (is_write) {
        return address_space_write(as, addr, attrs, buf, len);
    } else {
        return address_space_read(as, addr, attrs, buf, len);
    }
}
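
/*
 * Illustrative sketch, hypothetical code that is not part of the model:
 * because the models above write len bytes into buf, Coverity can flag a
 * caller that passes a buffer smaller than len.
 */
#if 0
void example_overrun(AddressSpace *as, hwaddr addr, MemTxAttrs attrs)
{
    uint8_t small[4];
    address_space_read(as, addr, attrs, small, 8); /* overrun reported */
}
#endif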

/* Tainting */

typedef struct {} name2keysym_t;
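/*
 * A positive result from the real get_keysym() means name matched a known
 * keysym, so the model treats the string's content as sanitized for
 * Coverity's tainted-string checkers.
 */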
static int get_keysym(const name2keysym_t *table,
                      const char *name)
{
    int result;
    if (result > 0) {
        __coverity_tainted_string_sanitize_content__(name);
        return result;
    } else {
        return 0;
    }
}

/* Replay data is considered trusted.  */
uint8_t replay_get_byte(void)
{
    uint8_t byte;
    return byte;
}


/*
 * GLib memory allocation functions.
 *
 * Note that we ignore the fact that g_malloc of 0 bytes returns NULL,
 * and g_realloc of 0 bytes frees the pointer.
 *
 * Modeling this would result in Coverity flagging a lot of memory
 * allocations as potentially returning NULL, and asking us to check
 * whether the result of the allocation is NULL or not.  However, the
 * resulting pointer should never be dereferenced anyway, and in fact
 * it is not in the vast majority of cases.
 *
 * If a dereference did happen, this would suppress a defect report
 * for an actual null pointer dereference.  But it's too unlikely to
 * be worth wading through the false positives, and with some luck
 * we'll get a buffer overflow reported anyway.
 */

/*
 * Allocation primitives, cannot return NULL
 * See also Coverity's library/generic/libc/all/all.c
 */

void *g_malloc_n(size_t nmemb, size_t size)
{
    void *ptr;

    __coverity_negative_sink__(nmemb);
    __coverity_negative_sink__(size);
    ptr = __coverity_alloc__(nmemb * size);
    if (!ptr) {
        __coverity_panic__();
    }
    __coverity_mark_as_uninitialized_buffer__(ptr);
    __coverity_mark_as_afm_allocated__(ptr, AFM_free);
    return ptr;
}

void *g_malloc0_n(size_t nmemb, size_t size)
{
    void *ptr;

    __coverity_negative_sink__(nmemb);
    __coverity_negative_sink__(size);
    ptr = __coverity_alloc__(nmemb * size);
    if (!ptr) {
        __coverity_panic__();
    }
    __coverity_writeall0__(ptr);
    __coverity_mark_as_afm_allocated__(ptr, AFM_free);
    return ptr;
}

void *g_realloc_n(void *ptr, size_t nmemb, size_t size)
{
    __coverity_negative_sink__(nmemb);
    __coverity_negative_sink__(size);
    __coverity_escape__(ptr);
    ptr = __coverity_alloc__(nmemb * size);
    if (!ptr) {
        __coverity_panic__();
    }
    /*
     * Memory beyond the old size isn't actually initialized.  Can't
     * model that.  See Coverity's realloc() model
     */
    __coverity_writeall__(ptr);
    __coverity_mark_as_afm_allocated__(ptr, AFM_free);
    return ptr;
}

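/*
 * g_free() is the "free" side of the AFM_free allocation family used
 * above: pairing __coverity_mark_as_afm_allocated__() with
 * __coverity_mark_as_afm_freed__() lets Coverity match allocations with
 * the right free function when checking for leaks and mismatched frees.
 */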
void g_free(void *ptr)
{
    __coverity_free__(ptr);
    __coverity_mark_as_afm_freed__(ptr, AFM_free);
}

/*
 * Derive the g_try_FOO_n() from the g_FOO_n() by adding indeterminate
 * out of memory conditions
 */

void *g_try_malloc_n(size_t nmemb, size_t size)
{
    int nomem;

    if (nomem) {
        return NULL;
    }
    return g_malloc_n(nmemb, size);
}

void *g_try_malloc0_n(size_t nmemb, size_t size)
{
    int nomem;

    if (nomem) {
        return NULL;
    }
    return g_malloc0_n(nmemb, size);
}

void *g_try_realloc_n(void *ptr, size_t nmemb, size_t size)
{
    int nomem;

    if (nomem) {
        return NULL;
    }
    return g_realloc_n(ptr, nmemb, size);
}

/* Derive the g_FOO() from the g_FOO_n() */

void *g_malloc(size_t size)
{
    void *ptr;

    __coverity_negative_sink__(size);
    ptr = __coverity_alloc__(size);
    if (!ptr) {
        __coverity_panic__();
    }
    __coverity_mark_as_uninitialized_buffer__(ptr);
    __coverity_mark_as_afm_allocated__(ptr, AFM_free);
    return ptr;
}

void *g_malloc0(size_t size)
{
    void *ptr;

    __coverity_negative_sink__(size);
    ptr = __coverity_alloc__(size);
    if (!ptr) {
        __coverity_panic__();
    }
    __coverity_writeall0__(ptr);
    __coverity_mark_as_afm_allocated__(ptr, AFM_free);
    return ptr;
}

void *g_realloc(void *ptr, size_t size)
{
    __coverity_negative_sink__(size);
    __coverity_escape__(ptr);
    ptr = __coverity_alloc__(size);
    if (!ptr) {
        __coverity_panic__();
    }
    /*
     * Memory beyond the old size isn't actually initialized.  Can't
     * model that.  See Coverity's realloc() model
     */
    __coverity_writeall__(ptr);
    __coverity_mark_as_afm_allocated__(ptr, AFM_free);
    return ptr;
}

void *g_try_malloc(size_t size)
{
    int nomem;

    if (nomem) {
        return NULL;
    }
    return g_malloc(size);
}

void *g_try_malloc0(size_t size)
{
    int nomem;

    if (nomem) {
        return NULL;
    }
    return g_malloc0(size);
}

void *g_try_realloc(void *ptr, size_t size)
{
    int nomem;

    if (nomem) {
        return NULL;
    }
    return g_realloc(ptr, size);
}

/* Other glib functions */

typedef struct pollfd GPollFD;

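/*
 * poll() is only declared here, not modeled; g_poll() simply forwards to
 * it so that Coverity's own handling of the libc poll() applies (an
 * assumption based on the library/generic/libc models referenced above).
 */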
int poll();

int g_poll (GPollFD *fds, unsigned nfds, int timeout)
{
    return poll(fds, nfds, timeout);
}

typedef struct _GIOChannel GIOChannel;
GIOChannel *g_io_channel_unix_new(int fd)
{
    GIOChannel *c = g_malloc0(sizeof(GIOChannel));
    /* The fd is handed off to the channel; mark it as escaping so it is
     * not reported as leaked. */
    __coverity_escape__(fd);
    return c;
}

void g_assertion_message_expr(const char     *domain,
                              const char     *file,
                              int             line,
                              const char     *func,
                              const char     *expr)
{
    __coverity_panic__();
}