cachepc-qemu

Fork of AMDESE/qemu with changes for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu
Log | Files | Refs | Submodules | LICENSE | sfeed.txt

memory_mapping.c (11500B)


      1/*
      2 * QEMU memory mapping
      3 *
      4 * Copyright Fujitsu, Corp. 2011, 2012
      5 *
      6 * Authors:
      7 *     Wen Congyang <wency@cn.fujitsu.com>
      8 *
      9 * This work is licensed under the terms of the GNU GPL, version 2 or later.
     10 * See the COPYING file in the top-level directory.
     11 *
     12 */
     13
     14#include "qemu/osdep.h"
     15#include "qapi/error.h"
     16
     17#include "sysemu/memory_mapping.h"
     18#include "exec/memory.h"
     19#include "exec/address-spaces.h"
     20
     21//#define DEBUG_GUEST_PHYS_REGION_ADD
     22
     23static void memory_mapping_list_add_mapping_sorted(MemoryMappingList *list,
     24                                                   MemoryMapping *mapping)
     25{
     26    MemoryMapping *p;
     27
     28    QTAILQ_FOREACH(p, &list->head, next) {
     29        if (p->phys_addr >= mapping->phys_addr) {
     30            QTAILQ_INSERT_BEFORE(p, mapping, next);
     31            return;
     32        }
     33    }
     34    QTAILQ_INSERT_TAIL(&list->head, mapping, next);
     35}
     36
     37static void create_new_memory_mapping(MemoryMappingList *list,
     38                                      hwaddr phys_addr,
     39                                      hwaddr virt_addr,
     40                                      ram_addr_t length)
     41{
     42    MemoryMapping *memory_mapping;
     43
     44    memory_mapping = g_malloc(sizeof(MemoryMapping));
     45    memory_mapping->phys_addr = phys_addr;
     46    memory_mapping->virt_addr = virt_addr;
     47    memory_mapping->length = length;
     48    list->last_mapping = memory_mapping;
     49    list->num++;
     50    memory_mapping_list_add_mapping_sorted(list, memory_mapping);
     51}
     52
     53static inline bool mapping_contiguous(MemoryMapping *map,
     54                                      hwaddr phys_addr,
     55                                      hwaddr virt_addr)
     56{
     57    return phys_addr == map->phys_addr + map->length &&
     58           virt_addr == map->virt_addr + map->length;
     59}
     60
     61/*
     62 * [map->phys_addr, map->phys_addr + map->length) and
     63 * [phys_addr, phys_addr + length) have intersection?
     64 */
     65static inline bool mapping_have_same_region(MemoryMapping *map,
     66                                            hwaddr phys_addr,
     67                                            ram_addr_t length)
     68{
     69    return !(phys_addr + length < map->phys_addr ||
     70             phys_addr >= map->phys_addr + map->length);
     71}
     72
     73/*
     74 * [map->phys_addr, map->phys_addr + map->length) and
     75 * [phys_addr, phys_addr + length) have intersection. The virtual address in the
     76 * intersection are the same?
     77 */
     78static inline bool mapping_conflict(MemoryMapping *map,
     79                                    hwaddr phys_addr,
     80                                    hwaddr virt_addr)
     81{
     82    return virt_addr - map->virt_addr != phys_addr - map->phys_addr;
     83}
     84
     85/*
     86 * [map->virt_addr, map->virt_addr + map->length) and
     87 * [virt_addr, virt_addr + length) have intersection. And the physical address
     88 * in the intersection are the same.
     89 */
     90static inline void mapping_merge(MemoryMapping *map,
     91                                 hwaddr virt_addr,
     92                                 ram_addr_t length)
     93{
     94    if (virt_addr < map->virt_addr) {
     95        map->length += map->virt_addr - virt_addr;
     96        map->virt_addr = virt_addr;
     97    }
     98
     99    if ((virt_addr + length) >
    100        (map->virt_addr + map->length)) {
    101        map->length = virt_addr + length - map->virt_addr;
    102    }
    103}
    104
/*
 * Add [phys_addr, phys_addr + length) mapped at virt_addr to @list,
 * merging with an existing mapping when possible:
 *   - if it directly extends the most recently touched mapping, grow it;
 *   - otherwise scan the sorted list for a mapping it can extend, or one
 *     covering the same physical region with consistent virtual addresses;
 *   - failing that, insert it as a new mapping at its sorted position.
 */
void memory_mapping_list_add_merge_sorted(MemoryMappingList *list,
                                          hwaddr phys_addr,
                                          hwaddr virt_addr,
                                          ram_addr_t length)
{
    MemoryMapping *memory_mapping, *last_mapping;

    if (QTAILQ_EMPTY(&list->head)) {
        create_new_memory_mapping(list, phys_addr, virt_addr, length);
        return;
    }

    /* Fast path: the new range usually continues the last touched mapping. */
    last_mapping = list->last_mapping;
    if (last_mapping) {
        if (mapping_contiguous(last_mapping, phys_addr, virt_addr)) {
            last_mapping->length += length;
            return;
        }
    }

    QTAILQ_FOREACH(memory_mapping, &list->head, next) {
        if (mapping_contiguous(memory_mapping, phys_addr, virt_addr)) {
            memory_mapping->length += length;
            list->last_mapping = memory_mapping;
            return;
        }

        if (phys_addr + length < memory_mapping->phys_addr) {
            /* The list is sorted by phys_addr, so no later entry can be
             * contiguous or overlapping either:
             * create a new region before memory_mapping */
            break;
        }

        if (mapping_have_same_region(memory_mapping, phys_addr, length)) {
            if (mapping_conflict(memory_mapping, phys_addr, virt_addr)) {
                /* Same physical bytes, different virtual addresses: keep
                 * both mappings and keep scanning. */
                continue;
            }

            /* merge this region into memory_mapping */
            mapping_merge(memory_mapping, virt_addr, length);
            list->last_mapping = memory_mapping;
            return;
        }
    }

    /* this region can not be merged into any existed memory mapping. */
    create_new_memory_mapping(list, phys_addr, virt_addr, length);
}
    152
    153void memory_mapping_list_free(MemoryMappingList *list)
    154{
    155    MemoryMapping *p, *q;
    156
    157    QTAILQ_FOREACH_SAFE(p, &list->head, next, q) {
    158        QTAILQ_REMOVE(&list->head, p, next);
    159        g_free(p);
    160    }
    161
    162    list->num = 0;
    163    list->last_mapping = NULL;
    164}
    165
    166void memory_mapping_list_init(MemoryMappingList *list)
    167{
    168    list->num = 0;
    169    list->last_mapping = NULL;
    170    QTAILQ_INIT(&list->head);
    171}
    172
    173void guest_phys_blocks_free(GuestPhysBlockList *list)
    174{
    175    GuestPhysBlock *p, *q;
    176
    177    QTAILQ_FOREACH_SAFE(p, &list->head, next, q) {
    178        QTAILQ_REMOVE(&list->head, p, next);
    179        memory_region_unref(p->mr);
    180        g_free(p);
    181    }
    182    list->num = 0;
    183}
    184
    185void guest_phys_blocks_init(GuestPhysBlockList *list)
    186{
    187    list->num = 0;
    188    QTAILQ_INIT(&list->head);
    189}
    190
/* Pairs a MemoryListener with the block list it populates, so the
 * region_add callback can recover the list via container_of(). */
typedef struct GuestPhysListener {
    GuestPhysBlockList *list;
    MemoryListener listener;
} GuestPhysListener;
    195
/*
 * Record the RAM covered by @section in g->list.  When the section is
 * contiguous with the last recorded block -- in guest-physical space,
 * in host-virtual space, and within the same memory region -- the last
 * block is extended instead of allocating a new one.
 */
static void guest_phys_block_add_section(GuestPhysListener *g,
                                         MemoryRegionSection *section)
{
    const hwaddr target_start = section->offset_within_address_space;
    const hwaddr target_end = target_start + int128_get64(section->size);
    uint8_t *host_addr = memory_region_get_ram_ptr(section->mr) +
                         section->offset_within_region;
    GuestPhysBlock *predecessor = NULL;

    /* find continuity in guest physical address space */
    if (!QTAILQ_EMPTY(&g->list->head)) {
        hwaddr predecessor_size;

        predecessor = QTAILQ_LAST(&g->list->head);
        predecessor_size = predecessor->target_end - predecessor->target_start;

        /* the memory API guarantees monotonically increasing traversal */
        g_assert(predecessor->target_end <= target_start);

        /* we want continuity in both guest-physical and host-virtual memory */
        if (predecessor->target_end < target_start ||
            predecessor->host_addr + predecessor_size != host_addr ||
            predecessor->mr != section->mr) {
            predecessor = NULL;
        }
    }

    if (predecessor == NULL) {
        /* isolated mapping, allocate it and add it to the list */
        GuestPhysBlock *block = g_malloc0(sizeof *block);

        block->target_start = target_start;
        block->target_end   = target_end;
        block->host_addr    = host_addr;
        block->mr           = section->mr;
        /* reference is released again in guest_phys_blocks_free() */
        memory_region_ref(section->mr);

        QTAILQ_INSERT_TAIL(&g->list->head, block, next);
        ++g->list->num;
    } else {
        /* expand predecessor until @target_end; predecessor's start doesn't
         * change
         */
        predecessor->target_end = target_end;
    }

#ifdef DEBUG_GUEST_PHYS_REGION_ADD
    fprintf(stderr, "%s: target_start=" TARGET_FMT_plx " target_end="
            TARGET_FMT_plx ": %s (count: %u)\n", __func__, target_start,
            target_end, predecessor ? "joined" : "added", g->list->num);
#endif
}
    248
    249static int guest_phys_ram_populate_cb(MemoryRegionSection *section,
    250                                      void *opaque)
    251{
    252    GuestPhysListener *g = opaque;
    253
    254    guest_phys_block_add_section(g, section);
    255    return 0;
    256}
    257
    258static void guest_phys_blocks_region_add(MemoryListener *listener,
    259                                         MemoryRegionSection *section)
    260{
    261    GuestPhysListener *g = container_of(listener, GuestPhysListener, listener);
    262
    263    /* we only care about RAM */
    264    if (!memory_region_is_ram(section->mr) ||
    265        memory_region_is_ram_device(section->mr) ||
    266        memory_region_is_nonvolatile(section->mr)) {
    267        return;
    268    }
    269
    270    /* for special sparse regions, only add populated parts */
    271    if (memory_region_has_ram_discard_manager(section->mr)) {
    272        RamDiscardManager *rdm;
    273
    274        rdm = memory_region_get_ram_discard_manager(section->mr);
    275        ram_discard_manager_replay_populated(rdm, section,
    276                                             guest_phys_ram_populate_cb, g);
    277        return;
    278    }
    279
    280    guest_phys_block_add_section(g, section);
    281}
    282
    283void guest_phys_blocks_append(GuestPhysBlockList *list)
    284{
    285    GuestPhysListener g = { 0 };
    286
    287    g.list = list;
    288    g.listener.region_add = &guest_phys_blocks_region_add;
    289    memory_listener_register(&g.listener, &address_space_memory);
    290    memory_listener_unregister(&g.listener);
    291}
    292
    293static CPUState *find_paging_enabled_cpu(CPUState *start_cpu)
    294{
    295    CPUState *cpu;
    296
    297    CPU_FOREACH(cpu) {
    298        if (cpu_paging_enabled(cpu)) {
    299            return cpu;
    300        }
    301    }
    302
    303    return NULL;
    304}
    305
/*
 * Build the guest's memory mapping list.
 *
 * If at least one CPU has paging enabled, walk the page tables of every
 * CPU from the first paging-enabled one to the end of the CPU list and
 * collect their virtual-to-physical mappings; on error from any CPU the
 * error is propagated to @errp and the walk stops.  Without paging,
 * virtual addresses equal physical addresses, so an identity mapping is
 * emitted for each guest-physical block instead.
 */
void qemu_get_guest_memory_mapping(MemoryMappingList *list,
                                   const GuestPhysBlockList *guest_phys_blocks,
                                   Error **errp)
{
    CPUState *cpu, *first_paging_enabled_cpu;
    GuestPhysBlock *block;
    ram_addr_t offset, length;

    first_paging_enabled_cpu = find_paging_enabled_cpu(first_cpu);
    if (first_paging_enabled_cpu) {
        /* NOTE(review): this loop also visits CPUs after the first one that
         * may not have paging enabled -- presumably cpu_get_memory_mapping()
         * copes with those; confirm against its implementation. */
        for (cpu = first_paging_enabled_cpu; cpu != NULL;
             cpu = CPU_NEXT(cpu)) {
            Error *err = NULL;
            cpu_get_memory_mapping(cpu, list, &err);
            if (err) {
                error_propagate(errp, err);
                return;
            }
        }
        return;
    }

    /*
     * If the guest doesn't use paging, the virtual address is equal to physical
     * address.
     */
    QTAILQ_FOREACH(block, &guest_phys_blocks->head, next) {
        offset = block->target_start;
        length = block->target_end - block->target_start;
        create_new_memory_mapping(list, offset, offset, length);
    }
}
    338
    339void qemu_get_guest_simple_memory_mapping(MemoryMappingList *list,
    340                                   const GuestPhysBlockList *guest_phys_blocks)
    341{
    342    GuestPhysBlock *block;
    343
    344    QTAILQ_FOREACH(block, &guest_phys_blocks->head, next) {
    345        create_new_memory_mapping(list, block->target_start, 0,
    346                                  block->target_end - block->target_start);
    347    }
    348}
    349
/*
 * Restrict @list to the physical window [begin, begin + length):
 * mappings entirely outside the window are removed and freed, and
 * mappings straddling a window edge are clipped to it.
 */
void memory_mapping_filter(MemoryMappingList *list, int64_t begin,
                           int64_t length)
{
    MemoryMapping *cur, *next;

    QTAILQ_FOREACH_SAFE(cur, &list->head, next, next) {
        /* no overlap with the window at all: drop the mapping */
        if (cur->phys_addr >= begin + length ||
            cur->phys_addr + cur->length <= begin) {
            QTAILQ_REMOVE(&list->head, cur, next);
            g_free(cur);
            list->num--;
            continue;
        }

        /* clip the head of the mapping to the window start; virt_addr is
         * only shifted when non-zero -- presumably 0 means "no virtual
         * address", TODO confirm against callers */
        if (cur->phys_addr < begin) {
            cur->length -= begin - cur->phys_addr;
            if (cur->virt_addr) {
                cur->virt_addr += begin - cur->phys_addr;
            }
            cur->phys_addr = begin;
        }

        /* clip the tail of the mapping to the window end */
        if (cur->phys_addr + cur->length > begin + length) {
            cur->length -= cur->phys_addr + cur->length - begin - length;
        }
    }
}
    376}