cachepc-qemu

Fork of AMDESE/qemu with changes for cachepc side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu

ram_addr.h (17361B)


/*
 * Declarations for cpu physical memory functions
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 *
 */

/*
 * This header is for use by exec.c and memory.c ONLY.  Do not include it
 * anywhere else.  The functions declared here will be removed soon.
 */

#ifndef RAM_ADDR_H
#define RAM_ADDR_H

#ifndef CONFIG_USER_ONLY
#include "cpu.h"
#include "sysemu/xen.h"
#include "sysemu/tcg.h"
#include "exec/ramlist.h"
#include "exec/ramblock.h"

/**
 * clear_bmap_size: calculate clear bitmap size
 *
 * @pages: number of guest pages
 * @shift: guest page number shift
 *
 * Returns: number of bits for the clear bitmap
 */
static inline long clear_bmap_size(uint64_t pages, uint8_t shift)
{
    return DIV_ROUND_UP(pages, 1UL << shift);
}

/**
 * clear_bmap_set: set clear bitmap for the page range
 *
 * @rb: the ramblock to operate on
 * @start: the start page number
 * @npages: number of pages to set in the bitmap
 *
 * Returns: None
 */
static inline void clear_bmap_set(RAMBlock *rb, uint64_t start,
                                  uint64_t npages)
{
    uint8_t shift = rb->clear_bmap_shift;

    bitmap_set_atomic(rb->clear_bmap, start >> shift,
                      clear_bmap_size(npages, shift));
}

/**
 * clear_bmap_test_and_clear: test clear bitmap for the page, clear if set
 *
 * @rb: the ramblock to operate on
 * @page: the page number to check
 *
 * Returns: true if the bit was set, false otherwise
 */
static inline bool clear_bmap_test_and_clear(RAMBlock *rb, uint64_t page)
{
    uint8_t shift = rb->clear_bmap_shift;

    return bitmap_test_and_clear_atomic(rb->clear_bmap, page >> shift, 1);
}
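
/*
 * Illustrative note (editor's addition, not part of the upstream header):
 * each bit of rb->clear_bmap covers a power-of-two run of guest pages
 * selected by rb->clear_bmap_shift.  For example, with shift == 6 one
 * clear-bitmap bit covers 64 guest pages, so for a 1000-page range:
 *
 *     clear_bmap_size(1000, 6) == DIV_ROUND_UP(1000, 64) == 16 bits
 *
 * and clear_bmap_set(rb, 0, 1000) marks bits 0..15, recording that the
 * real dirty-bitmap clear for those pages is still pending.
 */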

static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
{
    return b && b->host && offset < b->used_length;
}

static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
{
    assert(offset_in_ramblock(block, offset));
    return (char *)block->host + offset;
}
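
/*
 * Illustrative usage sketch (editor's addition, not part of the upstream
 * header).  A caller should validate a guest RAM offset before mapping it
 * to a host pointer; 'block' and 'off' are hypothetical:
 *
 *     if (offset_in_ramblock(block, off)) {
 *         void *host = ramblock_ptr(block, off);
 *         uint8_t first_byte = *(uint8_t *)host;
 *     }
 */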

static inline unsigned long int ramblock_recv_bitmap_offset(void *host_addr,
                                                            RAMBlock *rb)
{
    uint64_t host_addr_offset =
            (uint64_t)(uintptr_t)(host_addr - (void *)rb->host);
    return host_addr_offset >> TARGET_PAGE_BITS;
}
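
/*
 * Worked example (editor's addition): assuming 4 KiB target pages
 * (TARGET_PAGE_BITS == 12), a host_addr that is 0x3000 bytes past rb->host
 * yields ramblock_recv_bitmap_offset() == 3, i.e. bit index 3 in the
 * block's receive bitmap.
 */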

bool ramblock_is_pmem(RAMBlock *rb);

long qemu_minrampagesize(void);
long qemu_maxrampagesize(void);

/**
 * qemu_ram_alloc_from_file,
 * qemu_ram_alloc_from_fd:  Allocate a ram block from the specified backing
 *                          file or device
 *
 * Parameters:
 *  @size: the size in bytes of the ram block
 *  @mr: the memory region where the ram block is
 *  @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM,
 *              RAM_NORESERVE.
 *  @mem_path or @fd: specify the backing file or device
 *  @readonly: true to open @mem_path or @fd read-only, false for read/write.
 *  @errp: pointer to Error*, to store an error if it happens
 *
 * Return:
 *  On success, return a pointer to the ram block.
 *  On failure, return NULL.
 */
RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                   uint32_t ram_flags, const char *mem_path,
                                   bool readonly, Error **errp);
RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
                                 uint32_t ram_flags, int fd, off_t offset,
                                 bool readonly, Error **errp);
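
/*
 * Illustrative usage sketch (editor's addition, not part of the upstream
 * header).  Backing a RAM block with a shared file; 'size', 'mr', 'path'
 * and 'local_err' are hypothetical:
 *
 *     Error *local_err = NULL;
 *     RAMBlock *rb = qemu_ram_alloc_from_file(size, mr, RAM_SHARED, path,
 *                                             false, &local_err);
 *     if (!rb) {
 *         error_report_err(local_err);
 *     }
 */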

RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                  MemoryRegion *mr, Error **errp);
RAMBlock *qemu_ram_alloc(ram_addr_t size, uint32_t ram_flags, MemoryRegion *mr,
                         Error **errp);
RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t max_size,
                                    void (*resized)(const char*,
                                                    uint64_t length,
                                                    void *host),
                                    MemoryRegion *mr, Error **errp);
void qemu_ram_free(RAMBlock *block);

int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp);

void qemu_ram_msync(RAMBlock *block, ram_addr_t start, ram_addr_t length);

/* Write back the whole block of memory to its backing store */
static inline void qemu_ram_block_writeback(RAMBlock *block)
{
    qemu_ram_msync(block, 0, block->used_length);
}
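
/*
 * Illustrative lifecycle sketch (editor's addition, not part of the
 * upstream header): a resizeable block may grow up to the max_size given
 * at allocation time, and qemu_ram_msync() (or the
 * qemu_ram_block_writeback() wrapper) flushes its contents to the backing
 * store.  'block', 'new_size' and 'local_err' are hypothetical:
 *
 *     Error *local_err = NULL;
 *     if (qemu_ram_resize(block, new_size, &local_err) < 0) {
 *         error_report_err(local_err);
 *     } else {
 *         qemu_ram_block_writeback(block);
 *     }
 *     ...
 *     qemu_ram_free(block);
 */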

#define DIRTY_CLIENTS_ALL     ((1 << DIRTY_MEMORY_NUM) - 1)
#define DIRTY_CLIENTS_NOCODE  (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))
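
/*
 * Editor's note: there are currently three dirty-memory clients
 * (DIRTY_MEMORY_VGA, DIRTY_MEMORY_CODE, DIRTY_MEMORY_MIGRATION), so
 * DIRTY_CLIENTS_ALL expands to 0b111 and DIRTY_CLIENTS_NOCODE masks out
 * only the DIRTY_MEMORY_CODE bit.
 */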

void tb_invalidate_phys_range(ram_addr_t start, ram_addr_t end);

static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    unsigned long idx, offset, base;
    bool dirty = false;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    WITH_RCU_READ_LOCK_GUARD() {
        blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);

        idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        base = page - offset;
        while (page < end) {
            unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
            unsigned long num = next - base;
            unsigned long found = find_next_bit(blocks->blocks[idx],
                                                num, offset);
            if (found < num) {
                dirty = true;
                break;
            }

            page = next;
            idx++;
            offset = 0;
            base += DIRTY_MEMORY_BLOCK_SIZE;
        }
    }

    return dirty;
}
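
/*
 * Illustrative usage sketch (editor's addition): a caller can ask whether
 * any page of a guest RAM range is dirty for a given client; 'addr' and
 * 'len' are hypothetical:
 *
 *     if (cpu_physical_memory_get_dirty(addr, len, DIRTY_MEMORY_VGA)) {
 *         ... at least one page in [addr, addr + len) is dirty ...
 *     }
 */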

static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
                                                 ram_addr_t length,
                                                 unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    unsigned long idx, offset, base;
    bool dirty = true;

    assert(client < DIRTY_MEMORY_NUM);

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    RCU_READ_LOCK_GUARD();

    blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);

    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
    base = page - offset;
    while (page < end) {
        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long num = next - base;
        unsigned long found = find_next_zero_bit(blocks->blocks[idx], num,
                                                 offset);
        if (found < num) {
            dirty = false;
            break;
        }

        page = next;
        idx++;
        offset = 0;
        base += DIRTY_MEMORY_BLOCK_SIZE;
    }

    return dirty;
}

static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    return cpu_physical_memory_get_dirty(addr, 1, client);
}

/*
 * Returns true if @addr is still considered clean by at least one dirty
 * memory client, i.e. the page is not dirty for all of VGA, CODE and
 * MIGRATION at once.
 */
static inline bool cpu_physical_memory_is_clean(ram_addr_t addr)
{
    bool vga = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_VGA);
    bool code = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_CODE);
    bool migration =
        cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
    return !(vga && code && migration);
}

static inline uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start,
                                                               ram_addr_t length,
                                                               uint8_t mask)
{
    uint8_t ret = 0;

    if (mask & (1 << DIRTY_MEMORY_VGA) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_VGA)) {
        ret |= (1 << DIRTY_MEMORY_VGA);
    }
    if (mask & (1 << DIRTY_MEMORY_CODE) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_CODE)) {
        ret |= (1 << DIRTY_MEMORY_CODE);
    }
    if (mask & (1 << DIRTY_MEMORY_MIGRATION) &&
        !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_MIGRATION)) {
        ret |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return ret;
}

static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
                                                      unsigned client)
{
    unsigned long page, idx, offset;
    DirtyMemoryBlocks *blocks;

    assert(client < DIRTY_MEMORY_NUM);

    page = addr >> TARGET_PAGE_BITS;
    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
    offset = page % DIRTY_MEMORY_BLOCK_SIZE;

    RCU_READ_LOCK_GUARD();

    blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);

    set_bit_atomic(offset, blocks->blocks[idx]);
}

static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                       ram_addr_t length,
                                                       uint8_t mask)
{
    DirtyMemoryBlocks *blocks[DIRTY_MEMORY_NUM];
    unsigned long end, page;
    unsigned long idx, offset, base;
    int i;

    if (!mask && !xen_enabled()) {
        return;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    WITH_RCU_READ_LOCK_GUARD() {
        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            blocks[i] = qatomic_rcu_read(&ram_list.dirty_memory[i]);
        }

        idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        base = page - offset;
        while (page < end) {
            unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);

            if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
                bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx],
                                  offset, next - page);
            }
            if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
                bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx],
                                  offset, next - page);
            }
            if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
                bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx],
                                  offset, next - page);
            }

            page = next;
            idx++;
            offset = 0;
            base += DIRTY_MEMORY_BLOCK_SIZE;
        }
    }

    xen_hvm_modified_memory(start, length);
}
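
/*
 * Illustrative usage sketch (editor's addition): after writing into guest
 * RAM on behalf of the guest (for example, DMA from a device model), the
 * range is marked dirty for every interested client; 'ram_offset' and
 * 'len' are hypothetical:
 *
 *     cpu_physical_memory_set_dirty_range(ram_offset, len,
 *                                         DIRTY_CLIENTS_NOCODE);
 */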

#if !defined(_WIN32)
static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
                                                          ram_addr_t start,
                                                          ram_addr_t pages)
{
    unsigned long i, j;
    unsigned long page_number, c;
    hwaddr addr;
    ram_addr_t ram_addr;
    unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
    unsigned long hpratio = qemu_real_host_page_size / TARGET_PAGE_SIZE;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

    /* Is the start address aligned to the start of a bitmap word? */
    if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) &&
        (hpratio == 1)) {
        unsigned long **blocks[DIRTY_MEMORY_NUM];
        unsigned long idx;
        unsigned long offset;
        long k;
        long nr = BITS_TO_LONGS(pages);

        idx = (start >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE;
        offset = BIT_WORD((start >> TARGET_PAGE_BITS) %
                          DIRTY_MEMORY_BLOCK_SIZE);

        WITH_RCU_READ_LOCK_GUARD() {
            for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
                blocks[i] =
                    qatomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
            }

            for (k = 0; k < nr; k++) {
                if (bitmap[k]) {
                    unsigned long temp = leul_to_cpu(bitmap[k]);

                    qatomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);

                    if (global_dirty_log) {
                        qatomic_or(
                                &blocks[DIRTY_MEMORY_MIGRATION][idx][offset],
                                temp);
                    }

                    if (tcg_enabled()) {
                        qatomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset],
                                   temp);
                    }
                }

                if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
                    offset = 0;
                    idx++;
                }
            }
        }

        xen_hvm_modified_memory(start, pages << TARGET_PAGE_BITS);
    } else {
        uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL
                                        : DIRTY_CLIENTS_NOCODE;

        if (!global_dirty_log) {
            clients &= ~(1 << DIRTY_MEMORY_MIGRATION);
        }

        /*
         * Walking the bitmap is faster than walking every address,
         * especially when most of the memory is not dirty.
         */
        for (i = 0; i < len; i++) {
            if (bitmap[i] != 0) {
                c = leul_to_cpu(bitmap[i]);
                do {
                    j = ctzl(c);
                    c &= ~(1ul << j);
                    page_number = (i * HOST_LONG_BITS + j) * hpratio;
                    addr = page_number * TARGET_PAGE_SIZE;
                    ram_addr = start + addr;
                    cpu_physical_memory_set_dirty_range(ram_addr,
                                       TARGET_PAGE_SIZE * hpratio, clients);
                } while (c != 0);
            }
        }
    }
}
#endif /* not _WIN32 */
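
/*
 * Editor's note (not part of the upstream header): this helper is the
 * natural sink for little-endian dirty bitmaps returned by an accelerator,
 * e.g. a KVM dirty log covering 'npages' pages starting at guest RAM
 * offset 'start' (both hypothetical here):
 *
 *     cpu_physical_memory_set_dirty_lebitmap(kvm_bitmap, start, npages);
 *
 * The fast path above requires the start to be word-aligned in the bitmap
 * and the host and target page sizes to match; otherwise the slow path
 * expands each set bit into a cpu_physical_memory_set_dirty_range() call.
 */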

bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client);

DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
    (MemoryRegion *mr, hwaddr offset, hwaddr length, unsigned client);

bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
                                            ram_addr_t start,
                                            ram_addr_t length);

static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
                                                         ram_addr_t length)
{
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_MIGRATION);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_VGA);
    cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_CODE);
}


/* Called within an RCU critical section */
static inline
uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
                                               ram_addr_t start,
                                               ram_addr_t length)
{
    ram_addr_t addr;
    unsigned long word = BIT_WORD((start + rb->offset) >> TARGET_PAGE_BITS);
    uint64_t num_dirty = 0;
    unsigned long *dest = rb->bmap;

    /* Are the start address and length aligned to the start of a word? */
    if (((word * BITS_PER_LONG) << TARGET_PAGE_BITS) ==
         (start + rb->offset) &&
        !(length & ((BITS_PER_LONG << TARGET_PAGE_BITS) - 1))) {
        int k;
        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
        unsigned long * const *src;
        unsigned long idx = (word * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = BIT_WORD((word * BITS_PER_LONG) %
                                        DIRTY_MEMORY_BLOCK_SIZE);
        unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

        src = qatomic_rcu_read(
                &ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION])->blocks;

        for (k = page; k < page + nr; k++) {
            if (src[idx][offset]) {
                unsigned long bits = qatomic_xchg(&src[idx][offset], 0);
                unsigned long new_dirty;
                new_dirty = ~dest[k];
                dest[k] |= bits;
                new_dirty &= bits;
                num_dirty += ctpopl(new_dirty);
            }

            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
                offset = 0;
                idx++;
            }
        }

        if (rb->clear_bmap) {
            /*
             * Postpone clearing the dirty bitmap until just before we
             * really send the pages; that also lets us split the clear
             * into smaller chunks.
             */
            clear_bmap_set(rb, start >> TARGET_PAGE_BITS,
                           length >> TARGET_PAGE_BITS);
        } else {
            /* Slow path - still do the clear in one huge chunk */
            memory_region_clear_dirty_bitmap(rb->mr, start, length);
        }
    } else {
        ram_addr_t offset = rb->offset;

        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
            if (cpu_physical_memory_test_and_clear_dirty(
                        start + addr + offset,
                        TARGET_PAGE_SIZE,
                        DIRTY_MEMORY_MIGRATION)) {
                long k = (start + addr) >> TARGET_PAGE_BITS;
                if (!test_and_set_bit(k, dest)) {
                    num_dirty++;
                }
            }
        }
    }

    return num_dirty;
}
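
/*
 * Illustrative note (editor's addition): migration calls this under the
 * RCU read lock to move dirty bits for one RAMBlock from the global
 * DIRTY_MEMORY_MIGRATION bitmap into rb->bmap, counting how many pages
 * became newly dirty; a sketch, with 'rb' hypothetical:
 *
 *     RCU_READ_LOCK_GUARD();
 *     uint64_t newly_dirty =
 *         cpu_physical_memory_sync_dirty_bitmap(rb, 0, rb->used_length);
 */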
#endif /* !CONFIG_USER_ONLY */
#endif /* RAM_ADDR_H */