cachepc-qemu

Fork of AMDESE/qemu with changes for cachepc side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu
Log | Files | Refs | Submodules | LICENSE | sfeed.txt

qed.h (10194B)


      1/*
      2 * QEMU Enhanced Disk Format
      3 *
      4 * Copyright IBM, Corp. 2010
      5 *
      6 * Authors:
      7 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
      8 *  Anthony Liguori   <aliguori@us.ibm.com>
      9 *
     10 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
     11 * See the COPYING.LIB file in the top-level directory.
     12 *
     13 */
     14
     15#ifndef BLOCK_QED_H
     16#define BLOCK_QED_H
     17
     18#include "block/block_int.h"
     19#include "qemu/cutils.h"
     20
     21/* The layout of a QED file is as follows:
     22 *
     23 * +--------+----------+----------+----------+-----+
     24 * | header | L1 table | cluster0 | cluster1 | ... |
     25 * +--------+----------+----------+----------+-----+
     26 *
     27 * There is a 2-level pagetable for cluster allocation:
     28 *
     29 *                     +----------+
     30 *                     | L1 table |
     31 *                     +----------+
     32 *                ,------'  |  '------.
     33 *           +----------+   |    +----------+
     34 *           | L2 table |  ...   | L2 table |
     35 *           +----------+        +----------+
     36 *       ,------'  |  '------.
     37 *  +----------+   |    +----------+
     38 *  |   Data   |  ...   |   Data   |
     39 *  +----------+        +----------+
     40 *
     41 * The L1 table is fixed size and always present.  L2 tables are allocated on
     42 * demand.  The L1 table size determines the maximum possible image size; it
     43 * can be influenced using the cluster_size and table_size values.
     44 *
     45 * All fields are little-endian on disk.
     46 */
/* Cluster size used when none is specified (64 KiB) */
#define  QED_DEFAULT_CLUSTER_SIZE  65536
enum {
    /* File magic: the bytes "QED\0" read as a little-endian uint32_t */
    QED_MAGIC = 'Q' | 'E' << 8 | 'D' << 16 | '\0' << 24,

    /* The image supports a backing file */
    QED_F_BACKING_FILE = 0x01,

    /* The image needs a consistency check before use */
    QED_F_NEED_CHECK = 0x02,

    /* The backing file format must not be probed, treat as raw image */
    QED_F_BACKING_FORMAT_NO_PROBE = 0x04,

    /* Feature bits must be used when the on-disk format changes */
    QED_FEATURE_MASK = QED_F_BACKING_FILE | /* supported feature bits */
                       QED_F_NEED_CHECK |
                       QED_F_BACKING_FORMAT_NO_PROBE,
    QED_COMPAT_FEATURE_MASK = 0,            /* supported compat feature bits */
    QED_AUTOCLEAR_FEATURE_MASK = 0,         /* supported autoclear feature bits */

    /* Data is stored in groups of sectors called clusters.  Cluster size must
     * be large to avoid keeping too much metadata.  I/O requests that have
     * sub-cluster size will require read-modify-write.
     */
    QED_MIN_CLUSTER_SIZE = 4 * 1024, /* in bytes */
    QED_MAX_CLUSTER_SIZE = 64 * 1024 * 1024,

    /* Allocated clusters are tracked using a 2-level pagetable.  Table size is
     * a multiple of clusters so large maximum image sizes can be supported
     * without jacking up the cluster size too much.
     */
    QED_MIN_TABLE_SIZE = 1,        /* in clusters */
    QED_MAX_TABLE_SIZE = 16,
    QED_DEFAULT_TABLE_SIZE = 4,

    /* Delay to flush and clean image after last allocating write completes */
    QED_NEED_CHECK_TIMEOUT = 5,    /* in seconds */
};
     85
/* On-disk image header.  QEMU_PACKED: this mirrors the file layout exactly,
 * so no padding may be inserted.  All fields are little-endian on disk (see
 * the layout comment above); the in-memory copy in BDRVQEDState is kept
 * cpu-endian.
 */
typedef struct {
    uint32_t magic;                 /* QED\0 */

    uint32_t cluster_size;          /* in bytes */
    uint32_t table_size;            /* for L1 and L2 tables, in clusters */
    uint32_t header_size;           /* in clusters */

    uint64_t features;              /* format feature bits */
    uint64_t compat_features;       /* compatible feature bits */
    uint64_t autoclear_features;    /* self-resetting feature bits */

    uint64_t l1_table_offset;       /* in bytes */
    uint64_t image_size;            /* total logical image size, in bytes */

    /* if (features & QED_F_BACKING_FILE) */
    uint32_t backing_filename_offset; /* in bytes from start of header */
    uint32_t backing_filename_size;   /* in bytes */
} QEMU_PACKED QEDHeader;
    104
/* An L1 or L2 table viewed as an array of cluster offsets.  offsets[0] is a
 * GNU zero-length array: the struct itself is empty and the storage behind it
 * is allocated separately (see qed_alloc_table()).
 */
typedef struct {
    uint64_t offsets[0];            /* in bytes */
} QEDTable;
    108
/* The L2 cache is a simple write-through cache for L2 structures */
typedef struct CachedL2Table {
    QEDTable *table;    /* cached table contents */
    uint64_t offset;    /* offset=0 indicates an invalid (free) entry */
    QTAILQ_ENTRY(CachedL2Table) node;   /* linkage in L2TableCache.entries */
    int ref;            /* reference count */
} CachedL2Table;
    116
/* Container for the cached L2 tables */
typedef struct {
    QTAILQ_HEAD(, CachedL2Table) entries;   /* list of cached tables */
    unsigned int n_entries;                 /* current number of entries */
} L2TableCache;
    121
/* Per-request lookup state: the L2 cache entry held by an in-flight request */
typedef struct QEDRequest {
    CachedL2Table *l2_table;
} QEDRequest;
    125
/* Flag bits for QEDAIOCB.flags */
enum {
    QED_AIOCB_WRITE = 0x0001,       /* read or write? */
    QED_AIOCB_ZERO  = 0x0002,       /* zero write, used with QED_AIOCB_WRITE */
};
    130
/* State for one in-flight I/O request.  A request advances cluster by
 * cluster: the cur_* fields describe the cluster run currently being
 * processed, while qiov/qiov_offset track overall progress through the
 * caller's buffer.
 */
typedef struct QEDAIOCB {
    BlockDriverState *bs;
    QSIMPLEQ_ENTRY(QEDAIOCB) next;  /* next request */
    int flags;                      /* QED_AIOCB_* bits ORed together */
    uint64_t end_pos;               /* request end on block device, in bytes */

    /* User scatter-gather list */
    QEMUIOVector *qiov;
    size_t qiov_offset;             /* byte count already processed */

    /* Current cluster scatter-gather list */
    QEMUIOVector cur_qiov;
    uint64_t cur_pos;               /* position on block device, in bytes */
    uint64_t cur_cluster;           /* cluster offset in image file */
    unsigned int cur_nclusters;     /* number of clusters being accessed */
    int find_cluster_ret;           /* used for L1/L2 update */

    QEDRequest request;             /* L2 table lookup state for this request */
} QEDAIOCB;
    150
/* Per-device driver state for an open QED image */
typedef struct {
    BlockDriverState *bs;           /* device */

    /* Written only by an allocating write or the timer handler (the latter
     * while allocating reqs are plugged).
     */
    QEDHeader header;               /* always cpu-endian */

    /* Protected by table_lock.  */
    CoMutex table_lock;
    QEDTable *l1_table;
    L2TableCache l2_cache;          /* l2 table cache */
    uint32_t table_nelems;          /* number of offsets per L1/L2 table */
    uint32_t l1_shift;              /* used by qed_l1_index() below */
    uint32_t l2_shift;              /* used by qed_l2_index() below */
    uint32_t l2_mask;               /* used by qed_l2_index() below */
    uint64_t file_size;             /* length of image file, in bytes */

    /* Allocating write request queue */
    QEDAIOCB *allocating_acb;       /* request currently allocating, if any */
    CoQueue allocating_write_reqs;  /* requests waiting for the allocator */
    bool allocating_write_reqs_plugged;

    /* Periodic flush and clear need check flag */
    QEMUTimer *need_check_timer;
} BDRVQEDState;
    177
/* Cluster lookup results (see qed_find_cluster() below) */
enum {
    QED_CLUSTER_FOUND,         /* cluster found */
    QED_CLUSTER_ZERO,          /* zero cluster found */
    QED_CLUSTER_L2,            /* cluster missing in L2 */
    QED_CLUSTER_L1,            /* cluster missing in L1 */
};
    184
    185/**
    186 * Header functions
    187 */
    188int qed_write_header_sync(BDRVQEDState *s);
    189
    190/**
    191 * L2 cache functions
    192 */
    193void qed_init_l2_cache(L2TableCache *l2_cache);
    194void qed_free_l2_cache(L2TableCache *l2_cache);
    195CachedL2Table *qed_alloc_l2_cache_entry(L2TableCache *l2_cache);
    196void qed_unref_l2_cache_entry(CachedL2Table *entry);
    197CachedL2Table *qed_find_l2_cache_entry(L2TableCache *l2_cache, uint64_t offset);
    198void qed_commit_l2_cache_entry(L2TableCache *l2_cache, CachedL2Table *l2_table);
    199
    200/**
    201 * Table I/O functions
    202 */
    203int coroutine_fn qed_read_l1_table_sync(BDRVQEDState *s);
    204int coroutine_fn qed_write_l1_table(BDRVQEDState *s, unsigned int index,
    205                                    unsigned int n);
    206int coroutine_fn qed_write_l1_table_sync(BDRVQEDState *s, unsigned int index,
    207                                         unsigned int n);
    208int coroutine_fn qed_read_l2_table_sync(BDRVQEDState *s, QEDRequest *request,
    209                                        uint64_t offset);
    210int coroutine_fn qed_read_l2_table(BDRVQEDState *s, QEDRequest *request,
    211                                   uint64_t offset);
    212int coroutine_fn qed_write_l2_table(BDRVQEDState *s, QEDRequest *request,
    213                                    unsigned int index, unsigned int n,
    214                                    bool flush);
    215int coroutine_fn qed_write_l2_table_sync(BDRVQEDState *s, QEDRequest *request,
    216                                         unsigned int index, unsigned int n,
    217                                         bool flush);
    218
    219/**
    220 * Cluster functions
    221 */
    222int coroutine_fn qed_find_cluster(BDRVQEDState *s, QEDRequest *request,
    223                                  uint64_t pos, size_t *len,
    224                                  uint64_t *img_offset);
    225
    226/**
    227 * Consistency check
    228 */
    229int coroutine_fn qed_check(BDRVQEDState *s, BdrvCheckResult *result, bool fix);
    230
    231QEDTable *qed_alloc_table(BDRVQEDState *s);
    232
    233/**
    234 * Round down to the start of a cluster
    235 */
    236static inline uint64_t qed_start_of_cluster(BDRVQEDState *s, uint64_t offset)
    237{
    238    return offset & ~(uint64_t)(s->header.cluster_size - 1);
    239}
    240
    241static inline uint64_t qed_offset_into_cluster(BDRVQEDState *s, uint64_t offset)
    242{
    243    return offset & (s->header.cluster_size - 1);
    244}
    245
    246static inline uint64_t qed_bytes_to_clusters(BDRVQEDState *s, uint64_t bytes)
    247{
    248    return qed_start_of_cluster(s, bytes + (s->header.cluster_size - 1)) /
    249           (s->header.cluster_size - 1);
    250}
    251
    252static inline unsigned int qed_l1_index(BDRVQEDState *s, uint64_t pos)
    253{
    254    return pos >> s->l1_shift;
    255}
    256
    257static inline unsigned int qed_l2_index(BDRVQEDState *s, uint64_t pos)
    258{
    259    return (pos >> s->l2_shift) & s->l2_mask;
    260}
    261
    262/**
    263 * Test if a cluster offset is valid
    264 */
    265static inline bool qed_check_cluster_offset(BDRVQEDState *s, uint64_t offset)
    266{
    267    uint64_t header_size = (uint64_t)s->header.header_size *
    268                           s->header.cluster_size;
    269
    270    if (offset & (s->header.cluster_size - 1)) {
    271        return false;
    272    }
    273    return offset >= header_size && offset < s->file_size;
    274}
    275
    276/**
    277 * Test if a table offset is valid
    278 */
    279static inline bool qed_check_table_offset(BDRVQEDState *s, uint64_t offset)
    280{
    281    uint64_t end_offset = offset + (s->header.table_size - 1) *
    282                          s->header.cluster_size;
    283
    284    /* Overflow check */
    285    if (end_offset <= offset) {
    286        return false;
    287    }
    288
    289    return qed_check_cluster_offset(s, offset) &&
    290           qed_check_cluster_offset(s, end_offset);
    291}
    292
    293static inline bool qed_offset_is_cluster_aligned(BDRVQEDState *s,
    294                                                 uint64_t offset)
    295{
    296    if (qed_offset_into_cluster(s, offset)) {
    297        return false;
    298    }
    299    return true;
    300}
    301
    302static inline bool qed_offset_is_unalloc_cluster(uint64_t offset)
    303{
    304    if (offset == 0) {
    305        return true;
    306    }
    307    return false;
    308}
    309
    310static inline bool qed_offset_is_zero_cluster(uint64_t offset)
    311{
    312    if (offset == 1) {
    313        return true;
    314    }
    315    return false;
    316}
    317
    318#endif /* BLOCK_QED_H */