cachepc-qemu

Fork of AMDESE/qemu with changes for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu
Log | Files | Refs | Submodules | LICENSE | sfeed.txt

qed-cluster.c (4664B)


      1/*
      2 * QEMU Enhanced Disk Format Cluster functions
      3 *
      4 * Copyright IBM, Corp. 2010
      5 *
      6 * Authors:
      7 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
      8 *  Anthony Liguori   <aliguori@us.ibm.com>
      9 *
     10 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
     11 * See the COPYING.LIB file in the top-level directory.
     12 *
     13 */
     14
     15#include "qemu/osdep.h"
     16#include "qed.h"
     17
     18/**
     19 * Count the number of contiguous data clusters
     20 *
     21 * @s:              QED state
     22 * @table:          L2 table
     23 * @index:          First cluster index
     24 * @n:              Maximum number of clusters
     25 * @offset:         Set to first cluster offset
     26 *
     27 * This function scans tables for contiguous clusters.  A contiguous run of
     28 * clusters may be allocated, unallocated, or zero.
     29 */
     30static unsigned int qed_count_contiguous_clusters(BDRVQEDState *s,
     31                                                  QEDTable *table,
     32                                                  unsigned int index,
     33                                                  unsigned int n,
     34                                                  uint64_t *offset)
     35{
     36    unsigned int end = MIN(index + n, s->table_nelems);
     37    uint64_t last = table->offsets[index];
     38    unsigned int i;
     39
     40    *offset = last;
     41
     42    for (i = index + 1; i < end; i++) {
     43        if (qed_offset_is_unalloc_cluster(last)) {
     44            /* Counting unallocated clusters */
     45            if (!qed_offset_is_unalloc_cluster(table->offsets[i])) {
     46                break;
     47            }
     48        } else if (qed_offset_is_zero_cluster(last)) {
     49            /* Counting zero clusters */
     50            if (!qed_offset_is_zero_cluster(table->offsets[i])) {
     51                break;
     52            }
     53        } else {
     54            /* Counting allocated clusters */
     55            if (table->offsets[i] != last + s->header.cluster_size) {
     56                break;
     57            }
     58            last = table->offsets[i];
     59        }
     60    }
     61    return i - index;
     62}
     63
     64/**
     65 * Find the offset of a data cluster
     66 *
     67 * @s:          QED state
     68 * @request:    L2 cache entry
     69 * @pos:        Byte position in device
     70 * @len:        Number of bytes (may be shortened on return)
     71 * @img_offset: Contains offset in the image file on success
     72 *
     73 * This function translates a position in the block device to an offset in the
     74 * image file. The translated offset or unallocated range in the image file is
     75 * reported back in *img_offset and *len.
     76 *
     77 * If the L2 table exists, request->l2_table points to the L2 table cache entry
     78 * and the caller must free the reference when they are finished.  The cache
     79 * entry is exposed in this way to avoid callers having to read the L2 table
     80 * again later during request processing.  If request->l2_table is non-NULL it
     81 * will be unreferenced before taking on the new cache entry.
     82 *
     83 * On success QED_CLUSTER_FOUND is returned and img_offset/len are a contiguous
     84 * range in the image file.
     85 *
     86 * On failure QED_CLUSTER_L2 or QED_CLUSTER_L1 is returned for missing L2 or L1
     87 * table offset, respectively. len is number of contiguous unallocated bytes.
     88 *
     89 * Called with table_lock held.
     90 */
     91int coroutine_fn qed_find_cluster(BDRVQEDState *s, QEDRequest *request,
     92                                  uint64_t pos, size_t *len,
     93                                  uint64_t *img_offset)
     94{
     95    uint64_t l2_offset;
     96    uint64_t offset = 0;
     97    unsigned int index;
     98    unsigned int n;
     99    int ret;
    100
    101    /* Limit length to L2 boundary.  Requests are broken up at the L2 boundary
    102     * so that a request acts on one L2 table at a time.
    103     */
    104    *len = MIN(*len, (((pos >> s->l1_shift) + 1) << s->l1_shift) - pos);
    105
    106    l2_offset = s->l1_table->offsets[qed_l1_index(s, pos)];
    107    if (qed_offset_is_unalloc_cluster(l2_offset)) {
    108        *img_offset = 0;
    109        return QED_CLUSTER_L1;
    110    }
    111    if (!qed_check_table_offset(s, l2_offset)) {
    112        *img_offset = *len = 0;
    113        return -EINVAL;
    114    }
    115
    116    ret = qed_read_l2_table(s, request, l2_offset);
    117    if (ret) {
    118        goto out;
    119    }
    120
    121    index = qed_l2_index(s, pos);
    122    n = qed_bytes_to_clusters(s, qed_offset_into_cluster(s, pos) + *len);
    123    n = qed_count_contiguous_clusters(s, request->l2_table->table,
    124                                      index, n, &offset);
    125
    126    if (qed_offset_is_unalloc_cluster(offset)) {
    127        ret = QED_CLUSTER_L2;
    128    } else if (qed_offset_is_zero_cluster(offset)) {
    129        ret = QED_CLUSTER_ZERO;
    130    } else if (qed_check_cluster_offset(s, offset)) {
    131        ret = QED_CLUSTER_FOUND;
    132    } else {
    133        ret = -EINVAL;
    134    }
    135
    136    *len = MIN(*len,
    137               n * s->header.cluster_size - qed_offset_into_cluster(s, pos));
    138
    139out:
    140    *img_offset = offset;
    141    return ret;
    142}