cachepc-qemu

Fork of AMDESE/qemu with changes for the cachepc side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu

stats64.h (5233B)


/*
 * Atomic operations on 64-bit quantities.
 *
 * Copyright (C) 2017 Red Hat, Inc.
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#ifndef QEMU_STATS64_H
#define QEMU_STATS64_H

#include "qemu/atomic.h"

/* This provides atomic operations on a 64-bit type, using a reader-writer
 * spinlock on architectures that do not have 64-bit accesses.  Even on
 * those architectures, it tries hard not to take the lock.
 */

typedef struct Stat64 {
#ifdef CONFIG_ATOMIC64
    aligned_uint64_t value;
#else
    uint32_t low, high;
    uint32_t lock;
#endif
} Stat64;

#ifdef CONFIG_ATOMIC64
static inline void stat64_init(Stat64 *s, uint64_t value)
{
    /* This is not guaranteed to be atomic! */
    *s = (Stat64) { value };
}

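/* Reads are a single atomic 64-bit load; no lock is involved.  */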
static inline uint64_t stat64_get(const Stat64 *s)
{
    return qatomic_read__nocheck(&s->value);
}

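/* Updates are a single atomic 64-bit add.  */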
static inline void stat64_add(Stat64 *s, uint64_t value)
{
    qatomic_add(&s->value, value);
}

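/* Lock-free min: cmpxchg returns the value observed in memory, so the
 * loop retries until the stored value is no greater than @value.
 */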
static inline void stat64_min(Stat64 *s, uint64_t value)
{
    uint64_t orig = qatomic_read__nocheck(&s->value);
    while (orig > value) {
        orig = qatomic_cmpxchg__nocheck(&s->value, orig, value);
    }
}

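/* Lock-free max, symmetric to stat64_min above.  */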
static inline void stat64_max(Stat64 *s, uint64_t value)
{
    uint64_t orig = qatomic_read__nocheck(&s->value);
    while (orig < value) {
        orig = qatomic_cmpxchg__nocheck(&s->value, orig, value);
    }
}
#else
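/* Out-of-line slow paths; they serialize on s->lock (implemented in
 * util/stats64.c).
 */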
uint64_t stat64_get(const Stat64 *s);
bool stat64_min_slow(Stat64 *s, uint64_t value);
bool stat64_max_slow(Stat64 *s, uint64_t value);
bool stat64_add32_carry(Stat64 *s, uint32_t low, uint32_t high);

static inline void stat64_init(Stat64 *s, uint64_t value)
{
    /* This is not guaranteed to be atomic! */
    *s = (Stat64) { .low = value, .high = value >> 32, .lock = 0 };
}

static inline void stat64_add(Stat64 *s, uint64_t value)
{
    uint32_t low, high;
    high = value >> 32;
    low = (uint32_t) value;
    if (!low) {
        if (high) {
            qatomic_add(&s->high, high);
        }
        return;
    }

    for (;;) {
        uint32_t orig = s->low;
        uint32_t result = orig + low;
        uint32_t old;

        if (result < low || high) {
            /* If the high part is affected, take the lock.  */
            if (stat64_add32_carry(s, low, high)) {
                return;
            }
            continue;
        }

        /* No carry, try with a 32-bit cmpxchg.  The result is independent of
         * the high 32 bits, so it can race just fine with stat64_add32_carry
         * and even stat64_get!
         */
        old = qatomic_cmpxchg(&s->low, orig, result);
        if (orig == old) {
            return;
        }
    }
}

static inline void stat64_min(Stat64 *s, uint64_t value)
{
    uint32_t low, high;
    uint32_t orig_low, orig_high;

    high = value >> 32;
    low = (uint32_t) value;
    do {
        orig_high = qatomic_read(&s->high);
        if (orig_high < high) {
            return;
        }

        if (orig_high == high) {
            /* High 32 bits are equal.  Read low after high, otherwise we
             * can get a false positive (e.g. 0x1235,0x0000 changes to
             * 0x1234,0x8000 and we read it as 0x1234,0x0000). Pairs with
             * the write barrier in stat64_min_slow.
             */
            smp_rmb();
            orig_low = qatomic_read(&s->low);
            if (orig_low <= low) {
                return;
            }

            /* See if we were lucky and a writer raced against us.  The
             * barrier is theoretically unnecessary, but if we remove it
             * we may miss being lucky.
             */
            smp_rmb();
            orig_high = qatomic_read(&s->high);
            if (orig_high < high) {
                return;
            }
        }

        /* If the value changes in any way, we have to take the lock.  */
    } while (!stat64_min_slow(s, value));
}

static inline void stat64_max(Stat64 *s, uint64_t value)
{
    uint32_t low, high;
    uint32_t orig_low, orig_high;

    high = value >> 32;
    low = (uint32_t) value;
    do {
        orig_high = qatomic_read(&s->high);
        if (orig_high > high) {
            return;
        }

        if (orig_high == high) {
            /* High 32 bits are equal.  Read low after high, otherwise we
             * can get a false positive (e.g. 0x1234,0x8000 changes to
             * 0x1235,0x0000 and we read it as 0x1235,0x8000). Pairs with
             * the write barrier in stat64_max_slow.
             */
            smp_rmb();
            orig_low = qatomic_read(&s->low);
            if (orig_low >= low) {
                return;
            }

            /* See if we were lucky and a writer raced against us.  The
             * barrier is theoretically unnecessary, but if we remove it
             * we may miss being lucky.
             */
            smp_rmb();
            orig_high = qatomic_read(&s->high);
            if (orig_high > high) {
                return;
            }
        }

        /* If the value changes in any way, we have to take the lock.  */
    } while (!stat64_max_slow(s, value));
}

#endif

#endif
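
Usage, for reference: a Stat64 behaves like a 64-bit statistics counter that
may be updated concurrently from many threads and read at any time. A minimal
caller sketch follows (hypothetical names; not part of this file or of QEMU):

/* Hypothetical example of using the Stat64 API from stats64.h. */
#include "qemu/osdep.h"
#include "qemu/stats64.h"

static Stat64 bytes_transferred;

static void on_transfer(uint64_t nbytes)
{
    /* Lock-free on hosts with CONFIG_ATOMIC64; elsewhere it falls back
     * to the spinlock-assisted 32-bit path above.
     */
    stat64_add(&bytes_transferred, nbytes);
}

static uint64_t transferred_so_far(void)
{
    /* Safe to call concurrently with writers.  */
    return stat64_get(&bytes_transferred);
}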