cachepc-qemu

Fork of AMDESE/qemu with changes for cachepc side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu
Log | Files | Refs | Submodules | LICENSE | sfeed.txt

cpu.h (31172B)


      1/*
      2 * QEMU CPU model
      3 *
      4 * Copyright (c) 2012 SUSE LINUX Products GmbH
      5 *
      6 * This program is free software; you can redistribute it and/or
      7 * modify it under the terms of the GNU General Public License
      8 * as published by the Free Software Foundation; either version 2
      9 * of the License, or (at your option) any later version.
     10 *
     11 * This program is distributed in the hope that it will be useful,
     12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
     13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     14 * GNU General Public License for more details.
     15 *
     16 * You should have received a copy of the GNU General Public License
     17 * along with this program; if not, see
     18 * <http://www.gnu.org/licenses/gpl-2.0.html>
     19 */
     20#ifndef QEMU_CPU_H
     21#define QEMU_CPU_H
     22
     23#include "hw/qdev-core.h"
     24#include "disas/dis-asm.h"
     25#include "exec/hwaddr.h"
     26#include "exec/memattrs.h"
     27#include "qapi/qapi-types-run-state.h"
     28#include "qemu/bitmap.h"
     29#include "qemu/rcu_queue.h"
     30#include "qemu/queue.h"
     31#include "qemu/thread.h"
     32#include "qemu/plugin.h"
     33#include "qom/object.h"
     34
     35typedef int (*WriteCoreDumpFunction)(const void *buf, size_t size,
     36                                     void *opaque);
     37
     38/**
     39 * vaddr:
     40 * Type wide enough to contain any #target_ulong virtual address.
     41 */
     42typedef uint64_t vaddr;
     43#define VADDR_PRId PRId64
     44#define VADDR_PRIu PRIu64
     45#define VADDR_PRIo PRIo64
     46#define VADDR_PRIx PRIx64
     47#define VADDR_PRIX PRIX64
     48#define VADDR_MAX UINT64_MAX
     49
     50/**
     51 * SECTION:cpu
     52 * @section_id: QEMU-cpu
     53 * @title: CPU Class
     54 * @short_description: Base class for all CPUs
     55 */
     56
     57#define TYPE_CPU "cpu"
     58
     59/* Since this macro is used a lot in hot code paths and in conjunction with
     60 * FooCPU *foo_env_get_cpu(), we deviate from usual QOM practice by using
     61 * an unchecked cast.
     62 */
     63#define CPU(obj) ((CPUState *)(obj))
     64
     65typedef struct CPUClass CPUClass;
     66DECLARE_CLASS_CHECKERS(CPUClass, CPU,
     67                       TYPE_CPU)
     68
     69typedef enum MMUAccessType {
     70    MMU_DATA_LOAD  = 0,
     71    MMU_DATA_STORE = 1,
     72    MMU_INST_FETCH = 2
     73} MMUAccessType;
     74
     75typedef struct CPUWatchpoint CPUWatchpoint;
     76
     77/* see tcg-cpu-ops.h */
     78struct TCGCPUOps;
     79
     80/* see accel-cpu.h */
     81struct AccelCPUClass;
     82
     83/* see sysemu-cpu-ops.h */
     84struct SysemuCPUOps;
     85
     86/**
     87 * CPUClass:
     88 * @class_by_name: Callback to map -cpu command line model name to an
     89 * instantiatable CPU type.
     90 * @parse_features: Callback to parse command line arguments.
     91 * @reset_dump_flags: #CPUDumpFlags to use for reset logging.
     92 * @has_work: Callback for checking if there is work to do.
     93 * @memory_rw_debug: Callback for GDB memory access.
     94 * @dump_state: Callback for dumping state.
     95 * @get_arch_id: Callback for getting architecture-dependent CPU ID.
     96 * @set_pc: Callback for setting the Program Counter register. This
     97 *       should have the semantics used by the target architecture when
     98 *       setting the PC from a source such as an ELF file entry point;
     99 *       for example on Arm it will also set the Thumb mode bit based
    100 *       on the least significant bit of the new PC value.
    101 *       If the target behaviour here is anything other than "set
    102 *       the PC register to the value passed in" then the target must
    103 *       also implement the synchronize_from_tb hook.
    104 * @gdb_read_register: Callback for letting GDB read a register.
    105 * @gdb_write_register: Callback for letting GDB write a register.
    106 * @gdb_adjust_breakpoint: Callback for adjusting the address of a
    107 *       breakpoint.  Used by AVR to handle a gdb mis-feature with
    108 *       its Harvard architecture split code and data.
    109 * @gdb_num_core_regs: Number of core registers accessible to GDB.
    110 * @gdb_core_xml_file: File name for core registers GDB XML description.
    111 * @gdb_stop_before_watchpoint: Indicates whether GDB expects the CPU to stop
    112 *           before the insn which triggers a watchpoint rather than after it.
    113 * @gdb_arch_name: Optional callback that returns the architecture name known
    114 * to GDB. The caller must free the returned string with g_free.
    115 * @gdb_get_dynamic_xml: Callback to return dynamically generated XML for the
    116 *   gdb stub. Returns a pointer to the XML contents for the specified XML file
    117 *   or NULL if the CPU doesn't have a dynamically generated content for it.
    118 * @disas_set_info: Setup architecture specific components of disassembly info
    119 * @adjust_watchpoint_address: Perform a target-specific adjustment to an
    120 * address before attempting to match it against watchpoints.
    121 * @deprecation_note: If this CPUClass is deprecated, this field provides
    122 *                    related information.
    123 *
    124 * Represents a CPU family or model.
    125 */
    126struct CPUClass {
    127    /*< private >*/
    128    DeviceClass parent_class;
    129    /*< public >*/
    130
    131    ObjectClass *(*class_by_name)(const char *cpu_model);
    132    void (*parse_features)(const char *typename, char *str, Error **errp);
    133
    134    bool (*has_work)(CPUState *cpu);
    135    int (*memory_rw_debug)(CPUState *cpu, vaddr addr,
    136                           uint8_t *buf, int len, bool is_write);
    137    void (*dump_state)(CPUState *cpu, FILE *, int flags); /* flags: presumably CPUDumpFlags -- confirm */
    138    int64_t (*get_arch_id)(CPUState *cpu);
    139    void (*set_pc)(CPUState *cpu, vaddr value);
    140    int (*gdb_read_register)(CPUState *cpu, GByteArray *buf, int reg);
    141    int (*gdb_write_register)(CPUState *cpu, uint8_t *buf, int reg);
    142    vaddr (*gdb_adjust_breakpoint)(CPUState *cpu, vaddr addr);
    143
    144    const char *gdb_core_xml_file;
    145    gchar * (*gdb_arch_name)(CPUState *cpu);
    146    const char * (*gdb_get_dynamic_xml)(CPUState *cpu, const char *xmlname);
    147
    148    void (*disas_set_info)(CPUState *cpu, disassemble_info *info);
    149
    150    const char *deprecation_note;
    151    struct AccelCPUClass *accel_cpu; /* accelerator-specific class state, see accel-cpu.h */
    152
    153    /* when system emulation is not available, this pointer is NULL */
    154    const struct SysemuCPUOps *sysemu_ops;
    155
    156    /* when TCG is not available, this pointer is NULL */
    157    const struct TCGCPUOps *tcg_ops;
    158
    159    /*
    160     * if not NULL, this is called in order for the CPUClass to initialize
    161     * class data that depends on the accelerator, see accel/accel-common.c.
    162     */
    163    void (*init_accel_cpu)(struct AccelCPUClass *accel_cpu, CPUClass *cc);
    164
    165    /*
    166     * Keep non-pointer data at the end to minimize holes.
    167     */
    168    int reset_dump_flags;
    169    int gdb_num_core_regs;
    170    bool gdb_stop_before_watchpoint;
    171};
    172
    173/*
    174 * Low 16 bits: number of cycles left, used only in icount mode.
    175 * High 16 bits: Set to -1 to force TCG to stop executing linked TBs
    176 * for this CPU and return to its top level loop (even in non-icount mode).
    177 * This allows a single read-compare-cbranch-write sequence to test
    178 * for both decrementer underflow and exceptions.
    179 */
    180typedef union IcountDecr {
    181    uint32_t u32;          /* both 16-bit halves viewed as one 32-bit word */
    182    struct {
    183#ifdef HOST_WORDS_BIGENDIAN
    184        uint16_t high;     /* set to -1 to force TCG to leave linked TBs (see above) */
    185        uint16_t low;      /* icount cycles left, icount mode only (see above) */
    186#else
    187        uint16_t low;      /* icount cycles left, icount mode only (see above) */
    188        uint16_t high;     /* set to -1 to force TCG to leave linked TBs (see above) */
    189#endif
    190    } u16;
    191} IcountDecr;
    192
    193typedef struct CPUBreakpoint {
    194    vaddr pc;                          /* guest virtual address the breakpoint is set at */
    195    int flags; /* BP_* */
    196    QTAILQ_ENTRY(CPUBreakpoint) entry; /* link in CPUState::breakpoints */
    197} CPUBreakpoint;
    198
    199struct CPUWatchpoint {
    200    vaddr vaddr;                       /* start of the watched guest-virtual range */
    201    vaddr len;                         /* length of the watched range -- NOTE(review): confirm units (bytes) */
    202    vaddr hitaddr;                     /* address of the access that hit -- NOTE(review): confirm */
    203    MemTxAttrs hitattrs;               /* transaction attributes of the hitting access -- NOTE(review): confirm */
    204    int flags; /* BP_* */
    205    QTAILQ_ENTRY(CPUWatchpoint) entry; /* link in CPUState::watchpoints */
    206};
    207
    208#ifdef CONFIG_PLUGIN
    209/*
    210 * For plugins we sometime need to save the resolved iotlb data before
    211 * the memory regions get moved around  by io_writex.
    212 */
    213typedef struct SavedIOTLB {
    214    hwaddr addr;                  /* address of the access -- NOTE(review): confirm guest-physical */
    215    MemoryRegionSection *section; /* section resolved before io_writex may move regions (see above) */
    216    hwaddr mr_offset;             /* offset of the access within the memory region -- NOTE(review): confirm */
    217} SavedIOTLB;
    218#endif
    219
    220struct KVMState;
    221struct kvm_run;
    222
    223struct hax_vcpu_state;
    224struct hvf_vcpu_state;
    225
    226#define TB_JMP_CACHE_BITS 12
    227#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
    228
    229/* work queue */
    230
    231/* The union type allows passing of 64 bit target pointers on 32 bit
    232 * hosts in a single parameter
    233 */
    234typedef union {
    235    int           host_int;    /* host int payload (RUN_ON_CPU_HOST_INT) */
    236    unsigned long host_ulong;  /* host unsigned long payload (RUN_ON_CPU_HOST_ULONG) */
    237    void         *host_ptr;    /* host pointer payload (RUN_ON_CPU_HOST_PTR) */
    238    vaddr         target_ptr;  /* target virtual address; 64-bit even on 32-bit hosts (see above) */
    239} run_on_cpu_data;
    240
    241#define RUN_ON_CPU_HOST_PTR(p)    ((run_on_cpu_data){.host_ptr = (p)})
    242#define RUN_ON_CPU_HOST_INT(i)    ((run_on_cpu_data){.host_int = (i)})
    243#define RUN_ON_CPU_HOST_ULONG(ul) ((run_on_cpu_data){.host_ulong = (ul)})
    244#define RUN_ON_CPU_TARGET_PTR(v)  ((run_on_cpu_data){.target_ptr = (v)})
    245#define RUN_ON_CPU_NULL           RUN_ON_CPU_HOST_PTR(NULL)
    246
    247typedef void (*run_on_cpu_func)(CPUState *cpu, run_on_cpu_data data);
    248
    249struct qemu_work_item;
    250
    251#define CPU_UNSET_NUMA_NODE_ID -1
    252#define CPU_TRACE_DSTATE_MAX_EVENTS 32
    253
    254/**
    255 * CPUState:
    256 * @cpu_index: CPU index (informative).
    257 * @cluster_index: Identifies which cluster this CPU is in.
    258 *   For boards which don't define clusters or for "loose" CPUs not assigned
    259 *   to a cluster this will be UNASSIGNED_CLUSTER_INDEX; otherwise it will
    260 *   be the same as the cluster-id property of the CPU object's TYPE_CPU_CLUSTER
    261 *   QOM parent.
    262 * @tcg_cflags: Pre-computed cflags for this cpu.
    263 * @nr_cores: Number of cores within this CPU package.
    264 * @nr_threads: Number of threads within this CPU.
    265 * @running: #true if CPU is currently running (lockless).
    266 * @has_waiter: #true if a CPU is currently waiting for the cpu_exec_end;
    267 * valid under cpu_list_lock.
    268 * @created: Indicates whether the CPU thread has been successfully created.
    269 * @interrupt_request: Indicates a pending interrupt request.
    270 * @halted: Nonzero if the CPU is in suspended state.
    271 * @stop: Indicates a pending stop request.
    272 * @stopped: Indicates the CPU has been artificially stopped.
    273 * @unplug: Indicates a pending CPU unplug request.
    274 * @crash_occurred: Indicates the OS reported a crash (panic) for this CPU
    275 * @singlestep_enabled: Flags for single-stepping.
    276 * @icount_extra: Instructions until next timer event.
    277 * @can_do_io: Nonzero if memory-mapped IO is safe. Deterministic execution
    278 * requires that IO only be performed on the last instruction of a TB
    279 * so that interrupts take effect immediately.
    280 * @cpu_ases: Pointer to array of CPUAddressSpaces (which define the
    281 *            AddressSpaces this CPU has)
    282 * @num_ases: number of CPUAddressSpaces in @cpu_ases
    283 * @as: Pointer to the first AddressSpace, for the convenience of targets which
    284 *      only have a single AddressSpace
    285 * @env_ptr: Pointer to subclass-specific CPUArchState field.
    286 * @icount_decr_ptr: Pointer to IcountDecr field within subclass.
    287 * @gdb_regs: Additional GDB registers.
    288 * @gdb_num_regs: Number of total registers accessible to GDB.
    289 * @gdb_num_g_regs: Number of registers in GDB 'g' packets.
    290 * @next_cpu: Next CPU sharing TB cache.
    291 * @opaque: User data.
    292 * @mem_io_pc: Host Program Counter at which the memory was accessed.
    293 * @kvm_fd: vCPU file descriptor for KVM.
    294 * @work_mutex: Lock to prevent multiple access to @work_list.
    295 * @work_list: List of pending asynchronous work.
    296 * @trace_dstate_delayed: Delayed changes to trace_dstate (includes all changes
    297 *                        to @trace_dstate).
    298 * @trace_dstate: Dynamic tracing state of events for this vCPU (bitmask).
    299 * @plugin_mask: Plugin event bitmap. Modified only via async work.
    300 * @ignore_memory_transaction_failures: Cached copy of the MachineState
    301 *    flag of the same name: allows the board to suppress calling of the
    302 *    CPU do_transaction_failed hook function.
    303 * @kvm_dirty_gfns: Points to the KVM dirty ring for this CPU when KVM dirty
    304 *    ring is enabled.
    305 * @kvm_fetch_index: Keeps the index that we last fetched from the per-vCPU
    306 *    dirty ring structure.
    307 *
    308 * State of one CPU core or thread.
    309 */
    310struct CPUState {
    311    /*< private >*/
    312    DeviceState parent_obj;
    313    /*< public >*/
    314
    315    int nr_cores;
    316    int nr_threads;
    317
    318    struct QemuThread *thread;
    319#ifdef _WIN32
    320    HANDLE hThread;
    321#endif
    322    int thread_id; /* id of this vCPU's host thread -- NOTE(review): confirm */
    323    bool running, has_waiter;
    324    struct QemuCond *halt_cond; /* condition used to park/wake the vCPU thread -- NOTE(review): confirm */
    325    bool thread_kicked;
    326    bool created;
    327    bool stop;
    328    bool stopped;
    329
    330    /* Should CPU start in powered-off state? */
    331    bool start_powered_off;
    332
    333    bool unplug;
    334    bool crash_occurred;
    335    bool exit_request; /* request to leave the execution loop -- NOTE(review): confirm reader */
    336    bool in_exclusive_context;
    337    uint32_t cflags_next_tb;
    338    /* updates protected by BQL */
    339    uint32_t interrupt_request;
    340    int singlestep_enabled; /* SSTEP_* flags, see cpu_single_step() */
    341    int64_t icount_budget;
    342    int64_t icount_extra;
    343    uint64_t random_seed;
    344    sigjmp_buf jmp_env; /* sigsetjmp() target -- NOTE(review): presumably unwinds cpu exec loop, confirm */
    345
    346    QemuMutex work_mutex;
    347    QSIMPLEQ_HEAD(, qemu_work_item) work_list;
    348
    349    CPUAddressSpace *cpu_ases;
    350    int num_ases;
    351    AddressSpace *as;
    352    MemoryRegion *memory;
    353
    354    void *env_ptr; /* CPUArchState */
    355    IcountDecr *icount_decr_ptr;
    356
    357    /* Accessed in parallel; all accesses must be atomic */
    358    TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];
    359
    360    struct GDBRegisterState *gdb_regs;
    361    int gdb_num_regs;
    362    int gdb_num_g_regs;
    363    QTAILQ_ENTRY(CPUState) node; /* link in the global cpus list (CPU_FOREACH) */
    364
    365    /* ice debug support */
    366    QTAILQ_HEAD(, CPUBreakpoint) breakpoints;
    367
    368    QTAILQ_HEAD(, CPUWatchpoint) watchpoints;
    369    CPUWatchpoint *watchpoint_hit;
    370
    371    void *opaque;
    372
    373    /* In order to avoid passing too many arguments to the MMIO helpers,
    374     * we store some rarely used information in the CPU context.
    375     */
    376    uintptr_t mem_io_pc;
    377
    378    /* Only used in KVM */
    379    int kvm_fd;
    380    struct KVMState *kvm_state;
    381    struct kvm_run *kvm_run;
    382    struct kvm_dirty_gfn *kvm_dirty_gfns;
    383    uint32_t kvm_fetch_index;
    384
    385    /* Used for events with 'vcpu' and *without* the 'disabled' properties */
    386    DECLARE_BITMAP(trace_dstate_delayed, CPU_TRACE_DSTATE_MAX_EVENTS);
    387    DECLARE_BITMAP(trace_dstate, CPU_TRACE_DSTATE_MAX_EVENTS);
    388
    389    DECLARE_BITMAP(plugin_mask, QEMU_PLUGIN_EV_MAX);
    390
    391#ifdef CONFIG_PLUGIN
    392    GArray *plugin_mem_cbs;
    393    /* saved iotlb data from io_writex */
    394    SavedIOTLB saved_iotlb;
    395#endif
    396
    397    /* TODO Move common fields from CPUArchState here. */
    398    int cpu_index;
    399    int cluster_index;
    400    uint32_t tcg_cflags;
    401    uint32_t halted;
    402    uint32_t can_do_io;
    403    int32_t exception_index; /* pending exception index -- NOTE(review): confirm sentinel value */
    404
    405    /* shared by kvm, hax and hvf */
    406    bool vcpu_dirty;
    407
    408    /* Used to keep track of an outstanding cpu throttle thread for migration
    409     * autoconverge
    410     */
    411    bool throttle_thread_scheduled;
    412
    413    bool ignore_memory_transaction_failures;
    414
    415    struct hax_vcpu_state *hax_vcpu;
    416
    417    struct hvf_vcpu_state *hvf;
    418
    419    /* track IOMMUs whose translations we've cached in the TCG TLB */
    420    GArray *iommu_notifiers;
    421};
    422
    423typedef QTAILQ_HEAD(CPUTailQ, CPUState) CPUTailQ;
    424extern CPUTailQ cpus;
    425
    426#define first_cpu        QTAILQ_FIRST_RCU(&cpus)
    427#define CPU_NEXT(cpu)    QTAILQ_NEXT_RCU(cpu, node)
    428#define CPU_FOREACH(cpu) QTAILQ_FOREACH_RCU(cpu, &cpus, node)
    429#define CPU_FOREACH_SAFE(cpu, next_cpu) \
    430    QTAILQ_FOREACH_SAFE_RCU(cpu, &cpus, node, next_cpu)
    431
    432extern __thread CPUState *current_cpu;
    433
    434static inline void cpu_tb_jmp_cache_clear(CPUState *cpu)
    435{
    436    unsigned int i;
    437
    438    for (i = 0; i < TB_JMP_CACHE_SIZE; i++) {
    439        qatomic_set(&cpu->tb_jmp_cache[i], NULL);
    440    }
    441}
    442
    443/**
    444 * qemu_tcg_mttcg_enabled:
    445 * Check whether we are running MultiThread TCG or not.
    446 *
    447 * Returns: %true if we are in MTTCG mode %false otherwise.
    448 */
    449extern bool mttcg_enabled;
    450#define qemu_tcg_mttcg_enabled() (mttcg_enabled)
    451
    452/**
    453 * cpu_paging_enabled:
    454 * @cpu: The CPU whose state is to be inspected.
    455 *
    456 * Returns: %true if paging is enabled, %false otherwise.
    457 */
    458bool cpu_paging_enabled(const CPUState *cpu);
    459
    460/**
    461 * cpu_get_memory_mapping:
    462 * @cpu: The CPU whose memory mappings are to be obtained.
    463 * @list: Where to write the memory mappings to.
    464 * @errp: Pointer for reporting an #Error.
    465 */
    466void cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
    467                            Error **errp);
    468
    469#if !defined(CONFIG_USER_ONLY)
    470
    471/**
    472 * cpu_write_elf64_note:
    473 * @f: pointer to a function that writes memory to a file
    474 * @cpu: The CPU whose memory is to be dumped
    475 * @cpuid: ID number of the CPU
    476 * @opaque: pointer to the CPUState struct
    477 */
    478int cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
    479                         int cpuid, void *opaque);
    480
    481/**
    482 * cpu_write_elf64_qemunote:
    483 * @f: pointer to a function that writes memory to a file
    484 * @cpu: The CPU whose memory is to be dumped
    485 * @cpuid: ID number of the CPU
    486 * @opaque: pointer to the CPUState struct
    487 */
    488int cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
    489                             void *opaque);
    490
    491/**
    492 * cpu_write_elf32_note:
    493 * @f: pointer to a function that writes memory to a file
    494 * @cpu: The CPU whose memory is to be dumped
    495 * @cpuid: ID number of the CPU
    496 * @opaque: pointer to the CPUState struct
    497 */
    498int cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
    499                         int cpuid, void *opaque);
    500
    501/**
    502 * cpu_write_elf32_qemunote:
    503 * @f: pointer to a function that writes memory to a file
    504 * @cpu: The CPU whose memory is to be dumped
    505 * @cpuid: ID number of the CPU
    506 * @opaque: pointer to the CPUState struct
    507 */
    508int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
    509                             void *opaque);
    510
    511/**
    512 * cpu_get_crash_info:
    513 * @cpu: The CPU to get crash information for
    514 *
    515 * Gets the previously saved crash information.
    516 * Caller is responsible for freeing the data.
    517 */
    518GuestPanicInformation *cpu_get_crash_info(CPUState *cpu);
    519
    520#endif /* !CONFIG_USER_ONLY */
    521
    522/**
    523 * CPUDumpFlags:
    524 * @CPU_DUMP_CODE:
    525 * @CPU_DUMP_FPU: dump FPU register state, not just integer
    526 * @CPU_DUMP_CCOP: dump info about TCG QEMU's condition code optimization state
    527 */
    528enum CPUDumpFlags {
    529    CPU_DUMP_CODE = 0x00010000,
    530    CPU_DUMP_FPU  = 0x00020000,
    531    CPU_DUMP_CCOP = 0x00040000,
    532};
    533
    534/**
    535 * cpu_dump_state:
    536 * @cpu: The CPU whose state is to be dumped.
    537 * @f: If non-null, dump to this stream, else to current print sink.
    538 *
    539 * Dumps CPU state.
    540 */
    541void cpu_dump_state(CPUState *cpu, FILE *f, int flags);
    542
    543#ifndef CONFIG_USER_ONLY
    544/**
    545 * cpu_get_phys_page_attrs_debug:
    546 * @cpu: The CPU to obtain the physical page address for.
    547 * @addr: The virtual address.
    548 * @attrs: Updated on return with the memory transaction attributes to use
    549 *         for this access.
    550 *
    551 * Obtains the physical page corresponding to a virtual one, together
    552 * with the corresponding memory transaction attributes to use for the access.
    553 * Use it only for debugging because no protection checks are done.
    554 *
    555 * Returns: Corresponding physical page address or -1 if no page found.
    556 */
    557hwaddr cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
    558                                     MemTxAttrs *attrs);
    559
    560/**
    561 * cpu_get_phys_page_debug:
    562 * @cpu: The CPU to obtain the physical page address for.
    563 * @addr: The virtual address.
    564 *
    565 * Obtains the physical page corresponding to a virtual one.
    566 * Use it only for debugging because no protection checks are done.
    567 *
    568 * Returns: Corresponding physical page address or -1 if no page found.
    569 */
    570hwaddr cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
    571
    572/** cpu_asidx_from_attrs:
    573 * @cpu: CPU
    574 * @attrs: memory transaction attributes
    575 *
    576 * Returns the address space index specifying the CPU AddressSpace
    577 * to use for a memory access with the given transaction attributes.
    578 */
    579int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs);
    580
    581/**
    582 * cpu_virtio_is_big_endian:
    583 * @cpu: CPU
    584 *
    585 * Returns %true if a CPU which supports runtime configurable endianness
    586 * is currently big-endian.
    587 */
    588bool cpu_virtio_is_big_endian(CPUState *cpu);
    589
    590#endif /* CONFIG_USER_ONLY */
    591
    592/**
    593 * cpu_list_add:
    594 * @cpu: The CPU to be added to the list of CPUs.
    595 */
    596void cpu_list_add(CPUState *cpu);
    597
    598/**
    599 * cpu_list_remove:
    600 * @cpu: The CPU to be removed from the list of CPUs.
    601 */
    602void cpu_list_remove(CPUState *cpu);
    603
    604/**
    605 * cpu_reset:
    606 * @cpu: The CPU whose state is to be reset.
    607 */
    608void cpu_reset(CPUState *cpu);
    609
    610/**
    611 * cpu_class_by_name:
    612 * @typename: The CPU base type.
    613 * @cpu_model: The model string without any parameters.
    614 *
    615 * Looks up a CPU #ObjectClass matching name @cpu_model.
    616 *
    617 * Returns: A #CPUClass or %NULL if no matching class is found.
    618 */
    619ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model);
    620
    621/**
    622 * cpu_create:
    623 * @typename: The CPU type.
    624 *
    625 * Instantiates a CPU and realizes the CPU.
    626 *
    627 * Returns: A #CPUState or %NULL if an error occurred.
    628 */
    629CPUState *cpu_create(const char *typename);
    630
    631/**
    632 * parse_cpu_option:
    633 * @cpu_option: The -cpu option including optional parameters.
    634 *
    635 * processes optional parameters and registers them as global properties
    636 *
    637 * Returns: type of CPU to create or prints error and terminates process
    638 *          if an error occurred.
    639 */
    640const char *parse_cpu_option(const char *cpu_option);
    641
    642/**
    643 * cpu_has_work:
    644 * @cpu: The vCPU to check.
    645 *
    646 * Checks whether the CPU has work to do.
    647 *
    648 * Returns: %true if the CPU has work, %false otherwise.
    649 */
    650static inline bool cpu_has_work(CPUState *cpu)
    651{
    652    CPUClass *cc = CPU_GET_CLASS(cpu);
    653
    654    g_assert(cc->has_work);
    655    return cc->has_work(cpu);
    656}
    657
    658/**
    659 * qemu_cpu_is_self:
    660 * @cpu: The vCPU to check against.
    661 *
    662 * Checks whether the caller is executing on the vCPU thread.
    663 *
    664 * Returns: %true if called from @cpu's thread, %false otherwise.
    665 */
    666bool qemu_cpu_is_self(CPUState *cpu);
    667
    668/**
    669 * qemu_cpu_kick:
    670 * @cpu: The vCPU to kick.
    671 *
    672 * Kicks @cpu's thread.
    673 */
    674void qemu_cpu_kick(CPUState *cpu);
    675
    676/**
    677 * cpu_is_stopped:
    678 * @cpu: The CPU to check.
    679 *
    680 * Checks whether the CPU is stopped.
    681 *
    682 * Returns: %true if run state is not running or if artificially stopped;
    683 * %false otherwise.
    684 */
    685bool cpu_is_stopped(CPUState *cpu);
    686
    687/**
    688 * do_run_on_cpu:
    689 * @cpu: The vCPU to run on.
    690 * @func: The function to be executed.
    691 * @data: Data to pass to the function.
    692 * @mutex: Mutex to release while waiting for @func to run.
    693 *
    694 * Used internally in the implementation of run_on_cpu.
    695 */
    696void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data,
    697                   QemuMutex *mutex);
    698
    699/**
    700 * run_on_cpu:
    701 * @cpu: The vCPU to run on.
    702 * @func: The function to be executed.
    703 * @data: Data to pass to the function.
    704 *
    705 * Schedules the function @func for execution on the vCPU @cpu.
    706 */
    707void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
    708
    709/**
    710 * async_run_on_cpu:
    711 * @cpu: The vCPU to run on.
    712 * @func: The function to be executed.
    713 * @data: Data to pass to the function.
    714 *
    715 * Schedules the function @func for execution on the vCPU @cpu asynchronously.
    716 */
    717void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
    718
    719/**
    720 * async_safe_run_on_cpu:
    721 * @cpu: The vCPU to run on.
    722 * @func: The function to be executed.
    723 * @data: Data to pass to the function.
    724 *
    725 * Schedules the function @func for execution on the vCPU @cpu asynchronously,
    726 * while all other vCPUs are sleeping.
    727 *
    728 * Unlike run_on_cpu and async_run_on_cpu, the function is run outside the
    729 * BQL.
    730 */
    731void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
    732
    733/**
    734 * cpu_in_exclusive_context()
    735 * @cpu: The vCPU to check
    736 *
    737 * Returns true if @cpu is an exclusive context, for example running
    738 * something which has previously been queued via async_safe_run_on_cpu().
    739 */
    740static inline bool cpu_in_exclusive_context(const CPUState *cpu)
    741{
    742    return cpu->in_exclusive_context;
    743}
    744
    745/**
    746 * qemu_get_cpu:
    747 * @index: The CPUState@cpu_index value of the CPU to obtain.
    748 *
    749 * Gets a CPU matching @index.
    750 *
    751 * Returns: The CPU or %NULL if there is no matching CPU.
    752 */
    753CPUState *qemu_get_cpu(int index);
    754
    755/**
    756 * cpu_exists:
    757 * @id: Guest-exposed CPU ID to lookup.
    758 *
    759 * Search for CPU with specified ID.
    760 *
    761 * Returns: %true - CPU is found, %false - CPU isn't found.
    762 */
    763bool cpu_exists(int64_t id);
    764
    765/**
    766 * cpu_by_arch_id:
    767 * @id: Guest-exposed CPU ID of the CPU to obtain.
    768 *
    769 * Get a CPU with matching @id.
    770 *
    771 * Returns: The CPU or %NULL if there is no matching CPU.
    772 */
    773CPUState *cpu_by_arch_id(int64_t id);
    774
    775/**
    776 * cpu_interrupt:
    777 * @cpu: The CPU to set an interrupt on.
    778 * @mask: The interrupts to set.
    779 *
    780 * Invokes the interrupt handler.
    781 */
    782
    783void cpu_interrupt(CPUState *cpu, int mask);
    784
    785/**
    786 * cpu_set_pc:
    787 * @cpu: The CPU to set the program counter for.
    788 * @addr: Program counter value.
    789 *
    790 * Sets the program counter for a CPU.
    791 */
    792static inline void cpu_set_pc(CPUState *cpu, vaddr addr)
    793{
    794    CPUClass *cc = CPU_GET_CLASS(cpu);
    795
    796    cc->set_pc(cpu, addr);
    797}
    798
    799/**
    800 * cpu_reset_interrupt:
    801 * @cpu: The CPU to clear the interrupt on.
    802 * @mask: The interrupt mask to clear.
    803 *
    804 * Resets interrupts on the vCPU @cpu.
    805 */
    806void cpu_reset_interrupt(CPUState *cpu, int mask);
    807
    808/**
    809 * cpu_exit:
    810 * @cpu: The CPU to exit.
    811 *
    812 * Requests the CPU @cpu to exit execution.
    813 */
    814void cpu_exit(CPUState *cpu);
    815
    816/**
    817 * cpu_resume:
    818 * @cpu: The CPU to resume.
    819 *
    820 * Resumes CPU, i.e. puts CPU into runnable state.
    821 */
    822void cpu_resume(CPUState *cpu);
    823
    824/**
    825 * cpu_remove_sync:
    826 * @cpu: The CPU to remove.
    827 *
    828 * Requests the CPU to be removed and waits till it is removed.
    829 */
    830void cpu_remove_sync(CPUState *cpu);
    831
    832/**
    833 * process_queued_cpu_work() - process all items on CPU work queue
    834 * @cpu: The CPU which work queue to process.
    835 */
    836void process_queued_cpu_work(CPUState *cpu);
    837
    838/**
    839 * cpu_exec_start:
    840 * @cpu: The CPU for the current thread.
    841 *
    842 * Record that a CPU has started execution and can be interrupted with
    843 * cpu_exit.
    844 */
    845void cpu_exec_start(CPUState *cpu);
    846
    847/**
    848 * cpu_exec_end:
    849 * @cpu: The CPU for the current thread.
    850 *
    851 * Record that a CPU has stopped execution and exclusive sections
    852 * can be executed without interrupting it.
    853 */
    854void cpu_exec_end(CPUState *cpu);
    855
    856/**
    857 * start_exclusive:
    858 *
    859 * Wait for a concurrent exclusive section to end, and then start
    860 * a section of work that is run while other CPUs are not running
    861 * between cpu_exec_start and cpu_exec_end.  CPUs that are running
    862 * cpu_exec are exited immediately.  CPUs that call cpu_exec_start
    863 * during the exclusive section go to sleep until this CPU calls
    864 * end_exclusive.
    865 */
    866void start_exclusive(void);
    867
    868/**
    869 * end_exclusive:
    870 *
    871 * Concludes an exclusive execution section started by start_exclusive.
    872 */
    873void end_exclusive(void);
    874
    875/**
    876 * qemu_init_vcpu:
    877 * @cpu: The vCPU to initialize.
    878 *
    879 * Initializes a vCPU.
    880 */
    881void qemu_init_vcpu(CPUState *cpu);
    882
#define SSTEP_ENABLE  0x1  /* Enable simulated HW single stepping */
#define SSTEP_NOIRQ   0x2  /* Do not use IRQ while single stepping */
#define SSTEP_NOTIMER 0x4  /* Do not use timers while single stepping */
    886
/**
 * cpu_single_step:
 * @cpu: CPU to set the single-step flags for.
 * @enabled: Flags to enable (mask of SSTEP_* bits).
 *
 * Enables or disables single-stepping for @cpu.
 */
void cpu_single_step(CPUState *cpu, int enabled);
    895
    896/* Breakpoint/watchpoint flags */
    897#define BP_MEM_READ           0x01
    898#define BP_MEM_WRITE          0x02
    899#define BP_MEM_ACCESS         (BP_MEM_READ | BP_MEM_WRITE)
    900#define BP_STOP_BEFORE_ACCESS 0x04
    901/* 0x08 currently unused */
    902#define BP_GDB                0x10
    903#define BP_CPU                0x20
    904#define BP_ANY                (BP_GDB | BP_CPU)
    905#define BP_WATCHPOINT_HIT_READ 0x40
    906#define BP_WATCHPOINT_HIT_WRITE 0x80
    907#define BP_WATCHPOINT_HIT (BP_WATCHPOINT_HIT_READ | BP_WATCHPOINT_HIT_WRITE)
    908
    909int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
    910                          CPUBreakpoint **breakpoint);
    911int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags);
    912void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint);
    913void cpu_breakpoint_remove_all(CPUState *cpu, int mask);
    914
    915/* Return true if PC matches an installed breakpoint.  */
    916static inline bool cpu_breakpoint_test(CPUState *cpu, vaddr pc, int mask)
    917{
    918    CPUBreakpoint *bp;
    919
    920    if (unlikely(!QTAILQ_EMPTY(&cpu->breakpoints))) {
    921        QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
    922            if (bp->pc == pc && (bp->flags & mask)) {
    923                return true;
    924            }
    925        }
    926    }
    927    return false;
    928}
    929
    930#ifdef CONFIG_USER_ONLY
/* User-mode emulation has no watchpoint support: always fails with -ENOSYS. */
static inline int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                                        int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
    936
/* User-mode emulation has no watchpoint support: always fails with -ENOSYS. */
static inline int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
                                        vaddr len, int flags)
{
    return -ENOSYS;
}
    942
/* No-op: user-mode emulation never has watchpoints to remove. */
static inline void cpu_watchpoint_remove_by_ref(CPUState *cpu,
                                                CPUWatchpoint *wp)
{
}
    947
/* No-op: user-mode emulation never has watchpoints to remove. */
static inline void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}
    951
    952static inline void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
    953                                        MemTxAttrs atr, int fl, uintptr_t ra)
    954{
    955}
    956
/* User-mode emulation has no watchpoints: no address ever matches (0). */
static inline int cpu_watchpoint_address_matches(CPUState *cpu,
                                                 vaddr addr, vaddr len)
{
    return 0;
}
    962#else
    963int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
    964                          int flags, CPUWatchpoint **watchpoint);
    965int cpu_watchpoint_remove(CPUState *cpu, vaddr addr,
    966                          vaddr len, int flags);
    967void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint);
    968void cpu_watchpoint_remove_all(CPUState *cpu, int mask);
    969
    970/**
    971 * cpu_check_watchpoint:
    972 * @cpu: cpu context
    973 * @addr: guest virtual address
    974 * @len: access length
    975 * @attrs: memory access attributes
    976 * @flags: watchpoint access type
    977 * @ra: unwind return address
    978 *
    979 * Check for a watchpoint hit in [addr, addr+len) of the type
    980 * specified by @flags.  Exit via exception with a hit.
    981 */
    982void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
    983                          MemTxAttrs attrs, int flags, uintptr_t ra);
    984
    985/**
    986 * cpu_watchpoint_address_matches:
    987 * @cpu: cpu context
    988 * @addr: guest virtual address
    989 * @len: access length
    990 *
    991 * Return the watchpoint flags that apply to [addr, addr+len).
    992 * If no watchpoint is registered for the range, the result is 0.
    993 */
    994int cpu_watchpoint_address_matches(CPUState *cpu, vaddr addr, vaddr len);
    995#endif
    996
    997/**
    998 * cpu_get_address_space:
    999 * @cpu: CPU to get address space from
   1000 * @asidx: index identifying which address space to get
   1001 *
   1002 * Return the requested address space of this CPU. @asidx
   1003 * specifies which address space to read.
   1004 */
   1005AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx);
   1006
   1007void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...)
   1008    GCC_FMT_ATTR(2, 3);
   1009
   1010/* $(top_srcdir)/cpu.c */
   1011void cpu_exec_initfn(CPUState *cpu);
   1012void cpu_exec_realizefn(CPUState *cpu, Error **errp);
   1013void cpu_exec_unrealizefn(CPUState *cpu);
   1014
   1015/**
   1016 * target_words_bigendian:
   1017 * Returns true if the (default) endianness of the target is big endian,
   1018 * false otherwise. Note that in target-specific code, you can use
   1019 * TARGET_WORDS_BIGENDIAN directly instead. On the other hand, common
   1020 * code should normally never need to know about the endianness of the
   1021 * target, so please do *not* use this function unless you know very well
   1022 * what you are doing!
   1023 */
   1024bool target_words_bigendian(void);
   1025
   1026#ifdef NEED_CPU_H
   1027
   1028#ifdef CONFIG_SOFTMMU
   1029
   1030extern const VMStateDescription vmstate_cpu_common;
   1031
   1032#define VMSTATE_CPU() {                                                     \
   1033    .name = "parent_obj",                                                   \
   1034    .size = sizeof(CPUState),                                               \
   1035    .vmsd = &vmstate_cpu_common,                                            \
   1036    .flags = VMS_STRUCT,                                                    \
   1037    .offset = 0,                                                            \
   1038}
   1039#endif /* CONFIG_SOFTMMU */
   1040
   1041#endif /* NEED_CPU_H */
   1042
   1043#define UNASSIGNED_CPU_INDEX -1
   1044#define UNASSIGNED_CLUSTER_INDEX -1
   1045
   1046#endif