cachepc-qemu

Fork of AMDESE/qemu with changes for the cachepc side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu

tcg-cpu-ops.h (4493B)


/*
 * TCG CPU-specific operations
 *
 * Copyright 2021 SUSE LLC
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#ifndef TCG_CPU_OPS_H
#define TCG_CPU_OPS_H

#include "hw/core/cpu.h"

struct TCGCPUOps {
    /**
     * @initialize: Initialize TCG state
     *
     * Called when the first CPU is realized.
     */
    void (*initialize)(void);
    /**
     * @synchronize_from_tb: Synchronize state from a TCG #TranslationBlock
     *
     * This is called when we abandon execution of a TB before starting it,
     * and must set all parts of the CPU state which the previous TB in the
     * chain may not have updated.
     * By default, when this is NULL, a call is made to @set_pc(tb->pc).
     *
     * If more state needs to be restored, the target must implement a
     * function to restore all the state, and register it here.
     */
    void (*synchronize_from_tb)(CPUState *cpu, const TranslationBlock *tb);
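    /*
     * A minimal sketch (not part of this header) of a target hook that
     * restores only the program counter, assuming a hypothetical MyCPU
     * whose env has a single `pc` field:
     *
     *   static void my_cpu_synchronize_from_tb(CPUState *cs,
     *                                          const TranslationBlock *tb)
     *   {
     *       MyCPU *cpu = MY_CPU(cs);
     *
     *       // Bring the interpreter-visible PC in line with the abandoned TB.
     *       cpu->env.pc = tb->pc;
     *   }
     */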
    /** @cpu_exec_enter: Callback for cpu_exec preparation */
    void (*cpu_exec_enter)(CPUState *cpu);
    /** @cpu_exec_exit: Callback for cpu_exec cleanup */
    void (*cpu_exec_exit)(CPUState *cpu);
    /**
     * @tlb_fill: Handle a softmmu tlb miss or user-only address fault
     *
     * For system mode, if the access is valid, call tlb_set_page
     * and return true; if the access is invalid, and probe is
     * true, return false; otherwise raise an exception and do
     * not return.  For user-only mode, always raise an exception
     * and do not return.
     */
    bool (*tlb_fill)(CPUState *cpu, vaddr address, int size,
                     MMUAccessType access_type, int mmu_idx,
                     bool probe, uintptr_t retaddr);
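    /*
     * A minimal sketch (illustrative only) of the control flow described
     * above for a hypothetical softmmu target; MyCPU, my_cpu_translate()
     * and my_cpu_raise_mmu_fault() are made-up names for this example:
     *
     *   static bool my_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
     *                               MMUAccessType access_type, int mmu_idx,
     *                               bool probe, uintptr_t retaddr)
     *   {
     *       hwaddr phys;
     *       int prot;
     *
     *       if (my_cpu_translate(cs, addr, access_type, mmu_idx,
     *                            &phys, &prot)) {
     *           // Valid access: install the mapping and report success.
     *           tlb_set_page(cs, addr & TARGET_PAGE_MASK,
     *                        phys & TARGET_PAGE_MASK, prot,
     *                        mmu_idx, TARGET_PAGE_SIZE);
     *           return true;
     *       }
     *       if (probe) {
     *           // Probing only: report the miss without raising anything.
     *           return false;
     *       }
     *       // Invalid access: raise the guest fault; this does not return.
     *       my_cpu_raise_mmu_fault(cs, addr, access_type, mmu_idx, retaddr);
     *   }
     */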
    /** @debug_excp_handler: Callback for handling debug exceptions */
    void (*debug_excp_handler)(CPUState *cpu);

#ifdef NEED_CPU_H
#if defined(CONFIG_USER_ONLY) && defined(TARGET_I386)
    /**
     * @fake_user_interrupt: Callback for 'fake exception' handling.
     *
     * Simulate 'fake exception' which will be handled outside the
     * cpu execution loop (hack for x86 user mode).
     */
    void (*fake_user_interrupt)(CPUState *cpu);
#else
    /**
     * @do_interrupt: Callback for interrupt handling.
     */
    void (*do_interrupt)(CPUState *cpu);
#endif /* !CONFIG_USER_ONLY || !TARGET_I386 */
#ifdef CONFIG_SOFTMMU
    /** @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec */
    bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);
    /**
     * @do_transaction_failed: Callback for handling failed memory transactions
     * (i.e. bus faults or external aborts; not MMU faults)
     */
    void (*do_transaction_failed)(CPUState *cpu, hwaddr physaddr, vaddr addr,
                                  unsigned size, MMUAccessType access_type,
                                  int mmu_idx, MemTxAttrs attrs,
                                  MemTxResult response, uintptr_t retaddr);
    /**
     * @do_unaligned_access: Callback for unaligned access handling
     * The callback must exit via raising an exception.
     */
    void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
                                MMUAccessType access_type,
                                int mmu_idx, uintptr_t retaddr) QEMU_NORETURN;
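    /*
     * A minimal sketch (illustrative only) of an unaligned-access handler
     * that satisfies the QEMU_NORETURN contract above; MyCPU, env.fault_addr
     * and EXCP_UNALIGNED are hypothetical names for this example:
     *
     *   static void my_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
     *                                          MMUAccessType access_type,
     *                                          int mmu_idx, uintptr_t retaddr)
     *   {
     *       MyCPU *cpu = MY_CPU(cs);
     *
     *       cpu->env.fault_addr = addr;           // record the faulting address
     *       cs->exception_index = EXCP_UNALIGNED; // hypothetical exception number
     *       // Unwind to the guest exception handler; never returns.
     *       cpu_loop_exit_restore(cs, retaddr);
     *   }
     */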

    /**
     * @adjust_watchpoint_address: hack for cpu_check_watchpoint used by ARM
     */
    vaddr (*adjust_watchpoint_address)(CPUState *cpu, vaddr addr, int len);

    /**
     * @debug_check_watchpoint: return true if the architectural
     * watchpoint whose address has matched should really fire, used by ARM
     */
    bool (*debug_check_watchpoint)(CPUState *cpu, CPUWatchpoint *wp);

    /**
     * @debug_check_breakpoint: return true if the architectural
     * breakpoint whose PC has matched should really fire.
     */
    bool (*debug_check_breakpoint)(CPUState *cpu);

    /**
     * @io_recompile_replay_branch: Callback for cpu_io_recompile.
     *
     * The cpu has been stopped, and cpu_restore_state_from_tb has been
     * called.  If the faulting instruction is in a delay slot, and the
     * target architecture requires re-execution of the branch, then
     * adjust the cpu state as required and return true.
     */
    bool (*io_recompile_replay_branch)(CPUState *cpu,
                                       const TranslationBlock *tb);
#endif /* CONFIG_SOFTMMU */
#endif /* NEED_CPU_H */

};

#endif /* TCG_CPU_OPS_H */
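
As a usage note, a target enables these hooks by filling in a static struct TCGCPUOps and pointing its CPU class at it from class_init. The sketch below is illustrative only: the my_cpu_* functions and "my_cpu" naming are hypothetical placeholders, and it assumes a QEMU tree of this vintage where CPUClass carries a tcg_ops pointer (system-mode build, so do_interrupt is available).

#include "qemu/osdep.h"
#include "hw/core/cpu.h"
#include "hw/core/tcg-cpu-ops.h"

/* Hypothetical target callbacks, assumed to be implemented elsewhere. */
static void my_cpu_translate_init(void);
static void my_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb);
static bool my_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                            MMUAccessType access_type, int mmu_idx,
                            bool probe, uintptr_t retaddr);
static void my_cpu_do_interrupt(CPUState *cs);

/* Hypothetical TCG hook table for an imaginary "my_cpu" target. */
static const struct TCGCPUOps my_cpu_tcg_ops = {
    .initialize          = my_cpu_translate_init,
    .synchronize_from_tb = my_cpu_synchronize_from_tb,
    .tlb_fill            = my_cpu_tlb_fill,
    .do_interrupt        = my_cpu_do_interrupt,
};

static void my_cpu_class_init(ObjectClass *oc, void *data)
{
    CPUClass *cc = CPU_CLASS(oc);

    /* Register the TCG callbacks declared in tcg-cpu-ops.h. */
    cc->tcg_ops = &my_cpu_tcg_ops;
}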