cachepc-qemu

Fork of AMDESE/qemu with changes for the cachepc side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu

ppc.c (42377B)


      1/*
      2 * QEMU generic PowerPC hardware System Emulator
      3 *
      4 * Copyright (c) 2003-2007 Jocelyn Mayer
      5 *
      6 * Permission is hereby granted, free of charge, to any person obtaining a copy
      7 * of this software and associated documentation files (the "Software"), to deal
      8 * in the Software without restriction, including without limitation the rights
      9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
     10 * copies of the Software, and to permit persons to whom the Software is
     11 * furnished to do so, subject to the following conditions:
     12 *
     13 * The above copyright notice and this permission notice shall be included in
     14 * all copies or substantial portions of the Software.
     15 *
     16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
     19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
     20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
     21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
     22 * THE SOFTWARE.
     23 */
     24
     25#include "qemu/osdep.h"
     26#include "hw/irq.h"
     27#include "hw/ppc/ppc.h"
     28#include "hw/ppc/ppc_e500.h"
     29#include "qemu/timer.h"
     30#include "sysemu/cpus.h"
     31#include "qemu/log.h"
     32#include "qemu/main-loop.h"
     33#include "qemu/error-report.h"
     34#include "sysemu/kvm.h"
     35#include "sysemu/runstate.h"
     36#include "kvm_ppc.h"
     37#include "migration/vmstate.h"
     38#include "trace.h"
     39
     40static void cpu_ppc_tb_stop (CPUPPCState *env);
     41static void cpu_ppc_tb_start (CPUPPCState *env);
     42
     43void ppc_set_irq(PowerPCCPU *cpu, int n_IRQ, int level)
     44{
     45    CPUState *cs = CPU(cpu);
     46    CPUPPCState *env = &cpu->env;
     47    unsigned int old_pending;
     48    bool locked = false;
     49
     50    /* We may already have the BQL if coming from the reset path */
     51    if (!qemu_mutex_iothread_locked()) {
     52        locked = true;
     53        qemu_mutex_lock_iothread();
     54    }
     55
     56    old_pending = env->pending_interrupts;
     57
     58    if (level) {
     59        env->pending_interrupts |= 1 << n_IRQ;
     60        cpu_interrupt(cs, CPU_INTERRUPT_HARD);
     61    } else {
     62        env->pending_interrupts &= ~(1 << n_IRQ);
     63        if (env->pending_interrupts == 0) {
     64            cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
     65        }
     66    }
     67
     68    if (old_pending != env->pending_interrupts) {
     69        kvmppc_set_interrupt(cpu, n_IRQ, level);
     70    }
     71
     72
     73    trace_ppc_irq_set_exit(env, n_IRQ, level, env->pending_interrupts,
     74                           CPU(cpu)->interrupt_request);
     75
     76    if (locked) {
     77        qemu_mutex_unlock_iothread();
     78    }
     79}
     80
     81/* PowerPC 6xx / 7xx internal IRQ controller */
     82static void ppc6xx_set_irq(void *opaque, int pin, int level)
     83{
     84    PowerPCCPU *cpu = opaque;
     85    CPUPPCState *env = &cpu->env;
     86    int cur_level;
     87
     88    trace_ppc_irq_set(env, pin, level);
     89
     90    cur_level = (env->irq_input_state >> pin) & 1;
     91    /* Don't generate spurious events */
     92    if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
     93        CPUState *cs = CPU(cpu);
     94
     95        switch (pin) {
     96        case PPC6xx_INPUT_TBEN:
     97            /* Level sensitive - active high */
     98            trace_ppc_irq_set_state("time base", level);
     99            if (level) {
    100                cpu_ppc_tb_start(env);
    101            } else {
    102                cpu_ppc_tb_stop(env);
    103            }
    104            break;
    105        case PPC6xx_INPUT_INT:
    106            /* Level sensitive - active high */
    107            trace_ppc_irq_set_state("external IRQ", level);
    108            ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
    109            break;
    110        case PPC6xx_INPUT_SMI:
    111            /* Level sensitive - active high */
    112            trace_ppc_irq_set_state("SMI IRQ", level);
    113            ppc_set_irq(cpu, PPC_INTERRUPT_SMI, level);
    114            break;
    115        case PPC6xx_INPUT_MCP:
    116            /* Negative edge sensitive */
     117            /* XXX: TODO: actual reaction may depend on HID0 status
    118             *            603/604/740/750: check HID0[EMCP]
    119             */
    120            if (cur_level == 1 && level == 0) {
    121                trace_ppc_irq_set_state("machine check", 1);
    122                ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1);
    123            }
    124            break;
    125        case PPC6xx_INPUT_CKSTP_IN:
    126            /* Level sensitive - active low */
    127            /* XXX: TODO: relay the signal to CKSTP_OUT pin */
    128            /* XXX: Note that the only way to restart the CPU is to reset it */
    129            if (level) {
    130                trace_ppc_irq_cpu("stop");
    131                cs->halted = 1;
    132            }
    133            break;
    134        case PPC6xx_INPUT_HRESET:
    135            /* Level sensitive - active low */
    136            if (level) {
    137                trace_ppc_irq_reset("CPU");
    138                cpu_interrupt(cs, CPU_INTERRUPT_RESET);
    139            }
    140            break;
    141        case PPC6xx_INPUT_SRESET:
    142            trace_ppc_irq_set_state("RESET IRQ", level);
    143            ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level);
    144            break;
    145        default:
    146            g_assert_not_reached();
    147        }
    148        if (level)
    149            env->irq_input_state |= 1 << pin;
    150        else
    151            env->irq_input_state &= ~(1 << pin);
    152    }
    153}
    154
    155void ppc6xx_irq_init(PowerPCCPU *cpu)
    156{
    157    CPUPPCState *env = &cpu->env;
    158
    159    env->irq_inputs = (void **)qemu_allocate_irqs(&ppc6xx_set_irq, cpu,
    160                                                  PPC6xx_INPUT_NB);
    161}
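
        /*
         * Editorial sketch, not part of the original file: once
         * ppc6xx_irq_init() has run, a board model can drive one of the
         * allocated input pins through the generic IRQ API. The cast mirrors
         * the (void **) storage of irq_inputs above; the helper name is
         * invented for illustration.
         */
        static inline void example_raise_ppc6xx_ext_irq(PowerPCCPU *cpu, int level)
        {
            CPUPPCState *env = &cpu->env;
            qemu_irq *pins = (qemu_irq *)env->irq_inputs;

            /* Ends up in ppc6xx_set_irq(cpu, PPC6xx_INPUT_INT, level) */
            qemu_set_irq(pins[PPC6xx_INPUT_INT], level);
        }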
    162
    163#if defined(TARGET_PPC64)
    164/* PowerPC 970 internal IRQ controller */
    165static void ppc970_set_irq(void *opaque, int pin, int level)
    166{
    167    PowerPCCPU *cpu = opaque;
    168    CPUPPCState *env = &cpu->env;
    169    int cur_level;
    170
    171    trace_ppc_irq_set(env, pin, level);
    172
    173    cur_level = (env->irq_input_state >> pin) & 1;
    174    /* Don't generate spurious events */
    175    if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
    176        CPUState *cs = CPU(cpu);
    177
    178        switch (pin) {
    179        case PPC970_INPUT_INT:
    180            /* Level sensitive - active high */
    181            trace_ppc_irq_set_state("external IRQ", level);
    182            ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
    183            break;
    184        case PPC970_INPUT_THINT:
    185            /* Level sensitive - active high */
    186            trace_ppc_irq_set_state("SMI IRQ", level);
    187            ppc_set_irq(cpu, PPC_INTERRUPT_THERM, level);
    188            break;
    189        case PPC970_INPUT_MCP:
    190            /* Negative edge sensitive */
     191            /* XXX: TODO: actual reaction may depend on HID0 status
    192             *            603/604/740/750: check HID0[EMCP]
    193             */
    194            if (cur_level == 1 && level == 0) {
    195                trace_ppc_irq_set_state("machine check", 1);
    196                ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1);
    197            }
    198            break;
    199        case PPC970_INPUT_CKSTP:
    200            /* Level sensitive - active low */
    201            /* XXX: TODO: relay the signal to CKSTP_OUT pin */
    202            if (level) {
    203                trace_ppc_irq_cpu("stop");
    204                cs->halted = 1;
    205            } else {
    206                trace_ppc_irq_cpu("restart");
    207                cs->halted = 0;
    208                qemu_cpu_kick(cs);
    209            }
    210            break;
    211        case PPC970_INPUT_HRESET:
    212            /* Level sensitive - active low */
    213            if (level) {
    214                cpu_interrupt(cs, CPU_INTERRUPT_RESET);
    215            }
    216            break;
    217        case PPC970_INPUT_SRESET:
    218            trace_ppc_irq_set_state("RESET IRQ", level);
    219            ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level);
    220            break;
    221        case PPC970_INPUT_TBEN:
    222            trace_ppc_irq_set_state("TBEN IRQ", level);
    223            /* XXX: TODO */
    224            break;
    225        default:
    226            g_assert_not_reached();
    227        }
    228        if (level)
    229            env->irq_input_state |= 1 << pin;
    230        else
    231            env->irq_input_state &= ~(1 << pin);
    232    }
    233}
    234
    235void ppc970_irq_init(PowerPCCPU *cpu)
    236{
    237    CPUPPCState *env = &cpu->env;
    238
    239    env->irq_inputs = (void **)qemu_allocate_irqs(&ppc970_set_irq, cpu,
    240                                                  PPC970_INPUT_NB);
    241}
    242
    243/* POWER7 internal IRQ controller */
    244static void power7_set_irq(void *opaque, int pin, int level)
    245{
    246    PowerPCCPU *cpu = opaque;
    247
    248    trace_ppc_irq_set(&cpu->env, pin, level);
    249
    250    switch (pin) {
    251    case POWER7_INPUT_INT:
    252        /* Level sensitive - active high */
    253        trace_ppc_irq_set_state("external IRQ", level);
    254        ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
    255        break;
    256    default:
    257        g_assert_not_reached();
    258    }
    259}
    260
    261void ppcPOWER7_irq_init(PowerPCCPU *cpu)
    262{
    263    CPUPPCState *env = &cpu->env;
    264
    265    env->irq_inputs = (void **)qemu_allocate_irqs(&power7_set_irq, cpu,
    266                                                  POWER7_INPUT_NB);
    267}
    268
    269/* POWER9 internal IRQ controller */
    270static void power9_set_irq(void *opaque, int pin, int level)
    271{
    272    PowerPCCPU *cpu = opaque;
    273
    274    trace_ppc_irq_set(&cpu->env, pin, level);
    275
    276    switch (pin) {
    277    case POWER9_INPUT_INT:
    278        /* Level sensitive - active high */
    279        trace_ppc_irq_set_state("external IRQ", level);
    280        ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
    281        break;
    282    case POWER9_INPUT_HINT:
    283        /* Level sensitive - active high */
    284        trace_ppc_irq_set_state("HV external IRQ", level);
    285        ppc_set_irq(cpu, PPC_INTERRUPT_HVIRT, level);
    286        break;
    287    default:
    288        g_assert_not_reached();
    289        return;
    290    }
    291}
    292
    293void ppcPOWER9_irq_init(PowerPCCPU *cpu)
    294{
    295    CPUPPCState *env = &cpu->env;
    296
    297    env->irq_inputs = (void **)qemu_allocate_irqs(&power9_set_irq, cpu,
    298                                                  POWER9_INPUT_NB);
    299}
    300#endif /* defined(TARGET_PPC64) */
    301
    302void ppc40x_core_reset(PowerPCCPU *cpu)
    303{
    304    CPUPPCState *env = &cpu->env;
    305    target_ulong dbsr;
    306
    307    qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC core\n");
    308    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_RESET);
    309    dbsr = env->spr[SPR_40x_DBSR];
    310    dbsr &= ~0x00000300;
    311    dbsr |= 0x00000100;
    312    env->spr[SPR_40x_DBSR] = dbsr;
    313}
    314
    315void ppc40x_chip_reset(PowerPCCPU *cpu)
    316{
    317    CPUPPCState *env = &cpu->env;
    318    target_ulong dbsr;
    319
    320    qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC chip\n");
    321    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_RESET);
    322    /* XXX: TODO reset all internal peripherals */
    323    dbsr = env->spr[SPR_40x_DBSR];
    324    dbsr &= ~0x00000300;
    325    dbsr |= 0x00000200;
    326    env->spr[SPR_40x_DBSR] = dbsr;
    327}
    328
    329void ppc40x_system_reset(PowerPCCPU *cpu)
    330{
    331    qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC system\n");
    332    qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
    333}
    334
    335void store_40x_dbcr0(CPUPPCState *env, uint32_t val)
    336{
    337    PowerPCCPU *cpu = env_archcpu(env);
    338
    339    switch ((val >> 28) & 0x3) {
    340    case 0x0:
    341        /* No action */
    342        break;
    343    case 0x1:
    344        /* Core reset */
    345        ppc40x_core_reset(cpu);
    346        break;
    347    case 0x2:
    348        /* Chip reset */
    349        ppc40x_chip_reset(cpu);
    350        break;
    351    case 0x3:
    352        /* System reset */
    353        ppc40x_system_reset(cpu);
    354        break;
    355    }
    356}
    357
    358/* PowerPC 40x internal IRQ controller */
    359static void ppc40x_set_irq(void *opaque, int pin, int level)
    360{
    361    PowerPCCPU *cpu = opaque;
    362    CPUPPCState *env = &cpu->env;
    363    int cur_level;
    364
    365    trace_ppc_irq_set(env, pin, level);
    366
    367    cur_level = (env->irq_input_state >> pin) & 1;
    368    /* Don't generate spurious events */
    369    if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
    370        CPUState *cs = CPU(cpu);
    371
    372        switch (pin) {
    373        case PPC40x_INPUT_RESET_SYS:
    374            if (level) {
    375                trace_ppc_irq_reset("system");
    376                ppc40x_system_reset(cpu);
    377            }
    378            break;
    379        case PPC40x_INPUT_RESET_CHIP:
    380            if (level) {
    381                trace_ppc_irq_reset("chip");
    382                ppc40x_chip_reset(cpu);
    383            }
    384            break;
    385        case PPC40x_INPUT_RESET_CORE:
    386            /* XXX: TODO: update DBSR[MRR] */
    387            if (level) {
    388                trace_ppc_irq_reset("core");
    389                ppc40x_core_reset(cpu);
    390            }
    391            break;
    392        case PPC40x_INPUT_CINT:
    393            /* Level sensitive - active high */
    394            trace_ppc_irq_set_state("critical IRQ", level);
    395            ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level);
    396            break;
    397        case PPC40x_INPUT_INT:
    398            /* Level sensitive - active high */
    399            trace_ppc_irq_set_state("external IRQ", level);
    400            ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
    401            break;
    402        case PPC40x_INPUT_HALT:
    403            /* Level sensitive - active low */
    404            if (level) {
    405                trace_ppc_irq_cpu("stop");
    406                cs->halted = 1;
    407            } else {
    408                trace_ppc_irq_cpu("restart");
    409                cs->halted = 0;
    410                qemu_cpu_kick(cs);
    411            }
    412            break;
    413        case PPC40x_INPUT_DEBUG:
    414            /* Level sensitive - active high */
    415            trace_ppc_irq_set_state("debug pin", level);
    416            ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level);
    417            break;
    418        default:
    419            g_assert_not_reached();
    420        }
    421        if (level)
    422            env->irq_input_state |= 1 << pin;
    423        else
    424            env->irq_input_state &= ~(1 << pin);
    425    }
    426}
    427
    428void ppc40x_irq_init(PowerPCCPU *cpu)
    429{
    430    CPUPPCState *env = &cpu->env;
    431
    432    env->irq_inputs = (void **)qemu_allocate_irqs(&ppc40x_set_irq,
    433                                                  cpu, PPC40x_INPUT_NB);
    434}
    435
    436/* PowerPC E500 internal IRQ controller */
    437static void ppce500_set_irq(void *opaque, int pin, int level)
    438{
    439    PowerPCCPU *cpu = opaque;
    440    CPUPPCState *env = &cpu->env;
    441    int cur_level;
    442
    443    trace_ppc_irq_set(env, pin, level);
    444
    445    cur_level = (env->irq_input_state >> pin) & 1;
    446    /* Don't generate spurious events */
    447    if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
    448        switch (pin) {
    449        case PPCE500_INPUT_MCK:
    450            if (level) {
    451                trace_ppc_irq_reset("system");
    452                qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
    453            }
    454            break;
    455        case PPCE500_INPUT_RESET_CORE:
    456            if (level) {
    457                trace_ppc_irq_reset("core");
    458                ppc_set_irq(cpu, PPC_INTERRUPT_MCK, level);
    459            }
    460            break;
    461        case PPCE500_INPUT_CINT:
    462            /* Level sensitive - active high */
    463            trace_ppc_irq_set_state("critical IRQ", level);
    464            ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level);
    465            break;
    466        case PPCE500_INPUT_INT:
    467            /* Level sensitive - active high */
    468            trace_ppc_irq_set_state("core IRQ", level);
    469            ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
    470            break;
    471        case PPCE500_INPUT_DEBUG:
    472            /* Level sensitive - active high */
    473            trace_ppc_irq_set_state("debug pin", level);
    474            ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level);
    475            break;
    476        default:
    477            g_assert_not_reached();
    478        }
    479        if (level)
    480            env->irq_input_state |= 1 << pin;
    481        else
    482            env->irq_input_state &= ~(1 << pin);
    483    }
    484}
    485
    486void ppce500_irq_init(PowerPCCPU *cpu)
    487{
    488    CPUPPCState *env = &cpu->env;
    489
    490    env->irq_inputs = (void **)qemu_allocate_irqs(&ppce500_set_irq,
    491                                                  cpu, PPCE500_INPUT_NB);
    492}
    493
    494/* Enable or Disable the E500 EPR capability */
    495void ppce500_set_mpic_proxy(bool enabled)
    496{
    497    CPUState *cs;
    498
    499    CPU_FOREACH(cs) {
    500        PowerPCCPU *cpu = POWERPC_CPU(cs);
    501
    502        cpu->env.mpic_proxy = enabled;
    503        if (kvm_enabled()) {
    504            kvmppc_set_mpic_proxy(cpu, enabled);
    505        }
    506    }
    507}
    508
    509/*****************************************************************************/
    510/* PowerPC time base and decrementer emulation */
    511
    512uint64_t cpu_ppc_get_tb(ppc_tb_t *tb_env, uint64_t vmclk, int64_t tb_offset)
    513{
    514    /* TB time in tb periods */
    515    return muldiv64(vmclk, tb_env->tb_freq, NANOSECONDS_PER_SECOND) + tb_offset;
    516}
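
        /*
         * Editorial worked example (numbers hypothetical, not from the
         * original file): with tb_freq = 16 MHz and vmclk = 2 000 000 000 ns
         * of virtual time, muldiv64(vmclk, tb_freq, NANOSECONDS_PER_SECOND)
         * = 2e9 * 16e6 / 1e9 = 32 000 000 time-base ticks, to which the
         * per-CPU tb_offset is then added.
         */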
    517
    518uint64_t cpu_ppc_load_tbl (CPUPPCState *env)
    519{
    520    ppc_tb_t *tb_env = env->tb_env;
    521    uint64_t tb;
    522
    523    if (kvm_enabled()) {
    524        return env->spr[SPR_TBL];
    525    }
    526
    527    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
    528    trace_ppc_tb_load(tb);
    529
    530    return tb;
    531}
    532
    533static inline uint32_t _cpu_ppc_load_tbu(CPUPPCState *env)
    534{
    535    ppc_tb_t *tb_env = env->tb_env;
    536    uint64_t tb;
    537
    538    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
    539    trace_ppc_tb_load(tb);
    540
    541    return tb >> 32;
    542}
    543
    544uint32_t cpu_ppc_load_tbu (CPUPPCState *env)
    545{
    546    if (kvm_enabled()) {
    547        return env->spr[SPR_TBU];
    548    }
    549
    550    return _cpu_ppc_load_tbu(env);
    551}
    552
    553static inline void cpu_ppc_store_tb(ppc_tb_t *tb_env, uint64_t vmclk,
    554                                    int64_t *tb_offsetp, uint64_t value)
    555{
    556    *tb_offsetp = value -
    557        muldiv64(vmclk, tb_env->tb_freq, NANOSECONDS_PER_SECOND);
    558
    559    trace_ppc_tb_store(value, *tb_offsetp);
    560}
    561
    562void cpu_ppc_store_tbl (CPUPPCState *env, uint32_t value)
    563{
    564    ppc_tb_t *tb_env = env->tb_env;
    565    uint64_t tb;
    566
    567    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
    568    tb &= 0xFFFFFFFF00000000ULL;
    569    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
    570                     &tb_env->tb_offset, tb | (uint64_t)value);
    571}
    572
    573static inline void _cpu_ppc_store_tbu(CPUPPCState *env, uint32_t value)
    574{
    575    ppc_tb_t *tb_env = env->tb_env;
    576    uint64_t tb;
    577
    578    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
    579    tb &= 0x00000000FFFFFFFFULL;
    580    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
    581                     &tb_env->tb_offset, ((uint64_t)value << 32) | tb);
    582}
    583
    584void cpu_ppc_store_tbu (CPUPPCState *env, uint32_t value)
    585{
    586    _cpu_ppc_store_tbu(env, value);
    587}
    588
    589uint64_t cpu_ppc_load_atbl (CPUPPCState *env)
    590{
    591    ppc_tb_t *tb_env = env->tb_env;
    592    uint64_t tb;
    593
    594    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
    595    trace_ppc_tb_load(tb);
    596
    597    return tb;
    598}
    599
    600uint32_t cpu_ppc_load_atbu (CPUPPCState *env)
    601{
    602    ppc_tb_t *tb_env = env->tb_env;
    603    uint64_t tb;
    604
    605    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
    606    trace_ppc_tb_load(tb);
    607
    608    return tb >> 32;
    609}
    610
    611void cpu_ppc_store_atbl (CPUPPCState *env, uint32_t value)
    612{
    613    ppc_tb_t *tb_env = env->tb_env;
    614    uint64_t tb;
    615
    616    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
    617    tb &= 0xFFFFFFFF00000000ULL;
    618    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
    619                     &tb_env->atb_offset, tb | (uint64_t)value);
    620}
    621
    622void cpu_ppc_store_atbu (CPUPPCState *env, uint32_t value)
    623{
    624    ppc_tb_t *tb_env = env->tb_env;
    625    uint64_t tb;
    626
    627    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
    628    tb &= 0x00000000FFFFFFFFULL;
    629    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
    630                     &tb_env->atb_offset, ((uint64_t)value << 32) | tb);
    631}
    632
    633uint64_t cpu_ppc_load_vtb(CPUPPCState *env)
    634{
    635    ppc_tb_t *tb_env = env->tb_env;
    636
    637    return cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
    638                          tb_env->vtb_offset);
    639}
    640
    641void cpu_ppc_store_vtb(CPUPPCState *env, uint64_t value)
    642{
    643    ppc_tb_t *tb_env = env->tb_env;
    644
    645    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
    646                     &tb_env->vtb_offset, value);
    647}
    648
    649void cpu_ppc_store_tbu40(CPUPPCState *env, uint64_t value)
    650{
    651    ppc_tb_t *tb_env = env->tb_env;
    652    uint64_t tb;
    653
    654    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
    655                        tb_env->tb_offset);
    656    tb &= 0xFFFFFFUL;
    657    tb |= (value & ~0xFFFFFFUL);
    658    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
    659                     &tb_env->tb_offset, tb);
    660}
    661
    662static void cpu_ppc_tb_stop (CPUPPCState *env)
    663{
    664    ppc_tb_t *tb_env = env->tb_env;
    665    uint64_t tb, atb, vmclk;
    666
    667    /* If the time base is already frozen, do nothing */
    668    if (tb_env->tb_freq != 0) {
    669        vmclk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    670        /* Get the time base */
    671        tb = cpu_ppc_get_tb(tb_env, vmclk, tb_env->tb_offset);
    672        /* Get the alternate time base */
    673        atb = cpu_ppc_get_tb(tb_env, vmclk, tb_env->atb_offset);
    674        /* Store the time base value (ie compute the current offset) */
    675        cpu_ppc_store_tb(tb_env, vmclk, &tb_env->tb_offset, tb);
    676        /* Store the alternate time base value (compute the current offset) */
    677        cpu_ppc_store_tb(tb_env, vmclk, &tb_env->atb_offset, atb);
    678        /* Set the time base frequency to zero */
    679        tb_env->tb_freq = 0;
    680        /* Now, the time bases are frozen to tb_offset / atb_offset value */
    681    }
    682}
    683
    684static void cpu_ppc_tb_start (CPUPPCState *env)
    685{
    686    ppc_tb_t *tb_env = env->tb_env;
    687    uint64_t tb, atb, vmclk;
    688
    689    /* If the time base is not frozen, do nothing */
    690    if (tb_env->tb_freq == 0) {
    691        vmclk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    692        /* Get the time base from tb_offset */
    693        tb = tb_env->tb_offset;
    694        /* Get the alternate time base from atb_offset */
    695        atb = tb_env->atb_offset;
    696        /* Restore the tb frequency from the decrementer frequency */
    697        tb_env->tb_freq = tb_env->decr_freq;
    698        /* Store the time base value */
    699        cpu_ppc_store_tb(tb_env, vmclk, &tb_env->tb_offset, tb);
    700        /* Store the alternate time base value */
    701        cpu_ppc_store_tb(tb_env, vmclk, &tb_env->atb_offset, atb);
    702    }
    703}
    704
    705bool ppc_decr_clear_on_delivery(CPUPPCState *env)
    706{
    707    ppc_tb_t *tb_env = env->tb_env;
    708    int flags = PPC_DECR_UNDERFLOW_TRIGGERED | PPC_DECR_UNDERFLOW_LEVEL;
    709    return ((tb_env->flags & flags) == PPC_DECR_UNDERFLOW_TRIGGERED);
    710}
    711
    712static inline int64_t _cpu_ppc_load_decr(CPUPPCState *env, uint64_t next)
    713{
    714    ppc_tb_t *tb_env = env->tb_env;
    715    int64_t decr, diff;
    716
    717    diff = next - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    718    if (diff >= 0) {
    719        decr = muldiv64(diff, tb_env->decr_freq, NANOSECONDS_PER_SECOND);
    720    } else if (tb_env->flags & PPC_TIMER_BOOKE) {
    721        decr = 0;
    722    }  else {
    723        decr = -muldiv64(-diff, tb_env->decr_freq, NANOSECONDS_PER_SECOND);
    724    }
    725    trace_ppc_decr_load(decr);
    726
    727    return decr;
    728}
    729
    730target_ulong cpu_ppc_load_decr(CPUPPCState *env)
    731{
    732    ppc_tb_t *tb_env = env->tb_env;
    733    uint64_t decr;
    734
    735    if (kvm_enabled()) {
    736        return env->spr[SPR_DECR];
    737    }
    738
    739    decr = _cpu_ppc_load_decr(env, tb_env->decr_next);
    740
    741    /*
     742     * If the large decrementer is enabled, the decrementer is sign extended
     743     * to 64 bits, otherwise it is a 32-bit value.
    744     */
    745    if (env->spr[SPR_LPCR] & LPCR_LD) {
    746        return decr;
    747    }
    748    return (uint32_t) decr;
    749}
    750
    751target_ulong cpu_ppc_load_hdecr(CPUPPCState *env)
    752{
    753    PowerPCCPU *cpu = env_archcpu(env);
    754    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
    755    ppc_tb_t *tb_env = env->tb_env;
    756    uint64_t hdecr;
    757
    758    hdecr =  _cpu_ppc_load_decr(env, tb_env->hdecr_next);
    759
    760    /*
    761     * If we have a large decrementer (POWER9 or later) then hdecr is sign
    762     * extended to 64 bits, otherwise it is 32 bits.
    763     */
    764    if (pcc->lrg_decr_bits > 32) {
    765        return hdecr;
    766    }
    767    return (uint32_t) hdecr;
    768}
    769
    770uint64_t cpu_ppc_load_purr (CPUPPCState *env)
    771{
    772    ppc_tb_t *tb_env = env->tb_env;
    773
    774    return cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
    775                          tb_env->purr_offset);
    776}
    777
     778/* When the decrementer expires,
    779 * all we need to do is generate or queue a CPU exception
    780 */
    781static inline void cpu_ppc_decr_excp(PowerPCCPU *cpu)
    782{
    783    /* Raise it */
    784    trace_ppc_decr_excp("raise");
    785    ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 1);
    786}
    787
    788static inline void cpu_ppc_decr_lower(PowerPCCPU *cpu)
    789{
    790    ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 0);
    791}
    792
    793static inline void cpu_ppc_hdecr_excp(PowerPCCPU *cpu)
    794{
    795    CPUPPCState *env = &cpu->env;
    796
    797    /* Raise it */
    798    trace_ppc_decr_excp("raise HV");
    799
    800    /* The architecture specifies that we don't deliver HDEC
     801     * interrupts in a PM state. Not only do they not cause a
     802     * wakeup, they also get effectively discarded.
    803     */
    804    if (!env->resume_as_sreset) {
    805        ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 1);
    806    }
    807}
    808
    809static inline void cpu_ppc_hdecr_lower(PowerPCCPU *cpu)
    810{
    811    ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0);
    812}
    813
    814static void __cpu_ppc_store_decr(PowerPCCPU *cpu, uint64_t *nextp,
    815                                 QEMUTimer *timer,
    816                                 void (*raise_excp)(void *),
    817                                 void (*lower_excp)(PowerPCCPU *),
    818                                 target_ulong decr, target_ulong value,
    819                                 int nr_bits)
    820{
    821    CPUPPCState *env = &cpu->env;
    822    ppc_tb_t *tb_env = env->tb_env;
    823    uint64_t now, next;
    824    int64_t signed_value;
    825    int64_t signed_decr;
    826
    827    /* Truncate value to decr_width and sign extend for simplicity */
    828    signed_value = sextract64(value, 0, nr_bits);
    829    signed_decr = sextract64(decr, 0, nr_bits);
    830
    831    trace_ppc_decr_store(nr_bits, decr, value);
    832
    833    if (kvm_enabled()) {
    834        /* KVM handles decrementer exceptions, we don't need our own timer */
    835        return;
    836    }
    837
    838    /*
    839     * Going from 2 -> 1, 1 -> 0 or 0 -> -1 is the event to generate a DEC
    840     * interrupt.
    841     *
    842     * If we get a really small DEC value, we can assume that by the time we
     843     * handle it the interrupt should already have been injected.
    844     *
    845     * On MSB level based DEC implementations the MSB always means the interrupt
    846     * is pending, so raise it on those.
    847     *
    848     * On MSB edge based DEC implementations the MSB going from 0 -> 1 triggers
    849     * an edge interrupt, so raise it here too.
    850     */
    851    if ((signed_value < 3) ||
    852        ((tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL) && signed_value < 0) ||
    853        ((tb_env->flags & PPC_DECR_UNDERFLOW_TRIGGERED) && signed_value < 0
    854          && signed_decr >= 0)) {
    855        (*raise_excp)(cpu);
    856        return;
    857    }
    858
    859    /* On MSB level based systems a 0 for the MSB stops interrupt delivery */
    860    if (signed_value >= 0 && (tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL)) {
    861        (*lower_excp)(cpu);
    862    }
    863
    864    /* Calculate the next timer event */
    865    now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    866    next = now + muldiv64(value, NANOSECONDS_PER_SECOND, tb_env->decr_freq);
    867    *nextp = next;
    868
    869    /* Adjust timer */
    870    timer_mod(timer, next);
    871}
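
        /*
         * Editorial example of the rules above (values hypothetical): on a
         * level based implementation (PPC_DECR_UNDERFLOW_LEVEL set), storing
         * 0xFFFFFFFF into a 32-bit DECR yields signed_value = -1, so the
         * exception is raised immediately; storing 0x7FFFFFFF yields a
         * positive signed_value, so a pending DECR interrupt is lowered and
         * the timer is re-armed to fire value / decr_freq seconds from now.
         */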
    872
    873static inline void _cpu_ppc_store_decr(PowerPCCPU *cpu, target_ulong decr,
    874                                       target_ulong value, int nr_bits)
    875{
    876    ppc_tb_t *tb_env = cpu->env.tb_env;
    877
    878    __cpu_ppc_store_decr(cpu, &tb_env->decr_next, tb_env->decr_timer,
    879                         tb_env->decr_timer->cb, &cpu_ppc_decr_lower, decr,
    880                         value, nr_bits);
    881}
    882
    883void cpu_ppc_store_decr(CPUPPCState *env, target_ulong value)
    884{
    885    PowerPCCPU *cpu = env_archcpu(env);
    886    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
    887    int nr_bits = 32;
    888
    889    if (env->spr[SPR_LPCR] & LPCR_LD) {
    890        nr_bits = pcc->lrg_decr_bits;
    891    }
    892
    893    _cpu_ppc_store_decr(cpu, cpu_ppc_load_decr(env), value, nr_bits);
    894}
    895
    896static void cpu_ppc_decr_cb(void *opaque)
    897{
    898    PowerPCCPU *cpu = opaque;
    899
    900    cpu_ppc_decr_excp(cpu);
    901}
    902
    903static inline void _cpu_ppc_store_hdecr(PowerPCCPU *cpu, target_ulong hdecr,
    904                                        target_ulong value, int nr_bits)
    905{
    906    ppc_tb_t *tb_env = cpu->env.tb_env;
    907
    908    if (tb_env->hdecr_timer != NULL) {
    909        __cpu_ppc_store_decr(cpu, &tb_env->hdecr_next, tb_env->hdecr_timer,
    910                             tb_env->hdecr_timer->cb, &cpu_ppc_hdecr_lower,
    911                             hdecr, value, nr_bits);
    912    }
    913}
    914
    915void cpu_ppc_store_hdecr(CPUPPCState *env, target_ulong value)
    916{
    917    PowerPCCPU *cpu = env_archcpu(env);
    918    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
    919
    920    _cpu_ppc_store_hdecr(cpu, cpu_ppc_load_hdecr(env), value,
    921                         pcc->lrg_decr_bits);
    922}
    923
    924static void cpu_ppc_hdecr_cb(void *opaque)
    925{
    926    PowerPCCPU *cpu = opaque;
    927
    928    cpu_ppc_hdecr_excp(cpu);
    929}
    930
    931void cpu_ppc_store_purr(CPUPPCState *env, uint64_t value)
    932{
    933    ppc_tb_t *tb_env = env->tb_env;
    934
    935    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
    936                     &tb_env->purr_offset, value);
    937}
    938
    939static void cpu_ppc_set_tb_clk (void *opaque, uint32_t freq)
    940{
    941    CPUPPCState *env = opaque;
    942    PowerPCCPU *cpu = env_archcpu(env);
    943    ppc_tb_t *tb_env = env->tb_env;
    944
    945    tb_env->tb_freq = freq;
    946    tb_env->decr_freq = freq;
    947    /* There is a bug in Linux 2.4 kernels:
     948     * if a decrementer exception is pending when the kernel enables MSR[EE]
     949     * at startup, it is not ready to handle it...
    950     */
    951    _cpu_ppc_store_decr(cpu, 0xFFFFFFFF, 0xFFFFFFFF, 32);
    952    _cpu_ppc_store_hdecr(cpu, 0xFFFFFFFF, 0xFFFFFFFF, 32);
    953    cpu_ppc_store_purr(env, 0x0000000000000000ULL);
    954}
    955
    956static void timebase_save(PPCTimebase *tb)
    957{
    958    uint64_t ticks = cpu_get_host_ticks();
    959    PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);
    960
    961    if (!first_ppc_cpu->env.tb_env) {
    962        error_report("No timebase object");
    963        return;
    964    }
    965
    966    /* not used anymore, we keep it for compatibility */
    967    tb->time_of_the_day_ns = qemu_clock_get_ns(QEMU_CLOCK_HOST);
    968    /*
    969     * tb_offset is only expected to be changed by QEMU so
    970     * there is no need to update it from KVM here
    971     */
    972    tb->guest_timebase = ticks + first_ppc_cpu->env.tb_env->tb_offset;
    973
    974    tb->runstate_paused =
    975        runstate_check(RUN_STATE_PAUSED) || runstate_check(RUN_STATE_SAVE_VM);
    976}
    977
    978static void timebase_load(PPCTimebase *tb)
    979{
    980    CPUState *cpu;
    981    PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);
    982    int64_t tb_off_adj, tb_off;
    983    unsigned long freq;
    984
    985    if (!first_ppc_cpu->env.tb_env) {
    986        error_report("No timebase object");
    987        return;
    988    }
    989
    990    freq = first_ppc_cpu->env.tb_env->tb_freq;
    991
    992    tb_off_adj = tb->guest_timebase - cpu_get_host_ticks();
    993
    994    tb_off = first_ppc_cpu->env.tb_env->tb_offset;
    995    trace_ppc_tb_adjust(tb_off, tb_off_adj, tb_off_adj - tb_off,
    996                        (tb_off_adj - tb_off) / freq);
    997
    998    /* Set new offset to all CPUs */
    999    CPU_FOREACH(cpu) {
   1000        PowerPCCPU *pcpu = POWERPC_CPU(cpu);
   1001        pcpu->env.tb_env->tb_offset = tb_off_adj;
   1002        kvmppc_set_reg_tb_offset(pcpu, pcpu->env.tb_env->tb_offset);
   1003    }
   1004}
   1005
   1006void cpu_ppc_clock_vm_state_change(void *opaque, bool running,
   1007                                   RunState state)
   1008{
   1009    PPCTimebase *tb = opaque;
   1010
   1011    if (running) {
   1012        timebase_load(tb);
   1013    } else {
   1014        timebase_save(tb);
   1015    }
   1016}
   1017
   1018/*
   1019 * When migrating a running guest, read the clock just
   1020 * before migration, so that the guest clock counts
   1021 * during the events between:
   1022 *
   1023 *  * vm_stop()
    1024 *
   1025 *  * pre_save()
   1026 *
   1027 *  This reduces clock difference on migration from 5s
   1028 *  to 0.1s (when max_downtime == 5s), because sending the
   1029 *  final pages of memory (which happens between vm_stop()
   1030 *  and pre_save()) takes max_downtime.
   1031 */
   1032static int timebase_pre_save(void *opaque)
   1033{
   1034    PPCTimebase *tb = opaque;
   1035
   1036    /* guest_timebase won't be overridden in case of paused guest or savevm */
   1037    if (!tb->runstate_paused) {
   1038        timebase_save(tb);
   1039    }
   1040
   1041    return 0;
   1042}
   1043
   1044const VMStateDescription vmstate_ppc_timebase = {
   1045    .name = "timebase",
   1046    .version_id = 1,
   1047    .minimum_version_id = 1,
   1048    .minimum_version_id_old = 1,
   1049    .pre_save = timebase_pre_save,
   1050    .fields      = (VMStateField []) {
   1051        VMSTATE_UINT64(guest_timebase, PPCTimebase),
   1052        VMSTATE_INT64(time_of_the_day_ns, PPCTimebase),
   1053        VMSTATE_END_OF_LIST()
   1054    },
   1055};
   1056
   1057/* Set up (once) timebase frequency (in Hz) */
   1058clk_setup_cb cpu_ppc_tb_init (CPUPPCState *env, uint32_t freq)
   1059{
   1060    PowerPCCPU *cpu = env_archcpu(env);
   1061    ppc_tb_t *tb_env;
   1062
   1063    tb_env = g_malloc0(sizeof(ppc_tb_t));
   1064    env->tb_env = tb_env;
   1065    tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED;
   1066    if (is_book3s_arch2x(env)) {
   1067        /* All Book3S 64bit CPUs implement level based DEC logic */
   1068        tb_env->flags |= PPC_DECR_UNDERFLOW_LEVEL;
   1069    }
   1070    /* Create new timer */
   1071    tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_ppc_decr_cb, cpu);
   1072    if (env->has_hv_mode) {
   1073        tb_env->hdecr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_ppc_hdecr_cb,
   1074                                                cpu);
   1075    } else {
   1076        tb_env->hdecr_timer = NULL;
   1077    }
   1078    cpu_ppc_set_tb_clk(env, freq);
   1079
   1080    return &cpu_ppc_set_tb_clk;
   1081}
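
        /*
         * Editorial sketch, not part of the original file: how a machine
         * model might hook up the time base at a hypothetical 512 MHz and
         * later retune it through the returned clk_setup_cb. The helper name
         * and frequencies are invented for illustration.
         */
        static inline void example_tb_setup(CPUPPCState *env)
        {
            clk_setup_cb cb = cpu_ppc_tb_init(env, 512 * 1000 * 1000);

            /* A board-level clock model could later change the frequency: */
            cb(env, 1000 * 1000 * 1000);
        }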
   1082
   1083/* Specific helpers for POWER & PowerPC 601 RTC */
   1084void cpu_ppc601_store_rtcu (CPUPPCState *env, uint32_t value)
   1085{
   1086    _cpu_ppc_store_tbu(env, value);
   1087}
   1088
   1089uint32_t cpu_ppc601_load_rtcu (CPUPPCState *env)
   1090{
   1091    return _cpu_ppc_load_tbu(env);
   1092}
   1093
   1094void cpu_ppc601_store_rtcl (CPUPPCState *env, uint32_t value)
   1095{
   1096    cpu_ppc_store_tbl(env, value & 0x3FFFFF80);
   1097}
   1098
   1099uint32_t cpu_ppc601_load_rtcl (CPUPPCState *env)
   1100{
   1101    return cpu_ppc_load_tbl(env) & 0x3FFFFF80;
   1102}
   1103
   1104/*****************************************************************************/
   1105/* PowerPC 40x timers */
   1106
   1107/* PIT, FIT & WDT */
   1108typedef struct ppc40x_timer_t ppc40x_timer_t;
   1109struct ppc40x_timer_t {
   1110    uint64_t pit_reload;  /* PIT auto-reload value        */
   1111    uint64_t fit_next;    /* Tick for next FIT interrupt  */
   1112    QEMUTimer *fit_timer;
   1113    uint64_t wdt_next;    /* Tick for next WDT interrupt  */
   1114    QEMUTimer *wdt_timer;
   1115
    1116    /* The 405 has the PIT, the 440 has a DECR.  */
   1117    unsigned int decr_excp;
   1118};
   1119
   1120/* Fixed interval timer */
   1121static void cpu_4xx_fit_cb (void *opaque)
   1122{
   1123    PowerPCCPU *cpu;
   1124    CPUPPCState *env;
   1125    ppc_tb_t *tb_env;
   1126    ppc40x_timer_t *ppc40x_timer;
   1127    uint64_t now, next;
   1128
   1129    env = opaque;
   1130    cpu = env_archcpu(env);
   1131    tb_env = env->tb_env;
   1132    ppc40x_timer = tb_env->opaque;
   1133    now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
   1134    switch ((env->spr[SPR_40x_TCR] >> 24) & 0x3) {
   1135    case 0:
   1136        next = 1 << 9;
   1137        break;
   1138    case 1:
   1139        next = 1 << 13;
   1140        break;
   1141    case 2:
   1142        next = 1 << 17;
   1143        break;
   1144    case 3:
   1145        next = 1 << 21;
   1146        break;
   1147    default:
   1148        /* Cannot occur, but makes gcc happy */
   1149        return;
   1150    }
   1151    next = now + muldiv64(next, NANOSECONDS_PER_SECOND, tb_env->tb_freq);
   1152    if (next == now)
   1153        next++;
   1154    timer_mod(ppc40x_timer->fit_timer, next);
   1155    env->spr[SPR_40x_TSR] |= 1 << 26;
   1156    if ((env->spr[SPR_40x_TCR] >> 23) & 0x1) {
   1157        ppc_set_irq(cpu, PPC_INTERRUPT_FIT, 1);
   1158    }
   1159    trace_ppc4xx_fit((int)((env->spr[SPR_40x_TCR] >> 23) & 0x1),
   1160                         env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]);
   1161}
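
        /*
         * Editorial worked example (frequency hypothetical): TCR[FP] selects
         * the FIT period as 2^9, 2^13, 2^17 or 2^21 time-base ticks. With a
         * 100 MHz time base and TCR[FP] = 0, the callback re-arms itself
         * 512 * 1e9 / 1e8 = 5120 ns (about 5.12 us) after "now".
         */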
   1162
   1163/* Programmable interval timer */
   1164static void start_stop_pit (CPUPPCState *env, ppc_tb_t *tb_env, int is_excp)
   1165{
   1166    ppc40x_timer_t *ppc40x_timer;
   1167    uint64_t now, next;
   1168
   1169    ppc40x_timer = tb_env->opaque;
   1170    if (ppc40x_timer->pit_reload <= 1 ||
   1171        !((env->spr[SPR_40x_TCR] >> 26) & 0x1) ||
   1172        (is_excp && !((env->spr[SPR_40x_TCR] >> 22) & 0x1))) {
   1173        /* Stop PIT */
   1174        trace_ppc4xx_pit_stop();
   1175        timer_del(tb_env->decr_timer);
   1176    } else {
   1177        trace_ppc4xx_pit_start(ppc40x_timer->pit_reload);
   1178        now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
   1179        next = now + muldiv64(ppc40x_timer->pit_reload,
   1180                              NANOSECONDS_PER_SECOND, tb_env->decr_freq);
   1181        if (is_excp)
   1182            next += tb_env->decr_next - now;
   1183        if (next == now)
   1184            next++;
   1185        timer_mod(tb_env->decr_timer, next);
   1186        tb_env->decr_next = next;
   1187    }
   1188}
   1189
   1190static void cpu_4xx_pit_cb (void *opaque)
   1191{
   1192    PowerPCCPU *cpu;
   1193    CPUPPCState *env;
   1194    ppc_tb_t *tb_env;
   1195    ppc40x_timer_t *ppc40x_timer;
   1196
   1197    env = opaque;
   1198    cpu = env_archcpu(env);
   1199    tb_env = env->tb_env;
   1200    ppc40x_timer = tb_env->opaque;
   1201    env->spr[SPR_40x_TSR] |= 1 << 27;
   1202    if ((env->spr[SPR_40x_TCR] >> 26) & 0x1) {
   1203        ppc_set_irq(cpu, ppc40x_timer->decr_excp, 1);
   1204    }
   1205    start_stop_pit(env, tb_env, 1);
   1206    trace_ppc4xx_pit((int)((env->spr[SPR_40x_TCR] >> 22) & 0x1),
   1207           (int)((env->spr[SPR_40x_TCR] >> 26) & 0x1),
   1208           env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR],
   1209           ppc40x_timer->pit_reload);
   1210}
   1211
   1212/* Watchdog timer */
   1213static void cpu_4xx_wdt_cb (void *opaque)
   1214{
   1215    PowerPCCPU *cpu;
   1216    CPUPPCState *env;
   1217    ppc_tb_t *tb_env;
   1218    ppc40x_timer_t *ppc40x_timer;
   1219    uint64_t now, next;
   1220
   1221    env = opaque;
   1222    cpu = env_archcpu(env);
   1223    tb_env = env->tb_env;
   1224    ppc40x_timer = tb_env->opaque;
   1225    now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
   1226    switch ((env->spr[SPR_40x_TCR] >> 30) & 0x3) {
   1227    case 0:
   1228        next = 1 << 17;
   1229        break;
   1230    case 1:
   1231        next = 1 << 21;
   1232        break;
   1233    case 2:
   1234        next = 1 << 25;
   1235        break;
   1236    case 3:
   1237        next = 1 << 29;
   1238        break;
   1239    default:
   1240        /* Cannot occur, but makes gcc happy */
   1241        return;
   1242    }
   1243    next = now + muldiv64(next, NANOSECONDS_PER_SECOND, tb_env->decr_freq);
   1244    if (next == now)
   1245        next++;
   1246    trace_ppc4xx_wdt(env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]);
   1247    switch ((env->spr[SPR_40x_TSR] >> 30) & 0x3) {
   1248    case 0x0:
   1249    case 0x1:
   1250        timer_mod(ppc40x_timer->wdt_timer, next);
   1251        ppc40x_timer->wdt_next = next;
   1252        env->spr[SPR_40x_TSR] |= 1U << 31;
   1253        break;
   1254    case 0x2:
   1255        timer_mod(ppc40x_timer->wdt_timer, next);
   1256        ppc40x_timer->wdt_next = next;
   1257        env->spr[SPR_40x_TSR] |= 1 << 30;
   1258        if ((env->spr[SPR_40x_TCR] >> 27) & 0x1) {
   1259            ppc_set_irq(cpu, PPC_INTERRUPT_WDT, 1);
   1260        }
   1261        break;
   1262    case 0x3:
   1263        env->spr[SPR_40x_TSR] &= ~0x30000000;
   1264        env->spr[SPR_40x_TSR] |= env->spr[SPR_40x_TCR] & 0x30000000;
   1265        switch ((env->spr[SPR_40x_TCR] >> 28) & 0x3) {
   1266        case 0x0:
   1267            /* No reset */
   1268            break;
   1269        case 0x1: /* Core reset */
   1270            ppc40x_core_reset(cpu);
   1271            break;
   1272        case 0x2: /* Chip reset */
   1273            ppc40x_chip_reset(cpu);
   1274            break;
   1275        case 0x3: /* System reset */
   1276            ppc40x_system_reset(cpu);
   1277            break;
   1278        }
   1279    }
   1280}
   1281
   1282void store_40x_pit (CPUPPCState *env, target_ulong val)
   1283{
   1284    ppc_tb_t *tb_env;
   1285    ppc40x_timer_t *ppc40x_timer;
   1286
   1287    tb_env = env->tb_env;
   1288    ppc40x_timer = tb_env->opaque;
   1289    trace_ppc40x_store_pit(val);
   1290    ppc40x_timer->pit_reload = val;
   1291    start_stop_pit(env, tb_env, 0);
   1292}
   1293
   1294target_ulong load_40x_pit (CPUPPCState *env)
   1295{
   1296    return cpu_ppc_load_decr(env);
   1297}
   1298
   1299static void ppc_40x_set_tb_clk (void *opaque, uint32_t freq)
   1300{
   1301    CPUPPCState *env = opaque;
   1302    ppc_tb_t *tb_env = env->tb_env;
   1303
   1304    trace_ppc40x_set_tb_clk(freq);
   1305    tb_env->tb_freq = freq;
   1306    tb_env->decr_freq = freq;
   1307    /* XXX: we should also update all timers */
   1308}
   1309
   1310clk_setup_cb ppc_40x_timers_init (CPUPPCState *env, uint32_t freq,
   1311                                  unsigned int decr_excp)
   1312{
   1313    ppc_tb_t *tb_env;
   1314    ppc40x_timer_t *ppc40x_timer;
   1315
   1316    tb_env = g_malloc0(sizeof(ppc_tb_t));
   1317    env->tb_env = tb_env;
   1318    tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED;
   1319    ppc40x_timer = g_malloc0(sizeof(ppc40x_timer_t));
   1320    tb_env->tb_freq = freq;
   1321    tb_env->decr_freq = freq;
   1322    tb_env->opaque = ppc40x_timer;
   1323    trace_ppc40x_timers_init(freq);
   1324    if (ppc40x_timer != NULL) {
   1325        /* We use decr timer for PIT */
   1326        tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_pit_cb, env);
   1327        ppc40x_timer->fit_timer =
   1328            timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_fit_cb, env);
   1329        ppc40x_timer->wdt_timer =
   1330            timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_wdt_cb, env);
   1331        ppc40x_timer->decr_excp = decr_excp;
   1332    }
   1333
   1334    return &ppc_40x_set_tb_clk;
   1335}
   1336
   1337/*****************************************************************************/
   1338/* Embedded PowerPC Device Control Registers */
   1339typedef struct ppc_dcrn_t ppc_dcrn_t;
   1340struct ppc_dcrn_t {
   1341    dcr_read_cb dcr_read;
   1342    dcr_write_cb dcr_write;
   1343    void *opaque;
   1344};
   1345
   1346/* XXX: on 460, DCR addresses are 32 bits wide,
   1347 *      using DCRIPR to get the 22 upper bits of the DCR address
   1348 */
   1349#define DCRN_NB 1024
   1350struct ppc_dcr_t {
   1351    ppc_dcrn_t dcrn[DCRN_NB];
   1352    int (*read_error)(int dcrn);
   1353    int (*write_error)(int dcrn);
   1354};
   1355
   1356int ppc_dcr_read (ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp)
   1357{
   1358    ppc_dcrn_t *dcr;
   1359
   1360    if (dcrn < 0 || dcrn >= DCRN_NB)
   1361        goto error;
   1362    dcr = &dcr_env->dcrn[dcrn];
   1363    if (dcr->dcr_read == NULL)
   1364        goto error;
   1365    *valp = (*dcr->dcr_read)(dcr->opaque, dcrn);
   1366
   1367    return 0;
   1368
   1369 error:
   1370    if (dcr_env->read_error != NULL)
   1371        return (*dcr_env->read_error)(dcrn);
   1372
   1373    return -1;
   1374}
   1375
   1376int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val)
   1377{
   1378    ppc_dcrn_t *dcr;
   1379
   1380    if (dcrn < 0 || dcrn >= DCRN_NB)
   1381        goto error;
   1382    dcr = &dcr_env->dcrn[dcrn];
   1383    if (dcr->dcr_write == NULL)
   1384        goto error;
   1385    (*dcr->dcr_write)(dcr->opaque, dcrn, val);
   1386
   1387    return 0;
   1388
   1389 error:
   1390    if (dcr_env->write_error != NULL)
   1391        return (*dcr_env->write_error)(dcrn);
   1392
   1393    return -1;
   1394}
   1395
   1396int ppc_dcr_register (CPUPPCState *env, int dcrn, void *opaque,
   1397                      dcr_read_cb dcr_read, dcr_write_cb dcr_write)
   1398{
   1399    ppc_dcr_t *dcr_env;
   1400    ppc_dcrn_t *dcr;
   1401
   1402    dcr_env = env->dcr_env;
   1403    if (dcr_env == NULL)
   1404        return -1;
   1405    if (dcrn < 0 || dcrn >= DCRN_NB)
   1406        return -1;
   1407    dcr = &dcr_env->dcrn[dcrn];
   1408    if (dcr->opaque != NULL ||
   1409        dcr->dcr_read != NULL ||
   1410        dcr->dcr_write != NULL)
   1411        return -1;
   1412    dcr->opaque = opaque;
   1413    dcr->dcr_read = dcr_read;
   1414    dcr->dcr_write = dcr_write;
   1415
   1416    return 0;
   1417}
   1418
   1419int ppc_dcr_init (CPUPPCState *env, int (*read_error)(int dcrn),
   1420                  int (*write_error)(int dcrn))
   1421{
   1422    ppc_dcr_t *dcr_env;
   1423
   1424    dcr_env = g_malloc0(sizeof(ppc_dcr_t));
   1425    dcr_env->read_error = read_error;
   1426    dcr_env->write_error = write_error;
   1427    env->dcr_env = dcr_env;
   1428
   1429    return 0;
   1430}
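
        /*
         * Editorial sketch, not part of the original file and compiled out
         * with #if 0: a 4xx SoC model would call ppc_dcr_init() once per CPU
         * and then register per-device DCR handlers. EXAMPLE_DCR_BASE,
         * ExampleDevState and the helpers below are invented for
         * illustration only.
         */
        #if 0
        #define EXAMPLE_DCR_BASE 0x80

        typedef struct ExampleDevState {
            uint32_t reg[4];
        } ExampleDevState;

        static uint32_t example_dcr_read(void *opaque, int dcrn)
        {
            ExampleDevState *s = opaque;

            return s->reg[dcrn - EXAMPLE_DCR_BASE];
        }

        static void example_dcr_write(void *opaque, int dcrn, uint32_t val)
        {
            ExampleDevState *s = opaque;

            s->reg[dcrn - EXAMPLE_DCR_BASE] = val;
        }

        static void example_dcr_setup(CPUPPCState *env, ExampleDevState *s)
        {
            ppc_dcr_init(env, NULL, NULL);          /* no custom error handlers */
            ppc_dcr_register(env, EXAMPLE_DCR_BASE, s,
                             &example_dcr_read, &example_dcr_write);
        }
        #endif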
   1431
   1432/*****************************************************************************/
   1433
   1434int ppc_cpu_pir(PowerPCCPU *cpu)
   1435{
   1436    CPUPPCState *env = &cpu->env;
   1437    return env->spr_cb[SPR_PIR].default_value;
   1438}
   1439
   1440PowerPCCPU *ppc_get_vcpu_by_pir(int pir)
   1441{
   1442    CPUState *cs;
   1443
   1444    CPU_FOREACH(cs) {
   1445        PowerPCCPU *cpu = POWERPC_CPU(cs);
   1446
   1447        if (ppc_cpu_pir(cpu) == pir) {
   1448            return cpu;
   1449        }
   1450    }
   1451
   1452    return NULL;
   1453}
   1454
   1455void ppc_irq_reset(PowerPCCPU *cpu)
   1456{
   1457    CPUPPCState *env = &cpu->env;
   1458
   1459    env->irq_input_state = 0;
   1460    kvmppc_set_interrupt(cpu, PPC_INTERRUPT_EXT, 0);
   1461}