cachepc-qemu

Fork of AMDESE/qemu with changes for cachepc side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu

monitor.c (28312B)


/*
 * QEMU monitor
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "monitor/monitor.h"
#include "monitor/hmp-target.h"
#include "monitor/hmp.h"
#include "qapi/qmp/qdict.h"
#include "sysemu/kvm.h"
#include "sysemu/sev.h"
#include "qapi/error.h"
#include "sev_i386.h"
#include "qapi/qapi-commands-misc-target.h"
#include "qapi/qapi-commands-misc.h"
#include "hw/i386/pc.h"
#include "hw/i386/sgx.h"

/*
 * Perform linear address sign extension: canonicalize from bit 56 when
 * 5-level paging (LA57) is enabled, from bit 47 otherwise.
 */
static hwaddr addr_canonical(CPUArchState *env, hwaddr addr)
{
#ifdef TARGET_X86_64
    if (env->cr[4] & CR4_LA57_MASK) {
        if (addr & (1ULL << 56)) {
            addr |= (hwaddr)-(1LL << 57);
        }
    } else {
        if (addr & (1ULL << 47)) {
            addr |= (hwaddr)-(1LL << 48);
        }
    }
#endif
    return addr;
}

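/*
 * Print one mapping: canonical linear address, masked physical address and
 * flag letters (X = no-execute, G = global, P = PSE, D = dirty, A = accessed,
 * C = cache-disable, T = write-through, U = user, W = writable).
 */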
static void print_pte(Monitor *mon, CPUArchState *env, hwaddr addr,
                      hwaddr pte, hwaddr mask)
{
    addr = addr_canonical(env, addr);

    monitor_printf(mon, TARGET_FMT_plx ": " TARGET_FMT_plx
                   " %c%c%c%c%c%c%c%c%c\n",
                   addr,
                   pte & mask,
                   pte & PG_NX_MASK ? 'X' : '-',
                   pte & PG_GLOBAL_MASK ? 'G' : '-',
                   pte & PG_PSE_MASK ? 'P' : '-',
                   pte & PG_DIRTY_MASK ? 'D' : '-',
                   pte & PG_ACCESSED_MASK ? 'A' : '-',
                   pte & PG_PCD_MASK ? 'C' : '-',
                   pte & PG_PWT_MASK ? 'T' : '-',
                   pte & PG_USER_MASK ? 'U' : '-',
                   pte & PG_RW_MASK ? 'W' : '-');
}

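/*
 * Walk the 32-bit two-level page table (1024 PDEs x 1024 PTEs) through guest
 * physical memory and print every present mapping; a PDE with PG_PSE_MASK
 * set (and CR4.PSE enabled) maps a single 4 MiB page.
 */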
static void tlb_info_32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2;
    uint32_t pgd, pde, pte;

    pgd = env->cr[3] & ~0xfff;
    for(l1 = 0; l1 < 1024; l1++) {
        cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
        pde = le32_to_cpu(pde);
        if (pde & PG_PRESENT_MASK) {
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                /* 4M pages */
                print_pte(mon, env, (l1 << 22), pde, ~((1 << 21) - 1));
            } else {
                for(l2 = 0; l2 < 1024; l2++) {
                    cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
                    pte = le32_to_cpu(pte);
                    if (pte & PG_PRESENT_MASK) {
                        print_pte(mon, env, (l1 << 22) + (l2 << 12),
                                  pte & ~PG_PSE_MASK,
                                  ~0xfff);
                    }
                }
            }
        }
    }
}

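/*
 * Same walk for PAE paging: a 4-entry PDPT, then 512-entry page directories
 * and page tables of 64-bit entries; a PDE with PG_PSE_MASK maps a 2 MiB page.
 */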
static void tlb_info_pae32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2, l3;
    uint64_t pdpe, pde, pte;
    uint64_t pdp_addr, pd_addr, pt_addr;

    pdp_addr = env->cr[3] & ~0x1f;
    for (l1 = 0; l1 < 4; l1++) {
        cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
        pdpe = le64_to_cpu(pdpe);
        if (pdpe & PG_PRESENT_MASK) {
            pd_addr = pdpe & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
                pde = le64_to_cpu(pde);
                if (pde & PG_PRESENT_MASK) {
                    if (pde & PG_PSE_MASK) {
                        /* 2M pages with PAE, CR4.PSE is ignored */
                        print_pte(mon, env, (l1 << 30) + (l2 << 21), pde,
                                  ~((hwaddr)(1 << 20) - 1));
                    } else {
                        pt_addr = pde & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
                            pte = le64_to_cpu(pte);
                            if (pte & PG_PRESENT_MASK) {
                                print_pte(mon, env, (l1 << 30) + (l2 << 21)
                                          + (l3 << 12),
                                          pte & ~PG_PSE_MASK,
                                          ~(hwaddr)0xfff);
                            }
                        }
                    }
                }
            }
        }
    }
}

#ifdef TARGET_X86_64
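/*
 * 64-bit 4-level walk (PML4 -> PDPT -> PD -> PT).  For 5-level paging the
 * caller (tlb_info_la57) iterates the PML5 table and passes each present
 * PML5E down as the PML4 base together with its index l0.
 */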
static void tlb_info_la48(Monitor *mon, CPUArchState *env,
        uint64_t l0, uint64_t pml4_addr)
{
    uint64_t l1, l2, l3, l4;
    uint64_t pml4e, pdpe, pde, pte;
    uint64_t pdp_addr, pd_addr, pt_addr;

    for (l1 = 0; l1 < 512; l1++) {
        cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
        pml4e = le64_to_cpu(pml4e);
        if (!(pml4e & PG_PRESENT_MASK)) {
            continue;
        }

        pdp_addr = pml4e & 0x3fffffffff000ULL;
        for (l2 = 0; l2 < 512; l2++) {
            cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
            pdpe = le64_to_cpu(pdpe);
            if (!(pdpe & PG_PRESENT_MASK)) {
                continue;
            }

            if (pdpe & PG_PSE_MASK) {
                /* 1G pages, CR4.PSE is ignored */
                print_pte(mon, env, (l0 << 48) + (l1 << 39) + (l2 << 30),
                        pdpe, 0x3ffffc0000000ULL);
                continue;
            }

            pd_addr = pdpe & 0x3fffffffff000ULL;
            for (l3 = 0; l3 < 512; l3++) {
                cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
                pde = le64_to_cpu(pde);
                if (!(pde & PG_PRESENT_MASK)) {
                    continue;
                }

                if (pde & PG_PSE_MASK) {
                    /* 2M pages, CR4.PSE is ignored */
                    print_pte(mon, env, (l0 << 48) + (l1 << 39) + (l2 << 30) +
                            (l3 << 21), pde, 0x3ffffffe00000ULL);
                    continue;
                }

                pt_addr = pde & 0x3fffffffff000ULL;
                for (l4 = 0; l4 < 512; l4++) {
                    cpu_physical_memory_read(pt_addr
                            + l4 * 8,
                            &pte, 8);
                    pte = le64_to_cpu(pte);
                    if (pte & PG_PRESENT_MASK) {
                        print_pte(mon, env, (l0 << 48) + (l1 << 39) +
                                (l2 << 30) + (l3 << 21) + (l4 << 12),
                                pte & ~PG_PSE_MASK, 0x3fffffffff000ULL);
                    }
                }
            }
        }
    }
}

static void tlb_info_la57(Monitor *mon, CPUArchState *env)
{
    uint64_t l0;
    uint64_t pml5e;
    uint64_t pml5_addr;

    pml5_addr = env->cr[3] & 0x3fffffffff000ULL;
    for (l0 = 0; l0 < 512; l0++) {
        cpu_physical_memory_read(pml5_addr + l0 * 8, &pml5e, 8);
        pml5e = le64_to_cpu(pml5e);
        if (pml5e & PG_PRESENT_MASK) {
            tlb_info_la48(mon, env, l0, pml5e & 0x3fffffffff000ULL);
        }
    }
}
#endif /* TARGET_X86_64 */

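/*
 * HMP "info tlb": this does not inspect the hardware TLB; it walks the
 * guest page tables for the current paging mode (CR0.PG, CR4.PAE/LA57,
 * EFER.LMA) through guest physical memory and prints every present mapping.
 */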
void hmp_info_tlb(Monitor *mon, const QDict *qdict)
{
    CPUArchState *env;

    env = mon_get_cpu_env(mon);
    if (!env) {
        monitor_printf(mon, "No CPU available\n");
        return;
    }

    if (!(env->cr[0] & CR0_PG_MASK)) {
        monitor_printf(mon, "PG disabled\n");
        return;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            if (env->cr[4] & CR4_LA57_MASK) {
                tlb_info_la57(mon, env);
            } else {
                tlb_info_la48(mon, env, 0, env->cr[3] & 0x3fffffffff000ULL);
            }
        } else
#endif
        {
            tlb_info_pae32(mon, env);
        }
    } else {
        tlb_info_32(mon, env);
    }
}

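/*
 * Range coalescing for "info mem": accumulate consecutive pages that share
 * the same effective protection and emit one "start-end size urw" line each
 * time the protection changes.  *pstart == -1 means no range is open.
 */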
static void mem_print(Monitor *mon, CPUArchState *env,
                      hwaddr *pstart, int *plast_prot,
                      hwaddr end, int prot)
{
    int prot1;
    prot1 = *plast_prot;
    if (prot != prot1) {
        if (*pstart != -1) {
            monitor_printf(mon, TARGET_FMT_plx "-" TARGET_FMT_plx " "
                           TARGET_FMT_plx " %c%c%c\n",
                           addr_canonical(env, *pstart),
                           addr_canonical(env, end),
                           addr_canonical(env, end - *pstart),
                           prot1 & PG_USER_MASK ? 'u' : '-',
                           'r',
                           prot1 & PG_RW_MASK ? 'w' : '-');
        }
        if (prot != 0)
            *pstart = end;
        else
            *pstart = -1;
        *plast_prot = prot;
    }
}

static void mem_info_32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2;
    int prot, last_prot;
    uint32_t pgd, pde, pte;
    hwaddr start, end;

    pgd = env->cr[3] & ~0xfff;
    last_prot = 0;
    start = -1;
    for(l1 = 0; l1 < 1024; l1++) {
        cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
        pde = le32_to_cpu(pde);
        end = l1 << 22;
        if (pde & PG_PRESENT_MASK) {
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                prot = pde & (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
                mem_print(mon, env, &start, &last_prot, end, prot);
            } else {
                for(l2 = 0; l2 < 1024; l2++) {
                    cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
                    pte = le32_to_cpu(pte);
                    end = (l1 << 22) + (l2 << 12);
                    if (pte & PG_PRESENT_MASK) {
                        prot = pte & pde &
                            (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
                    } else {
                        prot = 0;
                    }
                    mem_print(mon, env, &start, &last_prot, end, prot);
                }
            }
        } else {
            prot = 0;
            mem_print(mon, env, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, env, &start, &last_prot, (hwaddr)1 << 32, 0);
}

static void mem_info_pae32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2, l3;
    int prot, last_prot;
    uint64_t pdpe, pde, pte;
    uint64_t pdp_addr, pd_addr, pt_addr;
    hwaddr start, end;

    pdp_addr = env->cr[3] & ~0x1f;
    last_prot = 0;
    start = -1;
    for (l1 = 0; l1 < 4; l1++) {
        cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
        pdpe = le64_to_cpu(pdpe);
        end = l1 << 30;
        if (pdpe & PG_PRESENT_MASK) {
            pd_addr = pdpe & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
                pde = le64_to_cpu(pde);
                end = (l1 << 30) + (l2 << 21);
                if (pde & PG_PRESENT_MASK) {
                    if (pde & PG_PSE_MASK) {
                        prot = pde & (PG_USER_MASK | PG_RW_MASK |
                                      PG_PRESENT_MASK);
                        mem_print(mon, env, &start, &last_prot, end, prot);
                    } else {
                        pt_addr = pde & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
                            pte = le64_to_cpu(pte);
                            end = (l1 << 30) + (l2 << 21) + (l3 << 12);
                            if (pte & PG_PRESENT_MASK) {
                                prot = pte & pde & (PG_USER_MASK | PG_RW_MASK |
                                                    PG_PRESENT_MASK);
                            } else {
                                prot = 0;
                            }
                            mem_print(mon, env, &start, &last_prot, end, prot);
                        }
                    }
                } else {
                    prot = 0;
                    mem_print(mon, env, &start, &last_prot, end, prot);
                }
            }
        } else {
            prot = 0;
            mem_print(mon, env, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, env, &start, &last_prot, (hwaddr)1 << 32, 0);
}


#ifdef TARGET_X86_64
static void mem_info_la48(Monitor *mon, CPUArchState *env)
{
    int prot, last_prot;
    uint64_t l1, l2, l3, l4;
    uint64_t pml4e, pdpe, pde, pte;
    uint64_t pml4_addr, pdp_addr, pd_addr, pt_addr, start, end;

    pml4_addr = env->cr[3] & 0x3fffffffff000ULL;
    last_prot = 0;
    start = -1;
    for (l1 = 0; l1 < 512; l1++) {
        cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
        pml4e = le64_to_cpu(pml4e);
        end = l1 << 39;
        if (pml4e & PG_PRESENT_MASK) {
            pdp_addr = pml4e & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
                pdpe = le64_to_cpu(pdpe);
                end = (l1 << 39) + (l2 << 30);
                if (pdpe & PG_PRESENT_MASK) {
                    if (pdpe & PG_PSE_MASK) {
                        prot = pdpe & (PG_USER_MASK | PG_RW_MASK |
                                       PG_PRESENT_MASK);
                        prot &= pml4e;
                        mem_print(mon, env, &start, &last_prot, end, prot);
                    } else {
                        pd_addr = pdpe & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
                            pde = le64_to_cpu(pde);
                            end = (l1 << 39) + (l2 << 30) + (l3 << 21);
                            if (pde & PG_PRESENT_MASK) {
                                if (pde & PG_PSE_MASK) {
                                    prot = pde & (PG_USER_MASK | PG_RW_MASK |
                                                  PG_PRESENT_MASK);
                                    prot &= pml4e & pdpe;
                                    mem_print(mon, env, &start,
                                              &last_prot, end, prot);
                                } else {
                                    pt_addr = pde & 0x3fffffffff000ULL;
                                    for (l4 = 0; l4 < 512; l4++) {
                                        cpu_physical_memory_read(pt_addr
                                                                 + l4 * 8,
                                                                 &pte, 8);
                                        pte = le64_to_cpu(pte);
                                        end = (l1 << 39) + (l2 << 30) +
                                            (l3 << 21) + (l4 << 12);
                                        if (pte & PG_PRESENT_MASK) {
                                            prot = pte & (PG_USER_MASK | PG_RW_MASK |
                                                          PG_PRESENT_MASK);
                                            prot &= pml4e & pdpe & pde;
                                        } else {
                                            prot = 0;
                                        }
                                        mem_print(mon, env, &start,
                                                  &last_prot, end, prot);
                                    }
                                }
                            } else {
                                prot = 0;
                                mem_print(mon, env, &start,
                                          &last_prot, end, prot);
                            }
                        }
                    }
                } else {
                    prot = 0;
                    mem_print(mon, env, &start, &last_prot, end, prot);
                }
            }
        } else {
            prot = 0;
            mem_print(mon, env, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, env, &start, &last_prot, (hwaddr)1 << 48, 0);
}

static void mem_info_la57(Monitor *mon, CPUArchState *env)
{
    int prot, last_prot;
    uint64_t l0, l1, l2, l3, l4;
    uint64_t pml5e, pml4e, pdpe, pde, pte;
    uint64_t pml5_addr, pml4_addr, pdp_addr, pd_addr, pt_addr, start, end;

    pml5_addr = env->cr[3] & 0x3fffffffff000ULL;
    last_prot = 0;
    start = -1;
    for (l0 = 0; l0 < 512; l0++) {
        cpu_physical_memory_read(pml5_addr + l0 * 8, &pml5e, 8);
        pml5e = le64_to_cpu(pml5e);
        end = l0 << 48;
        if (!(pml5e & PG_PRESENT_MASK)) {
            prot = 0;
            mem_print(mon, env, &start, &last_prot, end, prot);
            continue;
        }

        pml4_addr = pml5e & 0x3fffffffff000ULL;
        for (l1 = 0; l1 < 512; l1++) {
            cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
            pml4e = le64_to_cpu(pml4e);
            end = (l0 << 48) + (l1 << 39);
            if (!(pml4e & PG_PRESENT_MASK)) {
                prot = 0;
                mem_print(mon, env, &start, &last_prot, end, prot);
                continue;
            }

            pdp_addr = pml4e & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
                pdpe = le64_to_cpu(pdpe);
                end = (l0 << 48) + (l1 << 39) + (l2 << 30);
                if (!(pdpe & PG_PRESENT_MASK)) {
                    prot = 0;
                    mem_print(mon, env, &start, &last_prot, end, prot);
                    continue;
                }

                if (pdpe & PG_PSE_MASK) {
                    prot = pdpe & (PG_USER_MASK | PG_RW_MASK |
                            PG_PRESENT_MASK);
                    prot &= pml5e & pml4e;
                    mem_print(mon, env, &start, &last_prot, end, prot);
                    continue;
                }

                pd_addr = pdpe & 0x3fffffffff000ULL;
                for (l3 = 0; l3 < 512; l3++) {
                    cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
                    pde = le64_to_cpu(pde);
                    end = (l0 << 48) + (l1 << 39) + (l2 << 30) + (l3 << 21);
                    if (!(pde & PG_PRESENT_MASK)) {
                        prot = 0;
                        mem_print(mon, env, &start, &last_prot, end, prot);
                        continue;
                    }

                    if (pde & PG_PSE_MASK) {
                        prot = pde & (PG_USER_MASK | PG_RW_MASK |
                                PG_PRESENT_MASK);
                        prot &= pml5e & pml4e & pdpe;
                        mem_print(mon, env, &start, &last_prot, end, prot);
                        continue;
                    }

                    pt_addr = pde & 0x3fffffffff000ULL;
                    for (l4 = 0; l4 < 512; l4++) {
                        cpu_physical_memory_read(pt_addr + l4 * 8, &pte, 8);
                        pte = le64_to_cpu(pte);
                        end = (l0 << 48) + (l1 << 39) + (l2 << 30) +
                            (l3 << 21) + (l4 << 12);
                        if (pte & PG_PRESENT_MASK) {
                            prot = pte & (PG_USER_MASK | PG_RW_MASK |
                                    PG_PRESENT_MASK);
                            prot &= pml5e & pml4e & pdpe & pde;
                        } else {
                            prot = 0;
                        }
                        mem_print(mon, env, &start, &last_prot, end, prot);
                    }
                }
            }
        }
    }
    /* Flush last range */
    mem_print(mon, env, &start, &last_prot, (hwaddr)1 << 57, 0);
}
#endif /* TARGET_X86_64 */

void hmp_info_mem(Monitor *mon, const QDict *qdict)
{
    CPUArchState *env;

    env = mon_get_cpu_env(mon);
    if (!env) {
        monitor_printf(mon, "No CPU available\n");
        return;
    }

    if (!(env->cr[0] & CR0_PG_MASK)) {
        monitor_printf(mon, "PG disabled\n");
        return;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            if (env->cr[4] & CR4_LA57_MASK) {
                mem_info_la57(mon, env);
            } else {
                mem_info_la48(mon, env);
            }
        } else
#endif
        {
            mem_info_pae32(mon, env);
        }
    } else {
        mem_info_32(mon, env);
    }
}

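/*
 * HMP "mce": inject a machine-check event into the selected vCPU, taking the
 * target bank and the MCi_STATUS/MCG_STATUS/ADDR/MISC values from the command
 * arguments; the "broadcast" flag requests delivery to all vCPUs.
 */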
void hmp_mce(Monitor *mon, const QDict *qdict)
{
    X86CPU *cpu;
    CPUState *cs;
    int cpu_index = qdict_get_int(qdict, "cpu_index");
    int bank = qdict_get_int(qdict, "bank");
    uint64_t status = qdict_get_int(qdict, "status");
    uint64_t mcg_status = qdict_get_int(qdict, "mcg_status");
    uint64_t addr = qdict_get_int(qdict, "addr");
    uint64_t misc = qdict_get_int(qdict, "misc");
    int flags = MCE_INJECT_UNCOND_AO;

    if (qdict_get_try_bool(qdict, "broadcast", false)) {
        flags |= MCE_INJECT_BROADCAST;
    }
    cs = qemu_get_cpu(cpu_index);
    if (cs != NULL) {
        cpu = X86_CPU(cs);
        cpu_x86_inject_mce(mon, cpu, bank, status, mcg_status, addr, misc,
                           flags);
    }
}

static target_long monitor_get_pc(Monitor *mon, const struct MonitorDef *md,
                                  int val)
{
    CPUArchState *env = mon_get_cpu_env(mon);
    return env->eip + env->segs[R_CS].base;
}

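/*
 * Register table for HMP expressions ($eax, $cs.base, $pc, ...): each entry
 * maps a name to an offset into CPUX86State, or to a getter such as
 * monitor_get_pc, which returns the linear PC (CS.base + EIP).
 */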
const MonitorDef monitor_defs[] = {
#define SEG(name, seg) \
    { name, offsetof(CPUX86State, segs[seg].selector), NULL, MD_I32 },\
    { name ".base", offsetof(CPUX86State, segs[seg].base) },\
    { name ".limit", offsetof(CPUX86State, segs[seg].limit), NULL, MD_I32 },

    { "eax", offsetof(CPUX86State, regs[0]) },
    { "ecx", offsetof(CPUX86State, regs[1]) },
    { "edx", offsetof(CPUX86State, regs[2]) },
    { "ebx", offsetof(CPUX86State, regs[3]) },
    { "esp|sp", offsetof(CPUX86State, regs[4]) },
    { "ebp|fp", offsetof(CPUX86State, regs[5]) },
    { "esi", offsetof(CPUX86State, regs[6]) },
    { "edi", offsetof(CPUX86State, regs[7]) },
#ifdef TARGET_X86_64
    { "r8", offsetof(CPUX86State, regs[8]) },
    { "r9", offsetof(CPUX86State, regs[9]) },
    { "r10", offsetof(CPUX86State, regs[10]) },
    { "r11", offsetof(CPUX86State, regs[11]) },
    { "r12", offsetof(CPUX86State, regs[12]) },
    { "r13", offsetof(CPUX86State, regs[13]) },
    { "r14", offsetof(CPUX86State, regs[14]) },
    { "r15", offsetof(CPUX86State, regs[15]) },
#endif
    { "eflags", offsetof(CPUX86State, eflags) },
    { "eip", offsetof(CPUX86State, eip) },
    SEG("cs", R_CS)
    SEG("ds", R_DS)
    SEG("es", R_ES)
    SEG("ss", R_SS)
    SEG("fs", R_FS)
    SEG("gs", R_GS)
    { "pc", 0, monitor_get_pc, },
    { NULL },
};

const MonitorDef *target_monitor_defs(void)
{
    return monitor_defs;
}

void hmp_info_local_apic(Monitor *mon, const QDict *qdict)
{
    CPUState *cs;

    if (qdict_haskey(qdict, "apic-id")) {
        int id = qdict_get_try_int(qdict, "apic-id", 0);
        cs = cpu_by_arch_id(id);
    } else {
        cs = mon_get_cpu(mon);
    }

    if (!cs) {
        monitor_printf(mon, "No CPU available\n");
        return;
    }
    x86_cpu_dump_local_apic_state(cs, CPU_DUMP_FPU);
}

void hmp_info_io_apic(Monitor *mon, const QDict *qdict)
{
    monitor_printf(mon, "This command is obsolete and will be "
                   "removed soon. Please use 'info pic' instead.\n");
}

SevInfo *qmp_query_sev(Error **errp)
{
    SevInfo *info;

    info = sev_get_info();
    if (!info) {
        error_setg(errp, "SEV feature is not available");
        return NULL;
    }

    return info;
}

void hmp_info_sev(Monitor *mon, const QDict *qdict)
{
    SevInfo *info = sev_get_info();

    if (!info || !info->enabled) {
        monitor_printf(mon, "SEV is not enabled\n");
        goto out;
    }

    if (sev_snp_enabled()) {
        monitor_printf(mon, "state: %s\n", SevState_str(info->state));
        monitor_printf(mon, "build: %d\n", info->build_id);
        monitor_printf(mon, "api version: %d.%d\n",
                       info->api_major, info->api_minor);
        monitor_printf(mon, "debug: %s\n",
                       info->u.sev_snp.snp_policy & SEV_SNP_POLICY_DBG ? "on"
                                                                       : "off");
        monitor_printf(mon, "SMT allowed: %s\n",
                       info->u.sev_snp.snp_policy & SEV_SNP_POLICY_SMT ? "on"
                                                                       : "off");
        monitor_printf(mon, "SEV type: %s\n", SevGuestType_str(info->sev_type));
    } else {
        monitor_printf(mon, "handle: %d\n", info->u.sev.handle);
        monitor_printf(mon, "state: %s\n", SevState_str(info->state));
        monitor_printf(mon, "build: %d\n", info->build_id);
        monitor_printf(mon, "api version: %d.%d\n",
                       info->api_major, info->api_minor);
        monitor_printf(mon, "debug: %s\n",
                       info->u.sev.policy & SEV_POLICY_NODBG ? "off" : "on");
        monitor_printf(mon, "key-sharing: %s\n",
                       info->u.sev.policy & SEV_POLICY_NOKS ? "off" : "on");
        monitor_printf(mon, "SEV type: %s\n", SevGuestType_str(info->sev_type));
    }

out:
    qapi_free_SevInfo(info);
}

SevLaunchMeasureInfo *qmp_query_sev_launch_measure(Error **errp)
{
    char *data;
    SevLaunchMeasureInfo *info;

    data = sev_get_launch_measurement();
    if (!data) {
        error_setg(errp, "Measurement is not available");
        return NULL;
    }

    info = g_malloc0(sizeof(*info));
    info->data = data;

    return info;
}

SevCapability *qmp_query_sev_capabilities(Error **errp)
{
    return sev_get_capabilities(errp);
}

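/*
 * GUID of the SEV launch-secret area that OVMF publishes in its GUID table;
 * when the caller does not pass a GPA, the secret is injected at the base
 * address recorded in that area.
 */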
#define SEV_SECRET_GUID "4c2eb361-7d9b-4cc3-8081-127c90d3d294"
struct sev_secret_area {
    uint32_t base;
    uint32_t size;
};

void qmp_sev_inject_launch_secret(const char *packet_hdr,
                                  const char *secret,
                                  bool has_gpa, uint64_t gpa,
                                  Error **errp)
{
    if (!has_gpa) {
        uint8_t *data;
        struct sev_secret_area *area;

        if (!pc_system_ovmf_table_find(SEV_SECRET_GUID, &data, NULL)) {
            error_setg(errp, "SEV: no secret area found in OVMF,"
                       " gpa must be specified.");
            return;
        }
        area = (struct sev_secret_area *)data;
        gpa = area->base;
    }

    sev_inject_launch_secret(packet_hdr, secret, gpa, errp);
}

SevAttestationReport *
qmp_query_sev_attestation_report(const char *mnonce, Error **errp)
{
    return sev_get_attestation_report(mnonce, errp);
}

SGXInfo *qmp_query_sgx(Error **errp)
{
    return sgx_get_info(errp);
}

void hmp_info_sgx(Monitor *mon, const QDict *qdict)
{
    Error *err = NULL;
    g_autoptr(SGXInfo) info = qmp_query_sgx(&err);

    if (err) {
        error_report_err(err);
        return;
    }
    monitor_printf(mon, "SGX support: %s\n",
                   info->sgx ? "enabled" : "disabled");
    monitor_printf(mon, "SGX1 support: %s\n",
                   info->sgx1 ? "enabled" : "disabled");
    monitor_printf(mon, "SGX2 support: %s\n",
                   info->sgx2 ? "enabled" : "disabled");
    monitor_printf(mon, "FLC support: %s\n",
                   info->flc ? "enabled" : "disabled");
    monitor_printf(mon, "size: %" PRIu64 "\n",
                   info->section_size);
}

SGXInfo *qmp_query_sgx_capabilities(Error **errp)
{
    return sgx_get_capabilities(errp);
}