cachepc-qemu

Fork of AMDESE/qemu with changes for cachepc side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu

misc.c (11938B)
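QEMU's miscellaneous Hexagon TCG test (upstream path tests/tcg/hexagon/misc.c): it exercises .new operands, predicated store-immediates, packet auto-anding, and related scalar instructions.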


/*
 *  Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <stdio.h>
#include <string.h>

typedef unsigned char uint8_t;
typedef unsigned short uint16_t;
typedef unsigned int uint32_t;

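/*
 * Background for the helpers below: Hexagon groups instructions into
 * packets that execute together, and a ".new" operand lets one
 * instruction consume a register or predicate value produced by another
 * instruction in the same packet.  Each helper wraps a single packet in
 * inline asm so one form of .new forwarding can be checked at a time.
 */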
static inline void S4_storerhnew_rr(void *p, int index, uint16_t v)
{
  asm volatile("{\n\t"
               "    r0 = %0\n\t"
               "    memh(%1+%2<<#2) = r0.new\n\t"
               "}\n"
               :: "r"(v), "r"(p), "r"(index)
               : "r0", "memory");
}

static uint32_t data;
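/*
 * The "_ap" helpers combine a new-value store with absolute-set
 * addressing: "memX(%0 = ##data) = r0.new" stores to the absolute
 * address of data and also writes that address into the destination
 * register.  main() checks both effects, comparing the returned
 * pointer against &data and the stored value against what was passed.
 */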
static inline void *S4_storerbnew_ap(uint8_t v)
{
  void *ret;
  asm volatile("{\n\t"
               "    r0 = %1\n\t"
               "    memb(%0 = ##data) = r0.new\n\t"
               "}\n"
               : "=r"(ret)
               : "r"(v)
               : "r0", "memory");
  return ret;
}

static inline void *S4_storerhnew_ap(uint16_t v)
{
  void *ret;
  asm volatile("{\n\t"
               "    r0 = %1\n\t"
               "    memh(%0 = ##data) = r0.new\n\t"
               "}\n"
               : "=r"(ret)
               : "r"(v)
               : "r0", "memory");
  return ret;
}

static inline void *S4_storerinew_ap(uint32_t v)
{
  void *ret;
  asm volatile("{\n\t"
               "    r0 = %1\n\t"
               "    memw(%0 = ##data) = r0.new\n\t"
               "}\n"
               : "=r"(ret)
               : "r"(v)
               : "r0", "memory");
  return ret;
}

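/*
 * Predicated store-immediates: the "t"/"f" suffix stores only when the
 * predicate is true/false, and the "new" variants test p0.new in the
 * same packet that computes it.  The "b"/"h"/"i" letter selects byte,
 * halfword, or word.  Every variant stores the constant 27 at offset
 * #4 from the base pointer, so main() checks the array element after
 * the one whose address it passes.
 */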
static inline void S4_storeirbt_io(void *p, int pred)
{
  asm volatile("p0 = cmp.eq(%0, #1)\n\t"
               "if (p0) memb(%1+#4)=#27\n\t"
               :: "r"(pred), "r"(p)
               : "p0", "memory");
}

static inline void S4_storeirbf_io(void *p, int pred)
{
  asm volatile("p0 = cmp.eq(%0, #1)\n\t"
               "if (!p0) memb(%1+#4)=#27\n\t"
               :: "r"(pred), "r"(p)
               : "p0", "memory");
}

static inline void S4_storeirbtnew_io(void *p, int pred)
{
  asm volatile("{\n\t"
               "    p0 = cmp.eq(%0, #1)\n\t"
               "    if (p0.new) memb(%1+#4)=#27\n\t"
               "}\n\t"
               :: "r"(pred), "r"(p)
               : "p0", "memory");
}

static inline void S4_storeirbfnew_io(void *p, int pred)
{
  asm volatile("{\n\t"
               "    p0 = cmp.eq(%0, #1)\n\t"
               "    if (!p0.new) memb(%1+#4)=#27\n\t"
               "}\n\t"
               :: "r"(pred), "r"(p)
               : "p0", "memory");
}

static inline void S4_storeirht_io(void *p, int pred)
{
  asm volatile("p0 = cmp.eq(%0, #1)\n\t"
               "if (p0) memh(%1+#4)=#27\n\t"
               :: "r"(pred), "r"(p)
               : "p0", "memory");
}

static inline void S4_storeirhf_io(void *p, int pred)
{
  asm volatile("p0 = cmp.eq(%0, #1)\n\t"
               "if (!p0) memh(%1+#4)=#27\n\t"
               :: "r"(pred), "r"(p)
               : "p0", "memory");
}

static inline void S4_storeirhtnew_io(void *p, int pred)
{
  asm volatile("{\n\t"
               "    p0 = cmp.eq(%0, #1)\n\t"
               "    if (p0.new) memh(%1+#4)=#27\n\t"
               "}\n\t"
               :: "r"(pred), "r"(p)
               : "p0", "memory");
}

static inline void S4_storeirhfnew_io(void *p, int pred)
{
  asm volatile("{\n\t"
               "    p0 = cmp.eq(%0, #1)\n\t"
               "    if (!p0.new) memh(%1+#4)=#27\n\t"
               "}\n\t"
               :: "r"(pred), "r"(p)
               : "p0", "memory");
}

static inline void S4_storeirit_io(void *p, int pred)
{
  asm volatile("p0 = cmp.eq(%0, #1)\n\t"
               "if (p0) memw(%1+#4)=#27\n\t"
               :: "r"(pred), "r"(p)
               : "p0", "memory");
}

static inline void S4_storeirif_io(void *p, int pred)
{
  asm volatile("p0 = cmp.eq(%0, #1)\n\t"
               "if (!p0) memw(%1+#4)=#27\n\t"
               :: "r"(pred), "r"(p)
               : "p0", "memory");
}

static inline void S4_storeiritnew_io(void *p, int pred)
{
  asm volatile("{\n\t"
               "    p0 = cmp.eq(%0, #1)\n\t"
               "    if (p0.new) memw(%1+#4)=#27\n\t"
               "}\n\t"
               :: "r"(pred), "r"(p)
               : "p0", "memory");
}

static inline void S4_storeirifnew_io(void *p, int pred)
{
  asm volatile("{\n\t"
               "    p0 = cmp.eq(%0, #1)\n\t"
               "    if (!p0.new) memw(%1+#4)=#27\n\t"
               "}\n\t"
               :: "r"(pred), "r"(p)
               : "p0", "memory");
}

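/*
 * Predicated load with post-increment: result is preloaded with the
 * sentinel 31, then the packet computes p0 and loads a word from p
 * (bumping the pointer by 4) only when the predicate is false.  So
 * pred == 0 returns the value at p and pred == 1 returns 31.
 */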
static int L2_ploadrifnew_pi(void *p, int pred)
{
  int result;
  asm volatile("%0 = #31\n\t"
               "{\n\t"
               "    p0 = cmp.eq(%1, #1)\n\t"
               "    if (!p0.new) %0 = memw(%2++#4)\n\t"
               "}\n\t"
               : "=r"(result) : "r"(pred), "r"(p)
               : "p0");
  return result;
}

/*
 * Test that compound-compare-jump is executed in 2 parts.
 * First we have to do all the compares in the packet and
 * account for auto-anding.  Then, we can do the predicated
 * jump.
 */
static inline int cmpnd_cmp_jump(void)
{
    int retval;
    asm ("r5 = #7\n\t"
         "r6 = #9\n\t"
         "{\n\t"
         "    p0 = cmp.eq(r5, #7)\n\t"
         "    if (p0.new) jump:nt 1f\n\t"
         "    p0 = cmp.eq(r6, #7)\n\t"
         "}\n\t"
         "%0 = #12\n\t"
         "jump 2f\n\t"
         "1:\n\t"
         "%0 = #13\n\t"
         "2:\n\t"
         : "=r"(retval) :: "r5", "r6", "p0");
    return retval;
}
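/*
 * Above, r5 == 7 makes the first compare true, but r6 == 9 makes the
 * second false; auto-anding leaves p0 false, so the jump is not taken
 * and the function returns 12, which is what main() expects.
 */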

static inline int test_clrtnew(int arg1, int old_val)
{
  int ret;
  asm volatile("r5 = %2\n\t"
               "{\n\t"
                   "p0 = cmp.eq(%1, #1)\n\t"
                   "if (p0.new) r5=#0\n\t"
               "}\n\t"
               "%0 = r5\n\t"
               : "=r"(ret)
               : "r"(arg1), "r"(old_val)
               : "p0", "r5");
  return ret;
}
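/*
 * test_clrtnew returns 0 when arg1 == 1 (the conditional clear fires
 * via p0.new) and old_val otherwise; main() checks both paths.
 */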

int err;

static void check(int val, int expect)
{
    if (val != expect) {
        printf("ERROR: 0x%04x != 0x%04x\n", val, expect);
        err++;
    }
}

static void check64(long long val, long long expect)
{
    if (val != expect) {
        printf("ERROR: 0x%016llx != 0x%016llx\n", val, expect);
        err++;
    }
}

uint32_t init[10] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
uint32_t array[10];

uint32_t early_exit;

/*
 * Write this as a function because we can't guarantee the compiler will
 * allocate a frame with just the SL2_return_tnew packet.
 */
static void SL2_return_tnew(int x);
asm ("SL2_return_tnew:\n\t"
     "   allocframe(#0)\n\t"
     "   r1 = #1\n\t"
     "   memw(##early_exit) = r1\n\t"
     "   {\n\t"
     "       p0 = cmp.eq(r0, #1)\n\t"
     "       if (p0.new) dealloc_return:nt\n\t"    /* SL2_return_tnew */
     "   }\n\t"
     "   r1 = #0\n\t"
     "   memw(##early_exit) = r1\n\t"
     "   dealloc_return\n\t"
    );
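/*
 * SL2_return_tnew first sets early_exit to 1, then conditionally
 * returns in the same packet that computes p0.  With x == 1 the
 * dealloc_return:nt fires and early_exit stays 1; otherwise execution
 * falls through, clears early_exit, and returns normally.
 */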

static long long creg_pair(int x, int y)
{
    long long retval;
    asm ("m0 = %1\n\t"
         "m1 = %2\n\t"
         "%0 = c7:6\n\t"
         : "=r"(retval) : "r"(x), "r"(y) : "m0", "m1");
    return retval;
}
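/*
 * creg_pair writes m0 and m1, then reads them back through the control
 * register pair c7:6 (m0 and m1 alias c6 and c7), so main() expects x
 * in the low word of the result and y in the high word.
 */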

static long long decbin(long long x, long long y, int *pred)
{
    long long retval;
    asm ("%0 = decbin(%2, %3)\n\t"
         "%1 = p0\n\t"
         : "=r"(retval), "=r"(*pred)
         : "r"(x), "r"(y));
    return retval;
}
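/*
 * decbin produces both a 64-bit result and a predicate: the instruction
 * sets p0 as a side effect, so the asm copies p0 into the second output
 * for the caller to check alongside the value.
 */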
    297
    298/* Check that predicates are auto-and'ed in a packet */
    299static int auto_and(void)
    300{
    301    int retval;
    302    asm ("r5 = #1\n\t"
    303         "{\n\t"
    304         "    p0 = cmp.eq(r1, #1)\n\t"
    305         "    p0 = cmp.eq(r1, #2)\n\t"
    306         "}\n\t"
    307         "%0 = p0\n\t"
    308         : "=r"(retval)
    309         :
    310         : "r5", "p0");
    311    return retval;
    312}
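/*
 * r5 cannot equal both 1 and 2, so the auto-anded p0 is always false
 * and main() expects 0 no matter which compare executes first.
 */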

void test_lsbnew(void)
{
    int result;

    asm("r0 = #2\n\t"
        "r1 = #5\n\t"
        "{\n\t"
        "    p0 = r0\n\t"
        "    if (p0.new) r1 = #3\n\t"
        "}\n\t"
        "%0 = r1\n\t"
        : "=r"(result) :: "r0", "r1", "p0");
    check(result, 5);
}
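/*
 * "p0 = r0" transfers a register into the predicate, but (as the test
 * name suggests) conditional execution looks only at the least
 * significant bit: r0 == 2 has a clear LSB, so the assignment is
 * skipped and r1 keeps its value of 5.
 */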

void test_l2fetch(void)
{
    /* These don't do anything in qemu, just make sure they don't assert */
    asm volatile ("l2fetch(r0, r1)\n\t"
                  "l2fetch(r0, r3:2)\n\t");
}

int main(void)
{
    int res;
    long long res64;
    int pred;

    memcpy(array, init, sizeof(array));
    S4_storerhnew_rr(array, 4, 0xffff);
    check(array[4], 0xffff);

    data = ~0;
    check((uint32_t)S4_storerbnew_ap(0x12), (uint32_t)&data);
    check(data, 0xffffff12);

    data = ~0;
    check((uint32_t)S4_storerhnew_ap(0x1234), (uint32_t)&data);
    check(data, 0xffff1234);

    data = ~0;
    check((uint32_t)S4_storerinew_ap(0x12345678), (uint32_t)&data);
    check(data, 0x12345678);

    /* Byte */
    memcpy(array, init, sizeof(array));
    S4_storeirbt_io(&array[1], 1);
    check(array[2], 27);
    S4_storeirbt_io(&array[2], 0);
    check(array[3], 3);

    memcpy(array, init, sizeof(array));
    S4_storeirbf_io(&array[3], 0);
    check(array[4], 27);
    S4_storeirbf_io(&array[4], 1);
    check(array[5], 5);

    memcpy(array, init, sizeof(array));
    S4_storeirbtnew_io(&array[5], 1);
    check(array[6], 27);
    S4_storeirbtnew_io(&array[6], 0);
    check(array[7], 7);

    memcpy(array, init, sizeof(array));
    S4_storeirbfnew_io(&array[7], 0);
    check(array[8], 27);
    S4_storeirbfnew_io(&array[8], 1);
    check(array[9], 9);

    /* Half word */
    memcpy(array, init, sizeof(array));
    S4_storeirht_io(&array[1], 1);
    check(array[2], 27);
    S4_storeirht_io(&array[2], 0);
    check(array[3], 3);

    memcpy(array, init, sizeof(array));
    S4_storeirhf_io(&array[3], 0);
    check(array[4], 27);
    S4_storeirhf_io(&array[4], 1);
    check(array[5], 5);

    memcpy(array, init, sizeof(array));
    S4_storeirhtnew_io(&array[5], 1);
    check(array[6], 27);
    S4_storeirhtnew_io(&array[6], 0);
    check(array[7], 7);

    memcpy(array, init, sizeof(array));
    S4_storeirhfnew_io(&array[7], 0);
    check(array[8], 27);
    S4_storeirhfnew_io(&array[8], 1);
    check(array[9], 9);

    /* Word */
    memcpy(array, init, sizeof(array));
    S4_storeirit_io(&array[1], 1);
    check(array[2], 27);
    S4_storeirit_io(&array[2], 0);
    check(array[3], 3);

    memcpy(array, init, sizeof(array));
    S4_storeirif_io(&array[3], 0);
    check(array[4], 27);
    S4_storeirif_io(&array[4], 1);
    check(array[5], 5);

    memcpy(array, init, sizeof(array));
    S4_storeiritnew_io(&array[5], 1);
    check(array[6], 27);
    S4_storeiritnew_io(&array[6], 0);
    check(array[7], 7);

    memcpy(array, init, sizeof(array));
    S4_storeirifnew_io(&array[7], 0);
    check(array[8], 27);
    S4_storeirifnew_io(&array[8], 1);
    check(array[9], 9);

    memcpy(array, init, sizeof(array));
    res = L2_ploadrifnew_pi(&array[6], 0);
    check(res, 6);
    res = L2_ploadrifnew_pi(&array[7], 1);
    check(res, 31);

    int x = cmpnd_cmp_jump();
    check(x, 12);

    SL2_return_tnew(0);
    check(early_exit, 0);
    SL2_return_tnew(1);
    check(early_exit, 1);

    long long pair = creg_pair(5, 7);
    check((int)pair, 5);
    check((int)(pair >> 32), 7);

    res = test_clrtnew(1, 7);
    check(res, 0);
    res = test_clrtnew(2, 7);
    check(res, 7);

    res64 = decbin(0xf0f1f2f3f4f5f6f7LL, 0x7f6f5f4f3f2f1f0fLL, &pred);
    check64(res64, 0x357980003700010cLL);
    check(pred, 0);

    res64 = decbin(0xfLL, 0x1bLL, &pred);
    check64(res64, 0x78000100LL);
    check(pred, 1);

    res = auto_and();
    check(res, 0);

    test_lsbnew();

    test_l2fetch();

    puts(err ? "FAIL" : "PASS");
    return err;
}