cachepc-qemu

Fork of AMDESE/qemu with changes for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu
Log | Files | Refs | Submodules | LICENSE | sfeed.txt

test-block-iothread.c (23719B)


      1/*
      2 * Block tests for iothreads
      3 *
      4 * Copyright (c) 2018 Kevin Wolf <kwolf@redhat.com>
      5 *
      6 * Permission is hereby granted, free of charge, to any person obtaining a copy
      7 * of this software and associated documentation files (the "Software"), to deal
      8 * in the Software without restriction, including without limitation the rights
      9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
     10 * copies of the Software, and to permit persons to whom the Software is
     11 * furnished to do so, subject to the following conditions:
     12 *
     13 * The above copyright notice and this permission notice shall be included in
     14 * all copies or substantial portions of the Software.
     15 *
     16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
     19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
     20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
     21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
     22 * THE SOFTWARE.
     23 */
     24
     25#include "qemu/osdep.h"
     26#include "block/block.h"
     27#include "block/blockjob_int.h"
     28#include "sysemu/block-backend.h"
     29#include "qapi/error.h"
     30#include "qapi/qmp/qdict.h"
     31#include "qemu/main-loop.h"
     32#include "iothread.h"
     33
/*
 * Dummy read callback: accept any request and report success without
 * touching qiov.  The tests only exercise request routing and
 * coroutine/AioContext handling, not data content.
 */
static int coroutine_fn bdrv_test_co_preadv(BlockDriverState *bs,
                                            int64_t offset, int64_t bytes,
                                            QEMUIOVector *qiov,
                                            BdrvRequestFlags flags)
{
    return 0;
}
     41
/* Dummy write callback: unconditionally succeed, data is discarded. */
static int coroutine_fn bdrv_test_co_pwritev(BlockDriverState *bs,
                                             int64_t offset, int64_t bytes,
                                             QEMUIOVector *qiov,
                                             BdrvRequestFlags flags)
{
    return 0;
}
     49
/* Dummy discard callback: unconditionally succeed. */
static int coroutine_fn bdrv_test_co_pdiscard(BlockDriverState *bs,
                                              int64_t offset, int64_t bytes)
{
    return 0;
}
     55
/* Dummy truncate callback: pretend the resize succeeded. */
static int coroutine_fn
bdrv_test_co_truncate(BlockDriverState *bs, int64_t offset, bool exact,
                      PreallocMode prealloc, BdrvRequestFlags flags,
                      Error **errp)
{
    return 0;
}
     63
/*
 * Dummy block-status callback: report the whole queried range in one go
 * (*pnum = count) with no status flags set, i.e. "data not allocated here".
 */
static int coroutine_fn bdrv_test_co_block_status(BlockDriverState *bs,
                                                  bool want_zero,
                                                  int64_t offset, int64_t count,
                                                  int64_t *pnum, int64_t *map,
                                                  BlockDriverState **file)
{
    *pnum = count;
    return 0;
}
     73
/*
 * Minimal test driver backing all nodes in this file: every I/O callback
 * succeeds immediately.  instance_size is 1 because the driver keeps no
 * per-node state.  Note: test_sync_op_block_status mutates
 * .bdrv_co_block_status at runtime.
 */
static BlockDriver bdrv_test = {
    .format_name            = "test",
    .instance_size          = 1,

    .bdrv_co_preadv         = bdrv_test_co_preadv,
    .bdrv_co_pwritev        = bdrv_test_co_pwritev,
    .bdrv_co_pdiscard       = bdrv_test_co_pdiscard,
    .bdrv_co_truncate       = bdrv_test_co_truncate,
    .bdrv_co_block_status   = bdrv_test_co_block_status,
};
     84
/*
 * Synchronous bdrv_pread(): a full read of the dummy image succeeds and
 * returns the byte count; a negative offset is rejected early with -EIO
 * before reaching the driver.
 */
static void test_sync_op_pread(BdrvChild *c)
{
    uint8_t buf[512];
    int ret;

    /* Success */
    ret = bdrv_pread(c, 0, buf, sizeof(buf));
    g_assert_cmpint(ret, ==, 512);

    /* Early error: Negative offset */
    ret = bdrv_pread(c, -2, buf, sizeof(buf));
    g_assert_cmpint(ret, ==, -EIO);
}
     98
/*
 * Synchronous bdrv_pwrite(): mirror of test_sync_op_pread for the write
 * path — success returns the byte count, negative offset fails with -EIO.
 */
static void test_sync_op_pwrite(BdrvChild *c)
{
    uint8_t buf[512] = { 0 };
    int ret;

    /* Success */
    ret = bdrv_pwrite(c, 0, buf, sizeof(buf));
    g_assert_cmpint(ret, ==, 512);

    /* Early error: Negative offset */
    ret = bdrv_pwrite(c, -2, buf, sizeof(buf));
    g_assert_cmpint(ret, ==, -EIO);
}
    112
/* Same read checks as test_sync_op_pread, via the BlockBackend API. */
static void test_sync_op_blk_pread(BlockBackend *blk)
{
    uint8_t buf[512];
    int ret;

    /* Success */
    ret = blk_pread(blk, 0, buf, sizeof(buf));
    g_assert_cmpint(ret, ==, 512);

    /* Early error: Negative offset */
    ret = blk_pread(blk, -2, buf, sizeof(buf));
    g_assert_cmpint(ret, ==, -EIO);
}
    126
/* Same write checks as test_sync_op_pwrite, via the BlockBackend API. */
static void test_sync_op_blk_pwrite(BlockBackend *blk)
{
    uint8_t buf[512] = { 0 };
    int ret;

    /* Success */
    ret = blk_pwrite(blk, 0, buf, sizeof(buf), 0);
    g_assert_cmpint(ret, ==, 512);

    /* Early error: Negative offset */
    ret = blk_pwrite(blk, -2, buf, sizeof(buf), 0);
    g_assert_cmpint(ret, ==, -EIO);
}
    140
/*
 * bdrv_load_vmstate() on the test driver must fail with -ENOTSUP: the
 * driver table above registers no vmstate callbacks.
 */
static void test_sync_op_load_vmstate(BdrvChild *c)
{
    uint8_t buf[512];
    int ret;

    /* Error: Driver does not support snapshots */
    ret = bdrv_load_vmstate(c->bs, buf, 0, sizeof(buf));
    g_assert_cmpint(ret, ==, -ENOTSUP);
}
    150
/* Mirror of test_sync_op_load_vmstate for the save direction. */
static void test_sync_op_save_vmstate(BdrvChild *c)
{
    uint8_t buf[512] = { 0 };
    int ret;

    /* Error: Driver does not support snapshots */
    ret = bdrv_save_vmstate(c->bs, buf, 0, sizeof(buf));
    g_assert_cmpint(ret, ==, -ENOTSUP);
}
    160
/*
 * Synchronous bdrv_pdiscard(): succeeds when BDRV_O_UNMAP is set (driver
 * path), succeeds as a no-op when it is clear, and rejects a negative
 * offset with -EIO.  Leaves BDRV_O_UNMAP cleared on return.
 */
static void test_sync_op_pdiscard(BdrvChild *c)
{
    int ret;

    /* Normal success path */
    c->bs->open_flags |= BDRV_O_UNMAP;
    ret = bdrv_pdiscard(c, 0, 512);
    g_assert_cmpint(ret, ==, 0);

    /* Early success: UNMAP not supported */
    c->bs->open_flags &= ~BDRV_O_UNMAP;
    ret = bdrv_pdiscard(c, 0, 512);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = bdrv_pdiscard(c, -2, 512);
    g_assert_cmpint(ret, ==, -EIO);
}
    179
/*
 * blk_pdiscard(): BlockBackend variant.  Only the no-UNMAP early-success
 * and negative-offset paths are checked here (the node is opened without
 * BDRV_O_UNMAP).
 */
static void test_sync_op_blk_pdiscard(BlockBackend *blk)
{
    int ret;

    /* Early success: UNMAP not supported */
    ret = blk_pdiscard(blk, 0, 512);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = blk_pdiscard(blk, -2, 512);
    g_assert_cmpint(ret, ==, -EIO);
}
    192
/*
 * Synchronous bdrv_truncate(): normal resize succeeds, a negative size is
 * rejected with -EINVAL, and truncating a read-only node fails with
 * -EACCES.  The BDRV_O_RDWR flag is restored before returning.
 */
static void test_sync_op_truncate(BdrvChild *c)
{
    int ret;

    /* Normal success path */
    ret = bdrv_truncate(c, 65536, false, PREALLOC_MODE_OFF, 0, NULL);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = bdrv_truncate(c, -2, false, PREALLOC_MODE_OFF, 0, NULL);
    g_assert_cmpint(ret, ==, -EINVAL);

    /* Error: Read-only image */
    c->bs->open_flags &= ~BDRV_O_RDWR;

    ret = bdrv_truncate(c, 65536, false, PREALLOC_MODE_OFF, 0, NULL);
    g_assert_cmpint(ret, ==, -EACCES);

    c->bs->open_flags |= BDRV_O_RDWR;
}
    213
    214static void test_sync_op_block_status(BdrvChild *c)
    215{
    216    int ret;
    217    int64_t n;
    218
    219    /* Normal success path */
    220    ret = bdrv_is_allocated(c->bs, 0, 65536, &n);
    221    g_assert_cmpint(ret, ==, 0);
    222
    223    /* Early success: No driver support */
    224    bdrv_test.bdrv_co_block_status = NULL;
    225    ret = bdrv_is_allocated(c->bs, 0, 65536, &n);
    226    g_assert_cmpint(ret, ==, 1);
    227
    228    /* Early success: bytes = 0 */
    229    ret = bdrv_is_allocated(c->bs, 0, 0, &n);
    230    g_assert_cmpint(ret, ==, 0);
    231
    232    /* Early success: Offset > image size*/
    233    ret = bdrv_is_allocated(c->bs, 0x1000000, 0x1000000, &n);
    234    g_assert_cmpint(ret, ==, 0);
    235}
    236
/*
 * Synchronous bdrv_flush(): succeeds on a writable node, and also
 * succeeds early on a read-only node (nothing to flush).  BDRV_O_RDWR is
 * restored before returning.
 */
static void test_sync_op_flush(BdrvChild *c)
{
    int ret;

    /* Normal success path */
    ret = bdrv_flush(c->bs);
    g_assert_cmpint(ret, ==, 0);

    /* Early success: Read-only image */
    c->bs->open_flags &= ~BDRV_O_RDWR;

    ret = bdrv_flush(c->bs);
    g_assert_cmpint(ret, ==, 0);

    c->bs->open_flags |= BDRV_O_RDWR;
}
    253
/* BlockBackend variant of test_sync_op_flush. */
static void test_sync_op_blk_flush(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);
    int ret;

    /* Normal success path */
    ret = blk_flush(blk);
    g_assert_cmpint(ret, ==, 0);

    /* Early success: Read-only image */
    bs->open_flags &= ~BDRV_O_RDWR;

    ret = blk_flush(blk);
    g_assert_cmpint(ret, ==, 0);

    bs->open_flags |= BDRV_O_RDWR;
}
    271
/*
 * bdrv_check() must fail with -ENOTSUP: the test driver registers no
 * check callback.
 */
static void test_sync_op_check(BdrvChild *c)
{
    BdrvCheckResult result;
    int ret;

    /* Error: Driver does not implement check */
    ret = bdrv_check(c->bs, &result, 0);
    g_assert_cmpint(ret, ==, -ENOTSUP);
}
    281
/*
 * bdrv_invalidate_cache() on an active image: only checks that the call
 * returns (no return value to assert on; errp is NULL so any error would
 * be ignored).
 */
static void test_sync_op_invalidate_cache(BdrvChild *c)
{
    /* Early success: Image is not inactive */
    bdrv_invalidate_cache(c->bs, NULL);
}
    287
    288
/* One entry of the sync-op test table below. */
typedef struct SyncOpTest {
    const char *name;               /* g_test case path, e.g. "/sync-op/pread" */
    void (*fn)(BdrvChild *c);       /* node-level test, always set */
    void (*blkfn)(BlockBackend *blk); /* optional BlockBackend-level variant */
} SyncOpTest;
    294
    295const SyncOpTest sync_op_tests[] = {
    296    {
    297        .name   = "/sync-op/pread",
    298        .fn     = test_sync_op_pread,
    299        .blkfn  = test_sync_op_blk_pread,
    300    }, {
    301        .name   = "/sync-op/pwrite",
    302        .fn     = test_sync_op_pwrite,
    303        .blkfn  = test_sync_op_blk_pwrite,
    304    }, {
    305        .name   = "/sync-op/load_vmstate",
    306        .fn     = test_sync_op_load_vmstate,
    307    }, {
    308        .name   = "/sync-op/save_vmstate",
    309        .fn     = test_sync_op_save_vmstate,
    310    }, {
    311        .name   = "/sync-op/pdiscard",
    312        .fn     = test_sync_op_pdiscard,
    313        .blkfn  = test_sync_op_blk_pdiscard,
    314    }, {
    315        .name   = "/sync-op/truncate",
    316        .fn     = test_sync_op_truncate,
    317    }, {
    318        .name   = "/sync-op/block_status",
    319        .fn     = test_sync_op_block_status,
    320    }, {
    321        .name   = "/sync-op/flush",
    322        .fn     = test_sync_op_flush,
    323        .blkfn  = test_sync_op_blk_flush,
    324    }, {
    325        .name   = "/sync-op/check",
    326        .fn     = test_sync_op_check,
    327    }, {
    328        .name   = "/sync-op/invalidate_cache",
    329        .fn     = test_sync_op_invalidate_cache,
    330    },
    331};
    332
    333/* Test synchronous operations that run in a different iothread, so we have to
    334 * poll for the coroutine there to return. */
/* Test synchronous operations that run in a different iothread, so we have to
 * poll for the coroutine there to return. */
static void test_sync_op(const void *opaque)
{
    const SyncOpTest *t = opaque;
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    BlockBackend *blk;
    BlockDriverState *bs;
    BdrvChild *c;

    /* Fresh 64k test node attached to a BlockBackend in the main context */
    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
    bs->total_sectors = 65536 / BDRV_SECTOR_SIZE;
    blk_insert_bs(blk, bs, &error_abort);
    /* The child just created by blk_insert_bs() is the node's only parent */
    c = QLIST_FIRST(&bs->parents);

    /* Move the backend into the iothread, run the checks under its lock,
     * then move it back (switching back requires holding ctx). */
    blk_set_aio_context(blk, ctx, &error_abort);
    aio_context_acquire(ctx);
    t->fn(c);
    if (t->blkfn) {
        t->blkfn(blk);
    }
    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
    aio_context_release(ctx);

    bdrv_unref(bs);
    blk_unref(blk);
}
    362
/* State for the looping test job driven by test_attach_blockjob(). */
typedef struct TestBlockJob {
    BlockJob common;
    bool should_complete;   /* set by .complete to stop the run loop */
    int n;                  /* iteration counter, polled by the test */
} TestBlockJob;
    368
/* .prepare callback: must always run in the main AioContext. */
static int test_job_prepare(Job *job)
{
    g_assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    return 0;
}
    374
/*
 * Job main loop: bump s->n every iteration until .complete sets
 * should_complete, asserting on every pass that the coroutine runs in the
 * job's current AioContext (i.e. it followed any context switch).
 */
static int coroutine_fn test_job_run(Job *job, Error **errp)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);

    job_transition_to_ready(&s->common.job);
    while (!s->should_complete) {
        s->n++;
        g_assert(qemu_get_current_aio_context() == job->aio_context);

        /* Avoid job_sleep_ns() because it marks the job as !busy. We want to
         * emulate some actual activity (probably some I/O) here so that the
         * drain involved in AioContext switches has to wait for this activity
         * to stop. */
        qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 1000000);

        job_pause_point(&s->common.job);
    }

    g_assert(qemu_get_current_aio_context() == job->aio_context);
    return 0;
}
    396
/* .complete callback: tell the run loop in test_job_run() to stop. */
static void test_job_complete(Job *job, Error **errp)
{
    TestBlockJob *s = container_of(job, TestBlockJob, common.job);
    s->should_complete = true;
}
    402
    403BlockJobDriver test_job_driver = {
    404    .job_driver = {
    405        .instance_size  = sizeof(TestBlockJob),
    406        .free           = block_job_free,
    407        .user_resume    = block_job_user_resume,
    408        .run            = test_job_run,
    409        .complete       = test_job_complete,
    410        .prepare        = test_job_prepare,
    411    },
    412};
    413
/*
 * Start the looping job on a node in the main context, then bounce the
 * node between an iothread and the main context (in both directions,
 * twice) while the job is running.  After each switch we poll until the
 * job has made at least one iteration, proving it kept running in the new
 * context (test_job_run asserts it runs in job->aio_context).
 */
static void test_attach_blockjob(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    BlockBackend *blk;
    BlockDriverState *bs;
    TestBlockJob *tjob;

    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
    blk_insert_bs(blk, bs, &error_abort);

    tjob = block_job_create("job0", &test_job_driver, NULL, bs,
                            0, BLK_PERM_ALL,
                            0, 0, NULL, NULL, &error_abort);
    job_start(&tjob->common.job);

    /* Let the job run at least once in the main context */
    while (tjob->n == 0) {
        aio_poll(qemu_get_aio_context(), false);
    }

    /* Main ctx -> iothread: no lock needed when called from the main loop */
    blk_set_aio_context(blk, ctx, &error_abort);

    tjob->n = 0;
    while (tjob->n == 0) {
        aio_poll(qemu_get_aio_context(), false);
    }

    /* Iothread -> main ctx: must hold the iothread's context lock */
    aio_context_acquire(ctx);
    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
    aio_context_release(ctx);

    tjob->n = 0;
    while (tjob->n == 0) {
        aio_poll(qemu_get_aio_context(), false);
    }

    /* And once more into the iothread before completing the job there */
    blk_set_aio_context(blk, ctx, &error_abort);

    tjob->n = 0;
    while (tjob->n == 0) {
        aio_poll(qemu_get_aio_context(), false);
    }

    aio_context_acquire(ctx);
    job_complete_sync(&tjob->common.job, &error_abort);
    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
    aio_context_release(ctx);

    bdrv_unref(bs);
    blk_unref(blk);
}
    466
    467/*
    468 * Test that changing the AioContext for one node in a tree (here through blk)
    469 * changes all other nodes as well:
    470 *
    471 *  blk
    472 *   |
    473 *   |  bs_verify [blkverify]
    474 *   |   /               \
    475 *   |  /                 \
    476 *  bs_a [bdrv_test]    bs_b [bdrv_test]
    477 *
    478 */
static void test_propagate_basic(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    AioContext *main_ctx;
    BlockBackend *blk;
    BlockDriverState *bs_a, *bs_b, *bs_verify;
    QDict *options;

    /*
     * Create bs_a and its BlockBackend.  We cannot take the RESIZE
     * permission because blkverify will not share it on the test
     * image.
     */
    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL & ~BLK_PERM_RESIZE,
                  BLK_PERM_ALL);
    bs_a = bdrv_new_open_driver(&bdrv_test, "bs_a", BDRV_O_RDWR, &error_abort);
    blk_insert_bs(blk, bs_a, &error_abort);

    /* Create bs_b */
    bs_b = bdrv_new_open_driver(&bdrv_test, "bs_b", BDRV_O_RDWR, &error_abort);

    /* Create blkverify filter that references both bs_a and bs_b */
    options = qdict_new();
    qdict_put_str(options, "driver", "blkverify");
    qdict_put_str(options, "test", "bs_a");
    qdict_put_str(options, "raw", "bs_b");

    bs_verify = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);

    /* Switch the AioContext: moving blk must pull bs_a, and through the
     * blkverify parent also bs_b, into the new context */
    blk_set_aio_context(blk, ctx, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs_a) == ctx);
    g_assert(bdrv_get_aio_context(bs_verify) == ctx);
    g_assert(bdrv_get_aio_context(bs_b) == ctx);

    /* Switch the AioContext back (lock required for this direction) */
    main_ctx = qemu_get_aio_context();
    aio_context_acquire(ctx);
    blk_set_aio_context(blk, main_ctx, &error_abort);
    aio_context_release(ctx);
    g_assert(blk_get_aio_context(blk) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_a) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_verify) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_b) == main_ctx);

    bdrv_unref(bs_verify);
    bdrv_unref(bs_b);
    bdrv_unref(bs_a);
    blk_unref(blk);
}
    531
    532/*
    533 * Test that diamonds in the graph don't lead to endless recursion:
    534 *
    535 *              blk
    536 *               |
    537 *      bs_verify [blkverify]
    538 *       /              \
    539 *      /                \
    540 *   bs_b [raw]         bs_c[raw]
    541 *      \                /
    542 *       \              /
    543 *       bs_a [bdrv_test]
    544 */
static void test_propagate_diamond(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    AioContext *main_ctx;
    BlockBackend *blk;
    BlockDriverState *bs_a, *bs_b, *bs_c, *bs_verify;
    QDict *options;

    /* Create bs_a, the shared bottom of the diamond */
    bs_a = bdrv_new_open_driver(&bdrv_test, "bs_a", BDRV_O_RDWR, &error_abort);

    /* Create bs_b and bs_c, two raw filters that both reference bs_a */
    options = qdict_new();
    qdict_put_str(options, "driver", "raw");
    qdict_put_str(options, "file", "bs_a");
    qdict_put_str(options, "node-name", "bs_b");
    bs_b = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);

    options = qdict_new();
    qdict_put_str(options, "driver", "raw");
    qdict_put_str(options, "file", "bs_a");
    qdict_put_str(options, "node-name", "bs_c");
    bs_c = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);

    /* Create blkverify filter that references both bs_b and bs_c */
    options = qdict_new();
    qdict_put_str(options, "driver", "blkverify");
    qdict_put_str(options, "test", "bs_b");
    qdict_put_str(options, "raw", "bs_c");

    bs_verify = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);
    /*
     * Do not take the RESIZE permission: This would require the same
     * from bs_c and thus from bs_a; however, blkverify will not share
     * it on bs_b, and thus it will not be available for bs_a.
     */
    blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL & ~BLK_PERM_RESIZE,
                  BLK_PERM_ALL);
    blk_insert_bs(blk, bs_verify, &error_abort);

    /* Switch the AioContext: all four nodes must move, and the two paths
     * to bs_a must not recurse endlessly */
    blk_set_aio_context(blk, ctx, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs_verify) == ctx);
    g_assert(bdrv_get_aio_context(bs_a) == ctx);
    g_assert(bdrv_get_aio_context(bs_b) == ctx);
    g_assert(bdrv_get_aio_context(bs_c) == ctx);

    /* Switch the AioContext back */
    main_ctx = qemu_get_aio_context();
    aio_context_acquire(ctx);
    blk_set_aio_context(blk, main_ctx, &error_abort);
    aio_context_release(ctx);
    g_assert(blk_get_aio_context(blk) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_verify) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_a) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_b) == main_ctx);
    g_assert(bdrv_get_aio_context(bs_c) == main_ctx);

    blk_unref(blk);
    bdrv_unref(bs_verify);
    bdrv_unref(bs_c);
    bdrv_unref(bs_b);
    bdrv_unref(bs_a);
}
    611
/*
 * AioContext propagation across a running mirror job: switching src must
 * drag target, the filter node and the job along; a BlockBackend that does
 * not allow context changes must block the switch until
 * blk_set_allow_aio_context_change() is called.
 */
static void test_propagate_mirror(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    AioContext *main_ctx = qemu_get_aio_context();
    BlockDriverState *src, *target, *filter;
    BlockBackend *blk;
    Job *job;
    Error *local_err = NULL;

    /* Create src and target*/
    src = bdrv_new_open_driver(&bdrv_test, "src", BDRV_O_RDWR, &error_abort);
    target = bdrv_new_open_driver(&bdrv_test, "target", BDRV_O_RDWR,
                                  &error_abort);

    /* Start a mirror job */
    mirror_start("job0", src, target, NULL, JOB_DEFAULT, 0, 0, 0,
                 MIRROR_SYNC_MODE_NONE, MIRROR_OPEN_BACKING_CHAIN, false,
                 BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT,
                 false, "filter_node", MIRROR_COPY_MODE_BACKGROUND,
                 &error_abort);
    job = job_get("job0");
    filter = bdrv_find_node("filter_node");

    /* Change the AioContext of src: target, filter and job must follow */
    bdrv_try_set_aio_context(src, ctx, &error_abort);
    g_assert(bdrv_get_aio_context(src) == ctx);
    g_assert(bdrv_get_aio_context(target) == ctx);
    g_assert(bdrv_get_aio_context(filter) == ctx);
    g_assert(job->aio_context == ctx);

    /* Change the AioContext of target: propagation works in the other
     * direction too */
    aio_context_acquire(ctx);
    bdrv_try_set_aio_context(target, main_ctx, &error_abort);
    aio_context_release(ctx);
    g_assert(bdrv_get_aio_context(src) == main_ctx);
    g_assert(bdrv_get_aio_context(target) == main_ctx);
    g_assert(bdrv_get_aio_context(filter) == main_ctx);

    /* With a BlockBackend on src, changing target must fail */
    blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL);
    blk_insert_bs(blk, src, &error_abort);

    bdrv_try_set_aio_context(target, ctx, &local_err);
    error_free_or_abort(&local_err);

    /* ...and nothing may have moved */
    g_assert(blk_get_aio_context(blk) == main_ctx);
    g_assert(bdrv_get_aio_context(src) == main_ctx);
    g_assert(bdrv_get_aio_context(target) == main_ctx);
    g_assert(bdrv_get_aio_context(filter) == main_ctx);

    /* ...unless we explicitly allow it */
    aio_context_acquire(ctx);
    blk_set_allow_aio_context_change(blk, true);
    bdrv_try_set_aio_context(target, ctx, &error_abort);
    aio_context_release(ctx);

    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(src) == ctx);
    g_assert(bdrv_get_aio_context(target) == ctx);
    g_assert(bdrv_get_aio_context(filter) == ctx);

    job_cancel_sync_all();

    /* Move everything back to the main context before cleanup */
    aio_context_acquire(ctx);
    blk_set_aio_context(blk, main_ctx, &error_abort);
    bdrv_try_set_aio_context(target, main_ctx, &error_abort);
    aio_context_release(ctx);

    blk_unref(blk);
    bdrv_unref(src);
    bdrv_unref(target);
}
    685
/*
 * Attach a second (filter) node referencing a base that already lives in
 * an iothread: the new node must inherit the iothread context, and a
 * later switch back to the main context must move both nodes.
 */
static void test_attach_second_node(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    AioContext *main_ctx = qemu_get_aio_context();
    BlockBackend *blk;
    BlockDriverState *bs, *filter;
    QDict *options;

    /* blk (and therefore bs) starts out in the iothread context */
    blk = blk_new(ctx, BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
    blk_insert_bs(blk, bs, &error_abort);

    options = qdict_new();
    qdict_put_str(options, "driver", "raw");
    qdict_put_str(options, "file", "base");

    filter = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs) == ctx);
    g_assert(bdrv_get_aio_context(filter) == ctx);

    aio_context_acquire(ctx);
    blk_set_aio_context(blk, main_ctx, &error_abort);
    aio_context_release(ctx);
    g_assert(blk_get_aio_context(blk) == main_ctx);
    g_assert(bdrv_get_aio_context(bs) == main_ctx);
    g_assert(bdrv_get_aio_context(filter) == main_ctx);

    bdrv_unref(filter);
    bdrv_unref(bs);
    blk_unref(blk);
}
    719
/*
 * Detaching a node from a BlockBackend that lives in an iothread must
 * leave the BlockBackend's context unchanged (the node falls back to the
 * main context); re-attaching must pull the node into the backend's
 * context again.
 */
static void test_attach_preserve_blk_ctx(void)
{
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    BlockBackend *blk;
    BlockDriverState *bs;

    blk = blk_new(ctx, BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
    bs->total_sectors = 65536 / BDRV_SECTOR_SIZE;

    /* Add node to BlockBackend that has an iothread context assigned */
    blk_insert_bs(blk, bs, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs) == ctx);

    /* Remove the node again */
    aio_context_acquire(ctx);
    blk_remove_bs(blk);
    aio_context_release(ctx);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs) == qemu_get_aio_context());

    /* Re-attach the node */
    blk_insert_bs(blk, bs, &error_abort);
    g_assert(blk_get_aio_context(blk) == ctx);
    g_assert(bdrv_get_aio_context(bs) == ctx);

    /* Return blk to the main context before dropping the references */
    aio_context_acquire(ctx);
    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
    aio_context_release(ctx);
    bdrv_unref(bs);
    blk_unref(blk);
}
    754
/*
 * Register every sync-op table entry as a data-driven g_test case, plus
 * the individual attach/propagate tests, then run them all.
 */
int main(int argc, char **argv)
{
    int i;

    bdrv_init();
    qemu_init_main_loop(&error_abort);

    g_test_init(&argc, &argv, NULL);

    for (i = 0; i < ARRAY_SIZE(sync_op_tests); i++) {
        const SyncOpTest *t = &sync_op_tests[i];
        g_test_add_data_func(t->name, t, test_sync_op);
    }

    g_test_add_func("/attach/blockjob", test_attach_blockjob);
    g_test_add_func("/attach/second_node", test_attach_second_node);
    g_test_add_func("/attach/preserve_blk_ctx", test_attach_preserve_blk_ctx);
    g_test_add_func("/propagate/basic", test_propagate_basic);
    g_test_add_func("/propagate/diamond", test_propagate_diamond);
    g_test_add_func("/propagate/mirror", test_propagate_mirror);

    return g_test_run();
}