cachepc-qemu

Fork of AMDESE/qemu with changes for cachepc side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu

block-copy.c (32098B)


/*
 * block_copy API
 *
 * Copyright (C) 2013 Proxmox Server Solutions
 * Copyright (c) 2019 Virtuozzo International GmbH.
 *
 * Authors:
 *  Dietmar Maurer (dietmar@proxmox.com)
 *  Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "qapi/error.h"
#include "block/block-copy.h"
#include "sysemu/block-backend.h"
#include "qemu/units.h"
#include "qemu/coroutine.h"
#include "block/aio_task.h"
#include "qemu/error-report.h"

#define BLOCK_COPY_MAX_COPY_RANGE (16 * MiB)
#define BLOCK_COPY_MAX_BUFFER (1 * MiB)
#define BLOCK_COPY_MAX_MEM (128 * MiB)
#define BLOCK_COPY_MAX_WORKERS 64
#define BLOCK_COPY_SLICE_TIME 100000000ULL /* ns */
#define BLOCK_COPY_CLUSTER_SIZE_DEFAULT (1 << 16)

typedef enum {
    COPY_READ_WRITE_CLUSTER,
    COPY_READ_WRITE,
    COPY_WRITE_ZEROES,
    COPY_RANGE_SMALL,
    COPY_RANGE_FULL
} BlockCopyMethod;

static coroutine_fn int block_copy_task_entry(AioTask *task);

typedef struct BlockCopyCallState {
    /* Fields initialized in block_copy_async() and never changed. */
    BlockCopyState *s;
    int64_t offset;
    int64_t bytes;
    int max_workers;
    int64_t max_chunk;
    bool ignore_ratelimit;
    BlockCopyAsyncCallbackFunc cb;
    void *cb_opaque;
    /* Coroutine where async block-copy is running */
    Coroutine *co;

    /* Fields whose state changes throughout the execution */
    bool finished; /* atomic */
    QemuCoSleep sleep; /* TODO: protect API with a lock */
    bool cancelled; /* atomic */
    /* To reference all call states from BlockCopyState */
    QLIST_ENTRY(BlockCopyCallState) list;

    /*
     * Fields that report information about return values and errors.
     * Protected by lock in BlockCopyState.
     */
    bool error_is_read;
    /*
     * @ret is set concurrently by tasks under mutex. Only set once by first
     * failed task (and untouched if no task failed).
     * After finishing (call_state->finished is true), it is not modified
     * anymore and may be safely read without mutex.
     */
    int ret;
} BlockCopyCallState;

typedef struct BlockCopyTask {
    AioTask task;

    /*
     * Fields initialized in block_copy_task_create()
     * and never changed.
     */
    BlockCopyState *s;
    BlockCopyCallState *call_state;
    int64_t offset;
    /*
     * @method can also be set again in the while loop of
     * block_copy_dirty_clusters(), but it is never accessed concurrently
     * because the only other function that reads it is
     * block_copy_task_entry() and it is invoked afterwards in the same
     * iteration.
     */
    BlockCopyMethod method;

    /*
     * Fields whose state changes throughout the execution
     * Protected by lock in BlockCopyState.
     */
    CoQueue wait_queue; /* coroutines blocked on this task */
    /*
     * Only protects against parallel reads while @bytes is updated in
     * block_copy_task_shrink().
     */
    int64_t bytes;
    QLIST_ENTRY(BlockCopyTask) list;
} BlockCopyTask;

static int64_t task_end(BlockCopyTask *task)
{
    return task->offset + task->bytes;
}

typedef struct BlockCopyState {
    /*
     * BdrvChild objects are not owned or managed by block-copy. They are
     * provided by the block-copy user, who is responsible for appropriate
     * permissions on these children.
     */
    BdrvChild *source;
    BdrvChild *target;

    /*
     * Fields initialized in block_copy_state_new()
     * and never changed.
     */
    int64_t cluster_size;
    int64_t max_transfer;
    uint64_t len;
    BdrvRequestFlags write_flags;

    /*
     * Fields whose state changes throughout the execution
     * Protected by lock.
     */
    CoMutex lock;
    int64_t in_flight_bytes;
    BlockCopyMethod method;
    QLIST_HEAD(, BlockCopyTask) tasks; /* All tasks from all block-copy calls */
    QLIST_HEAD(, BlockCopyCallState) calls;
    /*
     * skip_unallocated:
     *
     * Used by sync=top jobs, which first scan the source node for unallocated
     * areas and clear them in the copy_bitmap.  During this process, the bitmap
     * is thus not fully initialized: It may still have bits set for areas that
     * are unallocated and should actually not be copied.
     *
     * This is indicated by skip_unallocated.
     *
     * In this case, block_copy() will query the source's allocation status,
     * skip unallocated regions, clear them in the copy_bitmap, and invoke
     * block_copy_reset_unallocated() every time it does.
     */
    bool skip_unallocated; /* atomic */
    /* State fields that use a thread-safe API */
    BdrvDirtyBitmap *copy_bitmap;
    ProgressMeter *progress;
    SharedResource *mem;
    RateLimit rate_limit;
} BlockCopyState;

/* Called with lock held */
static BlockCopyTask *find_conflicting_task(BlockCopyState *s,
                                            int64_t offset, int64_t bytes)
{
    BlockCopyTask *t;

    QLIST_FOREACH(t, &s->tasks, list) {
        if (offset + bytes > t->offset && offset < t->offset + t->bytes) {
            return t;
        }
    }

    return NULL;
}

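/*
 * Illustrative sketch, not part of the original file: find_conflicting_task()
 * treats every task as the half-open byte range [offset, offset + bytes),
 * and two ranges conflict exactly when each one starts before the other
 * ends. The hypothetical helper below restates that predicate in isolation;
 * with 64 KiB ranges, [0, 64K) and [64K, 128K) do not conflict, while
 * [0, 128K) and [64K, 192K) do.
 */
static inline bool ranges_conflict_sketch(int64_t a_offset, int64_t a_bytes,
                                          int64_t b_offset, int64_t b_bytes)
{
    /* same test as the QLIST_FOREACH body above */
    return a_offset + a_bytes > b_offset && a_offset < b_offset + b_bytes;
}
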
/*
 * If there are no intersecting tasks, return false. Otherwise, wait for the
 * first intersecting task found to finish and return true.
 *
 * Called with lock held. May temporarily release the lock.
 * A return value of false proves that the lock was NOT released.
 */
static bool coroutine_fn block_copy_wait_one(BlockCopyState *s, int64_t offset,
                                             int64_t bytes)
{
    BlockCopyTask *task = find_conflicting_task(s, offset, bytes);

    if (!task) {
        return false;
    }

    qemu_co_queue_wait(&task->wait_queue, &s->lock);

    return true;
}

/* Called with lock held */
static int64_t block_copy_chunk_size(BlockCopyState *s)
{
    switch (s->method) {
    case COPY_READ_WRITE_CLUSTER:
        return s->cluster_size;
    case COPY_READ_WRITE:
    case COPY_RANGE_SMALL:
        return MIN(MAX(s->cluster_size, BLOCK_COPY_MAX_BUFFER),
                   s->max_transfer);
    case COPY_RANGE_FULL:
        return MIN(MAX(s->cluster_size, BLOCK_COPY_MAX_COPY_RANGE),
                   s->max_transfer);
    default:
        /* Cannot have COPY_WRITE_ZEROES here.  */
        abort();
    }
}

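/*
 * Worked example, not part of the original file: assuming a 64 KiB cluster
 * size and an 8 MiB max_transfer, buffered copying is capped at
 * MIN(MAX(64 KiB, 1 MiB), 8 MiB) = 1 MiB per chunk, while a proven-working
 * copy_range (COPY_RANGE_FULL) gets MIN(MAX(64 KiB, 16 MiB), 8 MiB) = 8 MiB.
 */
static inline void chunk_size_sketch(void)
{
    int64_t cluster_size = 64 * KiB;  /* hypothetical values */
    int64_t max_transfer = 8 * MiB;

    assert(MIN(MAX(cluster_size, BLOCK_COPY_MAX_BUFFER),
               max_transfer) == 1 * MiB);
    assert(MIN(MAX(cluster_size, BLOCK_COPY_MAX_COPY_RANGE),
               max_transfer) == 8 * MiB);
}
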
/*
 * Search for the first dirty area in the offset/bytes range and create a
 * task at the beginning of it.
 */
static coroutine_fn BlockCopyTask *
block_copy_task_create(BlockCopyState *s, BlockCopyCallState *call_state,
                       int64_t offset, int64_t bytes)
{
    BlockCopyTask *task;
    int64_t max_chunk;

    QEMU_LOCK_GUARD(&s->lock);
    max_chunk = MIN_NON_ZERO(block_copy_chunk_size(s), call_state->max_chunk);
    if (!bdrv_dirty_bitmap_next_dirty_area(s->copy_bitmap,
                                           offset, offset + bytes,
                                           max_chunk, &offset, &bytes))
    {
        return NULL;
    }

    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    bytes = QEMU_ALIGN_UP(bytes, s->cluster_size);

    /* the region is dirty, so no existing tasks are possible in it */
    assert(!find_conflicting_task(s, offset, bytes));

    bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
    s->in_flight_bytes += bytes;

    task = g_new(BlockCopyTask, 1);
    *task = (BlockCopyTask) {
        .task.func = block_copy_task_entry,
        .s = s,
        .call_state = call_state,
        .offset = offset,
        .bytes = bytes,
        .method = s->method,
    };
    qemu_co_queue_init(&task->wait_queue);
    QLIST_INSERT_HEAD(&s->tasks, task, list);

    return task;
}

/*
 * block_copy_task_shrink
 *
 * Drop the tail of the task, to be handled later. Set the dirty bits back
 * and wake up all tasks waiting for us (maybe some of them no longer
 * intersect with the shrunk task).
 */
static void coroutine_fn block_copy_task_shrink(BlockCopyTask *task,
                                                int64_t new_bytes)
{
    QEMU_LOCK_GUARD(&task->s->lock);
    if (new_bytes == task->bytes) {
        return;
    }

    assert(new_bytes > 0 && new_bytes < task->bytes);

    task->s->in_flight_bytes -= task->bytes - new_bytes;
    bdrv_set_dirty_bitmap(task->s->copy_bitmap,
                          task->offset + new_bytes, task->bytes - new_bytes);

    task->bytes = new_bytes;
    qemu_co_queue_restart_all(&task->wait_queue);
}

static void coroutine_fn block_copy_task_end(BlockCopyTask *task, int ret)
{
    QEMU_LOCK_GUARD(&task->s->lock);
    task->s->in_flight_bytes -= task->bytes;
    if (ret < 0) {
        bdrv_set_dirty_bitmap(task->s->copy_bitmap, task->offset, task->bytes);
    }
    QLIST_REMOVE(task, list);
    if (task->s->progress) {
        progress_set_remaining(task->s->progress,
                               bdrv_get_dirty_count(task->s->copy_bitmap) +
                               task->s->in_flight_bytes);
    }
    qemu_co_queue_restart_all(&task->wait_queue);
}

void block_copy_state_free(BlockCopyState *s)
{
    if (!s) {
        return;
    }

    ratelimit_destroy(&s->rate_limit);
    bdrv_release_dirty_bitmap(s->copy_bitmap);
    shres_destroy(s->mem);
    g_free(s);
}

static uint32_t block_copy_max_transfer(BdrvChild *source, BdrvChild *target)
{
    return MIN_NON_ZERO(INT_MAX,
                        MIN_NON_ZERO(source->bs->bl.max_transfer,
                                     target->bs->bl.max_transfer));
}

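/*
 * Illustrative sketch, not part of the original file: MIN_NON_ZERO (from
 * qemu/osdep.h) picks the smaller non-zero operand, so a max_transfer of 0
 * (meaning "no limit") never wins over a real limit on the other node.
 */
static inline void max_transfer_sketch(void)
{
    assert(MIN_NON_ZERO(0, 1 * MiB) == 1 * MiB);           /* one side unlimited */
    assert(MIN_NON_ZERO(512 * KiB, 1 * MiB) == 512 * KiB); /* both limited */
}
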
void block_copy_set_copy_opts(BlockCopyState *s, bool use_copy_range,
                              bool compress)
{
    /* Keep BDRV_REQ_SERIALISING set (or not set) in block_copy_state_new() */
    s->write_flags = (s->write_flags & BDRV_REQ_SERIALISING) |
        (compress ? BDRV_REQ_WRITE_COMPRESSED : 0);

    if (s->max_transfer < s->cluster_size) {
        /*
         * copy_range does not respect max_transfer. We don't want to bother
         * with requests smaller than the block-copy cluster size, so fall
         * back to buffered copying (read and write respect max_transfer on
         * their own behalf).
         */
        s->method = COPY_READ_WRITE_CLUSTER;
    } else if (compress) {
        /* Compression supports only cluster-size writes and no copy-range. */
        s->method = COPY_READ_WRITE_CLUSTER;
    } else {
        /*
         * If copy range is enabled, start with COPY_RANGE_SMALL until the
         * first successful copy_range (see block_copy_do_copy).
         */
        s->method = use_copy_range ? COPY_RANGE_SMALL : COPY_READ_WRITE;
    }
}

static int64_t block_copy_calculate_cluster_size(BlockDriverState *target,
                                                 Error **errp)
{
    int ret;
    BlockDriverInfo bdi;
    bool target_does_cow = bdrv_backing_chain_next(target);

    /*
     * If there is no backing file on the target, we cannot rely on COW if our
     * backup cluster size is smaller than the target cluster size. Even for
     * targets with a backing file, try to avoid COW if possible.
     */
    ret = bdrv_get_info(target, &bdi);
    if (ret == -ENOTSUP && !target_does_cow) {
        /* Cluster size is not defined */
        warn_report("The target block device doesn't provide "
                    "information about the block size and it doesn't have a "
                    "backing file. The default block size of %u bytes is "
                    "used. If the actual block size of the target exceeds "
                    "this default, the backup may be unusable",
                    BLOCK_COPY_CLUSTER_SIZE_DEFAULT);
        return BLOCK_COPY_CLUSTER_SIZE_DEFAULT;
    } else if (ret < 0 && !target_does_cow) {
        error_setg_errno(errp, -ret,
            "Couldn't determine the cluster size of the target image, "
            "which has no backing file");
        error_append_hint(errp,
            "Aborting, since this may create an unusable destination image\n");
        return ret;
    } else if (ret < 0 && target_does_cow) {
        /* Not fatal; just trudge on ahead. */
        return BLOCK_COPY_CLUSTER_SIZE_DEFAULT;
    }

    return MAX(BLOCK_COPY_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
}

BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
                                     Error **errp)
{
    BlockCopyState *s;
    int64_t cluster_size;
    BdrvDirtyBitmap *copy_bitmap;
    bool is_fleecing;

    cluster_size = block_copy_calculate_cluster_size(target->bs, errp);
    if (cluster_size < 0) {
        return NULL;
    }

    copy_bitmap = bdrv_create_dirty_bitmap(source->bs, cluster_size, NULL,
                                           errp);
    if (!copy_bitmap) {
        return NULL;
    }
    bdrv_disable_dirty_bitmap(copy_bitmap);

    /*
     * If the source is in the backing chain of the target, assume that the
     * target is going to be used for "image fleecing", i.e. it should
     * represent a kind of snapshot of the source at backup-start point in
     * time. And the target is going to be read by somebody (for example,
     * used as an NBD export) during the backup job.
     *
     * In this case, we need to add the BDRV_REQ_SERIALISING write flag to
     * avoid intersection of backup writes and third-party reads from the
     * target; otherwise, when reading from the target, we may occasionally
     * read data already updated by the guest.
     *
     * For more information see commit f8d59dfb40bb and test
     * tests/qemu-iotests/222
     */
    is_fleecing = bdrv_chain_contains(target->bs, source->bs);

    s = g_new(BlockCopyState, 1);
    *s = (BlockCopyState) {
        .source = source,
        .target = target,
        .copy_bitmap = copy_bitmap,
        .cluster_size = cluster_size,
        .len = bdrv_dirty_bitmap_size(copy_bitmap),
        .write_flags = (is_fleecing ? BDRV_REQ_SERIALISING : 0),
        .mem = shres_create(BLOCK_COPY_MAX_MEM),
        .max_transfer = QEMU_ALIGN_DOWN(
                                    block_copy_max_transfer(source, target),
                                    cluster_size),
    };

    block_copy_set_copy_opts(s, false, false);

    ratelimit_init(&s->rate_limit);
    qemu_co_mutex_init(&s->lock);
    QLIST_INIT(&s->tasks);
    QLIST_INIT(&s->calls);

    return s;
}

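/*
 * Hypothetical usage sketch, not part of the original file: how a caller
 * (e.g. a backup job) might set up and drive a synchronous block-copy from
 * a coroutine. The BdrvChild pair and its permissions are assumed to be set
 * up by the caller; error handling is elided.
 */
static inline int coroutine_fn block_copy_usage_sketch(BdrvChild *source,
                                                       BdrvChild *target,
                                                       Error **errp)
{
    BlockCopyState *s = block_copy_state_new(source, target, errp);
    int ret;

    if (!s) {
        return -EINVAL;
    }

    /* copy the first 16 MiB (cluster-aligned), honouring the rate limit */
    ret = block_copy(s, 0, 16 * MiB, false);

    block_copy_state_free(s);
    return ret;
}
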
/* Only set before running the job, no need for locking. */
void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm)
{
    s->progress = pm;
}

/*
 * Takes ownership of @task
 *
 * If pool is NULL, run the task directly; otherwise schedule it into the
 * pool.
 *
 * Returns: task.func return code if pool is NULL
 *          otherwise -ECANCELED if pool status is bad
 *          otherwise 0 (successfully scheduled)
 */
static coroutine_fn int block_copy_task_run(AioTaskPool *pool,
                                            BlockCopyTask *task)
{
    if (!pool) {
        int ret = task->task.func(&task->task);

        g_free(task);
        return ret;
    }

    aio_task_pool_wait_slot(pool);
    if (aio_task_pool_status(pool) < 0) {
        co_put_to_shres(task->s->mem, task->bytes);
        block_copy_task_end(task, -ECANCELED);
        g_free(task);
        return -ECANCELED;
    }

    aio_task_pool_start_task(pool, &task->task);

    return 0;
}

/*
 * block_copy_do_copy
 *
 * Copy a cluster-aligned chunk. The requested region is allowed to exceed
 * s->len only to cover the last cluster when s->len is not aligned to
 * clusters.
 *
 * No synchronization here: neither bitmap nor intersecting-request handling,
 * only the copy itself.
 *
 * @method is an in-out argument, so that copy_range can be either extended to
 * a full-size buffer or disabled if the copy_range attempt fails.  The output
 * value of @method should be used for subsequent tasks.
 * Returns 0 on success.
 */
static int coroutine_fn block_copy_do_copy(BlockCopyState *s,
                                           int64_t offset, int64_t bytes,
                                           BlockCopyMethod *method,
                                           bool *error_is_read)
{
    int ret;
    int64_t nbytes = MIN(offset + bytes, s->len) - offset;
    void *bounce_buffer = NULL;

    assert(offset >= 0 && bytes > 0 && INT64_MAX - offset >= bytes);
    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));
    assert(offset < s->len);
    assert(offset + bytes <= s->len ||
           offset + bytes == QEMU_ALIGN_UP(s->len, s->cluster_size));
    assert(nbytes < INT_MAX);

    switch (*method) {
    case COPY_WRITE_ZEROES:
        ret = bdrv_co_pwrite_zeroes(s->target, offset, nbytes, s->write_flags &
                                    ~BDRV_REQ_WRITE_COMPRESSED);
        if (ret < 0) {
            trace_block_copy_write_zeroes_fail(s, offset, ret);
            *error_is_read = false;
        }
        return ret;

    case COPY_RANGE_SMALL:
    case COPY_RANGE_FULL:
        ret = bdrv_co_copy_range(s->source, offset, s->target, offset, nbytes,
                                 0, s->write_flags);
        if (ret >= 0) {
            /* Successful copy-range, increase chunk size.  */
            *method = COPY_RANGE_FULL;
            return 0;
        }

        trace_block_copy_copy_range_fail(s, offset, ret);
        *method = COPY_READ_WRITE;
        /* Fall through to read+write with allocated buffer */

    case COPY_READ_WRITE_CLUSTER:
    case COPY_READ_WRITE:
        /*
         * In case of a failed copy_range request above, we may proceed with
         * a buffered request larger than BLOCK_COPY_MAX_BUFFER.
         * Still, further requests will be properly limited, so we don't care
         * too much. Moreover, the most likely case (copy_range is unsupported
         * for the configuration, so the very first copy_range request fails)
         * is handled by setting a large copy_size only after the first
         * successful copy_range.
         */

        bounce_buffer = qemu_blockalign(s->source->bs, nbytes);

        ret = bdrv_co_pread(s->source, offset, nbytes, bounce_buffer, 0);
        if (ret < 0) {
            trace_block_copy_read_fail(s, offset, ret);
            *error_is_read = true;
            goto out;
        }

        ret = bdrv_co_pwrite(s->target, offset, nbytes, bounce_buffer,
                             s->write_flags);
        if (ret < 0) {
            trace_block_copy_write_fail(s, offset, ret);
            *error_is_read = false;
            goto out;
        }

    out:
        qemu_vfree(bounce_buffer);
        break;

    default:
        abort();
    }

    return ret;
}

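/*
 * Illustrative sketch, not part of the original file: how @method evolves
 * across tasks as a result of block_copy_do_copy(). A working copy_range
 * graduates to full-size chunks; a failing one permanently falls back to
 * buffered read+write. Buffered methods never switch back.
 */
static inline BlockCopyMethod next_method_sketch(BlockCopyMethod cur,
                                                 bool copy_range_ok)
{
    if (cur == COPY_RANGE_SMALL || cur == COPY_RANGE_FULL) {
        return copy_range_ok ? COPY_RANGE_FULL : COPY_READ_WRITE;
    }
    return cur; /* COPY_READ_WRITE(_CLUSTER) and COPY_WRITE_ZEROES stay */
}
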
static coroutine_fn int block_copy_task_entry(AioTask *task)
{
    BlockCopyTask *t = container_of(task, BlockCopyTask, task);
    BlockCopyState *s = t->s;
    bool error_is_read = false;
    BlockCopyMethod method = t->method;
    int ret;

    ret = block_copy_do_copy(s, t->offset, t->bytes, &method, &error_is_read);

    WITH_QEMU_LOCK_GUARD(&s->lock) {
        if (s->method == t->method) {
            s->method = method;
        }

        if (ret < 0) {
            if (!t->call_state->ret) {
                t->call_state->ret = ret;
                t->call_state->error_is_read = error_is_read;
            }
        } else if (s->progress) {
            progress_work_done(s->progress, t->bytes);
        }
    }
    co_put_to_shres(s->mem, t->bytes);
    block_copy_task_end(t, ret);

    return ret;
}

static int block_copy_block_status(BlockCopyState *s, int64_t offset,
                                   int64_t bytes, int64_t *pnum)
{
    int64_t num;
    BlockDriverState *base;
    int ret;

    if (qatomic_read(&s->skip_unallocated)) {
        base = bdrv_backing_chain_next(s->source->bs);
    } else {
        base = NULL;
    }

    ret = bdrv_block_status_above(s->source->bs, base, offset, bytes, &num,
                                  NULL, NULL);
    if (ret < 0 || num < s->cluster_size) {
        /*
         * On error, or if we failed to obtain a large enough chunk, just
         * fall back to copying one cluster.
         */
        num = s->cluster_size;
        ret = BDRV_BLOCK_ALLOCATED | BDRV_BLOCK_DATA;
    } else if (offset + num == s->len) {
        num = QEMU_ALIGN_UP(num, s->cluster_size);
    } else {
        num = QEMU_ALIGN_DOWN(num, s->cluster_size);
    }

    *pnum = num;
    return ret;
}

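/*
 * Worked example, not part of the original file: with 64 KiB clusters, a
 * 150 KiB block-status run in the middle of the image is rounded down to
 * 128 KiB so the task stays cluster-aligned, while the same run ending
 * exactly at s->len is rounded up to 192 KiB so the final partial cluster
 * is still covered.
 */
static inline void status_alignment_sketch(void)
{
    int64_t cluster = 64 * KiB;

    assert(QEMU_ALIGN_DOWN(150 * KiB, cluster) == 128 * KiB);
    assert(QEMU_ALIGN_UP(150 * KiB, cluster) == 192 * KiB);
}
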
/*
 * Check if the cluster starting at offset is allocated or not.
 * Return via @pnum the number of contiguous clusters sharing this allocation.
 */
static int block_copy_is_cluster_allocated(BlockCopyState *s, int64_t offset,
                                           int64_t *pnum)
{
    BlockDriverState *bs = s->source->bs;
    int64_t count, total_count = 0;
    int64_t bytes = s->len - offset;
    int ret;

    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));

    while (true) {
        ret = bdrv_is_allocated(bs, offset, bytes, &count);
        if (ret < 0) {
            return ret;
        }

        total_count += count;

        if (ret || count == 0) {
            /*
             * ret: partial segment(s) are considered allocated.
             * otherwise: unallocated tail is treated as an entire segment.
             */
            *pnum = DIV_ROUND_UP(total_count, s->cluster_size);
            return ret;
        }

        /* Unallocated segment(s) with uncertain following segment(s) */
        if (total_count >= s->cluster_size) {
            *pnum = total_count / s->cluster_size;
            return 0;
        }

        offset += count;
        bytes -= count;
    }
}

/*
 * Reset bits in copy_bitmap starting at offset if they represent unallocated
 * data in the image. May reset subsequent contiguous bits.
 * @return 0 when the cluster at @offset was unallocated,
 *         1 otherwise, and a negative errno on error.
 */
int64_t block_copy_reset_unallocated(BlockCopyState *s,
                                     int64_t offset, int64_t *count)
{
    int ret;
    int64_t clusters, bytes;

    ret = block_copy_is_cluster_allocated(s, offset, &clusters);
    if (ret < 0) {
        return ret;
    }

    bytes = clusters * s->cluster_size;

    if (!ret) {
        qemu_co_mutex_lock(&s->lock);
        bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
        if (s->progress) {
            progress_set_remaining(s->progress,
                                   bdrv_get_dirty_count(s->copy_bitmap) +
                                   s->in_flight_bytes);
        }
        qemu_co_mutex_unlock(&s->lock);
    }

    *count = bytes;
    return ret;
}

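/*
 * Hypothetical usage sketch, not part of the original file: a sync=top job
 * would pre-scan the source with block_copy_reset_unallocated() so that the
 * copy_bitmap only keeps bits for allocated areas (see the skip_unallocated
 * comment in BlockCopyState above).
 */
static inline int coroutine_fn prescan_unallocated_sketch(BlockCopyState *s,
                                                          int64_t len)
{
    int64_t offset, count;

    for (offset = 0; offset < len; offset += count) {
        int64_t ret = block_copy_reset_unallocated(s, offset, &count);
        if (ret < 0) {
            return ret; /* propagate the error */
        }
    }
    return 0;
}
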
/*
 * block_copy_dirty_clusters
 *
 * Copy dirty clusters in the @offset/@bytes range.
 * Returns 1 if dirty clusters were found and successfully copied, 0 if no
 * dirty clusters were found, and -errno on failure.
 */
static int coroutine_fn
block_copy_dirty_clusters(BlockCopyCallState *call_state)
{
    BlockCopyState *s = call_state->s;
    int64_t offset = call_state->offset;
    int64_t bytes = call_state->bytes;

    int ret = 0;
    bool found_dirty = false;
    int64_t end = offset + bytes;
    AioTaskPool *aio = NULL;

    /*
     * The block_copy() user is responsible for keeping the source and target
     * in the same aio context.
     */
    assert(bdrv_get_aio_context(s->source->bs) ==
           bdrv_get_aio_context(s->target->bs));

    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));

    while (bytes && aio_task_pool_status(aio) == 0 &&
           !qatomic_read(&call_state->cancelled)) {
        BlockCopyTask *task;
        int64_t status_bytes;

        task = block_copy_task_create(s, call_state, offset, bytes);
        if (!task) {
            /* No more dirty bits in the bitmap */
            trace_block_copy_skip_range(s, offset, bytes);
            break;
        }
        if (task->offset > offset) {
            trace_block_copy_skip_range(s, offset, task->offset - offset);
        }

        found_dirty = true;

        ret = block_copy_block_status(s, task->offset, task->bytes,
                                      &status_bytes);
        assert(ret >= 0); /* never fails */
        if (status_bytes < task->bytes) {
            block_copy_task_shrink(task, status_bytes);
        }
        if (qatomic_read(&s->skip_unallocated) &&
            !(ret & BDRV_BLOCK_ALLOCATED)) {
            block_copy_task_end(task, 0);
            trace_block_copy_skip_range(s, task->offset, task->bytes);
            offset = task_end(task);
            bytes = end - offset;
            g_free(task);
            continue;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            task->method = COPY_WRITE_ZEROES;
        }

        if (!call_state->ignore_ratelimit) {
            uint64_t ns = ratelimit_calculate_delay(&s->rate_limit, 0);
            if (ns > 0) {
                block_copy_task_end(task, -EAGAIN);
                g_free(task);
                qemu_co_sleep_ns_wakeable(&call_state->sleep,
                                          QEMU_CLOCK_REALTIME, ns);
                continue;
            }
        }

        ratelimit_calculate_delay(&s->rate_limit, task->bytes);

        trace_block_copy_process(s, task->offset);

        co_get_from_shres(s->mem, task->bytes);

        offset = task_end(task);
        bytes = end - offset;

        if (!aio && bytes) {
            aio = aio_task_pool_new(call_state->max_workers);
        }

        ret = block_copy_task_run(aio, task);
        if (ret < 0) {
            goto out;
        }
    }

out:
    if (aio) {
        aio_task_pool_wait_all(aio);

        /*
         * We are not really interested in -ECANCELED returned from
         * block_copy_task_run. If it fails, it means some task already
         * failed for a real reason, so let's return the first failure.
         * Still, assert that we don't rewrite failure by success.
         *
         * Note: ret may be positive here because of the block-status result.
         */
        assert(ret >= 0 || aio_task_pool_status(aio) < 0);
        ret = aio_task_pool_status(aio);

        aio_task_pool_free(aio);
    }

    return ret < 0 ? ret : found_dirty;
}

void block_copy_kick(BlockCopyCallState *call_state)
{
    qemu_co_sleep_wake(&call_state->sleep);
}

/*
 * block_copy_common
 *
 * Copy the requested region according to the dirty bitmap.
 * Collaborate with parallel block_copy requests: if they succeed, it helps
 * us. If they fail, we will retry not-copied regions. So, if we return an
 * error, it means that some I/O operation failed in the context of _this_
 * block_copy call, not some parallel operation.
 */
static int coroutine_fn block_copy_common(BlockCopyCallState *call_state)
{
    int ret;
    BlockCopyState *s = call_state->s;

    qemu_co_mutex_lock(&s->lock);
    QLIST_INSERT_HEAD(&s->calls, call_state, list);
    qemu_co_mutex_unlock(&s->lock);

    do {
        ret = block_copy_dirty_clusters(call_state);

        if (ret == 0 && !qatomic_read(&call_state->cancelled)) {
            WITH_QEMU_LOCK_GUARD(&s->lock) {
                /*
                 * Check that there is no task we still need to wait for to
                 * complete.
                 */
                ret = block_copy_wait_one(s, call_state->offset,
                                          call_state->bytes);
                if (ret == 0) {
                    /*
                     * No pending tasks, but check again the bitmap in this
                     * same critical section, since a task might have failed
                     * between this and the critical section in
                     * block_copy_dirty_clusters().
                     *
                     * A block_copy_wait_one return value of 0 also means
                     * that it didn't release the lock. So, we are still in
                     * the same critical section, not interrupted by any
                     * concurrent access to state.
                     */
                    ret = bdrv_dirty_bitmap_next_dirty(s->copy_bitmap,
                                                       call_state->offset,
                                                       call_state->bytes) >= 0;
                }
            }
        }

        /*
         * We retry in two cases:
         * 1. Some progress was made
         *    Something was copied, which means that there were yield points
         *    and some new dirty bits may have appeared (due to failed
         *    parallel block-copy requests).
         * 2. We have waited for some intersecting block-copy request
         *    It may have failed and produced new dirty bits.
         */
    } while (ret > 0 && !qatomic_read(&call_state->cancelled));

    qatomic_store_release(&call_state->finished, true);

    if (call_state->cb) {
        call_state->cb(call_state->cb_opaque);
    }

    qemu_co_mutex_lock(&s->lock);
    QLIST_REMOVE(call_state, list);
    qemu_co_mutex_unlock(&s->lock);

    return ret;
}

int coroutine_fn block_copy(BlockCopyState *s, int64_t start, int64_t bytes,
                            bool ignore_ratelimit)
{
    BlockCopyCallState call_state = {
        .s = s,
        .offset = start,
        .bytes = bytes,
        .ignore_ratelimit = ignore_ratelimit,
        .max_workers = BLOCK_COPY_MAX_WORKERS,
    };

    return block_copy_common(&call_state);
}

static void coroutine_fn block_copy_async_co_entry(void *opaque)
{
    block_copy_common(opaque);
}

BlockCopyCallState *block_copy_async(BlockCopyState *s,
                                     int64_t offset, int64_t bytes,
                                     int max_workers, int64_t max_chunk,
                                     BlockCopyAsyncCallbackFunc cb,
                                     void *cb_opaque)
{
    BlockCopyCallState *call_state = g_new(BlockCopyCallState, 1);

    *call_state = (BlockCopyCallState) {
        .s = s,
        .offset = offset,
        .bytes = bytes,
        .max_workers = max_workers,
        .max_chunk = max_chunk,
        .cb = cb,
        .cb_opaque = cb_opaque,

        .co = qemu_coroutine_create(block_copy_async_co_entry, call_state),
    };

    qemu_coroutine_enter(call_state->co);

    return call_state;
}

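/*
 * Hypothetical usage sketch, not part of the original file: starting an
 * asynchronous copy with a completion callback. A max_chunk of 0 leaves the
 * chunk size unlimited (MIN_NON_ZERO ignores it in block_copy_task_create()).
 */
static void copy_done_sketch_cb(void *opaque)
{
    bool *done = opaque; /* a real caller might schedule a BH instead */

    *done = true;
}

static inline BlockCopyCallState *start_async_copy_sketch(BlockCopyState *s,
                                                          bool *done)
{
    return block_copy_async(s, 0, 16 * MiB, BLOCK_COPY_MAX_WORKERS, 0,
                            copy_done_sketch_cb, done);
}
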
void block_copy_call_free(BlockCopyCallState *call_state)
{
    if (!call_state) {
        return;
    }

    assert(qatomic_read(&call_state->finished));
    g_free(call_state);
}

bool block_copy_call_finished(BlockCopyCallState *call_state)
{
    return qatomic_read(&call_state->finished);
}

bool block_copy_call_succeeded(BlockCopyCallState *call_state)
{
    return qatomic_load_acquire(&call_state->finished) &&
           !qatomic_read(&call_state->cancelled) &&
           call_state->ret == 0;
}

bool block_copy_call_failed(BlockCopyCallState *call_state)
{
    return qatomic_load_acquire(&call_state->finished) &&
           !qatomic_read(&call_state->cancelled) &&
           call_state->ret < 0;
}

bool block_copy_call_cancelled(BlockCopyCallState *call_state)
{
    return qatomic_read(&call_state->cancelled);
}

int block_copy_call_status(BlockCopyCallState *call_state, bool *error_is_read)
{
    assert(qatomic_load_acquire(&call_state->finished));
    if (error_is_read) {
        *error_is_read = call_state->error_is_read;
    }
    return call_state->ret;
}

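/*
 * Illustrative sketch, not part of the original file: inspecting a finished
 * call state, e.g. from the completion-callback path, and releasing it.
 * Must only run once block_copy_call_finished() is true.
 */
static inline void inspect_finished_call_sketch(BlockCopyCallState *call_state)
{
    if (block_copy_call_cancelled(call_state)) {
        error_report("block-copy was cancelled");
    } else if (block_copy_call_failed(call_state)) {
        bool error_is_read;
        int ret = block_copy_call_status(call_state, &error_is_read);

        error_report("block-copy failed on %s: %d",
                     error_is_read ? "read" : "write", ret);
    }
    block_copy_call_free(call_state);
}
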
/*
 * Note that cancelling and finishing are racy.
 * A user can cancel a block-copy that is already finished.
 */
void block_copy_call_cancel(BlockCopyCallState *call_state)
{
    qatomic_set(&call_state->cancelled, true);
    block_copy_kick(call_state);
}

BdrvDirtyBitmap *block_copy_dirty_bitmap(BlockCopyState *s)
{
    return s->copy_bitmap;
}

int64_t block_copy_cluster_size(BlockCopyState *s)
{
    return s->cluster_size;
}

void block_copy_set_skip_unallocated(BlockCopyState *s, bool skip)
{
    qatomic_set(&s->skip_unallocated, skip);
}

void block_copy_set_speed(BlockCopyState *s, uint64_t speed)
{
    ratelimit_set_speed(&s->rate_limit, speed, BLOCK_COPY_SLICE_TIME);

    /*
     * Note: it's good to kick all call states from here, but it should be
     * done only from a coroutine, to not crash if the s->calls list changed
     * while entering one call. So for now, the only user of this function
     * kicks its single call_state by hand.
     */
}