cachepc-qemu

Fork of AMDESE/qemu with changes for cachepc side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu

xen-block.c (24923B)


/*
 * Copyright (c) 2018  Citrix Systems Inc.
 * (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; under version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qapi/error.h"
#include "hw/xen/xen_common.h"
#include "hw/block/xen_blkif.h"
#include "sysemu/block-backend.h"
#include "sysemu/iothread.h"
#include "xen-block.h"

typedef struct XenBlockRequest {
    blkif_request_t req;
    int16_t status;
    off_t start;
    QEMUIOVector v;
    void *buf;
    size_t size;
    int presync;
    int aio_inflight;
    int aio_errors;
    XenBlockDataPlane *dataplane;
    QLIST_ENTRY(XenBlockRequest) list;
    BlockAcctCookie acct;
} XenBlockRequest;

struct XenBlockDataPlane {
    XenDevice *xendev;
    XenEventChannel *event_channel;
    unsigned int *ring_ref;
    unsigned int nr_ring_ref;
    void *sring;
    int protocol;
    blkif_back_rings_t rings;
    int more_work;
    QLIST_HEAD(inflight_head, XenBlockRequest) inflight;
    QLIST_HEAD(freelist_head, XenBlockRequest) freelist;
    int requests_total;
    int requests_inflight;
    unsigned int max_requests;
    BlockBackend *blk;
    unsigned int sector_size;
    QEMUBH *bh;
    IOThread *iothread;
    AioContext *ctx;
};

static int xen_block_send_response(XenBlockRequest *request);

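/* Return a request to a pristine state so it can be reused from the freelist. */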
static void reset_request(XenBlockRequest *request)
{
    memset(&request->req, 0, sizeof(request->req));
    request->status = 0;
    request->start = 0;
    request->size = 0;
    request->presync = 0;

    request->aio_inflight = 0;
    request->aio_errors = 0;

    request->dataplane = NULL;
    memset(&request->list, 0, sizeof(request->list));
    memset(&request->acct, 0, sizeof(request->acct));

    qemu_iovec_reset(&request->v);
}

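/*
 * Take a request from the freelist, or allocate a new one while fewer than
 * max_requests exist. Returns NULL once the ring's request quota is used up.
 */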
static XenBlockRequest *xen_block_start_request(XenBlockDataPlane *dataplane)
{
    XenBlockRequest *request = NULL;

    if (QLIST_EMPTY(&dataplane->freelist)) {
        if (dataplane->requests_total >= dataplane->max_requests) {
            goto out;
        }
        /* allocate new struct */
        request = g_malloc0(sizeof(*request));
        request->dataplane = dataplane;
        /*
         * We cannot need more pages per request than this, and since we
         * re-use requests, allocate the memory once here. It will be freed
         * in xen_block_dataplane_destroy() when the request list is freed.
         */
        request->buf = qemu_memalign(XC_PAGE_SIZE,
                                     BLKIF_MAX_SEGMENTS_PER_REQUEST *
                                     XC_PAGE_SIZE);
        dataplane->requests_total++;
        qemu_iovec_init(&request->v, 1);
    } else {
        /* get one from freelist */
        request = QLIST_FIRST(&dataplane->freelist);
        QLIST_REMOVE(request, list);
    }
    QLIST_INSERT_HEAD(&dataplane->inflight, request, list);
    dataplane->requests_inflight++;

out:
    return request;
}

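/*
 * Send the response for a finished request, notify the frontend via the
 * event channel if required, and recycle the request onto the freelist.
 */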
static void xen_block_complete_request(XenBlockRequest *request)
{
    XenBlockDataPlane *dataplane = request->dataplane;

    if (xen_block_send_response(request)) {
        Error *local_err = NULL;

        xen_device_notify_event_channel(dataplane->xendev,
                                        dataplane->event_channel,
                                        &local_err);
        if (local_err) {
            error_report_err(local_err);
        }
    }

    QLIST_REMOVE(request, list);
    dataplane->requests_inflight--;
    reset_request(request);
    request->dataplane = dataplane;
    QLIST_INSERT_HEAD(&dataplane->freelist, request, list);
}

/*
 * translate request into iovec + start offset
 * do sanity checks along the way
 */
static int xen_block_parse_request(XenBlockRequest *request)
{
    XenBlockDataPlane *dataplane = request->dataplane;
    size_t len;
    int i;

    switch (request->req.operation) {
    case BLKIF_OP_READ:
        break;
    case BLKIF_OP_FLUSH_DISKCACHE:
        request->presync = 1;
        if (!request->req.nr_segments) {
            return 0;
        }
        /* fall through */
    case BLKIF_OP_WRITE:
        break;
    case BLKIF_OP_DISCARD:
        return 0;
    default:
        error_report("error: unknown operation (%d)", request->req.operation);
        goto err;
    };

    if (request->req.operation != BLKIF_OP_READ &&
        !blk_is_writable(dataplane->blk)) {
        error_report("error: write req for ro device");
        goto err;
    }

    request->start = request->req.sector_number * dataplane->sector_size;
    for (i = 0; i < request->req.nr_segments; i++) {
        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            error_report("error: nr_segments too big");
            goto err;
        }
        if (request->req.seg[i].first_sect > request->req.seg[i].last_sect) {
            error_report("error: first > last sector");
            goto err;
        }
        if (request->req.seg[i].last_sect * dataplane->sector_size >=
            XC_PAGE_SIZE) {
            error_report("error: page crossing");
            goto err;
        }

        len = (request->req.seg[i].last_sect -
               request->req.seg[i].first_sect + 1) * dataplane->sector_size;
        request->size += len;
    }
    if (request->start + request->size > blk_getlength(dataplane->blk)) {
        error_report("error: access beyond end of file");
        goto err;
    }
    return 0;

err:
    request->status = BLKIF_RSP_ERROR;
    return -1;
}

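/*
 * Copy request data between the bounce buffer and the guest's granted
 * pages: towards the guest for reads, from the guest for writes.
 */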
static int xen_block_copy_request(XenBlockRequest *request)
{
    XenBlockDataPlane *dataplane = request->dataplane;
    XenDevice *xendev = dataplane->xendev;
    XenDeviceGrantCopySegment segs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int i, count;
    bool to_domain = (request->req.operation == BLKIF_OP_READ);
    void *virt = request->buf;
    Error *local_err = NULL;

    if (request->req.nr_segments == 0) {
        return 0;
    }

    count = request->req.nr_segments;

    for (i = 0; i < count; i++) {
        if (to_domain) {
            segs[i].dest.foreign.ref = request->req.seg[i].gref;
            segs[i].dest.foreign.offset = request->req.seg[i].first_sect *
                dataplane->sector_size;
            segs[i].source.virt = virt;
        } else {
            segs[i].source.foreign.ref = request->req.seg[i].gref;
            segs[i].source.foreign.offset = request->req.seg[i].first_sect *
                dataplane->sector_size;
            segs[i].dest.virt = virt;
        }
        segs[i].len = (request->req.seg[i].last_sect -
                       request->req.seg[i].first_sect + 1) *
                      dataplane->sector_size;
        virt += segs[i].len;
    }

    xen_device_copy_grant_refs(xendev, to_domain, segs, count, &local_err);

    if (local_err) {
        error_reportf_err(local_err, "failed to copy data: ");

        request->aio_errors++;
        return -1;
    }

    return 0;
}

static int xen_block_do_aio(XenBlockRequest *request);

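/*
 * AIO completion callback: account any error, re-submit the request once a
 * presync flush has finished, and when the last in-flight AIO completes,
 * copy read data back to the guest and complete the request.
 */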
static void xen_block_complete_aio(void *opaque, int ret)
{
    XenBlockRequest *request = opaque;
    XenBlockDataPlane *dataplane = request->dataplane;

    aio_context_acquire(dataplane->ctx);

    if (ret != 0) {
        error_report("%s I/O error",
                     request->req.operation == BLKIF_OP_READ ?
                     "read" : "write");
        request->aio_errors++;
    }

    request->aio_inflight--;
    if (request->presync) {
        request->presync = 0;
        xen_block_do_aio(request);
        goto done;
    }
    if (request->aio_inflight > 0) {
        goto done;
    }

    switch (request->req.operation) {
    case BLKIF_OP_READ:
        /* in case of failure request->aio_errors is increased */
        if (ret == 0) {
            xen_block_copy_request(request);
        }
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
    default:
        break;
    }

    request->status = request->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;

    switch (request->req.operation) {
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!request->req.nr_segments) {
            break;
        }
        /* fall through */
    case BLKIF_OP_READ:
        if (request->status == BLKIF_RSP_OKAY) {
            block_acct_done(blk_get_stats(dataplane->blk), &request->acct);
        } else {
            block_acct_failed(blk_get_stats(dataplane->blk), &request->acct);
        }
        break;
    case BLKIF_OP_DISCARD:
    default:
        break;
    }

    xen_block_complete_request(request);

    if (dataplane->more_work) {
        qemu_bh_schedule(dataplane->bh);
    }

done:
    aio_context_release(dataplane->ctx);
}

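/*
 * Issue a discard for the given sector range, split into chunks of at most
 * BDRV_REQUEST_MAX_BYTES. Returns false if the range would overflow.
 */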
static bool xen_block_split_discard(XenBlockRequest *request,
                                    blkif_sector_t sector_number,
                                    uint64_t nr_sectors)
{
    XenBlockDataPlane *dataplane = request->dataplane;
    int64_t byte_offset;
    int byte_chunk;
    uint64_t byte_remaining;
    uint64_t sec_start = sector_number;
    uint64_t sec_count = nr_sectors;

    /* Wrap around, or overflowing byte limit? */
    if (sec_start + sec_count < sec_count ||
        sec_start + sec_count > INT64_MAX / dataplane->sector_size) {
        return false;
    }

    byte_offset = sec_start * dataplane->sector_size;
    byte_remaining = sec_count * dataplane->sector_size;

    do {
        byte_chunk = byte_remaining > BDRV_REQUEST_MAX_BYTES ?
            BDRV_REQUEST_MAX_BYTES : byte_remaining;
        request->aio_inflight++;
        blk_aio_pdiscard(dataplane->blk, byte_offset, byte_chunk,
                         xen_block_complete_aio, request);
        byte_remaining -= byte_chunk;
        byte_offset += byte_chunk;
    } while (byte_remaining > 0);

    return true;
}

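/*
 * Submit the block-layer AIO for a parsed request (read, write, flush or
 * discard). Completion is signalled through xen_block_complete_aio().
 */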
static int xen_block_do_aio(XenBlockRequest *request)
{
    XenBlockDataPlane *dataplane = request->dataplane;

    if (request->req.nr_segments &&
        (request->req.operation == BLKIF_OP_WRITE ||
         request->req.operation == BLKIF_OP_FLUSH_DISKCACHE) &&
        xen_block_copy_request(request)) {
        goto err;
    }

    request->aio_inflight++;
    if (request->presync) {
        blk_aio_flush(request->dataplane->blk, xen_block_complete_aio,
                      request);
        return 0;
    }

    switch (request->req.operation) {
    case BLKIF_OP_READ:
        qemu_iovec_add(&request->v, request->buf, request->size);
        block_acct_start(blk_get_stats(dataplane->blk), &request->acct,
                         request->v.size, BLOCK_ACCT_READ);
        request->aio_inflight++;
        blk_aio_preadv(dataplane->blk, request->start, &request->v, 0,
                       xen_block_complete_aio, request);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!request->req.nr_segments) {
            break;
        }

        qemu_iovec_add(&request->v, request->buf, request->size);
        block_acct_start(blk_get_stats(dataplane->blk), &request->acct,
                         request->v.size,
                         request->req.operation == BLKIF_OP_WRITE ?
                         BLOCK_ACCT_WRITE : BLOCK_ACCT_FLUSH);
        request->aio_inflight++;
        blk_aio_pwritev(dataplane->blk, request->start, &request->v, 0,
                        xen_block_complete_aio, request);
        break;
    case BLKIF_OP_DISCARD:
    {
        struct blkif_request_discard *req = (void *)&request->req;
        if (!xen_block_split_discard(request, req->sector_number,
                                     req->nr_sectors)) {
            goto err;
        }
        break;
    }
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    xen_block_complete_aio(request, 0);

    return 0;

err:
    request->status = BLKIF_RSP_ERROR;
    xen_block_complete_request(request);
    return -1;
}

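/*
 * Place the response on the shared ring in the negotiated protocol layout.
 * Returns non-zero if the frontend needs to be notified; also flags more
 * work if further requests are pending on the ring.
 */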
static int xen_block_send_response(XenBlockRequest *request)
{
    XenBlockDataPlane *dataplane = request->dataplane;
    int send_notify = 0;
    int have_requests = 0;
    blkif_response_t *resp;

    /* Place on the response ring for the relevant domain. */
    switch (dataplane->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        resp = (blkif_response_t *)RING_GET_RESPONSE(
            &dataplane->rings.native,
            dataplane->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        resp = (blkif_response_t *)RING_GET_RESPONSE(
            &dataplane->rings.x86_32_part,
            dataplane->rings.x86_32_part.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        resp = (blkif_response_t *)RING_GET_RESPONSE(
            &dataplane->rings.x86_64_part,
            dataplane->rings.x86_64_part.rsp_prod_pvt);
        break;
    default:
        return 0;
    }

    resp->id = request->req.id;
    resp->operation = request->req.operation;
    resp->status = request->status;

    dataplane->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&dataplane->rings.common,
                                         send_notify);
    if (dataplane->rings.common.rsp_prod_pvt ==
        dataplane->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&dataplane->rings.common,
                                      have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&dataplane->rings.common)) {
        have_requests = 1;
    }

    if (have_requests) {
        dataplane->more_work++;
    }
    return send_notify;
}

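/*
 * Copy the request at ring index 'rc' out of the shared ring, converting
 * from the 32-bit or 64-bit x86 layout where necessary.
 */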
static int xen_block_get_request(XenBlockDataPlane *dataplane,
                                 XenBlockRequest *request, RING_IDX rc)
{
    switch (dataplane->protocol) {
    case BLKIF_PROTOCOL_NATIVE: {
        blkif_request_t *req =
            RING_GET_REQUEST(&dataplane->rings.native, rc);

        memcpy(&request->req, req, sizeof(request->req));
        break;
    }
    case BLKIF_PROTOCOL_X86_32: {
        blkif_x86_32_request_t *req =
            RING_GET_REQUEST(&dataplane->rings.x86_32_part, rc);

        blkif_get_x86_32_req(&request->req, req);
        break;
    }
    case BLKIF_PROTOCOL_X86_64: {
        blkif_x86_64_request_t *req =
            RING_GET_REQUEST(&dataplane->rings.x86_64_part, rc);

        blkif_get_x86_64_req(&request->req, req);
        break;
    }
    }
    /* Prevent the compiler from accessing the on-ring fields instead. */
    barrier();
    return 0;
}

/*
 * Threshold of in-flight requests above which we will start using
 * blk_io_plug()/blk_io_unplug() to batch requests.
 */
#define IO_PLUG_THRESHOLD 1

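/*
 * Consume and process all pending requests on the ring. Returns true if at
 * least one request was dequeued.
 */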
static bool xen_block_handle_requests(XenBlockDataPlane *dataplane)
{
    RING_IDX rc, rp;
    XenBlockRequest *request;
    int inflight_atstart = dataplane->requests_inflight;
    int batched = 0;
    bool done_something = false;

    dataplane->more_work = 0;

    rc = dataplane->rings.common.req_cons;
    rp = dataplane->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    /*
     * If there were more than IO_PLUG_THRESHOLD requests in flight
     * when we got here, this is an indication that the bottleneck
     * is below us, so it's worth beginning to batch up I/O requests
     * rather than submitting them immediately. The maximum number
     * of requests we're willing to batch is the number already in
     * flight, so it can grow up to max_requests when the bottleneck
     * is below us.
     */
    if (inflight_atstart > IO_PLUG_THRESHOLD) {
        blk_io_plug(dataplane->blk);
    }
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&dataplane->rings.common, rc)) {
            break;
        }
        request = xen_block_start_request(dataplane);
        if (request == NULL) {
            dataplane->more_work++;
            break;
        }
        xen_block_get_request(dataplane, request, rc);
        dataplane->rings.common.req_cons = ++rc;
        done_something = true;

        /* parse them */
        if (xen_block_parse_request(request) != 0) {
            switch (request->req.operation) {
            case BLKIF_OP_READ:
                block_acct_invalid(blk_get_stats(dataplane->blk),
                                   BLOCK_ACCT_READ);
                break;
            case BLKIF_OP_WRITE:
                block_acct_invalid(blk_get_stats(dataplane->blk),
                                   BLOCK_ACCT_WRITE);
                break;
            case BLKIF_OP_FLUSH_DISKCACHE:
                block_acct_invalid(blk_get_stats(dataplane->blk),
                                   BLOCK_ACCT_FLUSH);
            default:
                break;
            };

            xen_block_complete_request(request);
            continue;
        }

        if (inflight_atstart > IO_PLUG_THRESHOLD &&
            batched >= inflight_atstart) {
            blk_io_unplug(dataplane->blk);
        }
        xen_block_do_aio(request);
        if (inflight_atstart > IO_PLUG_THRESHOLD) {
            if (batched >= inflight_atstart) {
                blk_io_plug(dataplane->blk);
                batched = 0;
            } else {
                batched++;
            }
        }
    }
    if (inflight_atstart > IO_PLUG_THRESHOLD) {
        blk_io_unplug(dataplane->blk);
    }

    return done_something;
}

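/* Bottom half: drain the ring from within the dataplane's AioContext. */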
static void xen_block_dataplane_bh(void *opaque)
{
    XenBlockDataPlane *dataplane = opaque;

    aio_context_acquire(dataplane->ctx);
    xen_block_handle_requests(dataplane);
    aio_context_release(dataplane->ctx);
}

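/* Event-channel callback: process whatever requests are pending on the ring. */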
static bool xen_block_dataplane_event(void *opaque)
{
    XenBlockDataPlane *dataplane = opaque;

    return xen_block_handle_requests(dataplane);
}

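/*
 * Allocate a dataplane bound to the given IOThread's AioContext, or to the
 * main loop context if no IOThread is supplied.
 */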
XenBlockDataPlane *xen_block_dataplane_create(XenDevice *xendev,
                                              BlockBackend *blk,
                                              unsigned int sector_size,
                                              IOThread *iothread)
{
    XenBlockDataPlane *dataplane = g_new0(XenBlockDataPlane, 1);

    dataplane->xendev = xendev;
    dataplane->blk = blk;
    dataplane->sector_size = sector_size;

    QLIST_INIT(&dataplane->inflight);
    QLIST_INIT(&dataplane->freelist);

    if (iothread) {
        dataplane->iothread = iothread;
        object_ref(OBJECT(dataplane->iothread));
        dataplane->ctx = iothread_get_aio_context(dataplane->iothread);
    } else {
        dataplane->ctx = qemu_get_aio_context();
    }
    dataplane->bh = aio_bh_new(dataplane->ctx, xen_block_dataplane_bh,
                               dataplane);

    return dataplane;
}

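/* Free the pooled requests, the bottom half and the dataplane itself. */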
void xen_block_dataplane_destroy(XenBlockDataPlane *dataplane)
{
    XenBlockRequest *request;

    if (!dataplane) {
        return;
    }

    while (!QLIST_EMPTY(&dataplane->freelist)) {
        request = QLIST_FIRST(&dataplane->freelist);
        QLIST_REMOVE(request, list);
        qemu_iovec_destroy(&request->v);
        qemu_vfree(request->buf);
        g_free(request);
    }

    qemu_bh_delete(dataplane->bh);
    if (dataplane->iothread) {
        object_unref(OBJECT(dataplane->iothread));
    }

    g_free(dataplane);
}

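/*
 * Move processing back to the main loop, unbind the event channel and
 * unmap the shared ring.
 */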
void xen_block_dataplane_stop(XenBlockDataPlane *dataplane)
{
    XenDevice *xendev;

    if (!dataplane) {
        return;
    }

    xendev = dataplane->xendev;

    aio_context_acquire(dataplane->ctx);
    if (dataplane->event_channel) {
        /* Only reason for failure is a NULL channel */
        xen_device_set_event_channel_context(xendev, dataplane->event_channel,
                                             qemu_get_aio_context(),
                                             &error_abort);
    }
    /* Xen doesn't have multiple users for nodes, so this can't fail */
    blk_set_aio_context(dataplane->blk, qemu_get_aio_context(), &error_abort);
    aio_context_release(dataplane->ctx);

    /*
     * Now that the context has been moved onto the main thread, cancel
     * further processing.
     */
    qemu_bh_cancel(dataplane->bh);

    if (dataplane->event_channel) {
        Error *local_err = NULL;

        xen_device_unbind_event_channel(xendev, dataplane->event_channel,
                                        &local_err);
        dataplane->event_channel = NULL;

        if (local_err) {
            error_report_err(local_err);
        }
    }

    if (dataplane->sring) {
        Error *local_err = NULL;

        xen_device_unmap_grant_refs(xendev, dataplane->sring,
                                    dataplane->nr_ring_ref, &local_err);
        dataplane->sring = NULL;

        if (local_err) {
            error_report_err(local_err);
        }
    }

    g_free(dataplane->ring_ref);
    dataplane->ring_ref = NULL;
}

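/*
 * Map the shared ring, initialise the back ring for the negotiated
 * protocol, bind the event channel and move the BlockBackend into the
 * dataplane's AioContext.
 */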
void xen_block_dataplane_start(XenBlockDataPlane *dataplane,
                               const unsigned int ring_ref[],
                               unsigned int nr_ring_ref,
                               unsigned int event_channel,
                               unsigned int protocol,
                               Error **errp)
{
    ERRP_GUARD();
    XenDevice *xendev = dataplane->xendev;
    AioContext *old_context;
    unsigned int ring_size;
    unsigned int i;

    dataplane->nr_ring_ref = nr_ring_ref;
    dataplane->ring_ref = g_new(unsigned int, nr_ring_ref);

    for (i = 0; i < nr_ring_ref; i++) {
        dataplane->ring_ref[i] = ring_ref[i];
    }

    dataplane->protocol = protocol;

    ring_size = XC_PAGE_SIZE * dataplane->nr_ring_ref;
    switch (dataplane->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        dataplane->max_requests = __CONST_RING_SIZE(blkif, ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        dataplane->max_requests = __CONST_RING_SIZE(blkif_x86_32, ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        dataplane->max_requests = __CONST_RING_SIZE(blkif_x86_64, ring_size);
        break;
    }
    default:
        error_setg(errp, "unknown protocol %u", dataplane->protocol);
        return;
    }

    xen_device_set_max_grant_refs(xendev, dataplane->nr_ring_ref,
                                  errp);
    if (*errp) {
        goto stop;
    }

    dataplane->sring = xen_device_map_grant_refs(xendev,
                                              dataplane->ring_ref,
                                              dataplane->nr_ring_ref,
                                              PROT_READ | PROT_WRITE,
                                              errp);
    if (*errp) {
        goto stop;
    }

    switch (dataplane->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkif_sring_t *sring_native = dataplane->sring;

        BACK_RING_INIT(&dataplane->rings.native, sring_native, ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkif_x86_32_sring_t *sring_x86_32 = dataplane->sring;

        BACK_RING_INIT(&dataplane->rings.x86_32_part, sring_x86_32,
                       ring_size);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkif_x86_64_sring_t *sring_x86_64 = dataplane->sring;

        BACK_RING_INIT(&dataplane->rings.x86_64_part, sring_x86_64,
                       ring_size);
        break;
    }
    }

    dataplane->event_channel =
        xen_device_bind_event_channel(xendev, event_channel,
                                      xen_block_dataplane_event, dataplane,
                                      errp);
    if (*errp) {
        goto stop;
    }

    old_context = blk_get_aio_context(dataplane->blk);
    aio_context_acquire(old_context);
    /* If other users keep the BlockBackend in the iothread, that's ok */
    blk_set_aio_context(dataplane->blk, dataplane->ctx, NULL);
    aio_context_release(old_context);

    /* Only reason for failure is a NULL channel */
    aio_context_acquire(dataplane->ctx);
    xen_device_set_event_channel_context(xendev, dataplane->event_channel,
                                         dataplane->ctx, &error_abort);
    aio_context_release(dataplane->ctx);

    return;

stop:
    xen_block_dataplane_stop(dataplane);
}