cachepc-qemu

Fork of AMDESE/qemu with changes for the cachepc side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-qemu

scsi-generic.c (24085B)


      1/*
      2 * Generic SCSI Device support
      3 *
      4 * Copyright (c) 2007 Bull S.A.S.
      5 * Based on code by Paul Brook
      6 * Based on code by Fabrice Bellard
      7 *
      8 * Written by Laurent Vivier <Laurent.Vivier@bull.net>
      9 *
     10 * This code is licensed under the LGPL.
     11 *
     12 */
     13
     14#include "qemu/osdep.h"
     15#include "qapi/error.h"
     16#include "qemu/ctype.h"
     17#include "qemu/error-report.h"
     18#include "qemu/module.h"
     19#include "hw/scsi/scsi.h"
     20#include "migration/qemu-file-types.h"
     21#include "hw/qdev-properties.h"
     22#include "hw/qdev-properties-system.h"
     23#include "hw/scsi/emulation.h"
     24#include "sysemu/block-backend.h"
     25#include "trace.h"
     26
     27#ifdef __linux__
     28
     29#include <scsi/sg.h>
     30#include "scsi/constants.h"
     31
     32#ifndef MAX_UINT
     33#define MAX_UINT ((unsigned int)-1)
     34#endif
     35
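        /*
         * Per-request passthrough state: buf/buflen hold the bounce buffer
         * for the data phase, len tracks the outstanding transfer, and
         * io_header is the sg_io_hdr_t handed to the host SG_IO ioctl.
         */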
     36typedef struct SCSIGenericReq {
     37    SCSIRequest req;
     38    uint8_t *buf;
     39    int buflen;
     40    int len;
     41    sg_io_hdr_t io_header;
     42} SCSIGenericReq;
     43
     44static void scsi_generic_save_request(QEMUFile *f, SCSIRequest *req)
     45{
     46    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
     47
     48    qemu_put_sbe32s(f, &r->buflen);
     49    if (r->buflen && r->req.cmd.mode == SCSI_XFER_TO_DEV) {
     50        assert(!r->req.sg);
     51        qemu_put_buffer(f, r->buf, r->req.cmd.xfer);
     52    }
     53}
     54
     55static void scsi_generic_load_request(QEMUFile *f, SCSIRequest *req)
     56{
     57    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
     58
     59    qemu_get_sbe32s(f, &r->buflen);
     60    if (r->buflen && r->req.cmd.mode == SCSI_XFER_TO_DEV) {
     61        assert(!r->req.sg);
     62        qemu_get_buffer(f, r->buf, r->req.cmd.xfer);
     63    }
     64}
     65
     66static void scsi_free_request(SCSIRequest *req)
     67{
     68    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
     69
     70    g_free(r->buf);
     71}
     72
     73/* Helper function for command completion.  */
     74static void scsi_command_complete_noio(SCSIGenericReq *r, int ret)
     75{
     76    int status;
     77    SCSISense sense;
     78    sg_io_hdr_t *io_hdr = &r->io_header;
     79
     80    assert(r->req.aiocb == NULL);
     81
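            /*
             * A cancelled request only needs its cancellation acknowledged.
             * Otherwise map the SG_IO outcome to a SCSI status for the guest:
             * a negative errno becomes autosense, a transport (host_status)
             * error fails the request, a driver timeout is reported as BUSY,
             * and the device status plus any sense data pass through as-is.
             */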
     82    if (r->req.io_canceled) {
     83        scsi_req_cancel_complete(&r->req);
     84        goto done;
     85    }
     86    if (ret < 0) {
     87        status = scsi_sense_from_errno(-ret, &sense);
     88        if (status == CHECK_CONDITION) {
     89            scsi_req_build_sense(&r->req, sense);
     90        }
     91    } else if (io_hdr->host_status != SCSI_HOST_OK) {
     92        scsi_req_complete_failed(&r->req, io_hdr->host_status);
     93        goto done;
     94    } else if (io_hdr->driver_status & SG_ERR_DRIVER_TIMEOUT) {
     95        status = BUSY;
     96    } else {
     97        status = io_hdr->status;
     98        if (io_hdr->driver_status & SG_ERR_DRIVER_SENSE) {
     99            r->req.sense_len = io_hdr->sb_len_wr;
    100        }
    101    }
    102    trace_scsi_generic_command_complete_noio(r, r->req.tag, status);
    103
    104    scsi_req_complete(&r->req, status);
    105done:
    106    scsi_req_unref(&r->req);
    107}
    108
    109static void scsi_command_complete(void *opaque, int ret)
    110{
    111    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    112    SCSIDevice *s = r->req.dev;
    113
    114    assert(r->req.aiocb != NULL);
    115    r->req.aiocb = NULL;
    116
    117    aio_context_acquire(blk_get_aio_context(s->conf.blk));
    118    scsi_command_complete_noio(r, ret);
    119    aio_context_release(blk_get_aio_context(s->conf.blk));
    120}
    121
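        /*
         * Fill in the sg_io_hdr_t for this request and submit it to the host
         * device with an asynchronous SG_IO ioctl; 'complete' is invoked once
         * the host kernel finishes the command.
         */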
    122static int execute_command(BlockBackend *blk,
    123                           SCSIGenericReq *r, int direction,
    124                           BlockCompletionFunc *complete)
    125{
    126    SCSIDevice *s = r->req.dev;
    127
    128    r->io_header.interface_id = 'S';
    129    r->io_header.dxfer_direction = direction;
    130    r->io_header.dxferp = r->buf;
    131    r->io_header.dxfer_len = r->buflen;
    132    r->io_header.cmdp = r->req.cmd.buf;
    133    r->io_header.cmd_len = r->req.cmd.len;
    134    r->io_header.mx_sb_len = sizeof(r->req.sense);
    135    r->io_header.sbp = r->req.sense;
    136    r->io_header.timeout = s->io_timeout * 1000;
    137    r->io_header.usr_ptr = r;
    138    r->io_header.flags |= SG_FLAG_DIRECT_IO;
    139
    140    trace_scsi_generic_aio_sgio_command(r->req.tag, r->req.cmd.buf[0],
    141                                        r->io_header.timeout);
    142    r->req.aiocb = blk_aio_ioctl(blk, SG_IO, &r->io_header, complete, r);
    143    if (r->req.aiocb == NULL) {
    144        return -EIO;
    145    }
    146
    147    return 0;
    148}
    149
    150static int scsi_handle_inquiry_reply(SCSIGenericReq *r, SCSIDevice *s, int len)
    151{
    152    uint8_t page, page_idx;
    153
    154    /*
    155     *  EVPD set to zero returns the standard INQUIRY data.
    156     *
    157     *  Check if scsi_version is unset (-1) to avoid re-defining it
    158     *  each time an INQUIRY with standard data is received.
    159     *  scsi_version is initialized with -1 in scsi_generic_reset
    160     *  and scsi_disk_reset, making sure that we'll set the
    161     *  scsi_version after a reset. If the version field of the
    162     *  INQUIRY response somehow changes after a guest reboot,
    163     *  we'll be able to keep track of it.
    164     *
     165     *  On SCSI-2 and older, the first 3 bits of byte 2 are the
    166     *  ANSI-approved version, while on later versions the
    167     *  whole byte 2 contains the version. Check if we're dealing
    168     *  with a newer version and, in that case, assign the
    169     *  whole byte.
    170     */
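            /*
             * For example, a reported VERSION byte of 0x02 (SCSI-2) keeps only
             * its low three bits, while 0x06 (SPC-4) is greater than 2 and is
             * stored as the whole byte.
             */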
    171    if (s->scsi_version == -1 && !(r->req.cmd.buf[1] & 0x01)) {
    172        s->scsi_version = r->buf[2] & 0x07;
    173        if (s->scsi_version > 2) {
    174            s->scsi_version = r->buf[2];
    175        }
    176    }
    177
    178    if ((s->type == TYPE_DISK || s->type == TYPE_ZBC) &&
    179        (r->req.cmd.buf[1] & 0x01)) {
    180        page = r->req.cmd.buf[2];
    181        if (page == 0xb0) {
    182            uint64_t max_transfer = blk_get_max_hw_transfer(s->conf.blk);
    183            uint32_t max_iov = blk_get_max_hw_iov(s->conf.blk);
    184
    185            assert(max_transfer);
    186            max_transfer = MIN_NON_ZERO(max_transfer, max_iov * qemu_real_host_page_size)
    187                / s->blocksize;
    188            stl_be_p(&r->buf[8], max_transfer);
    189            /* Also take care of the opt xfer len. */
    190            stl_be_p(&r->buf[12],
    191                    MIN_NON_ZERO(max_transfer, ldl_be_p(&r->buf[12])));
    192        } else if (s->needs_vpd_bl_emulation && page == 0x00 && r->buflen >= 4) {
    193            /*
    194             * Now we're capable of supplying the VPD Block Limits
     195             * response if the hardware can't. Add it to the INQUIRY
    196             * Supported VPD pages response in case we are using the
    197             * emulation for this device.
    198             *
    199             * This way, the guest kernel will be aware of the support
     200             * and will use it to properly set up the SCSI device.
    201             *
    202             * VPD page numbers must be sorted, so insert 0xb0 at the
    203             * right place with an in-place insert.  When the while loop
    204             * begins the device response is at r[0] to r[page_idx - 1].
    205             */
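                    /*
                     * Worked example (illustrative page list): 0x00 0x80 0x83
                     * 0xb1 becomes 0x00 0x80 0x83 0xb0 0xb1, and the page
                     * count in bytes 2-3 is bumped by one below.
                     */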
    206            page_idx = lduw_be_p(r->buf + 2) + 4;
    207            page_idx = MIN(page_idx, r->buflen);
    208            while (page_idx > 4 && r->buf[page_idx - 1] >= 0xb0) {
    209                if (page_idx < r->buflen) {
    210                    r->buf[page_idx] = r->buf[page_idx - 1];
    211                }
    212                page_idx--;
    213            }
    214            if (page_idx < r->buflen) {
    215                r->buf[page_idx] = 0xb0;
    216            }
    217            stw_be_p(r->buf + 2, lduw_be_p(r->buf + 2) + 1);
    218
    219            if (len < r->buflen) {
    220                len++;
    221            }
    222        }
    223    }
    224    return len;
    225}
    226
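        /*
         * Build a synthetic INQUIRY VPD 0xb0 (Block Limits) reply in r->buf,
         * derived from the block backend limits, and scrub the sense/status
         * in the io_header so the guest sees a clean response.
         */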
    227static int scsi_generic_emulate_block_limits(SCSIGenericReq *r, SCSIDevice *s)
    228{
    229    int len;
    230    uint8_t buf[64];
    231
    232    SCSIBlockLimits bl = {
    233        .max_io_sectors = blk_get_max_transfer(s->conf.blk) / s->blocksize
    234    };
    235
    236    memset(r->buf, 0, r->buflen);
    237    stb_p(buf, s->type);
    238    stb_p(buf + 1, 0xb0);
    239    len = scsi_emulate_block_limits(buf + 4, &bl);
    240    assert(len <= sizeof(buf) - 4);
    241    stw_be_p(buf + 2, len);
    242
    243    memcpy(r->buf, buf, MIN(r->buflen, len + 4));
    244
    245    r->io_header.sb_len_wr = 0;
    246
     247    /*
     248     * We have valid contents in the reply buffer but the
     249     * io_header can report a sense error coming from
     250     * the hardware in scsi_command_complete_noio. Clean
     251     * up the io_header to avoid reporting it.
     252     */
    253    r->io_header.driver_status = 0;
    254    r->io_header.status = 0;
    255
    256    return r->buflen;
    257}
    258
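        /*
         * Completion callback for data-in commands: emulate VPD Block Limits
         * when the device rejects the page, snoop READ CAPACITY and MODE SENSE
         * replies to keep blocksize/max_lba in sync, and apply the INQUIRY
         * fixups before handing the data to the guest.
         */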
     259static void scsi_read_complete(void *opaque, int ret)
    260{
    261    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    262    SCSIDevice *s = r->req.dev;
    263    int len;
    264
    265    assert(r->req.aiocb != NULL);
    266    r->req.aiocb = NULL;
    267
    268    aio_context_acquire(blk_get_aio_context(s->conf.blk));
    269
    270    if (ret || r->req.io_canceled) {
    271        scsi_command_complete_noio(r, ret);
    272        goto done;
    273    }
    274
    275    len = r->io_header.dxfer_len - r->io_header.resid;
    276    trace_scsi_generic_read_complete(r->req.tag, len);
    277
    278    r->len = -1;
    279
    280    if (r->io_header.driver_status & SG_ERR_DRIVER_SENSE) {
    281        SCSISense sense =
    282            scsi_parse_sense_buf(r->req.sense, r->io_header.sb_len_wr);
    283
    284        /*
    285         * Check if this is a VPD Block Limits request that
    286         * resulted in sense error but would need emulation.
    287         * In this case, emulate a valid VPD response.
    288         */
    289        if (sense.key == ILLEGAL_REQUEST &&
    290            s->needs_vpd_bl_emulation &&
    291            r->req.cmd.buf[0] == INQUIRY &&
    292            (r->req.cmd.buf[1] & 0x01) &&
    293            r->req.cmd.buf[2] == 0xb0) {
    294            len = scsi_generic_emulate_block_limits(r, s);
    295            /*
     296             * It's okay to jump to req_complete: no need to
    297             * let scsi_handle_inquiry_reply handle an
    298             * INQUIRY VPD BL request we created manually.
    299             */
    300        }
    301        if (sense.key) {
    302            goto req_complete;
    303        }
    304    }
    305
    306    if (r->io_header.host_status != SCSI_HOST_OK ||
    307        (r->io_header.driver_status & SG_ERR_DRIVER_TIMEOUT) ||
    308        r->io_header.status != GOOD ||
    309        len == 0) {
    310        scsi_command_complete_noio(r, 0);
    311        goto done;
    312    }
    313
    314    /* Snoop READ CAPACITY output to set the blocksize.  */
    315    if (r->req.cmd.buf[0] == READ_CAPACITY_10 &&
    316        (ldl_be_p(&r->buf[0]) != 0xffffffffU || s->max_lba == 0)) {
    317        s->blocksize = ldl_be_p(&r->buf[4]);
    318        s->max_lba = ldl_be_p(&r->buf[0]) & 0xffffffffULL;
    319    } else if (r->req.cmd.buf[0] == SERVICE_ACTION_IN_16 &&
    320               (r->req.cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
    321        s->blocksize = ldl_be_p(&r->buf[8]);
    322        s->max_lba = ldq_be_p(&r->buf[0]);
    323    }
    324    blk_set_guest_block_size(s->conf.blk, s->blocksize);
    325
    326    /*
    327     * Patch MODE SENSE device specific parameters if the BDS is opened
    328     * readonly.
    329     */
    330    if ((s->type == TYPE_DISK || s->type == TYPE_TAPE || s->type == TYPE_ZBC) &&
    331        !blk_is_writable(s->conf.blk) &&
    332        (r->req.cmd.buf[0] == MODE_SENSE ||
    333         r->req.cmd.buf[0] == MODE_SENSE_10) &&
    334        (r->req.cmd.buf[1] & 0x8) == 0) {
    335        if (r->req.cmd.buf[0] == MODE_SENSE) {
    336            r->buf[2] |= 0x80;
    337        } else  {
    338            r->buf[3] |= 0x80;
    339        }
    340    }
    341    if (r->req.cmd.buf[0] == INQUIRY) {
    342        len = scsi_handle_inquiry_reply(r, s, len);
    343    }
    344
    345req_complete:
    346    scsi_req_data(&r->req, len);
    347    scsi_req_unref(&r->req);
    348
    349done:
    350    aio_context_release(blk_get_aio_context(s->conf.blk));
    351}
    352
    353/* Read more data from scsi device into buffer.  */
    354static void scsi_read_data(SCSIRequest *req)
    355{
    356    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    357    SCSIDevice *s = r->req.dev;
    358    int ret;
    359
    360    trace_scsi_generic_read_data(req->tag);
    361
    362    /* The request is used as the AIO opaque value, so add a ref.  */
    363    scsi_req_ref(&r->req);
    364    if (r->len == -1) {
    365        scsi_command_complete_noio(r, 0);
    366        return;
    367    }
    368
    369    ret = execute_command(s->conf.blk, r, SG_DXFER_FROM_DEV,
    370                          scsi_read_complete);
    371    if (ret < 0) {
    372        scsi_command_complete_noio(r, ret);
    373    }
    374}
    375
     377static void scsi_write_complete(void *opaque, int ret)
    377{
    378    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    379    SCSIDevice *s = r->req.dev;
    380
    381    trace_scsi_generic_write_complete(ret);
    382
    383    assert(r->req.aiocb != NULL);
    384    r->req.aiocb = NULL;
    385
    386    aio_context_acquire(blk_get_aio_context(s->conf.blk));
    387
    388    if (ret || r->req.io_canceled) {
    389        scsi_command_complete_noio(r, ret);
    390        goto done;
    391    }
    392
    393    if (r->req.cmd.buf[0] == MODE_SELECT && r->req.cmd.buf[4] == 12 &&
    394        s->type == TYPE_TAPE) {
    395        s->blocksize = (r->buf[9] << 16) | (r->buf[10] << 8) | r->buf[11];
    396        trace_scsi_generic_write_complete_blocksize(s->blocksize);
    397    }
    398
    399    scsi_command_complete_noio(r, ret);
    400
    401done:
    402    aio_context_release(blk_get_aio_context(s->conf.blk));
    403}
    404
     405/* Write data to a scsi device.  The transfer may complete
     406   asynchronously.  */
    407static void scsi_write_data(SCSIRequest *req)
    408{
    409    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    410    SCSIDevice *s = r->req.dev;
    411    int ret;
    412
    413    trace_scsi_generic_write_data(req->tag);
    414    if (r->len == 0) {
    415        r->len = r->buflen;
    416        scsi_req_data(&r->req, r->len);
    417        return;
    418    }
    419
    420    /* The request is used as the AIO opaque value, so add a ref.  */
    421    scsi_req_ref(&r->req);
    422    ret = execute_command(s->conf.blk, r, SG_DXFER_TO_DEV, scsi_write_complete);
    423    if (ret < 0) {
    424        scsi_command_complete_noio(r, ret);
    425    }
    426}
    427
    428/* Return a pointer to the data buffer.  */
    429static uint8_t *scsi_get_buf(SCSIRequest *req)
    430{
    431    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    432
    433    return r->buf;
    434}
    435
    436static void scsi_generic_command_dump(uint8_t *cmd, int len)
    437{
    438    int i;
    439    char *line_buffer, *p;
    440
    441    line_buffer = g_malloc(len * 5 + 1);
    442
    443    for (i = 0, p = line_buffer; i < len; i++) {
    444        p += sprintf(p, " 0x%02x", cmd[i]);
    445    }
    446    trace_scsi_generic_send_command(line_buffer);
    447
    448    g_free(line_buffer);
    449}
    450
    451/* Execute a scsi command.  Returns the length of the data expected by the
     452   command.  This will be positive for data transfers from the device
     453   (e.g. disk reads), negative for transfers to the device (e.g. disk writes),
    454   and zero if the command does not transfer any data.  */
    455
    456static int32_t scsi_send_command(SCSIRequest *req, uint8_t *cmd)
    457{
    458    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    459    SCSIDevice *s = r->req.dev;
    460    int ret;
    461
    462    if (trace_event_get_state_backends(TRACE_SCSI_GENERIC_SEND_COMMAND)) {
    463        scsi_generic_command_dump(cmd, r->req.cmd.len);
    464    }
    465
    466    if (r->req.cmd.xfer == 0) {
    467        g_free(r->buf);
    468        r->buflen = 0;
    469        r->buf = NULL;
    470        /* The request is used as the AIO opaque value, so add a ref.  */
    471        scsi_req_ref(&r->req);
    472        ret = execute_command(s->conf.blk, r, SG_DXFER_NONE,
    473                              scsi_command_complete);
    474        if (ret < 0) {
    475            scsi_command_complete_noio(r, ret);
    476            return 0;
    477        }
    478        return 0;
    479    }
    480
    481    if (r->buflen != r->req.cmd.xfer) {
    482        g_free(r->buf);
    483        r->buf = g_malloc(r->req.cmd.xfer);
    484        r->buflen = r->req.cmd.xfer;
    485    }
    486
    487    memset(r->buf, 0, r->buflen);
    488    r->len = r->req.cmd.xfer;
    489    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
    490        r->len = 0;
    491        return -r->req.cmd.xfer;
    492    } else {
    493        return r->req.cmd.xfer;
    494    }
    495}
    496
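        /*
         * Extract a 64-bit WWN from one designation descriptor: either a
         * binary NAA designator (type 3, 8 bytes of payload) or an ASCII
         * SCSI name string of the form "naa.<16 hex digits>" (type 8).
         */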
    497static int read_naa_id(const uint8_t *p, uint64_t *p_wwn)
    498{
    499    int i;
    500
    501    if ((p[1] & 0xF) == 3) {
    502        /* NAA designator type */
    503        if (p[3] != 8) {
    504            return -EINVAL;
    505        }
    506        *p_wwn = ldq_be_p(p + 4);
    507        return 0;
    508    }
    509
    510    if ((p[1] & 0xF) == 8) {
    511        /* SCSI name string designator type */
    512        if (p[3] < 20 || memcmp(&p[4], "naa.", 4)) {
    513            return -EINVAL;
    514        }
    515        if (p[3] > 20 && p[24] != ',') {
    516            return -EINVAL;
    517        }
    518        *p_wwn = 0;
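                /* Parse the 16 hex digits following "naa." into the WWN. */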
    519        for (i = 8; i < 24; i++) {
    520            char c = qemu_toupper(p[i]);
    521            c -= (c >= '0' && c <= '9' ? '0' : 'A' - 10);
    522            *p_wwn = (*p_wwn << 4) | c;
    523        }
    524        return 0;
    525    }
    526
    527    return -EINVAL;
    528}
    529
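        /*
         * Synchronous SG_IO helper for host probes at realize time: issue
         * 'cmd' with a data-in transfer into 'buf' and return 0 only if the
         * transport, driver and device status are all clean.
         */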
    530int scsi_SG_IO_FROM_DEV(BlockBackend *blk, uint8_t *cmd, uint8_t cmd_size,
    531                        uint8_t *buf, uint8_t buf_size, uint32_t timeout)
    532{
    533    sg_io_hdr_t io_header;
    534    uint8_t sensebuf[8];
    535    int ret;
    536
    537    memset(&io_header, 0, sizeof(io_header));
    538    io_header.interface_id = 'S';
    539    io_header.dxfer_direction = SG_DXFER_FROM_DEV;
    540    io_header.dxfer_len = buf_size;
    541    io_header.dxferp = buf;
    542    io_header.cmdp = cmd;
    543    io_header.cmd_len = cmd_size;
    544    io_header.mx_sb_len = sizeof(sensebuf);
    545    io_header.sbp = sensebuf;
    546    io_header.timeout = timeout * 1000;
    547
    548    trace_scsi_generic_ioctl_sgio_command(cmd[0], io_header.timeout);
    549    ret = blk_ioctl(blk, SG_IO, &io_header);
    550    if (ret < 0 || io_header.status ||
    551        io_header.driver_status || io_header.host_status) {
    552        trace_scsi_generic_ioctl_sgio_done(cmd[0], ret, io_header.status,
    553                                           io_header.host_status);
    554        return -1;
    555    }
    556    return 0;
    557}
    558
    559/*
    560 * Executes an INQUIRY request with EVPD set to retrieve the
    561 * available VPD pages of the device. If the device does
    562 * not support the Block Limits page (page 0xb0), set
    563 * the needs_vpd_bl_emulation flag for future use.
    564 */
    565static void scsi_generic_set_vpd_bl_emulation(SCSIDevice *s)
    566{
    567    uint8_t cmd[6];
    568    uint8_t buf[250];
    569    uint8_t page_len;
    570    int ret, i;
    571
    572    memset(cmd, 0, sizeof(cmd));
    573    memset(buf, 0, sizeof(buf));
    574    cmd[0] = INQUIRY;
    575    cmd[1] = 1;
    576    cmd[2] = 0x00;
    577    cmd[4] = sizeof(buf);
    578
    579    ret = scsi_SG_IO_FROM_DEV(s->conf.blk, cmd, sizeof(cmd),
    580                              buf, sizeof(buf), s->io_timeout);
    581    if (ret < 0) {
    582        /*
    583         * Do not assume anything if we can't retrieve the
     584         * INQUIRY response to confirm the VPD Block Limits
    585         * support.
    586         */
    587        s->needs_vpd_bl_emulation = false;
    588        return;
    589    }
    590
    591    page_len = buf[3];
    592    for (i = 4; i < MIN(sizeof(buf), page_len + 4); i++) {
    593        if (buf[i] == 0xb0) {
    594            s->needs_vpd_bl_emulation = false;
    595            return;
    596        }
    597    }
    598    s->needs_vpd_bl_emulation = true;
    599}
    600
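        /*
         * Issue INQUIRY VPD page 0x83 (Device Identification) and walk the
         * designation descriptors, recording NAA-based WWNs for the logical
         * unit (s->wwn) and for the target port (s->port_wwn).
         */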
    601static void scsi_generic_read_device_identification(SCSIDevice *s)
    602{
    603    uint8_t cmd[6];
    604    uint8_t buf[250];
    605    int ret;
    606    int i, len;
    607
    608    memset(cmd, 0, sizeof(cmd));
    609    memset(buf, 0, sizeof(buf));
    610    cmd[0] = INQUIRY;
    611    cmd[1] = 1;
    612    cmd[2] = 0x83;
    613    cmd[4] = sizeof(buf);
    614
    615    ret = scsi_SG_IO_FROM_DEV(s->conf.blk, cmd, sizeof(cmd),
    616                              buf, sizeof(buf), s->io_timeout);
    617    if (ret < 0) {
    618        return;
    619    }
    620
    621    len = MIN((buf[2] << 8) | buf[3], sizeof(buf) - 4);
    622    for (i = 0; i + 3 <= len; ) {
    623        const uint8_t *p = &buf[i + 4];
    624        uint64_t wwn;
    625
    626        if (i + (p[3] + 4) > len) {
    627            break;
    628        }
    629
    630        if ((p[1] & 0x10) == 0) {
    631            /* Associated with the logical unit */
    632            if (read_naa_id(p, &wwn) == 0) {
    633                s->wwn = wwn;
    634            }
    635        } else if ((p[1] & 0x10) == 0x10) {
    636            /* Associated with the target port */
    637            if (read_naa_id(p, &wwn) == 0) {
    638                s->port_wwn = wwn;
    639            }
    640        }
    641
    642        i += p[3] + 4;
    643    }
    644}
    645
    646void scsi_generic_read_device_inquiry(SCSIDevice *s)
    647{
    648    scsi_generic_read_device_identification(s);
    649    if (s->type == TYPE_DISK || s->type == TYPE_ZBC) {
    650        scsi_generic_set_vpd_bl_emulation(s);
    651    } else {
    652        s->needs_vpd_bl_emulation = false;
    653    }
    654}
    655
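        /*
         * Read the current block length of a stream (tape) device with
         * MODE SENSE(6); bytes 9-11 of the reply hold the block length from
         * the first block descriptor.
         */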
    656static int get_stream_blocksize(BlockBackend *blk)
    657{
    658    uint8_t cmd[6];
    659    uint8_t buf[12];
    660    int ret;
    661
    662    memset(cmd, 0, sizeof(cmd));
    663    memset(buf, 0, sizeof(buf));
    664    cmd[0] = MODE_SENSE;
    665    cmd[4] = sizeof(buf);
    666
    667    ret = scsi_SG_IO_FROM_DEV(blk, cmd, sizeof(cmd), buf, sizeof(buf), 6);
    668    if (ret < 0) {
    669        return -1;
    670    }
    671
    672    return (buf[9] << 16) | (buf[10] << 8) | buf[11];
    673}
    674
    675static void scsi_generic_reset(DeviceState *dev)
    676{
    677    SCSIDevice *s = SCSI_DEVICE(dev);
    678
    679    s->scsi_version = s->default_scsi_version;
    680    scsi_device_purge_requests(s, SENSE_CODE(RESET));
    681}
    682
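        /*
         * Realize: require report/enospc error policies, check that the
         * backing node speaks SG_IO version 3 or newer, query SG_GET_SCSI_ID
         * for the device type, pick an initial block size and probe the
         * device's INQUIRY data.
         */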
    683static void scsi_generic_realize(SCSIDevice *s, Error **errp)
    684{
    685    int rc;
    686    int sg_version;
    687    struct sg_scsi_id scsiid;
    688
    689    if (!s->conf.blk) {
    690        error_setg(errp, "drive property not set");
    691        return;
    692    }
    693
    694    if (blk_get_on_error(s->conf.blk, 0) != BLOCKDEV_ON_ERROR_ENOSPC &&
    695        blk_get_on_error(s->conf.blk, 0) != BLOCKDEV_ON_ERROR_REPORT) {
    696        error_setg(errp, "Device doesn't support drive option werror");
    697        return;
    698    }
    699    if (blk_get_on_error(s->conf.blk, 1) != BLOCKDEV_ON_ERROR_REPORT) {
    700        error_setg(errp, "Device doesn't support drive option rerror");
    701        return;
    702    }
    703
     704    /* check we are using a driver managing SG_IO (version 3 and after) */
    705    rc = blk_ioctl(s->conf.blk, SG_GET_VERSION_NUM, &sg_version);
    706    if (rc < 0) {
    707        error_setg_errno(errp, -rc, "cannot get SG_IO version number");
    708        if (rc != -EPERM) {
    709            error_append_hint(errp, "Is this a SCSI device?\n");
    710        }
    711        return;
    712    }
    713    if (sg_version < 30000) {
    714        error_setg(errp, "scsi generic interface too old");
    715        return;
    716    }
    717
     718    /* get the SCSI id and type of the /dev/sg? device */
    719    if (blk_ioctl(s->conf.blk, SG_GET_SCSI_ID, &scsiid)) {
    720        error_setg(errp, "SG_GET_SCSI_ID ioctl failed");
    721        return;
    722    }
    723    if (!blkconf_apply_backend_options(&s->conf,
    724                                       !blk_supports_write_perm(s->conf.blk),
    725                                       true, errp)) {
    726        return;
    727    }
    728
    729    /* define device state */
    730    s->type = scsiid.scsi_type;
    731    trace_scsi_generic_realize_type(s->type);
    732
    733    switch (s->type) {
    734    case TYPE_TAPE:
    735        s->blocksize = get_stream_blocksize(s->conf.blk);
    736        if (s->blocksize == -1) {
    737            s->blocksize = 0;
    738        }
    739        break;
    740
     741        /* Make a guess for block devices, we'll fix it when the guest sends
     742         * READ CAPACITY.  If they don't, they would likely assume these sizes
     743         * anyway. (TODO: they could also send MODE SENSE).
    744         */
    745    case TYPE_ROM:
    746    case TYPE_WORM:
    747        s->blocksize = 2048;
    748        break;
    749    default:
    750        s->blocksize = 512;
    751        break;
    752    }
    753
    754    trace_scsi_generic_realize_blocksize(s->blocksize);
    755
    756    /* Only used by scsi-block, but initialize it nevertheless to be clean.  */
    757    s->default_scsi_version = -1;
    758    s->io_timeout = DEFAULT_IO_TIMEOUT;
    759    scsi_generic_read_device_inquiry(s);
    760}
    761
    762const SCSIReqOps scsi_generic_req_ops = {
    763    .size         = sizeof(SCSIGenericReq),
    764    .free_req     = scsi_free_request,
    765    .send_command = scsi_send_command,
    766    .read_data    = scsi_read_data,
    767    .write_data   = scsi_write_data,
    768    .get_buf      = scsi_get_buf,
    769    .load_request = scsi_generic_load_request,
    770    .save_request = scsi_generic_save_request,
    771};
    772
    773static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
    774                                     uint8_t *buf, void *hba_private)
    775{
    776    return scsi_req_alloc(&scsi_generic_req_ops, d, tag, lun, hba_private);
    777}
    778
    779static Property scsi_generic_properties[] = {
    780    DEFINE_PROP_DRIVE("drive", SCSIDevice, conf.blk),
    781    DEFINE_PROP_BOOL("share-rw", SCSIDevice, conf.share_rw, false),
    782    DEFINE_PROP_UINT32("io_timeout", SCSIDevice, io_timeout,
    783                       DEFAULT_IO_TIMEOUT),
    784    DEFINE_PROP_END_OF_LIST(),
    785};
    786
    787static int scsi_generic_parse_cdb(SCSIDevice *dev, SCSICommand *cmd,
    788                                  uint8_t *buf, void *hba_private)
    789{
    790    return scsi_bus_parse_cdb(dev, cmd, buf, hba_private);
    791}
    792
    793static void scsi_generic_class_initfn(ObjectClass *klass, void *data)
    794{
    795    DeviceClass *dc = DEVICE_CLASS(klass);
    796    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
    797
    798    sc->realize      = scsi_generic_realize;
    799    sc->alloc_req    = scsi_new_request;
    800    sc->parse_cdb    = scsi_generic_parse_cdb;
    801    dc->fw_name = "disk";
    802    dc->desc = "pass through generic scsi device (/dev/sg*)";
    803    dc->reset = scsi_generic_reset;
    804    device_class_set_props(dc, scsi_generic_properties);
    805    dc->vmsd  = &vmstate_scsi_device;
    806}
    807
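        /*
         * A minimal sketch of how this device is typically attached on the
         * command line (controller, ids and the /dev/sg node are
         * placeholders):
         *
         *   qemu-system-x86_64 ... \
         *     -device virtio-scsi-pci,id=scsi0 \
         *     -drive if=none,id=hostsg0,file=/dev/sg1 \
         *     -device scsi-generic,bus=scsi0.0,drive=hostsg0,io_timeout=60
         */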
    808static const TypeInfo scsi_generic_info = {
    809    .name          = "scsi-generic",
    810    .parent        = TYPE_SCSI_DEVICE,
    811    .instance_size = sizeof(SCSIDevice),
    812    .class_init    = scsi_generic_class_initfn,
    813};
    814
    815static void scsi_generic_register_types(void)
    816{
    817    type_register_static(&scsi_generic_info);
    818}
    819
    820type_init(scsi_generic_register_types)
    821
    822#endif /* __linux__ */