cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

target_core_sbc.c (39012B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/*
      3 * SCSI Block Commands (SBC) parsing and emulation.
      4 *
      5 * (c) Copyright 2002-2013 Datera, Inc.
      6 *
      7 * Nicholas A. Bellinger <nab@kernel.org>
      8 */
      9
     10#include <linux/kernel.h>
     11#include <linux/module.h>
     12#include <linux/ratelimit.h>
     13#include <linux/crc-t10dif.h>
     14#include <linux/t10-pi.h>
     15#include <asm/unaligned.h>
     16#include <scsi/scsi_proto.h>
     17#include <scsi/scsi_tcq.h>
     18
     19#include <target/target_core_base.h>
     20#include <target/target_core_backend.h>
     21#include <target/target_core_fabric.h>
     22
     23#include "target_core_internal.h"
     24#include "target_core_ua.h"
     25#include "target_core_alua.h"
     26
     27static sense_reason_t
     28sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char, u32, bool);
     29static sense_reason_t sbc_execute_unmap(struct se_cmd *cmd);
     30
     31static sense_reason_t
     32sbc_emulate_readcapacity(struct se_cmd *cmd)
     33{
     34	struct se_device *dev = cmd->se_dev;
     35	unsigned char *cdb = cmd->t_task_cdb;
     36	unsigned long long blocks_long = dev->transport->get_blocks(dev);
     37	unsigned char *rbuf;
     38	unsigned char buf[8];
     39	u32 blocks;
     40
     41	/*
     42	 * SBC-2 says:
     43	 *   If the PMI bit is set to zero and the LOGICAL BLOCK
     44	 *   ADDRESS field is not set to zero, the device server shall
     45	 *   terminate the command with CHECK CONDITION status with
     46	 *   the sense key set to ILLEGAL REQUEST and the additional
     47	 *   sense code set to INVALID FIELD IN CDB.
     48	 *
     49	 * In SBC-3, these fields are obsolete, but some SCSI
     50	 * compliance tests actually check this, so we might as well
     51	 * follow SBC-2.
     52	 */
     53	if (!(cdb[8] & 1) && !!(cdb[2] | cdb[3] | cdb[4] | cdb[5]))
     54		return TCM_INVALID_CDB_FIELD;
     55
     56	if (blocks_long >= 0x00000000ffffffff)
     57		blocks = 0xffffffff;
     58	else
     59		blocks = (u32)blocks_long;
     60
     61	put_unaligned_be32(blocks, &buf[0]);
     62	put_unaligned_be32(dev->dev_attrib.block_size, &buf[4]);
     63
     64	rbuf = transport_kmap_data_sg(cmd);
     65	if (rbuf) {
     66		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
     67		transport_kunmap_data_sg(cmd);
     68	}
     69
     70	target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, 8);
     71	return 0;
     72}
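/*
 * Editor's sketch (not part of the original driver): decoding the 8-byte
 * READ CAPACITY (10) parameter data built above. Bytes 0-3 carry the last
 * addressable LBA (capped at 0xffffffff), bytes 4-7 the logical block
 * length in bytes, both big-endian; the helper name is hypothetical.
 */
static inline void example_decode_readcap10(const unsigned char *buf,
					    u32 *last_lba, u32 *block_size)
{
	*last_lba   = get_unaligned_be32(&buf[0]);  /* RETURNED LOGICAL BLOCK ADDRESS */
	*block_size = get_unaligned_be32(&buf[4]);  /* LOGICAL BLOCK LENGTH IN BYTES */
}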
     73
     74static sense_reason_t
     75sbc_emulate_readcapacity_16(struct se_cmd *cmd)
     76{
     77	struct se_device *dev = cmd->se_dev;
     78	struct se_session *sess = cmd->se_sess;
     79	int pi_prot_type = dev->dev_attrib.pi_prot_type;
     80
     81	unsigned char *rbuf;
     82	unsigned char buf[32];
     83	unsigned long long blocks = dev->transport->get_blocks(dev);
     84
     85	memset(buf, 0, sizeof(buf));
     86	put_unaligned_be64(blocks, &buf[0]);
     87	put_unaligned_be32(dev->dev_attrib.block_size, &buf[8]);
     88	/*
     89	 * Set P_TYPE and PROT_EN bits for DIF support
     90	 */
     91	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
     92		/*
     93		 * Only override a device's pi_prot_type if no T10-PI is
     94		 * available, and sess_prot_type has been explicitly enabled.
     95		 */
     96		if (!pi_prot_type)
     97			pi_prot_type = sess->sess_prot_type;
     98
     99		if (pi_prot_type)
    100			buf[12] = (pi_prot_type - 1) << 1 | 0x1;
    101	}
    102
    103	if (dev->transport->get_lbppbe)
    104		buf[13] = dev->transport->get_lbppbe(dev) & 0x0f;
    105
    106	if (dev->transport->get_alignment_offset_lbas) {
    107		u16 lalba = dev->transport->get_alignment_offset_lbas(dev);
    108
    109		put_unaligned_be16(lalba, &buf[14]);
    110	}
    111
    112	/*
    113	 * Set Thin Provisioning Enable bit following sbc3r22 in section
    114	 * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
    115	 */
    116	if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws) {
    117		buf[14] |= 0x80;
    118
    119		/*
    120		 * LBPRZ signifies that zeroes will be read back from an LBA after
    121		 * an UNMAP or WRITE SAME w/ unmap bit (sbc3r36 5.16.2)
    122		 */
    123		if (dev->dev_attrib.unmap_zeroes_data)
    124			buf[14] |= 0x40;
    125	}
    126
    127	rbuf = transport_kmap_data_sg(cmd);
    128	if (rbuf) {
    129		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
    130		transport_kunmap_data_sg(cmd);
    131	}
    132
    133	target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, 32);
    134	return 0;
    135}
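/*
 * Editor's worked example: byte 12 above packs P_TYPE into bits 3:1 and
 * PROT_EN into bit 0, so TARGET_DIF_TYPE1_PROT (1) encodes as
 * (1 - 1) << 1 | 0x1 = 0x01 and TYPE3 (3) as (3 - 1) << 1 | 0x1 = 0x05.
 * A decoder (hypothetical helper) just reverses the shift:
 */
static inline void example_decode_p_type(unsigned char byte12,
					 unsigned int *p_type, bool *prot_en)
{
	*prot_en = byte12 & 0x1;
	*p_type = (byte12 >> 1) & 0x7;	/* pi_prot_type - 1 when PROT_EN=1 */
}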
    136
    137static sense_reason_t
    138sbc_emulate_startstop(struct se_cmd *cmd)
    139{
    140	unsigned char *cdb = cmd->t_task_cdb;
    141
    142	/*
    143	 * See sbc3r36 section 5.25
    144	 * Immediate bit should be set since there is nothing to complete
    145	 * POWER CONDITION MODIFIER 0h
    146	 */
    147	if (!(cdb[1] & 1) || cdb[2] || cdb[3])
    148		return TCM_INVALID_CDB_FIELD;
    149
    150	/*
    151	 * See sbc3r36 section 5.25
    152	 * POWER CONDITION 0h START_VALID - process START and LOEJ
    153	 */
    154	if (cdb[4] >> 4 & 0xf)
    155		return TCM_INVALID_CDB_FIELD;
    156
    157	/*
    158	 * See sbc3r36 section 5.25
    159	 * LOEJ 0h - nothing to load or unload
    160	 * START 1h - we are ready
    161	 */
    162	if (!(cdb[4] & 1) || (cdb[4] & 2) || (cdb[4] & 4))
    163		return TCM_INVALID_CDB_FIELD;
    164
    165	target_complete_cmd(cmd, SAM_STAT_GOOD);
    166	return 0;
    167}
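/*
 * Editor's example (hypothetical identifier): per the checks above, the
 * only START STOP UNIT form accepted here is IMMED=1 with POWER CONDITION
 * 0h, LOEJ=0 and START=1; bits 1-2 of byte 4 must be clear.
 */
static const unsigned char example_startstop_cdb[6] __maybe_unused = {
	0x1b,		/* START STOP UNIT opcode */
	0x01,		/* IMMED=1 */
	0x00, 0x00,	/* reserved, POWER CONDITION MODIFIER 0h */
	0x01,		/* POWER CONDITION 0h, LOEJ=0, START=1 */
	0x00,		/* CONTROL */
};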
    168
    169sector_t sbc_get_write_same_sectors(struct se_cmd *cmd)
    170{
    171	u32 num_blocks;
    172
    173	if (cmd->t_task_cdb[0] == WRITE_SAME)
    174		num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]);
    175	else if (cmd->t_task_cdb[0] == WRITE_SAME_16)
    176		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
    177	else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */
    178		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);
    179
    180	/*
     181	 * Use the explicit range when a non-zero value is supplied; otherwise
     182	 * calculate the remaining range based on ->get_blocks() - starting LBA.
    183	 */
    184	if (num_blocks)
    185		return num_blocks;
    186
    187	return cmd->se_dev->transport->get_blocks(cmd->se_dev) -
    188		cmd->t_task_lba + 1;
    189}
    190EXPORT_SYMBOL(sbc_get_write_same_sectors);
    191
    192static sense_reason_t
    193sbc_execute_write_same_unmap(struct se_cmd *cmd)
    194{
    195	struct sbc_ops *ops = cmd->protocol_data;
    196	sector_t nolb = sbc_get_write_same_sectors(cmd);
    197	sense_reason_t ret;
    198
    199	if (nolb) {
    200		ret = ops->execute_unmap(cmd, cmd->t_task_lba, nolb);
    201		if (ret)
    202			return ret;
    203	}
    204
    205	target_complete_cmd(cmd, SAM_STAT_GOOD);
    206	return 0;
    207}
    208
    209static sense_reason_t
    210sbc_emulate_noop(struct se_cmd *cmd)
    211{
    212	target_complete_cmd(cmd, SAM_STAT_GOOD);
    213	return 0;
    214}
    215
    216static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
    217{
    218	return cmd->se_dev->dev_attrib.block_size * sectors;
    219}
    220
    221static inline u32 transport_get_sectors_6(unsigned char *cdb)
    222{
    223	/*
    224	 * Use 8-bit sector value.  SBC-3 says:
    225	 *
    226	 *   A TRANSFER LENGTH field set to zero specifies that 256
    227	 *   logical blocks shall be written.  Any other value
    228	 *   specifies the number of logical blocks that shall be
    229	 *   written.
    230	 */
    231	return cdb[4] ? : 256;
    232}
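/*
 * Editor's note: "cdb[4] ? : 256" is the GCC "x ?: y" extension, yielding
 * cdb[4] when it is non-zero and 256 otherwise, which implements the SBC
 * rule quoted above.
 */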
    233
    234static inline u32 transport_get_sectors_10(unsigned char *cdb)
    235{
    236	return get_unaligned_be16(&cdb[7]);
    237}
    238
    239static inline u32 transport_get_sectors_12(unsigned char *cdb)
    240{
    241	return get_unaligned_be32(&cdb[6]);
    242}
    243
    244static inline u32 transport_get_sectors_16(unsigned char *cdb)
    245{
    246	return get_unaligned_be32(&cdb[10]);
    247}
    248
    249/*
    250 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
    251 */
    252static inline u32 transport_get_sectors_32(unsigned char *cdb)
    253{
    254	return get_unaligned_be32(&cdb[28]);
    255
    256}
    257
    258static inline u32 transport_lba_21(unsigned char *cdb)
    259{
    260	return get_unaligned_be24(&cdb[1]) & 0x1fffff;
    261}
    262
    263static inline u32 transport_lba_32(unsigned char *cdb)
    264{
    265	return get_unaligned_be32(&cdb[2]);
    266}
    267
    268static inline unsigned long long transport_lba_64(unsigned char *cdb)
    269{
    270	return get_unaligned_be64(&cdb[2]);
    271}
    272
    273/*
    274 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
    275 */
    276static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
    277{
    278	return get_unaligned_be64(&cdb[12]);
    279}
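/*
 * Editor's summary of the helpers above, i.e. where each CDB variant
 * keeps its LBA and TRANSFER LENGTH fields:
 *
 *	 6-byte:	21-bit LBA, bytes 1-3;	 8-bit length, byte 4
 *	10-byte:	32-bit LBA, bytes 2-5;	16-bit length, bytes 7-8
 *	12-byte:	32-bit LBA, bytes 2-5;	32-bit length, bytes 6-9
 *	16-byte:	64-bit LBA, bytes 2-9;	32-bit length, bytes 10-13
 *	32-byte:	64-bit LBA, bytes 12-19; 32-bit length, bytes 28-31
 */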
    280
    281static sense_reason_t
    282sbc_setup_write_same(struct se_cmd *cmd, unsigned char flags, struct sbc_ops *ops)
    283{
    284	struct se_device *dev = cmd->se_dev;
    285	sector_t end_lba = dev->transport->get_blocks(dev) + 1;
    286	unsigned int sectors = sbc_get_write_same_sectors(cmd);
    287	sense_reason_t ret;
    288
    289	if ((flags & 0x04) || (flags & 0x02)) {
    290		pr_err("WRITE_SAME PBDATA and LBDATA"
    291			" bits not supported for Block Discard"
    292			" Emulation\n");
    293		return TCM_UNSUPPORTED_SCSI_OPCODE;
    294	}
    295	if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) {
    296		pr_warn("WRITE_SAME sectors: %u exceeds max_write_same_len: %u\n",
    297			sectors, cmd->se_dev->dev_attrib.max_write_same_len);
    298		return TCM_INVALID_CDB_FIELD;
    299	}
    300	/*
    301	 * Sanity check for LBA wrap and request past end of device.
    302	 */
    303	if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
    304	    ((cmd->t_task_lba + sectors) > end_lba)) {
    305		pr_err("WRITE_SAME exceeds last lba %llu (lba %llu, sectors %u)\n",
    306		       (unsigned long long)end_lba, cmd->t_task_lba, sectors);
    307		return TCM_ADDRESS_OUT_OF_RANGE;
    308	}
    309
    310	/* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
    311	if (flags & 0x10) {
    312		pr_warn("WRITE SAME with ANCHOR not supported\n");
    313		return TCM_INVALID_CDB_FIELD;
    314	}
    315	/*
    316	 * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting
    317	 * translated into block discard requests within backend code.
    318	 */
    319	if (flags & 0x08) {
    320		if (!ops->execute_unmap)
    321			return TCM_UNSUPPORTED_SCSI_OPCODE;
    322
    323		if (!dev->dev_attrib.emulate_tpws) {
    324			pr_err("Got WRITE_SAME w/ UNMAP=1, but backend device"
    325			       " has emulate_tpws disabled\n");
    326			return TCM_UNSUPPORTED_SCSI_OPCODE;
    327		}
    328		cmd->execute_cmd = sbc_execute_write_same_unmap;
    329		return 0;
    330	}
    331	if (!ops->execute_write_same)
    332		return TCM_UNSUPPORTED_SCSI_OPCODE;
    333
    334	ret = sbc_check_prot(dev, cmd, flags >> 5, sectors, true);
    335	if (ret)
    336		return ret;
    337
    338	cmd->execute_cmd = ops->execute_write_same;
    339	return 0;
    340}
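/*
 * Editor's worked example (hypothetical values): the flags argument is
 * cdb[1] (or cdb[10] for WRITE_SAME_32), with WRPROTECT in bits 7:5,
 * ANCHOR 0x10, UNMAP 0x08, PBDATA 0x04 and LBDATA 0x02 as checked above.
 * flags == 0x08 therefore takes the discard path through
 * sbc_execute_write_same_unmap(), while flags == 0x20 (WRPROTECT=1) keeps
 * ops->execute_write_same after sbc_check_prot() validates protection.
 */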
    341
    342static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success,
    343					   int *post_ret)
    344{
    345	unsigned char *buf, *addr;
    346	struct scatterlist *sg;
    347	unsigned int offset;
    348	sense_reason_t ret = TCM_NO_SENSE;
    349	int i, count;
    350
    351	if (!success)
    352		return 0;
    353
    354	/*
    355	 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
    356	 *
    357	 * 1) read the specified logical block(s);
    358	 * 2) transfer logical blocks from the data-out buffer;
    359	 * 3) XOR the logical blocks transferred from the data-out buffer with
    360	 *    the logical blocks read, storing the resulting XOR data in a buffer;
    361	 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
    362	 *    blocks transferred from the data-out buffer; and
    363	 * 5) transfer the resulting XOR data to the data-in buffer.
    364	 */
    365	buf = kmalloc(cmd->data_length, GFP_KERNEL);
    366	if (!buf) {
    367		pr_err("Unable to allocate xor_callback buf\n");
    368		return TCM_OUT_OF_RESOURCES;
    369	}
    370	/*
    371	 * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
    372	 * into the locally allocated *buf
    373	 */
    374	sg_copy_to_buffer(cmd->t_data_sg,
    375			  cmd->t_data_nents,
    376			  buf,
    377			  cmd->data_length);
    378
    379	/*
    380	 * Now perform the XOR against the BIDI read memory located at
     381	 * cmd->t_bidi_data_sg
    382	 */
    383
    384	offset = 0;
    385	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
    386		addr = kmap_atomic(sg_page(sg));
    387		if (!addr) {
    388			ret = TCM_OUT_OF_RESOURCES;
    389			goto out;
    390		}
    391
    392		for (i = 0; i < sg->length; i++)
    393			*(addr + sg->offset + i) ^= *(buf + offset + i);
    394
    395		offset += sg->length;
    396		kunmap_atomic(addr);
    397	}
    398
    399out:
    400	kfree(buf);
    401	return ret;
    402}
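/*
 * Editor's worked example: a read byte of 0xa5 XORed with a data-out byte
 * of 0x0f becomes 0xa5 ^ 0x0f = 0xaa, stored in place in the BIDI
 * scatterlist at cmd->t_bidi_data_sg and later transferred to the
 * initiator's data-in buffer.
 */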
    403
    404static sense_reason_t
    405sbc_execute_rw(struct se_cmd *cmd)
    406{
    407	struct sbc_ops *ops = cmd->protocol_data;
    408
    409	return ops->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents,
    410			       cmd->data_direction);
    411}
    412
    413static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
    414					     int *post_ret)
    415{
    416	struct se_device *dev = cmd->se_dev;
    417	sense_reason_t ret = TCM_NO_SENSE;
    418
    419	spin_lock_irq(&cmd->t_state_lock);
    420	if (success) {
    421		*post_ret = 1;
    422
    423		if (cmd->scsi_status == SAM_STAT_CHECK_CONDITION)
    424			ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
    425	}
    426	spin_unlock_irq(&cmd->t_state_lock);
    427
    428	/*
    429	 * Unlock ->caw_sem originally obtained during sbc_compare_and_write()
    430	 * before the original READ I/O submission.
    431	 */
    432	up(&dev->caw_sem);
    433
    434	return ret;
    435}
    436
    437/*
    438 * compare @cmp_len bytes of @read_sgl with @cmp_sgl. On miscompare, fill
    439 * @miscmp_off and return TCM_MISCOMPARE_VERIFY.
    440 */
    441static sense_reason_t
    442compare_and_write_do_cmp(struct scatterlist *read_sgl, unsigned int read_nents,
    443			 struct scatterlist *cmp_sgl, unsigned int cmp_nents,
    444			 unsigned int cmp_len, unsigned int *miscmp_off)
    445{
    446	unsigned char *buf = NULL;
    447	struct scatterlist *sg;
    448	sense_reason_t ret;
    449	unsigned int offset;
    450	size_t rc;
    451	int sg_cnt;
    452
    453	buf = kzalloc(cmp_len, GFP_KERNEL);
    454	if (!buf) {
    455		ret = TCM_OUT_OF_RESOURCES;
    456		goto out;
    457	}
    458
    459	rc = sg_copy_to_buffer(cmp_sgl, cmp_nents, buf, cmp_len);
    460	if (!rc) {
    461		pr_err("sg_copy_to_buffer() failed for compare_and_write\n");
    462		ret = TCM_OUT_OF_RESOURCES;
    463		goto out;
    464	}
    465	/*
    466	 * Compare SCSI READ payload against verify payload
    467	 */
    468	offset = 0;
    469	ret = TCM_NO_SENSE;
    470	for_each_sg(read_sgl, sg, read_nents, sg_cnt) {
    471		unsigned int len = min(sg->length, cmp_len);
    472		unsigned char *addr = kmap_atomic(sg_page(sg));
    473
    474		if (memcmp(addr, buf + offset, len)) {
    475			unsigned int i;
    476
    477			for (i = 0; i < len && addr[i] == buf[offset + i]; i++)
    478				;
    479			*miscmp_off = offset + i;
    480			pr_warn("Detected MISCOMPARE at offset %u\n",
    481				*miscmp_off);
    482			ret = TCM_MISCOMPARE_VERIFY;
    483		}
    484		kunmap_atomic(addr);
    485		if (ret != TCM_NO_SENSE)
    486			goto out;
    487
    488		offset += len;
    489		cmp_len -= len;
    490		if (!cmp_len)
    491			break;
    492	}
    493	pr_debug("COMPARE AND WRITE read data matches compare data\n");
    494out:
    495	kfree(buf);
    496	return ret;
    497}
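/*
 * Editor's worked example: comparing read data "ABCD" against compare
 * payload "ABXD" miscompares at the third byte, so *miscmp_off is set to
 * 2 and TCM_MISCOMPARE_VERIFY is returned; compare_and_write_callback()
 * below then reports that offset via the sense INFORMATION field.
 */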
    498
    499static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success,
    500						 int *post_ret)
    501{
    502	struct se_device *dev = cmd->se_dev;
    503	struct sg_table write_tbl = { };
    504	struct scatterlist *write_sg;
    505	struct sg_mapping_iter m;
    506	unsigned int len;
    507	unsigned int block_size = dev->dev_attrib.block_size;
    508	unsigned int compare_len = (cmd->t_task_nolb * block_size);
    509	unsigned int miscmp_off = 0;
    510	sense_reason_t ret = TCM_NO_SENSE;
    511	int i;
    512
    513	/*
    514	 * Handle early failure in transport_generic_request_failure(),
     515	 * which will not have taken ->caw_sem yet.
    516	 */
    517	if (!success && (!cmd->t_data_sg || !cmd->t_bidi_data_sg))
    518		return TCM_NO_SENSE;
    519	/*
    520	 * Handle special case for zero-length COMPARE_AND_WRITE
    521	 */
    522	if (!cmd->data_length)
    523		goto out;
    524	/*
    525	 * Immediately exit + release dev->caw_sem if command has already
    526	 * been failed with a non-zero SCSI status.
    527	 */
    528	if (cmd->scsi_status) {
     529		pr_debug("compare_and_write_callback: non-zero scsi_status:"
    530			" 0x%02x\n", cmd->scsi_status);
    531		*post_ret = 1;
    532		if (cmd->scsi_status == SAM_STAT_CHECK_CONDITION)
    533			ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
    534		goto out;
    535	}
    536
    537	ret = compare_and_write_do_cmp(cmd->t_bidi_data_sg,
    538				       cmd->t_bidi_data_nents,
    539				       cmd->t_data_sg,
    540				       cmd->t_data_nents,
    541				       compare_len,
    542				       &miscmp_off);
    543	if (ret == TCM_MISCOMPARE_VERIFY) {
    544		/*
    545		 * SBC-4 r15: 5.3 COMPARE AND WRITE command
    546		 * In the sense data (see 4.18 and SPC-5) the offset from the
    547		 * start of the Data-Out Buffer to the first byte of data that
    548		 * was not equal shall be reported in the INFORMATION field.
    549		 */
    550		cmd->sense_info = miscmp_off;
    551		goto out;
    552	} else if (ret)
    553		goto out;
    554
    555	if (sg_alloc_table(&write_tbl, cmd->t_data_nents, GFP_KERNEL) < 0) {
    556		pr_err("Unable to allocate compare_and_write sg\n");
    557		ret = TCM_OUT_OF_RESOURCES;
    558		goto out;
    559	}
    560	write_sg = write_tbl.sgl;
    561
    562	i = 0;
    563	len = compare_len;
    564	sg_miter_start(&m, cmd->t_data_sg, cmd->t_data_nents, SG_MITER_TO_SG);
    565	/*
     566	 * Currently assumes NoLB=1 and PAGE_SIZE-sized SGL entries.
    567	 */
    568	while (len) {
    569		sg_miter_next(&m);
    570
    571		if (block_size < PAGE_SIZE) {
    572			sg_set_page(&write_sg[i], m.page, block_size,
    573				    m.piter.sg->offset + block_size);
    574		} else {
    575			sg_miter_next(&m);
    576			sg_set_page(&write_sg[i], m.page, block_size,
    577				    m.piter.sg->offset);
    578		}
    579		len -= block_size;
    580		i++;
    581	}
    582	sg_miter_stop(&m);
    583	/*
    584	 * Save the original SGL + nents values before updating to new
    585	 * assignments, to be released in transport_free_pages() ->
    586	 * transport_reset_sgl_orig()
    587	 */
    588	cmd->t_data_sg_orig = cmd->t_data_sg;
    589	cmd->t_data_sg = write_sg;
    590	cmd->t_data_nents_orig = cmd->t_data_nents;
    591	cmd->t_data_nents = 1;
    592
    593	cmd->sam_task_attr = TCM_HEAD_TAG;
    594	cmd->transport_complete_callback = compare_and_write_post;
    595	/*
    596	 * Now reset ->execute_cmd() to the normal sbc_execute_rw() handler
    597	 * for submitting the adjusted SGL to write instance user-data.
    598	 */
    599	cmd->execute_cmd = sbc_execute_rw;
    600
    601	spin_lock_irq(&cmd->t_state_lock);
    602	cmd->t_state = TRANSPORT_PROCESSING;
    603	cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT;
    604	spin_unlock_irq(&cmd->t_state_lock);
    605
    606	__target_execute_cmd(cmd, false);
    607
    608	return ret;
    609
    610out:
    611	/*
    612	 * In the MISCOMPARE or failure case, unlock ->caw_sem obtained in
    613	 * sbc_compare_and_write() before the original READ I/O submission.
    614	 */
    615	up(&dev->caw_sem);
    616	sg_free_table(&write_tbl);
    617	return ret;
    618}
    619
    620static sense_reason_t
    621sbc_compare_and_write(struct se_cmd *cmd)
    622{
    623	struct sbc_ops *ops = cmd->protocol_data;
    624	struct se_device *dev = cmd->se_dev;
    625	sense_reason_t ret;
    626	int rc;
    627	/*
    628	 * Submit the READ first for COMPARE_AND_WRITE to perform the
     629	 * comparison using SGLs at cmd->t_bidi_data_sg.
    630	 */
    631	rc = down_interruptible(&dev->caw_sem);
    632	if (rc != 0) {
    633		cmd->transport_complete_callback = NULL;
    634		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
    635	}
    636	/*
    637	 * Reset cmd->data_length to individual block_size in order to not
    638	 * confuse backend drivers that depend on this value matching the
    639	 * size of the I/O being submitted.
    640	 */
    641	cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size;
    642
    643	ret = ops->execute_rw(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents,
    644			      DMA_FROM_DEVICE);
    645	if (ret) {
    646		cmd->transport_complete_callback = NULL;
    647		up(&dev->caw_sem);
    648		return ret;
    649	}
    650	/*
    651	 * Unlock of dev->caw_sem to occur in compare_and_write_callback()
    652	 * upon MISCOMPARE, or in compare_and_write_done() upon completion
    653	 * of WRITE instance user-data.
    654	 */
    655	return TCM_NO_SENSE;
    656}
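/*
 * Editor's summary of the dev->caw_sem lifecycle in this file:
 *
 *	sbc_compare_and_write()		down_interruptible(&dev->caw_sem)
 *	compare_and_write_callback()	up() on miscompare/failure, else
 *					keeps the semaphore across the WRITE
 *	compare_and_write_post()	up() once the WRITE completes
 *
 * Holding the semaphore across read-compare-write is what serializes
 * COMPARE AND WRITE commands against each other on a device.
 */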
    657
    658static int
    659sbc_set_prot_op_checks(u8 protect, bool fabric_prot, enum target_prot_type prot_type,
    660		       bool is_write, struct se_cmd *cmd)
    661{
    662	if (is_write) {
    663		cmd->prot_op = fabric_prot ? TARGET_PROT_DOUT_STRIP :
    664			       protect ? TARGET_PROT_DOUT_PASS :
    665			       TARGET_PROT_DOUT_INSERT;
    666		switch (protect) {
    667		case 0x0:
    668		case 0x3:
    669			cmd->prot_checks = 0;
    670			break;
    671		case 0x1:
    672		case 0x5:
    673			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
    674			if (prot_type == TARGET_DIF_TYPE1_PROT)
    675				cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
    676			break;
    677		case 0x2:
    678			if (prot_type == TARGET_DIF_TYPE1_PROT)
    679				cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
    680			break;
    681		case 0x4:
    682			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
    683			break;
    684		default:
    685			pr_err("Unsupported protect field %d\n", protect);
    686			return -EINVAL;
    687		}
    688	} else {
    689		cmd->prot_op = fabric_prot ? TARGET_PROT_DIN_INSERT :
    690			       protect ? TARGET_PROT_DIN_PASS :
    691			       TARGET_PROT_DIN_STRIP;
    692		switch (protect) {
    693		case 0x0:
    694		case 0x1:
    695		case 0x5:
    696			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
    697			if (prot_type == TARGET_DIF_TYPE1_PROT)
    698				cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
    699			break;
    700		case 0x2:
    701			if (prot_type == TARGET_DIF_TYPE1_PROT)
    702				cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
    703			break;
    704		case 0x3:
    705			cmd->prot_checks = 0;
    706			break;
    707		case 0x4:
    708			cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
    709			break;
    710		default:
    711			pr_err("Unsupported protect field %d\n", protect);
    712			return -EINVAL;
    713		}
    714	}
    715
    716	return 0;
    717}
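/*
 * Editor's reference table, transcribed from the switch above (REFTAG is
 * only added when prot_type is TARGET_DIF_TYPE1_PROT):
 *
 *	protect 0x0:	write: no checks	read: GUARD [+REFTAG]
 *	protect 0x1:	GUARD [+REFTAG]
 *	protect 0x2:	[REFTAG]
 *	protect 0x3:	no checks
 *	protect 0x4:	GUARD
 *	protect 0x5:	GUARD [+REFTAG]
 */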
    718
    719static sense_reason_t
    720sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char protect,
    721	       u32 sectors, bool is_write)
    722{
    723	int sp_ops = cmd->se_sess->sup_prot_ops;
    724	int pi_prot_type = dev->dev_attrib.pi_prot_type;
    725	bool fabric_prot = false;
    726
    727	if (!cmd->t_prot_sg || !cmd->t_prot_nents) {
    728		if (unlikely(protect &&
    729		    !dev->dev_attrib.pi_prot_type && !cmd->se_sess->sess_prot_type)) {
    730			pr_err("CDB contains protect bit, but device + fabric does"
    731			       " not advertise PROTECT=1 feature bit\n");
    732			return TCM_INVALID_CDB_FIELD;
    733		}
    734		if (cmd->prot_pto)
    735			return TCM_NO_SENSE;
    736	}
    737
    738	switch (dev->dev_attrib.pi_prot_type) {
    739	case TARGET_DIF_TYPE3_PROT:
    740		cmd->reftag_seed = 0xffffffff;
    741		break;
    742	case TARGET_DIF_TYPE2_PROT:
    743		if (protect)
    744			return TCM_INVALID_CDB_FIELD;
    745
    746		cmd->reftag_seed = cmd->t_task_lba;
    747		break;
    748	case TARGET_DIF_TYPE1_PROT:
    749		cmd->reftag_seed = cmd->t_task_lba;
    750		break;
    751	case TARGET_DIF_TYPE0_PROT:
    752		/*
    753		 * See if the fabric supports T10-PI, and the session has been
     754		 * configured to allow exporting the PROTECT=1 feature bit with
     755		 * backend devices that don't support T10-PI.
    756		 */
    757		fabric_prot = is_write ?
    758			      !!(sp_ops & (TARGET_PROT_DOUT_PASS | TARGET_PROT_DOUT_STRIP)) :
    759			      !!(sp_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DIN_INSERT));
    760
    761		if (fabric_prot && cmd->se_sess->sess_prot_type) {
    762			pi_prot_type = cmd->se_sess->sess_prot_type;
    763			break;
    764		}
    765		if (!protect)
    766			return TCM_NO_SENSE;
    767		fallthrough;
    768	default:
    769		pr_err("Unable to determine pi_prot_type for CDB: 0x%02x "
    770		       "PROTECT: 0x%02x\n", cmd->t_task_cdb[0], protect);
    771		return TCM_INVALID_CDB_FIELD;
    772	}
    773
    774	if (sbc_set_prot_op_checks(protect, fabric_prot, pi_prot_type, is_write, cmd))
    775		return TCM_INVALID_CDB_FIELD;
    776
    777	cmd->prot_type = pi_prot_type;
    778	cmd->prot_length = dev->prot_length * sectors;
    779
     780	/*
     781	 * If protection information is transferred over the wire, we adjust
     782	 * the command data length to describe only the user data; the actual
     783	 * transfer length is the data length plus the protection
     784	 * length.
     785	 */
    786	if (protect)
    787		cmd->data_length = sectors * dev->dev_attrib.block_size;
    788
    789	pr_debug("%s: prot_type=%d, data_length=%d, prot_length=%d "
    790		 "prot_op=%d prot_checks=%d\n",
    791		 __func__, cmd->prot_type, cmd->data_length, cmd->prot_length,
    792		 cmd->prot_op, cmd->prot_checks);
    793
    794	return TCM_NO_SENSE;
    795}
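/*
 * Editor's worked example (assuming 512-byte blocks and dev->prot_length
 * of 8 bytes per sector): a 10-sector WRITE(10) with WRPROTECT=1 gets
 * prot_length = 10 * 8 = 80 and data_length trimmed to 10 * 512 = 5120,
 * so the 5200 bytes on the wire split into user data plus protection
 * information carried via cmd->t_prot_sg.
 */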
    796
    797static int
    798sbc_check_dpofua(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb)
    799{
    800	if (cdb[1] & 0x10) {
    801		/* see explanation in spc_emulate_modesense */
    802		if (!target_check_fua(dev)) {
    803			pr_err("Got CDB: 0x%02x with DPO bit set, but device"
    804			       " does not advertise support for DPO\n", cdb[0]);
    805			return -EINVAL;
    806		}
    807	}
    808	if (cdb[1] & 0x8) {
    809		if (!target_check_fua(dev)) {
    810			pr_err("Got CDB: 0x%02x with FUA bit set, but device"
    811			       " does not advertise support for FUA write\n",
    812			       cdb[0]);
    813			return -EINVAL;
    814		}
    815		cmd->se_cmd_flags |= SCF_FUA;
    816	}
    817	return 0;
    818}
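/*
 * Editor's note: byte 1 of these CDBs carries DPO in bit 4 (0x10) and FUA
 * in bit 3 (0x08); both are rejected unless target_check_fua() reports
 * support, and an accepted FUA additionally tags the command with
 * SCF_FUA.
 */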
    819
    820sense_reason_t
    821sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
    822{
    823	struct se_device *dev = cmd->se_dev;
    824	unsigned char *cdb = cmd->t_task_cdb;
    825	unsigned int size;
    826	u32 sectors = 0;
    827	sense_reason_t ret;
    828
    829	cmd->protocol_data = ops;
    830
    831	switch (cdb[0]) {
    832	case READ_6:
    833		sectors = transport_get_sectors_6(cdb);
    834		cmd->t_task_lba = transport_lba_21(cdb);
    835		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
    836		cmd->execute_cmd = sbc_execute_rw;
    837		break;
    838	case READ_10:
    839		sectors = transport_get_sectors_10(cdb);
    840		cmd->t_task_lba = transport_lba_32(cdb);
    841
    842		if (sbc_check_dpofua(dev, cmd, cdb))
    843			return TCM_INVALID_CDB_FIELD;
    844
    845		ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false);
    846		if (ret)
    847			return ret;
    848
    849		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
    850		cmd->execute_cmd = sbc_execute_rw;
    851		break;
    852	case READ_12:
    853		sectors = transport_get_sectors_12(cdb);
    854		cmd->t_task_lba = transport_lba_32(cdb);
    855
    856		if (sbc_check_dpofua(dev, cmd, cdb))
    857			return TCM_INVALID_CDB_FIELD;
    858
    859		ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false);
    860		if (ret)
    861			return ret;
    862
    863		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
    864		cmd->execute_cmd = sbc_execute_rw;
    865		break;
    866	case READ_16:
    867		sectors = transport_get_sectors_16(cdb);
    868		cmd->t_task_lba = transport_lba_64(cdb);
    869
    870		if (sbc_check_dpofua(dev, cmd, cdb))
    871			return TCM_INVALID_CDB_FIELD;
    872
    873		ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false);
    874		if (ret)
    875			return ret;
    876
    877		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
    878		cmd->execute_cmd = sbc_execute_rw;
    879		break;
    880	case WRITE_6:
    881		sectors = transport_get_sectors_6(cdb);
    882		cmd->t_task_lba = transport_lba_21(cdb);
    883		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
    884		cmd->execute_cmd = sbc_execute_rw;
    885		break;
    886	case WRITE_10:
    887	case WRITE_VERIFY:
    888		sectors = transport_get_sectors_10(cdb);
    889		cmd->t_task_lba = transport_lba_32(cdb);
    890
    891		if (sbc_check_dpofua(dev, cmd, cdb))
    892			return TCM_INVALID_CDB_FIELD;
    893
    894		ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true);
    895		if (ret)
    896			return ret;
    897
    898		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
    899		cmd->execute_cmd = sbc_execute_rw;
    900		break;
    901	case WRITE_12:
    902		sectors = transport_get_sectors_12(cdb);
    903		cmd->t_task_lba = transport_lba_32(cdb);
    904
    905		if (sbc_check_dpofua(dev, cmd, cdb))
    906			return TCM_INVALID_CDB_FIELD;
    907
    908		ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true);
    909		if (ret)
    910			return ret;
    911
    912		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
    913		cmd->execute_cmd = sbc_execute_rw;
    914		break;
    915	case WRITE_16:
    916	case WRITE_VERIFY_16:
    917		sectors = transport_get_sectors_16(cdb);
    918		cmd->t_task_lba = transport_lba_64(cdb);
    919
    920		if (sbc_check_dpofua(dev, cmd, cdb))
    921			return TCM_INVALID_CDB_FIELD;
    922
    923		ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true);
    924		if (ret)
    925			return ret;
    926
    927		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
    928		cmd->execute_cmd = sbc_execute_rw;
    929		break;
    930	case XDWRITEREAD_10:
    931		if (cmd->data_direction != DMA_TO_DEVICE ||
    932		    !(cmd->se_cmd_flags & SCF_BIDI))
    933			return TCM_INVALID_CDB_FIELD;
    934		sectors = transport_get_sectors_10(cdb);
    935
    936		if (sbc_check_dpofua(dev, cmd, cdb))
    937			return TCM_INVALID_CDB_FIELD;
    938
    939		cmd->t_task_lba = transport_lba_32(cdb);
    940		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
    941
    942		/*
     943		 * Set up the BIDI XOR callback to be run after I/O completion.
    944		 */
    945		cmd->execute_cmd = sbc_execute_rw;
    946		cmd->transport_complete_callback = &xdreadwrite_callback;
    947		break;
    948	case VARIABLE_LENGTH_CMD:
    949	{
    950		u16 service_action = get_unaligned_be16(&cdb[8]);
    951		switch (service_action) {
    952		case XDWRITEREAD_32:
    953			sectors = transport_get_sectors_32(cdb);
    954
    955			if (sbc_check_dpofua(dev, cmd, cdb))
    956				return TCM_INVALID_CDB_FIELD;
    957			/*
    958			 * Use WRITE_32 and READ_32 opcodes for the emulated
    959			 * XDWRITE_READ_32 logic.
    960			 */
    961			cmd->t_task_lba = transport_lba_64_ext(cdb);
    962			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
    963
    964			/*
     965			 * Set up the BIDI XOR callback to be run after I/O
     966			 * completion.
    967			 */
    968			cmd->execute_cmd = sbc_execute_rw;
    969			cmd->transport_complete_callback = &xdreadwrite_callback;
    970			break;
    971		case WRITE_SAME_32:
    972			sectors = transport_get_sectors_32(cdb);
    973			if (!sectors) {
    974				pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
    975				       " supported\n");
    976				return TCM_INVALID_CDB_FIELD;
    977			}
    978
    979			size = sbc_get_size(cmd, 1);
    980			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
    981
    982			ret = sbc_setup_write_same(cmd, cdb[10], ops);
    983			if (ret)
    984				return ret;
    985			break;
    986		default:
    987			pr_err("VARIABLE_LENGTH_CMD service action"
    988				" 0x%04x not supported\n", service_action);
    989			return TCM_UNSUPPORTED_SCSI_OPCODE;
    990		}
    991		break;
    992	}
    993	case COMPARE_AND_WRITE:
    994		if (!dev->dev_attrib.emulate_caw) {
    995			pr_err_ratelimited("se_device %s/%s (vpd_unit_serial %s) reject COMPARE_AND_WRITE\n",
    996					   dev->se_hba->backend->ops->name,
    997					   config_item_name(&dev->dev_group.cg_item),
    998					   dev->t10_wwn.unit_serial);
    999			return TCM_UNSUPPORTED_SCSI_OPCODE;
   1000		}
   1001		sectors = cdb[13];
   1002		/*
    1003		 * COMPARE_AND_WRITE is currently limited to a single sector
   1004		 */
   1005		if (sectors > 1) {
   1006			pr_err("COMPARE_AND_WRITE contains NoLB: %u greater"
   1007			       " than 1\n", sectors);
   1008			return TCM_INVALID_CDB_FIELD;
   1009		}
   1010		if (sbc_check_dpofua(dev, cmd, cdb))
   1011			return TCM_INVALID_CDB_FIELD;
   1012
   1013		/*
    1014		 * Double the size because we have two buffers; note that
    1015		 * zero is not an error.
   1016		 */
   1017		size = 2 * sbc_get_size(cmd, sectors);
   1018		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
   1019		cmd->t_task_nolb = sectors;
   1020		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB | SCF_COMPARE_AND_WRITE;
   1021		cmd->execute_cmd = sbc_compare_and_write;
   1022		cmd->transport_complete_callback = compare_and_write_callback;
   1023		break;
   1024	case READ_CAPACITY:
   1025		size = READ_CAP_LEN;
   1026		cmd->execute_cmd = sbc_emulate_readcapacity;
   1027		break;
   1028	case SERVICE_ACTION_IN_16:
   1029		switch (cmd->t_task_cdb[1] & 0x1f) {
   1030		case SAI_READ_CAPACITY_16:
   1031			cmd->execute_cmd = sbc_emulate_readcapacity_16;
   1032			break;
   1033		case SAI_REPORT_REFERRALS:
   1034			cmd->execute_cmd = target_emulate_report_referrals;
   1035			break;
   1036		default:
   1037			pr_err("Unsupported SA: 0x%02x\n",
   1038				cmd->t_task_cdb[1] & 0x1f);
   1039			return TCM_INVALID_CDB_FIELD;
   1040		}
   1041		size = get_unaligned_be32(&cdb[10]);
   1042		break;
   1043	case SYNCHRONIZE_CACHE:
   1044	case SYNCHRONIZE_CACHE_16:
   1045		if (cdb[0] == SYNCHRONIZE_CACHE) {
   1046			sectors = transport_get_sectors_10(cdb);
   1047			cmd->t_task_lba = transport_lba_32(cdb);
   1048		} else {
   1049			sectors = transport_get_sectors_16(cdb);
   1050			cmd->t_task_lba = transport_lba_64(cdb);
   1051		}
   1052		if (ops->execute_sync_cache) {
   1053			cmd->execute_cmd = ops->execute_sync_cache;
   1054			goto check_lba;
   1055		}
   1056		size = 0;
   1057		cmd->execute_cmd = sbc_emulate_noop;
   1058		break;
   1059	case UNMAP:
   1060		if (!ops->execute_unmap)
   1061			return TCM_UNSUPPORTED_SCSI_OPCODE;
   1062
   1063		if (!dev->dev_attrib.emulate_tpu) {
   1064			pr_err("Got UNMAP, but backend device has"
   1065			       " emulate_tpu disabled\n");
   1066			return TCM_UNSUPPORTED_SCSI_OPCODE;
   1067		}
   1068		size = get_unaligned_be16(&cdb[7]);
   1069		cmd->execute_cmd = sbc_execute_unmap;
   1070		break;
   1071	case WRITE_SAME_16:
   1072		sectors = transport_get_sectors_16(cdb);
   1073		if (!sectors) {
   1074			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
   1075			return TCM_INVALID_CDB_FIELD;
   1076		}
   1077
   1078		size = sbc_get_size(cmd, 1);
   1079		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
   1080
   1081		ret = sbc_setup_write_same(cmd, cdb[1], ops);
   1082		if (ret)
   1083			return ret;
   1084		break;
   1085	case WRITE_SAME:
   1086		sectors = transport_get_sectors_10(cdb);
   1087		if (!sectors) {
   1088			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
   1089			return TCM_INVALID_CDB_FIELD;
   1090		}
   1091
   1092		size = sbc_get_size(cmd, 1);
   1093		cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
   1094
   1095		/*
    1096		 * Follow sbcr26 for WRITE_SAME (10) and check the UNMAP bit in
    1097		 * byte 1 bit 3, which was originally a reserved field
   1098		 */
   1099		ret = sbc_setup_write_same(cmd, cdb[1], ops);
   1100		if (ret)
   1101			return ret;
   1102		break;
   1103	case VERIFY:
   1104	case VERIFY_16:
   1105		size = 0;
   1106		if (cdb[0] == VERIFY) {
   1107			sectors = transport_get_sectors_10(cdb);
   1108			cmd->t_task_lba = transport_lba_32(cdb);
   1109		} else {
   1110			sectors = transport_get_sectors_16(cdb);
   1111			cmd->t_task_lba = transport_lba_64(cdb);
   1112		}
   1113		cmd->execute_cmd = sbc_emulate_noop;
   1114		goto check_lba;
   1115	case REZERO_UNIT:
   1116	case SEEK_6:
   1117	case SEEK_10:
   1118		/*
   1119		 * There are still clients out there which use these old SCSI-2
   1120		 * commands. This mainly happens when running VMs with legacy
   1121		 * guest systems, connected via SCSI command pass-through to
   1122		 * iSCSI targets. Make them happy and return status GOOD.
   1123		 */
   1124		size = 0;
   1125		cmd->execute_cmd = sbc_emulate_noop;
   1126		break;
   1127	case START_STOP:
   1128		size = 0;
   1129		cmd->execute_cmd = sbc_emulate_startstop;
   1130		break;
   1131	default:
   1132		ret = spc_parse_cdb(cmd, &size);
   1133		if (ret)
   1134			return ret;
   1135	}
   1136
   1137	/* reject any command that we don't have a handler for */
   1138	if (!cmd->execute_cmd)
   1139		return TCM_UNSUPPORTED_SCSI_OPCODE;
   1140
   1141	if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
   1142		unsigned long long end_lba;
   1143check_lba:
   1144		end_lba = dev->transport->get_blocks(dev) + 1;
   1145		if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
   1146		    ((cmd->t_task_lba + sectors) > end_lba)) {
   1147			pr_err("cmd exceeds last lba %llu "
   1148				"(lba %llu, sectors %u)\n",
   1149				end_lba, cmd->t_task_lba, sectors);
   1150			return TCM_ADDRESS_OUT_OF_RANGE;
   1151		}
   1152
   1153		if (!(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE))
   1154			size = sbc_get_size(cmd, sectors);
   1155	}
   1156
   1157	return target_cmd_size_check(cmd, size);
   1158}
   1159EXPORT_SYMBOL(sbc_parse_cdb);
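/*
 * Editor's usage sketch (hypothetical backend; only sbc_ops members used
 * elsewhere in this file are referenced): a backend wires its I/O entry
 * points into struct sbc_ops and lets sbc_parse_cdb() validate the CDB
 * and select cmd->execute_cmd.
 */
#if 0	/* illustrative only */
static struct sbc_ops example_sbc_ops = {
	.execute_rw		= example_execute_rw,
	.execute_sync_cache	= example_execute_sync_cache,
	.execute_write_same	= example_execute_write_same,
	.execute_unmap		= example_execute_unmap,
};

static sense_reason_t example_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &example_sbc_ops);
}
#endif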
   1160
   1161u32 sbc_get_device_type(struct se_device *dev)
   1162{
   1163	return TYPE_DISK;
   1164}
   1165EXPORT_SYMBOL(sbc_get_device_type);
   1166
   1167static sense_reason_t
   1168sbc_execute_unmap(struct se_cmd *cmd)
   1169{
   1170	struct sbc_ops *ops = cmd->protocol_data;
   1171	struct se_device *dev = cmd->se_dev;
   1172	unsigned char *buf, *ptr = NULL;
   1173	sector_t lba;
   1174	int size;
   1175	u32 range;
   1176	sense_reason_t ret = 0;
   1177	int dl, bd_dl;
   1178
   1179	/* We never set ANC_SUP */
   1180	if (cmd->t_task_cdb[1])
   1181		return TCM_INVALID_CDB_FIELD;
   1182
   1183	if (cmd->data_length == 0) {
   1184		target_complete_cmd(cmd, SAM_STAT_GOOD);
   1185		return 0;
   1186	}
   1187
   1188	if (cmd->data_length < 8) {
   1189		pr_warn("UNMAP parameter list length %u too small\n",
   1190			cmd->data_length);
   1191		return TCM_PARAMETER_LIST_LENGTH_ERROR;
   1192	}
   1193
   1194	buf = transport_kmap_data_sg(cmd);
   1195	if (!buf)
   1196		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
   1197
   1198	dl = get_unaligned_be16(&buf[0]);
   1199	bd_dl = get_unaligned_be16(&buf[2]);
   1200
   1201	size = cmd->data_length - 8;
   1202	if (bd_dl > size)
   1203		pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n",
   1204			cmd->data_length, bd_dl);
   1205	else
   1206		size = bd_dl;
   1207
   1208	if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
   1209		ret = TCM_INVALID_PARAMETER_LIST;
   1210		goto err;
   1211	}
   1212
   1213	/* First UNMAP block descriptor starts at 8 byte offset */
   1214	ptr = &buf[8];
   1215	pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
   1216		" ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);
   1217
   1218	while (size >= 16) {
   1219		lba = get_unaligned_be64(&ptr[0]);
   1220		range = get_unaligned_be32(&ptr[8]);
   1221		pr_debug("UNMAP: Using lba: %llu and range: %u\n",
   1222				 (unsigned long long)lba, range);
   1223
   1224		if (range > dev->dev_attrib.max_unmap_lba_count) {
   1225			ret = TCM_INVALID_PARAMETER_LIST;
   1226			goto err;
   1227		}
   1228
   1229		if (lba + range > dev->transport->get_blocks(dev) + 1) {
   1230			ret = TCM_ADDRESS_OUT_OF_RANGE;
   1231			goto err;
   1232		}
   1233
   1234		if (range) {
   1235			ret = ops->execute_unmap(cmd, lba, range);
   1236			if (ret)
   1237				goto err;
   1238		}
   1239
   1240		ptr += 16;
   1241		size -= 16;
   1242	}
   1243
   1244err:
   1245	transport_kunmap_data_sg(cmd);
   1246	if (!ret)
   1247		target_complete_cmd(cmd, SAM_STAT_GOOD);
   1248	return ret;
   1249}
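/*
 * Editor's layout note, derived from the parsing above: the UNMAP
 * parameter list has an 8-byte header (bytes 0-1 UNMAP DATA LENGTH "dl",
 * bytes 2-3 BLOCK DESCRIPTOR DATA LENGTH "bd_dl") followed by 16-byte
 * descriptors: 64-bit LBA at bytes 0-7, 32-bit block count at bytes 8-11,
 * 4 reserved bytes. One descriptor unmapping 256 blocks at LBA 4096 gives
 * dl = 22 and bd_dl = 16.
 */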
   1250
   1251void
   1252sbc_dif_generate(struct se_cmd *cmd)
   1253{
   1254	struct se_device *dev = cmd->se_dev;
   1255	struct t10_pi_tuple *sdt;
   1256	struct scatterlist *dsg = cmd->t_data_sg, *psg;
   1257	sector_t sector = cmd->t_task_lba;
   1258	void *daddr, *paddr;
   1259	int i, j, offset = 0;
   1260	unsigned int block_size = dev->dev_attrib.block_size;
   1261
   1262	for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
   1263		paddr = kmap_atomic(sg_page(psg)) + psg->offset;
   1264		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
   1265
   1266		for (j = 0; j < psg->length;
   1267				j += sizeof(*sdt)) {
   1268			__u16 crc;
   1269			unsigned int avail;
   1270
   1271			if (offset >= dsg->length) {
   1272				offset -= dsg->length;
   1273				kunmap_atomic(daddr - dsg->offset);
   1274				dsg = sg_next(dsg);
   1275				if (!dsg) {
   1276					kunmap_atomic(paddr - psg->offset);
   1277					return;
   1278				}
   1279				daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
   1280			}
   1281
   1282			sdt = paddr + j;
   1283			avail = min(block_size, dsg->length - offset);
   1284			crc = crc_t10dif(daddr + offset, avail);
   1285			if (avail < block_size) {
   1286				kunmap_atomic(daddr - dsg->offset);
   1287				dsg = sg_next(dsg);
   1288				if (!dsg) {
   1289					kunmap_atomic(paddr - psg->offset);
   1290					return;
   1291				}
   1292				daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
   1293				offset = block_size - avail;
   1294				crc = crc_t10dif_update(crc, daddr, offset);
   1295			} else {
   1296				offset += block_size;
   1297			}
   1298
   1299			sdt->guard_tag = cpu_to_be16(crc);
   1300			if (cmd->prot_type == TARGET_DIF_TYPE1_PROT)
   1301				sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
   1302			sdt->app_tag = 0;
   1303
   1304			pr_debug("DIF %s INSERT sector: %llu guard_tag: 0x%04x"
   1305				 " app_tag: 0x%04x ref_tag: %u\n",
   1306				 (cmd->data_direction == DMA_TO_DEVICE) ?
   1307				 "WRITE" : "READ", (unsigned long long)sector,
   1308				 sdt->guard_tag, sdt->app_tag,
   1309				 be32_to_cpu(sdt->ref_tag));
   1310
   1311			sector++;
   1312		}
   1313
   1314		kunmap_atomic(daddr - dsg->offset);
   1315		kunmap_atomic(paddr - psg->offset);
   1316	}
   1317}
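/*
 * Editor's note (tuple layout from <linux/t10-pi.h>): each protected
 * sector carries one 8-byte tuple,
 *
 *	struct t10_pi_tuple {
 *		__be16 guard_tag;	// crc_t10dif() over the block
 *		__be16 app_tag;
 *		__be32 ref_tag;		// low 32 bits of the LBA for Type 1
 *	};
 *
 * which is exactly what the loop above stamps for each sector.
 */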
   1318
   1319static sense_reason_t
   1320sbc_dif_v1_verify(struct se_cmd *cmd, struct t10_pi_tuple *sdt,
   1321		  __u16 crc, sector_t sector, unsigned int ei_lba)
   1322{
   1323	__be16 csum;
   1324
   1325	if (!(cmd->prot_checks & TARGET_DIF_CHECK_GUARD))
   1326		goto check_ref;
   1327
   1328	csum = cpu_to_be16(crc);
   1329
   1330	if (sdt->guard_tag != csum) {
   1331		pr_err("DIFv1 checksum failed on sector %llu guard tag 0x%04x"
   1332			" csum 0x%04x\n", (unsigned long long)sector,
   1333			be16_to_cpu(sdt->guard_tag), be16_to_cpu(csum));
   1334		return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
   1335	}
   1336
   1337check_ref:
   1338	if (!(cmd->prot_checks & TARGET_DIF_CHECK_REFTAG))
   1339		return 0;
   1340
   1341	if (cmd->prot_type == TARGET_DIF_TYPE1_PROT &&
   1342	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
   1343		pr_err("DIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x"
   1344		       " sector MSB: 0x%08x\n", (unsigned long long)sector,
   1345		       be32_to_cpu(sdt->ref_tag), (u32)(sector & 0xffffffff));
   1346		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
   1347	}
   1348
   1349	if (cmd->prot_type == TARGET_DIF_TYPE2_PROT &&
   1350	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
   1351		pr_err("DIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x"
   1352		       " ei_lba: 0x%08x\n", (unsigned long long)sector,
   1353			be32_to_cpu(sdt->ref_tag), ei_lba);
   1354		return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
   1355	}
   1356
   1357	return 0;
   1358}
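/*
 * Editor's worked example: for Type 1 with both GUARD and REFTAG checks
 * enabled, a tuple covering sector 0x100000001 must carry
 * ref_tag == 0x00000001 (the low 32 bits) and a guard_tag equal to the
 * recomputed crc_t10dif(), otherwise the command fails with a logical
 * block guard / reference tag check sense code.
 */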
   1359
   1360void sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
   1361		       struct scatterlist *sg, int sg_off)
   1362{
   1363	struct se_device *dev = cmd->se_dev;
   1364	struct scatterlist *psg;
   1365	void *paddr, *addr;
   1366	unsigned int i, len, left;
   1367	unsigned int offset = sg_off;
   1368
   1369	if (!sg)
   1370		return;
   1371
   1372	left = sectors * dev->prot_length;
   1373
   1374	for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
   1375		unsigned int psg_len, copied = 0;
   1376
   1377		paddr = kmap_atomic(sg_page(psg)) + psg->offset;
   1378		psg_len = min(left, psg->length);
   1379		while (psg_len) {
   1380			len = min(psg_len, sg->length - offset);
   1381			addr = kmap_atomic(sg_page(sg)) + sg->offset + offset;
   1382
   1383			if (read)
   1384				memcpy(paddr + copied, addr, len);
   1385			else
   1386				memcpy(addr, paddr + copied, len);
   1387
   1388			left -= len;
   1389			offset += len;
   1390			copied += len;
   1391			psg_len -= len;
   1392
   1393			kunmap_atomic(addr - sg->offset - offset);
   1394
   1395			if (offset >= sg->length) {
   1396				sg = sg_next(sg);
   1397				offset = 0;
   1398			}
   1399		}
   1400		kunmap_atomic(paddr - psg->offset);
   1401	}
   1402}
   1403EXPORT_SYMBOL(sbc_dif_copy_prot);
   1404
   1405sense_reason_t
   1406sbc_dif_verify(struct se_cmd *cmd, sector_t start, unsigned int sectors,
   1407	       unsigned int ei_lba, struct scatterlist *psg, int psg_off)
   1408{
   1409	struct se_device *dev = cmd->se_dev;
   1410	struct t10_pi_tuple *sdt;
   1411	struct scatterlist *dsg = cmd->t_data_sg;
   1412	sector_t sector = start;
   1413	void *daddr, *paddr;
   1414	int i;
   1415	sense_reason_t rc;
   1416	int dsg_off = 0;
   1417	unsigned int block_size = dev->dev_attrib.block_size;
   1418
   1419	for (; psg && sector < start + sectors; psg = sg_next(psg)) {
   1420		paddr = kmap_atomic(sg_page(psg)) + psg->offset;
   1421		daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
   1422
   1423		for (i = psg_off; i < psg->length &&
   1424				sector < start + sectors;
   1425				i += sizeof(*sdt)) {
   1426			__u16 crc;
   1427			unsigned int avail;
   1428
   1429			if (dsg_off >= dsg->length) {
   1430				dsg_off -= dsg->length;
   1431				kunmap_atomic(daddr - dsg->offset);
   1432				dsg = sg_next(dsg);
   1433				if (!dsg) {
   1434					kunmap_atomic(paddr - psg->offset);
   1435					return 0;
   1436				}
   1437				daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
   1438			}
   1439
   1440			sdt = paddr + i;
   1441
   1442			pr_debug("DIF READ sector: %llu guard_tag: 0x%04x"
   1443				 " app_tag: 0x%04x ref_tag: %u\n",
   1444				 (unsigned long long)sector, sdt->guard_tag,
   1445				 sdt->app_tag, be32_to_cpu(sdt->ref_tag));
   1446
   1447			if (sdt->app_tag == T10_PI_APP_ESCAPE) {
   1448				dsg_off += block_size;
   1449				goto next;
   1450			}
   1451
   1452			avail = min(block_size, dsg->length - dsg_off);
   1453			crc = crc_t10dif(daddr + dsg_off, avail);
   1454			if (avail < block_size) {
   1455				kunmap_atomic(daddr - dsg->offset);
   1456				dsg = sg_next(dsg);
   1457				if (!dsg) {
   1458					kunmap_atomic(paddr - psg->offset);
   1459					return 0;
   1460				}
   1461				daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
   1462				dsg_off = block_size - avail;
   1463				crc = crc_t10dif_update(crc, daddr, dsg_off);
   1464			} else {
   1465				dsg_off += block_size;
   1466			}
   1467
   1468			rc = sbc_dif_v1_verify(cmd, sdt, crc, sector, ei_lba);
   1469			if (rc) {
   1470				kunmap_atomic(daddr - dsg->offset);
   1471				kunmap_atomic(paddr - psg->offset);
   1472				cmd->sense_info = sector;
   1473				return rc;
   1474			}
   1475next:
   1476			sector++;
   1477			ei_lba++;
   1478		}
   1479
   1480		psg_off = 0;
   1481		kunmap_atomic(daddr - dsg->offset);
   1482		kunmap_atomic(paddr - psg->offset);
   1483	}
   1484
   1485	return 0;
   1486}
   1487EXPORT_SYMBOL(sbc_dif_verify);