cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

target_core_file.c (24744B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_file.c
 *
 * This file contains the Storage Engine <-> FILEIO transport specific functions
 *
 * (c) Copyright 2005-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/falloc.h>
#include <linux/uio.h>
#include <linux/scatterlist.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_file.h"

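/*
 * struct fd_dev embeds its struct se_device, so the core hands back the
 * generic device pointer and the FILEIO-private state is recovered via
 * container_of().
 */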
static inline struct fd_dev *FD_DEV(struct se_device *dev)
{
	return container_of(dev, struct fd_dev, dev);
}

static int fd_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct fd_host *fd_host;

	fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL);
	if (!fd_host) {
		pr_err("Unable to allocate memory for struct fd_host\n");
		return -ENOMEM;
	}

	fd_host->fd_host_id = host_id;

	hba->hba_ptr = fd_host;

	pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
		" Target Core Stack %s\n", hba->hba_id, FD_VERSION,
		TARGET_CORE_VERSION);
	pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic\n",
		hba->hba_id, fd_host->fd_host_id);

	return 0;
}

static void fd_detach_hba(struct se_hba *hba)
{
	struct fd_host *fd_host = hba->hba_ptr;

	pr_debug("CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
		" Target Core\n", hba->hba_id, fd_host->fd_host_id);

	kfree(fd_host);
	hba->hba_ptr = NULL;
}

static struct se_device *fd_alloc_device(struct se_hba *hba, const char *name)
{
	struct fd_dev *fd_dev;
	struct fd_host *fd_host = hba->hba_ptr;

	fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
	if (!fd_dev) {
		pr_err("Unable to allocate memory for struct fd_dev\n");
		return NULL;
	}

	fd_dev->fd_host = fd_host;

	pr_debug("FILEIO: Allocated fd_dev for %p\n", name);

	return &fd_dev->dev;
}

static int fd_configure_device(struct se_device *dev)
{
	struct fd_dev *fd_dev = FD_DEV(dev);
	struct fd_host *fd_host = dev->se_hba->hba_ptr;
	struct file *file;
	struct inode *inode = NULL;
	int flags, ret = -EINVAL;

	if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
		pr_err("Missing fd_dev_name=\n");
		return -EINVAL;
	}

	/*
	 * Use O_DSYNC by default instead of O_SYNC to forgo syncing
	 * of pure timestamp updates.
	 */
	flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;

	/*
	 * Optionally allow fd_buffered_io=1 to be enabled for people
	 * who want to use the fs buffer cache as a WriteCache mechanism.
	 *
	 * This means that in the event of a hard failure, there is a risk
	 * of silent data-loss if the SCSI client has *not* performed a
	 * forced unit access (FUA) write, or issued SYNCHRONIZE_CACHE
	 * to write-out the entire device cache.
	 */
	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
		pr_debug("FILEIO: Disabling O_DSYNC, using buffered FILEIO\n");
		flags &= ~O_DSYNC;
	}

	file = filp_open(fd_dev->fd_dev_name, flags, 0600);
	if (IS_ERR(file)) {
		pr_err("filp_open(%s) failed\n", fd_dev->fd_dev_name);
		ret = PTR_ERR(file);
		goto fail;
	}
	fd_dev->fd_file = file;
	/*
	 * If using a block backend with this struct file, we extract
	 * fd_dev->fd_[block,dev]_size from struct block_device.
	 *
	 * Otherwise, we use the passed fd_dev_size= from configfs.
	 */
	inode = file->f_mapping->host;
	if (S_ISBLK(inode->i_mode)) {
		struct block_device *bdev = I_BDEV(inode);
		unsigned long long dev_size;

		fd_dev->fd_block_size = bdev_logical_block_size(bdev);
		/*
		 * Determine the number of bytes from i_size_read() minus
		 * one (1) logical sector from the underlying struct block_device
		 */
		dev_size = (i_size_read(file->f_mapping->host) -
				       fd_dev->fd_block_size);

		pr_debug("FILEIO: Using size: %llu bytes from struct"
			" block_device blocks: %llu logical_block_size: %d\n",
			dev_size, div_u64(dev_size, fd_dev->fd_block_size),
			fd_dev->fd_block_size);

		if (target_configure_unmap_from_queue(&dev->dev_attrib, bdev))
			pr_debug("FILEIO: BLOCK Discard support available,"
				 " disabled by default\n");
		/*
		 * Enable write same emulation and use 0xFFFF, as the smaller
		 * WRITE_SAME(10) only has a two-byte block count.
		 */
		dev->dev_attrib.max_write_same_len = 0xFFFF;

		if (bdev_nonrot(bdev))
			dev->dev_attrib.is_nonrot = 1;
	} else {
		if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
			pr_err("FILEIO: Missing fd_dev_size="
				" parameter, and no backing struct"
				" block_device\n");
			goto fail;
		}

		fd_dev->fd_block_size = FD_BLOCKSIZE;
		/*
		 * Limit UNMAP emulation to 8k Number of LBAs (NoLB)
		 */
		dev->dev_attrib.max_unmap_lba_count = 0x2000;
		/*
		 * Currently hardcoded to 1 in the Linux/SCSI code.
		 */
		dev->dev_attrib.max_unmap_block_desc_count = 1;
		dev->dev_attrib.unmap_granularity = 1;
		dev->dev_attrib.unmap_granularity_alignment = 0;

		/*
		 * Limit WRITE_SAME w/ UNMAP=0 emulation to 8k Number of LBAs (NoLB)
		 * based upon struct iovec limit for vfs_writev()
		 */
		dev->dev_attrib.max_write_same_len = 0x1000;
	}

	dev->dev_attrib.hw_block_size = fd_dev->fd_block_size;
	dev->dev_attrib.max_bytes_per_io = FD_MAX_BYTES;
	dev->dev_attrib.hw_max_sectors = FD_MAX_BYTES / fd_dev->fd_block_size;
	dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;

	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
		pr_debug("FILEIO: Forcing setting of emulate_write_cache=1"
			" with FDBD_HAS_BUFFERED_IO_WCE\n");
		dev->dev_attrib.emulate_write_cache = 1;
	}

	fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
	fd_dev->fd_queue_depth = dev->queue_depth;

	pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
		" %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
			fd_dev->fd_dev_name, fd_dev->fd_dev_size);

	return 0;
fail:
	if (fd_dev->fd_file) {
		filp_close(fd_dev->fd_file, NULL);
		fd_dev->fd_file = NULL;
	}
	return ret;
}

static void fd_dev_call_rcu(struct rcu_head *p)
{
	struct se_device *dev = container_of(p, struct se_device, rcu_head);
	struct fd_dev *fd_dev = FD_DEV(dev);

	kfree(fd_dev);
}

static void fd_free_device(struct se_device *dev)
{
	call_rcu(&dev->rcu_head, fd_dev_call_rcu);
}

static void fd_destroy_device(struct se_device *dev)
{
	struct fd_dev *fd_dev = FD_DEV(dev);

	if (fd_dev->fd_file) {
		filp_close(fd_dev->fd_file, NULL);
		fd_dev->fd_file = NULL;
	}
}

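/*
 * Per-command state for the fd_async_io=1 path: the kiocb used to submit the
 * vectored read/write, plus a trailing flexible array of bio_vecs, one per
 * scatterlist entry, sized at allocation time via struct_size().
 */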
struct target_core_file_cmd {
	unsigned long	len;
	struct se_cmd	*cmd;
	struct kiocb	iocb;
	struct bio_vec	bvecs[];
};

static void cmd_rw_aio_complete(struct kiocb *iocb, long ret)
{
	struct target_core_file_cmd *cmd;

	cmd = container_of(iocb, struct target_core_file_cmd, iocb);

	if (ret != cmd->len)
		target_complete_cmd(cmd->cmd, SAM_STAT_CHECK_CONDITION);
	else
		target_complete_cmd(cmd->cmd, SAM_STAT_GOOD);

	kfree(cmd);
}

static sense_reason_t
fd_execute_rw_aio(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
	      enum dma_data_direction data_direction)
{
	int is_write = !(data_direction == DMA_FROM_DEVICE);
	struct se_device *dev = cmd->se_dev;
	struct fd_dev *fd_dev = FD_DEV(dev);
	struct file *file = fd_dev->fd_file;
	struct target_core_file_cmd *aio_cmd;
	struct iov_iter iter;
	struct scatterlist *sg;
	ssize_t len = 0;
	int ret = 0, i;

	aio_cmd = kmalloc(struct_size(aio_cmd, bvecs, sgl_nents), GFP_KERNEL);
	if (!aio_cmd)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

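	/*
	 * Mirror the scatterlist into the bio_vec array so the whole
	 * transfer can be submitted as a single vectored kiocb.
	 */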
	for_each_sg(sgl, sg, sgl_nents, i) {
		aio_cmd->bvecs[i].bv_page = sg_page(sg);
		aio_cmd->bvecs[i].bv_len = sg->length;
		aio_cmd->bvecs[i].bv_offset = sg->offset;

		len += sg->length;
	}

	iov_iter_bvec(&iter, is_write, aio_cmd->bvecs, sgl_nents, len);

	aio_cmd->cmd = cmd;
	aio_cmd->len = len;
	aio_cmd->iocb.ki_pos = cmd->t_task_lba * dev->dev_attrib.block_size;
	aio_cmd->iocb.ki_filp = file;
	aio_cmd->iocb.ki_complete = cmd_rw_aio_complete;
	aio_cmd->iocb.ki_flags = IOCB_DIRECT;

	if (is_write && (cmd->se_cmd_flags & SCF_FUA))
		aio_cmd->iocb.ki_flags |= IOCB_DSYNC;

	if (is_write)
		ret = call_write_iter(file, &aio_cmd->iocb, &iter);
	else
		ret = call_read_iter(file, &aio_cmd->iocb, &iter);

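	/*
	 * -EIOCBQUEUED means the request was queued and ki_complete will be
	 * invoked asynchronously; any other return value means the I/O
	 * completed (or failed) synchronously, so finish it here ourselves.
	 */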
	if (ret != -EIOCBQUEUED)
		cmd_rw_aio_complete(&aio_cmd->iocb, ret);

	return 0;
}

static int fd_do_rw(struct se_cmd *cmd, struct file *fd,
		    u32 block_size, struct scatterlist *sgl,
		    u32 sgl_nents, u32 data_length, int is_write)
{
	struct scatterlist *sg;
	struct iov_iter iter;
	struct bio_vec *bvec;
	ssize_t len = 0;
	loff_t pos = (cmd->t_task_lba * block_size);
	int ret = 0, i;

	bvec = kcalloc(sgl_nents, sizeof(struct bio_vec), GFP_KERNEL);
	if (!bvec) {
		pr_err("Unable to allocate fd_do_readv iov[]\n");
		return -ENOMEM;
	}

	for_each_sg(sgl, sg, sgl_nents, i) {
		bvec[i].bv_page = sg_page(sg);
		bvec[i].bv_len = sg->length;
		bvec[i].bv_offset = sg->offset;

		len += sg->length;
	}

	iov_iter_bvec(&iter, READ, bvec, sgl_nents, len);
	if (is_write)
		ret = vfs_iter_write(fd, &iter, &pos, 0);
	else
		ret = vfs_iter_read(fd, &iter, &pos, 0);

	if (is_write) {
		if (ret < 0 || ret != data_length) {
			pr_err("%s() write returned %d\n", __func__, ret);
			if (ret >= 0)
				ret = -EINVAL;
		}
	} else {
		/*
		 * Return zeros and GOOD status even if the READ did not return
		 * the expected virt_size for struct file w/o a backing struct
		 * block_device.
		 */
		if (S_ISBLK(file_inode(fd)->i_mode)) {
			if (ret < 0 || ret != data_length) {
				pr_err("%s() returned %d, expecting %u for "
						"S_ISBLK\n", __func__, ret,
						data_length);
				if (ret >= 0)
					ret = -EINVAL;
			}
		} else {
			if (ret < 0) {
				pr_err("%s() returned %d for non S_ISBLK\n",
						__func__, ret);
			} else if (ret != data_length) {
				/*
				 * Short read case:
				 * Probably someone truncated the file under us.
				 * We must explicitly zero the sg-pages to avoid
				 * exposing uninitialized pages to userspace.
				 */
				if (ret < data_length)
					ret += iov_iter_zero(data_length - ret, &iter);
				else
					ret = -EINVAL;
			}
		}
	}
	kfree(bvec);
	return ret;
}

static sense_reason_t
fd_execute_sync_cache(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct fd_dev *fd_dev = FD_DEV(dev);
	int immed = (cmd->t_task_cdb[1] & 0x2);
	loff_t start, end;
	int ret;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op
	 */
	if (immed)
		target_complete_cmd(cmd, SAM_STAT_GOOD);

	/*
	 * Determine if we will be flushing the entire device.
	 */
	if (cmd->t_task_lba == 0 && cmd->data_length == 0) {
		start = 0;
		end = LLONG_MAX;
	} else {
		start = cmd->t_task_lba * dev->dev_attrib.block_size;
		if (cmd->data_length)
			end = start + cmd->data_length - 1;
		else
			end = LLONG_MAX;
	}

	ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
	if (ret != 0)
		pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);

	if (immed)
		return 0;

	if (ret)
		target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
	else
		target_complete_cmd(cmd, SAM_STAT_GOOD);

	return 0;
}

static sense_reason_t
fd_execute_write_same(struct se_cmd *cmd)
{
	struct se_device *se_dev = cmd->se_dev;
	struct fd_dev *fd_dev = FD_DEV(se_dev);
	loff_t pos = cmd->t_task_lba * se_dev->dev_attrib.block_size;
	sector_t nolb = sbc_get_write_same_sectors(cmd);
	struct iov_iter iter;
	struct bio_vec *bvec;
	unsigned int len = 0, i;
	ssize_t ret;

	if (!nolb) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}
	if (cmd->prot_op) {
		pr_err("WRITE_SAME: Protection information with FILEIO"
		       " backends not supported\n");
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	if (cmd->t_data_nents > 1 ||
	    cmd->t_data_sg[0].length != cmd->se_dev->dev_attrib.block_size) {
		pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
			" block_size: %u\n",
			cmd->t_data_nents,
			cmd->t_data_sg[0].length,
			cmd->se_dev->dev_attrib.block_size);
		return TCM_INVALID_CDB_FIELD;
	}

	bvec = kcalloc(nolb, sizeof(struct bio_vec), GFP_KERNEL);
	if (!bvec)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

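	/*
	 * Every bio_vec points at the same single-block source buffer, so one
	 * vectored write replicates that block nolb times on the backing file.
	 */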
	for (i = 0; i < nolb; i++) {
		bvec[i].bv_page = sg_page(&cmd->t_data_sg[0]);
		bvec[i].bv_len = cmd->t_data_sg[0].length;
		bvec[i].bv_offset = cmd->t_data_sg[0].offset;

		len += se_dev->dev_attrib.block_size;
	}

	iov_iter_bvec(&iter, READ, bvec, nolb, len);
	ret = vfs_iter_write(fd_dev->fd_file, &iter, &pos, 0);

	kfree(bvec);
	if (ret < 0 || ret != len) {
		pr_err("vfs_iter_write() returned %zd for write same\n", ret);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}

static int
fd_do_prot_fill(struct se_device *se_dev, sector_t lba, sector_t nolb,
		void *buf, size_t bufsize)
{
	struct fd_dev *fd_dev = FD_DEV(se_dev);
	struct file *prot_fd = fd_dev->fd_prot_file;
	sector_t prot_length, prot;
	loff_t pos = lba * se_dev->prot_length;

	if (!prot_fd) {
		pr_err("Unable to locate fd_dev->fd_prot_file\n");
		return -ENODEV;
	}

	prot_length = nolb * se_dev->prot_length;

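	/*
	 * Fill with 0xff: an all-ones protection field is the T10 PI escape
	 * pattern, so freshly formatted or unmapped blocks are not checked
	 * until real protection information is written for them.
	 */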
	memset(buf, 0xff, bufsize);
	for (prot = 0; prot < prot_length;) {
		sector_t len = min_t(sector_t, bufsize, prot_length - prot);
		ssize_t ret = kernel_write(prot_fd, buf, len, &pos);

		if (ret != len) {
			pr_err("vfs_write to prot file failed: %zd\n", ret);
			return ret < 0 ? ret : -ENODEV;
		}
		prot += ret;
	}

	return 0;
}

static int
fd_do_prot_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
{
	void *buf;
	int rc;

	buf = (void *)__get_free_page(GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate FILEIO prot buf\n");
		return -ENOMEM;
	}

	rc = fd_do_prot_fill(cmd->se_dev, lba, nolb, buf, PAGE_SIZE);

	free_page((unsigned long)buf);

	return rc;
}

static sense_reason_t
fd_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
{
	struct file *file = FD_DEV(cmd->se_dev)->fd_file;
	struct inode *inode = file->f_mapping->host;
	int ret;

	if (!nolb)
		return 0;

	if (cmd->se_dev->dev_attrib.pi_prot_type) {
		ret = fd_do_prot_unmap(cmd, lba, nolb);
		if (ret)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	if (S_ISBLK(inode->i_mode)) {
		/* The backend is a block device, use discard */
		struct block_device *bdev = I_BDEV(inode);
		struct se_device *dev = cmd->se_dev;

		ret = blkdev_issue_discard(bdev,
					   target_to_linux_sector(dev, lba),
					   target_to_linux_sector(dev, nolb),
					   GFP_KERNEL);
		if (ret < 0) {
			pr_warn("FILEIO: blkdev_issue_discard() failed: %d\n",
				ret);
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}
	} else {
		/* The backend is a normal file, use fallocate */
		struct se_device *se_dev = cmd->se_dev;
		loff_t pos = lba * se_dev->dev_attrib.block_size;
		unsigned int len = nolb * se_dev->dev_attrib.block_size;
		int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;

		if (!file->f_op->fallocate)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

		ret = file->f_op->fallocate(file, mode, pos, len);
		if (ret < 0) {
			pr_warn("FILEIO: fallocate() failed: %d\n", ret);
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}
	}

	return 0;
}

static sense_reason_t
fd_execute_rw_buffered(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
	      enum dma_data_direction data_direction)
{
	struct se_device *dev = cmd->se_dev;
	struct fd_dev *fd_dev = FD_DEV(dev);
	struct file *file = fd_dev->fd_file;
	struct file *pfile = fd_dev->fd_prot_file;
	sense_reason_t rc;
	int ret = 0;
	/*
	 * Call vectorized fileio functions to map struct scatterlist
	 * physical memory addresses to struct iovec virtual memory.
	 */
	if (data_direction == DMA_FROM_DEVICE) {
		if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
			ret = fd_do_rw(cmd, pfile, dev->prot_length,
				       cmd->t_prot_sg, cmd->t_prot_nents,
				       cmd->prot_length, 0);
			if (ret < 0)
				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}

		ret = fd_do_rw(cmd, file, dev->dev_attrib.block_size,
			       sgl, sgl_nents, cmd->data_length, 0);

		if (ret > 0 && cmd->prot_type && dev->dev_attrib.pi_prot_type &&
		    dev->dev_attrib.pi_prot_verify) {
			u32 sectors = cmd->data_length >>
					ilog2(dev->dev_attrib.block_size);

			rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors,
					    0, cmd->t_prot_sg, 0);
			if (rc)
				return rc;
		}
	} else {
		if (cmd->prot_type && dev->dev_attrib.pi_prot_type &&
		    dev->dev_attrib.pi_prot_verify) {
			u32 sectors = cmd->data_length >>
					ilog2(dev->dev_attrib.block_size);

			rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors,
					    0, cmd->t_prot_sg, 0);
			if (rc)
				return rc;
		}

		ret = fd_do_rw(cmd, file, dev->dev_attrib.block_size,
			       sgl, sgl_nents, cmd->data_length, 1);
		/*
		 * Perform implicit vfs_fsync_range() for fd_do_writev() ops
		 * for SCSI WRITEs with Forced Unit Access (FUA) set.
		 * Allow this to happen independent of WCE=0 setting.
		 */
		if (ret > 0 && (cmd->se_cmd_flags & SCF_FUA)) {
			loff_t start = cmd->t_task_lba *
				dev->dev_attrib.block_size;
			loff_t end;

			if (cmd->data_length)
				end = start + cmd->data_length - 1;
			else
				end = LLONG_MAX;

			vfs_fsync_range(fd_dev->fd_file, start, end, 1);
		}

		if (ret > 0 && cmd->prot_type && dev->dev_attrib.pi_prot_type) {
			ret = fd_do_rw(cmd, pfile, dev->prot_length,
				       cmd->t_prot_sg, cmd->t_prot_nents,
				       cmd->prot_length, 1);
			if (ret < 0)
				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}
	}

	if (ret < 0)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}

static sense_reason_t
fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
	      enum dma_data_direction data_direction)
{
	struct se_device *dev = cmd->se_dev;
	struct fd_dev *fd_dev = FD_DEV(dev);

	/*
	 * We are currently limited by the number of iovecs (2048) per
	 * single vfs_[writev,readv] call.
	 */
	if (cmd->data_length > FD_MAX_BYTES) {
		pr_err("FILEIO: Not able to process I/O of %u bytes due to"
		       " FD_MAX_BYTES: %u iovec count limitation\n",
			cmd->data_length, FD_MAX_BYTES);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	if (fd_dev->fbd_flags & FDBD_HAS_ASYNC_IO)
		return fd_execute_rw_aio(cmd, sgl, sgl_nents, data_direction);
	return fd_execute_rw_buffered(cmd, sgl, sgl_nents, data_direction);
}

enum {
	Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io,
	Opt_fd_async_io, Opt_err
};

static match_table_t tokens = {
	{Opt_fd_dev_name, "fd_dev_name=%s"},
	{Opt_fd_dev_size, "fd_dev_size=%s"},
	{Opt_fd_buffered_io, "fd_buffered_io=%d"},
	{Opt_fd_async_io, "fd_async_io=%d"},
	{Opt_err, NULL}
};
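
/*
 * The options above are parsed from the comma-separated control string
 * written through configfs. A sketch of the usage (device and path names
 * are hypothetical):
 *
 *   echo "fd_dev_name=/srv/disk0.img,fd_dev_size=1073741824" > \
 *       /sys/kernel/config/target/core/fileio_0/disk0/control
 */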

static ssize_t fd_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct fd_dev *fd_dev = FD_DEV(dev);
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, arg, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_fd_dev_name:
			if (match_strlcpy(fd_dev->fd_dev_name, &args[0],
				FD_MAX_DEV_NAME) == 0) {
				ret = -EINVAL;
				break;
			}
			pr_debug("FILEIO: Referencing Path: %s\n",
					fd_dev->fd_dev_name);
			fd_dev->fbd_flags |= FBDF_HAS_PATH;
			break;
		case Opt_fd_dev_size:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = kstrtoull(arg_p, 0, &fd_dev->fd_dev_size);
			kfree(arg_p);
			if (ret < 0) {
				pr_err("kstrtoull() failed for"
						" fd_dev_size=\n");
				goto out;
			}
			pr_debug("FILEIO: Referencing Size: %llu"
					" bytes\n", fd_dev->fd_dev_size);
			fd_dev->fbd_flags |= FBDF_HAS_SIZE;
			break;
		case Opt_fd_buffered_io:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			if (arg != 1) {
				pr_err("bogus fd_buffered_io=%d value\n", arg);
				ret = -EINVAL;
				goto out;
			}

			pr_debug("FILEIO: Using buffered I/O"
				" operations for struct fd_dev\n");

			fd_dev->fbd_flags |= FDBD_HAS_BUFFERED_IO_WCE;
			break;
		case Opt_fd_async_io:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			if (arg != 1) {
				pr_err("bogus fd_async_io=%d value\n", arg);
				ret = -EINVAL;
				goto out;
			}

			pr_debug("FILEIO: Using async I/O"
				" operations for struct fd_dev\n");

			fd_dev->fbd_flags |= FDBD_HAS_ASYNC_IO;
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t fd_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct fd_dev *fd_dev = FD_DEV(dev);
	ssize_t bl = 0;

	bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
	bl += sprintf(b + bl, "        File: %s  Size: %llu  Mode: %s Async: %d\n",
		fd_dev->fd_dev_name, fd_dev->fd_dev_size,
		(fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) ?
		"Buffered-WCE" : "O_DSYNC",
		!!(fd_dev->fbd_flags & FDBD_HAS_ASYNC_IO));
	return bl;
}

static sector_t fd_get_blocks(struct se_device *dev)
{
	struct fd_dev *fd_dev = FD_DEV(dev);
	struct file *f = fd_dev->fd_file;
	struct inode *i = f->f_mapping->host;
	unsigned long long dev_size;
	/*
	 * When using a file that references an underlying struct block_device,
	 * ensure dev_size is always based on the current inode size in order
	 * to handle underlying block_device resize operations.
	 */
	if (S_ISBLK(i->i_mode))
		dev_size = i_size_read(i);
	else
		dev_size = fd_dev->fd_dev_size;

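	/*
	 * get_blocks() callers expect the last addressable LBA rather than a
	 * block count, hence one logical block is subtracted before dividing.
	 */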
	return div_u64(dev_size - dev->dev_attrib.block_size,
		       dev->dev_attrib.block_size);
}

static int fd_init_prot(struct se_device *dev)
{
	struct fd_dev *fd_dev = FD_DEV(dev);
	struct file *prot_file, *file = fd_dev->fd_file;
	struct inode *inode;
	int ret, flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
	char buf[FD_MAX_DEV_PROT_NAME];

	if (!file) {
		pr_err("Unable to locate fd_dev->fd_file\n");
		return -ENODEV;
	}

	inode = file->f_mapping->host;
	if (S_ISBLK(inode->i_mode)) {
		pr_err("FILEIO Protection emulation only supported on"
		       " !S_ISBLK\n");
		return -ENOSYS;
	}

	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE)
		flags &= ~O_DSYNC;

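	/*
	 * Protection information lives in a sidecar file next to the backing
	 * file, named "<fd_dev_name>.protection".
	 */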
	snprintf(buf, FD_MAX_DEV_PROT_NAME, "%s.protection",
		 fd_dev->fd_dev_name);

	prot_file = filp_open(buf, flags, 0600);
	if (IS_ERR(prot_file)) {
		pr_err("filp_open(%s) failed\n", buf);
		ret = PTR_ERR(prot_file);
		return ret;
	}
	fd_dev->fd_prot_file = prot_file;

	return 0;
}

static int fd_format_prot(struct se_device *dev)
{
	unsigned char *buf;
	int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size;
	int ret;

	if (!dev->dev_attrib.pi_prot_type) {
		pr_err("Unable to format_prot while pi_prot_type == 0\n");
		return -ENODEV;
	}

	buf = vzalloc(unit_size);
	if (!buf) {
		pr_err("Unable to allocate FILEIO prot buf\n");
		return -ENOMEM;
	}

	pr_debug("Using FILEIO prot_length: %llu\n",
		 (unsigned long long)(dev->transport->get_blocks(dev) + 1) *
					dev->prot_length);

	ret = fd_do_prot_fill(dev, 0, dev->transport->get_blocks(dev) + 1,
			      buf, unit_size);
	vfree(buf);
	return ret;
}

static void fd_free_prot(struct se_device *dev)
{
	struct fd_dev *fd_dev = FD_DEV(dev);

	if (!fd_dev->fd_prot_file)
		return;

	filp_close(fd_dev->fd_prot_file, NULL);
	fd_dev->fd_prot_file = NULL;
}

static struct sbc_ops fd_sbc_ops = {
	.execute_rw		= fd_execute_rw,
	.execute_sync_cache	= fd_execute_sync_cache,
	.execute_write_same	= fd_execute_write_same,
	.execute_unmap		= fd_execute_unmap,
};

static sense_reason_t
fd_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &fd_sbc_ops);
}

static const struct target_backend_ops fileio_ops = {
	.name			= "fileio",
	.inquiry_prod		= "FILEIO",
	.inquiry_rev		= FD_VERSION,
	.owner			= THIS_MODULE,
	.attach_hba		= fd_attach_hba,
	.detach_hba		= fd_detach_hba,
	.alloc_device		= fd_alloc_device,
	.configure_device	= fd_configure_device,
	.destroy_device		= fd_destroy_device,
	.free_device		= fd_free_device,
	.parse_cdb		= fd_parse_cdb,
	.set_configfs_dev_params = fd_set_configfs_dev_params,
	.show_configfs_dev_params = fd_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= fd_get_blocks,
	.init_prot		= fd_init_prot,
	.format_prot		= fd_format_prot,
	.free_prot		= fd_free_prot,
	.tb_dev_attrib_attrs	= sbc_attrib_attrs,
};

static int __init fileio_module_init(void)
{
	return transport_backend_register(&fileio_ops);
}

static void __exit fileio_module_exit(void)
{
	target_backend_unregister(&fileio_ops);
}

MODULE_DESCRIPTION("TCM FILEIO subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(fileio_module_init);
module_exit(fileio_module_exit);