cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

target_core_rd.c (16581B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_rd.c
 *
 * This file contains the Storage Engine <-> Ramdisk transport
 * specific functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/highmem.h>
#include <linux/timer.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_rd.h"

static inline struct rd_dev *RD_DEV(struct se_device *dev)
{
	return container_of(dev, struct rd_dev, dev);
}

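/* Allocate the per-HBA rd_host context and hang it off hba->hba_ptr. */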
static int rd_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct rd_host *rd_host;

	rd_host = kzalloc(sizeof(*rd_host), GFP_KERNEL);
	if (!rd_host)
		return -ENOMEM;

	rd_host->rd_host_id = host_id;

	hba->hba_ptr = rd_host;

	pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		RD_HBA_VERSION, TARGET_CORE_VERSION);

	return 0;
}

static void rd_detach_hba(struct se_hba *hba)
{
	struct rd_host *rd_host = hba->hba_ptr;

	pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
		" Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);

	kfree(rd_host);
	hba->hba_ptr = NULL;
}

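/*
 * Free the backing pages referenced by each scatterlist table, then the
 * sg arrays and the table array itself; returns the number of pages freed.
 */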
static u32 rd_release_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
				 u32 sg_table_count)
{
	struct page *pg;
	struct scatterlist *sg;
	u32 i, j, page_count = 0, sg_per_table;

	for (i = 0; i < sg_table_count; i++) {
		sg = sg_table[i].sg_table;
		sg_per_table = sg_table[i].rd_sg_count;

		for (j = 0; j < sg_per_table; j++) {
			pg = sg_page(&sg[j]);
			if (pg) {
				__free_page(pg);
				page_count++;
			}
		}
		kfree(sg);
	}

	kfree(sg_table);
	return page_count;
}

static void rd_release_device_space(struct rd_dev *rd_dev)
{
	u32 page_count;

	if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
		return;

	page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_table_array,
					  rd_dev->sg_table_count);

	pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
		rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);

	rd_dev->sg_table_array = NULL;
	rd_dev->sg_table_count = 0;
}

/*
 * Allocate chained scatterlist tables, each entry backed by a freshly
 * allocated page initialized to init_payload (0x00 for data space,
 * 0xff for protection space).
 */
static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *sg_table,
				 u32 total_sg_needed, unsigned char init_payload)
{
	u32 i = 0, j, page_offset = 0, sg_per_table;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	struct page *pg;
	struct scatterlist *sg;
	unsigned char *p;

	while (total_sg_needed) {
		unsigned int chain_entry = 0;

		sg_per_table = (total_sg_needed > max_sg_per_table) ?
			max_sg_per_table : total_sg_needed;

		/*
		 * Reserve extra element for chain entry
		 */
		if (sg_per_table < total_sg_needed)
			chain_entry = 1;

		sg = kmalloc_array(sg_per_table + chain_entry, sizeof(*sg),
				GFP_KERNEL);
		if (!sg)
			return -ENOMEM;

		sg_init_table(sg, sg_per_table + chain_entry);

		if (i > 0) {
			sg_chain(sg_table[i - 1].sg_table,
				 max_sg_per_table + 1, sg);
		}

		sg_table[i].sg_table = sg;
		sg_table[i].rd_sg_count = sg_per_table;
		sg_table[i].page_start_offset = page_offset;
		sg_table[i++].page_end_offset = (page_offset + sg_per_table)
						- 1;

		for (j = 0; j < sg_per_table; j++) {
			pg = alloc_pages(GFP_KERNEL, 0);
			if (!pg) {
				pr_err("Unable to allocate scatterlist"
					" pages for struct rd_dev_sg_table\n");
				return -ENOMEM;
			}
			sg_assign_page(&sg[j], pg);
			sg[j].length = PAGE_SIZE;

			p = kmap(pg);
			memset(p, init_payload, PAGE_SIZE);
			kunmap(pg);
		}

		page_offset += sg_per_table;
		total_sg_needed -= sg_per_table;
	}

	return 0;
}

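/*
 * Build the ramdisk's backing store: rd_page_count pages tracked by an
 * array of chained scatterlist tables.  NULLIO devices get no backing pages.
 */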
static int rd_build_device_space(struct rd_dev *rd_dev)
{
	struct rd_dev_sg_table *sg_table;
	u32 sg_tables, total_sg_needed;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	int rc;

	if (rd_dev->rd_page_count <= 0) {
		pr_err("Illegal page count: %u for Ramdisk device\n",
		       rd_dev->rd_page_count);
		return -EINVAL;
	}

	/* Don't need backing pages for NULLIO */
	if (rd_dev->rd_flags & RDF_NULLIO)
		return 0;

	total_sg_needed = rd_dev->rd_page_count;

	sg_tables = (total_sg_needed / max_sg_per_table) + 1;
	sg_table = kcalloc(sg_tables, sizeof(*sg_table), GFP_KERNEL);
	if (!sg_table)
		return -ENOMEM;

	rd_dev->sg_table_array = sg_table;
	rd_dev->sg_table_count = sg_tables;

	rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0x00);
	if (rc)
		return rc;

	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
		 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
		 rd_dev->rd_dev_id, rd_dev->rd_page_count,
		 rd_dev->sg_table_count);

	return 0;
}

static void rd_release_prot_space(struct rd_dev *rd_dev)
{
	u32 page_count;

	if (!rd_dev->sg_prot_array || !rd_dev->sg_prot_count)
		return;

	page_count = rd_release_sgl_table(rd_dev, rd_dev->sg_prot_array,
					  rd_dev->sg_prot_count);

	pr_debug("CORE_RD[%u] - Released protection space for Ramdisk"
		 " Device ID: %u, pages %u in %u tables total bytes %lu\n",
		 rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
		 rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);

	rd_dev->sg_prot_array = NULL;
	rd_dev->sg_prot_count = 0;
}

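/*
 * Allocate DIF protection space: prot_length bytes of protection
 * information per block_size bytes of data.
 */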
static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length, int block_size)
{
	struct rd_dev_sg_table *sg_table;
	u32 total_sg_needed, sg_tables;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	int rc;

	if (rd_dev->rd_flags & RDF_NULLIO)
		return 0;
	/*
	 * prot_length=8byte dif data
	 * tot sg needed = rd_page_count * (PGSZ/block_size) *
	 * 		   (prot_length/block_size) + pad
	 * PGSZ canceled each other.
	 */
	total_sg_needed = (rd_dev->rd_page_count * prot_length / block_size) + 1;

	sg_tables = (total_sg_needed / max_sg_per_table) + 1;
	sg_table = kcalloc(sg_tables, sizeof(*sg_table), GFP_KERNEL);
	if (!sg_table)
		return -ENOMEM;

	rd_dev->sg_prot_array = sg_table;
	rd_dev->sg_prot_count = sg_tables;

	rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0xff);
	if (rc)
		return rc;

	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u prot space of"
		 " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
		 rd_dev->rd_dev_id, total_sg_needed, rd_dev->sg_prot_count);

	return 0;
}

static struct se_device *rd_alloc_device(struct se_hba *hba, const char *name)
{
	struct rd_dev *rd_dev;
	struct rd_host *rd_host = hba->hba_ptr;

	rd_dev = kzalloc(sizeof(*rd_dev), GFP_KERNEL);
	if (!rd_dev)
		return NULL;

	rd_dev->rd_host = rd_host;

	return &rd_dev->dev;
}

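/* Validate rd_pages=, build the backing store and publish the HW attributes. */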
static int rd_configure_device(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);
	struct rd_host *rd_host = dev->se_hba->hba_ptr;
	int ret;

	if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
		pr_debug("Missing rd_pages= parameter\n");
		return -EINVAL;
	}

	ret = rd_build_device_space(rd_dev);
	if (ret < 0)
		goto fail;

	dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
	dev->dev_attrib.hw_max_sectors = UINT_MAX;
	dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
	dev->dev_attrib.is_nonrot = 1;

	rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;

	pr_debug("CORE_RD[%u] - Added TCM MEMCPY Ramdisk Device ID: %u of"
		" %u pages in %u tables, %lu total bytes\n",
		rd_host->rd_host_id, rd_dev->rd_dev_id, rd_dev->rd_page_count,
		rd_dev->sg_table_count,
		(unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));

	return 0;

fail:
	rd_release_device_space(rd_dev);
	return ret;
}

static void rd_dev_call_rcu(struct rcu_head *p)
{
	struct se_device *dev = container_of(p, struct se_device, rcu_head);
	struct rd_dev *rd_dev = RD_DEV(dev);

	kfree(rd_dev);
}

static void rd_free_device(struct se_device *dev)
{
	call_rcu(&dev->rcu_head, rd_dev_call_rcu);
}

static void rd_destroy_device(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	rd_release_device_space(rd_dev);
}

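/*
 * Map a page index to the rd_dev_sg_table whose
 * [page_start_offset, page_end_offset] range contains it.
 */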
static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
{
	struct rd_dev_sg_table *sg_table;
	u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));

	i = page / sg_per_table;
	if (i < rd_dev->sg_table_count) {
		sg_table = &rd_dev->sg_table_array[i];
		if ((sg_table->page_start_offset <= page) &&
		    (sg_table->page_end_offset >= page))
			return sg_table;
	}

	pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n",
			page);

	return NULL;
}

static struct rd_dev_sg_table *rd_get_prot_table(struct rd_dev *rd_dev, u32 page)
{
	struct rd_dev_sg_table *sg_table;
	u32 i, sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));

	i = page / sg_per_table;
	if (i < rd_dev->sg_prot_count) {
		sg_table = &rd_dev->sg_prot_array[i];
		if ((sg_table->page_start_offset <= page) &&
		     (sg_table->page_end_offset >= page))
			return sg_table;
	}

	pr_err("Unable to locate struct prot rd_dev_sg_table for page: %u\n",
			page);

	return NULL;
}

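/*
 * Locate the protection scatterlist for the command's LBA, optionally
 * verify the DIF data, then copy it between the ramdisk and the command.
 */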
static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, bool is_read)
{
	struct se_device *se_dev = cmd->se_dev;
	struct rd_dev *dev = RD_DEV(se_dev);
	struct rd_dev_sg_table *prot_table;
	struct scatterlist *prot_sg;
	u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
	u32 prot_offset, prot_page;
	u32 prot_npages __maybe_unused;
	u64 tmp;
	sense_reason_t rc = 0;

	tmp = cmd->t_task_lba * se_dev->prot_length;
	prot_offset = do_div(tmp, PAGE_SIZE);
	prot_page = tmp;

	prot_table = rd_get_prot_table(dev, prot_page);
	if (!prot_table)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	prot_sg = &prot_table->sg_table[prot_page -
					prot_table->page_start_offset];

	if (se_dev->dev_attrib.pi_prot_verify) {
		if (is_read)
			rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0,
					    prot_sg, prot_offset);
		else
			rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0,
					    cmd->t_prot_sg, 0);
	}
	if (!rc)
		sbc_dif_copy_prot(cmd, sectors, is_read, prot_sg, prot_offset);

	return rc;
}

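/*
 * Core I/O path: walk the command's scatterlist with a mapping iterator
 * and memcpy to/from the ramdisk pages, crossing sg tables as needed.
 */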
static sense_reason_t
rd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
	      enum dma_data_direction data_direction)
{
	struct se_device *se_dev = cmd->se_dev;
	struct rd_dev *dev = RD_DEV(se_dev);
	struct rd_dev_sg_table *table;
	struct scatterlist *rd_sg;
	struct sg_mapping_iter m;
	u32 rd_offset;
	u32 rd_size;
	u32 rd_page;
	u32 src_len;
	u64 tmp;
	sense_reason_t rc;

	if (dev->rd_flags & RDF_NULLIO) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}

	tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
	rd_offset = do_div(tmp, PAGE_SIZE);
	rd_page = tmp;
	rd_size = cmd->data_length;

	table = rd_get_sg_table(dev, rd_page);
	if (!table)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	rd_sg = &table->sg_table[rd_page - table->page_start_offset];

	pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
			dev->rd_dev_id,
			data_direction == DMA_FROM_DEVICE ? "Read" : "Write",
			cmd->t_task_lba, rd_size, rd_page, rd_offset);

	if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type &&
	    data_direction == DMA_TO_DEVICE) {
		rc = rd_do_prot_rw(cmd, false);
		if (rc)
			return rc;
	}

	src_len = PAGE_SIZE - rd_offset;
	sg_miter_start(&m, sgl, sgl_nents,
			data_direction == DMA_FROM_DEVICE ?
				SG_MITER_TO_SG : SG_MITER_FROM_SG);
	while (rd_size) {
		u32 len;
		void *rd_addr;

		sg_miter_next(&m);
		if (!(u32)m.length) {
			pr_debug("RD[%u]: invalid sgl %p len %zu\n",
				 dev->rd_dev_id, m.addr, m.length);
			sg_miter_stop(&m);
			return TCM_INCORRECT_AMOUNT_OF_DATA;
		}
		len = min((u32)m.length, src_len);
		if (len > rd_size) {
			pr_debug("RD[%u]: size underrun page %d offset %d "
				 "size %d\n", dev->rd_dev_id,
				 rd_page, rd_offset, rd_size);
			len = rd_size;
		}
		m.consumed = len;

		rd_addr = sg_virt(rd_sg) + rd_offset;

		if (data_direction == DMA_FROM_DEVICE)
			memcpy(m.addr, rd_addr, len);
		else
			memcpy(rd_addr, m.addr, len);

		rd_size -= len;
		if (!rd_size)
			continue;

		src_len -= len;
		if (src_len) {
			rd_offset += len;
			continue;
		}

		/* rd page completed, next one please */
		rd_page++;
		rd_offset = 0;
		src_len = PAGE_SIZE;
		if (rd_page <= table->page_end_offset) {
			rd_sg++;
			continue;
		}

		table = rd_get_sg_table(dev, rd_page);
		if (!table) {
			sg_miter_stop(&m);
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}

		/* since we increment, the first sg entry is correct */
		rd_sg = table->sg_table;
	}
	sg_miter_stop(&m);

	if (cmd->prot_type && se_dev->dev_attrib.pi_prot_type &&
	    data_direction == DMA_FROM_DEVICE) {
		rc = rd_do_prot_rw(cmd, true);
		if (rc)
			return rc;
	}

	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}

enum {
	Opt_rd_pages, Opt_rd_nullio, Opt_rd_dummy, Opt_err
};

static match_table_t tokens = {
	{Opt_rd_pages, "rd_pages=%d"},
	{Opt_rd_nullio, "rd_nullio=%d"},
	{Opt_rd_dummy, "rd_dummy=%d"},
	{Opt_err, NULL}
};

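/* Parse the comma-separated configfs options: rd_pages, rd_nullio, rd_dummy. */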
static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct rd_dev *rd_dev = RD_DEV(dev);
	char *orig, *ptr, *opts;
	substring_t args[MAX_OPT_ARGS];
	int arg, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_rd_pages:
			match_int(args, &arg);
			rd_dev->rd_page_count = arg;
			pr_debug("RAMDISK: Referencing Page"
				" Count: %u\n", rd_dev->rd_page_count);
			rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
			break;
		case Opt_rd_nullio:
			match_int(args, &arg);
			if (arg != 1)
				break;

			pr_debug("RAMDISK: Setting NULLIO flag: %d\n", arg);
			rd_dev->rd_flags |= RDF_NULLIO;
			break;
		case Opt_rd_dummy:
			match_int(args, &arg);
			if (arg != 1)
				break;

			pr_debug("RAMDISK: Setting DUMMY flag: %d\n", arg);
			rd_dev->rd_flags |= RDF_DUMMY;
			break;
		default:
			break;
		}
	}

	kfree(orig);
	return count;
}

static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	ssize_t bl = sprintf(b, "TCM RamDisk ID: %u  RamDisk Makeup: rd_mcp\n",
			rd_dev->rd_dev_id);
	bl += sprintf(b + bl, "        PAGES/PAGE_SIZE: %u*%lu"
			"  SG_table_count: %u  nullio: %d dummy: %d\n",
			rd_dev->rd_page_count,
			PAGE_SIZE, rd_dev->sg_table_count,
			!!(rd_dev->rd_flags & RDF_NULLIO),
			!!(rd_dev->rd_flags & RDF_DUMMY));
	return bl;
}

static u32 rd_get_device_type(struct se_device *dev)
{
	if (RD_DEV(dev)->rd_flags & RDF_DUMMY)
		return 0x3f; /* Unknown device type, not connected */
	else
		return sbc_get_device_type(dev);
}

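/* Report capacity as the last addressable LBA: pages * PAGE_SIZE / block_size - 1. */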
static sector_t rd_get_blocks(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
			dev->dev_attrib.block_size) - 1;

	return blocks_long;
}

static int rd_init_prot(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	if (!dev->dev_attrib.pi_prot_type)
		return 0;

	return rd_build_prot_space(rd_dev, dev->prot_length,
				   dev->dev_attrib.block_size);
}

static void rd_free_prot(struct se_device *dev)
{
	struct rd_dev *rd_dev = RD_DEV(dev);

	rd_release_prot_space(rd_dev);
}

static struct sbc_ops rd_sbc_ops = {
	.execute_rw		= rd_execute_rw,
};

static sense_reason_t
rd_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &rd_sbc_ops);
}

static const struct target_backend_ops rd_mcp_ops = {
	.name			= "rd_mcp",
	.inquiry_prod		= "RAMDISK-MCP",
	.inquiry_rev		= RD_MCP_VERSION,
	.attach_hba		= rd_attach_hba,
	.detach_hba		= rd_detach_hba,
	.alloc_device		= rd_alloc_device,
	.configure_device	= rd_configure_device,
	.destroy_device		= rd_destroy_device,
	.free_device		= rd_free_device,
	.parse_cdb		= rd_parse_cdb,
	.set_configfs_dev_params = rd_set_configfs_dev_params,
	.show_configfs_dev_params = rd_show_configfs_dev_params,
	.get_device_type	= rd_get_device_type,
	.get_blocks		= rd_get_blocks,
	.init_prot		= rd_init_prot,
	.free_prot		= rd_free_prot,
	.tb_dev_attrib_attrs	= sbc_attrib_attrs,
};

int __init rd_module_init(void)
{
	return transport_backend_register(&rd_mcp_ops);
}

void rd_module_exit(void)
{
	target_backend_unregister(&rd_mcp_ops);
}