cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

tfc_io.c (9043B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010 Cisco Systems, Inc.
 *
 * Portions based on tcm_loop_fabric_scsi.c and libfc/fc_fcp.c
 *
 * Copyright (c) 2007 Intel Corporation. All rights reserved.
 * Copyright (c) 2008 Red Hat, Inc.  All rights reserved.
 * Copyright (c) 2008 Mike Christie
 * Copyright (c) 2009 Rising Tide, Inc.
 * Copyright (c) 2009 Linux-iSCSI.org
 * Copyright (c) 2009 Nicholas A. Bellinger <nab@linux-iscsi.org>
 */

/* XXX TBD some includes may be extraneous */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>
#include <scsi/libfc.h>

#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "tcm_fc.h"
/*
 * Deliver read data back to initiator.
 * XXX TBD handle resource problems later.
 */
int ft_queue_data_in(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
	struct fc_frame *fp = NULL;
	struct fc_exch *ep;
	struct fc_lport *lport;
	struct scatterlist *sg = NULL;
	size_t remaining;
	u32 f_ctl = FC_FC_EX_CTX | FC_FC_REL_OFF;
	u32 mem_off = 0;
	u32 fh_off = 0;
	u32 frame_off = 0;
	size_t frame_len = 0;
	size_t mem_len = 0;
	size_t tlen;
	size_t off_in_page;
	struct page *page = NULL;
	int use_sg;
	int error;
	void *page_addr;
	void *from;
	void *to = NULL;

	if (cmd->aborted)
		return 0;

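	/*
	 * A previous frame-send failure may have set TASK_SET_FULL;
	 * in that case skip the data phase entirely and deliver the
	 * status response right away (see the error path below).
	 */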
	if (se_cmd->scsi_status == SAM_STAT_TASK_SET_FULL)
		goto queue_status;

	ep = fc_seq_exch(cmd->seq);
	lport = ep->lp;
	cmd->seq = fc_seq_start_next(cmd->seq);

	remaining = se_cmd->data_length;

	/*
	 * Set up to use the first mem list entry, unless there is no data.
	 */
	BUG_ON(remaining && !se_cmd->t_data_sg);
	if (remaining) {
		sg = se_cmd->t_data_sg;
		mem_len = sg->length;
		mem_off = sg->offset;
		page = sg_page(sg);
	}

	/* no scatter/gather in skb for odd word length due to fc_seq_send() */
	use_sg = !(remaining % 4);

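	/*
	 * Each pass of this loop builds and sends one frame: advance
	 * the scatterlist as entries are consumed, allocate a frame
	 * sized for LSO or the session's max frame, attach or copy the
	 * payload, and send once the frame is full or the data is done.
	 */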
	while (remaining) {
		struct fc_seq *seq = cmd->seq;

		if (!seq) {
			pr_debug("%s: Command aborted, xid 0x%x\n",
				 __func__, ep->xid);
			break;
		}
		if (!mem_len) {
			sg = sg_next(sg);
			mem_len = min((size_t)sg->length, remaining);
			mem_off = sg->offset;
			page = sg_page(sg);
		}
		if (!frame_len) {
			/*
			 * If the lport has the Large Send Offload (LSO)
			 * capability, allow 'frame_len' to be as large as
			 * 'lso_max'; otherwise limit it to the session's
			 * maximum frame size.
			 */
			frame_len = (lport->seq_offload) ? lport->lso_max :
							  cmd->sess->max_frame;
			frame_len = min(frame_len, remaining);
			fp = fc_frame_alloc(lport, use_sg ? 0 : frame_len);
			if (!fp)
				return -ENOMEM;
			to = fc_frame_payload_get(fp, 0);
			fh_off = frame_off;
			frame_off += frame_len;
			/*
			 * Set the frame's max payload, which the base driver
			 * uses to tell the HW the maximum frame size, so
			 * that the HW can fragment appropriately based on
			 * the "gso_max_size" of the underlying netdev.
			 */
			fr_max_payload(fp) = cmd->sess->max_frame;
		}
		tlen = min(mem_len, frame_len);

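		/*
		 * Two delivery paths: with use_sg, take a page reference
		 * and attach the payload page as an skb fragment
		 * (zero-copy; the reference is dropped when the skb is
		 * freed); otherwise kmap the page and memcpy into the
		 * frame's pre-allocated linear payload.
		 */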
		if (use_sg) {
			off_in_page = mem_off;
			BUG_ON(!page);
			get_page(page);
			skb_fill_page_desc(fp_skb(fp),
					   skb_shinfo(fp_skb(fp))->nr_frags,
					   page, off_in_page, tlen);
			fr_len(fp) += tlen;
			fp_skb(fp)->data_len += tlen;
			fp_skb(fp)->truesize += page_size(page);
		} else {
			BUG_ON(!page);
			from = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
			page_addr = from;
			from += offset_in_page(mem_off);
			tlen = min(tlen, (size_t)(PAGE_SIZE -
						offset_in_page(mem_off)));
			memcpy(to, from, tlen);
			kunmap_atomic(page_addr);
			to += tlen;
		}

		mem_off += tlen;
		mem_len -= tlen;
		frame_len -= tlen;
		remaining -= tlen;

		if (frame_len &&
		    (skb_shinfo(fp_skb(fp))->nr_frags < FC_FRAME_SG_LEN))
			continue;
		if (!remaining)
			f_ctl |= FC_FC_END_SEQ;
		fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
			       FC_TYPE_FCP, f_ctl, fh_off);
		error = fc_seq_send(lport, seq, fp);
		if (error) {
			pr_info_ratelimited("%s: Failed to send frame %p, "
						"xid <0x%x>, remaining %zu, "
						"lso_max <0x%x>\n",
						__func__, fp, ep->xid,
						remaining, lport->lso_max);
			/*
			 * Go ahead and set TASK_SET_FULL status, ignoring
			 * the rest of the DataIN, and immediately attempt to
			 * send the response via ft_queue_status() in order
			 * to notify the initiator that it should reduce its
			 * per-LUN queue_depth.
			 */
			se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
			break;
		}
	}
queue_status:
	return ft_queue_status(se_cmd);
}

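/*
 * Run from the tpg workqueue so that target_execute_cmd() is called
 * in process context once all write data for the command has arrived.
 */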
static void ft_execute_work(struct work_struct *work)
{
	struct ft_cmd *cmd = container_of(work, struct ft_cmd, work);

	target_execute_cmd(&cmd->se_cmd);
}

/*
 * Receive write data frame.
 */
void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct fc_seq *seq = cmd->seq;
	struct fc_exch *ep;
	struct fc_lport *lport;
	struct fc_frame_header *fh;
	struct scatterlist *sg = NULL;
	u32 mem_off = 0;
	u32 rel_off;
	size_t frame_len;
	size_t mem_len = 0;
	size_t tlen;
	struct page *page = NULL;
	void *page_addr;
	void *from;
	void *to;
	u32 f_ctl;
	void *buf;

	fh = fc_frame_header_get(fp);
	if (!(ntoh24(fh->fh_f_ctl) & FC_FC_REL_OFF))
		goto drop;

	f_ctl = ntoh24(fh->fh_f_ctl);
	ep = fc_seq_exch(seq);
	lport = ep->lp;
	if (cmd->was_ddp_setup) {
		BUG_ON(!lport);
		/*
		 * Since DDP (Large Rx offload) was set up for this request,
		 * the payload is expected to be copied directly to user
		 * buffers.
		 */
		buf = fc_frame_payload_get(fp, 1);
		if (buf)
			pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, "
				"cmd->sg_cnt 0x%x. DDP was setup, so a "
				"frame with payload was not expected; "
				"the frame will be dropped if the "
				"'Sequence Initiative' bit in f_ctl is "
				"not set\n", __func__, ep->xid, f_ctl,
				se_cmd->t_data_sg, se_cmd->t_data_nents);
		/*
		 * Invalidate the HW DDP context if one was set up for this
		 * command. Invalidation is required in both situations
		 * (success and error).
		 */
		ft_invl_hw_context(cmd);

		/*
		 * If the "Sequence Initiative (TSI)" bit is set in f_ctl,
		 * the last write data frame was received successfully: the
		 * payload was posted directly to the user buffer and only
		 * the last frame's header was posted to the receive queue.
		 *
		 * If the "Sequence Initiative (TSI)" bit is not set, an
		 * error occurred w.r.t. DDP, so drop the packet and let
		 * explicit ABORTS from the other end of the exchange, or
		 * the exchange timer, trigger the recovery.
		 */
		if (f_ctl & FC_FC_SEQ_INIT)
			goto last_frame;
		else
			goto drop;
	}

	rel_off = ntohl(fh->fh_parm_offset);
	frame_len = fr_len(fp);
	if (frame_len <= sizeof(*fh))
		goto drop;
	frame_len -= sizeof(*fh);
	from = fc_frame_payload_get(fp, 0);
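	/*
	 * Bounds-check the frame against the command: drop frames whose
	 * relative offset lies beyond the expected data length, and clamp
	 * the payload so it cannot overrun the command's buffers.
	 */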
	if (rel_off >= se_cmd->data_length)
		goto drop;
	if (frame_len + rel_off > se_cmd->data_length)
		frame_len = se_cmd->data_length - rel_off;

	/*
	 * Set up to use the first mem list entry, unless there is no data.
	 */
	BUG_ON(frame_len && !se_cmd->t_data_sg);
	if (frame_len) {
		sg = se_cmd->t_data_sg;
		mem_len = sg->length;
		mem_off = sg->offset;
		page = sg_page(sg);
	}

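	/*
	 * Walk the scatterlist forward to the frame's relative offset,
	 * then copy the payload page by page into the command's buffers,
	 * mapping each page with kmap_atomic() for the memcpy.
	 */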
	while (frame_len) {
		if (!mem_len) {
			sg = sg_next(sg);
			mem_len = sg->length;
			mem_off = sg->offset;
			page = sg_page(sg);
		}
		if (rel_off >= mem_len) {
			rel_off -= mem_len;
			mem_len = 0;
			continue;
		}
		mem_off += rel_off;
		mem_len -= rel_off;
		rel_off = 0;

		tlen = min(mem_len, frame_len);

		to = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
		page_addr = to;
		to += offset_in_page(mem_off);
		tlen = min(tlen, (size_t)(PAGE_SIZE -
					  offset_in_page(mem_off)));
		memcpy(to, from, tlen);
		kunmap_atomic(page_addr);

		from += tlen;
		frame_len -= tlen;
		mem_off += tlen;
		mem_len -= tlen;
		cmd->write_data_len += tlen;
	}
last_frame:
	if (cmd->write_data_len == se_cmd->data_length) {
		INIT_WORK(&cmd->work, ft_execute_work);
		queue_work(cmd->sess->tport->tpg->workqueue, &cmd->work);
	}
drop:
	fc_frame_free(fp);
}

/*
 * Handle and clean up any HW-specific resources on received ABORTS,
 * errors, or timeouts.
 */
void ft_invl_hw_context(struct ft_cmd *cmd)
{
	struct fc_seq *seq;
	struct fc_exch *ep = NULL;
	struct fc_lport *lport = NULL;

	BUG_ON(!cmd);
	seq = cmd->seq;

	/* Clean up the DDP context in HW if DDP was set up */
	if (cmd->was_ddp_setup && seq) {
		ep = fc_seq_exch(seq);
		if (ep) {
			lport = ep->lp;
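			/*
			 * DDP contexts are only set up for exchanges with
			 * xid <= lport->lro_xid, so the check below skips
			 * HW invalidation for any other exchange.
			 */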
			if (lport && (ep->xid <= lport->lro_xid)) {
				/*
				 * "ddp_done" triggers invalidation of the
				 * HW-specific DDP context
				 */
				cmd->write_data_len = lport->tt.ddp_done(lport,
								      ep->xid);

				/*
				 * Reset the flag to indicate that the HW DDP
				 * context has been invalidated, to avoid
				 * re-invalidation of the same context
				 * (contexts are identified by ep->xid)
				 */
				cmd->was_ddp_setup = 0;
			}
		}
	}
}
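
For context, nothing in this file is called directly by the fabric: the target core invokes ft_queue_data_in() and ft_queue_status() through the fabric operations table that tcm_fc registers (ft_fabric_ops in tfc_conf.c). Below is a minimal, abridged sketch of that wiring; the field names assume the target_core_fabric_ops layout from this kernel's target_core_fabric.h, and the real table carries many more mandatory callbacks.

#include <linux/module.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "tcm_fc.h"

/*
 * Abridged sketch of the fabric ops wiring: the target core calls
 * queue_data_in when read data is ready for the initiator and
 * queue_status when a SCSI status must be sent. The full table in
 * tfc_conf.c defines many more callbacks; this is illustrative only.
 */
static const struct target_core_fabric_ops ft_fabric_ops_sketch = {
	.module		= THIS_MODULE,
	.fabric_name	= "fc",
	.queue_data_in	= ft_queue_data_in,
	.queue_status	= ft_queue_status,
	/* ... remaining callbacks omitted ... */
};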