cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

tcp.c (46014B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * NVMe over Fabrics TCP target.
      4 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
      5 */
      6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
      7#include <linux/module.h>
      8#include <linux/init.h>
      9#include <linux/slab.h>
     10#include <linux/err.h>
     11#include <linux/nvme-tcp.h>
     12#include <net/sock.h>
     13#include <net/tcp.h>
     14#include <linux/inet.h>
     15#include <linux/llist.h>
     16#include <crypto/hash.h>
     17
     18#include "nvmet.h"
     19
     20#define NVMET_TCP_DEF_INLINE_DATA_SIZE	(4 * PAGE_SIZE)
     21
      22/* Define the socket priority to use for connections where it is desirable
      23 * that the NIC consider performing optimized packet processing or filtering.
      24 * A non-zero value is sufficient to indicate general consideration of any
      25 * possible optimization.  Making it a module param allows for alternative
      26 * values that may be unique for some NIC implementations.
      27 */
     28static int so_priority;
     29module_param(so_priority, int, 0644);
     30MODULE_PARM_DESC(so_priority, "nvmet tcp socket optimize priority");
     31
      32/* Define a time period (in usecs) for which io_work() shall sample an activated
     33 * queue before determining it to be idle.  This optional module behavior
     34 * can enable NIC solutions that support socket optimized packet processing
     35 * using advanced interrupt moderation techniques.
     36 */
     37static int idle_poll_period_usecs;
     38module_param(idle_poll_period_usecs, int, 0644);
     39MODULE_PARM_DESC(idle_poll_period_usecs,
     40		"nvmet tcp io_work poll till idle time period in usecs");
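        /*
         * Both knobs above are plain module parameters (mode 0644), so they can
         * be set at module load time or adjusted later through sysfs. The values
         * below are purely illustrative:
         *
         *   modprobe nvmet-tcp so_priority=6 idle_poll_period_usecs=1000
         *   echo 0 > /sys/module/nvmet_tcp/parameters/idle_poll_period_usecs
         */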
     41
     42#define NVMET_TCP_RECV_BUDGET		8
     43#define NVMET_TCP_SEND_BUDGET		8
     44#define NVMET_TCP_IO_WORK_BUDGET	64
     45
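        /*
         * Per-command send state machine, driven from nvmet_tcp_try_send_one():
         * a command carrying data to the host first sends the C2H data PDU
         * header (SEND_DATA_PDU), then the payload pages (SEND_DATA), then an
         * optional data digest (SEND_DDGST); a command that solicits host data
         * sends an R2T instead; unless SQ head reporting is disabled, the
         * sequence ends with a response capsule (SEND_RESPONSE).
         */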
     46enum nvmet_tcp_send_state {
     47	NVMET_TCP_SEND_DATA_PDU,
     48	NVMET_TCP_SEND_DATA,
     49	NVMET_TCP_SEND_R2T,
     50	NVMET_TCP_SEND_DDGST,
     51	NVMET_TCP_SEND_RESPONSE
     52};
     53
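        /*
         * Per-queue receive state machine: the queue is either collecting a PDU
         * header (RECV_PDU), inline/H2C payload for the current command
         * (RECV_DATA), a trailing data digest (RECV_DDGST), or has hit a fatal
         * protocol error and stopped consuming the socket (RECV_ERR).
         */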
     54enum nvmet_tcp_recv_state {
     55	NVMET_TCP_RECV_PDU,
     56	NVMET_TCP_RECV_DATA,
     57	NVMET_TCP_RECV_DDGST,
     58	NVMET_TCP_RECV_ERR,
     59};
     60
     61enum {
     62	NVMET_TCP_F_INIT_FAILED = (1 << 0),
     63};
     64
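        /*
         * Per-command context. Each slot owns preallocated buffers for the
         * command PDU it receives and for the response/C2H-data/R2T PDUs it may
         * send, tracks receive (rbytes_done/pdu_recv) and send
         * (wbytes_done/offset) progress, and carries the kvec/scatterlist used
         * to move payload between the socket and the backend request.
         */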
     65struct nvmet_tcp_cmd {
     66	struct nvmet_tcp_queue		*queue;
     67	struct nvmet_req		req;
     68
     69	struct nvme_tcp_cmd_pdu		*cmd_pdu;
     70	struct nvme_tcp_rsp_pdu		*rsp_pdu;
     71	struct nvme_tcp_data_pdu	*data_pdu;
     72	struct nvme_tcp_r2t_pdu		*r2t_pdu;
     73
     74	u32				rbytes_done;
     75	u32				wbytes_done;
     76
     77	u32				pdu_len;
     78	u32				pdu_recv;
     79	int				sg_idx;
     80	int				nr_mapped;
     81	struct msghdr			recv_msg;
     82	struct kvec			*iov;
     83	u32				flags;
     84
     85	struct list_head		entry;
     86	struct llist_node		lentry;
     87
     88	/* send state */
     89	u32				offset;
     90	struct scatterlist		*cur_sg;
     91	enum nvmet_tcp_send_state	state;
     92
     93	__le32				exp_ddgst;
     94	__le32				recv_ddgst;
     95};
     96
     97enum nvmet_tcp_queue_state {
     98	NVMET_TCP_Q_CONNECTING,
     99	NVMET_TCP_Q_LIVE,
    100	NVMET_TCP_Q_DISCONNECTING,
    101};
    102
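        /*
         * Per-connection context: one queue is allocated for every accepted TCP
         * socket. It owns the command slots, the llist/list pair used to hand
         * completions to io_work, the receive state of the PDU currently being
         * parsed, optional crc32c ahash requests for header/data digests, and
         * the original socket callbacks that are restored on teardown.
         */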
    103struct nvmet_tcp_queue {
    104	struct socket		*sock;
    105	struct nvmet_tcp_port	*port;
    106	struct work_struct	io_work;
    107	struct nvmet_cq		nvme_cq;
    108	struct nvmet_sq		nvme_sq;
    109
    110	/* send state */
    111	struct nvmet_tcp_cmd	*cmds;
    112	unsigned int		nr_cmds;
    113	struct list_head	free_list;
    114	struct llist_head	resp_list;
    115	struct list_head	resp_send_list;
    116	int			send_list_len;
    117	struct nvmet_tcp_cmd	*snd_cmd;
    118
    119	/* recv state */
    120	int			offset;
    121	int			left;
    122	enum nvmet_tcp_recv_state rcv_state;
    123	struct nvmet_tcp_cmd	*cmd;
    124	union nvme_tcp_pdu	pdu;
    125
    126	/* digest state */
    127	bool			hdr_digest;
    128	bool			data_digest;
    129	struct ahash_request	*snd_hash;
    130	struct ahash_request	*rcv_hash;
    131
    132	unsigned long           poll_end;
    133
    134	spinlock_t		state_lock;
    135	enum nvmet_tcp_queue_state state;
    136
    137	struct sockaddr_storage	sockaddr;
    138	struct sockaddr_storage	sockaddr_peer;
    139	struct work_struct	release_work;
    140
    141	int			idx;
    142	struct list_head	queue_list;
    143
    144	struct nvmet_tcp_cmd	connect;
    145
    146	struct page_frag_cache	pf_cache;
    147
    148	void (*data_ready)(struct sock *);
    149	void (*state_change)(struct sock *);
    150	void (*write_space)(struct sock *);
    151};
    152
    153struct nvmet_tcp_port {
    154	struct socket		*sock;
    155	struct work_struct	accept_work;
    156	struct nvmet_port	*nport;
    157	struct sockaddr_storage addr;
    158	void (*data_ready)(struct sock *);
    159};
    160
    161static DEFINE_IDA(nvmet_tcp_queue_ida);
    162static LIST_HEAD(nvmet_tcp_queue_list);
    163static DEFINE_MUTEX(nvmet_tcp_queue_mutex);
    164
    165static struct workqueue_struct *nvmet_tcp_wq;
    166static const struct nvmet_fabrics_ops nvmet_tcp_ops;
    167static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
    168static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd);
    169static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd);
    170static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd);
    171
    172static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
    173		struct nvmet_tcp_cmd *cmd)
    174{
    175	if (unlikely(!queue->nr_cmds)) {
    176		/* We didn't allocate cmds yet, send 0xffff */
    177		return USHRT_MAX;
    178	}
    179
    180	return cmd - queue->cmds;
    181}
    182
    183static inline bool nvmet_tcp_has_data_in(struct nvmet_tcp_cmd *cmd)
    184{
    185	return nvme_is_write(cmd->req.cmd) &&
    186		cmd->rbytes_done < cmd->req.transfer_len;
    187}
    188
    189static inline bool nvmet_tcp_need_data_in(struct nvmet_tcp_cmd *cmd)
    190{
    191	return nvmet_tcp_has_data_in(cmd) && !cmd->req.cqe->status;
    192}
    193
    194static inline bool nvmet_tcp_need_data_out(struct nvmet_tcp_cmd *cmd)
    195{
    196	return !nvme_is_write(cmd->req.cmd) &&
    197		cmd->req.transfer_len > 0 &&
    198		!cmd->req.cqe->status;
    199}
    200
    201static inline bool nvmet_tcp_has_inline_data(struct nvmet_tcp_cmd *cmd)
    202{
    203	return nvme_is_write(cmd->req.cmd) && cmd->pdu_len &&
    204		!cmd->rbytes_done;
    205}
    206
    207static inline struct nvmet_tcp_cmd *
    208nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue)
    209{
    210	struct nvmet_tcp_cmd *cmd;
    211
    212	cmd = list_first_entry_or_null(&queue->free_list,
    213				struct nvmet_tcp_cmd, entry);
    214	if (!cmd)
    215		return NULL;
    216	list_del_init(&cmd->entry);
    217
    218	cmd->rbytes_done = cmd->wbytes_done = 0;
    219	cmd->pdu_len = 0;
    220	cmd->pdu_recv = 0;
    221	cmd->iov = NULL;
    222	cmd->flags = 0;
    223	return cmd;
    224}
    225
    226static inline void nvmet_tcp_put_cmd(struct nvmet_tcp_cmd *cmd)
    227{
    228	if (unlikely(cmd == &cmd->queue->connect))
    229		return;
    230
    231	list_add_tail(&cmd->entry, &cmd->queue->free_list);
    232}
    233
    234static inline int queue_cpu(struct nvmet_tcp_queue *queue)
    235{
    236	return queue->sock->sk->sk_incoming_cpu;
    237}
    238
    239static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue)
    240{
    241	return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
    242}
    243
    244static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue)
    245{
    246	return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
    247}
    248
    249static inline void nvmet_tcp_hdgst(struct ahash_request *hash,
    250		void *pdu, size_t len)
    251{
    252	struct scatterlist sg;
    253
    254	sg_init_one(&sg, pdu, len);
    255	ahash_request_set_crypt(hash, &sg, pdu + len, len);
    256	crypto_ahash_digest(hash);
    257}
    258
    259static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue,
    260	void *pdu, size_t len)
    261{
    262	struct nvme_tcp_hdr *hdr = pdu;
    263	__le32 recv_digest;
    264	__le32 exp_digest;
    265
    266	if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
    267		pr_err("queue %d: header digest enabled but no header digest\n",
    268			queue->idx);
    269		return -EPROTO;
    270	}
    271
    272	recv_digest = *(__le32 *)(pdu + hdr->hlen);
    273	nvmet_tcp_hdgst(queue->rcv_hash, pdu, len);
    274	exp_digest = *(__le32 *)(pdu + hdr->hlen);
    275	if (recv_digest != exp_digest) {
    276		pr_err("queue %d: header digest error: recv %#x expected %#x\n",
    277			queue->idx, le32_to_cpu(recv_digest),
    278			le32_to_cpu(exp_digest));
    279		return -EPROTO;
    280	}
    281
    282	return 0;
    283}
    284
    285static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
    286{
    287	struct nvme_tcp_hdr *hdr = pdu;
    288	u8 digest_len = nvmet_tcp_hdgst_len(queue);
    289	u32 len;
    290
    291	len = le32_to_cpu(hdr->plen) - hdr->hlen -
    292		(hdr->flags & NVME_TCP_F_HDGST ? digest_len : 0);
    293
    294	if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
    295		pr_err("queue %d: data digest flag is cleared\n", queue->idx);
    296		return -EPROTO;
    297	}
    298
    299	return 0;
    300}
    301
    302static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd)
    303{
    304	WARN_ON(unlikely(cmd->nr_mapped > 0));
    305
    306	kfree(cmd->iov);
    307	sgl_free(cmd->req.sg);
    308	cmd->iov = NULL;
    309	cmd->req.sg = NULL;
    310}
    311
    312static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd)
    313{
    314	struct scatterlist *sg;
    315	int i;
    316
    317	sg = &cmd->req.sg[cmd->sg_idx];
    318
    319	for (i = 0; i < cmd->nr_mapped; i++)
    320		kunmap(sg_page(&sg[i]));
    321
    322	cmd->nr_mapped = 0;
    323}
    324
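        /*
         * Build a kvec over the kmap()ed scatterlist pages that back the next
         * pdu_len bytes of this command (starting at rbytes_done), so that
         * sock_recvmsg() can place incoming inline/H2C data directly into the
         * request's buffers.
         */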
    325static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
    326{
    327	struct kvec *iov = cmd->iov;
    328	struct scatterlist *sg;
    329	u32 length, offset, sg_offset;
    330
    331	length = cmd->pdu_len;
    332	cmd->nr_mapped = DIV_ROUND_UP(length, PAGE_SIZE);
    333	offset = cmd->rbytes_done;
    334	cmd->sg_idx = offset / PAGE_SIZE;
    335	sg_offset = offset % PAGE_SIZE;
    336	sg = &cmd->req.sg[cmd->sg_idx];
    337
    338	while (length) {
    339		u32 iov_len = min_t(u32, length, sg->length - sg_offset);
    340
    341		iov->iov_base = kmap(sg_page(sg)) + sg->offset + sg_offset;
    342		iov->iov_len = iov_len;
    343
    344		length -= iov_len;
    345		sg = sg_next(sg);
    346		iov++;
    347		sg_offset = 0;
    348	}
    349
    350	iov_iter_kvec(&cmd->recv_msg.msg_iter, READ, cmd->iov,
    351		cmd->nr_mapped, cmd->pdu_len);
    352}
    353
    354static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
    355{
    356	queue->rcv_state = NVMET_TCP_RECV_ERR;
    357	if (queue->nvme_sq.ctrl)
    358		nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
    359	else
    360		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
    361}
    362
    363static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status)
    364{
    365	if (status == -EPIPE || status == -ECONNRESET)
    366		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
    367	else
    368		nvmet_tcp_fatal_error(queue);
    369}
    370
    371static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
    372{
    373	struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl;
    374	u32 len = le32_to_cpu(sgl->length);
    375
    376	if (!len)
    377		return 0;
    378
    379	if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) |
    380			  NVME_SGL_FMT_OFFSET)) {
    381		if (!nvme_is_write(cmd->req.cmd))
    382			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
    383
    384		if (len > cmd->req.port->inline_data_size)
    385			return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
    386		cmd->pdu_len = len;
    387	}
    388	cmd->req.transfer_len += len;
    389
    390	cmd->req.sg = sgl_alloc(len, GFP_KERNEL, &cmd->req.sg_cnt);
    391	if (!cmd->req.sg)
    392		return NVME_SC_INTERNAL;
    393	cmd->cur_sg = cmd->req.sg;
    394
    395	if (nvmet_tcp_has_data_in(cmd)) {
    396		cmd->iov = kmalloc_array(cmd->req.sg_cnt,
    397				sizeof(*cmd->iov), GFP_KERNEL);
    398		if (!cmd->iov)
    399			goto err;
    400	}
    401
    402	return 0;
    403err:
    404	nvmet_tcp_free_cmd_buffers(cmd);
    405	return NVME_SC_INTERNAL;
    406}
    407
    408static void nvmet_tcp_calc_ddgst(struct ahash_request *hash,
    409		struct nvmet_tcp_cmd *cmd)
    410{
    411	ahash_request_set_crypt(hash, cmd->req.sg,
    412		(void *)&cmd->exp_ddgst, cmd->req.transfer_len);
    413	crypto_ahash_digest(hash);
    414}
    415
    416static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
    417{
    418	struct nvme_tcp_data_pdu *pdu = cmd->data_pdu;
    419	struct nvmet_tcp_queue *queue = cmd->queue;
    420	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
    421	u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue);
    422
    423	cmd->offset = 0;
    424	cmd->state = NVMET_TCP_SEND_DATA_PDU;
    425
    426	pdu->hdr.type = nvme_tcp_c2h_data;
    427	pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ?
    428						NVME_TCP_F_DATA_SUCCESS : 0);
    429	pdu->hdr.hlen = sizeof(*pdu);
    430	pdu->hdr.pdo = pdu->hdr.hlen + hdgst;
    431	pdu->hdr.plen =
    432		cpu_to_le32(pdu->hdr.hlen + hdgst +
    433				cmd->req.transfer_len + ddgst);
    434	pdu->command_id = cmd->req.cqe->command_id;
    435	pdu->data_length = cpu_to_le32(cmd->req.transfer_len);
    436	pdu->data_offset = cpu_to_le32(cmd->wbytes_done);
    437
    438	if (queue->data_digest) {
    439		pdu->hdr.flags |= NVME_TCP_F_DDGST;
    440		nvmet_tcp_calc_ddgst(queue->snd_hash, cmd);
    441	}
    442
    443	if (cmd->queue->hdr_digest) {
    444		pdu->hdr.flags |= NVME_TCP_F_HDGST;
    445		nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
    446	}
    447}
    448
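        /*
         * R2T is the target's flow-control handle for host-to-controller data:
         * it tells the host how much of the remaining write payload to transmit
         * and carries a transfer tag (ttag) that subsequent H2CData PDUs use to
         * locate this command.
         */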
    449static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd)
    450{
    451	struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu;
    452	struct nvmet_tcp_queue *queue = cmd->queue;
    453	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
    454
    455	cmd->offset = 0;
    456	cmd->state = NVMET_TCP_SEND_R2T;
    457
    458	pdu->hdr.type = nvme_tcp_r2t;
    459	pdu->hdr.flags = 0;
    460	pdu->hdr.hlen = sizeof(*pdu);
    461	pdu->hdr.pdo = 0;
    462	pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
    463
    464	pdu->command_id = cmd->req.cmd->common.command_id;
    465	pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd);
    466	pdu->r2t_length = cpu_to_le32(cmd->req.transfer_len - cmd->rbytes_done);
    467	pdu->r2t_offset = cpu_to_le32(cmd->rbytes_done);
    468	if (cmd->queue->hdr_digest) {
    469		pdu->hdr.flags |= NVME_TCP_F_HDGST;
    470		nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
    471	}
    472}
    473
    474static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd)
    475{
    476	struct nvme_tcp_rsp_pdu *pdu = cmd->rsp_pdu;
    477	struct nvmet_tcp_queue *queue = cmd->queue;
    478	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
    479
    480	cmd->offset = 0;
    481	cmd->state = NVMET_TCP_SEND_RESPONSE;
    482
    483	pdu->hdr.type = nvme_tcp_rsp;
    484	pdu->hdr.flags = 0;
    485	pdu->hdr.hlen = sizeof(*pdu);
    486	pdu->hdr.pdo = 0;
    487	pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
    488	if (cmd->queue->hdr_digest) {
    489		pdu->hdr.flags |= NVME_TCP_F_HDGST;
    490		nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
    491	}
    492}
    493
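        /*
         * Completions may be queued from any context, so nvmet_tcp_queue_response()
         * pushes them onto the lock-free resp_list llist; io_work later drains
         * that llist into resp_send_list, from which nvmet_tcp_fetch_cmd() picks
         * the next command to transmit.
         */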
    494static void nvmet_tcp_process_resp_list(struct nvmet_tcp_queue *queue)
    495{
    496	struct llist_node *node;
    497	struct nvmet_tcp_cmd *cmd;
    498
    499	for (node = llist_del_all(&queue->resp_list); node; node = node->next) {
    500		cmd = llist_entry(node, struct nvmet_tcp_cmd, lentry);
    501		list_add(&cmd->entry, &queue->resp_send_list);
    502		queue->send_list_len++;
    503	}
    504}
    505
    506static struct nvmet_tcp_cmd *nvmet_tcp_fetch_cmd(struct nvmet_tcp_queue *queue)
    507{
    508	queue->snd_cmd = list_first_entry_or_null(&queue->resp_send_list,
    509				struct nvmet_tcp_cmd, entry);
    510	if (!queue->snd_cmd) {
    511		nvmet_tcp_process_resp_list(queue);
    512		queue->snd_cmd =
    513			list_first_entry_or_null(&queue->resp_send_list,
    514					struct nvmet_tcp_cmd, entry);
    515		if (unlikely(!queue->snd_cmd))
    516			return NULL;
    517	}
    518
    519	list_del_init(&queue->snd_cmd->entry);
    520	queue->send_list_len--;
    521
    522	if (nvmet_tcp_need_data_out(queue->snd_cmd))
    523		nvmet_setup_c2h_data_pdu(queue->snd_cmd);
    524	else if (nvmet_tcp_need_data_in(queue->snd_cmd))
    525		nvmet_setup_r2t_pdu(queue->snd_cmd);
    526	else
    527		nvmet_setup_response_pdu(queue->snd_cmd);
    528
    529	return queue->snd_cmd;
    530}
    531
    532static void nvmet_tcp_queue_response(struct nvmet_req *req)
    533{
    534	struct nvmet_tcp_cmd *cmd =
    535		container_of(req, struct nvmet_tcp_cmd, req);
    536	struct nvmet_tcp_queue	*queue = cmd->queue;
    537	struct nvme_sgl_desc *sgl;
    538	u32 len;
    539
    540	if (unlikely(cmd == queue->cmd)) {
    541		sgl = &cmd->req.cmd->common.dptr.sgl;
    542		len = le32_to_cpu(sgl->length);
    543
    544		/*
    545		 * Wait for inline data before processing the response.
    546		 * Avoid using helpers, this might happen before
    547		 * nvmet_req_init is completed.
    548		 */
    549		if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
    550		    len && len <= cmd->req.port->inline_data_size &&
    551		    nvme_is_write(cmd->req.cmd))
    552			return;
    553	}
    554
    555	llist_add(&cmd->lentry, &queue->resp_list);
    556	queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work);
    557}
    558
    559static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd)
    560{
    561	if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED))
    562		nvmet_tcp_queue_response(&cmd->req);
    563	else
    564		cmd->req.execute(&cmd->req);
    565}
    566
    567static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
    568{
    569	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
    570	int left = sizeof(*cmd->data_pdu) - cmd->offset + hdgst;
    571	int ret;
    572
    573	ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->data_pdu),
    574			offset_in_page(cmd->data_pdu) + cmd->offset,
    575			left, MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
    576	if (ret <= 0)
    577		return ret;
    578
    579	cmd->offset += ret;
    580	left -= ret;
    581
    582	if (left)
    583		return -EAGAIN;
    584
    585	cmd->state = NVMET_TCP_SEND_DATA;
    586	cmd->offset  = 0;
    587	return 1;
    588}
    589
    590static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
    591{
    592	struct nvmet_tcp_queue *queue = cmd->queue;
    593	int ret;
    594
    595	while (cmd->cur_sg) {
    596		struct page *page = sg_page(cmd->cur_sg);
    597		u32 left = cmd->cur_sg->length - cmd->offset;
    598		int flags = MSG_DONTWAIT;
    599
    600		if ((!last_in_batch && cmd->queue->send_list_len) ||
    601		    cmd->wbytes_done + left < cmd->req.transfer_len ||
    602		    queue->data_digest || !queue->nvme_sq.sqhd_disabled)
    603			flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
    604
    605		ret = kernel_sendpage(cmd->queue->sock, page, cmd->offset,
    606					left, flags);
    607		if (ret <= 0)
    608			return ret;
    609
    610		cmd->offset += ret;
    611		cmd->wbytes_done += ret;
    612
     613		/* Done with sg? */
    614		if (cmd->offset == cmd->cur_sg->length) {
    615			cmd->cur_sg = sg_next(cmd->cur_sg);
    616			cmd->offset = 0;
    617		}
    618	}
    619
    620	if (queue->data_digest) {
    621		cmd->state = NVMET_TCP_SEND_DDGST;
    622		cmd->offset = 0;
    623	} else {
    624		if (queue->nvme_sq.sqhd_disabled) {
    625			cmd->queue->snd_cmd = NULL;
    626			nvmet_tcp_put_cmd(cmd);
    627		} else {
    628			nvmet_setup_response_pdu(cmd);
    629		}
    630	}
    631
    632	if (queue->nvme_sq.sqhd_disabled)
    633		nvmet_tcp_free_cmd_buffers(cmd);
    634
    635	return 1;
    636
    637}
    638
    639static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
    640		bool last_in_batch)
    641{
    642	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
    643	int left = sizeof(*cmd->rsp_pdu) - cmd->offset + hdgst;
    644	int flags = MSG_DONTWAIT;
    645	int ret;
    646
    647	if (!last_in_batch && cmd->queue->send_list_len)
    648		flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
    649	else
    650		flags |= MSG_EOR;
    651
    652	ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->rsp_pdu),
    653		offset_in_page(cmd->rsp_pdu) + cmd->offset, left, flags);
    654	if (ret <= 0)
    655		return ret;
    656	cmd->offset += ret;
    657	left -= ret;
    658
    659	if (left)
    660		return -EAGAIN;
    661
    662	nvmet_tcp_free_cmd_buffers(cmd);
    663	cmd->queue->snd_cmd = NULL;
    664	nvmet_tcp_put_cmd(cmd);
    665	return 1;
    666}
    667
    668static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
    669{
    670	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
    671	int left = sizeof(*cmd->r2t_pdu) - cmd->offset + hdgst;
    672	int flags = MSG_DONTWAIT;
    673	int ret;
    674
    675	if (!last_in_batch && cmd->queue->send_list_len)
    676		flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
    677	else
    678		flags |= MSG_EOR;
    679
    680	ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->r2t_pdu),
    681		offset_in_page(cmd->r2t_pdu) + cmd->offset, left, flags);
    682	if (ret <= 0)
    683		return ret;
    684	cmd->offset += ret;
    685	left -= ret;
    686
    687	if (left)
    688		return -EAGAIN;
    689
    690	cmd->queue->snd_cmd = NULL;
    691	return 1;
    692}
    693
    694static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
    695{
    696	struct nvmet_tcp_queue *queue = cmd->queue;
    697	int left = NVME_TCP_DIGEST_LENGTH - cmd->offset;
    698	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
    699	struct kvec iov = {
    700		.iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset,
    701		.iov_len = left
    702	};
    703	int ret;
    704
    705	if (!last_in_batch && cmd->queue->send_list_len)
    706		msg.msg_flags |= MSG_MORE;
    707	else
    708		msg.msg_flags |= MSG_EOR;
    709
    710	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
    711	if (unlikely(ret <= 0))
    712		return ret;
    713
    714	cmd->offset += ret;
    715	left -= ret;
    716
    717	if (left)
    718		return -EAGAIN;
    719
    720	if (queue->nvme_sq.sqhd_disabled) {
    721		cmd->queue->snd_cmd = NULL;
    722		nvmet_tcp_put_cmd(cmd);
    723	} else {
    724		nvmet_setup_response_pdu(cmd);
    725	}
    726	return 1;
    727}
    728
    729static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
    730		bool last_in_batch)
    731{
    732	struct nvmet_tcp_cmd *cmd = queue->snd_cmd;
    733	int ret = 0;
    734
    735	if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) {
    736		cmd = nvmet_tcp_fetch_cmd(queue);
    737		if (unlikely(!cmd))
    738			return 0;
    739	}
    740
    741	if (cmd->state == NVMET_TCP_SEND_DATA_PDU) {
    742		ret = nvmet_try_send_data_pdu(cmd);
    743		if (ret <= 0)
    744			goto done_send;
    745	}
    746
    747	if (cmd->state == NVMET_TCP_SEND_DATA) {
    748		ret = nvmet_try_send_data(cmd, last_in_batch);
    749		if (ret <= 0)
    750			goto done_send;
    751	}
    752
    753	if (cmd->state == NVMET_TCP_SEND_DDGST) {
    754		ret = nvmet_try_send_ddgst(cmd, last_in_batch);
    755		if (ret <= 0)
    756			goto done_send;
    757	}
    758
    759	if (cmd->state == NVMET_TCP_SEND_R2T) {
    760		ret = nvmet_try_send_r2t(cmd, last_in_batch);
    761		if (ret <= 0)
    762			goto done_send;
    763	}
    764
    765	if (cmd->state == NVMET_TCP_SEND_RESPONSE)
    766		ret = nvmet_try_send_response(cmd, last_in_batch);
    767
    768done_send:
    769	if (ret < 0) {
    770		if (ret == -EAGAIN)
    771			return 0;
    772		return ret;
    773	}
    774
    775	return 1;
    776}
    777
    778static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue,
    779		int budget, int *sends)
    780{
    781	int i, ret = 0;
    782
    783	for (i = 0; i < budget; i++) {
    784		ret = nvmet_tcp_try_send_one(queue, i == budget - 1);
    785		if (unlikely(ret < 0)) {
    786			nvmet_tcp_socket_error(queue, ret);
    787			goto done;
    788		} else if (ret == 0) {
    789			break;
    790		}
    791		(*sends)++;
    792	}
    793done:
    794	return ret;
    795}
    796
    797static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue)
    798{
    799	queue->offset = 0;
    800	queue->left = sizeof(struct nvme_tcp_hdr);
    801	queue->cmd = NULL;
    802	queue->rcv_state = NVMET_TCP_RECV_PDU;
    803}
    804
    805static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue)
    806{
    807	struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
    808
    809	ahash_request_free(queue->rcv_hash);
    810	ahash_request_free(queue->snd_hash);
    811	crypto_free_ahash(tfm);
    812}
    813
    814static int nvmet_tcp_alloc_crypto(struct nvmet_tcp_queue *queue)
    815{
    816	struct crypto_ahash *tfm;
    817
    818	tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
    819	if (IS_ERR(tfm))
    820		return PTR_ERR(tfm);
    821
    822	queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
    823	if (!queue->snd_hash)
    824		goto free_tfm;
    825	ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
    826
    827	queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
    828	if (!queue->rcv_hash)
    829		goto free_snd_hash;
    830	ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
    831
    832	return 0;
    833free_snd_hash:
    834	ahash_request_free(queue->snd_hash);
    835free_tfm:
    836	crypto_free_ahash(tfm);
    837	return -ENOMEM;
    838}
    839
    840
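        /*
         * Connection establishment: the first PDU on a queue must be an ICReq.
         * Validate the protocol version and data alignment fields, negotiate
         * header/data digests (allocating crc32c contexts if either is enabled),
         * and answer with an ICResp before switching the queue to LIVE.
         */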
    841static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
    842{
    843	struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq;
    844	struct nvme_tcp_icresp_pdu *icresp = &queue->pdu.icresp;
    845	struct msghdr msg = {};
    846	struct kvec iov;
    847	int ret;
    848
     849	if (le32_to_cpu(icreq->hdr.plen) != sizeof(struct nvme_tcp_icreq_pdu)) {
     850		pr_err("bad nvme-tcp pdu length (%d)\n",
     851			le32_to_cpu(icreq->hdr.plen));
     852		nvmet_tcp_fatal_error(queue);
        		return -EPROTO;
     853	}
    854
    855	if (icreq->pfv != NVME_TCP_PFV_1_0) {
    856		pr_err("queue %d: bad pfv %d\n", queue->idx, icreq->pfv);
    857		return -EPROTO;
    858	}
    859
    860	if (icreq->hpda != 0) {
    861		pr_err("queue %d: unsupported hpda %d\n", queue->idx,
    862			icreq->hpda);
    863		return -EPROTO;
    864	}
    865
    866	queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE);
    867	queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE);
    868	if (queue->hdr_digest || queue->data_digest) {
    869		ret = nvmet_tcp_alloc_crypto(queue);
    870		if (ret)
    871			return ret;
    872	}
    873
    874	memset(icresp, 0, sizeof(*icresp));
    875	icresp->hdr.type = nvme_tcp_icresp;
    876	icresp->hdr.hlen = sizeof(*icresp);
    877	icresp->hdr.pdo = 0;
    878	icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen);
    879	icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
     880	icresp->maxdata = cpu_to_le32(0x400000); /* 4M arbitrary limit */
    881	icresp->cpda = 0;
    882	if (queue->hdr_digest)
    883		icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
    884	if (queue->data_digest)
    885		icresp->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
    886
    887	iov.iov_base = icresp;
    888	iov.iov_len = sizeof(*icresp);
    889	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
    890	if (ret < 0)
    891		goto free_crypto;
    892
    893	queue->state = NVMET_TCP_Q_LIVE;
    894	nvmet_prepare_receive_pdu(queue);
    895	return 0;
    896free_crypto:
    897	if (queue->hdr_digest || queue->data_digest)
    898		nvmet_tcp_free_crypto(queue);
    899	return ret;
    900}
    901
    902static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
    903		struct nvmet_tcp_cmd *cmd, struct nvmet_req *req)
    904{
    905	size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
    906	int ret;
    907
    908	/*
    909	 * This command has not been processed yet, hence we are trying to
    910	 * figure out if there is still pending data left to receive. If
     911	 * there isn't, we can simply prepare for the next pdu and bail out,
    912	 * otherwise we will need to prepare a buffer and receive the
    913	 * stale data before continuing forward.
    914	 */
    915	if (!nvme_is_write(cmd->req.cmd) || !data_len ||
    916	    data_len > cmd->req.port->inline_data_size) {
    917		nvmet_prepare_receive_pdu(queue);
    918		return;
    919	}
    920
    921	ret = nvmet_tcp_map_data(cmd);
    922	if (unlikely(ret)) {
    923		pr_err("queue %d: failed to map data\n", queue->idx);
    924		nvmet_tcp_fatal_error(queue);
    925		return;
    926	}
    927
    928	queue->rcv_state = NVMET_TCP_RECV_DATA;
    929	nvmet_tcp_map_pdu_iovec(cmd);
    930	cmd->flags |= NVMET_TCP_F_INIT_FAILED;
    931}
    932
    933static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
    934{
    935	struct nvme_tcp_data_pdu *data = &queue->pdu.data;
    936	struct nvmet_tcp_cmd *cmd;
    937
     938	if (likely(queue->nr_cmds)) {
        		if (unlikely(data->ttag >= queue->nr_cmds)) {
        			pr_err("queue %d: received out of bound ttag %u, nr_cmds %u\n",
        				queue->idx, data->ttag, queue->nr_cmds);
        			nvmet_tcp_fatal_error(queue);
        			return -EPROTO;
        		}
     939		cmd = &queue->cmds[data->ttag];
     940	} else {
     941		cmd = &queue->connect;
        	}
     942
     943	if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) {
     944		pr_err("ttag %u unexpected data offset %u (expected %u)\n",
     945			data->ttag, le32_to_cpu(data->data_offset),
     946			cmd->rbytes_done);
     947		/* FIXME: use path and transport errors */
     948		nvmet_req_complete(&cmd->req,
     949			NVME_SC_INVALID_FIELD | NVME_SC_DNR);
     950		return -EPROTO;
     951	}
    952
    953	cmd->pdu_len = le32_to_cpu(data->data_length);
    954	cmd->pdu_recv = 0;
    955	nvmet_tcp_map_pdu_iovec(cmd);
    956	queue->cmd = cmd;
    957	queue->rcv_state = NVMET_TCP_RECV_DATA;
    958
    959	return 0;
    960}
    961
    962static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
    963{
    964	struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
    965	struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd;
    966	struct nvmet_req *req;
    967	int ret;
    968
    969	if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
    970		if (hdr->type != nvme_tcp_icreq) {
    971			pr_err("unexpected pdu type (%d) before icreq\n",
    972				hdr->type);
    973			nvmet_tcp_fatal_error(queue);
    974			return -EPROTO;
    975		}
    976		return nvmet_tcp_handle_icreq(queue);
    977	}
    978
    979	if (hdr->type == nvme_tcp_h2c_data) {
    980		ret = nvmet_tcp_handle_h2c_data_pdu(queue);
    981		if (unlikely(ret))
    982			return ret;
    983		return 0;
    984	}
    985
    986	queue->cmd = nvmet_tcp_get_cmd(queue);
    987	if (unlikely(!queue->cmd)) {
    988		/* This should never happen */
     989		pr_err("queue %d: out of commands (%d) send_list_len: %d, opcode: %d\n",
    990			queue->idx, queue->nr_cmds, queue->send_list_len,
    991			nvme_cmd->common.opcode);
    992		nvmet_tcp_fatal_error(queue);
    993		return -ENOMEM;
    994	}
    995
    996	req = &queue->cmd->req;
    997	memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd));
    998
    999	if (unlikely(!nvmet_req_init(req, &queue->nvme_cq,
   1000			&queue->nvme_sq, &nvmet_tcp_ops))) {
   1001		pr_err("failed cmd %p id %d opcode %d, data_len: %d\n",
   1002			req->cmd, req->cmd->common.command_id,
   1003			req->cmd->common.opcode,
   1004			le32_to_cpu(req->cmd->common.dptr.sgl.length));
   1005
   1006		nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
   1007		return 0;
   1008	}
   1009
   1010	ret = nvmet_tcp_map_data(queue->cmd);
   1011	if (unlikely(ret)) {
   1012		pr_err("queue %d: failed to map data\n", queue->idx);
   1013		if (nvmet_tcp_has_inline_data(queue->cmd))
   1014			nvmet_tcp_fatal_error(queue);
   1015		else
   1016			nvmet_req_complete(req, ret);
   1017		ret = -EAGAIN;
   1018		goto out;
   1019	}
   1020
   1021	if (nvmet_tcp_need_data_in(queue->cmd)) {
   1022		if (nvmet_tcp_has_inline_data(queue->cmd)) {
   1023			queue->rcv_state = NVMET_TCP_RECV_DATA;
   1024			nvmet_tcp_map_pdu_iovec(queue->cmd);
   1025			return 0;
   1026		}
   1027		/* send back R2T */
   1028		nvmet_tcp_queue_response(&queue->cmd->req);
   1029		goto out;
   1030	}
   1031
   1032	queue->cmd->req.execute(&queue->cmd->req);
   1033out:
   1034	nvmet_prepare_receive_pdu(queue);
   1035	return ret;
   1036}
   1037
   1038static const u8 nvme_tcp_pdu_sizes[] = {
   1039	[nvme_tcp_icreq]	= sizeof(struct nvme_tcp_icreq_pdu),
   1040	[nvme_tcp_cmd]		= sizeof(struct nvme_tcp_cmd_pdu),
   1041	[nvme_tcp_h2c_data]	= sizeof(struct nvme_tcp_data_pdu),
   1042};
   1043
   1044static inline u8 nvmet_tcp_pdu_size(u8 type)
   1045{
   1046	size_t idx = type;
   1047
   1048	return (idx < ARRAY_SIZE(nvme_tcp_pdu_sizes) &&
   1049		nvme_tcp_pdu_sizes[idx]) ?
   1050			nvme_tcp_pdu_sizes[idx] : 0;
   1051}
   1052
   1053static inline bool nvmet_tcp_pdu_valid(u8 type)
   1054{
   1055	switch (type) {
   1056	case nvme_tcp_icreq:
   1057	case nvme_tcp_cmd:
   1058	case nvme_tcp_h2c_data:
   1059		/* fallthru */
   1060		return true;
   1061	}
   1062
   1063	return false;
   1064}
   1065
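        /*
         * PDU headers are received in two steps: first the common 8-byte
         * nvme_tcp_hdr, which is validated and used to size the remainder
         * (hlen plus an optional header digest), then the rest of the header.
         * Only then are the digests checked and the PDU dispatched.
         */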
   1066static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue)
   1067{
   1068	struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
   1069	int len;
   1070	struct kvec iov;
   1071	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
   1072
   1073recv:
   1074	iov.iov_base = (void *)&queue->pdu + queue->offset;
   1075	iov.iov_len = queue->left;
   1076	len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
   1077			iov.iov_len, msg.msg_flags);
   1078	if (unlikely(len < 0))
   1079		return len;
   1080
   1081	queue->offset += len;
   1082	queue->left -= len;
   1083	if (queue->left)
   1084		return -EAGAIN;
   1085
   1086	if (queue->offset == sizeof(struct nvme_tcp_hdr)) {
   1087		u8 hdgst = nvmet_tcp_hdgst_len(queue);
   1088
   1089		if (unlikely(!nvmet_tcp_pdu_valid(hdr->type))) {
   1090			pr_err("unexpected pdu type %d\n", hdr->type);
   1091			nvmet_tcp_fatal_error(queue);
   1092			return -EIO;
   1093		}
   1094
   1095		if (unlikely(hdr->hlen != nvmet_tcp_pdu_size(hdr->type))) {
   1096			pr_err("pdu %d bad hlen %d\n", hdr->type, hdr->hlen);
   1097			return -EIO;
   1098		}
   1099
   1100		queue->left = hdr->hlen - queue->offset + hdgst;
   1101		goto recv;
   1102	}
   1103
   1104	if (queue->hdr_digest &&
   1105	    nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) {
   1106		nvmet_tcp_fatal_error(queue); /* fatal */
   1107		return -EPROTO;
   1108	}
   1109
   1110	if (queue->data_digest &&
   1111	    nvmet_tcp_check_ddgst(queue, &queue->pdu)) {
   1112		nvmet_tcp_fatal_error(queue); /* fatal */
   1113		return -EPROTO;
   1114	}
   1115
   1116	return nvmet_tcp_done_recv_pdu(queue);
   1117}
   1118
   1119static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
   1120{
   1121	struct nvmet_tcp_queue *queue = cmd->queue;
   1122
   1123	nvmet_tcp_calc_ddgst(queue->rcv_hash, cmd);
   1124	queue->offset = 0;
   1125	queue->left = NVME_TCP_DIGEST_LENGTH;
   1126	queue->rcv_state = NVMET_TCP_RECV_DDGST;
   1127}
   1128
   1129static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
   1130{
   1131	struct nvmet_tcp_cmd  *cmd = queue->cmd;
   1132	int ret;
   1133
   1134	while (msg_data_left(&cmd->recv_msg)) {
   1135		ret = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg,
   1136			cmd->recv_msg.msg_flags);
   1137		if (ret <= 0)
   1138			return ret;
   1139
   1140		cmd->pdu_recv += ret;
   1141		cmd->rbytes_done += ret;
   1142	}
   1143
   1144	nvmet_tcp_unmap_pdu_iovec(cmd);
   1145	if (queue->data_digest) {
   1146		nvmet_tcp_prep_recv_ddgst(cmd);
   1147		return 0;
   1148	}
   1149
   1150	if (cmd->rbytes_done == cmd->req.transfer_len)
   1151		nvmet_tcp_execute_request(cmd);
   1152
   1153	nvmet_prepare_receive_pdu(queue);
   1154	return 0;
   1155}
   1156
   1157static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
   1158{
   1159	struct nvmet_tcp_cmd *cmd = queue->cmd;
   1160	int ret;
   1161	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
   1162	struct kvec iov = {
   1163		.iov_base = (void *)&cmd->recv_ddgst + queue->offset,
   1164		.iov_len = queue->left
   1165	};
   1166
   1167	ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
   1168			iov.iov_len, msg.msg_flags);
   1169	if (unlikely(ret < 0))
   1170		return ret;
   1171
   1172	queue->offset += ret;
   1173	queue->left -= ret;
   1174	if (queue->left)
   1175		return -EAGAIN;
   1176
   1177	if (queue->data_digest && cmd->exp_ddgst != cmd->recv_ddgst) {
   1178		pr_err("queue %d: cmd %d pdu (%d) data digest error: recv %#x expected %#x\n",
   1179			queue->idx, cmd->req.cmd->common.command_id,
   1180			queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst),
   1181			le32_to_cpu(cmd->exp_ddgst));
   1182		nvmet_tcp_finish_cmd(cmd);
   1183		nvmet_tcp_fatal_error(queue);
   1184		ret = -EPROTO;
   1185		goto out;
   1186	}
   1187
   1188	if (cmd->rbytes_done == cmd->req.transfer_len)
   1189		nvmet_tcp_execute_request(cmd);
   1190
   1191	ret = 0;
   1192out:
   1193	nvmet_prepare_receive_pdu(queue);
   1194	return ret;
   1195}
   1196
   1197static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue)
   1198{
   1199	int result = 0;
   1200
   1201	if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR))
   1202		return 0;
   1203
   1204	if (queue->rcv_state == NVMET_TCP_RECV_PDU) {
   1205		result = nvmet_tcp_try_recv_pdu(queue);
   1206		if (result != 0)
   1207			goto done_recv;
   1208	}
   1209
   1210	if (queue->rcv_state == NVMET_TCP_RECV_DATA) {
   1211		result = nvmet_tcp_try_recv_data(queue);
   1212		if (result != 0)
   1213			goto done_recv;
   1214	}
   1215
   1216	if (queue->rcv_state == NVMET_TCP_RECV_DDGST) {
   1217		result = nvmet_tcp_try_recv_ddgst(queue);
   1218		if (result != 0)
   1219			goto done_recv;
   1220	}
   1221
   1222done_recv:
   1223	if (result < 0) {
   1224		if (result == -EAGAIN)
   1225			return 0;
   1226		return result;
   1227	}
   1228	return 1;
   1229}
   1230
   1231static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue,
   1232		int budget, int *recvs)
   1233{
   1234	int i, ret = 0;
   1235
   1236	for (i = 0; i < budget; i++) {
   1237		ret = nvmet_tcp_try_recv_one(queue);
   1238		if (unlikely(ret < 0)) {
   1239			nvmet_tcp_socket_error(queue, ret);
   1240			goto done;
   1241		} else if (ret == 0) {
   1242			break;
   1243		}
   1244		(*recvs)++;
   1245	}
   1246done:
   1247	return ret;
   1248}
   1249
   1250static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
   1251{
   1252	spin_lock(&queue->state_lock);
   1253	if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
   1254		queue->state = NVMET_TCP_Q_DISCONNECTING;
   1255		queue_work(nvmet_wq, &queue->release_work);
   1256	}
   1257	spin_unlock(&queue->state_lock);
   1258}
   1259
   1260static inline void nvmet_tcp_arm_queue_deadline(struct nvmet_tcp_queue *queue)
   1261{
   1262	queue->poll_end = jiffies + usecs_to_jiffies(idle_poll_period_usecs);
   1263}
   1264
   1265static bool nvmet_tcp_check_queue_deadline(struct nvmet_tcp_queue *queue,
   1266		int ops)
   1267{
   1268	if (!idle_poll_period_usecs)
   1269		return false;
   1270
   1271	if (ops)
   1272		nvmet_tcp_arm_queue_deadline(queue);
   1273
   1274	return !time_after(jiffies, queue->poll_end);
   1275}
   1276
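        /*
         * Main per-queue worker: alternate between bounded receive and send
         * passes (NVMET_TCP_RECV_BUDGET / NVMET_TCP_SEND_BUDGET operations per
         * pass) until nothing is pending or NVMET_TCP_IO_WORK_BUDGET operations
         * have been performed, then requeue itself if work is still pending or
         * an idle-poll deadline is armed.
         */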
   1277static void nvmet_tcp_io_work(struct work_struct *w)
   1278{
   1279	struct nvmet_tcp_queue *queue =
   1280		container_of(w, struct nvmet_tcp_queue, io_work);
   1281	bool pending;
   1282	int ret, ops = 0;
   1283
   1284	do {
   1285		pending = false;
   1286
   1287		ret = nvmet_tcp_try_recv(queue, NVMET_TCP_RECV_BUDGET, &ops);
   1288		if (ret > 0)
   1289			pending = true;
   1290		else if (ret < 0)
   1291			return;
   1292
   1293		ret = nvmet_tcp_try_send(queue, NVMET_TCP_SEND_BUDGET, &ops);
   1294		if (ret > 0)
   1295			pending = true;
   1296		else if (ret < 0)
   1297			return;
   1298
   1299	} while (pending && ops < NVMET_TCP_IO_WORK_BUDGET);
   1300
   1301	/*
   1302	 * Requeue the worker if idle deadline period is in progress or any
   1303	 * ops activity was recorded during the do-while loop above.
   1304	 */
   1305	if (nvmet_tcp_check_queue_deadline(queue, ops) || pending)
   1306		queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
   1307}
   1308
   1309static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
   1310		struct nvmet_tcp_cmd *c)
   1311{
   1312	u8 hdgst = nvmet_tcp_hdgst_len(queue);
   1313
   1314	c->queue = queue;
   1315	c->req.port = queue->port->nport;
   1316
   1317	c->cmd_pdu = page_frag_alloc(&queue->pf_cache,
   1318			sizeof(*c->cmd_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
   1319	if (!c->cmd_pdu)
   1320		return -ENOMEM;
   1321	c->req.cmd = &c->cmd_pdu->cmd;
   1322
   1323	c->rsp_pdu = page_frag_alloc(&queue->pf_cache,
   1324			sizeof(*c->rsp_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
   1325	if (!c->rsp_pdu)
   1326		goto out_free_cmd;
   1327	c->req.cqe = &c->rsp_pdu->cqe;
   1328
   1329	c->data_pdu = page_frag_alloc(&queue->pf_cache,
   1330			sizeof(*c->data_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
   1331	if (!c->data_pdu)
   1332		goto out_free_rsp;
   1333
   1334	c->r2t_pdu = page_frag_alloc(&queue->pf_cache,
   1335			sizeof(*c->r2t_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
   1336	if (!c->r2t_pdu)
   1337		goto out_free_data;
   1338
   1339	c->recv_msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
   1340
   1341	list_add_tail(&c->entry, &queue->free_list);
   1342
   1343	return 0;
   1344out_free_data:
   1345	page_frag_free(c->data_pdu);
   1346out_free_rsp:
   1347	page_frag_free(c->rsp_pdu);
   1348out_free_cmd:
   1349	page_frag_free(c->cmd_pdu);
   1350	return -ENOMEM;
   1351}
   1352
   1353static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c)
   1354{
   1355	page_frag_free(c->r2t_pdu);
   1356	page_frag_free(c->data_pdu);
   1357	page_frag_free(c->rsp_pdu);
   1358	page_frag_free(c->cmd_pdu);
   1359}
   1360
   1361static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue)
   1362{
   1363	struct nvmet_tcp_cmd *cmds;
   1364	int i, ret = -EINVAL, nr_cmds = queue->nr_cmds;
   1365
   1366	cmds = kcalloc(nr_cmds, sizeof(struct nvmet_tcp_cmd), GFP_KERNEL);
   1367	if (!cmds)
   1368		goto out;
   1369
   1370	for (i = 0; i < nr_cmds; i++) {
   1371		ret = nvmet_tcp_alloc_cmd(queue, cmds + i);
   1372		if (ret)
   1373			goto out_free;
   1374	}
   1375
   1376	queue->cmds = cmds;
   1377
   1378	return 0;
   1379out_free:
   1380	while (--i >= 0)
   1381		nvmet_tcp_free_cmd(cmds + i);
   1382	kfree(cmds);
   1383out:
   1384	return ret;
   1385}
   1386
   1387static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue)
   1388{
   1389	struct nvmet_tcp_cmd *cmds = queue->cmds;
   1390	int i;
   1391
   1392	for (i = 0; i < queue->nr_cmds; i++)
   1393		nvmet_tcp_free_cmd(cmds + i);
   1394
   1395	nvmet_tcp_free_cmd(&queue->connect);
   1396	kfree(cmds);
   1397}
   1398
   1399static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
   1400{
   1401	struct socket *sock = queue->sock;
   1402
   1403	write_lock_bh(&sock->sk->sk_callback_lock);
   1404	sock->sk->sk_data_ready =  queue->data_ready;
   1405	sock->sk->sk_state_change = queue->state_change;
   1406	sock->sk->sk_write_space = queue->write_space;
   1407	sock->sk->sk_user_data = NULL;
   1408	write_unlock_bh(&sock->sk->sk_callback_lock);
   1409}
   1410
   1411static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd)
   1412{
   1413	nvmet_req_uninit(&cmd->req);
   1414	nvmet_tcp_unmap_pdu_iovec(cmd);
   1415	nvmet_tcp_free_cmd_buffers(cmd);
   1416}
   1417
   1418static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
   1419{
   1420	struct nvmet_tcp_cmd *cmd = queue->cmds;
   1421	int i;
   1422
   1423	for (i = 0; i < queue->nr_cmds; i++, cmd++) {
   1424		if (nvmet_tcp_need_data_in(cmd))
   1425			nvmet_req_uninit(&cmd->req);
   1426
   1427		nvmet_tcp_unmap_pdu_iovec(cmd);
   1428		nvmet_tcp_free_cmd_buffers(cmd);
   1429	}
   1430
   1431	if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) {
   1432		/* failed in connect */
   1433		nvmet_tcp_finish_cmd(&queue->connect);
   1434	}
   1435}
   1436
   1437static void nvmet_tcp_release_queue_work(struct work_struct *w)
   1438{
   1439	struct page *page;
   1440	struct nvmet_tcp_queue *queue =
   1441		container_of(w, struct nvmet_tcp_queue, release_work);
   1442
   1443	mutex_lock(&nvmet_tcp_queue_mutex);
   1444	list_del_init(&queue->queue_list);
   1445	mutex_unlock(&nvmet_tcp_queue_mutex);
   1446
   1447	nvmet_tcp_restore_socket_callbacks(queue);
   1448	cancel_work_sync(&queue->io_work);
   1449	/* stop accepting incoming data */
   1450	queue->rcv_state = NVMET_TCP_RECV_ERR;
   1451
   1452	nvmet_tcp_uninit_data_in_cmds(queue);
   1453	nvmet_sq_destroy(&queue->nvme_sq);
   1454	cancel_work_sync(&queue->io_work);
   1455	sock_release(queue->sock);
   1456	nvmet_tcp_free_cmds(queue);
   1457	if (queue->hdr_digest || queue->data_digest)
   1458		nvmet_tcp_free_crypto(queue);
   1459	ida_free(&nvmet_tcp_queue_ida, queue->idx);
   1460
   1461	page = virt_to_head_page(queue->pf_cache.va);
   1462	__page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
   1463	kfree(queue);
   1464}
   1465
   1466static void nvmet_tcp_data_ready(struct sock *sk)
   1467{
   1468	struct nvmet_tcp_queue *queue;
   1469
   1470	read_lock_bh(&sk->sk_callback_lock);
   1471	queue = sk->sk_user_data;
   1472	if (likely(queue))
   1473		queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
   1474	read_unlock_bh(&sk->sk_callback_lock);
   1475}
   1476
   1477static void nvmet_tcp_write_space(struct sock *sk)
   1478{
   1479	struct nvmet_tcp_queue *queue;
   1480
   1481	read_lock_bh(&sk->sk_callback_lock);
   1482	queue = sk->sk_user_data;
   1483	if (unlikely(!queue))
   1484		goto out;
   1485
   1486	if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
   1487		queue->write_space(sk);
   1488		goto out;
   1489	}
   1490
   1491	if (sk_stream_is_writeable(sk)) {
   1492		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
   1493		queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
   1494	}
   1495out:
   1496	read_unlock_bh(&sk->sk_callback_lock);
   1497}
   1498
   1499static void nvmet_tcp_state_change(struct sock *sk)
   1500{
   1501	struct nvmet_tcp_queue *queue;
   1502
   1503	read_lock_bh(&sk->sk_callback_lock);
   1504	queue = sk->sk_user_data;
   1505	if (!queue)
   1506		goto done;
   1507
   1508	switch (sk->sk_state) {
   1509	case TCP_FIN_WAIT1:
   1510	case TCP_CLOSE_WAIT:
   1511	case TCP_CLOSE:
   1512		/* FALLTHRU */
   1513		nvmet_tcp_schedule_release_queue(queue);
   1514		break;
   1515	default:
   1516		pr_warn("queue %d unhandled state %d\n",
   1517			queue->idx, sk->sk_state);
   1518	}
   1519done:
   1520	read_unlock_bh(&sk->sk_callback_lock);
   1521}
   1522
   1523static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
   1524{
   1525	struct socket *sock = queue->sock;
   1526	struct inet_sock *inet = inet_sk(sock->sk);
   1527	int ret;
   1528
   1529	ret = kernel_getsockname(sock,
   1530		(struct sockaddr *)&queue->sockaddr);
   1531	if (ret < 0)
   1532		return ret;
   1533
   1534	ret = kernel_getpeername(sock,
   1535		(struct sockaddr *)&queue->sockaddr_peer);
   1536	if (ret < 0)
   1537		return ret;
   1538
   1539	/*
   1540	 * Cleanup whatever is sitting in the TCP transmit queue on socket
   1541	 * close. This is done to prevent stale data from being sent should
   1542	 * the network connection be restored before TCP times out.
   1543	 */
   1544	sock_no_linger(sock->sk);
   1545
   1546	if (so_priority > 0)
   1547		sock_set_priority(sock->sk, so_priority);
   1548
   1549	/* Set socket type of service */
   1550	if (inet->rcv_tos > 0)
   1551		ip_sock_set_tos(sock->sk, inet->rcv_tos);
   1552
   1553	ret = 0;
   1554	write_lock_bh(&sock->sk->sk_callback_lock);
   1555	if (sock->sk->sk_state != TCP_ESTABLISHED) {
   1556		/*
   1557		 * If the socket is already closing, don't even start
   1558		 * consuming it
   1559		 */
   1560		ret = -ENOTCONN;
   1561	} else {
   1562		sock->sk->sk_user_data = queue;
   1563		queue->data_ready = sock->sk->sk_data_ready;
   1564		sock->sk->sk_data_ready = nvmet_tcp_data_ready;
   1565		queue->state_change = sock->sk->sk_state_change;
   1566		sock->sk->sk_state_change = nvmet_tcp_state_change;
   1567		queue->write_space = sock->sk->sk_write_space;
   1568		sock->sk->sk_write_space = nvmet_tcp_write_space;
   1569		if (idle_poll_period_usecs)
   1570			nvmet_tcp_arm_queue_deadline(queue);
   1571		queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
   1572	}
   1573	write_unlock_bh(&sock->sk->sk_callback_lock);
   1574
   1575	return ret;
   1576}
   1577
   1578static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
   1579		struct socket *newsock)
   1580{
   1581	struct nvmet_tcp_queue *queue;
   1582	int ret;
   1583
   1584	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
   1585	if (!queue)
   1586		return -ENOMEM;
   1587
   1588	INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work);
   1589	INIT_WORK(&queue->io_work, nvmet_tcp_io_work);
   1590	queue->sock = newsock;
   1591	queue->port = port;
   1592	queue->nr_cmds = 0;
   1593	spin_lock_init(&queue->state_lock);
   1594	queue->state = NVMET_TCP_Q_CONNECTING;
   1595	INIT_LIST_HEAD(&queue->free_list);
   1596	init_llist_head(&queue->resp_list);
   1597	INIT_LIST_HEAD(&queue->resp_send_list);
   1598
   1599	queue->idx = ida_alloc(&nvmet_tcp_queue_ida, GFP_KERNEL);
   1600	if (queue->idx < 0) {
   1601		ret = queue->idx;
   1602		goto out_free_queue;
   1603	}
   1604
   1605	ret = nvmet_tcp_alloc_cmd(queue, &queue->connect);
   1606	if (ret)
   1607		goto out_ida_remove;
   1608
   1609	ret = nvmet_sq_init(&queue->nvme_sq);
   1610	if (ret)
   1611		goto out_free_connect;
   1612
   1613	nvmet_prepare_receive_pdu(queue);
   1614
   1615	mutex_lock(&nvmet_tcp_queue_mutex);
   1616	list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list);
   1617	mutex_unlock(&nvmet_tcp_queue_mutex);
   1618
   1619	ret = nvmet_tcp_set_queue_sock(queue);
   1620	if (ret)
   1621		goto out_destroy_sq;
   1622
   1623	return 0;
   1624out_destroy_sq:
   1625	mutex_lock(&nvmet_tcp_queue_mutex);
   1626	list_del_init(&queue->queue_list);
   1627	mutex_unlock(&nvmet_tcp_queue_mutex);
   1628	nvmet_sq_destroy(&queue->nvme_sq);
   1629out_free_connect:
   1630	nvmet_tcp_free_cmd(&queue->connect);
   1631out_ida_remove:
   1632	ida_free(&nvmet_tcp_queue_ida, queue->idx);
   1633out_free_queue:
   1634	kfree(queue);
   1635	return ret;
   1636}
   1637
   1638static void nvmet_tcp_accept_work(struct work_struct *w)
   1639{
   1640	struct nvmet_tcp_port *port =
   1641		container_of(w, struct nvmet_tcp_port, accept_work);
   1642	struct socket *newsock;
   1643	int ret;
   1644
   1645	while (true) {
   1646		ret = kernel_accept(port->sock, &newsock, O_NONBLOCK);
   1647		if (ret < 0) {
   1648			if (ret != -EAGAIN)
   1649				pr_warn("failed to accept err=%d\n", ret);
   1650			return;
   1651		}
   1652		ret = nvmet_tcp_alloc_queue(port, newsock);
   1653		if (ret) {
   1654			pr_err("failed to allocate queue\n");
   1655			sock_release(newsock);
   1656		}
   1657	}
   1658}
   1659
   1660static void nvmet_tcp_listen_data_ready(struct sock *sk)
   1661{
   1662	struct nvmet_tcp_port *port;
   1663
   1664	read_lock_bh(&sk->sk_callback_lock);
   1665	port = sk->sk_user_data;
   1666	if (!port)
   1667		goto out;
   1668
   1669	if (sk->sk_state == TCP_LISTEN)
   1670		queue_work(nvmet_wq, &port->accept_work);
   1671out:
   1672	read_unlock_bh(&sk->sk_callback_lock);
   1673}
   1674
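        /*
         * For orientation only: a listening port handled by this transport is
         * normally created from user space through the nvmet configfs tree.
         * The values below are illustrative, not taken from this file:
         *
         *   p=/sys/kernel/config/nvmet/ports/1
         *   mkdir $p
         *   echo tcp      > $p/addr_trtype
         *   echo ipv4     > $p/addr_adrfam
         *   echo 10.0.0.1 > $p/addr_traddr
         *   echo 4420     > $p/addr_trsvcid
         *   ln -s /sys/kernel/config/nvmet/subsystems/<subsys> $p/subsystems/
         *
         * nvmet_tcp_add_port() is the kernel-side counterpart of that setup.
         */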
   1675static int nvmet_tcp_add_port(struct nvmet_port *nport)
   1676{
   1677	struct nvmet_tcp_port *port;
   1678	__kernel_sa_family_t af;
   1679	int ret;
   1680
   1681	port = kzalloc(sizeof(*port), GFP_KERNEL);
   1682	if (!port)
   1683		return -ENOMEM;
   1684
   1685	switch (nport->disc_addr.adrfam) {
   1686	case NVMF_ADDR_FAMILY_IP4:
   1687		af = AF_INET;
   1688		break;
   1689	case NVMF_ADDR_FAMILY_IP6:
   1690		af = AF_INET6;
   1691		break;
   1692	default:
   1693		pr_err("address family %d not supported\n",
   1694				nport->disc_addr.adrfam);
   1695		ret = -EINVAL;
   1696		goto err_port;
   1697	}
   1698
   1699	ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
   1700			nport->disc_addr.trsvcid, &port->addr);
   1701	if (ret) {
   1702		pr_err("malformed ip/port passed: %s:%s\n",
   1703			nport->disc_addr.traddr, nport->disc_addr.trsvcid);
   1704		goto err_port;
   1705	}
   1706
   1707	port->nport = nport;
   1708	INIT_WORK(&port->accept_work, nvmet_tcp_accept_work);
   1709	if (port->nport->inline_data_size < 0)
   1710		port->nport->inline_data_size = NVMET_TCP_DEF_INLINE_DATA_SIZE;
   1711
   1712	ret = sock_create(port->addr.ss_family, SOCK_STREAM,
   1713				IPPROTO_TCP, &port->sock);
   1714	if (ret) {
   1715		pr_err("failed to create a socket\n");
   1716		goto err_port;
   1717	}
   1718
   1719	port->sock->sk->sk_user_data = port;
   1720	port->data_ready = port->sock->sk->sk_data_ready;
   1721	port->sock->sk->sk_data_ready = nvmet_tcp_listen_data_ready;
   1722	sock_set_reuseaddr(port->sock->sk);
   1723	tcp_sock_set_nodelay(port->sock->sk);
   1724	if (so_priority > 0)
   1725		sock_set_priority(port->sock->sk, so_priority);
   1726
   1727	ret = kernel_bind(port->sock, (struct sockaddr *)&port->addr,
   1728			sizeof(port->addr));
   1729	if (ret) {
   1730		pr_err("failed to bind port socket %d\n", ret);
   1731		goto err_sock;
   1732	}
   1733
   1734	ret = kernel_listen(port->sock, 128);
   1735	if (ret) {
   1736		pr_err("failed to listen %d on port sock\n", ret);
   1737		goto err_sock;
   1738	}
   1739
   1740	nport->priv = port;
   1741	pr_info("enabling port %d (%pISpc)\n",
   1742		le16_to_cpu(nport->disc_addr.portid), &port->addr);
   1743
   1744	return 0;
   1745
   1746err_sock:
   1747	sock_release(port->sock);
   1748err_port:
   1749	kfree(port);
   1750	return ret;
   1751}
   1752
   1753static void nvmet_tcp_destroy_port_queues(struct nvmet_tcp_port *port)
   1754{
   1755	struct nvmet_tcp_queue *queue;
   1756
   1757	mutex_lock(&nvmet_tcp_queue_mutex);
   1758	list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
   1759		if (queue->port == port)
   1760			kernel_sock_shutdown(queue->sock, SHUT_RDWR);
   1761	mutex_unlock(&nvmet_tcp_queue_mutex);
   1762}
   1763
   1764static void nvmet_tcp_remove_port(struct nvmet_port *nport)
   1765{
   1766	struct nvmet_tcp_port *port = nport->priv;
   1767
   1768	write_lock_bh(&port->sock->sk->sk_callback_lock);
   1769	port->sock->sk->sk_data_ready = port->data_ready;
   1770	port->sock->sk->sk_user_data = NULL;
   1771	write_unlock_bh(&port->sock->sk->sk_callback_lock);
   1772	cancel_work_sync(&port->accept_work);
   1773	/*
    1774	 * Destroy the remaining queues, which do not belong to any
    1775	 * controller yet.
   1776	 */
   1777	nvmet_tcp_destroy_port_queues(port);
   1778
   1779	sock_release(port->sock);
   1780	kfree(port);
   1781}
   1782
   1783static void nvmet_tcp_delete_ctrl(struct nvmet_ctrl *ctrl)
   1784{
   1785	struct nvmet_tcp_queue *queue;
   1786
   1787	mutex_lock(&nvmet_tcp_queue_mutex);
   1788	list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
   1789		if (queue->nvme_sq.ctrl == ctrl)
   1790			kernel_sock_shutdown(queue->sock, SHUT_RDWR);
   1791	mutex_unlock(&nvmet_tcp_queue_mutex);
   1792}
   1793
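        /*
         * Called once the fabrics connect command establishes the queue size:
         * the per-queue command array is sized at twice the SQ size
         * (sq->size * 2), presumably to leave headroom for commands whose
         * responses are still queued for transmission.
         */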
   1794static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
   1795{
   1796	struct nvmet_tcp_queue *queue =
   1797		container_of(sq, struct nvmet_tcp_queue, nvme_sq);
   1798
   1799	if (sq->qid == 0) {
   1800		/* Let inflight controller teardown complete */
   1801		flush_workqueue(nvmet_wq);
   1802	}
   1803
   1804	queue->nr_cmds = sq->size * 2;
   1805	if (nvmet_tcp_alloc_cmds(queue))
   1806		return NVME_SC_INTERNAL;
   1807	return 0;
   1808}
   1809
   1810static void nvmet_tcp_disc_port_addr(struct nvmet_req *req,
   1811		struct nvmet_port *nport, char *traddr)
   1812{
   1813	struct nvmet_tcp_port *port = nport->priv;
   1814
   1815	if (inet_addr_is_any((struct sockaddr *)&port->addr)) {
   1816		struct nvmet_tcp_cmd *cmd =
   1817			container_of(req, struct nvmet_tcp_cmd, req);
   1818		struct nvmet_tcp_queue *queue = cmd->queue;
   1819
   1820		sprintf(traddr, "%pISc", (struct sockaddr *)&queue->sockaddr);
   1821	} else {
   1822		memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE);
   1823	}
   1824}
   1825
   1826static const struct nvmet_fabrics_ops nvmet_tcp_ops = {
   1827	.owner			= THIS_MODULE,
   1828	.type			= NVMF_TRTYPE_TCP,
   1829	.msdbd			= 1,
   1830	.add_port		= nvmet_tcp_add_port,
   1831	.remove_port		= nvmet_tcp_remove_port,
   1832	.queue_response		= nvmet_tcp_queue_response,
   1833	.delete_ctrl		= nvmet_tcp_delete_ctrl,
   1834	.install_queue		= nvmet_tcp_install_queue,
   1835	.disc_traddr		= nvmet_tcp_disc_port_addr,
   1836};
   1837
   1838static int __init nvmet_tcp_init(void)
   1839{
   1840	int ret;
   1841
   1842	nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq", WQ_HIGHPRI, 0);
   1843	if (!nvmet_tcp_wq)
   1844		return -ENOMEM;
   1845
   1846	ret = nvmet_register_transport(&nvmet_tcp_ops);
   1847	if (ret)
   1848		goto err;
   1849
   1850	return 0;
   1851err:
   1852	destroy_workqueue(nvmet_tcp_wq);
   1853	return ret;
   1854}
   1855
   1856static void __exit nvmet_tcp_exit(void)
   1857{
   1858	struct nvmet_tcp_queue *queue;
   1859
   1860	nvmet_unregister_transport(&nvmet_tcp_ops);
   1861
   1862	flush_workqueue(nvmet_wq);
   1863	mutex_lock(&nvmet_tcp_queue_mutex);
   1864	list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
   1865		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
   1866	mutex_unlock(&nvmet_tcp_queue_mutex);
   1867	flush_workqueue(nvmet_wq);
   1868
   1869	destroy_workqueue(nvmet_tcp_wq);
   1870}
   1871
   1872module_init(nvmet_tcp_init);
   1873module_exit(nvmet_tcp_exit);
   1874
   1875MODULE_LICENSE("GPL v2");
   1876MODULE_ALIAS("nvmet-transport-3"); /* 3 == NVMF_TRTYPE_TCP */