cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

chcr_ktls.c (63824B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/* Copyright (C) 2020 Chelsio Communications.  All rights reserved. */
      3
      4#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
      5
      6#include <linux/skbuff.h>
      7#include <linux/module.h>
      8#include <linux/highmem.h>
      9#include <linux/ip.h>
     10#include <net/ipv6.h>
     11#include <linux/netdevice.h>
     12#include <crypto/aes.h>
     13#include "chcr_ktls.h"
     14
     15static LIST_HEAD(uld_ctx_list);
     16static DEFINE_MUTEX(dev_mutex);
     17
     18/* chcr_get_nfrags_to_send: get the remaining nfrags after start offset
     19 * @skb: skb
     20 * @start: start offset.
     21 * @len: how much data to send after @start
     22 */
     23static int chcr_get_nfrags_to_send(struct sk_buff *skb, u32 start, u32 len)
     24{
     25	struct skb_shared_info *si = skb_shinfo(skb);
     26	u32 frag_size, skb_linear_data_len = skb_headlen(skb);
     27	u8 nfrags = 0, frag_idx = 0;
     28	skb_frag_t *frag;
     29
      30	/* if it's a linear skb then return 1 */
     31	if (!skb_is_nonlinear(skb))
     32		return 1;
     33
     34	if (unlikely(start < skb_linear_data_len)) {
     35		frag_size = min(len, skb_linear_data_len - start);
     36	} else {
     37		start -= skb_linear_data_len;
     38
     39		frag = &si->frags[frag_idx];
     40		frag_size = skb_frag_size(frag);
     41		while (start >= frag_size) {
     42			start -= frag_size;
     43			frag_idx++;
     44			frag = &si->frags[frag_idx];
     45			frag_size = skb_frag_size(frag);
     46		}
     47		frag_size = min(len, skb_frag_size(frag) - start);
     48	}
     49	len -= frag_size;
     50	nfrags++;
     51
     52	while (len) {
     53		frag_size = min(len, skb_frag_size(&si->frags[frag_idx]));
     54		len -= frag_size;
     55		nfrags++;
     56		frag_idx++;
     57	}
     58	return nfrags;
     59}
     60
     61static int chcr_init_tcb_fields(struct chcr_ktls_info *tx_info);
     62static void clear_conn_resources(struct chcr_ktls_info *tx_info);
     63/*
     64 * chcr_ktls_save_keys: calculate and save crypto keys.
     65 * @tx_info - driver specific tls info.
     66 * @crypto_info - tls crypto information.
     67 * @direction - TX/RX direction.
     68 * return - SUCCESS/FAILURE.
     69 */
     70static int chcr_ktls_save_keys(struct chcr_ktls_info *tx_info,
     71			       struct tls_crypto_info *crypto_info,
     72			       enum tls_offload_ctx_dir direction)
     73{
     74	int ck_size, key_ctx_size, mac_key_size, keylen, ghash_size, ret;
     75	unsigned char ghash_h[TLS_CIPHER_AES_GCM_256_TAG_SIZE];
     76	struct tls12_crypto_info_aes_gcm_128 *info_128_gcm;
     77	struct ktls_key_ctx *kctx = &tx_info->key_ctx;
     78	struct crypto_aes_ctx aes_ctx;
     79	unsigned char *key, *salt;
     80
     81	switch (crypto_info->cipher_type) {
     82	case TLS_CIPHER_AES_GCM_128:
     83		info_128_gcm =
     84			(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
     85		keylen = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
     86		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
     87		tx_info->salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
     88		mac_key_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
     89		tx_info->iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
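        		/* keep the 8-byte explicit IV in host order: each record's
        		 * AES-CTR IV is later derived as iv + record_no (see
        		 * chcr_ktls_xmit_wr_short()).
        		 */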
     90		tx_info->iv = be64_to_cpu(*(__be64 *)info_128_gcm->iv);
     91
     92		ghash_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
     93		key = info_128_gcm->key;
     94		salt = info_128_gcm->salt;
     95		tx_info->record_no = *(u64 *)info_128_gcm->rec_seq;
     96
     97		/* The SCMD fields used when encrypting a full TLS
      98		 * record. It's a one-time calculation that holds for the
      99		 * lifetime of the connection.
    100		 */
    101		tx_info->scmd0_seqno_numivs =
    102			SCMD_SEQ_NO_CTRL_V(CHCR_SCMD_SEQ_NO_CTRL_64BIT) |
    103			SCMD_CIPH_AUTH_SEQ_CTRL_F |
    104			SCMD_PROTO_VERSION_V(CHCR_SCMD_PROTO_VERSION_TLS) |
    105			SCMD_CIPH_MODE_V(CHCR_SCMD_CIPHER_MODE_AES_GCM) |
    106			SCMD_AUTH_MODE_V(CHCR_SCMD_AUTH_MODE_GHASH) |
    107			SCMD_IV_SIZE_V(TLS_CIPHER_AES_GCM_128_IV_SIZE >> 1) |
    108			SCMD_NUM_IVS_V(1);
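        		/* note: the SCMD IV size fields apparently count the IV in
        		 * 2-byte units, hence the >> 1 above and below.
        		 */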
    109
    110		/* keys will be sent inline. */
    111		tx_info->scmd0_ivgen_hdrlen = SCMD_KEY_CTX_INLINE_F;
    112
    113		/* The SCMD fields used when encrypting a partial TLS
    114		 * record (no trailer and possibly a truncated payload).
    115		 */
    116		tx_info->scmd0_short_seqno_numivs =
    117			SCMD_CIPH_AUTH_SEQ_CTRL_F |
    118			SCMD_PROTO_VERSION_V(CHCR_SCMD_PROTO_VERSION_GENERIC) |
    119			SCMD_CIPH_MODE_V(CHCR_SCMD_CIPHER_MODE_AES_CTR) |
    120			SCMD_IV_SIZE_V(AES_BLOCK_LEN >> 1);
    121
    122		tx_info->scmd0_short_ivgen_hdrlen =
    123			tx_info->scmd0_ivgen_hdrlen | SCMD_AADIVDROP_F;
    124
    125		break;
    126
    127	default:
    128		pr_err("GCM: cipher type 0x%x not supported\n",
    129		       crypto_info->cipher_type);
    130		ret = -EINVAL;
    131		goto out;
    132	}
    133
    134	key_ctx_size = CHCR_KTLS_KEY_CTX_LEN +
    135		       roundup(keylen, 16) + ghash_size;
    136	/* Calculate the H = CIPH(K, 0 repeated 16 times).
     137	 * It will go in the key context.
    138	 */
    139
    140	ret = aes_expandkey(&aes_ctx, key, keylen);
    141	if (ret)
    142		goto out;
    143
    144	memset(ghash_h, 0, ghash_size);
    145	aes_encrypt(&aes_ctx, ghash_h, ghash_h);
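        	/* ghash_h now holds H = AES-ENC(key, 0^128), the GHASH hash subkey
        	 * that GCM needs; it is stored after the raw key in the key context
        	 * below.
        	 */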
    146	memzero_explicit(&aes_ctx, sizeof(aes_ctx));
    147
    148	/* fill the Key context */
    149	if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
    150		kctx->ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
    151						 mac_key_size,
    152						 key_ctx_size >> 4);
    153	} else {
    154		ret = -EINVAL;
    155		goto out;
    156	}
    157
    158	memcpy(kctx->salt, salt, tx_info->salt_size);
    159	memcpy(kctx->key, key, keylen);
    160	memcpy(kctx->key + keylen, ghash_h, ghash_size);
    161	tx_info->key_ctx_len = key_ctx_size;
    162
    163out:
    164	return ret;
    165}
    166
    167/*
    168 * chcr_ktls_act_open_req: creates TCB entry for ipv4 connection.
    169 * @sk - tcp socket.
    170 * @tx_info - driver specific tls info.
    171 * @atid - connection active tid.
    172 * return - send success/failure.
    173 */
    174static int chcr_ktls_act_open_req(struct sock *sk,
    175				  struct chcr_ktls_info *tx_info,
    176				  int atid)
    177{
    178	struct inet_sock *inet = inet_sk(sk);
    179	struct cpl_t6_act_open_req *cpl6;
    180	struct cpl_act_open_req *cpl;
    181	struct sk_buff *skb;
    182	unsigned int len;
    183	int qid_atid;
    184	u64 options;
    185
    186	len = sizeof(*cpl6);
    187	skb = alloc_skb(len, GFP_KERNEL);
    188	if (unlikely(!skb))
    189		return -ENOMEM;
    190	/* mark it a control pkt */
    191	set_wr_txq(skb, CPL_PRIORITY_CONTROL, tx_info->port_id);
    192
    193	cpl6 = __skb_put_zero(skb, len);
    194	cpl = (struct cpl_act_open_req *)cpl6;
    195	INIT_TP_WR(cpl6, 0);
    196	qid_atid = TID_QID_V(tx_info->rx_qid) |
    197		   TID_TID_V(atid);
    198	OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, qid_atid));
    199	cpl->local_port = inet->inet_sport;
    200	cpl->peer_port = inet->inet_dport;
    201	cpl->local_ip = inet->inet_rcv_saddr;
    202	cpl->peer_ip = inet->inet_daddr;
    203
    204	/* fill first 64 bit option field. */
    205	options = TCAM_BYPASS_F | ULP_MODE_V(ULP_MODE_NONE) | NON_OFFLOAD_F |
    206		  SMAC_SEL_V(tx_info->smt_idx) | TX_CHAN_V(tx_info->tx_chan);
    207	cpl->opt0 = cpu_to_be64(options);
    208
    209	/* next 64 bit option field. */
    210	options =
    211		TX_QUEUE_V(tx_info->adap->params.tp.tx_modq[tx_info->tx_chan]);
    212	cpl->opt2 = htonl(options);
    213
    214	return cxgb4_l2t_send(tx_info->netdev, skb, tx_info->l2te);
    215}
    216
    217#if IS_ENABLED(CONFIG_IPV6)
    218/*
    219 * chcr_ktls_act_open_req6: creates TCB entry for ipv6 connection.
    220 * @sk - tcp socket.
    221 * @tx_info - driver specific tls info.
    222 * @atid - connection active tid.
    223 * return - send success/failure.
    224 */
    225static int chcr_ktls_act_open_req6(struct sock *sk,
    226				   struct chcr_ktls_info *tx_info,
    227				   int atid)
    228{
    229	struct inet_sock *inet = inet_sk(sk);
    230	struct cpl_t6_act_open_req6 *cpl6;
    231	struct cpl_act_open_req6 *cpl;
    232	struct sk_buff *skb;
    233	unsigned int len;
    234	int qid_atid;
    235	u64 options;
    236
    237	len = sizeof(*cpl6);
    238	skb = alloc_skb(len, GFP_KERNEL);
    239	if (unlikely(!skb))
    240		return -ENOMEM;
    241	/* mark it a control pkt */
    242	set_wr_txq(skb, CPL_PRIORITY_CONTROL, tx_info->port_id);
    243
    244	cpl6 = __skb_put_zero(skb, len);
    245	cpl = (struct cpl_act_open_req6 *)cpl6;
    246	INIT_TP_WR(cpl6, 0);
    247	qid_atid = TID_QID_V(tx_info->rx_qid) | TID_TID_V(atid);
    248	OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, qid_atid));
    249	cpl->local_port = inet->inet_sport;
    250	cpl->peer_port = inet->inet_dport;
    251	cpl->local_ip_hi = *(__be64 *)&sk->sk_v6_rcv_saddr.in6_u.u6_addr8[0];
    252	cpl->local_ip_lo = *(__be64 *)&sk->sk_v6_rcv_saddr.in6_u.u6_addr8[8];
    253	cpl->peer_ip_hi = *(__be64 *)&sk->sk_v6_daddr.in6_u.u6_addr8[0];
    254	cpl->peer_ip_lo = *(__be64 *)&sk->sk_v6_daddr.in6_u.u6_addr8[8];
    255
    256	/* first 64 bit option field. */
    257	options = TCAM_BYPASS_F | ULP_MODE_V(ULP_MODE_NONE) | NON_OFFLOAD_F |
    258		  SMAC_SEL_V(tx_info->smt_idx) | TX_CHAN_V(tx_info->tx_chan);
    259	cpl->opt0 = cpu_to_be64(options);
    260	/* next 64 bit option field. */
    261	options =
    262		TX_QUEUE_V(tx_info->adap->params.tp.tx_modq[tx_info->tx_chan]);
    263	cpl->opt2 = htonl(options);
    264
    265	return cxgb4_l2t_send(tx_info->netdev, skb, tx_info->l2te);
    266}
    267#endif /* #if IS_ENABLED(CONFIG_IPV6) */
    268
    269/*
    270 * chcr_setup_connection:  create a TCB entry so that TP will form tcp packets.
    271 * @sk - tcp socket.
    272 * @tx_info - driver specific tls info.
    273 * return: NET_TX_OK/NET_XMIT_DROP
    274 */
    275static int chcr_setup_connection(struct sock *sk,
    276				 struct chcr_ktls_info *tx_info)
    277{
    278	struct tid_info *t = &tx_info->adap->tids;
    279	int atid, ret = 0;
    280
    281	atid = cxgb4_alloc_atid(t, tx_info);
    282	if (atid == -1)
    283		return -EINVAL;
    284
    285	tx_info->atid = atid;
    286
    287	if (tx_info->ip_family == AF_INET) {
    288		ret = chcr_ktls_act_open_req(sk, tx_info, atid);
    289#if IS_ENABLED(CONFIG_IPV6)
    290	} else {
    291		ret = cxgb4_clip_get(tx_info->netdev, (const u32 *)
    292				     &sk->sk_v6_rcv_saddr,
    293				     1);
    294		if (ret)
    295			return ret;
    296		ret = chcr_ktls_act_open_req6(sk, tx_info, atid);
    297#endif
    298	}
    299
     300	/* if the return type is NET_XMIT_CN, the msg will be sent but delayed;
     301	 * mark ret success. Otherwise clear the atid and return that failure.
    302	 */
    303	if (ret) {
    304		if (ret == NET_XMIT_CN) {
    305			ret = 0;
    306		} else {
    307#if IS_ENABLED(CONFIG_IPV6)
    308			/* clear clip entry */
    309			if (tx_info->ip_family == AF_INET6)
    310				cxgb4_clip_release(tx_info->netdev,
    311						   (const u32 *)
    312						   &sk->sk_v6_rcv_saddr,
    313						   1);
    314#endif
    315			cxgb4_free_atid(t, atid);
    316		}
    317	}
    318
    319	return ret;
    320}
    321
    322/*
    323 * chcr_set_tcb_field: update tcb fields.
    324 * @tx_info - driver specific tls info.
    325 * @word - TCB word.
    326 * @mask - TCB word related mask.
    327 * @val - TCB word related value.
    328 * @no_reply - set 1 if not looking for TP response.
    329 */
    330static int chcr_set_tcb_field(struct chcr_ktls_info *tx_info, u16 word,
    331			      u64 mask, u64 val, int no_reply)
    332{
    333	struct cpl_set_tcb_field *req;
    334	struct sk_buff *skb;
    335
    336	skb = alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_ATOMIC);
    337	if (!skb)
    338		return -ENOMEM;
    339
    340	req = (struct cpl_set_tcb_field *)__skb_put_zero(skb, sizeof(*req));
    341	INIT_TP_WR_CPL(req, CPL_SET_TCB_FIELD, tx_info->tid);
    342	req->reply_ctrl = htons(QUEUENO_V(tx_info->rx_qid) |
    343				NO_REPLY_V(no_reply));
    344	req->word_cookie = htons(TCB_WORD_V(word));
    345	req->mask = cpu_to_be64(mask);
    346	req->val = cpu_to_be64(val);
    347
    348	set_wr_txq(skb, CPL_PRIORITY_CONTROL, tx_info->port_id);
    349	return cxgb4_ofld_send(tx_info->netdev, skb);
    350}
    351
    352/*
    353 * chcr_ktls_dev_del:  call back for tls_dev_del.
    354 * Remove the tid and l2t entry and close the connection.
     355 * It is done on a per-connection basis.
     356 * @netdev - net device.
     357 * @tls_ctx - tls context.
    358 * @direction - TX/RX crypto direction
    359 */
    360static void chcr_ktls_dev_del(struct net_device *netdev,
    361			      struct tls_context *tls_ctx,
    362			      enum tls_offload_ctx_dir direction)
    363{
    364	struct chcr_ktls_ofld_ctx_tx *tx_ctx =
    365				chcr_get_ktls_tx_context(tls_ctx);
    366	struct chcr_ktls_info *tx_info = tx_ctx->chcr_info;
    367	struct ch_ktls_port_stats_debug *port_stats;
    368	struct chcr_ktls_uld_ctx *u_ctx;
    369
    370	if (!tx_info)
    371		return;
    372
    373	u_ctx = tx_info->adap->uld[CXGB4_ULD_KTLS].handle;
    374	if (u_ctx && u_ctx->detach)
    375		return;
    376	/* clear l2t entry */
    377	if (tx_info->l2te)
    378		cxgb4_l2t_release(tx_info->l2te);
    379
    380#if IS_ENABLED(CONFIG_IPV6)
    381	/* clear clip entry */
    382	if (tx_info->ip_family == AF_INET6)
    383		cxgb4_clip_release(netdev, (const u32 *)
    384				   &tx_info->sk->sk_v6_rcv_saddr,
    385				   1);
    386#endif
    387
    388	/* clear tid */
    389	if (tx_info->tid != -1) {
    390		cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
    391				 tx_info->tid, tx_info->ip_family);
    392
    393		xa_erase(&u_ctx->tid_list, tx_info->tid);
    394	}
    395
    396	port_stats = &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
    397	atomic64_inc(&port_stats->ktls_tx_connection_close);
    398	kvfree(tx_info);
    399	tx_ctx->chcr_info = NULL;
    400	/* release module refcount */
    401	module_put(THIS_MODULE);
    402}
    403
    404/*
    405 * chcr_ktls_dev_add:  call back for tls_dev_add.
     406 * Create a tcb entry for TP and add an l2t entry for the connection,
     407 * then generate the crypto keys and save them locally.
     408 * @netdev - net device.
     409 * @tls_ctx - tls context.
    410 * @direction - TX/RX crypto direction
    411 * return: SUCCESS/FAILURE.
    412 */
    413static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
    414			     enum tls_offload_ctx_dir direction,
    415			     struct tls_crypto_info *crypto_info,
    416			     u32 start_offload_tcp_sn)
    417{
    418	struct tls_context *tls_ctx = tls_get_ctx(sk);
    419	struct ch_ktls_port_stats_debug *port_stats;
    420	struct chcr_ktls_ofld_ctx_tx *tx_ctx;
    421	struct chcr_ktls_uld_ctx *u_ctx;
    422	struct chcr_ktls_info *tx_info;
    423	struct dst_entry *dst;
    424	struct adapter *adap;
    425	struct port_info *pi;
    426	struct neighbour *n;
    427	u8 daaddr[16];
    428	int ret = -1;
    429
    430	tx_ctx = chcr_get_ktls_tx_context(tls_ctx);
    431
    432	pi = netdev_priv(netdev);
    433	adap = pi->adapter;
    434	port_stats = &adap->ch_ktls_stats.ktls_port[pi->port_id];
    435	atomic64_inc(&port_stats->ktls_tx_connection_open);
    436	u_ctx = adap->uld[CXGB4_ULD_KTLS].handle;
    437
    438	if (direction == TLS_OFFLOAD_CTX_DIR_RX) {
    439		pr_err("not expecting for RX direction\n");
    440		goto out;
    441	}
    442
    443	if (tx_ctx->chcr_info)
    444		goto out;
    445
    446	if (u_ctx && u_ctx->detach)
    447		goto out;
    448
    449	tx_info = kvzalloc(sizeof(*tx_info), GFP_KERNEL);
    450	if (!tx_info)
    451		goto out;
    452
    453	tx_info->sk = sk;
    454	spin_lock_init(&tx_info->lock);
     455	/* initialize tid and atid to -1; 0 is also a valid id. */
    456	tx_info->tid = -1;
    457	tx_info->atid = -1;
    458
    459	tx_info->adap = adap;
    460	tx_info->netdev = netdev;
    461	tx_info->first_qset = pi->first_qset;
    462	tx_info->tx_chan = pi->tx_chan;
    463	tx_info->smt_idx = pi->smt_idx;
    464	tx_info->port_id = pi->port_id;
    465	tx_info->prev_ack = 0;
    466	tx_info->prev_win = 0;
    467
    468	tx_info->rx_qid = chcr_get_first_rx_qid(adap);
    469	if (unlikely(tx_info->rx_qid < 0))
    470		goto free_tx_info;
    471
    472	tx_info->prev_seq = start_offload_tcp_sn;
    473	tx_info->tcp_start_seq_number = start_offload_tcp_sn;
    474
    475	/* save crypto keys */
    476	ret = chcr_ktls_save_keys(tx_info, crypto_info, direction);
    477	if (ret < 0)
    478		goto free_tx_info;
    479
    480	/* get peer ip */
    481	if (sk->sk_family == AF_INET) {
    482		memcpy(daaddr, &sk->sk_daddr, 4);
    483		tx_info->ip_family = AF_INET;
    484#if IS_ENABLED(CONFIG_IPV6)
    485	} else {
    486		if (!ipv6_only_sock(sk) &&
    487		    ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) {
    488			memcpy(daaddr, &sk->sk_daddr, 4);
    489			tx_info->ip_family = AF_INET;
    490		} else {
    491			memcpy(daaddr, sk->sk_v6_daddr.in6_u.u6_addr8, 16);
    492			tx_info->ip_family = AF_INET6;
    493		}
    494#endif
    495	}
    496
    497	/* get the l2t index */
    498	dst = sk_dst_get(sk);
    499	if (!dst) {
    500		pr_err("DST entry not found\n");
    501		goto free_tx_info;
    502	}
    503	n = dst_neigh_lookup(dst, daaddr);
    504	if (!n || !n->dev) {
    505		pr_err("neighbour not found\n");
    506		dst_release(dst);
    507		goto free_tx_info;
    508	}
    509	tx_info->l2te  = cxgb4_l2t_get(adap->l2t, n, n->dev, 0);
    510
    511	neigh_release(n);
    512	dst_release(dst);
    513
    514	if (!tx_info->l2te) {
    515		pr_err("l2t entry not found\n");
    516		goto free_tx_info;
    517	}
    518
     519	/* Driver shouldn't be removed while any connection exists */
    520	if (!try_module_get(THIS_MODULE))
    521		goto free_l2t;
    522
    523	init_completion(&tx_info->completion);
    524	/* create a filter and call cxgb4_l2t_send to send the packet out, which
     525	 * will take care of updating the l2t entry in hw if not already done.
    526	 */
    527	tx_info->open_state = CH_KTLS_OPEN_PENDING;
    528
    529	if (chcr_setup_connection(sk, tx_info))
    530		goto put_module;
    531
    532	/* Wait for reply */
    533	wait_for_completion_timeout(&tx_info->completion, 30 * HZ);
    534	spin_lock_bh(&tx_info->lock);
    535	if (tx_info->open_state) {
    536		/* need to wait for hw response, can't free tx_info yet. */
    537		if (tx_info->open_state == CH_KTLS_OPEN_PENDING)
    538			tx_info->pending_close = true;
    539		else
    540			spin_unlock_bh(&tx_info->lock);
     541		/* if in pending close, release the lock after the cleanup */
    542		goto put_module;
    543	}
    544	spin_unlock_bh(&tx_info->lock);
    545
    546	/* initialize tcb */
    547	reinit_completion(&tx_info->completion);
    548	/* mark it pending for hw response */
    549	tx_info->open_state = CH_KTLS_OPEN_PENDING;
    550
    551	if (chcr_init_tcb_fields(tx_info))
    552		goto free_tid;
    553
    554	/* Wait for reply */
    555	wait_for_completion_timeout(&tx_info->completion, 30 * HZ);
    556	spin_lock_bh(&tx_info->lock);
    557	if (tx_info->open_state) {
    558		/* need to wait for hw response, can't free tx_info yet. */
    559		tx_info->pending_close = true;
     560		/* release the lock after cleanup */
    561		goto free_tid;
    562	}
    563	spin_unlock_bh(&tx_info->lock);
    564
    565	if (!cxgb4_check_l2t_valid(tx_info->l2te))
    566		goto free_tid;
    567
    568	atomic64_inc(&port_stats->ktls_tx_ctx);
    569	tx_ctx->chcr_info = tx_info;
    570
    571	return 0;
    572
    573free_tid:
    574#if IS_ENABLED(CONFIG_IPV6)
    575	/* clear clip entry */
    576	if (tx_info->ip_family == AF_INET6)
    577		cxgb4_clip_release(netdev, (const u32 *)
    578				   &sk->sk_v6_rcv_saddr,
    579				   1);
    580#endif
    581	cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
    582			 tx_info->tid, tx_info->ip_family);
    583
    584	xa_erase(&u_ctx->tid_list, tx_info->tid);
    585
    586put_module:
    587	/* release module refcount */
    588	module_put(THIS_MODULE);
    589free_l2t:
    590	cxgb4_l2t_release(tx_info->l2te);
    591free_tx_info:
    592	if (tx_info->pending_close)
    593		spin_unlock_bh(&tx_info->lock);
    594	else
    595		kvfree(tx_info);
    596out:
    597	atomic64_inc(&port_stats->ktls_tx_connection_fail);
    598	return -1;
    599}
    600
    601/*
     602 * chcr_init_tcb_fields:  Initialize tcb fields for TCP seq number
     603 *			  handling.
    604 * @tx_info - driver specific tls info.
    605 * return: NET_TX_OK/NET_XMIT_DROP
    606 */
    607static int chcr_init_tcb_fields(struct chcr_ktls_info *tx_info)
    608{
    609	int  ret = 0;
    610
    611	/* set tcb in offload and bypass */
    612	ret =
    613	chcr_set_tcb_field(tx_info, TCB_T_FLAGS_W,
    614			   TCB_T_FLAGS_V(TF_CORE_BYPASS_F | TF_NON_OFFLOAD_F),
    615			   TCB_T_FLAGS_V(TF_CORE_BYPASS_F), 1);
    616	if (ret)
    617		return ret;
    618	/* reset snd_una and snd_next fields in tcb */
    619	ret = chcr_set_tcb_field(tx_info, TCB_SND_UNA_RAW_W,
    620				 TCB_SND_NXT_RAW_V(TCB_SND_NXT_RAW_M) |
    621				 TCB_SND_UNA_RAW_V(TCB_SND_UNA_RAW_M),
    622				 0, 1);
    623	if (ret)
    624		return ret;
    625
    626	/* reset send max */
    627	ret = chcr_set_tcb_field(tx_info, TCB_SND_MAX_RAW_W,
    628				 TCB_SND_MAX_RAW_V(TCB_SND_MAX_RAW_M),
    629				 0, 1);
    630	if (ret)
    631		return ret;
    632
     633	/* update the l2t index and request a TP reply to confirm the tcb is
     634	 * initialised to handle tx traffic.
    635	 */
    636	ret = chcr_set_tcb_field(tx_info, TCB_L2T_IX_W,
    637				 TCB_L2T_IX_V(TCB_L2T_IX_M),
    638				 TCB_L2T_IX_V(tx_info->l2te->idx), 0);
    639	return ret;
    640}
    641
    642/*
    643 * chcr_ktls_cpl_act_open_rpl: connection reply received from TP.
    644 */
    645static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap,
    646				      unsigned char *input)
    647{
    648	const struct cpl_act_open_rpl *p = (void *)input;
    649	struct chcr_ktls_info *tx_info = NULL;
    650	struct chcr_ktls_ofld_ctx_tx *tx_ctx;
    651	struct chcr_ktls_uld_ctx *u_ctx;
    652	unsigned int atid, tid, status;
    653	struct tls_context *tls_ctx;
    654	struct tid_info *t;
    655	int ret = 0;
    656
    657	tid = GET_TID(p);
    658	status = AOPEN_STATUS_G(ntohl(p->atid_status));
    659	atid = TID_TID_G(AOPEN_ATID_G(ntohl(p->atid_status)));
    660
    661	t = &adap->tids;
    662	tx_info = lookup_atid(t, atid);
    663
    664	if (!tx_info || tx_info->atid != atid) {
    665		pr_err("%s: incorrect tx_info or atid\n", __func__);
    666		return -1;
    667	}
    668
    669	cxgb4_free_atid(t, atid);
    670	tx_info->atid = -1;
    671
    672	spin_lock(&tx_info->lock);
     673	/* the HW response raced with a pending close, finish the cleanup */
    674	if (tx_info->pending_close) {
    675		spin_unlock(&tx_info->lock);
    676		if (!status) {
    677			cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
    678					 tid, tx_info->ip_family);
    679		}
    680		kvfree(tx_info);
    681		return 0;
    682	}
    683
    684	if (!status) {
    685		tx_info->tid = tid;
    686		cxgb4_insert_tid(t, tx_info, tx_info->tid, tx_info->ip_family);
    687		/* Adding tid */
    688		tls_ctx = tls_get_ctx(tx_info->sk);
    689		tx_ctx = chcr_get_ktls_tx_context(tls_ctx);
    690		u_ctx = adap->uld[CXGB4_ULD_KTLS].handle;
    691		if (u_ctx) {
    692			ret = xa_insert_bh(&u_ctx->tid_list, tid, tx_ctx,
    693					   GFP_NOWAIT);
    694			if (ret < 0) {
    695				pr_err("%s: Failed to allocate tid XA entry = %d\n",
    696				       __func__, tx_info->tid);
    697				tx_info->open_state = CH_KTLS_OPEN_FAILURE;
    698				goto out;
    699			}
    700		}
    701		tx_info->open_state = CH_KTLS_OPEN_SUCCESS;
    702	} else {
    703		tx_info->open_state = CH_KTLS_OPEN_FAILURE;
    704	}
    705out:
    706	spin_unlock(&tx_info->lock);
    707
    708	complete(&tx_info->completion);
    709	return ret;
    710}
    711
    712/*
    713 * chcr_ktls_cpl_set_tcb_rpl: TCB reply received from TP.
    714 */
    715static int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input)
    716{
    717	const struct cpl_set_tcb_rpl *p = (void *)input;
    718	struct chcr_ktls_info *tx_info = NULL;
    719	struct tid_info *t;
    720	u32 tid;
    721
    722	tid = GET_TID(p);
    723
    724	t = &adap->tids;
    725	tx_info = lookup_tid(t, tid);
    726
    727	if (!tx_info || tx_info->tid != tid) {
    728		pr_err("%s: incorrect tx_info or tid\n", __func__);
    729		return -1;
    730	}
    731
    732	spin_lock(&tx_info->lock);
    733	if (tx_info->pending_close) {
    734		spin_unlock(&tx_info->lock);
    735		kvfree(tx_info);
    736		return 0;
    737	}
    738	tx_info->open_state = CH_KTLS_OPEN_SUCCESS;
    739	spin_unlock(&tx_info->lock);
    740
    741	complete(&tx_info->completion);
    742	return 0;
    743}
    744
    745static void *__chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
    746					u32 tid, void *pos, u16 word,
    747					struct sge_eth_txq *q, u64 mask,
    748					u64 val, u32 reply)
    749{
    750	struct cpl_set_tcb_field_core *cpl;
    751	struct ulptx_idata *idata;
    752	struct ulp_txpkt *txpkt;
    753
    754	/* ULP_TXPKT */
    755	txpkt = pos;
    756	txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) |
    757				ULP_TXPKT_CHANNELID_V(tx_info->port_id) |
    758				ULP_TXPKT_FID_V(q->q.cntxt_id) |
    759				ULP_TXPKT_RO_F);
    760	txpkt->len = htonl(DIV_ROUND_UP(CHCR_SET_TCB_FIELD_LEN, 16));
    761
    762	/* ULPTX_IDATA sub-command */
    763	idata = (struct ulptx_idata *)(txpkt + 1);
    764	idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
    765	idata->len = htonl(sizeof(*cpl));
    766	pos = idata + 1;
    767
    768	cpl = pos;
    769	/* CPL_SET_TCB_FIELD */
    770	OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
    771	cpl->reply_ctrl = htons(QUEUENO_V(tx_info->rx_qid) |
    772			NO_REPLY_V(!reply));
    773	cpl->word_cookie = htons(TCB_WORD_V(word));
    774	cpl->mask = cpu_to_be64(mask);
    775	cpl->val = cpu_to_be64(val);
    776
    777	/* ULPTX_NOOP */
    778	idata = (struct ulptx_idata *)(cpl + 1);
    779	idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_NOOP));
    780	idata->len = htonl(0);
    781	pos = idata + 1;
    782
    783	return pos;
    784}
    785
    786
    787/*
    788 * chcr_write_cpl_set_tcb_ulp: update tcb values.
     789 * TCB is responsible for creating tcp headers, so all the related values
    790 * should be correctly updated.
    791 * @tx_info - driver specific tls info.
    792 * @q - tx queue on which packet is going out.
    793 * @tid - TCB identifier.
     794 * @pos - current index where we should start writing.
    795 * @word - TCB word.
    796 * @mask - TCB word related mask.
    797 * @val - TCB word related value.
    798 * @reply - set 1 if looking for TP response.
    799 * return - next position to write.
    800 */
    801static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
    802					struct sge_eth_txq *q, u32 tid,
    803					void *pos, u16 word, u64 mask,
    804					u64 val, u32 reply)
    805{
    806	int left = (void *)q->q.stat - pos;
    807
    808	if (unlikely(left < CHCR_SET_TCB_FIELD_LEN)) {
    809		if (!left) {
    810			pos = q->q.desc;
    811		} else {
    812			u8 buf[48] = {0};
    813
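        			/* not enough contiguous space before the ring wraps:
        			 * build the CPL in a bounce buffer and let
        			 * chcr_copy_to_txd() split it across the wrap.
        			 */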
    814			__chcr_write_cpl_set_tcb_ulp(tx_info, tid, buf, word, q,
    815						     mask, val, reply);
    816
    817			return chcr_copy_to_txd(buf, &q->q, pos,
    818						CHCR_SET_TCB_FIELD_LEN);
    819		}
    820	}
    821
    822	pos = __chcr_write_cpl_set_tcb_ulp(tx_info, tid, pos, word, q,
    823					   mask, val, reply);
    824
    825	/* check again if we are at the end of the queue */
    826	if (left == CHCR_SET_TCB_FIELD_LEN)
    827		pos = q->q.desc;
    828
    829	return pos;
    830}
    831
    832/*
    833 * chcr_ktls_xmit_tcb_cpls: update tcb entry so that TP will create the header
    834 * with updated values like tcp seq, ack, window etc.
    835 * @tx_info - driver specific tls info.
    836 * @q - TX queue.
    837 * @tcp_seq
    838 * @tcp_ack
    839 * @tcp_win
    840 * return: NETDEV_TX_BUSY/NET_TX_OK.
    841 */
    842static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
    843				   struct sge_eth_txq *q, u64 tcp_seq,
    844				   u64 tcp_ack, u64 tcp_win, bool offset)
    845{
    846	bool first_wr = ((tx_info->prev_ack == 0) && (tx_info->prev_win == 0));
    847	struct ch_ktls_port_stats_debug *port_stats;
    848	u32 len, cpl = 0, ndesc, wr_len, wr_mid = 0;
    849	struct fw_ulptx_wr *wr;
    850	int credits;
    851	void *pos;
    852
    853	wr_len = sizeof(*wr);
    854	/* there can be max 4 cpls, check if we have enough credits */
    855	len = wr_len + 4 * roundup(CHCR_SET_TCB_FIELD_LEN, 16);
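        	/* the four possible CPLs update TX_MAX, SND_UNA_RAW, RCV_NXT and
        	 * RCV_WND; only the ones that changed are written below.
        	 */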
    856	ndesc = DIV_ROUND_UP(len, 64);
    857
    858	credits = chcr_txq_avail(&q->q) - ndesc;
    859	if (unlikely(credits < 0)) {
    860		chcr_eth_txq_stop(q);
    861		return NETDEV_TX_BUSY;
    862	}
    863
    864	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
    865		chcr_eth_txq_stop(q);
    866		wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
    867	}
    868
    869	pos = &q->q.desc[q->q.pidx];
    870	/* make space for WR, we'll fill it later when we know all the cpls
    871	 * being sent out and have complete length.
    872	 */
    873	wr = pos;
    874	pos += wr_len;
     875	/* update tx_max if it's a re-transmit or the first wr */
    876	if (first_wr || tcp_seq != tx_info->prev_seq) {
    877		pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
    878						 TCB_TX_MAX_W,
    879						 TCB_TX_MAX_V(TCB_TX_MAX_M),
    880						 TCB_TX_MAX_V(tcp_seq), 0);
    881		cpl++;
    882	}
    883	/* reset snd una if it's a re-transmit pkt */
    884	if (tcp_seq != tx_info->prev_seq || offset) {
    885		/* reset snd_una */
    886		port_stats =
    887			&tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
    888		pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
    889						 TCB_SND_UNA_RAW_W,
    890						 TCB_SND_UNA_RAW_V
    891						 (TCB_SND_UNA_RAW_M),
    892						 TCB_SND_UNA_RAW_V(0), 0);
    893		if (tcp_seq != tx_info->prev_seq)
    894			atomic64_inc(&port_stats->ktls_tx_ooo);
    895		cpl++;
    896	}
    897	/* update ack */
    898	if (first_wr || tx_info->prev_ack != tcp_ack) {
    899		pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
    900						 TCB_RCV_NXT_W,
    901						 TCB_RCV_NXT_V(TCB_RCV_NXT_M),
    902						 TCB_RCV_NXT_V(tcp_ack), 0);
    903		tx_info->prev_ack = tcp_ack;
    904		cpl++;
    905	}
    906	/* update receive window */
    907	if (first_wr || tx_info->prev_win != tcp_win) {
    908		chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
    909					   TCB_RCV_WND_W,
    910					   TCB_RCV_WND_V(TCB_RCV_WND_M),
    911					   TCB_RCV_WND_V(tcp_win), 0);
    912		tx_info->prev_win = tcp_win;
    913		cpl++;
    914	}
    915
    916	if (cpl) {
    917		/* get the actual length */
    918		len = wr_len + cpl * roundup(CHCR_SET_TCB_FIELD_LEN, 16);
    919		/* ULPTX wr */
    920		wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
    921		wr->cookie = 0;
    922		/* fill len in wr field */
    923		wr->flowid_len16 = htonl(wr_mid |
    924					 FW_WR_LEN16_V(DIV_ROUND_UP(len, 16)));
    925
    926		ndesc = DIV_ROUND_UP(len, 64);
    927		chcr_txq_advance(&q->q, ndesc);
    928		cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
    929	}
    930	return 0;
    931}
    932
    933/*
    934 * chcr_ktls_get_tx_flits
     935 * returns the number of flits to be sent out; it includes the key context
     936 * length, WR size and skb fragments.
    937 */
    938static unsigned int
    939chcr_ktls_get_tx_flits(u32 nr_frags, unsigned int key_ctx_len)
    940{
    941	return chcr_sgl_len(nr_frags) +
    942	       DIV_ROUND_UP(key_ctx_len + CHCR_KTLS_WR_SIZE, 8);
    943}
    944
    945/*
     946 * chcr_ktls_check_tcp_options: To check whether any TCP option other than
     947 * NOP or EOL is present.
     948 * @tcp - tcp header of the skb containing a partial record.
    949 * return: 1 / 0
    950 */
    951static int
    952chcr_ktls_check_tcp_options(struct tcphdr *tcp)
    953{
    954	int cnt, opt, optlen;
    955	u_char *cp;
    956
    957	cp = (u_char *)(tcp + 1);
    958	cnt = (tcp->doff << 2) - sizeof(struct tcphdr);
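        	/* doff is the tcp header length in 32-bit words; anything beyond
        	 * the 20-byte base header is options.
        	 */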
    959	for (; cnt > 0; cnt -= optlen, cp += optlen) {
    960		opt = cp[0];
    961		if (opt == TCPOPT_EOL)
    962			break;
    963		if (opt == TCPOPT_NOP) {
    964			optlen = 1;
    965		} else {
    966			if (cnt < 2)
    967				break;
    968			optlen = cp[1];
    969			if (optlen < 2 || optlen > cnt)
    970				break;
    971		}
    972		switch (opt) {
    973		case TCPOPT_NOP:
    974			break;
    975		default:
    976			return 1;
    977		}
    978	}
    979	return 0;
    980}
    981
    982/*
     983 * chcr_ktls_write_tcp_options : TP can't send out all the TCP options; we
     984 * need to send them out separately.
     985 * @tx_info - driver specific tls info.
     986 * @skb - skb containing a partial record.
    987 * @q - TX queue.
    988 * @tx_chan - channel number.
    989 * return: NETDEV_TX_OK/NETDEV_TX_BUSY.
    990 */
    991static int
    992chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
    993			    struct sge_eth_txq *q, uint32_t tx_chan)
    994{
    995	struct fw_eth_tx_pkt_wr *wr;
    996	struct cpl_tx_pkt_core *cpl;
    997	u32 ctrl, iplen, maclen;
    998	struct ipv6hdr *ip6;
    999	unsigned int ndesc;
   1000	struct tcphdr *tcp;
   1001	int len16, pktlen;
   1002	struct iphdr *ip;
   1003	u32 wr_mid = 0;
   1004	int credits;
   1005	u8 buf[150];
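        	/* header bounce buffer; 150 bytes covers the usual worst case of
        	 * eth (14, +4 with VLAN) + ip (<= 60) + tcp (<= 60) headers.
        	 */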
   1006	u64 cntrl1;
   1007	void *pos;
   1008
   1009	iplen = skb_network_header_len(skb);
   1010	maclen = skb_mac_header_len(skb);
   1011
   1012	/* packet length = eth hdr len + ip hdr len + tcp hdr len
   1013	 * (including options).
   1014	 */
   1015	pktlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
   1016
   1017	ctrl = sizeof(*cpl) + pktlen;
   1018	len16 = DIV_ROUND_UP(sizeof(*wr) + ctrl, 16);
   1019	/* check how many descriptors needed */
   1020	ndesc = DIV_ROUND_UP(len16, 4);
   1021
   1022	credits = chcr_txq_avail(&q->q) - ndesc;
   1023	if (unlikely(credits < 0)) {
   1024		chcr_eth_txq_stop(q);
   1025		return NETDEV_TX_BUSY;
   1026	}
   1027
   1028	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
   1029		chcr_eth_txq_stop(q);
   1030		wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
   1031	}
   1032
   1033	pos = &q->q.desc[q->q.pidx];
   1034	wr = pos;
   1035
   1036	/* Firmware work request header */
   1037	wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
   1038			       FW_WR_IMMDLEN_V(ctrl));
   1039
   1040	wr->equiq_to_len16 = htonl(wr_mid | FW_WR_LEN16_V(len16));
   1041	wr->r3 = 0;
   1042
   1043	cpl = (void *)(wr + 1);
   1044
   1045	/* CPL header */
   1046	cpl->ctrl0 = htonl(TXPKT_OPCODE_V(CPL_TX_PKT) | TXPKT_INTF_V(tx_chan) |
   1047			   TXPKT_PF_V(tx_info->adap->pf));
   1048	cpl->pack = 0;
   1049	cpl->len = htons(pktlen);
   1050
   1051	memcpy(buf, skb->data, pktlen);
   1052	if (!IS_ENABLED(CONFIG_IPV6) || tx_info->ip_family == AF_INET) {
   1053		/* we need to correct ip header len */
   1054		ip = (struct iphdr *)(buf + maclen);
   1055		ip->tot_len = htons(pktlen - maclen);
   1056		cntrl1 = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP);
   1057	} else {
   1058		ip6 = (struct ipv6hdr *)(buf + maclen);
   1059		ip6->payload_len = htons(pktlen - maclen - iplen);
   1060		cntrl1 = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP6);
   1061	}
   1062
   1063	cntrl1 |= T6_TXPKT_ETHHDR_LEN_V(maclen - ETH_HLEN) |
   1064		  TXPKT_IPHDR_LEN_V(iplen);
   1065	/* checksum offload */
   1066	cpl->ctrl1 = cpu_to_be64(cntrl1);
   1067
   1068	pos = cpl + 1;
   1069
    1070	/* now take care of the tcp header: if fin is not set, clear the push
    1071	 * bit as well; if fin is set, it will be sent last, so we need to
    1072	 * update the tcp sequence number to match the last packet.
   1073	 */
   1074	tcp = (struct tcphdr *)(buf + maclen + iplen);
   1075
   1076	if (!tcp->fin)
   1077		tcp->psh = 0;
   1078	else
   1079		tcp->seq = htonl(tx_info->prev_seq);
   1080
   1081	chcr_copy_to_txd(buf, &q->q, pos, pktlen);
   1082
   1083	chcr_txq_advance(&q->q, ndesc);
   1084	cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
   1085	return 0;
   1086}
   1087
   1088/*
    1089 * chcr_ktls_xmit_wr_complete: This sends out the complete record. If a
    1090 * received skb carries the final part of a record, send out the complete
    1091 * record so that the crypto block can generate the TAG/HASH.
   1092 * @skb - segment which has complete or partial end part.
   1093 * @tx_info - driver specific tls info.
   1094 * @q - TX queue.
   1095 * @tcp_seq
   1096 * @tcp_push - tcp push bit.
   1097 * @mss - segment size.
   1098 * return: NETDEV_TX_BUSY/NET_TX_OK.
   1099 */
   1100static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb,
   1101				      struct chcr_ktls_info *tx_info,
   1102				      struct sge_eth_txq *q, u32 tcp_seq,
   1103				      bool is_last_wr, u32 data_len,
   1104				      u32 skb_offset, u32 nfrags,
   1105				      bool tcp_push, u32 mss)
   1106{
   1107	u32 len16, wr_mid = 0, flits = 0, ndesc, cipher_start;
   1108	struct adapter *adap = tx_info->adap;
   1109	int credits, left, last_desc;
   1110	struct tx_sw_desc *sgl_sdesc;
   1111	struct cpl_tx_data *tx_data;
   1112	struct cpl_tx_sec_pdu *cpl;
   1113	struct ulptx_idata *idata;
   1114	struct ulp_txpkt *ulptx;
   1115	struct fw_ulptx_wr *wr;
   1116	void *pos;
   1117	u64 *end;
   1118
   1119	/* get the number of flits required */
   1120	flits = chcr_ktls_get_tx_flits(nfrags, tx_info->key_ctx_len);
   1121	/* number of descriptors */
   1122	ndesc = chcr_flits_to_desc(flits);
   1123	/* check if enough credits available */
   1124	credits = chcr_txq_avail(&q->q) - ndesc;
   1125	if (unlikely(credits < 0)) {
   1126		chcr_eth_txq_stop(q);
   1127		return NETDEV_TX_BUSY;
   1128	}
   1129
   1130	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
   1131		/* Credits are below the threshold values, stop the queue after
   1132		 * injecting the Work Request for this packet.
   1133		 */
   1134		chcr_eth_txq_stop(q);
   1135		wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
   1136	}
   1137
   1138	last_desc = q->q.pidx + ndesc - 1;
   1139	if (last_desc >= q->q.size)
   1140		last_desc -= q->q.size;
   1141	sgl_sdesc = &q->q.sdesc[last_desc];
   1142
   1143	if (unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
   1144		memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
   1145		q->mapping_err++;
   1146		return NETDEV_TX_BUSY;
   1147	}
   1148
   1149	if (!is_last_wr)
   1150		skb_get(skb);
   1151
   1152	pos = &q->q.desc[q->q.pidx];
   1153	end = (u64 *)pos + flits;
   1154	/* FW_ULPTX_WR */
   1155	wr = pos;
   1156	/* WR will need len16 */
   1157	len16 = DIV_ROUND_UP(flits, 2);
   1158	wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
   1159	wr->flowid_len16 = htonl(wr_mid | FW_WR_LEN16_V(len16));
   1160	wr->cookie = 0;
   1161	pos += sizeof(*wr);
   1162	/* ULP_TXPKT */
   1163	ulptx = pos;
   1164	ulptx->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) |
   1165				ULP_TXPKT_CHANNELID_V(tx_info->port_id) |
   1166				ULP_TXPKT_FID_V(q->q.cntxt_id) |
   1167				ULP_TXPKT_RO_F);
   1168	ulptx->len = htonl(len16 - 1);
   1169	/* ULPTX_IDATA sub-command */
   1170	idata = (struct ulptx_idata *)(ulptx + 1);
   1171	idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM) | ULP_TX_SC_MORE_F);
   1172	/* idata length will include cpl_tx_sec_pdu + key context size +
   1173	 * cpl_tx_data header.
   1174	 */
   1175	idata->len = htonl(sizeof(*cpl) + tx_info->key_ctx_len +
   1176			   sizeof(*tx_data));
   1177	/* SEC CPL */
   1178	cpl = (struct cpl_tx_sec_pdu *)(idata + 1);
   1179	cpl->op_ivinsrtofst =
   1180		htonl(CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
   1181		      CPL_TX_SEC_PDU_CPLLEN_V(CHCR_CPL_TX_SEC_PDU_LEN_64BIT) |
   1182		      CPL_TX_SEC_PDU_PLACEHOLDER_V(1) |
   1183		      CPL_TX_SEC_PDU_IVINSRTOFST_V(TLS_HEADER_SIZE + 1));
   1184	cpl->pldlen = htonl(data_len);
   1185
   1186	/* encryption should start after tls header size + iv size */
   1187	cipher_start = TLS_HEADER_SIZE + tx_info->iv_size + 1;
   1188
   1189	cpl->aadstart_cipherstop_hi =
   1190		htonl(CPL_TX_SEC_PDU_AADSTART_V(1) |
   1191		      CPL_TX_SEC_PDU_AADSTOP_V(TLS_HEADER_SIZE) |
   1192		      CPL_TX_SEC_PDU_CIPHERSTART_V(cipher_start));
   1193
   1194	/* authentication will also start after tls header + iv size */
   1195	cpl->cipherstop_lo_authinsert =
   1196	htonl(CPL_TX_SEC_PDU_AUTHSTART_V(cipher_start) |
   1197	      CPL_TX_SEC_PDU_AUTHSTOP_V(TLS_CIPHER_AES_GCM_128_TAG_SIZE) |
   1198	      CPL_TX_SEC_PDU_AUTHINSERT_V(TLS_CIPHER_AES_GCM_128_TAG_SIZE));
   1199
   1200	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
   1201	cpl->seqno_numivs = htonl(tx_info->scmd0_seqno_numivs);
   1202	cpl->ivgen_hdrlen = htonl(tx_info->scmd0_ivgen_hdrlen);
   1203	cpl->scmd1 = cpu_to_be64(tx_info->record_no);
   1204
   1205	pos = cpl + 1;
   1206	/* check if space left to fill the keys */
   1207	left = (void *)q->q.stat - pos;
   1208	if (!left) {
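        		/* pos has reached the ring's status page (q->q.stat): wrap
        		 * to the base of the descriptor ring and recompute end.
        		 */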
   1209		left = (void *)end - (void *)q->q.stat;
   1210		pos = q->q.desc;
   1211		end = pos + left;
   1212	}
   1213
   1214	pos = chcr_copy_to_txd(&tx_info->key_ctx, &q->q, pos,
   1215			       tx_info->key_ctx_len);
   1216	left = (void *)q->q.stat - pos;
   1217
   1218	if (!left) {
   1219		left = (void *)end - (void *)q->q.stat;
   1220		pos = q->q.desc;
   1221		end = pos + left;
   1222	}
   1223	/* CPL_TX_DATA */
   1224	tx_data = (void *)pos;
   1225	OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tx_info->tid));
   1226	tx_data->len = htonl(TX_DATA_MSS_V(mss) | TX_LENGTH_V(data_len));
   1227
   1228	tx_data->rsvd = htonl(tcp_seq);
   1229
   1230	tx_data->flags = htonl(TX_BYPASS_F);
   1231	if (tcp_push)
   1232		tx_data->flags |= htonl(TX_PUSH_F | TX_SHOVE_F);
   1233
   1234	/* check left again, it might go beyond queue limit */
   1235	pos = tx_data + 1;
   1236	left = (void *)q->q.stat - pos;
   1237
   1238	/* check the position again */
   1239	if (!left) {
   1240		left = (void *)end - (void *)q->q.stat;
   1241		pos = q->q.desc;
   1242		end = pos + left;
   1243	}
   1244
   1245	/* send the complete packet except the header */
   1246	cxgb4_write_partial_sgl(skb, &q->q, pos, end, sgl_sdesc->addr,
   1247				skb_offset, data_len);
   1248	sgl_sdesc->skb = skb;
   1249
   1250	chcr_txq_advance(&q->q, ndesc);
   1251	cxgb4_ring_tx_db(adap, &q->q, ndesc);
   1252	atomic64_inc(&adap->ch_ktls_stats.ktls_tx_send_records);
   1253
   1254	return 0;
   1255}
   1256
   1257/*
    1258 * chcr_ktls_xmit_wr_short: This is to send out partial records. If it's
    1259 * a middle part of a record, fetch the prior data to make it 16 byte aligned
    1260 * before sending it out.
    1261 *
    1262 * @skb - skb containing a partial record.
    1263 * @tx_info - driver specific tls info.
    1264 * @q - TX queue.
    1265 * @tcp_seq
    1266 * @tcp_push - tcp push bit.
    1267 * @mss - segment size.
    1268 * @tls_rec_offset - offset from start of the tls record.
    1269 * @prior_data - data before the current segment, required to make this record
   1270 *		  16 byte aligned.
   1271 * @prior_data_len - prior_data length (less than 16)
   1272 * return: NETDEV_TX_BUSY/NET_TX_OK.
   1273 */
   1274static int chcr_ktls_xmit_wr_short(struct sk_buff *skb,
   1275				   struct chcr_ktls_info *tx_info,
   1276				   struct sge_eth_txq *q,
   1277				   u32 tcp_seq, bool tcp_push, u32 mss,
   1278				   u32 tls_rec_offset, u8 *prior_data,
   1279				   u32 prior_data_len, u32 data_len,
   1280				   u32 skb_offset)
   1281{
   1282	u32 len16, wr_mid = 0, cipher_start, nfrags;
   1283	struct adapter *adap = tx_info->adap;
   1284	unsigned int flits = 0, ndesc;
   1285	int credits, left, last_desc;
   1286	struct tx_sw_desc *sgl_sdesc;
   1287	struct cpl_tx_data *tx_data;
   1288	struct cpl_tx_sec_pdu *cpl;
   1289	struct ulptx_idata *idata;
   1290	struct ulp_txpkt *ulptx;
   1291	struct fw_ulptx_wr *wr;
   1292	__be64 iv_record;
   1293	void *pos;
   1294	u64 *end;
   1295
   1296	nfrags = chcr_get_nfrags_to_send(skb, skb_offset, data_len);
   1297	/* get the number of flits required, it's a partial record so 2 flits
   1298	 * (AES_BLOCK_SIZE) will be added.
   1299	 */
   1300	flits = chcr_ktls_get_tx_flits(nfrags, tx_info->key_ctx_len) + 2;
   1301	/* get the correct 8 byte IV of this record */
   1302	iv_record = cpu_to_be64(tx_info->iv + tx_info->record_no);
    1303	/* If it's a middle record and not 16 byte aligned to run AES CTR, we
    1304	 * need to make it 16 byte aligned, so at least 2 extra flits of
    1305	 * immediate data will be added.
   1306	 */
   1307	if (prior_data_len)
   1308		flits += 2;
   1309	/* number of descriptors */
   1310	ndesc = chcr_flits_to_desc(flits);
   1311	/* check if enough credits available */
   1312	credits = chcr_txq_avail(&q->q) - ndesc;
   1313	if (unlikely(credits < 0)) {
   1314		chcr_eth_txq_stop(q);
   1315		return NETDEV_TX_BUSY;
   1316	}
   1317
   1318	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
   1319		chcr_eth_txq_stop(q);
   1320		wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
   1321	}
   1322
   1323	last_desc = q->q.pidx + ndesc - 1;
   1324	if (last_desc >= q->q.size)
   1325		last_desc -= q->q.size;
   1326	sgl_sdesc = &q->q.sdesc[last_desc];
   1327
   1328	if (unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
   1329		memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
   1330		q->mapping_err++;
   1331		return NETDEV_TX_BUSY;
   1332	}
   1333
   1334	pos = &q->q.desc[q->q.pidx];
   1335	end = (u64 *)pos + flits;
   1336	/* FW_ULPTX_WR */
   1337	wr = pos;
   1338	/* WR will need len16 */
   1339	len16 = DIV_ROUND_UP(flits, 2);
   1340	wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
   1341	wr->flowid_len16 = htonl(wr_mid | FW_WR_LEN16_V(len16));
   1342	wr->cookie = 0;
   1343	pos += sizeof(*wr);
   1344	/* ULP_TXPKT */
   1345	ulptx = pos;
   1346	ulptx->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) |
   1347				ULP_TXPKT_CHANNELID_V(tx_info->port_id) |
   1348				ULP_TXPKT_FID_V(q->q.cntxt_id) |
   1349				ULP_TXPKT_RO_F);
   1350	ulptx->len = htonl(len16 - 1);
   1351	/* ULPTX_IDATA sub-command */
   1352	idata = (struct ulptx_idata *)(ulptx + 1);
   1353	idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM) | ULP_TX_SC_MORE_F);
   1354	/* idata length will include cpl_tx_sec_pdu + key context size +
   1355	 * cpl_tx_data header.
   1356	 */
   1357	idata->len = htonl(sizeof(*cpl) + tx_info->key_ctx_len +
   1358			   sizeof(*tx_data) + AES_BLOCK_LEN + prior_data_len);
   1359	/* SEC CPL */
   1360	cpl = (struct cpl_tx_sec_pdu *)(idata + 1);
    1361	/* cipher start will have tls header + iv size extra if it's the header
    1362	 * part of a tls record; otherwise only the 16 byte IV is added.
   1363	 */
   1364	cipher_start =
   1365		AES_BLOCK_LEN + 1 +
   1366		(!tls_rec_offset ? TLS_HEADER_SIZE + tx_info->iv_size : 0);
   1367
   1368	cpl->op_ivinsrtofst =
   1369		htonl(CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
   1370		      CPL_TX_SEC_PDU_CPLLEN_V(CHCR_CPL_TX_SEC_PDU_LEN_64BIT) |
   1371		      CPL_TX_SEC_PDU_IVINSRTOFST_V(1));
   1372	cpl->pldlen = htonl(data_len + AES_BLOCK_LEN + prior_data_len);
   1373	cpl->aadstart_cipherstop_hi =
   1374		htonl(CPL_TX_SEC_PDU_CIPHERSTART_V(cipher_start));
   1375	cpl->cipherstop_lo_authinsert = 0;
   1376	/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
   1377	cpl->seqno_numivs = htonl(tx_info->scmd0_short_seqno_numivs);
   1378	cpl->ivgen_hdrlen = htonl(tx_info->scmd0_short_ivgen_hdrlen);
   1379	cpl->scmd1 = 0;
   1380
   1381	pos = cpl + 1;
   1382	/* check if space left to fill the keys */
   1383	left = (void *)q->q.stat - pos;
   1384	if (!left) {
   1385		left = (void *)end - (void *)q->q.stat;
   1386		pos = q->q.desc;
   1387		end = pos + left;
   1388	}
   1389
   1390	pos = chcr_copy_to_txd(&tx_info->key_ctx, &q->q, pos,
   1391			       tx_info->key_ctx_len);
   1392	left = (void *)q->q.stat - pos;
   1393
   1394	if (!left) {
   1395		left = (void *)end - (void *)q->q.stat;
   1396		pos = q->q.desc;
   1397		end = pos + left;
   1398	}
   1399	/* CPL_TX_DATA */
   1400	tx_data = (void *)pos;
   1401	OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tx_info->tid));
   1402	tx_data->len = htonl(TX_DATA_MSS_V(mss) |
   1403			     TX_LENGTH_V(data_len + prior_data_len));
   1404	tx_data->rsvd = htonl(tcp_seq);
   1405	tx_data->flags = htonl(TX_BYPASS_F);
   1406	if (tcp_push)
   1407		tx_data->flags |= htonl(TX_PUSH_F | TX_SHOVE_F);
   1408
   1409	/* check left again, it might go beyond queue limit */
   1410	pos = tx_data + 1;
   1411	left = (void *)q->q.stat - pos;
   1412
   1413	/* check the position again */
   1414	if (!left) {
   1415		left = (void *)end - (void *)q->q.stat;
   1416		pos = q->q.desc;
   1417		end = pos + left;
   1418	}
   1419	/* copy the 16 byte IV for AES-CTR, which includes 4 bytes of salt, 8
    1420	 * bytes of actual IV and a 4 byte block counter.
   1421	 */
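        	/* the counter starts at 2 because GCM reserves counter 1 for the
        	 * tag computation, so the first payload block uses counter 2; for a
        	 * mid-record resume, add the number of whole AES blocks already
        	 * consumed before tls_rec_offset.
        	 */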
   1422	memcpy(pos, tx_info->key_ctx.salt, tx_info->salt_size);
   1423	memcpy(pos + tx_info->salt_size, &iv_record, tx_info->iv_size);
   1424	*(__be32 *)(pos + tx_info->salt_size + tx_info->iv_size) =
   1425		htonl(2 + (tls_rec_offset ? ((tls_rec_offset -
   1426		(TLS_HEADER_SIZE + tx_info->iv_size)) / AES_BLOCK_LEN) : 0));
   1427
   1428	pos += 16;
    1429	/* prior_data_len will always be less than 16 bytes; fill prior_data
    1430	 * in after the AES CTR block and zero the rest of the 16-byte
    1431	 * chunk.
   1432	 */
   1433	if (prior_data_len)
   1434		pos = chcr_copy_to_txd(prior_data, &q->q, pos, 16);
   1435	/* send the complete packet except the header */
   1436	cxgb4_write_partial_sgl(skb, &q->q, pos, end, sgl_sdesc->addr,
   1437				skb_offset, data_len);
   1438	sgl_sdesc->skb = skb;
   1439
   1440	chcr_txq_advance(&q->q, ndesc);
   1441	cxgb4_ring_tx_db(adap, &q->q, ndesc);
   1442
   1443	return 0;
   1444}
   1445
   1446/*
    1447 * chcr_ktls_tx_plaintxt: This handler will take care of records which have
    1448 * only plain text (only tls header and iv).
    1449 * @tx_info - driver specific tls info.
    1450 * @skb - skb containing a partial record.
    1451 * @tcp_seq
    1452 * @mss - segment size.
    1453 * @tcp_push - tcp push bit.
    1454 * @q - TX queue.
    1455 * @port_id - port number
    1456 * @prior_data - data before the current segment, required to make this record
   1457 *		 16 byte aligned.
   1458 * @prior_data_len - prior_data length (less than 16)
   1459 * return: NETDEV_TX_BUSY/NET_TX_OK.
   1460 */
   1461static int chcr_ktls_tx_plaintxt(struct chcr_ktls_info *tx_info,
   1462				 struct sk_buff *skb, u32 tcp_seq, u32 mss,
   1463				 bool tcp_push, struct sge_eth_txq *q,
   1464				 u32 port_id, u8 *prior_data,
   1465				 u32 data_len, u32 skb_offset,
   1466				 u32 prior_data_len)
   1467{
   1468	int credits, left, len16, last_desc;
   1469	unsigned int flits = 0, ndesc;
   1470	struct tx_sw_desc *sgl_sdesc;
   1471	struct cpl_tx_data *tx_data;
   1472	struct ulptx_idata *idata;
   1473	struct ulp_txpkt *ulptx;
   1474	struct fw_ulptx_wr *wr;
   1475	u32 wr_mid = 0, nfrags;
   1476	void *pos;
   1477	u64 *end;
   1478
   1479	flits = DIV_ROUND_UP(CHCR_PLAIN_TX_DATA_LEN, 8);
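        	/* a flit is 8 bytes; CHCR_PLAIN_TX_DATA_LEN accounts for the fixed
        	 * header overhead (WR + CPL_TX_DATA) of the plaintext path.
        	 */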
   1480	nfrags = chcr_get_nfrags_to_send(skb, skb_offset, data_len);
   1481	flits += chcr_sgl_len(nfrags);
   1482	if (prior_data_len)
   1483		flits += 2;
   1484
   1485	/* WR will need len16 */
   1486	len16 = DIV_ROUND_UP(flits, 2);
   1487	/* check how many descriptors needed */
   1488	ndesc = DIV_ROUND_UP(flits, 8);
   1489
   1490	credits = chcr_txq_avail(&q->q) - ndesc;
   1491	if (unlikely(credits < 0)) {
   1492		chcr_eth_txq_stop(q);
   1493		return NETDEV_TX_BUSY;
   1494	}
   1495
   1496	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
   1497		chcr_eth_txq_stop(q);
   1498		wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
   1499	}
   1500
   1501	last_desc = q->q.pidx + ndesc - 1;
   1502	if (last_desc >= q->q.size)
   1503		last_desc -= q->q.size;
   1504	sgl_sdesc = &q->q.sdesc[last_desc];
   1505
   1506	if (unlikely(cxgb4_map_skb(tx_info->adap->pdev_dev, skb,
   1507				   sgl_sdesc->addr) < 0)) {
   1508		memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
   1509		q->mapping_err++;
   1510		return NETDEV_TX_BUSY;
   1511	}
   1512
   1513	pos = &q->q.desc[q->q.pidx];
   1514	end = (u64 *)pos + flits;
   1515	/* FW_ULPTX_WR */
   1516	wr = pos;
   1517	wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
   1518	wr->flowid_len16 = htonl(wr_mid | FW_WR_LEN16_V(len16));
   1519	wr->cookie = 0;
   1520	/* ULP_TXPKT */
   1521	ulptx = (struct ulp_txpkt *)(wr + 1);
   1522	ulptx->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) |
   1523			ULP_TXPKT_DATAMODIFY_V(0) |
   1524			ULP_TXPKT_CHANNELID_V(tx_info->port_id) |
   1525			ULP_TXPKT_DEST_V(0) |
   1526			ULP_TXPKT_FID_V(q->q.cntxt_id) | ULP_TXPKT_RO_V(1));
   1527	ulptx->len = htonl(len16 - 1);
   1528	/* ULPTX_IDATA sub-command */
   1529	idata = (struct ulptx_idata *)(ulptx + 1);
   1530	idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM) | ULP_TX_SC_MORE_F);
   1531	idata->len = htonl(sizeof(*tx_data) + prior_data_len);
   1532	/* CPL_TX_DATA */
   1533	tx_data = (struct cpl_tx_data *)(idata + 1);
   1534	OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tx_info->tid));
   1535	tx_data->len = htonl(TX_DATA_MSS_V(mss) |
   1536			     TX_LENGTH_V(data_len + prior_data_len));
   1537	/* set tcp seq number */
   1538	tx_data->rsvd = htonl(tcp_seq);
   1539	tx_data->flags = htonl(TX_BYPASS_F);
   1540	if (tcp_push)
   1541		tx_data->flags |= htonl(TX_PUSH_F | TX_SHOVE_F);
   1542
   1543	pos = tx_data + 1;
    1544	/* apart from the prior_data_len bytes, the remaining part of the 16
    1545	 * bytes should be set to zero.
   1546	 */
   1547	if (prior_data_len)
   1548		pos = chcr_copy_to_txd(prior_data, &q->q, pos, 16);
   1549
   1550	/* check left again, it might go beyond queue limit */
   1551	left = (void *)q->q.stat - pos;
   1552
   1553	/* check the position again */
   1554	if (!left) {
   1555		left = (void *)end - (void *)q->q.stat;
   1556		pos = q->q.desc;
   1557		end = pos + left;
   1558	}
   1559	/* send the complete packet including the header */
   1560	cxgb4_write_partial_sgl(skb, &q->q, pos, end, sgl_sdesc->addr,
   1561				skb_offset, data_len);
   1562	sgl_sdesc->skb = skb;
   1563
   1564	chcr_txq_advance(&q->q, ndesc);
   1565	cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
   1566	return 0;
   1567}
   1568
   1569static int chcr_ktls_tunnel_pkt(struct chcr_ktls_info *tx_info,
   1570				struct sk_buff *skb,
   1571				struct sge_eth_txq *q)
   1572{
   1573	u32 ctrl, iplen, maclen, wr_mid = 0, len16;
   1574	struct tx_sw_desc *sgl_sdesc;
   1575	struct fw_eth_tx_pkt_wr *wr;
   1576	struct cpl_tx_pkt_core *cpl;
   1577	unsigned int flits, ndesc;
   1578	int credits, last_desc;
   1579	u64 cntrl1, *end;
   1580	void *pos;
   1581
   1582	ctrl = sizeof(*cpl);
   1583	flits = DIV_ROUND_UP(sizeof(*wr) + ctrl, 8);
   1584
   1585	flits += chcr_sgl_len(skb_shinfo(skb)->nr_frags + 1);
   1586	len16 = DIV_ROUND_UP(flits, 2);
   1587	/* check how many descriptors needed */
   1588	ndesc = DIV_ROUND_UP(flits, 8);
   1589
   1590	credits = chcr_txq_avail(&q->q) - ndesc;
   1591	if (unlikely(credits < 0)) {
   1592		chcr_eth_txq_stop(q);
   1593		return -ENOMEM;
   1594	}
   1595
   1596	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
   1597		chcr_eth_txq_stop(q);
   1598		wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
   1599	}
   1600
   1601	last_desc = q->q.pidx + ndesc - 1;
   1602	if (last_desc >= q->q.size)
   1603		last_desc -= q->q.size;
   1604	sgl_sdesc = &q->q.sdesc[last_desc];
   1605
   1606	if (unlikely(cxgb4_map_skb(tx_info->adap->pdev_dev, skb,
   1607				   sgl_sdesc->addr) < 0)) {
   1608		memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
   1609		q->mapping_err++;
   1610		return -ENOMEM;
   1611	}
   1612
   1613	iplen = skb_network_header_len(skb);
   1614	maclen = skb_mac_header_len(skb);
   1615
   1616	pos = &q->q.desc[q->q.pidx];
   1617	end = (u64 *)pos + flits;
   1618	wr = pos;
   1619
   1620	/* Firmware work request header */
   1621	wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
   1622			       FW_WR_IMMDLEN_V(ctrl));
   1623
   1624	wr->equiq_to_len16 = htonl(wr_mid | FW_WR_LEN16_V(len16));
   1625	wr->r3 = 0;
   1626
   1627	cpl = (void *)(wr + 1);
   1628
   1629	/* CPL header */
   1630	cpl->ctrl0 = htonl(TXPKT_OPCODE_V(CPL_TX_PKT) |
   1631			   TXPKT_INTF_V(tx_info->tx_chan) |
   1632			   TXPKT_PF_V(tx_info->adap->pf));
   1633	cpl->pack = 0;
   1634	cntrl1 = TXPKT_CSUM_TYPE_V(tx_info->ip_family == AF_INET ?
   1635				   TX_CSUM_TCPIP : TX_CSUM_TCPIP6);
   1636	cntrl1 |= T6_TXPKT_ETHHDR_LEN_V(maclen - ETH_HLEN) |
   1637		  TXPKT_IPHDR_LEN_V(iplen);
   1638	/* checksum offload */
   1639	cpl->ctrl1 = cpu_to_be64(cntrl1);
   1640	cpl->len = htons(skb->len);
   1641
   1642	pos = cpl + 1;
   1643
   1644	cxgb4_write_sgl(skb, &q->q, pos, end, 0, sgl_sdesc->addr);
   1645	sgl_sdesc->skb = skb;
   1646	chcr_txq_advance(&q->q, ndesc);
   1647	cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
   1648	return 0;
   1649}
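
/* Illustrative sketch only: the unit conversions used in
 * chcr_ktls_tunnel_pkt() above. One flit is 8 bytes, one FW_WR_LEN16
 * unit is 2 flits (16 bytes) and one TX descriptor holds 8 flits
 * (64 bytes). sgl_flits stands in for chcr_sgl_len(nr_frags + 1).
 */
static inline void example_tunnel_pkt_units(u32 hdr_bytes, u32 sgl_flits,
					    u32 *len16, u32 *ndesc)
{
	u32 flits = DIV_ROUND_UP(hdr_bytes, 8) + sgl_flits;

	*len16 = DIV_ROUND_UP(flits, 2);	/* for wr->equiq_to_len16 */
	*ndesc = DIV_ROUND_UP(flits, 8);	/* descriptors to reserve */
}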
   1650
    1651/*
    1652 * chcr_ktls_copy_record_in_skb
    1653 * @nskb - new skb to which the frags are added.
    1654 * @skb - old skb, from which socket and destructor details are copied.
    1655 * @record - record whose complete 16K payload sits in frags.
    1656 */
   1657static void chcr_ktls_copy_record_in_skb(struct sk_buff *nskb,
   1658					 struct sk_buff *skb,
   1659					 struct tls_record_info *record)
   1660{
   1661	int i = 0;
   1662
   1663	for (i = 0; i < record->num_frags; i++) {
   1664		skb_shinfo(nskb)->frags[i] = record->frags[i];
   1665		/* increase the frag ref count */
   1666		__skb_frag_ref(&skb_shinfo(nskb)->frags[i]);
   1667	}
   1668
   1669	skb_shinfo(nskb)->nr_frags = record->num_frags;
   1670	nskb->data_len = record->len;
   1671	nskb->len += record->len;
   1672	nskb->truesize += record->len;
   1673	nskb->sk = skb->sk;
   1674	nskb->destructor = skb->destructor;
   1675	refcount_add(nskb->truesize, &nskb->sk->sk_wmem_alloc);
   1676}
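
/* Note on the copy above: the record's pages are shared by reference
 * (__skb_frag_ref) rather than copied, and the rebuilt nskb is then
 * charged to the original socket's write budget via sk_wmem_alloc, so
 * the resend accounts against skb->sk just as the original skb did.
 */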
   1677
    1678/*
    1679 * chcr_end_part_handler: handles a record that is complete, or whose
    1680 * end part has been received. The T6 adapter has an issue in that it
    1681 * cannot send out a TAG with a partial record, so for an end part we
    1682 * have to send the TAG as well, and for that we need to fetch the
    1683 * complete record and send it to the crypto module.
    1684 * @tx_info - driver specific tls info.
    1685 * @skb - skb containing the partial record.
    1686 * @record - complete record of 16K size.
    1687 * @tcp_seq
    1688 * @mss - segment size in which TP needs to chop a packet.
    1689 * @tcp_push_no_fin - tcp push if fin is not set.
    1690 * @q - TX queue.
    1691 * @tls_end_offset - offset from the end of the record.
    1692 * @last_wr - true if this is the last part of the skb going out.
    1693 * return: NETDEV_TX_OK/NETDEV_TX_BUSY.
    1694 */
   1695static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
   1696				 struct sk_buff *skb,
   1697				 struct tls_record_info *record,
   1698				 u32 tcp_seq, int mss, bool tcp_push_no_fin,
   1699				 struct sge_eth_txq *q, u32 skb_offset,
   1700				 u32 tls_end_offset, bool last_wr)
   1701{
   1702	bool free_skb_if_tx_fails = false;
   1703	struct sk_buff *nskb = NULL;
   1704
   1705	/* check if it is a complete record */
   1706	if (tls_end_offset == record->len) {
   1707		nskb = skb;
   1708		atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_complete_pkts);
   1709	} else {
   1710		nskb = alloc_skb(0, GFP_ATOMIC);
   1711		if (!nskb) {
   1712			dev_kfree_skb_any(skb);
   1713			return NETDEV_TX_BUSY;
   1714		}
   1715
   1716		/* copy complete record in skb */
   1717		chcr_ktls_copy_record_in_skb(nskb, skb, record);
   1718		/* packet is being sent from the beginning, update the tcp_seq
   1719		 * accordingly.
   1720		 */
   1721		tcp_seq = tls_record_start_seq(record);
   1722		/* reset skb offset */
   1723		skb_offset = 0;
   1724
   1725		if (last_wr)
   1726			dev_kfree_skb_any(skb);
   1727		else
   1728			free_skb_if_tx_fails = true;
   1729
   1730		last_wr = true;
   1731
   1732		atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_end_pkts);
   1733	}
   1734
   1735	if (chcr_ktls_xmit_wr_complete(nskb, tx_info, q, tcp_seq,
   1736				       last_wr, record->len, skb_offset,
   1737				       record->num_frags,
   1738				       (last_wr && tcp_push_no_fin),
   1739				       mss)) {
   1740		if (free_skb_if_tx_fails)
   1741			dev_kfree_skb_any(skb);
   1742		goto out;
   1743	}
   1744	tx_info->prev_seq = record->end_seq;
   1745	return 0;
   1746out:
   1747	dev_kfree_skb_any(nskb);
   1748	return NETDEV_TX_BUSY;
   1749}
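
/* Worked example for the handler above, with made-up numbers: for a
 * record of record->len = 1024 bytes of which only the last
 * tls_end_offset = 300 bytes fall in this skb, the skb lacks the data
 * the TAG was computed over, so the full record is re-fetched,
 * tcp_seq is rewound to tls_record_start_seq(record) and skb_offset is
 * reset to 0 before the complete-record WR goes out. Only when
 * tls_end_offset == record->len is the original skb sent as-is.
 */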
   1750
    1751/*
    1752 * chcr_short_record_handler: takes care of records that don't include
    1753 * their end part (the first part or a middle part of a record). In
    1754 * such cases AES CTR is used in place of AES GCM to send out the
    1755 * partial packet. The chunk might be the first part of the record or
    1756 * a middle part. For a middle part we fetch prior data to make its
    1757 * start 16 byte aligned. If the chunk begins inside the tls header or
    1758 * iv, we back up to the start of the tls header; if it ends inside the
    1759 * TAG, we remove the complete TAG and send only the payload.
    1760 * There is one more possibility: the chunk holds only a partial
    1761 * header, in which case that portion is sent as plaintext.
    1762 * @tx_info - driver specific tls info.
    1763 * @skb - skb containing the partial record.
    1764 * @record - complete record of 16K size.
    1765 * @tcp_seq
    1766 * @mss - segment size in which TP needs to chop a packet.
    1767 * @tcp_push_no_fin - tcp push if fin is not set.
    1768 * @q - TX queue.
    1769 * @tls_end_offset - offset from the end of the record.
    1770 * return: NETDEV_TX_OK/NETDEV_TX_BUSY.
    1771 */
   1772static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
   1773				     struct sk_buff *skb,
   1774				     struct tls_record_info *record,
   1775				     u32 tcp_seq, int mss, bool tcp_push_no_fin,
   1776				     u32 data_len, u32 skb_offset,
   1777				     struct sge_eth_txq *q, u32 tls_end_offset)
   1778{
   1779	u32 tls_rec_offset = tcp_seq - tls_record_start_seq(record);
   1780	u8 prior_data[16] = {0};
   1781	u32 prior_data_len = 0;
   1782
    1783	/* if the skb ends in the middle of the tag/HASH, that is real
    1784	 * trouble; send only the data that precedes the HASH.
    1785	 */
   1786	int remaining_record = tls_end_offset - data_len;
   1787
   1788	if (remaining_record > 0 &&
   1789	    remaining_record < TLS_CIPHER_AES_GCM_128_TAG_SIZE) {
   1790		int trimmed_len = 0;
   1791
   1792		if (tls_end_offset > TLS_CIPHER_AES_GCM_128_TAG_SIZE)
   1793			trimmed_len = data_len -
   1794				      (TLS_CIPHER_AES_GCM_128_TAG_SIZE -
   1795				       remaining_record);
   1796		if (!trimmed_len)
   1797			return FALLBACK;
   1798
   1799		WARN_ON(trimmed_len > data_len);
   1800
   1801		data_len = trimmed_len;
   1802		atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_trimmed_pkts);
   1803	}
   1804
   1805	/* check if it is only the header part. */
   1806	if (tls_rec_offset + data_len <= (TLS_HEADER_SIZE + tx_info->iv_size)) {
   1807		if (chcr_ktls_tx_plaintxt(tx_info, skb, tcp_seq, mss,
   1808					  tcp_push_no_fin, q,
   1809					  tx_info->port_id, prior_data,
   1810					  data_len, skb_offset, prior_data_len))
   1811			goto out;
   1812
   1813		tx_info->prev_seq = tcp_seq + data_len;
   1814		return 0;
   1815	}
   1816
    1817	/* check whether the middle chunk's start point is 16 byte aligned;
    1818	 * CTR needs a 16 byte aligned start point to begin encryption.
    1819	 */
   1820	if (tls_rec_offset) {
    1821		/* there is an offset from the start, so this is a middle chunk */
   1822		int remaining = 0;
   1823
   1824		if (tls_rec_offset < (TLS_HEADER_SIZE + tx_info->iv_size)) {
   1825			prior_data_len = tls_rec_offset;
   1826			tls_rec_offset = 0;
   1827			remaining = 0;
   1828		} else {
   1829			prior_data_len =
   1830				(tls_rec_offset -
   1831				(TLS_HEADER_SIZE + tx_info->iv_size))
   1832				% AES_BLOCK_LEN;
   1833			remaining = tls_rec_offset - prior_data_len;
   1834		}
   1835
    1836		/* if prior_data_len is non-zero, we need to fetch that much
    1837		 * prior data to make this chunk 16 byte aligned, walking the
    1838		 * frags to reach the required start offset.
    1839		 */
   1840		if (prior_data_len) {
   1841			int i = 0;
   1842			u8 *data = NULL;
   1843			skb_frag_t *f;
   1844			u8 *vaddr;
   1845			int frag_size = 0, frag_delta = 0;
   1846
   1847			while (remaining > 0) {
   1848				frag_size = skb_frag_size(&record->frags[i]);
   1849				if (remaining < frag_size)
   1850					break;
   1851
   1852				remaining -= frag_size;
   1853				i++;
   1854			}
   1855			f = &record->frags[i];
   1856			vaddr = kmap_atomic(skb_frag_page(f));
   1857
   1858			data = vaddr + skb_frag_off(f)  + remaining;
   1859			frag_delta = skb_frag_size(f) - remaining;
   1860
   1861			if (frag_delta >= prior_data_len) {
   1862				memcpy(prior_data, data, prior_data_len);
   1863				kunmap_atomic(vaddr);
   1864			} else {
   1865				memcpy(prior_data, data, frag_delta);
   1866				kunmap_atomic(vaddr);
   1867				/* get the next page */
   1868				f = &record->frags[i + 1];
   1869				vaddr = kmap_atomic(skb_frag_page(f));
   1870				data = vaddr + skb_frag_off(f);
   1871				memcpy(prior_data + frag_delta,
   1872				       data, (prior_data_len - frag_delta));
   1873				kunmap_atomic(vaddr);
   1874			}
    1875			/* rewind tcp_seq by the prior_data_len just fetched */
   1876			tcp_seq -= prior_data_len;
   1877		}
   1878		atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_middle_pkts);
   1879	} else {
   1880		atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_start_pkts);
   1881	}
   1882
   1883	if (chcr_ktls_xmit_wr_short(skb, tx_info, q, tcp_seq, tcp_push_no_fin,
   1884				    mss, tls_rec_offset, prior_data,
   1885				    prior_data_len, data_len, skb_offset)) {
   1886		goto out;
   1887	}
   1888
   1889	tx_info->prev_seq = tcp_seq + data_len + prior_data_len;
   1890	return 0;
   1891out:
   1892	dev_kfree_skb_any(skb);
   1893	return NETDEV_TX_BUSY;
   1894}
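
/* Illustrative sketch only: the AES-CTR alignment arithmetic used in
 * chcr_short_record_handler() above. Assuming TLS_HEADER_SIZE is 5 and
 * an 8-byte explicit GCM IV, a middle chunk starting 100 bytes into
 * the record sits 87 bytes into the ciphertext; 87 % 16 leaves 7 bytes
 * of prior data to fetch, and tcp_seq is rewound by those 7 bytes so
 * encryption starts on an AES block boundary.
 */
static inline u32 example_ctr_prior_data_len(u32 tls_rec_offset,
					     u32 hdr_plus_iv_len)
{
	if (tls_rec_offset < hdr_plus_iv_len)
		return tls_rec_offset;	/* still inside header + IV */

	return (tls_rec_offset - hdr_plus_iv_len) % AES_BLOCK_LEN;
}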
   1895
   1896static int chcr_ktls_sw_fallback(struct sk_buff *skb,
   1897				 struct chcr_ktls_info *tx_info,
   1898				 struct sge_eth_txq *q)
   1899{
   1900	u32 data_len, skb_offset;
   1901	struct sk_buff *nskb;
   1902	struct tcphdr *th;
   1903
   1904	nskb = tls_encrypt_skb(skb);
   1905
   1906	if (!nskb)
   1907		return 0;
   1908
   1909	th = tcp_hdr(nskb);
   1910	skb_offset =  skb_transport_offset(nskb) + tcp_hdrlen(nskb);
   1911	data_len = nskb->len - skb_offset;
   1912	skb_tx_timestamp(nskb);
   1913
   1914	if (chcr_ktls_tunnel_pkt(tx_info, nskb, q))
   1915		goto out;
   1916
   1917	tx_info->prev_seq = ntohl(th->seq) + data_len;
   1918	atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_fallback);
   1919	return 0;
   1920out:
   1921	dev_kfree_skb_any(nskb);
   1922	return 0;
   1923}
   1924/* nic tls TX handler */
   1925static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
   1926{
   1927	u32 tls_end_offset, tcp_seq, skb_data_len, skb_offset;
   1928	struct ch_ktls_port_stats_debug *port_stats;
   1929	struct chcr_ktls_ofld_ctx_tx *tx_ctx;
   1930	struct ch_ktls_stats_debug *stats;
   1931	struct tcphdr *th = tcp_hdr(skb);
   1932	int data_len, qidx, ret = 0, mss;
   1933	struct tls_record_info *record;
   1934	struct chcr_ktls_info *tx_info;
   1935	struct tls_context *tls_ctx;
   1936	struct sge_eth_txq *q;
   1937	struct adapter *adap;
   1938	unsigned long flags;
   1939
   1940	tcp_seq = ntohl(th->seq);
   1941	skb_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
   1942	skb_data_len = skb->len - skb_offset;
   1943	data_len = skb_data_len;
   1944
   1945	mss = skb_is_gso(skb) ? skb_shinfo(skb)->gso_size : data_len;
   1946
   1947	tls_ctx = tls_get_ctx(skb->sk);
   1948	if (unlikely(tls_ctx->netdev != dev))
   1949		goto out;
   1950
   1951	tx_ctx = chcr_get_ktls_tx_context(tls_ctx);
   1952	tx_info = tx_ctx->chcr_info;
   1953
   1954	if (unlikely(!tx_info))
   1955		goto out;
   1956
   1957	adap = tx_info->adap;
   1958	stats = &adap->ch_ktls_stats;
   1959	port_stats = &stats->ktls_port[tx_info->port_id];
   1960
   1961	qidx = skb->queue_mapping;
   1962	q = &adap->sge.ethtxq[qidx + tx_info->first_qset];
   1963	cxgb4_reclaim_completed_tx(adap, &q->q, true);
    1964	/* if tcp options are set but FIN is not, send the options first */
   1965	if (!th->fin && chcr_ktls_check_tcp_options(th)) {
   1966		ret = chcr_ktls_write_tcp_options(tx_info, skb, q,
   1967						  tx_info->tx_chan);
   1968		if (ret)
   1969			return NETDEV_TX_BUSY;
   1970	}
   1971
    1972	/* TCP segments can be received either complete or partial.
    1973	 * chcr_end_part_handler handles the cases where a complete record
    1974	 * or the end part of a record is received. When only a partial end
    1975	 * part arrives, we send the complete record again.
    1976	 */
   1977
   1978	spin_lock_irqsave(&tx_ctx->base.lock, flags);
   1979
   1980	do {
   1981
   1982		cxgb4_reclaim_completed_tx(adap, &q->q, true);
   1983		/* fetch the tls record */
   1984		record = tls_get_record(&tx_ctx->base, tcp_seq,
   1985					&tx_info->record_no);
    1986		/* By the time the packet reached us, its ACK may already have
    1987		 * been received and the record freed; handle that gracefully.
    1988		 */
   1989		if (unlikely(!record)) {
   1990			spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
   1991			atomic64_inc(&port_stats->ktls_tx_drop_no_sync_data);
   1992			goto out;
   1993		}
   1994
   1995		tls_end_offset = record->end_seq - tcp_seq;
   1996
   1997		pr_debug("seq 0x%x, end_seq 0x%x prev_seq 0x%x, datalen 0x%x\n",
   1998			 tcp_seq, record->end_seq, tx_info->prev_seq, data_len);
   1999		/* update tcb for the skb */
   2000		if (skb_data_len == data_len) {
   2001			u32 tx_max = tcp_seq;
   2002
   2003			if (!tls_record_is_start_marker(record) &&
   2004			    tls_end_offset < TLS_CIPHER_AES_GCM_128_TAG_SIZE)
   2005				tx_max = record->end_seq -
   2006					TLS_CIPHER_AES_GCM_128_TAG_SIZE;
   2007
   2008			ret = chcr_ktls_xmit_tcb_cpls(tx_info, q, tx_max,
   2009						      ntohl(th->ack_seq),
   2010						      ntohs(th->window),
   2011						      tls_end_offset !=
   2012						      record->len);
   2013			if (ret) {
   2014				spin_unlock_irqrestore(&tx_ctx->base.lock,
   2015						       flags);
   2016				goto out;
   2017			}
   2018
   2019			if (th->fin)
   2020				skb_get(skb);
   2021		}
   2022
   2023		if (unlikely(tls_record_is_start_marker(record))) {
   2024			atomic64_inc(&port_stats->ktls_tx_skip_no_sync_data);
    2025			/* If tls_end_offset < data_len, there is some data after
    2026			 * the start marker that needs encryption: send the
    2027			 * plaintext first and take an skb refcount. Otherwise send
    2028			 * out the complete pkt as plaintext.
    2029			 */
   2030			if (tls_end_offset < data_len)
   2031				skb_get(skb);
   2032			else
   2033				tls_end_offset = data_len;
   2034
   2035			ret = chcr_ktls_tx_plaintxt(tx_info, skb, tcp_seq, mss,
   2036						    (!th->fin && th->psh), q,
   2037						    tx_info->port_id, NULL,
   2038						    tls_end_offset, skb_offset,
   2039						    0);
   2040
   2041			if (ret) {
    2042				/* drop the skb refcount taken earlier */
   2043				if (tls_end_offset < data_len)
   2044					dev_kfree_skb_any(skb);
   2045				spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
   2046				goto out;
   2047			}
   2048
   2049			data_len -= tls_end_offset;
   2050			tcp_seq = record->end_seq;
   2051			skb_offset += tls_end_offset;
   2052			continue;
   2053		}
   2054
   2055		/* if a tls record is finishing in this SKB */
   2056		if (tls_end_offset <= data_len) {
   2057			ret = chcr_end_part_handler(tx_info, skb, record,
   2058						    tcp_seq, mss,
   2059						    (!th->fin && th->psh), q,
   2060						    skb_offset,
   2061						    tls_end_offset,
   2062						    skb_offset +
   2063						    tls_end_offset == skb->len);
   2064
   2065			data_len -= tls_end_offset;
    2066			/* tcp_seq must be advanced so that the next
    2067			 * record can be handled. */
   2068			tcp_seq += tls_end_offset;
   2069			skb_offset += tls_end_offset;
   2070		} else {
   2071			ret = chcr_short_record_handler(tx_info, skb,
   2072							record, tcp_seq, mss,
   2073							(!th->fin && th->psh),
   2074							data_len, skb_offset,
   2075							q, tls_end_offset);
   2076			data_len = 0;
   2077		}
   2078
    2079		/* on any failure, break out of the loop */
   2080		if (ret) {
   2081			spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
   2082			if (th->fin)
   2083				dev_kfree_skb_any(skb);
   2084
   2085			if (ret == FALLBACK)
   2086				return chcr_ktls_sw_fallback(skb, tx_info, q);
   2087
   2088			return NETDEV_TX_OK;
   2089		}
   2090
   2091		/* length should never be less than 0 */
   2092		WARN_ON(data_len < 0);
   2093
   2094	} while (data_len > 0);
   2095
   2096	spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
   2097	atomic64_inc(&port_stats->ktls_tx_encrypted_packets);
   2098	atomic64_add(skb_data_len, &port_stats->ktls_tx_encrypted_bytes);
   2099
    2100	/* tcp FIN is set, so send a separate tcp msg that includes all the
    2101	 * options as well.
    2102	 */
   2103	if (th->fin) {
   2104		chcr_ktls_write_tcp_options(tx_info, skb, q, tx_info->tx_chan);
   2105		dev_kfree_skb_any(skb);
   2106	}
   2107
   2108	return NETDEV_TX_OK;
   2109out:
   2110	dev_kfree_skb_any(skb);
   2111	return NETDEV_TX_OK;
   2112}
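
/* Worked example for the xmit loop above, with made-up numbers: an skb
 * carrying 2000 payload bytes that starts 500 bytes before a record
 * boundary first sees tls_end_offset = record->end_seq - tcp_seq = 500,
 * so chcr_end_part_handler() consumes 500 bytes; the next iteration
 * finds the following record with tls_end_offset > data_len = 1500, so
 * chcr_short_record_handler() consumes the remainder and the loop ends.
 */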
   2113
   2114static void *chcr_ktls_uld_add(const struct cxgb4_lld_info *lldi)
   2115{
   2116	struct chcr_ktls_uld_ctx *u_ctx;
   2117
   2118	pr_info_once("%s - version %s\n", CHCR_KTLS_DRV_DESC,
   2119		     CHCR_KTLS_DRV_VERSION);
   2120	u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL);
   2121	if (!u_ctx) {
   2122		u_ctx = ERR_PTR(-ENOMEM);
   2123		goto out;
   2124	}
   2125	u_ctx->lldi = *lldi;
   2126	u_ctx->detach = false;
   2127	xa_init_flags(&u_ctx->tid_list, XA_FLAGS_LOCK_BH);
   2128out:
   2129	return u_ctx;
   2130}
   2131
   2132static const struct tlsdev_ops chcr_ktls_ops = {
   2133	.tls_dev_add = chcr_ktls_dev_add,
   2134	.tls_dev_del = chcr_ktls_dev_del,
   2135};
   2136
   2137static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
   2138	[CPL_ACT_OPEN_RPL] = chcr_ktls_cpl_act_open_rpl,
   2139	[CPL_SET_TCB_RPL] = chcr_ktls_cpl_set_tcb_rpl,
   2140};
   2141
   2142static int chcr_ktls_uld_rx_handler(void *handle, const __be64 *rsp,
   2143				    const struct pkt_gl *pgl)
   2144{
   2145	const struct cpl_act_open_rpl *rpl = (struct cpl_act_open_rpl *)rsp;
   2146	struct chcr_ktls_uld_ctx *u_ctx = handle;
   2147	u8 opcode = rpl->ot.opcode;
   2148	struct adapter *adap;
   2149
   2150	adap = pci_get_drvdata(u_ctx->lldi.pdev);
   2151
   2152	if (!work_handlers[opcode]) {
   2153		pr_err("Unsupported opcode %d received\n", opcode);
   2154		return 0;
   2155	}
   2156
   2157	work_handlers[opcode](adap, (unsigned char *)&rsp[1]);
   2158	return 0;
   2159}
   2160
   2161static void clear_conn_resources(struct chcr_ktls_info *tx_info)
   2162{
   2163	/* clear l2t entry */
   2164	if (tx_info->l2te)
   2165		cxgb4_l2t_release(tx_info->l2te);
   2166
   2167#if IS_ENABLED(CONFIG_IPV6)
   2168	/* clear clip entry */
   2169	if (tx_info->ip_family == AF_INET6)
   2170		cxgb4_clip_release(tx_info->netdev, (const u32 *)
   2171				   &tx_info->sk->sk_v6_rcv_saddr,
   2172				   1);
   2173#endif
   2174
   2175	/* clear tid */
   2176	if (tx_info->tid != -1)
   2177		cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
   2178				 tx_info->tid, tx_info->ip_family);
   2179}
   2180
   2181static void ch_ktls_reset_all_conn(struct chcr_ktls_uld_ctx *u_ctx)
   2182{
   2183	struct ch_ktls_port_stats_debug *port_stats;
   2184	struct chcr_ktls_ofld_ctx_tx *tx_ctx;
   2185	struct chcr_ktls_info *tx_info;
   2186	unsigned long index;
   2187
   2188	xa_for_each(&u_ctx->tid_list, index, tx_ctx) {
   2189		tx_info = tx_ctx->chcr_info;
   2190		clear_conn_resources(tx_info);
   2191		port_stats = &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
   2192		atomic64_inc(&port_stats->ktls_tx_connection_close);
   2193		kvfree(tx_info);
   2194		tx_ctx->chcr_info = NULL;
   2195		/* release module refcount */
   2196		module_put(THIS_MODULE);
   2197	}
   2198}
   2199
   2200static int chcr_ktls_uld_state_change(void *handle, enum cxgb4_state new_state)
   2201{
   2202	struct chcr_ktls_uld_ctx *u_ctx = handle;
   2203
   2204	switch (new_state) {
   2205	case CXGB4_STATE_UP:
   2206		pr_info("%s: Up\n", pci_name(u_ctx->lldi.pdev));
   2207		mutex_lock(&dev_mutex);
   2208		list_add_tail(&u_ctx->entry, &uld_ctx_list);
   2209		mutex_unlock(&dev_mutex);
   2210		break;
   2211	case CXGB4_STATE_START_RECOVERY:
   2212	case CXGB4_STATE_DOWN:
   2213	case CXGB4_STATE_DETACH:
   2214		pr_info("%s: Down\n", pci_name(u_ctx->lldi.pdev));
   2215		mutex_lock(&dev_mutex);
   2216		u_ctx->detach = true;
   2217		list_del(&u_ctx->entry);
   2218		ch_ktls_reset_all_conn(u_ctx);
   2219		xa_destroy(&u_ctx->tid_list);
   2220		mutex_unlock(&dev_mutex);
   2221		break;
   2222	default:
   2223		break;
   2224	}
   2225
   2226	return 0;
   2227}
   2228
   2229static struct cxgb4_uld_info chcr_ktls_uld_info = {
   2230	.name = CHCR_KTLS_DRV_MODULE_NAME,
   2231	.nrxq = 1,
   2232	.rxq_size = 1024,
   2233	.add = chcr_ktls_uld_add,
   2234	.tx_handler = chcr_ktls_xmit,
   2235	.rx_handler = chcr_ktls_uld_rx_handler,
   2236	.state_change = chcr_ktls_uld_state_change,
   2237	.tlsdev_ops = &chcr_ktls_ops,
   2238};
   2239
   2240static int __init chcr_ktls_init(void)
   2241{
   2242	cxgb4_register_uld(CXGB4_ULD_KTLS, &chcr_ktls_uld_info);
   2243	return 0;
   2244}
   2245
   2246static void __exit chcr_ktls_exit(void)
   2247{
   2248	struct chcr_ktls_uld_ctx *u_ctx, *tmp;
   2249	struct adapter *adap;
   2250
   2251	pr_info("driver unloaded\n");
   2252
   2253	mutex_lock(&dev_mutex);
   2254	list_for_each_entry_safe(u_ctx, tmp, &uld_ctx_list, entry) {
   2255		adap = pci_get_drvdata(u_ctx->lldi.pdev);
   2256		memset(&adap->ch_ktls_stats, 0, sizeof(adap->ch_ktls_stats));
   2257		list_del(&u_ctx->entry);
   2258		xa_destroy(&u_ctx->tid_list);
   2259		kfree(u_ctx);
   2260	}
   2261	mutex_unlock(&dev_mutex);
   2262	cxgb4_unregister_uld(CXGB4_ULD_KTLS);
   2263}
   2264
   2265module_init(chcr_ktls_init);
   2266module_exit(chcr_ktls_exit);
   2267
   2268MODULE_DESCRIPTION("Chelsio NIC TLS ULD driver");
   2269MODULE_LICENSE("GPL");
   2270MODULE_AUTHOR("Chelsio Communications");
   2271MODULE_VERSION(CHCR_KTLS_DRV_VERSION);