cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

cm.c (122493B)


      1// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
      2/* Copyright (c) 2015 - 2021 Intel Corporation */
      3#include "main.h"
      4#include "trace.h"
      5
      6static void irdma_cm_post_event(struct irdma_cm_event *event);
      7static void irdma_disconnect_worker(struct work_struct *work);
      8
      9/**
     10 * irdma_free_sqbuf - put back puda buffer if refcount is 0
     11 * @vsi: The VSI structure of the device
     12 * @bufp: puda buffer to free
     13 */
     14void irdma_free_sqbuf(struct irdma_sc_vsi *vsi, void *bufp)
     15{
     16	struct irdma_puda_buf *buf = bufp;
     17	struct irdma_puda_rsrc *ilq = vsi->ilq;
     18
     19	if (refcount_dec_and_test(&buf->refcount))
     20		irdma_puda_ret_bufpool(ilq, buf);
     21}
     22
     23/**
     24 * irdma_record_ird_ord - Record IRD/ORD passed in
     25 * @cm_node: connection's node
     26 * @conn_ird: connection IRD
     27 * @conn_ord: connection ORD
     28 */
     29static void irdma_record_ird_ord(struct irdma_cm_node *cm_node, u32 conn_ird,
     30				 u32 conn_ord)
     31{
     32	if (conn_ird > cm_node->dev->hw_attrs.max_hw_ird)
     33		conn_ird = cm_node->dev->hw_attrs.max_hw_ird;
     34
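       	/* clamp ORD to the HW limit; a zero ORD is bumped to 1 when the
       	 * first operation sent will be a 0-length RDMA READ, which needs
       	 * one ORD credit
       	 */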
     35	if (conn_ord > cm_node->dev->hw_attrs.max_hw_ord)
     36		conn_ord = cm_node->dev->hw_attrs.max_hw_ord;
     37	else if (!conn_ord && cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO)
     38		conn_ord = 1;
     39	cm_node->ird_size = conn_ird;
     40	cm_node->ord_size = conn_ord;
     41}
     42
     43/**
     44 * irdma_copy_ip_ntohl - copy IP address from network to host order
     45 * @dst: IP address in host order
     46 * @src: IP address in network order (big endian)
     47 */
     48void irdma_copy_ip_ntohl(u32 *dst, __be32 *src)
     49{
     50	*dst++ = ntohl(*src++);
     51	*dst++ = ntohl(*src++);
     52	*dst++ = ntohl(*src++);
     53	*dst = ntohl(*src);
     54}
     55
     56/**
     57 * irdma_copy_ip_htonl - copy IP address from host to network order
     58 * @dst: IP address in network order (big endian)
     59 * @src: IP address in host order
     60 */
     61void irdma_copy_ip_htonl(__be32 *dst, u32 *src)
     62{
     63	*dst++ = htonl(*src++);
     64	*dst++ = htonl(*src++);
     65	*dst++ = htonl(*src++);
     66	*dst = htonl(*src);
     67}
     68
     69/**
     70 * irdma_get_addr_info - get a copy of cm_node's ip/tcp info
     71 * @cm_node: contains ip/tcp info
     72 * @cm_info: to get a copy of the cm_node ip/tcp info
     73 */
     74static void irdma_get_addr_info(struct irdma_cm_node *cm_node,
     75				struct irdma_cm_info *cm_info)
     76{
     77	memset(cm_info, 0, sizeof(*cm_info));
     78	cm_info->ipv4 = cm_node->ipv4;
     79	cm_info->vlan_id = cm_node->vlan_id;
     80	memcpy(cm_info->loc_addr, cm_node->loc_addr, sizeof(cm_info->loc_addr));
     81	memcpy(cm_info->rem_addr, cm_node->rem_addr, sizeof(cm_info->rem_addr));
     82	cm_info->loc_port = cm_node->loc_port;
     83	cm_info->rem_port = cm_node->rem_port;
     84}
     85
     86/**
     87 * irdma_fill_sockaddr4 - fill in addr info for IPv4 connection
     88 * @cm_node: connection's node
     89 * @event: upper layer's cm event
     90 */
     91static inline void irdma_fill_sockaddr4(struct irdma_cm_node *cm_node,
     92					struct iw_cm_event *event)
     93{
     94	struct sockaddr_in *laddr = (struct sockaddr_in *)&event->local_addr;
     95	struct sockaddr_in *raddr = (struct sockaddr_in *)&event->remote_addr;
     96
     97	laddr->sin_family = AF_INET;
     98	raddr->sin_family = AF_INET;
     99
    100	laddr->sin_port = htons(cm_node->loc_port);
    101	raddr->sin_port = htons(cm_node->rem_port);
    102
    103	laddr->sin_addr.s_addr = htonl(cm_node->loc_addr[0]);
    104	raddr->sin_addr.s_addr = htonl(cm_node->rem_addr[0]);
    105}
    106
    107/**
    108 * irdma_fill_sockaddr6 - fill in addr info for IPv6 connection
    109 * @cm_node: connection's node
    110 * @event: upper layer's cm event
    111 */
    112static inline void irdma_fill_sockaddr6(struct irdma_cm_node *cm_node,
    113					struct iw_cm_event *event)
    114{
    115	struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)&event->local_addr;
    116	struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)&event->remote_addr;
    117
    118	laddr6->sin6_family = AF_INET6;
    119	raddr6->sin6_family = AF_INET6;
    120
    121	laddr6->sin6_port = htons(cm_node->loc_port);
    122	raddr6->sin6_port = htons(cm_node->rem_port);
    123
    124	irdma_copy_ip_htonl(laddr6->sin6_addr.in6_u.u6_addr32,
    125			    cm_node->loc_addr);
    126	irdma_copy_ip_htonl(raddr6->sin6_addr.in6_u.u6_addr32,
    127			    cm_node->rem_addr);
    128}
    129
    130/**
    131 * irdma_get_cmevent_info - for cm event upcall
    132 * @cm_node: connection's node
    133 * @cm_id: upper layers cm struct for the event
    134 * @event: upper layer's cm event
    135 */
    136static inline void irdma_get_cmevent_info(struct irdma_cm_node *cm_node,
    137					  struct iw_cm_id *cm_id,
    138					  struct iw_cm_event *event)
    139{
    140	memcpy(&event->local_addr, &cm_id->m_local_addr,
    141	       sizeof(event->local_addr));
    142	memcpy(&event->remote_addr, &cm_id->m_remote_addr,
    143	       sizeof(event->remote_addr));
    144	if (cm_node) {
    145		event->private_data = cm_node->pdata_buf;
    146		event->private_data_len = (u8)cm_node->pdata.size;
    147		event->ird = cm_node->ird_size;
    148		event->ord = cm_node->ord_size;
    149	}
    150}
    151
    152/**
    153 * irdma_send_cm_event - upcall cm's event handler
    154 * @cm_node: connection's node
    155 * @cm_id: upper layer's cm info struct
    156 * @type: Event type to indicate
    157 * @status: status for the event type
    158 */
    159static int irdma_send_cm_event(struct irdma_cm_node *cm_node,
    160			       struct iw_cm_id *cm_id,
    161			       enum iw_cm_event_type type, int status)
    162{
    163	struct iw_cm_event event = {};
    164
    165	event.event = type;
    166	event.status = status;
    167	trace_irdma_send_cm_event(cm_node, cm_id, type, status,
    168				  __builtin_return_address(0));
    169
    170	ibdev_dbg(&cm_node->iwdev->ibdev,
    171		  "CM: cm_node %p cm_id=%p accel=%d state=%d event_type=%d status=%d\n",
    172		  cm_node, cm_id, cm_node->accelerated, cm_node->state, type,
    173		  status);
    174
    175	switch (type) {
    176	case IW_CM_EVENT_CONNECT_REQUEST:
    177		if (cm_node->ipv4)
    178			irdma_fill_sockaddr4(cm_node, &event);
    179		else
    180			irdma_fill_sockaddr6(cm_node, &event);
    181		event.provider_data = cm_node;
    182		event.private_data = cm_node->pdata_buf;
    183		event.private_data_len = (u8)cm_node->pdata.size;
    184		event.ird = cm_node->ird_size;
    185		break;
    186	case IW_CM_EVENT_CONNECT_REPLY:
    187		irdma_get_cmevent_info(cm_node, cm_id, &event);
    188		break;
    189	case IW_CM_EVENT_ESTABLISHED:
    190		event.ird = cm_node->ird_size;
    191		event.ord = cm_node->ord_size;
    192		break;
    193	case IW_CM_EVENT_DISCONNECT:
    194	case IW_CM_EVENT_CLOSE:
    195		/* Wait if we are in RTS but haven't issued the iwcm event upcall */
    196		if (!cm_node->accelerated)
    197			wait_for_completion(&cm_node->establish_comp);
    198		break;
    199	default:
    200		return -EINVAL;
    201	}
    202
    203	return cm_id->event_handler(cm_id, &event);
    204}
    205
    206/**
    207 * irdma_timer_list_prep - add connection nodes to a list to perform timer tasks
    208 * @cm_core: cm's core
    210 * @timer_list: a timer list to which cm_node will be added
    210 */
    211static void irdma_timer_list_prep(struct irdma_cm_core *cm_core,
    212				  struct list_head *timer_list)
    213{
    214	struct irdma_cm_node *cm_node;
    215	int bkt;
    216
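       	/* take a reference on each node added to the local timer list; it
       	 * is dropped in irdma_cm_timer_tick() once the node is serviced
       	 */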
    217	hash_for_each_rcu(cm_core->cm_hash_tbl, bkt, cm_node, list) {
    218		if ((cm_node->close_entry || cm_node->send_entry) &&
    219		    refcount_inc_not_zero(&cm_node->refcnt))
    220			list_add(&cm_node->timer_entry, timer_list);
    221	}
    222}
    223
    224/**
    225 * irdma_create_event - create cm event
    226 * @cm_node: connection's node
    227 * @type: Event type to generate
    228 */
    229static struct irdma_cm_event *irdma_create_event(struct irdma_cm_node *cm_node,
    230						 enum irdma_cm_event_type type)
    231{
    232	struct irdma_cm_event *event;
    233
    234	if (!cm_node->cm_id)
    235		return NULL;
    236
    237	event = kzalloc(sizeof(*event), GFP_ATOMIC);
    238
    239	if (!event)
    240		return NULL;
    241
    242	event->type = type;
    243	event->cm_node = cm_node;
    244	memcpy(event->cm_info.rem_addr, cm_node->rem_addr,
    245	       sizeof(event->cm_info.rem_addr));
    246	memcpy(event->cm_info.loc_addr, cm_node->loc_addr,
    247	       sizeof(event->cm_info.loc_addr));
    248	event->cm_info.rem_port = cm_node->rem_port;
    249	event->cm_info.loc_port = cm_node->loc_port;
    250	event->cm_info.cm_id = cm_node->cm_id;
    251	ibdev_dbg(&cm_node->iwdev->ibdev,
    252		  "CM: node=%p event=%p type=%u dst=%pI4 src=%pI4\n", cm_node,
    253		  event, type, event->cm_info.loc_addr,
    254		  event->cm_info.rem_addr);
    255	trace_irdma_create_event(cm_node, type, __builtin_return_address(0));
    256	irdma_cm_post_event(event);
    257
    258	return event;
    259}
    260
    261/**
    262 * irdma_free_retrans_entry - free send entry
    263 * @cm_node: connection's node
    264 */
    265static void irdma_free_retrans_entry(struct irdma_cm_node *cm_node)
    266{
    267	struct irdma_device *iwdev = cm_node->iwdev;
    268	struct irdma_timer_entry *send_entry;
    269
    270	send_entry = cm_node->send_entry;
    271	if (!send_entry)
    272		return;
    273
    274	cm_node->send_entry = NULL;
    275	irdma_free_sqbuf(&iwdev->vsi, send_entry->sqbuf);
    276	kfree(send_entry);
    277	refcount_dec(&cm_node->refcnt);
    278}
    279
    280/**
    281 * irdma_cleanup_retrans_entry - free send entry with lock
    282 * @cm_node: connection's node
    283 */
    284static void irdma_cleanup_retrans_entry(struct irdma_cm_node *cm_node)
    285{
    286	unsigned long flags;
    287
    288	spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
    289	irdma_free_retrans_entry(cm_node);
    290	spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
    291}
    292
    293/**
    294 * irdma_form_ah_cm_frame - get a free packet and build frame with address handle
    295 * @cm_node: connection's node info to use in frame
    296 * @options: pointer to options info
    297 * @hdr: pointer to mpa header
    298 * @pdata: pointer to private data
    299 * @flags: indicates which TCP flags to set (SYN, ACK, FIN, RST)
    300 */
    301static struct irdma_puda_buf *irdma_form_ah_cm_frame(struct irdma_cm_node *cm_node,
    302						     struct irdma_kmem_info *options,
    303						     struct irdma_kmem_info *hdr,
    304						     struct irdma_mpa_priv_info *pdata,
    305						     u8 flags)
    306{
    307	struct irdma_puda_buf *sqbuf;
    308	struct irdma_sc_vsi *vsi = &cm_node->iwdev->vsi;
    309	u8 *buf;
    310	struct tcphdr *tcph;
    311	u16 pktsize;
    312	u32 opts_len = 0;
    313	u32 pd_len = 0;
    314	u32 hdr_len = 0;
    315
    316	if (!cm_node->ah || !cm_node->ah->ah_info.ah_valid) {
    317		ibdev_dbg(&cm_node->iwdev->ibdev, "CM: AH invalid\n");
    318		return NULL;
    319	}
    320
    321	sqbuf = irdma_puda_get_bufpool(vsi->ilq);
    322	if (!sqbuf) {
    323		ibdev_dbg(&cm_node->iwdev->ibdev, "CM: SQ buf NULL\n");
    324		return NULL;
    325	}
    326
    327	sqbuf->ah_id = cm_node->ah->ah_info.ah_idx;
    328	buf = sqbuf->mem.va;
    329	if (options)
    330		opts_len = (u32)options->size;
    331
    332	if (hdr)
    333		hdr_len = hdr->size;
    334
    335	if (pdata)
    336		pd_len = pdata->size;
    337
    338	pktsize = sizeof(*tcph) + opts_len + hdr_len + pd_len;
    339
    340	memset(buf, 0, pktsize);
    341
    342	sqbuf->totallen = pktsize;
    343	sqbuf->tcphlen = sizeof(*tcph) + opts_len;
    344	sqbuf->scratch = cm_node;
    345
    346	tcph = (struct tcphdr *)buf;
    347	buf += sizeof(*tcph);
    348
    349	tcph->source = htons(cm_node->loc_port);
    350	tcph->dest = htons(cm_node->rem_port);
    351	tcph->seq = htonl(cm_node->tcp_cntxt.loc_seq_num);
    352
    353	if (flags & SET_ACK) {
    354		cm_node->tcp_cntxt.loc_ack_num = cm_node->tcp_cntxt.rcv_nxt;
    355		tcph->ack_seq = htonl(cm_node->tcp_cntxt.loc_ack_num);
    356		tcph->ack = 1;
    357	} else {
    358		tcph->ack_seq = 0;
    359	}
    360
    361	if (flags & SET_SYN) {
    362		cm_node->tcp_cntxt.loc_seq_num++;
    363		tcph->syn = 1;
    364	} else {
    365		cm_node->tcp_cntxt.loc_seq_num += hdr_len + pd_len;
    366	}
    367
    368	if (flags & SET_FIN) {
    369		cm_node->tcp_cntxt.loc_seq_num++;
    370		tcph->fin = 1;
    371	}
    372
    373	if (flags & SET_RST)
    374		tcph->rst = 1;
    375
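       	/* doff counts the TCP header in 32-bit words; "+ 3" rounds the
       	 * options length up to a 4-byte boundary
       	 */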
    376	tcph->doff = (u16)((sizeof(*tcph) + opts_len + 3) >> 2);
    377	sqbuf->tcphlen = tcph->doff << 2;
    378	tcph->window = htons(cm_node->tcp_cntxt.rcv_wnd);
    379	tcph->urg_ptr = 0;
    380
    381	if (opts_len) {
    382		memcpy(buf, options->addr, opts_len);
    383		buf += opts_len;
    384	}
    385
    386	if (hdr_len) {
    387		memcpy(buf, hdr->addr, hdr_len);
    388		buf += hdr_len;
    389	}
    390
    391	if (pdata && pdata->addr)
    392		memcpy(buf, pdata->addr, pdata->size);
    393
    394	refcount_set(&sqbuf->refcount, 1);
    395
    396	print_hex_dump_debug("ILQ: TRANSMIT ILQ BUFFER", DUMP_PREFIX_OFFSET,
    397			     16, 8, sqbuf->mem.va, sqbuf->totallen, false);
    398
    399	return sqbuf;
    400}
    401
    402/**
    403 * irdma_form_uda_cm_frame - get a free packet and build a full tcp/ip frame
    404 * @cm_node: connection's node info to use in frame
    405 * @options: pointer to options info
    406 * @hdr: pointer to mpa header
    407 * @pdata: pointer to private data
    408 * @flags: indicates which TCP flags to set (SYN, ACK, FIN, RST)
    409 */
    410static struct irdma_puda_buf *irdma_form_uda_cm_frame(struct irdma_cm_node *cm_node,
    411						      struct irdma_kmem_info *options,
    412						      struct irdma_kmem_info *hdr,
    413						      struct irdma_mpa_priv_info *pdata,
    414						      u8 flags)
    415{
    416	struct irdma_puda_buf *sqbuf;
    417	struct irdma_sc_vsi *vsi = &cm_node->iwdev->vsi;
    418	u8 *buf;
    419
    420	struct tcphdr *tcph;
    421	struct iphdr *iph;
    422	struct ipv6hdr *ip6h;
    423	struct ethhdr *ethh;
    424	u16 pktsize;
    425	u16 eth_hlen = ETH_HLEN;
    426	u32 opts_len = 0;
    427	u32 pd_len = 0;
    428	u32 hdr_len = 0;
    429
    430	u16 vtag;
    431
    432	sqbuf = irdma_puda_get_bufpool(vsi->ilq);
    433	if (!sqbuf)
    434		return NULL;
    435
    436	buf = sqbuf->mem.va;
    437
    438	if (options)
    439		opts_len = (u32)options->size;
    440
    441	if (hdr)
    442		hdr_len = hdr->size;
    443
    444	if (pdata)
    445		pd_len = pdata->size;
    446
    447	if (cm_node->vlan_id < VLAN_N_VID)
    448		eth_hlen += 4;
    449
    450	if (cm_node->ipv4)
    451		pktsize = sizeof(*iph) + sizeof(*tcph);
    452	else
    453		pktsize = sizeof(*ip6h) + sizeof(*tcph);
    454	pktsize += opts_len + hdr_len + pd_len;
    455
    456	memset(buf, 0, eth_hlen + pktsize);
    457
    458	sqbuf->totallen = pktsize + eth_hlen;
    459	sqbuf->maclen = eth_hlen;
    460	sqbuf->tcphlen = sizeof(*tcph) + opts_len;
    461	sqbuf->scratch = cm_node;
    462
    463	ethh = (struct ethhdr *)buf;
    464	buf += eth_hlen;
    465
    466	if (cm_node->do_lpb)
    467		sqbuf->do_lpb = true;
    468
    469	if (cm_node->ipv4) {
    470		sqbuf->ipv4 = true;
    471
    472		iph = (struct iphdr *)buf;
    473		buf += sizeof(*iph);
    474		tcph = (struct tcphdr *)buf;
    475		buf += sizeof(*tcph);
    476
    477		ether_addr_copy(ethh->h_dest, cm_node->rem_mac);
    478		ether_addr_copy(ethh->h_source, cm_node->loc_mac);
    479		if (cm_node->vlan_id < VLAN_N_VID) {
    480			((struct vlan_ethhdr *)ethh)->h_vlan_proto =
    481				htons(ETH_P_8021Q);
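       			/* VLAN TCI: user priority (PCP) in the top 3 bits,
       			 * VLAN ID in the low 12 bits
       			 */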
    482			vtag = (cm_node->user_pri << VLAN_PRIO_SHIFT) |
    483			       cm_node->vlan_id;
    484			((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(vtag);
    485
    486			((struct vlan_ethhdr *)ethh)->h_vlan_encapsulated_proto =
    487				htons(ETH_P_IP);
    488		} else {
    489			ethh->h_proto = htons(ETH_P_IP);
    490		}
    491
    492		iph->version = IPVERSION;
    493		iph->ihl = 5; /* 5 * 4-byte words, IP header len */
    494		iph->tos = cm_node->tos;
    495		iph->tot_len = htons(pktsize);
    496		iph->id = htons(++cm_node->tcp_cntxt.loc_id);
    497
    498		iph->frag_off = htons(0x4000);
    499		iph->ttl = 0x40;
    500		iph->protocol = IPPROTO_TCP;
    501		iph->saddr = htonl(cm_node->loc_addr[0]);
    502		iph->daddr = htonl(cm_node->rem_addr[0]);
    503	} else {
    504		sqbuf->ipv4 = false;
    505		ip6h = (struct ipv6hdr *)buf;
    506		buf += sizeof(*ip6h);
    507		tcph = (struct tcphdr *)buf;
    508		buf += sizeof(*tcph);
    509
    510		ether_addr_copy(ethh->h_dest, cm_node->rem_mac);
    511		ether_addr_copy(ethh->h_source, cm_node->loc_mac);
    512		if (cm_node->vlan_id < VLAN_N_VID) {
    513			((struct vlan_ethhdr *)ethh)->h_vlan_proto =
    514				htons(ETH_P_8021Q);
    515			vtag = (cm_node->user_pri << VLAN_PRIO_SHIFT) |
    516			       cm_node->vlan_id;
    517			((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(vtag);
    518			((struct vlan_ethhdr *)ethh)->h_vlan_encapsulated_proto =
    519				htons(ETH_P_IPV6);
    520		} else {
    521			ethh->h_proto = htons(ETH_P_IPV6);
    522		}
    523		ip6h->version = 6;
    524		ip6h->priority = cm_node->tos >> 4;
    525		ip6h->flow_lbl[0] = cm_node->tos << 4;
    526		ip6h->flow_lbl[1] = 0;
    527		ip6h->flow_lbl[2] = 0;
    528		ip6h->payload_len = htons(pktsize - sizeof(*ip6h));
    529		ip6h->nexthdr = 6;
    530		ip6h->hop_limit = 128;
    531		irdma_copy_ip_htonl(ip6h->saddr.in6_u.u6_addr32,
    532				    cm_node->loc_addr);
    533		irdma_copy_ip_htonl(ip6h->daddr.in6_u.u6_addr32,
    534				    cm_node->rem_addr);
    535	}
    536
    537	tcph->source = htons(cm_node->loc_port);
    538	tcph->dest = htons(cm_node->rem_port);
    539	tcph->seq = htonl(cm_node->tcp_cntxt.loc_seq_num);
    540
    541	if (flags & SET_ACK) {
    542		cm_node->tcp_cntxt.loc_ack_num = cm_node->tcp_cntxt.rcv_nxt;
    543		tcph->ack_seq = htonl(cm_node->tcp_cntxt.loc_ack_num);
    544		tcph->ack = 1;
    545	} else {
    546		tcph->ack_seq = 0;
    547	}
    548
    549	if (flags & SET_SYN) {
    550		cm_node->tcp_cntxt.loc_seq_num++;
    551		tcph->syn = 1;
    552	} else {
    553		cm_node->tcp_cntxt.loc_seq_num += hdr_len + pd_len;
    554	}
    555
    556	if (flags & SET_FIN) {
    557		cm_node->tcp_cntxt.loc_seq_num++;
    558		tcph->fin = 1;
    559	}
    560
    561	if (flags & SET_RST)
    562		tcph->rst = 1;
    563
    564	tcph->doff = (u16)((sizeof(*tcph) + opts_len + 3) >> 2);
    565	sqbuf->tcphlen = tcph->doff << 2;
    566	tcph->window = htons(cm_node->tcp_cntxt.rcv_wnd);
    567	tcph->urg_ptr = 0;
    568
    569	if (opts_len) {
    570		memcpy(buf, options->addr, opts_len);
    571		buf += opts_len;
    572	}
    573
    574	if (hdr_len) {
    575		memcpy(buf, hdr->addr, hdr_len);
    576		buf += hdr_len;
    577	}
    578
    579	if (pdata && pdata->addr)
    580		memcpy(buf, pdata->addr, pdata->size);
    581
    582	refcount_set(&sqbuf->refcount, 1);
    583
    584	print_hex_dump_debug("ILQ: TRANSMIT ILQ BUFFER", DUMP_PREFIX_OFFSET,
    585			     16, 8, sqbuf->mem.va, sqbuf->totallen, false);
    586	return sqbuf;
    587}
    588
    589/**
    590 * irdma_send_reset - Send RST packet
    591 * @cm_node: connection's node
    592 */
    593int irdma_send_reset(struct irdma_cm_node *cm_node)
    594{
    595	struct irdma_puda_buf *sqbuf;
    596	int flags = SET_RST | SET_ACK;
    597
    598	trace_irdma_send_reset(cm_node, 0, __builtin_return_address(0));
    599	sqbuf = cm_node->cm_core->form_cm_frame(cm_node, NULL, NULL, NULL,
    600						flags);
    601	if (!sqbuf)
    602		return -ENOMEM;
    603
    604	ibdev_dbg(&cm_node->iwdev->ibdev,
    605		  "CM: caller: %pS cm_node %p cm_id=%p accel=%d state=%d rem_port=0x%04x, loc_port=0x%04x rem_addr=%pI4 loc_addr=%pI4\n",
    606		  __builtin_return_address(0), cm_node, cm_node->cm_id,
    607		  cm_node->accelerated, cm_node->state, cm_node->rem_port,
    608		  cm_node->loc_port, cm_node->rem_addr, cm_node->loc_addr);
    609
    610	return irdma_schedule_cm_timer(cm_node, sqbuf, IRDMA_TIMER_TYPE_SEND, 0,
    611				       1);
    612}
    613
    614/**
    615 * irdma_active_open_err - send event for active side cm error
    616 * @cm_node: connection's node
    617 * @reset: Flag to send reset or not
    618 */
    619static void irdma_active_open_err(struct irdma_cm_node *cm_node, bool reset)
    620{
    621	trace_irdma_active_open_err(cm_node, reset,
    622				    __builtin_return_address(0));
    623	irdma_cleanup_retrans_entry(cm_node);
    624	cm_node->cm_core->stats_connect_errs++;
    625	if (reset) {
    626		ibdev_dbg(&cm_node->iwdev->ibdev,
    627			  "CM: cm_node=%p state=%d\n", cm_node,
    628			  cm_node->state);
    629		refcount_inc(&cm_node->refcnt);
    630		irdma_send_reset(cm_node);
    631	}
    632
    633	cm_node->state = IRDMA_CM_STATE_CLOSED;
    634	irdma_create_event(cm_node, IRDMA_CM_EVENT_ABORTED);
    635}
    636
    637/**
    638 * irdma_passive_open_err - handle passive side cm error
    639 * @cm_node: connection's node
    640 * @reset: send reset or just free cm_node
    641 */
    642static void irdma_passive_open_err(struct irdma_cm_node *cm_node, bool reset)
    643{
    644	irdma_cleanup_retrans_entry(cm_node);
    645	cm_node->cm_core->stats_passive_errs++;
    646	cm_node->state = IRDMA_CM_STATE_CLOSED;
    647	ibdev_dbg(&cm_node->iwdev->ibdev, "CM: cm_node=%p state=%d\n",
    648		  cm_node, cm_node->state);
    649	trace_irdma_passive_open_err(cm_node, reset,
    650				     __builtin_return_address(0));
    651	if (reset)
    652		irdma_send_reset(cm_node);
    653	else
    654		irdma_rem_ref_cm_node(cm_node);
    655}
    656
    657/**
    658 * irdma_event_connect_error - to create connect error event
    659 * @event: cm information for connect event
    660 */
    661static void irdma_event_connect_error(struct irdma_cm_event *event)
    662{
    663	struct irdma_qp *iwqp;
    664	struct iw_cm_id *cm_id;
    665
    666	cm_id = event->cm_node->cm_id;
    667	if (!cm_id)
    668		return;
    669
    670	iwqp = cm_id->provider_data;
    671
    672	if (!iwqp || !iwqp->iwdev)
    673		return;
    674
    675	iwqp->cm_id = NULL;
    676	cm_id->provider_data = NULL;
    677	irdma_send_cm_event(event->cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY,
    678			    -ECONNRESET);
    679	irdma_rem_ref_cm_node(event->cm_node);
    680}
    681
    682/**
    683 * irdma_process_options - process options from TCP header
    684 * @cm_node: connection's node
    685 * @optionsloc: pointer to start of options
    686 * @optionsize: size of all options
    687 * @syn_pkt: flag if syn packet
    688 */
    689static int irdma_process_options(struct irdma_cm_node *cm_node, u8 *optionsloc,
    690				 u32 optionsize, u32 syn_pkt)
    691{
    692	u32 tmp;
    693	u32 offset = 0;
    694	union all_known_options *all_options;
    695	char got_mss_option = 0;
    696
    697	while (offset < optionsize) {
    698		all_options = (union all_known_options *)(optionsloc + offset);
    699		switch (all_options->base.optionnum) {
    700		case OPTION_NUM_EOL:
    701			offset = optionsize;
    702			break;
    703		case OPTION_NUM_NONE:
    704			offset += 1;
    705			continue;
    706		case OPTION_NUM_MSS:
    707			ibdev_dbg(&cm_node->iwdev->ibdev,
    708				  "CM: MSS Length: %d Offset: %d Size: %d\n",
    709				  all_options->mss.len, offset, optionsize);
    710			got_mss_option = 1;
    711			if (all_options->mss.len != 4)
    712				return -EINVAL;
    713			tmp = ntohs(all_options->mss.mss);
    714			if ((cm_node->ipv4 &&
    715			     (tmp + IRDMA_MTU_TO_MSS_IPV4) < IRDMA_MIN_MTU_IPV4) ||
    716			    (!cm_node->ipv4 &&
    717			     (tmp + IRDMA_MTU_TO_MSS_IPV6) < IRDMA_MIN_MTU_IPV6))
    718				return -EINVAL;
    719			if (tmp < cm_node->tcp_cntxt.mss)
    720				cm_node->tcp_cntxt.mss = tmp;
    721			break;
    722		case OPTION_NUM_WINDOW_SCALE:
    723			cm_node->tcp_cntxt.snd_wscale =
    724				all_options->windowscale.shiftcount;
    725			break;
    726		default:
    727			ibdev_dbg(&cm_node->iwdev->ibdev,
    728				  "CM: Unsupported TCP Option: %x\n",
    729				  all_options->base.optionnum);
    730			break;
    731		}
    732		offset += all_options->base.len;
    733	}
    734	if (!got_mss_option && syn_pkt)
    735		cm_node->tcp_cntxt.mss = IRDMA_CM_DEFAULT_MSS;
    736
    737	return 0;
    738}
    739
    740/**
    741 * irdma_handle_tcp_options - setup TCP context info after parsing TCP options
    742 * @cm_node: connection's node
    743 * @tcph: pointer to tcp header
    744 * @optionsize: size of options rcvd
    745 * @passive: active or passive flag
    746 */
    747static int irdma_handle_tcp_options(struct irdma_cm_node *cm_node,
    748				    struct tcphdr *tcph, int optionsize,
    749				    int passive)
    750{
    751	u8 *optionsloc = (u8 *)&tcph[1];
    752	int ret;
    753
    754	if (optionsize) {
    755		ret = irdma_process_options(cm_node, optionsloc, optionsize,
    756					    (u32)tcph->syn);
    757		if (ret) {
    758			ibdev_dbg(&cm_node->iwdev->ibdev,
    759				  "CM: Node %p, Sending Reset\n", cm_node);
    760			if (passive)
    761				irdma_passive_open_err(cm_node, true);
    762			else
    763				irdma_active_open_err(cm_node, true);
    764			return ret;
    765		}
    766	}
    767
    768	cm_node->tcp_cntxt.snd_wnd = ntohs(tcph->window)
    769				     << cm_node->tcp_cntxt.snd_wscale;
    770
    771	if (cm_node->tcp_cntxt.snd_wnd > cm_node->tcp_cntxt.max_snd_wnd)
    772		cm_node->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.snd_wnd;
    773
    774	return 0;
    775}
    776
    777/**
    778 * irdma_build_mpa_v1 - build a MPA V1 frame
    779 * @cm_node: connection's node
    780 * @start_addr: address where to build frame
    781 * @mpa_key: MPA_KEY_REQUEST or MPA_KEY_REPLY
    782 */
    783static void irdma_build_mpa_v1(struct irdma_cm_node *cm_node, void *start_addr,
    784			       u8 mpa_key)
    785{
    786	struct ietf_mpa_v1 *mpa_frame = start_addr;
    787
    788	switch (mpa_key) {
    789	case MPA_KEY_REQUEST:
    790		memcpy(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE);
    791		break;
    792	case MPA_KEY_REPLY:
    793		memcpy(mpa_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE);
    794		break;
    795	default:
    796		break;
    797	}
    798	mpa_frame->flags = IETF_MPA_FLAGS_CRC;
    799	mpa_frame->rev = cm_node->mpa_frame_rev;
    800	mpa_frame->priv_data_len = htons(cm_node->pdata.size);
    801}
    802
    803/**
    804 * irdma_build_mpa_v2 - build a MPA V2 frame
    805 * @cm_node: connection's node
    806 * @start_addr: buffer start address
    807 * @mpa_key: MPA_KEY_REQUEST or MPA_KEY_REPLY
    808 */
    809static void irdma_build_mpa_v2(struct irdma_cm_node *cm_node, void *start_addr,
    810			       u8 mpa_key)
    811{
    812	struct ietf_mpa_v2 *mpa_frame = start_addr;
    813	struct ietf_rtr_msg *rtr_msg = &mpa_frame->rtr_msg;
    814	u16 ctrl_ird, ctrl_ord;
    815
    816	/* initialize the MPA v1 portion of the frame */
    817	irdma_build_mpa_v1(cm_node, start_addr, mpa_key);
    818	mpa_frame->flags |= IETF_MPA_V2_FLAG;
    819	if (cm_node->iwdev->iw_ooo) {
    820		mpa_frame->flags |= IETF_MPA_FLAGS_MARKERS;
    821		cm_node->rcv_mark_en = true;
    822	}
    823	mpa_frame->priv_data_len = cpu_to_be16(be16_to_cpu(mpa_frame->priv_data_len) +
    824					       IETF_RTR_MSG_SIZE);
    825
    826	/* initialize RTR msg */
    827	if (cm_node->mpav2_ird_ord == IETF_NO_IRD_ORD) {
    828		ctrl_ird = IETF_NO_IRD_ORD;
    829		ctrl_ord = IETF_NO_IRD_ORD;
    830	} else {
    831		ctrl_ird = (cm_node->ird_size > IETF_NO_IRD_ORD) ?
    832				   IETF_NO_IRD_ORD :
    833				   cm_node->ird_size;
    834		ctrl_ord = (cm_node->ord_size > IETF_NO_IRD_ORD) ?
    835				   IETF_NO_IRD_ORD :
    836				   cm_node->ord_size;
    837	}
    838	ctrl_ird |= IETF_PEER_TO_PEER;
    839
    840	switch (mpa_key) {
    841	case MPA_KEY_REQUEST:
    842		ctrl_ord |= IETF_RDMA0_WRITE;
    843		ctrl_ord |= IETF_RDMA0_READ;
    844		break;
    845	case MPA_KEY_REPLY:
    846		switch (cm_node->send_rdma0_op) {
    847		case SEND_RDMA_WRITE_ZERO:
    848			ctrl_ord |= IETF_RDMA0_WRITE;
    849			break;
    850		case SEND_RDMA_READ_ZERO:
    851			ctrl_ord |= IETF_RDMA0_READ;
    852			break;
    853		}
    854		break;
    855	default:
    856		break;
    857	}
    858	rtr_msg->ctrl_ird = htons(ctrl_ird);
    859	rtr_msg->ctrl_ord = htons(ctrl_ord);
    860}
    861
    862/**
    863 * irdma_cm_build_mpa_frame - build mpa frame for mpa version 1 or version 2
    864 * @cm_node: connection's node
    865 * @mpa: mpa data buffer
    866 * @mpa_key: MPA_KEY_REQUEST or MPA_KEY_REPLY
    867 */
    868static int irdma_cm_build_mpa_frame(struct irdma_cm_node *cm_node,
    869				    struct irdma_kmem_info *mpa, u8 mpa_key)
    870{
    871	int hdr_len = 0;
    872
    873	switch (cm_node->mpa_frame_rev) {
    874	case IETF_MPA_V1:
    875		hdr_len = sizeof(struct ietf_mpa_v1);
    876		irdma_build_mpa_v1(cm_node, mpa->addr, mpa_key);
    877		break;
    878	case IETF_MPA_V2:
    879		hdr_len = sizeof(struct ietf_mpa_v2);
    880		irdma_build_mpa_v2(cm_node, mpa->addr, mpa_key);
    881		break;
    882	default:
    883		break;
    884	}
    885
    886	return hdr_len;
    887}
    888
    889/**
    890 * irdma_send_mpa_request - active node sends mpa request to passive node
    891 * @cm_node: connection's node
    892 */
    893static int irdma_send_mpa_request(struct irdma_cm_node *cm_node)
    894{
    895	struct irdma_puda_buf *sqbuf;
    896
    897	cm_node->mpa_hdr.addr = &cm_node->mpa_v2_frame;
    898	cm_node->mpa_hdr.size = irdma_cm_build_mpa_frame(cm_node,
    899							 &cm_node->mpa_hdr,
    900							 MPA_KEY_REQUEST);
    901	if (!cm_node->mpa_hdr.size) {
    902		ibdev_dbg(&cm_node->iwdev->ibdev,
    903			  "CM: mpa size = %d\n", cm_node->mpa_hdr.size);
    904		return -EINVAL;
    905	}
    906
    907	sqbuf = cm_node->cm_core->form_cm_frame(cm_node, NULL,
    908						&cm_node->mpa_hdr,
    909						&cm_node->pdata, SET_ACK);
    910	if (!sqbuf)
    911		return -ENOMEM;
    912
    913	return irdma_schedule_cm_timer(cm_node, sqbuf, IRDMA_TIMER_TYPE_SEND, 1,
    914				       0);
    915}
    916
    917/**
    918 * irdma_send_mpa_reject - build and send an mpa reject frame
    919 * @cm_node: connection's node
    920 * @pdata: reject data for connection
    921 * @plen: length of reject data
    922 */
    923static int irdma_send_mpa_reject(struct irdma_cm_node *cm_node,
    924				 const void *pdata, u8 plen)
    925{
    926	struct irdma_puda_buf *sqbuf;
    927	struct irdma_mpa_priv_info priv_info;
    928
    929	cm_node->mpa_hdr.addr = &cm_node->mpa_v2_frame;
    930	cm_node->mpa_hdr.size = irdma_cm_build_mpa_frame(cm_node,
    931							 &cm_node->mpa_hdr,
    932							 MPA_KEY_REPLY);
    933
    934	cm_node->mpa_frame.flags |= IETF_MPA_FLAGS_REJECT;
    935	priv_info.addr = pdata;
    936	priv_info.size = plen;
    937
    938	sqbuf = cm_node->cm_core->form_cm_frame(cm_node, NULL,
    939						&cm_node->mpa_hdr, &priv_info,
    940						SET_ACK | SET_FIN);
    941	if (!sqbuf)
    942		return -ENOMEM;
    943
    944	cm_node->state = IRDMA_CM_STATE_FIN_WAIT1;
    945
    946	return irdma_schedule_cm_timer(cm_node, sqbuf, IRDMA_TIMER_TYPE_SEND, 1,
    947				       0);
    948}
    949
    950/**
    951 * irdma_negotiate_mpa_v2_ird_ord - negotiate MPAv2 IRD/ORD
    952 * @cm_node: connection's node
    953 * @buf: Data pointer
    954 */
    955static int irdma_negotiate_mpa_v2_ird_ord(struct irdma_cm_node *cm_node,
    956					  u8 *buf)
    957{
    958	struct ietf_mpa_v2 *mpa_v2_frame;
    959	struct ietf_rtr_msg *rtr_msg;
    960	u16 ird_size;
    961	u16 ord_size;
    962	u16 ctrl_ord;
    963	u16 ctrl_ird;
    964
    965	mpa_v2_frame = (struct ietf_mpa_v2 *)buf;
    966	rtr_msg = &mpa_v2_frame->rtr_msg;
    967
    968	/* parse rtr message */
    969	ctrl_ord = ntohs(rtr_msg->ctrl_ord);
    970	ctrl_ird = ntohs(rtr_msg->ctrl_ird);
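       	/* IETF_NO_IRD_ORD doubles as the mask for the IRD/ORD value bits;
       	 * a masked value equal to it is the "no IRD/ORD" sentinel below
       	 */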
    971	ird_size = ctrl_ird & IETF_NO_IRD_ORD;
    972	ord_size = ctrl_ord & IETF_NO_IRD_ORD;
    973
    974	if (!(ctrl_ird & IETF_PEER_TO_PEER))
    975		return -EOPNOTSUPP;
    976
    977	if (ird_size == IETF_NO_IRD_ORD || ord_size == IETF_NO_IRD_ORD) {
    978		cm_node->mpav2_ird_ord = IETF_NO_IRD_ORD;
    979		goto negotiate_done;
    980	}
    981
    982	if (cm_node->state != IRDMA_CM_STATE_MPAREQ_SENT) {
    983		/* responder */
    984		if (!ord_size && (ctrl_ord & IETF_RDMA0_READ))
    985			cm_node->ird_size = 1;
    986		if (cm_node->ord_size > ird_size)
    987			cm_node->ord_size = ird_size;
    988	} else {
    989		/* initiator */
    990		if (!ird_size && (ctrl_ord & IETF_RDMA0_READ))
    991			/* Remote peer doesn't support RDMA0_READ */
    992			return -EOPNOTSUPP;
    993
    994		if (cm_node->ord_size > ird_size)
    995			cm_node->ord_size = ird_size;
    996
    997		/* no resources available */
    998		if (cm_node->ird_size < ord_size)
    999			return -EINVAL;
   1000	}
   1001
   1002negotiate_done:
   1003	if (ctrl_ord & IETF_RDMA0_READ)
   1004		cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;
   1005	else if (ctrl_ord & IETF_RDMA0_WRITE)
   1006		cm_node->send_rdma0_op = SEND_RDMA_WRITE_ZERO;
   1007	else
   1008		/* Not supported RDMA0 operation */
   1009		return -EOPNOTSUPP;
   1010
   1011	ibdev_dbg(&cm_node->iwdev->ibdev,
   1012		  "CM: MPAV2 Negotiated ORD: %d, IRD: %d\n",
   1013		  cm_node->ord_size, cm_node->ird_size);
   1014	trace_irdma_negotiate_mpa_v2(cm_node);
   1015	return 0;
   1016}
   1017
   1018/**
   1019 * irdma_parse_mpa - process an IETF MPA frame
   1020 * @cm_node: connection's node
   1021 * @buf: Data pointer
   1022 * @type: to return accept or reject
   1023 * @len: Len of mpa buffer
   1024 */
   1025static int irdma_parse_mpa(struct irdma_cm_node *cm_node, u8 *buf, u32 *type,
   1026			   u32 len)
   1027{
   1028	struct ietf_mpa_v1 *mpa_frame;
   1029	int mpa_hdr_len, priv_data_len, ret;
   1030
   1031	*type = IRDMA_MPA_REQUEST_ACCEPT;
   1032
   1033	if (len < sizeof(struct ietf_mpa_v1)) {
   1034		ibdev_dbg(&cm_node->iwdev->ibdev,
   1035			  "CM: ietf buffer small (%x)\n", len);
   1036		return -EINVAL;
   1037	}
   1038
   1039	mpa_frame = (struct ietf_mpa_v1 *)buf;
   1040	mpa_hdr_len = sizeof(struct ietf_mpa_v1);
   1041	priv_data_len = ntohs(mpa_frame->priv_data_len);
   1042
   1043	if (priv_data_len > IETF_MAX_PRIV_DATA_LEN) {
   1044		ibdev_dbg(&cm_node->iwdev->ibdev,
   1045			  "CM: private_data too big %d\n", priv_data_len);
   1046		return -EOVERFLOW;
   1047	}
   1048
   1049	if (mpa_frame->rev != IETF_MPA_V1 && mpa_frame->rev != IETF_MPA_V2) {
   1050		ibdev_dbg(&cm_node->iwdev->ibdev,
   1051			  "CM: unsupported mpa rev = %d\n", mpa_frame->rev);
   1052		return -EINVAL;
   1053	}
   1054
   1055	if (mpa_frame->rev > cm_node->mpa_frame_rev) {
   1056		ibdev_dbg(&cm_node->iwdev->ibdev, "CM: rev %d\n",
   1057			  mpa_frame->rev);
   1058		return -EINVAL;
   1059	}
   1060
   1061	cm_node->mpa_frame_rev = mpa_frame->rev;
   1062	if (cm_node->state != IRDMA_CM_STATE_MPAREQ_SENT) {
   1063		if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REQ,
   1064			   IETF_MPA_KEY_SIZE)) {
   1065			ibdev_dbg(&cm_node->iwdev->ibdev,
   1066				  "CM: Unexpected MPA Key received\n");
   1067			return -EINVAL;
   1068		}
   1069	} else {
   1070		if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REP,
   1071			   IETF_MPA_KEY_SIZE)) {
   1072			ibdev_dbg(&cm_node->iwdev->ibdev,
   1073				  "CM: Unexpected MPA Key received\n");
   1074			return -EINVAL;
   1075		}
   1076	}
   1077
   1078	if (priv_data_len + mpa_hdr_len > len) {
   1079		ibdev_dbg(&cm_node->iwdev->ibdev,
   1080			  "CM: ietf buffer len(%x + %x > %x)\n",
   1081			  priv_data_len, mpa_hdr_len, len);
   1082		return -EOVERFLOW;
   1083	}
   1084
   1085	if (len > IRDMA_MAX_CM_BUF) {
   1086		ibdev_dbg(&cm_node->iwdev->ibdev,
   1087			  "CM: ietf buffer large len = %d\n", len);
   1088		return -EOVERFLOW;
   1089	}
   1090
   1091	switch (mpa_frame->rev) {
   1092	case IETF_MPA_V2:
   1093		mpa_hdr_len += IETF_RTR_MSG_SIZE;
   1094		ret = irdma_negotiate_mpa_v2_ird_ord(cm_node, buf);
   1095		if (ret)
   1096			return ret;
   1097		break;
   1098	case IETF_MPA_V1:
   1099	default:
   1100		break;
   1101	}
   1102
   1103	memcpy(cm_node->pdata_buf, buf + mpa_hdr_len, priv_data_len);
   1104	cm_node->pdata.size = priv_data_len;
   1105
   1106	if (mpa_frame->flags & IETF_MPA_FLAGS_REJECT)
   1107		*type = IRDMA_MPA_REQUEST_REJECT;
   1108
   1109	if (mpa_frame->flags & IETF_MPA_FLAGS_MARKERS)
   1110		cm_node->snd_mark_en = true;
   1111
   1112	return 0;
   1113}
   1114
   1115/**
   1116 * irdma_schedule_cm_timer - schedule a send or close timer for a cm_node
   1117 * @cm_node: connection's node
   1118 * @sqbuf: buffer to send
   1119 * @type: if it is send or close
   1120 * @send_retrans: if rexmits to be done
   1121 * @close_when_complete: is cm_node to be removed
   1122 *
   1123 * note - cm_node needs to be protected before calling this. Encase in:
   1124 *		refcount_inc(&cm_node->refcnt);
   1125 *		irdma_schedule_cm_timer(...)
   1126 *		irdma_rem_ref_cm_node(cm_core, cm_node);
   1127 */
   1128int irdma_schedule_cm_timer(struct irdma_cm_node *cm_node,
   1129			    struct irdma_puda_buf *sqbuf,
   1130			    enum irdma_timer_type type, int send_retrans,
   1131			    int close_when_complete)
   1132{
   1133	struct irdma_sc_vsi *vsi = &cm_node->iwdev->vsi;
   1134	struct irdma_cm_core *cm_core = cm_node->cm_core;
   1135	struct irdma_timer_entry *new_send;
   1136	u32 was_timer_set;
   1137	unsigned long flags;
   1138
   1139	new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC);
   1140	if (!new_send) {
   1141		if (type != IRDMA_TIMER_TYPE_CLOSE)
   1142			irdma_free_sqbuf(vsi, sqbuf);
   1143		return -ENOMEM;
   1144	}
   1145
   1146	new_send->retrycount = IRDMA_DEFAULT_RETRYS;
   1147	new_send->retranscount = IRDMA_DEFAULT_RETRANS;
   1148	new_send->sqbuf = sqbuf;
   1149	new_send->timetosend = jiffies;
   1150	new_send->type = type;
   1151	new_send->send_retrans = send_retrans;
   1152	new_send->close_when_complete = close_when_complete;
   1153
   1154	if (type == IRDMA_TIMER_TYPE_CLOSE) {
   1155		new_send->timetosend += (HZ / 10);
   1156		if (cm_node->close_entry) {
   1157			kfree(new_send);
   1158			ibdev_dbg(&cm_node->iwdev->ibdev,
   1159				  "CM: already close entry\n");
   1160			return -EINVAL;
   1161		}
   1162
   1163		cm_node->close_entry = new_send;
   1164	} else { /* type == IRDMA_TIMER_TYPE_SEND */
   1165		spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
   1166		cm_node->send_entry = new_send;
   1167		refcount_inc(&cm_node->refcnt);
   1168		spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
   1169		new_send->timetosend = jiffies + IRDMA_RETRY_TIMEOUT;
   1170
   1171		refcount_inc(&sqbuf->refcount);
   1172		irdma_puda_send_buf(vsi->ilq, sqbuf);
   1173		if (!send_retrans) {
   1174			irdma_cleanup_retrans_entry(cm_node);
   1175			if (close_when_complete)
   1176				irdma_rem_ref_cm_node(cm_node);
   1177			return 0;
   1178		}
   1179	}
   1180
   1181	spin_lock_irqsave(&cm_core->ht_lock, flags);
   1182	was_timer_set = timer_pending(&cm_core->tcp_timer);
   1183
   1184	if (!was_timer_set) {
   1185		cm_core->tcp_timer.expires = new_send->timetosend;
   1186		add_timer(&cm_core->tcp_timer);
   1187	}
   1188	spin_unlock_irqrestore(&cm_core->ht_lock, flags);
   1189
   1190	return 0;
   1191}
   1192
   1193/**
   1194 * irdma_retrans_expired - Could not rexmit the packet
   1195 * @cm_node: connection's node
   1196 */
   1197static void irdma_retrans_expired(struct irdma_cm_node *cm_node)
   1198{
   1199	enum irdma_cm_node_state state = cm_node->state;
   1200
   1201	cm_node->state = IRDMA_CM_STATE_CLOSED;
   1202	switch (state) {
   1203	case IRDMA_CM_STATE_SYN_RCVD:
   1204	case IRDMA_CM_STATE_CLOSING:
   1205		irdma_rem_ref_cm_node(cm_node);
   1206		break;
   1207	case IRDMA_CM_STATE_FIN_WAIT1:
   1208	case IRDMA_CM_STATE_LAST_ACK:
   1209		irdma_send_reset(cm_node);
   1210		break;
   1211	default:
   1212		refcount_inc(&cm_node->refcnt);
   1213		irdma_send_reset(cm_node);
   1214		irdma_create_event(cm_node, IRDMA_CM_EVENT_ABORTED);
   1215		break;
   1216	}
   1217}
   1218
   1219/**
   1220 * irdma_handle_close_entry - for handling retry/timeouts
   1221 * @cm_node: connection's node
   1222 * @rem_node: flag for remove cm_node
   1223 */
   1224static void irdma_handle_close_entry(struct irdma_cm_node *cm_node,
   1225				     u32 rem_node)
   1226{
   1227	struct irdma_timer_entry *close_entry = cm_node->close_entry;
   1228	struct irdma_qp *iwqp;
   1229	unsigned long flags;
   1230
   1231	if (!close_entry)
   1232		return;
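       	/* close-type timer entries reuse the sqbuf field to hold the
       	 * associated QP pointer (NULL when only a TIME_WAIT cm_node
       	 * needs cleanup)
       	 */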
   1233	iwqp = (struct irdma_qp *)close_entry->sqbuf;
   1234	if (iwqp) {
   1235		spin_lock_irqsave(&iwqp->lock, flags);
   1236		if (iwqp->cm_id) {
   1237			iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSED;
   1238			iwqp->hw_iwarp_state = IRDMA_QP_STATE_ERROR;
   1239			iwqp->last_aeq = IRDMA_AE_RESET_SENT;
   1240			iwqp->ibqp_state = IB_QPS_ERR;
   1241			spin_unlock_irqrestore(&iwqp->lock, flags);
   1242			irdma_cm_disconn(iwqp);
   1243		} else {
   1244			spin_unlock_irqrestore(&iwqp->lock, flags);
   1245		}
   1246	} else if (rem_node) {
   1247		/* TIME_WAIT state */
   1248		irdma_rem_ref_cm_node(cm_node);
   1249	}
   1250
   1251	kfree(close_entry);
   1252	cm_node->close_entry = NULL;
   1253}
   1254
   1255/**
   1256 * irdma_cm_timer_tick - system's timer expired callback
   1257 * @t: Pointer to timer_list
   1258 */
   1259static void irdma_cm_timer_tick(struct timer_list *t)
   1260{
   1261	unsigned long nexttimeout = jiffies + IRDMA_LONG_TIME;
   1262	struct irdma_cm_node *cm_node;
   1263	struct irdma_timer_entry *send_entry, *close_entry;
   1264	struct list_head *list_core_temp;
   1265	struct list_head *list_node;
   1266	struct irdma_cm_core *cm_core = from_timer(cm_core, t, tcp_timer);
   1267	struct irdma_sc_vsi *vsi;
   1268	u32 settimer = 0;
   1269	unsigned long timetosend;
   1270	unsigned long flags;
   1271	struct list_head timer_list;
   1272
   1273	INIT_LIST_HEAD(&timer_list);
   1274
   1275	rcu_read_lock();
   1276	irdma_timer_list_prep(cm_core, &timer_list);
   1277	rcu_read_unlock();
   1278
   1279	list_for_each_safe (list_node, list_core_temp, &timer_list) {
   1280		cm_node = container_of(list_node, struct irdma_cm_node,
   1281				       timer_entry);
   1282		close_entry = cm_node->close_entry;
   1283
   1284		if (close_entry) {
   1285			if (time_after(close_entry->timetosend, jiffies)) {
   1286				if (nexttimeout > close_entry->timetosend ||
   1287				    !settimer) {
   1288					nexttimeout = close_entry->timetosend;
   1289					settimer = 1;
   1290				}
   1291			} else {
   1292				irdma_handle_close_entry(cm_node, 1);
   1293			}
   1294		}
   1295
   1296		spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
   1297
   1298		send_entry = cm_node->send_entry;
   1299		if (!send_entry)
   1300			goto done;
   1301		if (time_after(send_entry->timetosend, jiffies)) {
   1302			if (cm_node->state != IRDMA_CM_STATE_OFFLOADED) {
   1303				if (nexttimeout > send_entry->timetosend ||
   1304				    !settimer) {
   1305					nexttimeout = send_entry->timetosend;
   1306					settimer = 1;
   1307				}
   1308			} else {
   1309				irdma_free_retrans_entry(cm_node);
   1310			}
   1311			goto done;
   1312		}
   1313
   1314		if (cm_node->state == IRDMA_CM_STATE_OFFLOADED ||
   1315		    cm_node->state == IRDMA_CM_STATE_CLOSED) {
   1316			irdma_free_retrans_entry(cm_node);
   1317			goto done;
   1318		}
   1319
   1320		if (!send_entry->retranscount || !send_entry->retrycount) {
   1321			irdma_free_retrans_entry(cm_node);
   1322
   1323			spin_unlock_irqrestore(&cm_node->retrans_list_lock,
   1324					       flags);
   1325			irdma_retrans_expired(cm_node);
   1326			cm_node->state = IRDMA_CM_STATE_CLOSED;
   1327			spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
   1328			goto done;
   1329		}
   1330		spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
   1331
   1332		vsi = &cm_node->iwdev->vsi;
   1333		if (!cm_node->ack_rcvd) {
   1334			refcount_inc(&send_entry->sqbuf->refcount);
   1335			irdma_puda_send_buf(vsi->ilq, send_entry->sqbuf);
   1336			cm_node->cm_core->stats_pkt_retrans++;
   1337		}
   1338
   1339		spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
   1340		if (send_entry->send_retrans) {
   1341			send_entry->retranscount--;
   1342			timetosend = (IRDMA_RETRY_TIMEOUT <<
   1343				      (IRDMA_DEFAULT_RETRANS -
   1344				       send_entry->retranscount));
   1345
   1346			send_entry->timetosend = jiffies +
   1347			    min(timetosend, IRDMA_MAX_TIMEOUT);
   1348			if (nexttimeout > send_entry->timetosend || !settimer) {
   1349				nexttimeout = send_entry->timetosend;
   1350				settimer = 1;
   1351			}
   1352		} else {
   1353			int close_when_complete;
   1354
   1355			close_when_complete = send_entry->close_when_complete;
   1356			irdma_free_retrans_entry(cm_node);
   1357			if (close_when_complete)
   1358				irdma_rem_ref_cm_node(cm_node);
   1359		}
   1360done:
   1361		spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
   1362		irdma_rem_ref_cm_node(cm_node);
   1363	}
   1364
   1365	if (settimer) {
   1366		spin_lock_irqsave(&cm_core->ht_lock, flags);
   1367		if (!timer_pending(&cm_core->tcp_timer)) {
   1368			cm_core->tcp_timer.expires = nexttimeout;
   1369			add_timer(&cm_core->tcp_timer);
   1370		}
   1371		spin_unlock_irqrestore(&cm_core->ht_lock, flags);
   1372	}
   1373}
   1374
   1375/**
   1376 * irdma_send_syn - send SYN packet
   1377 * @cm_node: connection's node
   1378 * @sendack: flag to set ACK bit or not
   1379 */
   1380int irdma_send_syn(struct irdma_cm_node *cm_node, u32 sendack)
   1381{
   1382	struct irdma_puda_buf *sqbuf;
   1383	int flags = SET_SYN;
   1384	char optionsbuf[sizeof(struct option_mss) +
   1385			sizeof(struct option_windowscale) +
   1386			sizeof(struct option_base) + TCP_OPTIONS_PADDING];
   1387	struct irdma_kmem_info opts;
   1388	int optionssize = 0;
   1389	/* Sending MSS option */
   1390	union all_known_options *options;
   1391
   1392	opts.addr = optionsbuf;
   1393	if (!cm_node)
   1394		return -EINVAL;
   1395
   1396	options = (union all_known_options *)&optionsbuf[optionssize];
   1397	options->mss.optionnum = OPTION_NUM_MSS;
   1398	options->mss.len = sizeof(struct option_mss);
   1399	options->mss.mss = htons(cm_node->tcp_cntxt.mss);
   1400	optionssize += sizeof(struct option_mss);
   1401
   1402	options = (union all_known_options *)&optionsbuf[optionssize];
   1403	options->windowscale.optionnum = OPTION_NUM_WINDOW_SCALE;
   1404	options->windowscale.len = sizeof(struct option_windowscale);
   1405	options->windowscale.shiftcount = cm_node->tcp_cntxt.rcv_wscale;
   1406	optionssize += sizeof(struct option_windowscale);
   1407	options = (union all_known_options *)&optionsbuf[optionssize];
   1408	options->eol = OPTION_NUM_EOL;
   1409	optionssize += 1;
   1410
   1411	if (sendack)
   1412		flags |= SET_ACK;
   1413
   1414	opts.size = optionssize;
   1415
   1416	sqbuf = cm_node->cm_core->form_cm_frame(cm_node, &opts, NULL, NULL,
   1417						flags);
   1418	if (!sqbuf)
   1419		return -ENOMEM;
   1420
   1421	return irdma_schedule_cm_timer(cm_node, sqbuf, IRDMA_TIMER_TYPE_SEND, 1,
   1422				       0);
   1423}
   1424
   1425/**
   1426 * irdma_send_ack - Send ACK packet
   1427 * @cm_node: connection's node
   1428 */
   1429void irdma_send_ack(struct irdma_cm_node *cm_node)
   1430{
   1431	struct irdma_puda_buf *sqbuf;
   1432	struct irdma_sc_vsi *vsi = &cm_node->iwdev->vsi;
   1433
   1434	sqbuf = cm_node->cm_core->form_cm_frame(cm_node, NULL, NULL, NULL,
   1435						SET_ACK);
   1436	if (sqbuf)
   1437		irdma_puda_send_buf(vsi->ilq, sqbuf);
   1438}
   1439
   1440/**
   1441 * irdma_send_fin - Send FIN pkt
   1442 * @cm_node: connection's node
   1443 */
   1444static int irdma_send_fin(struct irdma_cm_node *cm_node)
   1445{
   1446	struct irdma_puda_buf *sqbuf;
   1447
   1448	sqbuf = cm_node->cm_core->form_cm_frame(cm_node, NULL, NULL, NULL,
   1449						SET_ACK | SET_FIN);
   1450	if (!sqbuf)
   1451		return -ENOMEM;
   1452
   1453	return irdma_schedule_cm_timer(cm_node, sqbuf, IRDMA_TIMER_TYPE_SEND, 1,
   1454				       0);
   1455}
   1456
   1457/**
   1458 * irdma_find_listener - find a cm node listening on this addr-port pair
   1459 * @cm_core: cm's core
   1460 * @dst_addr: listener ip addr
   1461 * @dst_port: listener tcp port num
   1462 * @vlan_id: virtual LAN ID
   1463 * @listener_state: state to match with listen node's
   1464 */
   1465static struct irdma_cm_listener *
   1466irdma_find_listener(struct irdma_cm_core *cm_core, u32 *dst_addr, u16 dst_port,
   1467		    u16 vlan_id, enum irdma_cm_listener_state listener_state)
   1468{
   1469	struct irdma_cm_listener *listen_node;
   1470	static const u32 ip_zero[4] = { 0, 0, 0, 0 };
   1471	u32 listen_addr[4];
   1472	u16 listen_port;
   1473	unsigned long flags;
   1474
   1475	/* walk the listen list and find a listener matching this addr/port/vlan */
   1476	spin_lock_irqsave(&cm_core->listen_list_lock, flags);
   1477	list_for_each_entry (listen_node, &cm_core->listen_list, list) {
   1478		memcpy(listen_addr, listen_node->loc_addr, sizeof(listen_addr));
   1479		listen_port = listen_node->loc_port;
   1480		/* compare node pair, return node handle if a match */
   1481		if ((!memcmp(listen_addr, dst_addr, sizeof(listen_addr)) ||
   1482		     !memcmp(listen_addr, ip_zero, sizeof(listen_addr))) &&
   1483		    listen_port == dst_port &&
   1484		    vlan_id == listen_node->vlan_id &&
   1485		    (listener_state & listen_node->listener_state)) {
   1486			refcount_inc(&listen_node->refcnt);
   1487			spin_unlock_irqrestore(&cm_core->listen_list_lock,
   1488					       flags);
   1489			trace_irdma_find_listener(listen_node);
   1490			return listen_node;
   1491		}
   1492	}
   1493	spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
   1494
   1495	return NULL;
   1496}
   1497
   1498/**
   1499 * irdma_del_multiple_qhash - Remove qhash and child listens
   1500 * @iwdev: iWarp device
   1501 * @cm_info: CM info for parent listen node
   1502 * @cm_parent_listen_node: The parent listen node
   1503 */
   1504static int irdma_del_multiple_qhash(struct irdma_device *iwdev,
   1505				    struct irdma_cm_info *cm_info,
   1506				    struct irdma_cm_listener *cm_parent_listen_node)
   1507{
   1508	struct irdma_cm_listener *child_listen_node;
   1509	struct list_head *pos, *tpos;
   1510	unsigned long flags;
   1511	int ret = -EINVAL;
   1512
   1513	spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
   1514	list_for_each_safe (pos, tpos,
   1515			    &cm_parent_listen_node->child_listen_list) {
   1516		child_listen_node = list_entry(pos, struct irdma_cm_listener,
   1517					       child_listen_list);
   1518		if (child_listen_node->ipv4)
   1519			ibdev_dbg(&iwdev->ibdev,
   1520				  "CM: removing child listen for IP=%pI4, port=%d, vlan=%d\n",
   1521				  child_listen_node->loc_addr,
   1522				  child_listen_node->loc_port,
   1523				  child_listen_node->vlan_id);
   1524		else
   1525			ibdev_dbg(&iwdev->ibdev,
   1526				  "CM: removing child listen for IP=%pI6, port=%d, vlan=%d\n",
   1527				  child_listen_node->loc_addr,
   1528				  child_listen_node->loc_port,
   1529				  child_listen_node->vlan_id);
   1530		trace_irdma_del_multiple_qhash(child_listen_node);
   1531		list_del(pos);
   1532		memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
   1533		       sizeof(cm_info->loc_addr));
   1534		cm_info->vlan_id = child_listen_node->vlan_id;
   1535		if (child_listen_node->qhash_set) {
   1536			ret = irdma_manage_qhash(iwdev, cm_info,
   1537						 IRDMA_QHASH_TYPE_TCP_SYN,
   1538						 IRDMA_QHASH_MANAGE_TYPE_DELETE,
   1539						 NULL, false);
   1540			child_listen_node->qhash_set = false;
   1541		} else {
   1542			ret = 0;
   1543		}
   1544		ibdev_dbg(&iwdev->ibdev,
   1545			  "CM: Child listen node freed = %p\n",
   1546			  child_listen_node);
   1547		kfree(child_listen_node);
   1548		cm_parent_listen_node->cm_core->stats_listen_nodes_destroyed++;
   1549	}
   1550	spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
   1551
   1552	return ret;
   1553}
   1554
   1555/**
   1556 * irdma_netdev_vlan_ipv6 - Gets the netdev and mac
   1557 * @addr: local IPv6 address
   1558 * @vlan_id: vlan id for the given IPv6 address
   1559 * @mac: mac address for the given IPv6 address
   1560 *
   1561 * Returns the net_device of the IPv6 address and also sets the
   1562 * vlan id and mac for that address.
   1563 */
   1564struct net_device *irdma_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *mac)
   1565{
   1566	struct net_device *ip_dev = NULL;
   1567	struct in6_addr laddr6;
   1568
   1569	if (!IS_ENABLED(CONFIG_IPV6))
   1570		return NULL;
   1571
   1572	irdma_copy_ip_htonl(laddr6.in6_u.u6_addr32, addr);
   1573	if (vlan_id)
   1574		*vlan_id = 0xFFFF;	/* Match rdma_vlan_dev_vlan_id() */
   1575	if (mac)
   1576		eth_zero_addr(mac);
   1577
   1578	rcu_read_lock();
   1579	for_each_netdev_rcu (&init_net, ip_dev) {
   1580		if (ipv6_chk_addr(&init_net, &laddr6, ip_dev, 1)) {
   1581			if (vlan_id)
   1582				*vlan_id = rdma_vlan_dev_vlan_id(ip_dev);
   1583			if (ip_dev->dev_addr && mac)
   1584				ether_addr_copy(mac, ip_dev->dev_addr);
   1585			break;
   1586		}
   1587	}
   1588	rcu_read_unlock();
   1589
   1590	return ip_dev;
   1591}
   1592
   1593/**
   1594 * irdma_get_vlan_ipv4 - Returns the vlan_id for IPv4 address
   1595 * @addr: local IPv4 address
   1596 */
   1597u16 irdma_get_vlan_ipv4(u32 *addr)
   1598{
   1599	struct net_device *netdev;
   1600	u16 vlan_id = 0xFFFF;
   1601
   1602	netdev = ip_dev_find(&init_net, htonl(addr[0]));
   1603	if (netdev) {
   1604		vlan_id = rdma_vlan_dev_vlan_id(netdev);
   1605		dev_put(netdev);
   1606	}
   1607
   1608	return vlan_id;
   1609}
   1610
   1611/**
   1612 * irdma_add_mqh_6 - Adds multiple qhashes for IPv6
   1613 * @iwdev: iWarp device
   1614 * @cm_info: CM info for parent listen node
   1615 * @cm_parent_listen_node: The parent listen node
   1616 *
   1617 * Adds a qhash and a child listen node for every IPv6 address
   1618 * on the adapter and adds the associated qhash filter
   1619 */
   1620static int irdma_add_mqh_6(struct irdma_device *iwdev,
   1621			   struct irdma_cm_info *cm_info,
   1622			   struct irdma_cm_listener *cm_parent_listen_node)
   1623{
   1624	struct net_device *ip_dev;
   1625	struct inet6_dev *idev;
   1626	struct inet6_ifaddr *ifp, *tmp;
   1627	struct irdma_cm_listener *child_listen_node;
   1628	unsigned long flags;
   1629	int ret = 0;
   1630
   1631	rtnl_lock();
   1632	for_each_netdev(&init_net, ip_dev) {
   1633		if (!(ip_dev->flags & IFF_UP))
   1634			continue;
   1635
   1636		if (((rdma_vlan_dev_vlan_id(ip_dev) >= VLAN_N_VID) ||
   1637		     (rdma_vlan_dev_real_dev(ip_dev) != iwdev->netdev)) &&
   1638		    ip_dev != iwdev->netdev)
   1639			continue;
   1640
   1641		idev = __in6_dev_get(ip_dev);
   1642		if (!idev) {
   1643			ibdev_dbg(&iwdev->ibdev, "CM: idev == NULL\n");
   1644			break;
   1645		}
   1646		list_for_each_entry_safe (ifp, tmp, &idev->addr_list, if_list) {
   1647			ibdev_dbg(&iwdev->ibdev, "CM: IP=%pI6, vlan_id=%d, MAC=%pM\n",
   1648				  &ifp->addr, rdma_vlan_dev_vlan_id(ip_dev),
   1649				  ip_dev->dev_addr);
   1650			child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_KERNEL);
   1651			ibdev_dbg(&iwdev->ibdev, "CM: Allocating child listener %p\n",
   1652				  child_listen_node);
   1653			if (!child_listen_node) {
   1654				ibdev_dbg(&iwdev->ibdev, "CM: listener memory allocation failed\n");
   1655				ret = -ENOMEM;
   1656				goto exit;
   1657			}
   1658
   1659			cm_info->vlan_id = rdma_vlan_dev_vlan_id(ip_dev);
   1660			cm_parent_listen_node->vlan_id = cm_info->vlan_id;
   1661			memcpy(child_listen_node, cm_parent_listen_node,
   1662			       sizeof(*child_listen_node));
   1663			irdma_copy_ip_ntohl(child_listen_node->loc_addr,
   1664					    ifp->addr.in6_u.u6_addr32);
   1665			memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
   1666			       sizeof(cm_info->loc_addr));
   1667			ret = irdma_manage_qhash(iwdev, cm_info,
   1668						 IRDMA_QHASH_TYPE_TCP_SYN,
   1669						 IRDMA_QHASH_MANAGE_TYPE_ADD,
   1670						 NULL, true);
   1671			if (ret) {
   1672				kfree(child_listen_node);
   1673				continue;
   1674			}
   1675
   1676			trace_irdma_add_mqh_6(iwdev, child_listen_node,
   1677					      ip_dev->dev_addr);
   1678
   1679			child_listen_node->qhash_set = true;
   1680			spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
   1681			list_add(&child_listen_node->child_listen_list,
   1682				 &cm_parent_listen_node->child_listen_list);
   1683			spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
   1684			cm_parent_listen_node->cm_core->stats_listen_nodes_created++;
   1685		}
   1686	}
   1687exit:
   1688	rtnl_unlock();
   1689
   1690	return ret;
   1691}
   1692
   1693/**
   1694 * irdma_add_mqh_4 - Adds multiple qhashes for IPv4
   1695 * @iwdev: iWarp device
   1696 * @cm_info: CM info for parent listen node
   1697 * @cm_parent_listen_node: The parent listen node
   1698 *
   1699 * Adds a qhash and a child listen node for every IPv4 address
   1700 * on the adapter and adds the associated qhash filter
   1701 */
   1702static int irdma_add_mqh_4(struct irdma_device *iwdev,
   1703			   struct irdma_cm_info *cm_info,
   1704			   struct irdma_cm_listener *cm_parent_listen_node)
   1705{
   1706	struct net_device *ip_dev;
   1707	struct in_device *idev;
   1708	struct irdma_cm_listener *child_listen_node;
   1709	unsigned long flags;
   1710	const struct in_ifaddr *ifa;
   1711	int ret = 0;
   1712
   1713	rtnl_lock();
   1714	for_each_netdev(&init_net, ip_dev) {
   1715		if (!(ip_dev->flags & IFF_UP))
   1716			continue;
   1717
   1718		if (((rdma_vlan_dev_vlan_id(ip_dev) >= VLAN_N_VID) ||
   1719		     (rdma_vlan_dev_real_dev(ip_dev) != iwdev->netdev)) &&
   1720		    ip_dev != iwdev->netdev)
   1721			continue;
   1722
    1723		idev = in_dev_get(ip_dev);
    		if (!idev)
    			continue;
    1724		in_dev_for_each_ifa_rtnl(ifa, idev) {
   1725			ibdev_dbg(&iwdev->ibdev,
   1726				  "CM: Allocating child CM Listener forIP=%pI4, vlan_id=%d, MAC=%pM\n",
   1727				  &ifa->ifa_address, rdma_vlan_dev_vlan_id(ip_dev),
   1728				  ip_dev->dev_addr);
    1729			child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_KERNEL);
    1730			ibdev_dbg(&iwdev->ibdev, "CM: Allocating child listener %p\n",
    1731				  child_listen_node);
    1732			if (!child_listen_node) {
    1733				ibdev_dbg(&iwdev->ibdev, "CM: listener memory allocation\n");
    1734				in_dev_put(idev);
    1735				ret = -ENOMEM;
    1736				goto exit;
    1737			}
    1738			cm_parent_listen_node->cm_core->stats_listen_nodes_created++;
   1739
   1740			cm_info->vlan_id = rdma_vlan_dev_vlan_id(ip_dev);
   1741			cm_parent_listen_node->vlan_id = cm_info->vlan_id;
   1742			memcpy(child_listen_node, cm_parent_listen_node,
   1743			       sizeof(*child_listen_node));
   1744			child_listen_node->loc_addr[0] =
   1745				ntohl(ifa->ifa_address);
   1746			memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
   1747			       sizeof(cm_info->loc_addr));
   1748			ret = irdma_manage_qhash(iwdev, cm_info,
   1749						 IRDMA_QHASH_TYPE_TCP_SYN,
   1750						 IRDMA_QHASH_MANAGE_TYPE_ADD,
   1751						 NULL, true);
   1752			if (ret) {
   1753				kfree(child_listen_node);
   1754				cm_parent_listen_node->cm_core
   1755					->stats_listen_nodes_created--;
   1756				continue;
   1757			}
   1758
   1759			trace_irdma_add_mqh_4(iwdev, child_listen_node,
   1760					      ip_dev->dev_addr);
   1761
   1762			child_listen_node->qhash_set = true;
   1763			spin_lock_irqsave(&iwdev->cm_core.listen_list_lock,
   1764					  flags);
   1765			list_add(&child_listen_node->child_listen_list,
   1766				 &cm_parent_listen_node->child_listen_list);
   1767			spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
   1768		}
   1769		in_dev_put(idev);
   1770	}
   1771exit:
   1772	rtnl_unlock();
   1773
   1774	return ret;
   1775}
   1776
   1777/**
   1778 * irdma_add_mqh - Adds multiple qhashes
   1779 * @iwdev: iWarp device
   1780 * @cm_info: CM info for parent listen node
   1781 * @cm_listen_node: The parent listen node
   1782 */
   1783static int irdma_add_mqh(struct irdma_device *iwdev,
   1784			 struct irdma_cm_info *cm_info,
   1785			 struct irdma_cm_listener *cm_listen_node)
   1786{
   1787	if (cm_info->ipv4)
   1788		return irdma_add_mqh_4(iwdev, cm_info, cm_listen_node);
   1789	else
   1790		return irdma_add_mqh_6(iwdev, cm_info, cm_listen_node);
   1791}
   1792
   1793/**
   1794 * irdma_reset_list_prep - add connection nodes slated for reset to list
   1795 * @cm_core: cm's core
   1796 * @listener: pointer to listener node
    1797 * @reset_list: a list to which matching cm_nodes will be added
   1798 */
   1799static void irdma_reset_list_prep(struct irdma_cm_core *cm_core,
   1800				  struct irdma_cm_listener *listener,
   1801				  struct list_head *reset_list)
   1802{
   1803	struct irdma_cm_node *cm_node;
   1804	int bkt;
   1805
   1806	hash_for_each_rcu(cm_core->cm_hash_tbl, bkt, cm_node, list) {
   1807		if (cm_node->listener == listener &&
   1808		    !cm_node->accelerated &&
   1809		    refcount_inc_not_zero(&cm_node->refcnt))
   1810			list_add(&cm_node->reset_entry, reset_list);
   1811	}
   1812}
   1813
   1814/**
   1815 * irdma_dec_refcnt_listen - delete listener and associated cm nodes
   1816 * @cm_core: cm's core
   1817 * @listener: pointer to listener node
   1818 * @free_hanging_nodes: to free associated cm_nodes
   1819 * @apbvt_del: flag to delete the apbvt
   1820 */
   1821static int irdma_dec_refcnt_listen(struct irdma_cm_core *cm_core,
   1822				   struct irdma_cm_listener *listener,
   1823				   int free_hanging_nodes, bool apbvt_del)
   1824{
   1825	int err;
   1826	struct list_head *list_pos;
   1827	struct list_head *list_temp;
   1828	struct irdma_cm_node *cm_node;
   1829	struct list_head reset_list;
   1830	struct irdma_cm_info nfo;
   1831	enum irdma_cm_node_state old_state;
   1832	unsigned long flags;
   1833
   1834	trace_irdma_dec_refcnt_listen(listener, __builtin_return_address(0));
   1835	/* free non-accelerated child nodes for this listener */
   1836	INIT_LIST_HEAD(&reset_list);
   1837	if (free_hanging_nodes) {
   1838		rcu_read_lock();
   1839		irdma_reset_list_prep(cm_core, listener, &reset_list);
   1840		rcu_read_unlock();
   1841	}
   1842
   1843	list_for_each_safe (list_pos, list_temp, &reset_list) {
   1844		cm_node = container_of(list_pos, struct irdma_cm_node,
   1845				       reset_entry);
   1846		if (cm_node->state >= IRDMA_CM_STATE_FIN_WAIT1) {
   1847			irdma_rem_ref_cm_node(cm_node);
   1848			continue;
   1849		}
   1850
   1851		irdma_cleanup_retrans_entry(cm_node);
   1852		err = irdma_send_reset(cm_node);
   1853		if (err) {
   1854			cm_node->state = IRDMA_CM_STATE_CLOSED;
   1855			ibdev_dbg(&cm_node->iwdev->ibdev,
   1856				  "CM: send reset failed\n");
   1857		} else {
   1858			old_state = cm_node->state;
   1859			cm_node->state = IRDMA_CM_STATE_LISTENER_DESTROYED;
   1860			if (old_state != IRDMA_CM_STATE_MPAREQ_RCVD)
   1861				irdma_rem_ref_cm_node(cm_node);
   1862		}
   1863	}
   1864
   1865	if (refcount_dec_and_test(&listener->refcnt)) {
   1866		spin_lock_irqsave(&cm_core->listen_list_lock, flags);
   1867		list_del(&listener->list);
   1868		spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
   1869
   1870		if (apbvt_del)
   1871			irdma_del_apbvt(listener->iwdev,
   1872					listener->apbvt_entry);
   1873		memcpy(nfo.loc_addr, listener->loc_addr, sizeof(nfo.loc_addr));
   1874		nfo.loc_port = listener->loc_port;
   1875		nfo.ipv4 = listener->ipv4;
   1876		nfo.vlan_id = listener->vlan_id;
   1877		nfo.user_pri = listener->user_pri;
   1878		nfo.qh_qpid = listener->iwdev->vsi.ilq->qp_id;
   1879
   1880		if (!list_empty(&listener->child_listen_list)) {
   1881			irdma_del_multiple_qhash(listener->iwdev, &nfo,
   1882						 listener);
   1883		} else {
   1884			if (listener->qhash_set)
   1885				irdma_manage_qhash(listener->iwdev,
   1886						   &nfo,
   1887						   IRDMA_QHASH_TYPE_TCP_SYN,
   1888						   IRDMA_QHASH_MANAGE_TYPE_DELETE,
   1889						   NULL, false);
   1890		}
   1891
   1892		cm_core->stats_listen_destroyed++;
   1893		cm_core->stats_listen_nodes_destroyed++;
   1894		ibdev_dbg(&listener->iwdev->ibdev,
   1895			  "CM: loc_port=0x%04x loc_addr=%pI4 cm_listen_node=%p cm_id=%p qhash_set=%d vlan_id=%d apbvt_del=%d\n",
   1896			  listener->loc_port, listener->loc_addr, listener,
   1897			  listener->cm_id, listener->qhash_set,
   1898			  listener->vlan_id, apbvt_del);
   1899		kfree(listener);
   1900		listener = NULL;
   1901		return 0;
   1902	}
   1903
   1904	return -EINVAL;
   1905}
   1906
   1907/**
   1908 * irdma_cm_del_listen - delete a listener
   1909 * @cm_core: cm's core
   1910 * @listener: passive connection's listener
   1911 * @apbvt_del: flag to delete apbvt
   1912 */
   1913static int irdma_cm_del_listen(struct irdma_cm_core *cm_core,
   1914			       struct irdma_cm_listener *listener,
   1915			       bool apbvt_del)
   1916{
   1917	listener->listener_state = IRDMA_CM_LISTENER_PASSIVE_STATE;
   1918	listener->cm_id = NULL;
   1919
   1920	return irdma_dec_refcnt_listen(cm_core, listener, 1, apbvt_del);
   1921}
   1922
   1923/**
   1924 * irdma_addr_resolve_neigh - resolve neighbor address
   1925 * @iwdev: iwarp device structure
   1926 * @src_ip: local ip address
   1927 * @dst_ip: remote ip address
   1928 * @arpindex: if there is an arp entry
   1929 */
   1930static int irdma_addr_resolve_neigh(struct irdma_device *iwdev, u32 src_ip,
   1931				    u32 dst_ip, int arpindex)
   1932{
   1933	struct rtable *rt;
   1934	struct neighbour *neigh;
   1935	int rc = arpindex;
   1936	__be32 dst_ipaddr = htonl(dst_ip);
   1937	__be32 src_ipaddr = htonl(src_ip);
   1938
   1939	rt = ip_route_output(&init_net, dst_ipaddr, src_ipaddr, 0, 0);
   1940	if (IS_ERR(rt)) {
   1941		ibdev_dbg(&iwdev->ibdev, "CM: ip_route_output fail\n");
   1942		return -EINVAL;
   1943	}
   1944
   1945	neigh = dst_neigh_lookup(&rt->dst, &dst_ipaddr);
   1946	if (!neigh)
   1947		goto exit;
   1948
   1949	if (neigh->nud_state & NUD_VALID)
   1950		rc = irdma_add_arp(iwdev->rf, &dst_ip, true, neigh->ha);
   1951	else
   1952		neigh_event_send(neigh, NULL);
   1953	if (neigh)
   1954		neigh_release(neigh);
   1955exit:
   1956	ip_rt_put(rt);
   1957
   1958	return rc;
   1959}
   1960
   1961/**
   1962 * irdma_get_dst_ipv6 - get destination cache entry via ipv6 lookup
   1963 * @src_addr: local ipv6 sock address
   1964 * @dst_addr: destination ipv6 sock address
   1965 */
   1966static struct dst_entry *irdma_get_dst_ipv6(struct sockaddr_in6 *src_addr,
   1967					    struct sockaddr_in6 *dst_addr)
   1968{
   1969	struct dst_entry *dst = NULL;
   1970
   1971	if ((IS_ENABLED(CONFIG_IPV6))) {
   1972		struct flowi6 fl6 = {};
   1973
   1974		fl6.daddr = dst_addr->sin6_addr;
   1975		fl6.saddr = src_addr->sin6_addr;
   1976		if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
   1977			fl6.flowi6_oif = dst_addr->sin6_scope_id;
   1978
   1979		dst = ip6_route_output(&init_net, NULL, &fl6);
   1980	}
   1981
   1982	return dst;
   1983}
   1984
   1985/**
   1986 * irdma_addr_resolve_neigh_ipv6 - resolve neighbor ipv6 address
   1987 * @iwdev: iwarp device structure
   1988 * @src: local ip address
   1989 * @dest: remote ip address
   1990 * @arpindex: if there is an arp entry
   1991 */
   1992static int irdma_addr_resolve_neigh_ipv6(struct irdma_device *iwdev, u32 *src,
   1993					 u32 *dest, int arpindex)
   1994{
   1995	struct neighbour *neigh;
   1996	int rc = arpindex;
   1997	struct dst_entry *dst;
   1998	struct sockaddr_in6 dst_addr = {};
   1999	struct sockaddr_in6 src_addr = {};
   2000
   2001	dst_addr.sin6_family = AF_INET6;
   2002	irdma_copy_ip_htonl(dst_addr.sin6_addr.in6_u.u6_addr32, dest);
   2003	src_addr.sin6_family = AF_INET6;
   2004	irdma_copy_ip_htonl(src_addr.sin6_addr.in6_u.u6_addr32, src);
   2005	dst = irdma_get_dst_ipv6(&src_addr, &dst_addr);
   2006	if (!dst || dst->error) {
   2007		if (dst) {
    2008			ibdev_dbg(&iwdev->ibdev,
    2009				  "CM: ip6_route_output returned dst->error = %d\n",
    2010				  dst->error);
    2011			dst_release(dst);
   2012		}
   2013		return -EINVAL;
   2014	}
   2015
   2016	neigh = dst_neigh_lookup(dst, dst_addr.sin6_addr.in6_u.u6_addr32);
   2017	if (!neigh)
   2018		goto exit;
   2019
   2020	ibdev_dbg(&iwdev->ibdev, "CM: dst_neigh_lookup MAC=%pM\n",
   2021		  neigh->ha);
   2022
   2023	trace_irdma_addr_resolve(iwdev, neigh->ha);
   2024
   2025	if (neigh->nud_state & NUD_VALID)
   2026		rc = irdma_add_arp(iwdev->rf, dest, false, neigh->ha);
   2027	else
   2028		neigh_event_send(neigh, NULL);
   2029	if (neigh)
   2030		neigh_release(neigh);
   2031exit:
   2032	dst_release(dst);
   2033
   2034	return rc;
   2035}
   2036
   2037/**
    2038 * irdma_find_node - find the cm node matching the given connection quad
   2039 * @cm_core: cm's core
   2040 * @rem_port: remote tcp port num
   2041 * @rem_addr: remote ip addr
   2042 * @loc_port: local tcp port num
   2043 * @loc_addr: local ip addr
   2044 * @vlan_id: local VLAN ID
   2045 */
   2046struct irdma_cm_node *irdma_find_node(struct irdma_cm_core *cm_core,
   2047				      u16 rem_port, u32 *rem_addr, u16 loc_port,
   2048				      u32 *loc_addr, u16 vlan_id)
   2049{
   2050	struct irdma_cm_node *cm_node;
   2051	u32 key = (rem_port << 16) | loc_port;
   2052
   2053	rcu_read_lock();
   2054	hash_for_each_possible_rcu(cm_core->cm_hash_tbl, cm_node, list, key) {
   2055		if (cm_node->vlan_id == vlan_id &&
   2056		    cm_node->loc_port == loc_port && cm_node->rem_port == rem_port &&
   2057		    !memcmp(cm_node->loc_addr, loc_addr, sizeof(cm_node->loc_addr)) &&
   2058		    !memcmp(cm_node->rem_addr, rem_addr, sizeof(cm_node->rem_addr))) {
   2059			if (!refcount_inc_not_zero(&cm_node->refcnt))
   2060				goto exit;
   2061			rcu_read_unlock();
   2062			trace_irdma_find_node(cm_node, 0, NULL);
   2063			return cm_node;
   2064		}
   2065	}
   2066
   2067exit:
   2068	rcu_read_unlock();
   2069
   2070	/* no owner node */
   2071	return NULL;
   2072}
   2073
   2074/**
   2075 * irdma_add_hte_node - add a cm node to the hash table
   2076 * @cm_core: cm's core
   2077 * @cm_node: connection's node
   2078 */
   2079static void irdma_add_hte_node(struct irdma_cm_core *cm_core,
   2080			       struct irdma_cm_node *cm_node)
   2081{
   2082	unsigned long flags;
   2083	u32 key = (cm_node->rem_port << 16) | cm_node->loc_port;
   2084
   2085	spin_lock_irqsave(&cm_core->ht_lock, flags);
   2086	hash_add_rcu(cm_core->cm_hash_tbl, &cm_node->list, key);
   2087	spin_unlock_irqrestore(&cm_core->ht_lock, flags);
   2088}
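
/*
 * Editor's note: irdma_add_hte_node() and irdma_find_node() must compute the
 * same hash key for a connection, or lookups will miss.  Both use
 *
 *	u32 key = (rem_port << 16) | loc_port;
 *
 * with ports in host order, e.g. rem_port 0xC000 and loc_port 0x1234 give
 * key 0xC0001234.
 */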
   2089
   2090/**
   2091 * irdma_ipv4_is_lpb - check if loopback
   2092 * @loc_addr: local addr to compare
   2093 * @rem_addr: remote address
   2094 */
   2095bool irdma_ipv4_is_lpb(u32 loc_addr, u32 rem_addr)
   2096{
   2097	return ipv4_is_loopback(htonl(rem_addr)) || (loc_addr == rem_addr);
   2098}
   2099
   2100/**
   2101 * irdma_ipv6_is_lpb - check if loopback
   2102 * @loc_addr: local addr to compare
   2103 * @rem_addr: remote address
   2104 */
   2105bool irdma_ipv6_is_lpb(u32 *loc_addr, u32 *rem_addr)
   2106{
   2107	struct in6_addr raddr6;
   2108
   2109	irdma_copy_ip_htonl(raddr6.in6_u.u6_addr32, rem_addr);
   2110
   2111	return !memcmp(loc_addr, rem_addr, 16) || ipv6_addr_loopback(&raddr6);
   2112}
   2113
   2114/**
   2115 * irdma_cm_create_ah - create a cm address handle
   2116 * @cm_node: The connection manager node to create AH for
   2117 * @wait: Provides option to wait for ah creation or not
   2118 */
   2119static int irdma_cm_create_ah(struct irdma_cm_node *cm_node, bool wait)
   2120{
   2121	struct irdma_ah_info ah_info = {};
   2122	struct irdma_device *iwdev = cm_node->iwdev;
   2123
   2124	ether_addr_copy(ah_info.mac_addr, iwdev->netdev->dev_addr);
   2125
   2126	ah_info.hop_ttl = 0x40;
   2127	ah_info.tc_tos = cm_node->tos;
   2128	ah_info.vsi = &iwdev->vsi;
   2129
   2130	if (cm_node->ipv4) {
   2131		ah_info.ipv4_valid = true;
   2132		ah_info.dest_ip_addr[0] = cm_node->rem_addr[0];
   2133		ah_info.src_ip_addr[0] = cm_node->loc_addr[0];
   2134		ah_info.do_lpbk = irdma_ipv4_is_lpb(ah_info.src_ip_addr[0],
   2135						    ah_info.dest_ip_addr[0]);
   2136	} else {
   2137		memcpy(ah_info.dest_ip_addr, cm_node->rem_addr,
   2138		       sizeof(ah_info.dest_ip_addr));
   2139		memcpy(ah_info.src_ip_addr, cm_node->loc_addr,
   2140		       sizeof(ah_info.src_ip_addr));
   2141		ah_info.do_lpbk = irdma_ipv6_is_lpb(ah_info.src_ip_addr,
   2142						    ah_info.dest_ip_addr);
   2143	}
   2144
   2145	ah_info.vlan_tag = cm_node->vlan_id;
   2146	if (cm_node->vlan_id < VLAN_N_VID) {
   2147		ah_info.insert_vlan_tag = 1;
   2148		ah_info.vlan_tag |= cm_node->user_pri << VLAN_PRIO_SHIFT;
   2149	}
   2150
   2151	ah_info.dst_arpindex =
   2152		irdma_arp_table(iwdev->rf, ah_info.dest_ip_addr,
   2153				ah_info.ipv4_valid, NULL, IRDMA_ARP_RESOLVE);
   2154
   2155	if (irdma_puda_create_ah(&iwdev->rf->sc_dev, &ah_info, wait,
   2156				 IRDMA_PUDA_RSRC_TYPE_ILQ, cm_node,
   2157				 &cm_node->ah))
   2158		return -ENOMEM;
   2159
   2160	trace_irdma_create_ah(cm_node);
   2161	return 0;
   2162}
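
/*
 * Editor's note: a worked example of the VLAN tag composition above, with
 * illustrative values vlan_id = 100 and user_pri = 5.  VLAN_PRIO_SHIFT is 13,
 * so:
 *
 *	ah_info.vlan_tag = 100 | (5 << 13);	// 0x0064 | 0xA000 = 0xA064
 *
 * i.e. the 3-bit priority lands in the top bits of the 16-bit TCI and the
 * 12-bit VLAN ID stays in the low bits.
 */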
   2163
   2164/**
   2165 * irdma_cm_free_ah - free a cm address handle
    2166 * @cm_node: The connection manager node to free the AH for
   2167 */
   2168static void irdma_cm_free_ah(struct irdma_cm_node *cm_node)
   2169{
   2170	struct irdma_device *iwdev = cm_node->iwdev;
   2171
   2172	trace_irdma_cm_free_ah(cm_node);
   2173	irdma_puda_free_ah(&iwdev->rf->sc_dev, cm_node->ah);
   2174	cm_node->ah = NULL;
   2175}
   2176
   2177/**
   2178 * irdma_make_cm_node - create a new instance of a cm node
   2179 * @cm_core: cm's core
   2180 * @iwdev: iwarp device structure
   2181 * @cm_info: quad info for connection
   2182 * @listener: passive connection's listener
   2183 */
   2184static struct irdma_cm_node *
   2185irdma_make_cm_node(struct irdma_cm_core *cm_core, struct irdma_device *iwdev,
   2186		   struct irdma_cm_info *cm_info,
   2187		   struct irdma_cm_listener *listener)
   2188{
   2189	struct irdma_cm_node *cm_node;
   2190	int oldarpindex;
   2191	int arpindex;
   2192	struct net_device *netdev = iwdev->netdev;
   2193
   2194	/* create an hte and cm_node for this instance */
   2195	cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
   2196	if (!cm_node)
   2197		return NULL;
   2198
   2199	/* set our node specific transport info */
   2200	cm_node->ipv4 = cm_info->ipv4;
   2201	cm_node->vlan_id = cm_info->vlan_id;
   2202	if (cm_node->vlan_id >= VLAN_N_VID && iwdev->dcb_vlan_mode)
   2203		cm_node->vlan_id = 0;
   2204	cm_node->tos = cm_info->tos;
   2205	cm_node->user_pri = cm_info->user_pri;
   2206	if (listener) {
   2207		if (listener->tos != cm_info->tos)
   2208			ibdev_warn(&iwdev->ibdev,
   2209				   "application TOS[%d] and remote client TOS[%d] mismatch\n",
   2210				   listener->tos, cm_info->tos);
   2211		if (iwdev->vsi.dscp_mode) {
   2212			cm_node->user_pri = listener->user_pri;
   2213		} else {
   2214			cm_node->tos = max(listener->tos, cm_info->tos);
   2215			cm_node->user_pri = rt_tos2priority(cm_node->tos);
   2216		}
   2217		ibdev_dbg(&iwdev->ibdev,
   2218			  "DCB: listener: TOS:[%d] UP:[%d]\n", cm_node->tos,
   2219			  cm_node->user_pri);
   2220		trace_irdma_listener_tos(iwdev, cm_node->tos,
   2221					 cm_node->user_pri);
   2222	}
   2223	memcpy(cm_node->loc_addr, cm_info->loc_addr, sizeof(cm_node->loc_addr));
   2224	memcpy(cm_node->rem_addr, cm_info->rem_addr, sizeof(cm_node->rem_addr));
   2225	cm_node->loc_port = cm_info->loc_port;
   2226	cm_node->rem_port = cm_info->rem_port;
   2227
   2228	cm_node->mpa_frame_rev = IRDMA_CM_DEFAULT_MPA_VER;
   2229	cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;
   2230	cm_node->iwdev = iwdev;
   2231	cm_node->dev = &iwdev->rf->sc_dev;
   2232
   2233	cm_node->ird_size = cm_node->dev->hw_attrs.max_hw_ird;
   2234	cm_node->ord_size = cm_node->dev->hw_attrs.max_hw_ord;
   2235
   2236	cm_node->listener = listener;
   2237	cm_node->cm_id = cm_info->cm_id;
   2238	ether_addr_copy(cm_node->loc_mac, netdev->dev_addr);
   2239	spin_lock_init(&cm_node->retrans_list_lock);
   2240	cm_node->ack_rcvd = false;
   2241
   2242	init_completion(&cm_node->establish_comp);
   2243	refcount_set(&cm_node->refcnt, 1);
   2244	/* associate our parent CM core */
   2245	cm_node->cm_core = cm_core;
   2246	cm_node->tcp_cntxt.loc_id = IRDMA_CM_DEFAULT_LOCAL_ID;
   2247	cm_node->tcp_cntxt.rcv_wscale = iwdev->rcv_wscale;
   2248	cm_node->tcp_cntxt.rcv_wnd = iwdev->rcv_wnd >> cm_node->tcp_cntxt.rcv_wscale;
   2249	if (cm_node->ipv4) {
   2250		cm_node->tcp_cntxt.loc_seq_num = secure_tcp_seq(htonl(cm_node->loc_addr[0]),
   2251								htonl(cm_node->rem_addr[0]),
   2252								htons(cm_node->loc_port),
   2253								htons(cm_node->rem_port));
   2254		cm_node->tcp_cntxt.mss = iwdev->vsi.mtu - IRDMA_MTU_TO_MSS_IPV4;
   2255	} else if (IS_ENABLED(CONFIG_IPV6)) {
   2256		__be32 loc[4] = {
   2257			htonl(cm_node->loc_addr[0]), htonl(cm_node->loc_addr[1]),
   2258			htonl(cm_node->loc_addr[2]), htonl(cm_node->loc_addr[3])
   2259		};
   2260		__be32 rem[4] = {
   2261			htonl(cm_node->rem_addr[0]), htonl(cm_node->rem_addr[1]),
   2262			htonl(cm_node->rem_addr[2]), htonl(cm_node->rem_addr[3])
   2263		};
   2264		cm_node->tcp_cntxt.loc_seq_num = secure_tcpv6_seq(loc, rem,
   2265								  htons(cm_node->loc_port),
   2266								  htons(cm_node->rem_port));
   2267		cm_node->tcp_cntxt.mss = iwdev->vsi.mtu - IRDMA_MTU_TO_MSS_IPV6;
   2268	}
   2269
   2270	if ((cm_node->ipv4 &&
   2271	     irdma_ipv4_is_lpb(cm_node->loc_addr[0], cm_node->rem_addr[0])) ||
   2272	    (!cm_node->ipv4 &&
   2273	     irdma_ipv6_is_lpb(cm_node->loc_addr, cm_node->rem_addr))) {
   2274		cm_node->do_lpb = true;
   2275		arpindex = irdma_arp_table(iwdev->rf, cm_node->rem_addr,
   2276					   cm_node->ipv4, NULL,
   2277					   IRDMA_ARP_RESOLVE);
   2278	} else {
   2279		oldarpindex = irdma_arp_table(iwdev->rf, cm_node->rem_addr,
   2280					      cm_node->ipv4, NULL,
   2281					      IRDMA_ARP_RESOLVE);
   2282		if (cm_node->ipv4)
   2283			arpindex = irdma_addr_resolve_neigh(iwdev,
   2284							    cm_info->loc_addr[0],
   2285							    cm_info->rem_addr[0],
   2286							    oldarpindex);
   2287		else if (IS_ENABLED(CONFIG_IPV6))
   2288			arpindex = irdma_addr_resolve_neigh_ipv6(iwdev,
   2289								 cm_info->loc_addr,
   2290								 cm_info->rem_addr,
   2291								 oldarpindex);
   2292		else
   2293			arpindex = -EINVAL;
   2294	}
   2295
   2296	if (arpindex < 0)
   2297		goto err;
   2298
   2299	ether_addr_copy(cm_node->rem_mac,
   2300			iwdev->rf->arp_table[arpindex].mac_addr);
   2301	irdma_add_hte_node(cm_core, cm_node);
   2302	cm_core->stats_nodes_created++;
   2303	return cm_node;
   2304
   2305err:
   2306	kfree(cm_node);
   2307
   2308	return NULL;
   2309}
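
/*
 * Editor's note: address resolution in irdma_make_cm_node() takes one of two
 * paths: a loopback quad only consults the driver ARP table
 * (IRDMA_ARP_RESOLVE), while a remote peer first reads the current ARP index
 * and then refreshes it via irdma_addr_resolve_neigh() or
 * irdma_addr_resolve_neigh_ipv6().  Any negative arpindex aborts node
 * creation and the half-built node is freed.
 */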
   2310
   2311static void irdma_destroy_connection(struct irdma_cm_node *cm_node)
   2312{
   2313	struct irdma_cm_core *cm_core = cm_node->cm_core;
   2314	struct irdma_qp *iwqp;
   2315	struct irdma_cm_info nfo;
   2316
   2317	/* if the node is destroyed before connection was accelerated */
   2318	if (!cm_node->accelerated && cm_node->accept_pend) {
   2319		ibdev_dbg(&cm_node->iwdev->ibdev,
   2320			  "CM: node destroyed before established\n");
   2321		atomic_dec(&cm_node->listener->pend_accepts_cnt);
   2322	}
   2323	if (cm_node->close_entry)
   2324		irdma_handle_close_entry(cm_node, 0);
   2325	if (cm_node->listener) {
   2326		irdma_dec_refcnt_listen(cm_core, cm_node->listener, 0, true);
   2327	} else {
   2328		if (cm_node->apbvt_set) {
   2329			irdma_del_apbvt(cm_node->iwdev, cm_node->apbvt_entry);
   2330			cm_node->apbvt_set = 0;
   2331		}
   2332		irdma_get_addr_info(cm_node, &nfo);
   2333		if (cm_node->qhash_set) {
   2334			nfo.qh_qpid = cm_node->iwdev->vsi.ilq->qp_id;
   2335			irdma_manage_qhash(cm_node->iwdev, &nfo,
   2336					   IRDMA_QHASH_TYPE_TCP_ESTABLISHED,
   2337					   IRDMA_QHASH_MANAGE_TYPE_DELETE, NULL,
   2338					   false);
   2339			cm_node->qhash_set = 0;
   2340		}
   2341	}
   2342
   2343	iwqp = cm_node->iwqp;
   2344	if (iwqp) {
   2345		cm_node->cm_id->rem_ref(cm_node->cm_id);
   2346		cm_node->cm_id = NULL;
   2347		iwqp->cm_id = NULL;
   2348		irdma_qp_rem_ref(&iwqp->ibqp);
   2349		cm_node->iwqp = NULL;
   2350	} else if (cm_node->qhash_set) {
   2351		irdma_get_addr_info(cm_node, &nfo);
   2352		nfo.qh_qpid = cm_node->iwdev->vsi.ilq->qp_id;
   2353		irdma_manage_qhash(cm_node->iwdev, &nfo,
   2354				   IRDMA_QHASH_TYPE_TCP_ESTABLISHED,
   2355				   IRDMA_QHASH_MANAGE_TYPE_DELETE, NULL, false);
   2356		cm_node->qhash_set = 0;
   2357	}
   2358
   2359	cm_core->cm_free_ah(cm_node);
   2360}
   2361
   2362/**
   2363 * irdma_rem_ref_cm_node - destroy an instance of a cm node
   2364 * @cm_node: connection's node
   2365 */
   2366void irdma_rem_ref_cm_node(struct irdma_cm_node *cm_node)
   2367{
   2368	struct irdma_cm_core *cm_core = cm_node->cm_core;
   2369	unsigned long flags;
   2370
   2371	trace_irdma_rem_ref_cm_node(cm_node, 0, __builtin_return_address(0));
   2372	spin_lock_irqsave(&cm_core->ht_lock, flags);
   2373
   2374	if (!refcount_dec_and_test(&cm_node->refcnt)) {
   2375		spin_unlock_irqrestore(&cm_core->ht_lock, flags);
   2376		return;
   2377	}
   2378	if (cm_node->iwqp) {
   2379		cm_node->iwqp->cm_node = NULL;
   2380		cm_node->iwqp->cm_id = NULL;
   2381	}
   2382	hash_del_rcu(&cm_node->list);
   2383	cm_node->cm_core->stats_nodes_destroyed++;
   2384
   2385	spin_unlock_irqrestore(&cm_core->ht_lock, flags);
   2386
   2387	irdma_destroy_connection(cm_node);
   2388
   2389	kfree_rcu(cm_node, rcu_head);
   2390}
   2391
   2392/**
   2393 * irdma_handle_fin_pkt - FIN packet received
   2394 * @cm_node: connection's node
   2395 */
   2396static void irdma_handle_fin_pkt(struct irdma_cm_node *cm_node)
   2397{
   2398	switch (cm_node->state) {
   2399	case IRDMA_CM_STATE_SYN_RCVD:
   2400	case IRDMA_CM_STATE_SYN_SENT:
   2401	case IRDMA_CM_STATE_ESTABLISHED:
   2402	case IRDMA_CM_STATE_MPAREJ_RCVD:
   2403		cm_node->tcp_cntxt.rcv_nxt++;
   2404		irdma_cleanup_retrans_entry(cm_node);
   2405		cm_node->state = IRDMA_CM_STATE_LAST_ACK;
   2406		irdma_send_fin(cm_node);
   2407		break;
   2408	case IRDMA_CM_STATE_MPAREQ_SENT:
   2409		irdma_create_event(cm_node, IRDMA_CM_EVENT_ABORTED);
   2410		cm_node->tcp_cntxt.rcv_nxt++;
   2411		irdma_cleanup_retrans_entry(cm_node);
   2412		cm_node->state = IRDMA_CM_STATE_CLOSED;
   2413		refcount_inc(&cm_node->refcnt);
   2414		irdma_send_reset(cm_node);
   2415		break;
   2416	case IRDMA_CM_STATE_FIN_WAIT1:
   2417		cm_node->tcp_cntxt.rcv_nxt++;
   2418		irdma_cleanup_retrans_entry(cm_node);
   2419		cm_node->state = IRDMA_CM_STATE_CLOSING;
   2420		irdma_send_ack(cm_node);
   2421		/*
   2422		 * Wait for ACK as this is simultaneous close.
   2423		 * After we receive ACK, do not send anything.
   2424		 * Just rm the node.
   2425		 */
   2426		break;
   2427	case IRDMA_CM_STATE_FIN_WAIT2:
   2428		cm_node->tcp_cntxt.rcv_nxt++;
   2429		irdma_cleanup_retrans_entry(cm_node);
   2430		cm_node->state = IRDMA_CM_STATE_TIME_WAIT;
   2431		irdma_send_ack(cm_node);
   2432		irdma_schedule_cm_timer(cm_node, NULL, IRDMA_TIMER_TYPE_CLOSE,
   2433					1, 0);
   2434		break;
   2435	case IRDMA_CM_STATE_TIME_WAIT:
   2436		cm_node->tcp_cntxt.rcv_nxt++;
   2437		irdma_cleanup_retrans_entry(cm_node);
   2438		cm_node->state = IRDMA_CM_STATE_CLOSED;
   2439		irdma_rem_ref_cm_node(cm_node);
   2440		break;
   2441	case IRDMA_CM_STATE_OFFLOADED:
   2442	default:
   2443		ibdev_dbg(&cm_node->iwdev->ibdev,
   2444			  "CM: bad state node state = %d\n", cm_node->state);
   2445		break;
   2446	}
   2447}
   2448
   2449/**
   2450 * irdma_handle_rst_pkt - process received RST packet
   2451 * @cm_node: connection's node
   2452 * @rbuf: receive buffer
   2453 */
   2454static void irdma_handle_rst_pkt(struct irdma_cm_node *cm_node,
   2455				 struct irdma_puda_buf *rbuf)
   2456{
   2457	ibdev_dbg(&cm_node->iwdev->ibdev,
   2458		  "CM: caller: %pS cm_node=%p state=%d rem_port=0x%04x loc_port=0x%04x rem_addr=%pI4 loc_addr=%pI4\n",
   2459		  __builtin_return_address(0), cm_node, cm_node->state,
   2460		  cm_node->rem_port, cm_node->loc_port, cm_node->rem_addr,
   2461		  cm_node->loc_addr);
   2462
   2463	irdma_cleanup_retrans_entry(cm_node);
   2464	switch (cm_node->state) {
   2465	case IRDMA_CM_STATE_SYN_SENT:
   2466	case IRDMA_CM_STATE_MPAREQ_SENT:
   2467		switch (cm_node->mpa_frame_rev) {
   2468		case IETF_MPA_V2:
    2469			/* Drop down to MPA_V1 */
   2470			cm_node->mpa_frame_rev = IETF_MPA_V1;
   2471			/* send a syn and goto syn sent state */
   2472			cm_node->state = IRDMA_CM_STATE_SYN_SENT;
   2473			if (irdma_send_syn(cm_node, 0))
   2474				irdma_active_open_err(cm_node, false);
   2475			break;
   2476		case IETF_MPA_V1:
   2477		default:
   2478			irdma_active_open_err(cm_node, false);
   2479			break;
   2480		}
   2481		break;
   2482	case IRDMA_CM_STATE_MPAREQ_RCVD:
   2483		atomic_inc(&cm_node->passive_state);
   2484		break;
   2485	case IRDMA_CM_STATE_ESTABLISHED:
   2486	case IRDMA_CM_STATE_SYN_RCVD:
   2487	case IRDMA_CM_STATE_LISTENING:
   2488		irdma_passive_open_err(cm_node, false);
   2489		break;
   2490	case IRDMA_CM_STATE_OFFLOADED:
   2491		irdma_active_open_err(cm_node, false);
   2492		break;
   2493	case IRDMA_CM_STATE_CLOSED:
   2494		break;
   2495	case IRDMA_CM_STATE_FIN_WAIT2:
   2496	case IRDMA_CM_STATE_FIN_WAIT1:
   2497	case IRDMA_CM_STATE_LAST_ACK:
   2498	case IRDMA_CM_STATE_TIME_WAIT:
   2499		cm_node->state = IRDMA_CM_STATE_CLOSED;
   2500		irdma_rem_ref_cm_node(cm_node);
   2501		break;
   2502	default:
   2503		break;
   2504	}
   2505}
   2506
   2507/**
   2508 * irdma_handle_rcv_mpa - Process a recv'd mpa buffer
   2509 * @cm_node: connection's node
   2510 * @rbuf: receive buffer
   2511 */
   2512static void irdma_handle_rcv_mpa(struct irdma_cm_node *cm_node,
   2513				 struct irdma_puda_buf *rbuf)
   2514{
   2515	int err;
   2516	int datasize = rbuf->datalen;
   2517	u8 *dataloc = rbuf->data;
   2518
   2519	enum irdma_cm_event_type type = IRDMA_CM_EVENT_UNKNOWN;
   2520	u32 res_type;
   2521
   2522	err = irdma_parse_mpa(cm_node, dataloc, &res_type, datasize);
   2523	if (err) {
   2524		if (cm_node->state == IRDMA_CM_STATE_MPAREQ_SENT)
   2525			irdma_active_open_err(cm_node, true);
   2526		else
   2527			irdma_passive_open_err(cm_node, true);
   2528		return;
   2529	}
   2530
   2531	switch (cm_node->state) {
   2532	case IRDMA_CM_STATE_ESTABLISHED:
   2533		if (res_type == IRDMA_MPA_REQUEST_REJECT)
   2534			ibdev_dbg(&cm_node->iwdev->ibdev,
   2535				  "CM: state for reject\n");
   2536		cm_node->state = IRDMA_CM_STATE_MPAREQ_RCVD;
   2537		type = IRDMA_CM_EVENT_MPA_REQ;
   2538		irdma_send_ack(cm_node); /* ACK received MPA request */
   2539		atomic_set(&cm_node->passive_state,
   2540			   IRDMA_PASSIVE_STATE_INDICATED);
   2541		break;
   2542	case IRDMA_CM_STATE_MPAREQ_SENT:
   2543		irdma_cleanup_retrans_entry(cm_node);
   2544		if (res_type == IRDMA_MPA_REQUEST_REJECT) {
   2545			type = IRDMA_CM_EVENT_MPA_REJECT;
   2546			cm_node->state = IRDMA_CM_STATE_MPAREJ_RCVD;
   2547		} else {
   2548			type = IRDMA_CM_EVENT_CONNECTED;
   2549			cm_node->state = IRDMA_CM_STATE_OFFLOADED;
   2550		}
   2551		irdma_send_ack(cm_node);
   2552		break;
   2553	default:
   2554		ibdev_dbg(&cm_node->iwdev->ibdev,
   2555			  "CM: wrong cm_node state =%d\n", cm_node->state);
   2556		break;
   2557	}
   2558	irdma_create_event(cm_node, type);
   2559}
   2560
   2561/**
   2562 * irdma_check_syn - Check for error on received syn ack
   2563 * @cm_node: connection's node
   2564 * @tcph: pointer tcp header
   2565 */
   2566static int irdma_check_syn(struct irdma_cm_node *cm_node, struct tcphdr *tcph)
   2567{
   2568	if (ntohl(tcph->ack_seq) != cm_node->tcp_cntxt.loc_seq_num) {
   2569		irdma_active_open_err(cm_node, true);
   2570		return 1;
   2571	}
   2572
   2573	return 0;
   2574}
   2575
   2576/**
    2577 * irdma_check_seq - check if seq numbers are OK
    2578 * @cm_node: connection's node
    2579 * @tcph: pointer to tcp header
   2580 */
   2581static int irdma_check_seq(struct irdma_cm_node *cm_node, struct tcphdr *tcph)
   2582{
   2583	u32 seq;
   2584	u32 ack_seq;
   2585	u32 loc_seq_num = cm_node->tcp_cntxt.loc_seq_num;
   2586	u32 rcv_nxt = cm_node->tcp_cntxt.rcv_nxt;
   2587	u32 rcv_wnd;
   2588	int err = 0;
   2589
   2590	seq = ntohl(tcph->seq);
   2591	ack_seq = ntohl(tcph->ack_seq);
   2592	rcv_wnd = cm_node->tcp_cntxt.rcv_wnd;
   2593	if (ack_seq != loc_seq_num ||
   2594	    !between(seq, rcv_nxt, (rcv_nxt + rcv_wnd)))
   2595		err = -1;
   2596	if (err)
   2597		ibdev_dbg(&cm_node->iwdev->ibdev,
   2598			  "CM: seq number err\n");
   2599
   2600	return err;
   2601}
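
/*
 * Editor's note: a worked example of the acceptance test above, with
 * illustrative values loc_seq_num = 2000, rcv_nxt = 1000, rcv_wnd = 5840:
 * the segment is accepted only if ack_seq == 2000 and
 * between(seq, 1000, 6840) holds, i.e. seq lies in [1000, 6840] with 32-bit
 * wraparound handled by between().  A segment with seq = 999 or
 * ack_seq = 1999 is dropped with "seq number err".
 */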
   2602
   2603void irdma_add_conn_est_qh(struct irdma_cm_node *cm_node)
   2604{
   2605	struct irdma_cm_info nfo;
   2606
   2607	irdma_get_addr_info(cm_node, &nfo);
   2608	nfo.qh_qpid = cm_node->iwdev->vsi.ilq->qp_id;
   2609	irdma_manage_qhash(cm_node->iwdev, &nfo,
   2610			   IRDMA_QHASH_TYPE_TCP_ESTABLISHED,
   2611			   IRDMA_QHASH_MANAGE_TYPE_ADD,
   2612			   cm_node, false);
   2613	cm_node->qhash_set = true;
   2614}
   2615
   2616/**
    2617 * irdma_handle_syn_pkt - handle a SYN packet (passive side)
   2618 * @cm_node: connection's node
   2619 * @rbuf: receive buffer
   2620 */
   2621static void irdma_handle_syn_pkt(struct irdma_cm_node *cm_node,
   2622				 struct irdma_puda_buf *rbuf)
   2623{
   2624	struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
   2625	int err;
   2626	u32 inc_sequence;
   2627	int optionsize;
   2628
   2629	optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
   2630	inc_sequence = ntohl(tcph->seq);
   2631
   2632	switch (cm_node->state) {
   2633	case IRDMA_CM_STATE_SYN_SENT:
   2634	case IRDMA_CM_STATE_MPAREQ_SENT:
   2635		/* Rcvd syn on active open connection */
    2636		irdma_active_open_err(cm_node, true);
   2637		break;
   2638	case IRDMA_CM_STATE_LISTENING:
   2639		/* Passive OPEN */
   2640		if (atomic_read(&cm_node->listener->pend_accepts_cnt) >
   2641		    cm_node->listener->backlog) {
   2642			cm_node->cm_core->stats_backlog_drops++;
   2643			irdma_passive_open_err(cm_node, false);
   2644			break;
   2645		}
   2646		err = irdma_handle_tcp_options(cm_node, tcph, optionsize, 1);
   2647		if (err) {
   2648			irdma_passive_open_err(cm_node, false);
   2649			/* drop pkt */
   2650			break;
   2651		}
   2652		err = cm_node->cm_core->cm_create_ah(cm_node, false);
   2653		if (err) {
   2654			irdma_passive_open_err(cm_node, false);
   2655			/* drop pkt */
   2656			break;
   2657		}
   2658		cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1;
   2659		cm_node->accept_pend = 1;
   2660		atomic_inc(&cm_node->listener->pend_accepts_cnt);
   2661
   2662		cm_node->state = IRDMA_CM_STATE_SYN_RCVD;
   2663		break;
   2664	case IRDMA_CM_STATE_CLOSED:
   2665		irdma_cleanup_retrans_entry(cm_node);
   2666		refcount_inc(&cm_node->refcnt);
   2667		irdma_send_reset(cm_node);
   2668		break;
   2669	case IRDMA_CM_STATE_OFFLOADED:
   2670	case IRDMA_CM_STATE_ESTABLISHED:
   2671	case IRDMA_CM_STATE_FIN_WAIT1:
   2672	case IRDMA_CM_STATE_FIN_WAIT2:
   2673	case IRDMA_CM_STATE_MPAREQ_RCVD:
   2674	case IRDMA_CM_STATE_LAST_ACK:
   2675	case IRDMA_CM_STATE_CLOSING:
   2676	case IRDMA_CM_STATE_UNKNOWN:
   2677	default:
   2678		break;
   2679	}
   2680}
   2681
   2682/**
   2683 * irdma_handle_synack_pkt - Process SYN+ACK packet (active side)
   2684 * @cm_node: connection's node
   2685 * @rbuf: receive buffer
   2686 */
   2687static void irdma_handle_synack_pkt(struct irdma_cm_node *cm_node,
   2688				    struct irdma_puda_buf *rbuf)
   2689{
   2690	struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
   2691	int err;
   2692	u32 inc_sequence;
   2693	int optionsize;
   2694
   2695	optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
   2696	inc_sequence = ntohl(tcph->seq);
   2697	switch (cm_node->state) {
   2698	case IRDMA_CM_STATE_SYN_SENT:
   2699		irdma_cleanup_retrans_entry(cm_node);
   2700		/* active open */
   2701		if (irdma_check_syn(cm_node, tcph)) {
   2702			ibdev_dbg(&cm_node->iwdev->ibdev,
   2703				  "CM: check syn fail\n");
   2704			return;
   2705		}
   2706		cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
   2707		/* setup options */
   2708		err = irdma_handle_tcp_options(cm_node, tcph, optionsize, 0);
   2709		if (err) {
   2710			ibdev_dbg(&cm_node->iwdev->ibdev,
   2711				  "CM: cm_node=%p tcp_options failed\n",
   2712				  cm_node);
   2713			break;
   2714		}
   2715		irdma_cleanup_retrans_entry(cm_node);
   2716		cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1;
    2717		irdma_send_ack(cm_node); /* ACK for the syn_ack */
   2718		err = irdma_send_mpa_request(cm_node);
   2719		if (err) {
   2720			ibdev_dbg(&cm_node->iwdev->ibdev,
   2721				  "CM: cm_node=%p irdma_send_mpa_request failed\n",
   2722				  cm_node);
   2723			break;
   2724		}
   2725		cm_node->state = IRDMA_CM_STATE_MPAREQ_SENT;
   2726		break;
   2727	case IRDMA_CM_STATE_MPAREQ_RCVD:
   2728		irdma_passive_open_err(cm_node, true);
   2729		break;
   2730	case IRDMA_CM_STATE_LISTENING:
   2731		cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
   2732		irdma_cleanup_retrans_entry(cm_node);
   2733		cm_node->state = IRDMA_CM_STATE_CLOSED;
   2734		irdma_send_reset(cm_node);
   2735		break;
   2736	case IRDMA_CM_STATE_CLOSED:
   2737		cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
   2738		irdma_cleanup_retrans_entry(cm_node);
   2739		refcount_inc(&cm_node->refcnt);
   2740		irdma_send_reset(cm_node);
   2741		break;
   2742	case IRDMA_CM_STATE_ESTABLISHED:
   2743	case IRDMA_CM_STATE_FIN_WAIT1:
   2744	case IRDMA_CM_STATE_FIN_WAIT2:
   2745	case IRDMA_CM_STATE_LAST_ACK:
   2746	case IRDMA_CM_STATE_OFFLOADED:
   2747	case IRDMA_CM_STATE_CLOSING:
   2748	case IRDMA_CM_STATE_UNKNOWN:
   2749	case IRDMA_CM_STATE_MPAREQ_SENT:
   2750	default:
   2751		break;
   2752	}
   2753}
   2754
   2755/**
   2756 * irdma_handle_ack_pkt - process packet with ACK
   2757 * @cm_node: connection's node
   2758 * @rbuf: receive buffer
   2759 */
   2760static int irdma_handle_ack_pkt(struct irdma_cm_node *cm_node,
   2761				struct irdma_puda_buf *rbuf)
   2762{
   2763	struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
   2764	u32 inc_sequence;
   2765	int ret;
   2766	int optionsize;
   2767	u32 datasize = rbuf->datalen;
   2768
   2769	optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
   2770
   2771	if (irdma_check_seq(cm_node, tcph))
   2772		return -EINVAL;
   2773
   2774	inc_sequence = ntohl(tcph->seq);
   2775	switch (cm_node->state) {
   2776	case IRDMA_CM_STATE_SYN_RCVD:
   2777		irdma_cleanup_retrans_entry(cm_node);
   2778		ret = irdma_handle_tcp_options(cm_node, tcph, optionsize, 1);
   2779		if (ret)
   2780			return ret;
   2781		cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
   2782		cm_node->state = IRDMA_CM_STATE_ESTABLISHED;
   2783		if (datasize) {
   2784			cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
   2785			irdma_handle_rcv_mpa(cm_node, rbuf);
   2786		}
   2787		break;
   2788	case IRDMA_CM_STATE_ESTABLISHED:
   2789		irdma_cleanup_retrans_entry(cm_node);
   2790		if (datasize) {
   2791			cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
   2792			irdma_handle_rcv_mpa(cm_node, rbuf);
   2793		}
   2794		break;
   2795	case IRDMA_CM_STATE_MPAREQ_SENT:
   2796		cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
   2797		if (datasize) {
   2798			cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
   2799			cm_node->ack_rcvd = false;
   2800			irdma_handle_rcv_mpa(cm_node, rbuf);
   2801		} else {
   2802			cm_node->ack_rcvd = true;
   2803		}
   2804		break;
   2805	case IRDMA_CM_STATE_LISTENING:
   2806		irdma_cleanup_retrans_entry(cm_node);
   2807		cm_node->state = IRDMA_CM_STATE_CLOSED;
   2808		irdma_send_reset(cm_node);
   2809		break;
   2810	case IRDMA_CM_STATE_CLOSED:
   2811		irdma_cleanup_retrans_entry(cm_node);
   2812		refcount_inc(&cm_node->refcnt);
   2813		irdma_send_reset(cm_node);
   2814		break;
   2815	case IRDMA_CM_STATE_LAST_ACK:
   2816	case IRDMA_CM_STATE_CLOSING:
   2817		irdma_cleanup_retrans_entry(cm_node);
   2818		cm_node->state = IRDMA_CM_STATE_CLOSED;
   2819		irdma_rem_ref_cm_node(cm_node);
   2820		break;
   2821	case IRDMA_CM_STATE_FIN_WAIT1:
   2822		irdma_cleanup_retrans_entry(cm_node);
   2823		cm_node->state = IRDMA_CM_STATE_FIN_WAIT2;
   2824		break;
   2825	case IRDMA_CM_STATE_SYN_SENT:
   2826	case IRDMA_CM_STATE_FIN_WAIT2:
   2827	case IRDMA_CM_STATE_OFFLOADED:
   2828	case IRDMA_CM_STATE_MPAREQ_RCVD:
   2829	case IRDMA_CM_STATE_UNKNOWN:
   2830	default:
   2831		irdma_cleanup_retrans_entry(cm_node);
   2832		break;
   2833	}
   2834
   2835	return 0;
   2836}
   2837
   2838/**
   2839 * irdma_process_pkt - process cm packet
   2840 * @cm_node: connection's node
   2841 * @rbuf: receive buffer
   2842 */
   2843static void irdma_process_pkt(struct irdma_cm_node *cm_node,
   2844			      struct irdma_puda_buf *rbuf)
   2845{
   2846	enum irdma_tcpip_pkt_type pkt_type = IRDMA_PKT_TYPE_UNKNOWN;
   2847	struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
   2848	u32 fin_set = 0;
   2849	int err;
   2850
   2851	if (tcph->rst) {
   2852		pkt_type = IRDMA_PKT_TYPE_RST;
   2853	} else if (tcph->syn) {
   2854		pkt_type = IRDMA_PKT_TYPE_SYN;
   2855		if (tcph->ack)
   2856			pkt_type = IRDMA_PKT_TYPE_SYNACK;
   2857	} else if (tcph->ack) {
   2858		pkt_type = IRDMA_PKT_TYPE_ACK;
   2859	}
   2860	if (tcph->fin)
   2861		fin_set = 1;
   2862
   2863	switch (pkt_type) {
   2864	case IRDMA_PKT_TYPE_SYN:
   2865		irdma_handle_syn_pkt(cm_node, rbuf);
   2866		break;
   2867	case IRDMA_PKT_TYPE_SYNACK:
   2868		irdma_handle_synack_pkt(cm_node, rbuf);
   2869		break;
   2870	case IRDMA_PKT_TYPE_ACK:
   2871		err = irdma_handle_ack_pkt(cm_node, rbuf);
   2872		if (fin_set && !err)
   2873			irdma_handle_fin_pkt(cm_node);
   2874		break;
   2875	case IRDMA_PKT_TYPE_RST:
   2876		irdma_handle_rst_pkt(cm_node, rbuf);
   2877		break;
   2878	default:
   2879		if (fin_set &&
   2880		    (!irdma_check_seq(cm_node, (struct tcphdr *)rbuf->tcph)))
   2881			irdma_handle_fin_pkt(cm_node);
   2882		break;
   2883	}
   2884}
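
/*
 * Editor's note: the classification above dispatches on TCP flags roughly as
 * follows (RST wins over SYN, SYN+ACK over plain SYN; FIN is tracked
 * separately):
 *
 *	RST		-> irdma_handle_rst_pkt()
 *	SYN		-> irdma_handle_syn_pkt()
 *	SYN|ACK		-> irdma_handle_synack_pkt()
 *	ACK		-> irdma_handle_ack_pkt(), then irdma_handle_fin_pkt()
 *			   if FIN was also set and the ACK was accepted
 *	FIN only	-> irdma_handle_fin_pkt() once irdma_check_seq() passes
 */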
   2885
   2886/**
   2887 * irdma_make_listen_node - create a listen node with params
   2888 * @cm_core: cm's core
   2889 * @iwdev: iwarp device structure
   2890 * @cm_info: quad info for connection
   2891 */
   2892static struct irdma_cm_listener *
   2893irdma_make_listen_node(struct irdma_cm_core *cm_core,
   2894		       struct irdma_device *iwdev,
   2895		       struct irdma_cm_info *cm_info)
   2896{
   2897	struct irdma_cm_listener *listener;
   2898	unsigned long flags;
   2899
   2900	/* cannot have multiple matching listeners */
   2901	listener = irdma_find_listener(cm_core, cm_info->loc_addr,
   2902				       cm_info->loc_port, cm_info->vlan_id,
   2903				       IRDMA_CM_LISTENER_EITHER_STATE);
   2904	if (listener &&
   2905	    listener->listener_state == IRDMA_CM_LISTENER_ACTIVE_STATE) {
   2906		refcount_dec(&listener->refcnt);
   2907		return NULL;
   2908	}
   2909
   2910	if (!listener) {
   2911		/* create a CM listen node
   2912		 * 1/2 node to compare incoming traffic to
   2913		 */
   2914		listener = kzalloc(sizeof(*listener), GFP_KERNEL);
   2915		if (!listener)
   2916			return NULL;
   2917		cm_core->stats_listen_nodes_created++;
   2918		memcpy(listener->loc_addr, cm_info->loc_addr,
   2919		       sizeof(listener->loc_addr));
   2920		listener->loc_port = cm_info->loc_port;
   2921
   2922		INIT_LIST_HEAD(&listener->child_listen_list);
   2923
   2924		refcount_set(&listener->refcnt, 1);
   2925	} else {
   2926		listener->reused_node = 1;
   2927	}
   2928
   2929	listener->cm_id = cm_info->cm_id;
   2930	listener->ipv4 = cm_info->ipv4;
   2931	listener->vlan_id = cm_info->vlan_id;
   2932	atomic_set(&listener->pend_accepts_cnt, 0);
   2933	listener->cm_core = cm_core;
   2934	listener->iwdev = iwdev;
   2935
   2936	listener->backlog = cm_info->backlog;
   2937	listener->listener_state = IRDMA_CM_LISTENER_ACTIVE_STATE;
   2938
   2939	if (!listener->reused_node) {
   2940		spin_lock_irqsave(&cm_core->listen_list_lock, flags);
   2941		list_add(&listener->list, &cm_core->listen_list);
   2942		spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
   2943	}
   2944
   2945	return listener;
   2946}
   2947
   2948/**
   2949 * irdma_create_cm_node - make a connection node with params
   2950 * @cm_core: cm's core
   2951 * @iwdev: iwarp device structure
   2952 * @conn_param: connection parameters
   2953 * @cm_info: quad info for connection
   2954 * @caller_cm_node: pointer to cm_node structure to return
   2955 */
   2956static int irdma_create_cm_node(struct irdma_cm_core *cm_core,
   2957				struct irdma_device *iwdev,
   2958				struct iw_cm_conn_param *conn_param,
   2959				struct irdma_cm_info *cm_info,
   2960				struct irdma_cm_node **caller_cm_node)
   2961{
   2962	struct irdma_cm_node *cm_node;
   2963	u16 private_data_len = conn_param->private_data_len;
   2964	const void *private_data = conn_param->private_data;
   2965
   2966	/* create a CM connection node */
   2967	cm_node = irdma_make_cm_node(cm_core, iwdev, cm_info, NULL);
   2968	if (!cm_node)
   2969		return -ENOMEM;
   2970
   2971	/* set our node side to client (active) side */
   2972	cm_node->tcp_cntxt.client = 1;
   2973	cm_node->tcp_cntxt.rcv_wscale = IRDMA_CM_DEFAULT_RCV_WND_SCALE;
   2974
   2975	irdma_record_ird_ord(cm_node, conn_param->ird, conn_param->ord);
   2976
   2977	cm_node->pdata.size = private_data_len;
   2978	cm_node->pdata.addr = cm_node->pdata_buf;
   2979
   2980	memcpy(cm_node->pdata_buf, private_data, private_data_len);
   2981	*caller_cm_node = cm_node;
   2982
   2983	return 0;
   2984}
   2985
   2986/**
   2987 * irdma_cm_reject - reject and teardown a connection
   2988 * @cm_node: connection's node
   2989 * @pdata: ptr to private data for reject
   2990 * @plen: size of private data
   2991 */
   2992static int irdma_cm_reject(struct irdma_cm_node *cm_node, const void *pdata,
   2993			   u8 plen)
   2994{
   2995	int ret;
   2996	int passive_state;
   2997
   2998	if (cm_node->tcp_cntxt.client)
   2999		return 0;
   3000
   3001	irdma_cleanup_retrans_entry(cm_node);
   3002
   3003	passive_state = atomic_add_return(1, &cm_node->passive_state);
   3004	if (passive_state == IRDMA_SEND_RESET_EVENT) {
   3005		cm_node->state = IRDMA_CM_STATE_CLOSED;
   3006		irdma_rem_ref_cm_node(cm_node);
   3007		return 0;
   3008	}
   3009
   3010	if (cm_node->state == IRDMA_CM_STATE_LISTENER_DESTROYED) {
   3011		irdma_rem_ref_cm_node(cm_node);
   3012		return 0;
   3013	}
   3014
   3015	ret = irdma_send_mpa_reject(cm_node, pdata, plen);
   3016	if (!ret)
   3017		return 0;
   3018
   3019	cm_node->state = IRDMA_CM_STATE_CLOSED;
   3020	if (irdma_send_reset(cm_node))
   3021		ibdev_dbg(&cm_node->iwdev->ibdev,
   3022			  "CM: send reset failed\n");
   3023
   3024	return ret;
   3025}
   3026
   3027/**
   3028 * irdma_cm_close - close of cm connection
   3029 * @cm_node: connection's node
   3030 */
   3031static int irdma_cm_close(struct irdma_cm_node *cm_node)
   3032{
   3033	switch (cm_node->state) {
   3034	case IRDMA_CM_STATE_SYN_RCVD:
   3035	case IRDMA_CM_STATE_SYN_SENT:
   3036	case IRDMA_CM_STATE_ONE_SIDE_ESTABLISHED:
   3037	case IRDMA_CM_STATE_ESTABLISHED:
   3038	case IRDMA_CM_STATE_ACCEPTING:
   3039	case IRDMA_CM_STATE_MPAREQ_SENT:
   3040	case IRDMA_CM_STATE_MPAREQ_RCVD:
   3041		irdma_cleanup_retrans_entry(cm_node);
   3042		irdma_send_reset(cm_node);
   3043		break;
   3044	case IRDMA_CM_STATE_CLOSE_WAIT:
   3045		cm_node->state = IRDMA_CM_STATE_LAST_ACK;
   3046		irdma_send_fin(cm_node);
   3047		break;
   3048	case IRDMA_CM_STATE_FIN_WAIT1:
   3049	case IRDMA_CM_STATE_FIN_WAIT2:
   3050	case IRDMA_CM_STATE_LAST_ACK:
   3051	case IRDMA_CM_STATE_TIME_WAIT:
   3052	case IRDMA_CM_STATE_CLOSING:
   3053		return -EINVAL;
   3054	case IRDMA_CM_STATE_LISTENING:
   3055		irdma_cleanup_retrans_entry(cm_node);
   3056		irdma_send_reset(cm_node);
   3057		break;
   3058	case IRDMA_CM_STATE_MPAREJ_RCVD:
   3059	case IRDMA_CM_STATE_UNKNOWN:
   3060	case IRDMA_CM_STATE_INITED:
   3061	case IRDMA_CM_STATE_CLOSED:
   3062	case IRDMA_CM_STATE_LISTENER_DESTROYED:
   3063		irdma_rem_ref_cm_node(cm_node);
   3064		break;
   3065	case IRDMA_CM_STATE_OFFLOADED:
   3066		if (cm_node->send_entry)
   3067			ibdev_dbg(&cm_node->iwdev->ibdev,
   3068				  "CM: CM send_entry in OFFLOADED state\n");
   3069		irdma_rem_ref_cm_node(cm_node);
   3070		break;
   3071	}
   3072
   3073	return 0;
   3074}
   3075
   3076/**
   3077 * irdma_receive_ilq - recv an ETHERNET packet, and process it
   3078 * through CM
   3079 * @vsi: VSI structure of dev
   3080 * @rbuf: receive buffer
   3081 */
   3082void irdma_receive_ilq(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *rbuf)
   3083{
   3084	struct irdma_cm_node *cm_node;
   3085	struct irdma_cm_listener *listener;
   3086	struct iphdr *iph;
   3087	struct ipv6hdr *ip6h;
   3088	struct tcphdr *tcph;
   3089	struct irdma_cm_info cm_info = {};
   3090	struct irdma_device *iwdev = vsi->back_vsi;
   3091	struct irdma_cm_core *cm_core = &iwdev->cm_core;
   3092	struct vlan_ethhdr *ethh;
   3093	u16 vtag;
   3094
   3095	/* if vlan, then maclen = 18 else 14 */
   3096	iph = (struct iphdr *)rbuf->iph;
   3097	print_hex_dump_debug("ILQ: RECEIVE ILQ BUFFER", DUMP_PREFIX_OFFSET,
   3098			     16, 8, rbuf->mem.va, rbuf->totallen, false);
   3099	if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
   3100		if (rbuf->vlan_valid) {
   3101			vtag = rbuf->vlan_id;
   3102			cm_info.user_pri = (vtag & VLAN_PRIO_MASK) >>
   3103					   VLAN_PRIO_SHIFT;
   3104			cm_info.vlan_id = vtag & VLAN_VID_MASK;
   3105		} else {
   3106			cm_info.vlan_id = 0xFFFF;
   3107		}
   3108	} else {
   3109		ethh = rbuf->mem.va;
   3110
   3111		if (ethh->h_vlan_proto == htons(ETH_P_8021Q)) {
   3112			vtag = ntohs(ethh->h_vlan_TCI);
   3113			cm_info.user_pri = (vtag & VLAN_PRIO_MASK) >>
   3114					   VLAN_PRIO_SHIFT;
   3115			cm_info.vlan_id = vtag & VLAN_VID_MASK;
   3116			ibdev_dbg(&cm_core->iwdev->ibdev,
   3117				  "CM: vlan_id=%d\n", cm_info.vlan_id);
   3118		} else {
   3119			cm_info.vlan_id = 0xFFFF;
   3120		}
   3121	}
   3122	tcph = (struct tcphdr *)rbuf->tcph;
   3123
   3124	if (rbuf->ipv4) {
   3125		cm_info.loc_addr[0] = ntohl(iph->daddr);
   3126		cm_info.rem_addr[0] = ntohl(iph->saddr);
   3127		cm_info.ipv4 = true;
   3128		cm_info.tos = iph->tos;
   3129	} else {
   3130		ip6h = (struct ipv6hdr *)rbuf->iph;
   3131		irdma_copy_ip_ntohl(cm_info.loc_addr,
   3132				    ip6h->daddr.in6_u.u6_addr32);
   3133		irdma_copy_ip_ntohl(cm_info.rem_addr,
   3134				    ip6h->saddr.in6_u.u6_addr32);
   3135		cm_info.ipv4 = false;
   3136		cm_info.tos = (ip6h->priority << 4) | (ip6h->flow_lbl[0] >> 4);
   3137	}
   3138	cm_info.loc_port = ntohs(tcph->dest);
   3139	cm_info.rem_port = ntohs(tcph->source);
   3140	cm_node = irdma_find_node(cm_core, cm_info.rem_port, cm_info.rem_addr,
   3141				  cm_info.loc_port, cm_info.loc_addr, cm_info.vlan_id);
   3142
   3143	if (!cm_node) {
    3144		/* The only type of packet accepted here is a
    3145		 * PASSIVE open (SYN only)
    3146		 */
   3147		if (!tcph->syn || tcph->ack)
   3148			return;
   3149
   3150		listener = irdma_find_listener(cm_core,
   3151					       cm_info.loc_addr,
   3152					       cm_info.loc_port,
   3153					       cm_info.vlan_id,
   3154					       IRDMA_CM_LISTENER_ACTIVE_STATE);
   3155		if (!listener) {
   3156			cm_info.cm_id = NULL;
   3157			ibdev_dbg(&cm_core->iwdev->ibdev,
   3158				  "CM: no listener found\n");
   3159			return;
   3160		}
   3161
   3162		cm_info.cm_id = listener->cm_id;
   3163		cm_node = irdma_make_cm_node(cm_core, iwdev, &cm_info,
   3164					     listener);
   3165		if (!cm_node) {
   3166			ibdev_dbg(&cm_core->iwdev->ibdev,
   3167				  "CM: allocate node failed\n");
   3168			refcount_dec(&listener->refcnt);
   3169			return;
   3170		}
   3171
   3172		if (!tcph->rst && !tcph->fin) {
   3173			cm_node->state = IRDMA_CM_STATE_LISTENING;
   3174		} else {
   3175			irdma_rem_ref_cm_node(cm_node);
   3176			return;
   3177		}
   3178
   3179		refcount_inc(&cm_node->refcnt);
   3180	} else if (cm_node->state == IRDMA_CM_STATE_OFFLOADED) {
   3181		irdma_rem_ref_cm_node(cm_node);
   3182		return;
   3183	}
   3184
   3185	irdma_process_pkt(cm_node, rbuf);
   3186	irdma_rem_ref_cm_node(cm_node);
   3187}
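
/*
 * Editor's note: a worked example of the VLAN TCI decode above, assuming an
 * illustrative tag of 0x6005: user_pri = (0x6005 & VLAN_PRIO_MASK) >> 13 = 3
 * and vlan_id = 0x6005 & VLAN_VID_MASK = 5.  Untagged traffic leaves
 * cm_info.vlan_id at the 0xFFFF sentinel, which is also what untagged
 * cm_nodes and listeners store, so the later lookups still match.
 */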
   3188
   3189static int irdma_add_qh(struct irdma_cm_node *cm_node, bool active)
   3190{
   3191	if (!active)
   3192		irdma_add_conn_est_qh(cm_node);
   3193	return 0;
   3194}
   3195
   3196static void irdma_cm_free_ah_nop(struct irdma_cm_node *cm_node)
   3197{
   3198}
   3199
   3200/**
   3201 * irdma_setup_cm_core - setup top level instance of a cm core
   3202 * @iwdev: iwarp device structure
   3203 * @rdma_ver: HW version
   3204 */
   3205int irdma_setup_cm_core(struct irdma_device *iwdev, u8 rdma_ver)
   3206{
   3207	struct irdma_cm_core *cm_core = &iwdev->cm_core;
   3208
   3209	cm_core->iwdev = iwdev;
   3210	cm_core->dev = &iwdev->rf->sc_dev;
   3211
    3212	/* Handles CM event work items sent to the iWARP core */
   3213	cm_core->event_wq = alloc_ordered_workqueue("iwarp-event-wq", 0);
   3214	if (!cm_core->event_wq)
   3215		return -ENOMEM;
   3216
   3217	INIT_LIST_HEAD(&cm_core->listen_list);
   3218
   3219	timer_setup(&cm_core->tcp_timer, irdma_cm_timer_tick, 0);
   3220
   3221	spin_lock_init(&cm_core->ht_lock);
   3222	spin_lock_init(&cm_core->listen_list_lock);
   3223	spin_lock_init(&cm_core->apbvt_lock);
   3224	switch (rdma_ver) {
   3225	case IRDMA_GEN_1:
   3226		cm_core->form_cm_frame = irdma_form_uda_cm_frame;
   3227		cm_core->cm_create_ah = irdma_add_qh;
   3228		cm_core->cm_free_ah = irdma_cm_free_ah_nop;
   3229		break;
   3230	case IRDMA_GEN_2:
   3231	default:
   3232		cm_core->form_cm_frame = irdma_form_ah_cm_frame;
   3233		cm_core->cm_create_ah = irdma_cm_create_ah;
   3234		cm_core->cm_free_ah = irdma_cm_free_ah;
   3235	}
   3236
   3237	return 0;
   3238}
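
/*
 * Editor's note: the switch above selects per-generation callbacks so the
 * rest of the CM code stays generation-agnostic; e.g. the passive SYN path
 * calls cm_node->cm_core->cm_create_ah(cm_node, false), which resolves to
 * irdma_add_qh() on GEN_1 (no address handle needed) and to
 * irdma_cm_create_ah() on GEN_2 and later.
 */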
   3239
   3240/**
   3241 * irdma_cleanup_cm_core - deallocate a top level instance of a
   3242 * cm core
   3243 * @cm_core: cm's core
   3244 */
   3245void irdma_cleanup_cm_core(struct irdma_cm_core *cm_core)
   3246{
   3247	if (!cm_core)
   3248		return;
   3249
   3250	del_timer_sync(&cm_core->tcp_timer);
   3251
   3252	destroy_workqueue(cm_core->event_wq);
   3253	cm_core->dev->ws_reset(&cm_core->iwdev->vsi);
   3254}
   3255
   3256/**
   3257 * irdma_init_tcp_ctx - setup qp context
   3258 * @cm_node: connection's node
   3259 * @tcp_info: offload info for tcp
   3260 * @iwqp: associate qp for the connection
   3261 */
   3262static void irdma_init_tcp_ctx(struct irdma_cm_node *cm_node,
   3263			       struct irdma_tcp_offload_info *tcp_info,
   3264			       struct irdma_qp *iwqp)
   3265{
   3266	tcp_info->ipv4 = cm_node->ipv4;
   3267	tcp_info->drop_ooo_seg = !iwqp->iwdev->iw_ooo;
   3268	tcp_info->wscale = true;
   3269	tcp_info->ignore_tcp_opt = true;
   3270	tcp_info->ignore_tcp_uns_opt = true;
   3271	tcp_info->no_nagle = false;
   3272
   3273	tcp_info->ttl = IRDMA_DEFAULT_TTL;
   3274	tcp_info->rtt_var = IRDMA_DEFAULT_RTT_VAR;
   3275	tcp_info->ss_thresh = IRDMA_DEFAULT_SS_THRESH;
   3276	tcp_info->rexmit_thresh = IRDMA_DEFAULT_REXMIT_THRESH;
   3277
   3278	tcp_info->tcp_state = IRDMA_TCP_STATE_ESTABLISHED;
   3279	tcp_info->snd_wscale = cm_node->tcp_cntxt.snd_wscale;
   3280	tcp_info->rcv_wscale = cm_node->tcp_cntxt.rcv_wscale;
   3281
   3282	tcp_info->snd_nxt = cm_node->tcp_cntxt.loc_seq_num;
   3283	tcp_info->snd_wnd = cm_node->tcp_cntxt.snd_wnd;
   3284	tcp_info->rcv_nxt = cm_node->tcp_cntxt.rcv_nxt;
   3285	tcp_info->snd_max = cm_node->tcp_cntxt.loc_seq_num;
   3286
   3287	tcp_info->snd_una = cm_node->tcp_cntxt.loc_seq_num;
   3288	tcp_info->cwnd = 2 * cm_node->tcp_cntxt.mss;
   3289	tcp_info->snd_wl1 = cm_node->tcp_cntxt.rcv_nxt;
   3290	tcp_info->snd_wl2 = cm_node->tcp_cntxt.loc_seq_num;
   3291	tcp_info->max_snd_window = cm_node->tcp_cntxt.max_snd_wnd;
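       	/* rcv_wnd is kept unscaled in the cm node; apply rcv_wscale for HW */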
   3292	tcp_info->rcv_wnd = cm_node->tcp_cntxt.rcv_wnd
   3293			    << cm_node->tcp_cntxt.rcv_wscale;
   3294
   3295	tcp_info->flow_label = 0;
   3296	tcp_info->snd_mss = (u32)cm_node->tcp_cntxt.mss;
   3297	tcp_info->tos = cm_node->tos;
   3298	if (cm_node->vlan_id < VLAN_N_VID) {
   3299		tcp_info->insert_vlan_tag = true;
   3300		tcp_info->vlan_tag = cm_node->vlan_id;
   3301		tcp_info->vlan_tag |= cm_node->user_pri << VLAN_PRIO_SHIFT;
   3302	}
   3303	if (cm_node->ipv4) {
   3304		tcp_info->src_port = cm_node->loc_port;
   3305		tcp_info->dst_port = cm_node->rem_port;
   3306
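       		/*
       		 * IPv4 uses only the last dword of the 4-dword address
       		 * arrays in the offload context; ARP resolution is keyed
       		 * on that single dword.
       		 */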
   3307		tcp_info->dest_ip_addr[3] = cm_node->rem_addr[0];
   3308		tcp_info->local_ipaddr[3] = cm_node->loc_addr[0];
   3309		tcp_info->arp_idx = (u16)irdma_arp_table(iwqp->iwdev->rf,
   3310							 &tcp_info->dest_ip_addr[3],
   3311							 true, NULL,
   3312							 IRDMA_ARP_RESOLVE);
   3313	} else {
   3314		tcp_info->src_port = cm_node->loc_port;
   3315		tcp_info->dst_port = cm_node->rem_port;
   3316		memcpy(tcp_info->dest_ip_addr, cm_node->rem_addr,
   3317		       sizeof(tcp_info->dest_ip_addr));
   3318		memcpy(tcp_info->local_ipaddr, cm_node->loc_addr,
   3319		       sizeof(tcp_info->local_ipaddr));
   3320
   3321		tcp_info->arp_idx = (u16)irdma_arp_table(iwqp->iwdev->rf,
   3322							 &tcp_info->dest_ip_addr[0],
   3323							 false, NULL,
   3324							 IRDMA_ARP_RESOLVE);
   3325	}
   3326}
   3327
   3328/**
   3329 * irdma_cm_init_tsa_conn - setup qp for RTS
   3330 * @iwqp: associated qp for the connection
   3331 * @cm_node: connection's node
   3332 */
   3333static void irdma_cm_init_tsa_conn(struct irdma_qp *iwqp,
   3334				   struct irdma_cm_node *cm_node)
   3335{
   3336	struct irdma_iwarp_offload_info *iwarp_info;
   3337	struct irdma_qp_host_ctx_info *ctx_info;
   3338
   3339	iwarp_info = &iwqp->iwarp_info;
   3340	ctx_info = &iwqp->ctx_info;
   3341
   3342	ctx_info->tcp_info = &iwqp->tcp_info;
   3343	ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
   3344	ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
   3345
   3346	iwarp_info->ord_size = cm_node->ord_size;
   3347	iwarp_info->ird_size = cm_node->ird_size;
   3348	iwarp_info->rd_en = true;
   3349	iwarp_info->rdmap_ver = 1;
   3350	iwarp_info->ddp_ver = 1;
   3351	iwarp_info->pd_id = iwqp->iwpd->sc_pd.pd_id;
   3352
   3353	ctx_info->tcp_info_valid = true;
   3354	ctx_info->iwarp_info_valid = true;
   3355	ctx_info->user_pri = cm_node->user_pri;
   3356
   3357	irdma_init_tcp_ctx(cm_node, &iwqp->tcp_info, iwqp);
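       	/*
       	 * If MPA send markers were negotiated, the first marker offset is
       	 * derived from the masked send sequence number plus the LSMM length.
       	 */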
   3358	if (cm_node->snd_mark_en) {
   3359		iwarp_info->snd_mark_en = true;
   3360		iwarp_info->snd_mark_offset = (iwqp->tcp_info.snd_nxt & SNDMARKER_SEQNMASK) +
   3361					       cm_node->lsmm_size;
   3362	}
   3363
   3364	cm_node->state = IRDMA_CM_STATE_OFFLOADED;
   3365	iwqp->tcp_info.tcp_state = IRDMA_TCP_STATE_ESTABLISHED;
   3366	iwqp->tcp_info.src_mac_addr_idx = iwqp->iwdev->mac_ip_table_idx;
   3367
   3368	if (cm_node->rcv_mark_en) {
   3369		iwarp_info->rcv_mark_en = true;
   3370		iwarp_info->align_hdrs = true;
   3371	}
   3372
   3373	irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
   3374
   3375	/* once tcp_info is set, no need to do it again */
   3376	ctx_info->tcp_info_valid = false;
   3377	ctx_info->iwarp_info_valid = false;
   3378}
   3379
   3380/**
   3381 * irdma_cm_disconn - queue disconnect work when a connection is being closed
   3382 * @iwqp: associated qp for the connection
   3383 */
   3384void irdma_cm_disconn(struct irdma_qp *iwqp)
   3385{
   3386	struct irdma_device *iwdev = iwqp->iwdev;
   3387	struct disconn_work *work;
   3388	unsigned long flags;
   3389
   3390	work = kzalloc(sizeof(*work), GFP_ATOMIC);
   3391	if (!work)
   3392		return;
   3393
   3394	spin_lock_irqsave(&iwdev->rf->qptable_lock, flags);
   3395	if (!iwdev->rf->qp_table[iwqp->ibqp.qp_num]) {
   3396		spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags);
   3397		ibdev_dbg(&iwdev->ibdev,
   3398			  "CM: qp_id %d is already freed\n",
   3399			  iwqp->ibqp.qp_num);
   3400		kfree(work);
   3401		return;
   3402	}
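       	/*
       	 * Hold a QP reference under qptable_lock so the QP cannot be freed
       	 * while the disconnect work is pending; irdma_disconnect_worker()
       	 * drops it.
       	 */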
   3403	irdma_qp_add_ref(&iwqp->ibqp);
   3404	spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags);
   3405
   3406	work->iwqp = iwqp;
   3407	INIT_WORK(&work->work, irdma_disconnect_worker);
   3408	queue_work(iwdev->cleanup_wq, &work->work);
   3409}
   3410
   3411/**
   3412 * irdma_qp_disconnect - free qp and close cm
   3413 * @iwqp: associated qp for the connection
   3414 */
   3415static void irdma_qp_disconnect(struct irdma_qp *iwqp)
   3416{
   3417	struct irdma_device *iwdev = iwqp->iwdev;
   3418
   3419	iwqp->active_conn = 0;
   3420	/* close the CM node down if it is still active */
   3421	ibdev_dbg(&iwdev->ibdev, "CM: Call close API\n");
   3422	irdma_cm_close(iwqp->cm_node);
   3423}
   3424
   3425/**
   3426 * irdma_cm_disconn_true - called by worker thread to disconnect qp
   3427 * @iwqp: associated qp for the connection
   3428 */
   3429static void irdma_cm_disconn_true(struct irdma_qp *iwqp)
   3430{
   3431	struct iw_cm_id *cm_id;
   3432	struct irdma_device *iwdev;
   3433	struct irdma_sc_qp *qp = &iwqp->sc_qp;
   3434	u16 last_ae;
   3435	u8 original_hw_tcp_state;
   3436	u8 original_ibqp_state;
   3437	int disconn_status = 0;
   3438	int issue_disconn = 0;
   3439	int issue_close = 0;
   3440	int issue_flush = 0;
   3441	unsigned long flags;
   3442	int err;
   3443
   3444	iwdev = iwqp->iwdev;
   3445	spin_lock_irqsave(&iwqp->lock, flags);
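       	/*
       	 * RoCE QPs have no iWARP CM state to unwind: unless a flush or
       	 * destroy is already in progress, just move the QP to ERR and
       	 * report the async event.
       	 */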
   3446	if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
   3447		struct ib_qp_attr attr;
   3448
   3449		if (iwqp->flush_issued || iwqp->sc_qp.qp_uk.destroy_pending) {
   3450			spin_unlock_irqrestore(&iwqp->lock, flags);
   3451			return;
   3452		}
   3453
   3454		spin_unlock_irqrestore(&iwqp->lock, flags);
   3455
   3456		attr.qp_state = IB_QPS_ERR;
   3457		irdma_modify_qp_roce(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
   3458		irdma_ib_qp_event(iwqp, qp->event_type);
   3459		return;
   3460	}
   3461
   3462	cm_id = iwqp->cm_id;
   3463	original_hw_tcp_state = iwqp->hw_tcp_state;
   3464	original_ibqp_state = iwqp->ibqp_state;
   3465	last_ae = iwqp->last_aeq;
   3466
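       	/*
       	 * Decide what to signal upstream:
       	 * - a terminate in progress forces disconnect + close + flush;
       	 * - CLOSE_WAIT, or an LLP reset while in RTS, warrants a disconnect
       	 *   (reported as -ECONNRESET on reset);
       	 * - CLOSED/TIME_WAIT, fatal AEs, a driver reset or a missing cm_id
       	 *   additionally require a close + flush.
       	 */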
   3467	if (qp->term_flags) {
   3468		issue_disconn = 1;
   3469		issue_close = 1;
   3470		iwqp->cm_id = NULL;
   3471		irdma_terminate_del_timer(qp);
   3472		if (!iwqp->flush_issued) {
   3473			iwqp->flush_issued = 1;
   3474			issue_flush = 1;
   3475		}
   3476	} else if ((original_hw_tcp_state == IRDMA_TCP_STATE_CLOSE_WAIT) ||
   3477		   ((original_ibqp_state == IB_QPS_RTS) &&
   3478		    (last_ae == IRDMA_AE_LLP_CONNECTION_RESET))) {
   3479		issue_disconn = 1;
   3480		if (last_ae == IRDMA_AE_LLP_CONNECTION_RESET)
   3481			disconn_status = -ECONNRESET;
   3482	}
   3483
   3484	if (original_hw_tcp_state == IRDMA_TCP_STATE_CLOSED ||
   3485	    original_hw_tcp_state == IRDMA_TCP_STATE_TIME_WAIT ||
   3486	    last_ae == IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE ||
   3487	    last_ae == IRDMA_AE_BAD_CLOSE ||
   3488	    last_ae == IRDMA_AE_LLP_CONNECTION_RESET || iwdev->rf->reset || !cm_id) {
   3489		issue_close = 1;
   3490		iwqp->cm_id = NULL;
   3491		qp->term_flags = 0;
   3492		if (!iwqp->flush_issued) {
   3493			iwqp->flush_issued = 1;
   3494			issue_flush = 1;
   3495		}
   3496	}
   3497
   3498	spin_unlock_irqrestore(&iwqp->lock, flags);
   3499	if (issue_flush && !iwqp->sc_qp.qp_uk.destroy_pending) {
   3500		irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ | IRDMA_FLUSH_RQ |
   3501				 IRDMA_FLUSH_WAIT);
   3502
   3503		if (qp->term_flags)
   3504			irdma_ib_qp_event(iwqp, qp->event_type);
   3505	}
   3506
   3507	if (!cm_id || !cm_id->event_handler)
   3508		return;
   3509
   3510	spin_lock_irqsave(&iwdev->cm_core.ht_lock, flags);
   3511	if (!iwqp->cm_node) {
   3512		spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
   3513		return;
   3514	}
   3515	refcount_inc(&iwqp->cm_node->refcnt);
   3516
   3517	spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
   3518
   3519	if (issue_disconn) {
   3520		err = irdma_send_cm_event(iwqp->cm_node, cm_id,
   3521					  IW_CM_EVENT_DISCONNECT,
   3522					  disconn_status);
   3523		if (err)
   3524			ibdev_dbg(&iwdev->ibdev,
   3525				  "CM: disconnect event failed: - cm_id = %p\n",
   3526				  cm_id);
   3527	}
   3528	if (issue_close) {
   3529		cm_id->provider_data = iwqp;
   3530		err = irdma_send_cm_event(iwqp->cm_node, cm_id,
   3531					  IW_CM_EVENT_CLOSE, 0);
   3532		if (err)
   3533			ibdev_dbg(&iwdev->ibdev,
   3534				  "CM: close event failed: - cm_id = %p\n",
   3535				  cm_id);
   3536		irdma_qp_disconnect(iwqp);
   3537	}
   3538	irdma_rem_ref_cm_node(iwqp->cm_node);
   3539}
   3540
   3541/**
   3542 * irdma_disconnect_worker - worker for connection close
   3543 * @work: pointer to the disconn_work structure
   3544 */
   3545static void irdma_disconnect_worker(struct work_struct *work)
   3546{
   3547	struct disconn_work *dwork = container_of(work, struct disconn_work, work);
   3548	struct irdma_qp *iwqp = dwork->iwqp;
   3549
   3550	kfree(dwork);
   3551	irdma_cm_disconn_true(iwqp);
   3552	irdma_qp_rem_ref(&iwqp->ibqp);
   3553}
   3554
   3555/**
   3556 * irdma_free_lsmm_rsrc - free lsmm memory and deregister
   3557 * @iwqp: associated qp for the connection
   3558 */
   3559void irdma_free_lsmm_rsrc(struct irdma_qp *iwqp)
   3560{
   3561	struct irdma_device *iwdev;
   3562
   3563	iwdev = iwqp->iwdev;
   3564
   3565	if (iwqp->ietf_mem.va) {
   3566		if (iwqp->lsmm_mr)
   3567			iwdev->ibdev.ops.dereg_mr(iwqp->lsmm_mr, NULL);
   3568		dma_free_coherent(iwdev->rf->sc_dev.hw->device,
   3569				  iwqp->ietf_mem.size, iwqp->ietf_mem.va,
   3570				  iwqp->ietf_mem.pa);
   3571		iwqp->ietf_mem.va = NULL;
   3573	}
   3574}
   3575
   3576/**
   3577 * irdma_accept - registered call for connection to be accepted
   3578 * @cm_id: cm information for passive connection
   3579 * @conn_param: accept parameters
   3580 */
   3581int irdma_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
   3582{
   3583	struct ib_qp *ibqp;
   3584	struct irdma_qp *iwqp;
   3585	struct irdma_device *iwdev;
   3586	struct irdma_sc_dev *dev;
   3587	struct irdma_cm_node *cm_node;
   3588	struct ib_qp_attr attr = {};
   3589	int passive_state;
   3590	struct ib_mr *ibmr;
   3591	struct irdma_pd *iwpd;
   3592	u16 buf_len = 0;
   3593	struct irdma_kmem_info accept;
   3594	u64 tagged_offset;
   3595	int wait_ret;
   3596	int ret = 0;
   3597
   3598	ibqp = irdma_get_qp(cm_id->device, conn_param->qpn);
   3599	if (!ibqp)
   3600		return -EINVAL;
   3601
   3602	iwqp = to_iwqp(ibqp);
   3603	iwdev = iwqp->iwdev;
   3604	dev = &iwdev->rf->sc_dev;
   3605	cm_node = cm_id->provider_data;
   3606
   3607	if (((struct sockaddr_in *)&cm_id->local_addr)->sin_family == AF_INET) {
   3608		cm_node->ipv4 = true;
   3609		cm_node->vlan_id = irdma_get_vlan_ipv4(cm_node->loc_addr);
   3610	} else {
   3611		cm_node->ipv4 = false;
   3612		irdma_netdev_vlan_ipv6(cm_node->loc_addr, &cm_node->vlan_id,
   3613				       NULL);
   3614	}
   3615	ibdev_dbg(&iwdev->ibdev, "CM: Accept vlan_id=%d\n",
   3616		  cm_node->vlan_id);
   3617
   3618	trace_irdma_accept(cm_node, 0, NULL);
   3619
   3620	if (cm_node->state == IRDMA_CM_STATE_LISTENER_DESTROYED) {
   3621		ret = -EINVAL;
   3622		goto error;
   3623	}
   3624
   3625	passive_state = atomic_add_return(1, &cm_node->passive_state);
   3626	if (passive_state == IRDMA_SEND_RESET_EVENT) {
   3627		ret = -ECONNRESET;
   3628		goto error;
   3629	}
   3630
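       	/*
       	 * The accept buffer carries the MPA reply frame immediately followed
       	 * by the caller's private data; it is posted as the LSMM (last
       	 * streaming mode message) before the QP is moved to RTS.
       	 */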
   3631	buf_len = conn_param->private_data_len + IRDMA_MAX_IETF_SIZE;
   3632	iwqp->ietf_mem.size = ALIGN(buf_len, 1);
   3633	iwqp->ietf_mem.va = dma_alloc_coherent(dev->hw->device,
   3634					       iwqp->ietf_mem.size,
   3635					       &iwqp->ietf_mem.pa, GFP_KERNEL);
   3636	if (!iwqp->ietf_mem.va) {
   3637		ret = -ENOMEM;
   3638		goto error;
   3639	}
   3640
   3641	cm_node->pdata.size = conn_param->private_data_len;
   3642	accept.addr = iwqp->ietf_mem.va;
   3643	accept.size = irdma_cm_build_mpa_frame(cm_node, &accept, MPA_KEY_REPLY);
   3644	memcpy((u8 *)accept.addr + accept.size, conn_param->private_data,
   3645	       conn_param->private_data_len);
   3646
   3647	if (cm_node->dev->ws_add(iwqp->sc_qp.vsi, cm_node->user_pri)) {
   3648		ret = -ENOMEM;
   3649		goto error;
   3650	}
   3651	iwqp->sc_qp.user_pri = cm_node->user_pri;
   3652	irdma_qp_add_qos(&iwqp->sc_qp);
   3653	/* setup our first outgoing iWARP send WQE (the IETF frame response) */
   3654	iwpd = iwqp->iwpd;
   3655	tagged_offset = (uintptr_t)iwqp->ietf_mem.va;
   3656	ibmr = irdma_reg_phys_mr(&iwpd->ibpd, iwqp->ietf_mem.pa, buf_len,
   3657				 IB_ACCESS_LOCAL_WRITE, &tagged_offset);
   3658	if (IS_ERR(ibmr)) {
   3659		ret = -ENOMEM;
   3660		goto error;
   3661	}
   3662
   3663	ibmr->pd = &iwpd->ibpd;
   3664	ibmr->device = iwpd->ibpd.device;
   3665	iwqp->lsmm_mr = ibmr;
   3666	if (iwqp->page)
   3667		iwqp->sc_qp.qp_uk.sq_base = kmap_local_page(iwqp->page);
   3668
   3669	cm_node->lsmm_size = accept.size + conn_param->private_data_len;
   3670	irdma_sc_send_lsmm(&iwqp->sc_qp, iwqp->ietf_mem.va, cm_node->lsmm_size,
   3671			   ibmr->lkey);
   3672
   3673	if (iwqp->page)
   3674		kunmap_local(iwqp->sc_qp.qp_uk.sq_base);
   3675
   3676	iwqp->cm_id = cm_id;
   3677	cm_node->cm_id = cm_id;
   3678
   3679	cm_id->provider_data = iwqp;
   3680	iwqp->active_conn = 0;
   3681	iwqp->cm_node = cm_node;
   3682	cm_node->iwqp = iwqp;
   3683	irdma_cm_init_tsa_conn(iwqp, cm_node);
   3684	irdma_qp_add_ref(&iwqp->ibqp);
   3685	cm_id->add_ref(cm_id);
   3686
   3687	attr.qp_state = IB_QPS_RTS;
   3688	cm_node->qhash_set = false;
   3689	cm_node->cm_core->cm_free_ah(cm_node);
   3690
   3691	irdma_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
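       	/*
       	 * If the HW supports the RTS asynchronous event, wait for it before
       	 * reporting ESTABLISHED; a timeout is treated as a slow connection
       	 * and the accept fails with -ECONNRESET.
       	 */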
   3692	if (dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_RTS_AE) {
   3693		wait_ret = wait_event_interruptible_timeout(iwqp->waitq,
   3694							    iwqp->rts_ae_rcvd,
   3695							    IRDMA_MAX_TIMEOUT);
   3696		if (!wait_ret) {
   3697			ibdev_dbg(&iwdev->ibdev,
   3698				  "CM: Slow Connection: cm_node=%p, loc_port=%d, rem_port=%d, cm_id=%p\n",
   3699				  cm_node, cm_node->loc_port,
   3700				  cm_node->rem_port, cm_node->cm_id);
   3701			ret = -ECONNRESET;
   3702			goto error;
   3703		}
   3704	}
   3705
   3706	irdma_send_cm_event(cm_node, cm_id, IW_CM_EVENT_ESTABLISHED, 0);
   3707	cm_node->accelerated = true;
   3708	complete(&cm_node->establish_comp);
   3709
   3710	if (cm_node->accept_pend) {
   3711		atomic_dec(&cm_node->listener->pend_accepts_cnt);
   3712		cm_node->accept_pend = 0;
   3713	}
   3714
   3715	ibdev_dbg(&iwdev->ibdev,
   3716		  "CM: rem_port=0x%04x, loc_port=0x%04x rem_addr=%pI4 loc_addr=%pI4 cm_node=%p cm_id=%p qp_id = %d\n\n",
   3717		  cm_node->rem_port, cm_node->loc_port, cm_node->rem_addr,
   3718		  cm_node->loc_addr, cm_node, cm_id, ibqp->qp_num);
   3719	cm_node->cm_core->stats_accepts++;
   3720
   3721	return 0;
   3722error:
   3723	irdma_free_lsmm_rsrc(iwqp);
   3724	irdma_rem_ref_cm_node(cm_node);
   3725
   3726	return ret;
   3727}
   3728
   3729/**
   3730 * irdma_reject - registered call for connection to be rejected
   3731 * @cm_id: cm information for passive connection
   3732 * @pdata: private data to be sent
   3733 * @pdata_len: private data length
   3734 */
   3735int irdma_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
   3736{
   3737	struct irdma_device *iwdev;
   3738	struct irdma_cm_node *cm_node;
   3739
   3740	cm_node = cm_id->provider_data;
   3741	cm_node->pdata.size = pdata_len;
   3742
   3743	trace_irdma_reject(cm_node, 0, NULL);
   3744
   3745	iwdev = to_iwdev(cm_id->device);
   3746	if (!iwdev)
   3747		return -EINVAL;
   3748
   3749	cm_node->cm_core->stats_rejects++;
   3750
   3751	if (pdata_len + sizeof(struct ietf_mpa_v2) > IRDMA_MAX_CM_BUF)
   3752		return -EINVAL;
   3753
   3754	return irdma_cm_reject(cm_node, pdata, pdata_len);
   3755}
   3756
   3757/**
   3758 * irdma_connect - registered call for connection to be established
   3759 * @cm_id: cm information for active connection
   3760 * @conn_param: Information about the connection
   3761 */
   3762int irdma_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
   3763{
   3764	struct ib_qp *ibqp;
   3765	struct irdma_qp *iwqp;
   3766	struct irdma_device *iwdev;
   3767	struct irdma_cm_node *cm_node;
   3768	struct irdma_cm_info cm_info;
   3769	struct sockaddr_in *laddr;
   3770	struct sockaddr_in *raddr;
   3771	struct sockaddr_in6 *laddr6;
   3772	struct sockaddr_in6 *raddr6;
   3773	int ret = 0;
   3774
   3775	ibqp = irdma_get_qp(cm_id->device, conn_param->qpn);
   3776	if (!ibqp)
   3777		return -EINVAL;
   3778	iwqp = to_iwqp(ibqp);
   3779	if (!iwqp)
   3780		return -EINVAL;
   3781	iwdev = iwqp->iwdev;
   3782	if (!iwdev)
   3783		return -EINVAL;
   3784
   3785	laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
   3786	raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
   3787	laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
   3788	raddr6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;
   3789
   3790	if (!(laddr->sin_port) || !(raddr->sin_port))
   3791		return -EINVAL;
   3792
   3793	iwqp->active_conn = 1;
   3794	iwqp->cm_id = NULL;
   3795	cm_id->provider_data = iwqp;
   3796
   3797	/* set up the connection params for the node */
   3798	if (cm_id->remote_addr.ss_family == AF_INET) {
   3799		if (iwdev->vsi.mtu < IRDMA_MIN_MTU_IPV4)
   3800			return -EINVAL;
   3801
   3802		cm_info.ipv4 = true;
   3803		memset(cm_info.loc_addr, 0, sizeof(cm_info.loc_addr));
   3804		memset(cm_info.rem_addr, 0, sizeof(cm_info.rem_addr));
   3805		cm_info.loc_addr[0] = ntohl(laddr->sin_addr.s_addr);
   3806		cm_info.rem_addr[0] = ntohl(raddr->sin_addr.s_addr);
   3807		cm_info.loc_port = ntohs(laddr->sin_port);
   3808		cm_info.rem_port = ntohs(raddr->sin_port);
   3809		cm_info.vlan_id = irdma_get_vlan_ipv4(cm_info.loc_addr);
   3810	} else {
   3811		if (iwdev->vsi.mtu < IRDMA_MIN_MTU_IPV6)
   3812			return -EINVAL;
   3813
   3814		cm_info.ipv4 = false;
   3815		irdma_copy_ip_ntohl(cm_info.loc_addr,
   3816				    laddr6->sin6_addr.in6_u.u6_addr32);
   3817		irdma_copy_ip_ntohl(cm_info.rem_addr,
   3818				    raddr6->sin6_addr.in6_u.u6_addr32);
   3819		cm_info.loc_port = ntohs(laddr6->sin6_port);
   3820		cm_info.rem_port = ntohs(raddr6->sin6_port);
   3821		irdma_netdev_vlan_ipv6(cm_info.loc_addr, &cm_info.vlan_id,
   3822				       NULL);
   3823	}
   3824	cm_info.cm_id = cm_id;
   3825	cm_info.qh_qpid = iwdev->vsi.ilq->qp_id;
   3826	cm_info.tos = cm_id->tos;
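       	/*
       	 * Derive the 802.1p user priority either from the per-VSI DSCP map
       	 * (DSCP mode) or from the legacy TOS-to-priority mapping.
       	 */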
   3827	if (iwdev->vsi.dscp_mode)
   3828		cm_info.user_pri =
   3829			iwqp->sc_qp.vsi->dscp_map[irdma_tos2dscp(cm_info.tos)];
   3830	else
   3831		cm_info.user_pri = rt_tos2priority(cm_id->tos);
   3832
   3833	if (iwqp->sc_qp.dev->ws_add(iwqp->sc_qp.vsi, cm_info.user_pri))
   3834		return -ENOMEM;
   3835	iwqp->sc_qp.user_pri = cm_info.user_pri;
   3836	irdma_qp_add_qos(&iwqp->sc_qp);
   3837	ibdev_dbg(&iwdev->ibdev, "DCB: TOS:[%d] UP:[%d]\n", cm_id->tos,
   3838		  cm_info.user_pri);
   3839
   3840	trace_irdma_dcb_tos(iwdev, cm_id->tos, cm_info.user_pri);
   3841
   3842	ret = irdma_create_cm_node(&iwdev->cm_core, iwdev, conn_param, &cm_info,
   3843				   &cm_node);
   3844	if (ret)
   3845		return ret;
   3846	ret = cm_node->cm_core->cm_create_ah(cm_node, true);
   3847	if (ret)
   3848		goto err;
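       	/*
       	 * Steer inbound traffic for this 4-tuple to the iWARP ILQ: add an
       	 * established-connection quad hash entry and an APBVT entry for the
       	 * local TCP port.
       	 */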
   3849	if (irdma_manage_qhash(iwdev, &cm_info,
   3850			       IRDMA_QHASH_TYPE_TCP_ESTABLISHED,
   3851			       IRDMA_QHASH_MANAGE_TYPE_ADD, NULL, true)) {
   3852		ret = -EINVAL;
   3853		goto err;
   3854	}
   3855	cm_node->qhash_set = true;
   3856
   3857	cm_node->apbvt_entry = irdma_add_apbvt(iwdev, cm_info.loc_port);
   3858	if (!cm_node->apbvt_entry) {
   3859		ret = -EINVAL;
   3860		goto err;
   3861	}
   3862
   3863	cm_node->apbvt_set = true;
   3864	iwqp->cm_node = cm_node;
   3865	cm_node->iwqp = iwqp;
   3866	iwqp->cm_id = cm_id;
   3867	irdma_qp_add_ref(&iwqp->ibqp);
   3868	cm_id->add_ref(cm_id);
   3869
   3870	if (cm_node->state != IRDMA_CM_STATE_OFFLOADED) {
   3871		cm_node->state = IRDMA_CM_STATE_SYN_SENT;
   3872		ret = irdma_send_syn(cm_node, 0);
   3873		if (ret)
   3874			goto err;
   3875	}
   3876
   3877	ibdev_dbg(&iwdev->ibdev,
   3878		  "CM: rem_port=0x%04x, loc_port=0x%04x rem_addr=%pI4 loc_addr=%pI4 cm_node=%p cm_id=%p qp_id = %d\n\n",
   3879		  cm_node->rem_port, cm_node->loc_port, cm_node->rem_addr,
   3880		  cm_node->loc_addr, cm_node, cm_id, ibqp->qp_num);
   3881
   3882	trace_irdma_connect(cm_node, 0, NULL);
   3883
   3884	return 0;
   3885
   3886err:
   3887	if (cm_info.ipv4)
   3888		ibdev_dbg(&iwdev->ibdev,
   3889			  "CM: connect() FAILED: dest addr=%pI4",
   3890			  cm_info.rem_addr);
   3891	else
   3892		ibdev_dbg(&iwdev->ibdev,
   3893			  "CM: connect() FAILED: dest addr=%pI6",
   3894			  cm_info.rem_addr);
   3895	irdma_rem_ref_cm_node(cm_node);
   3896	iwdev->cm_core.stats_connect_errs++;
   3897
   3898	return ret;
   3899}
   3900
   3901/**
   3902 * irdma_create_listen - registered call creating listener
   3903 * @cm_id: cm information for passive connection
   3904 * @backlog: maximum number of pending accepts
   3905 */
   3906int irdma_create_listen(struct iw_cm_id *cm_id, int backlog)
   3907{
   3908	struct irdma_device *iwdev;
   3909	struct irdma_cm_listener *cm_listen_node;
   3910	struct irdma_cm_info cm_info = {};
   3911	struct sockaddr_in *laddr;
   3912	struct sockaddr_in6 *laddr6;
   3913	bool wildcard = false;
   3914	int err;
   3915
   3916	iwdev = to_iwdev(cm_id->device);
   3917	if (!iwdev)
   3918		return -EINVAL;
   3919
   3920	laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
   3921	laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
   3922	cm_info.qh_qpid = iwdev->vsi.ilq->qp_id;
   3923
   3924	if (laddr->sin_family == AF_INET) {
   3925		if (iwdev->vsi.mtu < IRDMA_MIN_MTU_IPV4)
   3926			return -EINVAL;
   3927
   3928		cm_info.ipv4 = true;
   3929		cm_info.loc_addr[0] = ntohl(laddr->sin_addr.s_addr);
   3930		cm_info.loc_port = ntohs(laddr->sin_port);
   3931
   3932		if (laddr->sin_addr.s_addr != htonl(INADDR_ANY)) {
   3933			cm_info.vlan_id = irdma_get_vlan_ipv4(cm_info.loc_addr);
   3934		} else {
   3935			cm_info.vlan_id = 0xFFFF;
   3936			wildcard = true;
   3937		}
   3938	} else {
   3939		if (iwdev->vsi.mtu < IRDMA_MIN_MTU_IPV6)
   3940			return -EINVAL;
   3941
   3942		cm_info.ipv4 = false;
   3943		irdma_copy_ip_ntohl(cm_info.loc_addr,
   3944				    laddr6->sin6_addr.in6_u.u6_addr32);
   3945		cm_info.loc_port = ntohs(laddr6->sin6_port);
   3946		if (ipv6_addr_type(&laddr6->sin6_addr) != IPV6_ADDR_ANY) {
   3947			irdma_netdev_vlan_ipv6(cm_info.loc_addr,
   3948					       &cm_info.vlan_id, NULL);
   3949		} else {
   3950			cm_info.vlan_id = 0xFFFF;
   3951			wildcard = true;
   3952		}
   3953	}
   3954
   3955	if (cm_info.vlan_id >= VLAN_N_VID && iwdev->dcb_vlan_mode)
   3956		cm_info.vlan_id = 0;
   3957	cm_info.backlog = backlog;
   3958	cm_info.cm_id = cm_id;
   3959
   3960	trace_irdma_create_listen(iwdev, &cm_info);
   3961
   3962	cm_listen_node = irdma_make_listen_node(&iwdev->cm_core, iwdev,
   3963						&cm_info);
   3964	if (!cm_listen_node) {
   3965		ibdev_dbg(&iwdev->ibdev,
   3966			  "CM: cm_listen_node == NULL\n");
   3967		return -ENOMEM;
   3968	}
   3969
   3970	cm_id->provider_data = cm_listen_node;
   3971
   3972	cm_listen_node->tos = cm_id->tos;
   3973	if (iwdev->vsi.dscp_mode)
   3974		cm_listen_node->user_pri =
   3975			iwdev->vsi.dscp_map[irdma_tos2dscp(cm_id->tos)];
   3976	else
   3977		cm_listen_node->user_pri = rt_tos2priority(cm_id->tos);
   3978	cm_info.user_pri = cm_listen_node->user_pri;
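       	/*
       	 * For a brand new listener, program the receive path: a wildcard
       	 * listen gets a quad hash entry per interface address through
       	 * irdma_add_mqh(), a specific address gets a single TCP SYN quad
       	 * hash entry; both need an APBVT entry for the listen port. A
       	 * reused node inherits these from the existing listener.
       	 */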
   3979	if (!cm_listen_node->reused_node) {
   3980		if (wildcard) {
   3981			err = irdma_add_mqh(iwdev, &cm_info, cm_listen_node);
   3982			if (err)
   3983				goto error;
   3984		} else {
   3985			err = irdma_manage_qhash(iwdev, &cm_info,
   3986						 IRDMA_QHASH_TYPE_TCP_SYN,
   3987						 IRDMA_QHASH_MANAGE_TYPE_ADD,
   3988						 NULL, true);
   3989			if (err)
   3990				goto error;
   3991
   3992			cm_listen_node->qhash_set = true;
   3993		}
   3994
   3995		cm_listen_node->apbvt_entry = irdma_add_apbvt(iwdev,
   3996							      cm_info.loc_port);
   3997		if (!cm_listen_node->apbvt_entry)
   3998			goto error;
   3999	}
   4000	cm_id->add_ref(cm_id);
   4001	cm_listen_node->cm_core->stats_listen_created++;
   4002	ibdev_dbg(&iwdev->ibdev,
   4003		  "CM: loc_port=0x%04x loc_addr=%pI4 cm_listen_node=%p cm_id=%p qhash_set=%d vlan_id=%d\n",
   4004		  cm_listen_node->loc_port, cm_listen_node->loc_addr,
   4005		  cm_listen_node, cm_listen_node->cm_id,
   4006		  cm_listen_node->qhash_set, cm_listen_node->vlan_id);
   4007
   4008	return 0;
   4009
   4010error:
   4011
   4012	irdma_cm_del_listen(&iwdev->cm_core, cm_listen_node, false);
   4013
   4014	return -EINVAL;
   4015}
   4016
   4017/**
   4018 * irdma_destroy_listen - registered call to destroy listener
   4019 * @cm_id: cm information for passive connection
   4020 */
   4021int irdma_destroy_listen(struct iw_cm_id *cm_id)
   4022{
   4023	struct irdma_device *iwdev;
   4024
   4025	iwdev = to_iwdev(cm_id->device);
   4026	if (cm_id->provider_data)
   4027		irdma_cm_del_listen(&iwdev->cm_core, cm_id->provider_data,
   4028				    true);
   4029	else
   4030		ibdev_dbg(&iwdev->ibdev,
   4031			  "CM: cm_id->provider_data was NULL\n");
   4032
   4033	cm_id->rem_ref(cm_id);
   4034
   4035	return 0;
   4036}
   4037
   4038/**
   4039 * irdma_teardown_list_prep - add conn nodes slated for tear down to list
   4040 * @cm_core: cm's core
   4041 * @teardown_list: list to which matching cm_nodes are added
   4042 * @ipaddr: pointer to ip address
   4043 * @nfo: pointer to cm_info structure instance
   4044 * @disconnect_all: flag indicating disconnect all QPs
   4045 */
   4046static void irdma_teardown_list_prep(struct irdma_cm_core *cm_core,
   4047				     struct list_head *teardown_list,
   4048				     u32 *ipaddr,
   4049				     struct irdma_cm_info *nfo,
   4050				     bool disconnect_all)
   4051{
   4052	struct irdma_cm_node *cm_node;
   4053	int bkt;
   4054
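       	/*
       	 * Walk the connection hash under RCU; queue a node for teardown
       	 * only if its refcount can still be raised, i.e. it is not already
       	 * being destroyed.
       	 */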
   4055	hash_for_each_rcu(cm_core->cm_hash_tbl, bkt, cm_node, list) {
   4056		if ((disconnect_all ||
   4057		     (nfo->vlan_id == cm_node->vlan_id &&
   4058		      !memcmp(cm_node->loc_addr, ipaddr, nfo->ipv4 ? 4 : 16))) &&
   4059		    refcount_inc_not_zero(&cm_node->refcnt))
   4060			list_add(&cm_node->teardown_entry, teardown_list);
   4061	}
   4062}
   4063
   4064/**
   4065 * irdma_cm_event_connected - handle connected active node
   4066 * @event: the info for cm_node of connection
   4067 */
   4068static void irdma_cm_event_connected(struct irdma_cm_event *event)
   4069{
   4070	struct irdma_qp *iwqp;
   4071	struct irdma_device *iwdev;
   4072	struct irdma_cm_node *cm_node;
   4073	struct irdma_sc_dev *dev;
   4074	struct ib_qp_attr attr = {};
   4075	struct iw_cm_id *cm_id;
   4076	int status;
   4077	bool read0;
   4078	int wait_ret = 0;
   4079
   4080	cm_node = event->cm_node;
   4081	cm_id = cm_node->cm_id;
   4082	iwqp = cm_id->provider_data;
   4083	iwdev = iwqp->iwdev;
   4084	dev = &iwdev->rf->sc_dev;
   4085	if (iwqp->sc_qp.qp_uk.destroy_pending) {
   4086		status = -ETIMEDOUT;
   4087		goto error;
   4088	}
   4089
   4090	irdma_cm_init_tsa_conn(iwqp, cm_node);
   4091	read0 = (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO);
   4092	if (iwqp->page)
   4093		iwqp->sc_qp.qp_uk.sq_base = kmap_local_page(iwqp->page);
   4094	irdma_sc_send_rtt(&iwqp->sc_qp, read0);
   4095	if (iwqp->page)
   4096		kunmap_local(iwqp->sc_qp.qp_uk.sq_base);
   4097
   4098	attr.qp_state = IB_QPS_RTS;
   4099	cm_node->qhash_set = false;
   4100	irdma_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
   4101	if (dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_RTS_AE) {
   4102		wait_ret = wait_event_interruptible_timeout(iwqp->waitq,
   4103							    iwqp->rts_ae_rcvd,
   4104							    IRDMA_MAX_TIMEOUT);
   4105		if (!wait_ret)
   4106			ibdev_dbg(&iwdev->ibdev,
   4107				  "CM: Slow Connection: cm_node=%p, loc_port=%d, rem_port=%d, cm_id=%p\n",
   4108				  cm_node, cm_node->loc_port,
   4109				  cm_node->rem_port, cm_node->cm_id);
   4110	}
   4111
   4112	irdma_send_cm_event(cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY, 0);
   4113	cm_node->accelerated = true;
   4114	complete(&cm_node->establish_comp);
   4115	cm_node->cm_core->cm_free_ah(cm_node);
   4116	return;
   4117
   4118error:
   4119	iwqp->cm_id = NULL;
   4120	cm_id->provider_data = NULL;
   4121	irdma_send_cm_event(event->cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY,
   4122			    status);
   4123	irdma_rem_ref_cm_node(event->cm_node);
   4124}
   4125
   4126/**
   4127 * irdma_cm_event_reset - handle reset
   4128 * @event: the info for cm_node of connection
   4129 */
   4130static void irdma_cm_event_reset(struct irdma_cm_event *event)
   4131{
   4132	struct irdma_cm_node *cm_node = event->cm_node;
   4133	struct iw_cm_id *cm_id = cm_node->cm_id;
   4134	struct irdma_qp *iwqp;
   4135
   4136	if (!cm_id)
   4137		return;
   4138
   4139	iwqp = cm_id->provider_data;
   4140	if (!iwqp)
   4141		return;
   4142
   4143	ibdev_dbg(&cm_node->iwdev->ibdev,
   4144		  "CM: reset event %p - cm_id = %p\n", event->cm_node, cm_id);
   4145	iwqp->cm_id = NULL;
   4146
   4147	irdma_send_cm_event(cm_node, cm_node->cm_id, IW_CM_EVENT_DISCONNECT,
   4148			    -ECONNRESET);
   4149	irdma_send_cm_event(cm_node, cm_node->cm_id, IW_CM_EVENT_CLOSE, 0);
   4150}
   4151
   4152/**
   4153 * irdma_cm_event_handler - send event to cm upper layer
   4154 * @work: pointer to cm event info.
   4155 */
   4156static void irdma_cm_event_handler(struct work_struct *work)
   4157{
   4158	struct irdma_cm_event *event = container_of(work, struct irdma_cm_event, event_work);
   4159	struct irdma_cm_node *cm_node;
   4160
   4161	if (!event || !event->cm_node || !event->cm_node->cm_core)
   4162		return;
   4163
   4164	cm_node = event->cm_node;
   4165	trace_irdma_cm_event_handler(cm_node, event->type, NULL);
   4166
   4167	switch (event->type) {
   4168	case IRDMA_CM_EVENT_MPA_REQ:
   4169		irdma_send_cm_event(cm_node, cm_node->cm_id,
   4170				    IW_CM_EVENT_CONNECT_REQUEST, 0);
   4171		break;
   4172	case IRDMA_CM_EVENT_RESET:
   4173		irdma_cm_event_reset(event);
   4174		break;
   4175	case IRDMA_CM_EVENT_CONNECTED:
   4176		if (!event->cm_node->cm_id ||
   4177		    event->cm_node->state != IRDMA_CM_STATE_OFFLOADED)
   4178			break;
   4179		irdma_cm_event_connected(event);
   4180		break;
   4181	case IRDMA_CM_EVENT_MPA_REJECT:
   4182		if (!event->cm_node->cm_id ||
   4183		    cm_node->state == IRDMA_CM_STATE_OFFLOADED)
   4184			break;
   4185		irdma_send_cm_event(cm_node, cm_node->cm_id,
   4186				    IW_CM_EVENT_CONNECT_REPLY, -ECONNREFUSED);
   4187		break;
   4188	case IRDMA_CM_EVENT_ABORTED:
   4189		if (!event->cm_node->cm_id ||
   4190		    event->cm_node->state == IRDMA_CM_STATE_OFFLOADED)
   4191			break;
   4192		irdma_event_connect_error(event);
   4193		break;
   4194	default:
   4195		ibdev_dbg(&cm_node->iwdev->ibdev,
   4196			  "CM: bad event type = %d\n", event->type);
   4197		break;
   4198	}
   4199
   4200	irdma_rem_ref_cm_node(event->cm_node);
   4201	kfree(event);
   4202}
   4203
   4204/**
   4205 * irdma_cm_post_event - queue event request for worker thread
   4206 * @event: cm node's info for the upper layer event call
   4207 */
   4208static void irdma_cm_post_event(struct irdma_cm_event *event)
   4209{
   4210	refcount_inc(&event->cm_node->refcnt);
   4211	INIT_WORK(&event->event_work, irdma_cm_event_handler);
   4212	queue_work(event->cm_node->cm_core->event_wq, &event->event_work);
   4213}
   4214
   4215/**
   4216 * irdma_cm_teardown_connections - teardown QPs
   4217 * @iwdev: device pointer
   4218 * @ipaddr: Pointer to IPv4 or IPv6 address
   4219 * @nfo: Connection info
   4220 * @disconnect_all: flag indicating disconnect all QPs
   4221 *
   4222 * tear down QPs whose local address matches ipaddr (all QPs if disconnect_all)
   4223 */
   4224void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
   4225				   struct irdma_cm_info *nfo,
   4226				   bool disconnect_all)
   4227{
   4228	struct irdma_cm_core *cm_core = &iwdev->cm_core;
   4229	struct list_head *list_core_temp;
   4230	struct list_head *list_node;
   4231	struct irdma_cm_node *cm_node;
   4232	struct list_head teardown_list;
   4233	struct ib_qp_attr attr;
   4234	struct irdma_sc_vsi *vsi = &iwdev->vsi;
   4235	struct irdma_sc_qp *sc_qp;
   4236	struct irdma_qp *qp;
   4237	int i;
   4238
   4239	INIT_LIST_HEAD(&teardown_list);
   4240
   4241	rcu_read_lock();
   4242	irdma_teardown_list_prep(cm_core, &teardown_list, ipaddr, nfo, disconnect_all);
   4243	rcu_read_unlock();
   4244
   4245	list_for_each_safe (list_node, list_core_temp, &teardown_list) {
   4246		cm_node = container_of(list_node, struct irdma_cm_node,
   4247				       teardown_entry);
   4248		attr.qp_state = IB_QPS_ERR;
   4249		irdma_modify_qp(&cm_node->iwqp->ibqp, &attr, IB_QP_STATE, NULL);
   4250		if (iwdev->rf->reset)
   4251			irdma_cm_disconn(cm_node->iwqp);
   4252		irdma_rem_ref_cm_node(cm_node);
   4253	}
   4254	if (!iwdev->roce_mode)
   4255		return;
   4256
   4257	INIT_LIST_HEAD(&teardown_list);
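       	/*
       	 * RoCE mode: collect matching RC QPs from the per-priority QoS
       	 * lists while holding qptable_lock (taking a reference on each),
       	 * then move them to ERR and drop the references outside the locks.
       	 */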
   4258	for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
   4259		mutex_lock(&vsi->qos[i].qos_mutex);
   4260		list_for_each_safe (list_node, list_core_temp,
   4261				    &vsi->qos[i].qplist) {
   4262			u32 qp_ip[4];
   4263
   4264			sc_qp = container_of(list_node, struct irdma_sc_qp,
   4265					     list);
   4266			if (sc_qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_RC)
   4267				continue;
   4268
   4269			qp = sc_qp->qp_uk.back_qp;
   4270			if (!disconnect_all) {
   4271				if (nfo->ipv4)
   4272					qp_ip[0] = qp->udp_info.local_ipaddr[3];
   4273				else
   4274					memcpy(qp_ip,
   4275					       &qp->udp_info.local_ipaddr[0],
   4276					       sizeof(qp_ip));
   4277			}
   4278
   4279			if (disconnect_all ||
   4280			    (nfo->vlan_id == (qp->udp_info.vlan_tag & VLAN_VID_MASK) &&
   4281			     !memcmp(qp_ip, ipaddr, nfo->ipv4 ? 4 : 16))) {
   4282				spin_lock(&iwdev->rf->qptable_lock);
   4283				if (iwdev->rf->qp_table[sc_qp->qp_uk.qp_id]) {
   4284					irdma_qp_add_ref(&qp->ibqp);
   4285					list_add(&qp->teardown_entry,
   4286						 &teardown_list);
   4287				}
   4288				spin_unlock(&iwdev->rf->qptable_lock);
   4289			}
   4290		}
   4291		mutex_unlock(&vsi->qos[i].qos_mutex);
   4292	}
   4293
   4294	list_for_each_safe (list_node, list_core_temp, &teardown_list) {
   4295		qp = container_of(list_node, struct irdma_qp, teardown_entry);
   4296		attr.qp_state = IB_QPS_ERR;
   4297		irdma_modify_qp_roce(&qp->ibqp, &attr, IB_QP_STATE, NULL);
   4298		irdma_qp_rem_ref(&qp->ibqp);
   4299	}
   4300}
   4301
   4302/**
   4303 * irdma_qhash_ctrl - enable/disable qhash for list
   4304 * @iwdev: device pointer
   4305 * @parent_listen_node: parent listen node
   4306 * @nfo: cm info node
   4307 * @ipaddr: Pointer to IPv4 or IPv6 address
   4308 * @ipv4: flag indicating IPv4 when true
   4309 * @ifup: flag indicating interface up when true
   4310 *
   4311 * Enables or disables the qhash for the node in the child
   4312 * listen list that matches ipaddr. If no matching IP was found
   4313 * it will allocate and add a new child listen node to the
   4314 * parent listen node. The listen_list_lock is assumed to be
   4315 * held when called.
   4316 */
   4317static void irdma_qhash_ctrl(struct irdma_device *iwdev,
   4318			     struct irdma_cm_listener *parent_listen_node,
   4319			     struct irdma_cm_info *nfo, u32 *ipaddr, bool ipv4,
   4320			     bool ifup)
   4321{
   4322	struct list_head *child_listen_list = &parent_listen_node->child_listen_list;
   4323	struct irdma_cm_listener *child_listen_node;
   4324	struct list_head *pos, *tpos;
   4325	bool node_allocated = false;
   4326	enum irdma_quad_hash_manage_type op = ifup ?
   4327					      IRDMA_QHASH_MANAGE_TYPE_ADD :
   4328					      IRDMA_QHASH_MANAGE_TYPE_DELETE;
   4329	int err;
   4330
   4331	list_for_each_safe (pos, tpos, child_listen_list) {
   4332		child_listen_node = list_entry(pos, struct irdma_cm_listener,
   4333					       child_listen_list);
   4334		if (!memcmp(child_listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16))
   4335			goto set_qhash;
   4336	}
   4337
   4338	/* if not found then add a child listener if interface is going up */
   4339	if (!ifup)
   4340		return;
   4341	child_listen_node = kmemdup(parent_listen_node,
   4342				    sizeof(*child_listen_node), GFP_ATOMIC);
   4343	if (!child_listen_node)
   4344		return;
   4345
   4346	node_allocated = true;
   4347	memcpy(child_listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16);
   4348
   4349set_qhash:
   4350	memcpy(nfo->loc_addr, child_listen_node->loc_addr,
   4351	       sizeof(nfo->loc_addr));
   4352	nfo->vlan_id = child_listen_node->vlan_id;
   4353	err = irdma_manage_qhash(iwdev, nfo, IRDMA_QHASH_TYPE_TCP_SYN, op, NULL,
   4354				 false);
   4355	if (!err) {
   4356		child_listen_node->qhash_set = ifup;
   4357		if (node_allocated)
   4358			list_add(&child_listen_node->child_listen_list,
   4359				 &parent_listen_node->child_listen_list);
   4360	} else if (node_allocated) {
   4361		kfree(child_listen_node);
   4362	}
   4363}
   4364
   4365/**
   4366 * irdma_if_notify - process an interface up/down event
   4367 * @iwdev: device pointer
   4368 * @netdev: network device structure
   4369 * @ipaddr: Pointer to IPv4 or IPv6 address
   4370 * @ipv4: flag indicating IPv4 when true
   4371 * @ifup: flag indicating interface up when true
   4372 */
   4373void irdma_if_notify(struct irdma_device *iwdev, struct net_device *netdev,
   4374		     u32 *ipaddr, bool ipv4, bool ifup)
   4375{
   4376	struct irdma_cm_core *cm_core = &iwdev->cm_core;
   4377	unsigned long flags;
   4378	struct irdma_cm_listener *listen_node;
   4379	static const u32 ip_zero[4] = { 0, 0, 0, 0 };
   4380	struct irdma_cm_info nfo = {};
   4381	u16 vlan_id = rdma_vlan_dev_vlan_id(netdev);
   4382	enum irdma_quad_hash_manage_type op = ifup ?
   4383					      IRDMA_QHASH_MANAGE_TYPE_ADD :
   4384					      IRDMA_QHASH_MANAGE_TYPE_DELETE;
   4385
   4386	nfo.vlan_id = vlan_id;
   4387	nfo.ipv4 = ipv4;
   4388	nfo.qh_qpid = 1;
   4389
   4390	/* Disable or enable qhash for listeners */
   4391	spin_lock_irqsave(&cm_core->listen_list_lock, flags);
   4392	list_for_each_entry (listen_node, &cm_core->listen_list, list) {
   4393		if (vlan_id != listen_node->vlan_id ||
   4394		    (memcmp(listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16) &&
   4395		     memcmp(listen_node->loc_addr, ip_zero, ipv4 ? 4 : 16)))
   4396			continue;
   4397
   4398		memcpy(nfo.loc_addr, listen_node->loc_addr,
   4399		       sizeof(nfo.loc_addr));
   4400		nfo.loc_port = listen_node->loc_port;
   4401		nfo.user_pri = listen_node->user_pri;
   4402		if (!list_empty(&listen_node->child_listen_list)) {
   4403			irdma_qhash_ctrl(iwdev, listen_node, &nfo, ipaddr, ipv4,
   4404					 ifup);
   4405		} else if (memcmp(listen_node->loc_addr, ip_zero,
   4406				  ipv4 ? 4 : 16)) {
   4407			if (!irdma_manage_qhash(iwdev, &nfo,
   4408						IRDMA_QHASH_TYPE_TCP_SYN, op,
   4409						NULL, false))
   4410				listen_node->qhash_set = ifup;
   4411		}
   4412	}
   4413	spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
   4414
   4415	/* disconnect any connected qp's on ifdown */
   4416	if (!ifup)
   4417		irdma_cm_teardown_connections(iwdev, ipaddr, &nfo, false);
   4418}