cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

qlcnic_io.c (61282B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic qlcnic NIC Driver
 * Copyright (c) 2009-2013 QLogic Corporation
 */

#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <linux/printk.h>
#include <linux/jiffies.h>

#include "qlcnic.h"

#define QLCNIC_TX_ETHER_PKT		0x01
#define QLCNIC_TX_TCP_PKT		0x02
#define QLCNIC_TX_UDP_PKT		0x03
#define QLCNIC_TX_IP_PKT		0x04
#define QLCNIC_TX_TCP_LSO		0x05
#define QLCNIC_TX_TCP_LSO6		0x06
#define QLCNIC_TX_ENCAP_PKT		0x07
#define QLCNIC_TX_ENCAP_LSO		0x08
#define QLCNIC_TX_TCPV6_PKT		0x0b
#define QLCNIC_TX_UDPV6_PKT		0x0c

#define QLCNIC_FLAGS_VLAN_TAGGED	0x10
#define QLCNIC_FLAGS_VLAN_OOB		0x40

#define qlcnic_set_tx_vlan_tci(cmd_desc, v)	\
	(cmd_desc)->vlan_TCI = cpu_to_le16(v);
#define qlcnic_set_cmd_desc_port(cmd_desc, var)	\
	((cmd_desc)->port_ctxid |= ((var) & 0x0F))
#define qlcnic_set_cmd_desc_ctxid(cmd_desc, var)	\
	((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))

#define qlcnic_set_tx_port(_desc, _port) \
	((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0))

#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \
	((_desc)->flags_opcode |= \
	cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)))

#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \
	((_desc)->nfrags__length = \
	cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8)))

/* owner bits of status_desc */
#define STATUS_OWNER_HOST	(0x1ULL << 56)
#define STATUS_OWNER_PHANTOM	(0x2ULL << 56)

/* Status descriptor:
   0-3 port, 4-7 status, 8-11 type, 12-27 total_length
   28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
   53-55 desc_cnt, 56-57 owner, 58-63 opcode
 */
#define qlcnic_get_sts_port(sts_data)	\
	((sts_data) & 0x0F)
#define qlcnic_get_sts_status(sts_data)	\
	(((sts_data) >> 4) & 0x0F)
#define qlcnic_get_sts_type(sts_data)	\
	(((sts_data) >> 8) & 0x0F)
#define qlcnic_get_sts_totallength(sts_data)	\
	(((sts_data) >> 12) & 0xFFFF)
#define qlcnic_get_sts_refhandle(sts_data)	\
	(((sts_data) >> 28) & 0xFFFF)
#define qlcnic_get_sts_prot(sts_data)	\
	(((sts_data) >> 44) & 0x0F)
#define qlcnic_get_sts_pkt_offset(sts_data)	\
	(((sts_data) >> 48) & 0x1F)
#define qlcnic_get_sts_desc_cnt(sts_data)	\
	(((sts_data) >> 53) & 0x7)
#define qlcnic_get_sts_opcode(sts_data)	\
	(((sts_data) >> 58) & 0x03F)

#define qlcnic_get_lro_sts_refhandle(sts_data)	\
	((sts_data) & 0x07FFF)
#define qlcnic_get_lro_sts_length(sts_data)	\
	(((sts_data) >> 16) & 0x0FFFF)
#define qlcnic_get_lro_sts_l2_hdr_offset(sts_data)	\
	(((sts_data) >> 32) & 0x0FF)
#define qlcnic_get_lro_sts_l4_hdr_offset(sts_data)	\
	(((sts_data) >> 40) & 0x0FF)
#define qlcnic_get_lro_sts_timestamp(sts_data)	\
	(((sts_data) >> 48) & 0x1)
#define qlcnic_get_lro_sts_type(sts_data)	\
	(((sts_data) >> 49) & 0x7)
#define qlcnic_get_lro_sts_push_flag(sts_data)		\
	(((sts_data) >> 52) & 0x1)
#define qlcnic_get_lro_sts_seq_number(sts_data)		\
	((sts_data) & 0x0FFFFFFFF)
#define qlcnic_get_lro_sts_mss(sts_data1)		\
	((sts_data1 >> 32) & 0x0FFFF)

#define qlcnic_83xx_get_lro_sts_mss(sts) ((sts) & 0xffff)

/* opcode field in status_desc */
#define QLCNIC_SYN_OFFLOAD	0x03
#define QLCNIC_RXPKT_DESC	0x04
#define QLCNIC_OLD_RXPKT_DESC	0x3f
#define QLCNIC_RESPONSE_DESC	0x05
#define QLCNIC_LRO_DESC		0x12

#define QLCNIC_TCP_HDR_SIZE		20
#define QLCNIC_TCP_TS_OPTION_SIZE	12
#define QLCNIC_FETCH_RING_ID(handle)	((handle) >> 63)
#define QLCNIC_DESC_OWNER_FW		cpu_to_le64(STATUS_OWNER_PHANTOM)

#define QLCNIC_TCP_TS_HDR_SIZE (QLCNIC_TCP_HDR_SIZE + QLCNIC_TCP_TS_OPTION_SIZE)

/* for status field in status_desc */
#define STATUS_CKSUM_LOOP	0
#define STATUS_CKSUM_OK		2

#define qlcnic_83xx_pktln(sts)		((sts >> 32) & 0x3FFF)
#define qlcnic_83xx_hndl(sts)		((sts >> 48) & 0x7FFF)
#define qlcnic_83xx_csum_status(sts)	((sts >> 39) & 7)
#define qlcnic_83xx_opcode(sts)		((sts >> 42) & 0xF)
#define qlcnic_83xx_vlan_tag(sts)	(((sts) >> 48) & 0xFFFF)
#define qlcnic_83xx_lro_pktln(sts)	(((sts) >> 32) & 0x3FFF)
#define qlcnic_83xx_l2_hdr_off(sts)	(((sts) >> 16) & 0xFF)
#define qlcnic_83xx_l4_hdr_off(sts)	(((sts) >> 24) & 0xFF)
#define qlcnic_83xx_pkt_cnt(sts)	(((sts) >> 16) & 0x7)
#define qlcnic_83xx_is_tstamp(sts)	(((sts) >> 40) & 1)
#define qlcnic_83xx_is_psh_bit(sts)	(((sts) >> 41) & 1)
#define qlcnic_83xx_is_ip_align(sts)	(((sts) >> 46) & 1)
#define qlcnic_83xx_has_vlan_tag(sts)	(((sts) >> 47) & 1)

static int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring,
				   int max);

static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *,
					    struct qlcnic_host_rds_ring *,
					    u16, u16);

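/* Fold a MAC address and VLAN id into one byte: XOR byte 0 and byte 5
 * of the 64-bit encoded address with the low byte of the VLAN id.
 * Callers mask the result with fbucket_size - 1 to pick a hash bucket.
 */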
static inline u8 qlcnic_mac_hash(u64 mac, u16 vlan)
{
	return (u8)((mac & 0xff) ^ ((mac >> 40) & 0xff) ^ (vlan & 0xff));
}

static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter,
					u16 handle, u8 ring_id)
{
	if (qlcnic_83xx_check(adapter))
		return handle | (ring_id << 15);
	else
		return handle;
}

static inline int qlcnic_82xx_is_lb_pkt(u64 sts_data)
{
	return (qlcnic_get_sts_status(sts_data) == STATUS_CKSUM_LOOP) ? 1 : 0;
}

static void qlcnic_delete_rx_list_mac(struct qlcnic_adapter *adapter,
				      struct qlcnic_filter *fil,
				      void *addr, u16 vlan_id)
{
	int ret;
	u8 op;

	op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
	ret = qlcnic_sre_macaddr_change(adapter, addr, vlan_id, op);
	if (ret)
		return;

	op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL;
	ret = qlcnic_sre_macaddr_change(adapter, addr, vlan_id, op);
	if (!ret) {
		hlist_del(&fil->fnode);
		adapter->rx_fhash.fnum--;
	}
}

static struct qlcnic_filter *qlcnic_find_mac_filter(struct hlist_head *head,
						    void *addr, u16 vlan_id)
{
	struct qlcnic_filter *tmp_fil = NULL;
	struct hlist_node *n;

	hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
		if (ether_addr_equal(tmp_fil->faddr, addr) &&
		    tmp_fil->vlan_id == vlan_id)
			return tmp_fil;
	}

	return NULL;
}

static void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter,
				 struct sk_buff *skb, int loopback_pkt, u16 vlan_id)
{
	struct ethhdr *phdr = (struct ethhdr *)(skb->data);
	struct qlcnic_filter *fil, *tmp_fil;
	struct hlist_head *head;
	unsigned long time;
	u64 src_addr = 0;
	u8 hindex, op;
	int ret;

	if (!qlcnic_sriov_pf_check(adapter) || (vlan_id == 0xffff))
		vlan_id = 0;

	memcpy(&src_addr, phdr->h_source, ETH_ALEN);
	hindex = qlcnic_mac_hash(src_addr, vlan_id) &
		 (adapter->fhash.fbucket_size - 1);

	if (loopback_pkt) {
		if (adapter->rx_fhash.fnum >= adapter->rx_fhash.fmax)
			return;

		head = &(adapter->rx_fhash.fhead[hindex]);

		tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
		if (tmp_fil) {
			time = tmp_fil->ftime;
			if (time_after(jiffies, QLCNIC_READD_AGE * HZ + time))
				tmp_fil->ftime = jiffies;
			return;
		}

		fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
		if (!fil)
			return;

		fil->ftime = jiffies;
		memcpy(fil->faddr, &src_addr, ETH_ALEN);
		fil->vlan_id = vlan_id;
		spin_lock(&adapter->rx_mac_learn_lock);
		hlist_add_head(&(fil->fnode), head);
		adapter->rx_fhash.fnum++;
		spin_unlock(&adapter->rx_mac_learn_lock);
	} else {
		head = &adapter->fhash.fhead[hindex];

		spin_lock(&adapter->mac_learn_lock);

		tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
		if (tmp_fil) {
			op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL;
			ret = qlcnic_sre_macaddr_change(adapter,
							(u8 *)&src_addr,
							vlan_id, op);
			if (!ret) {
				hlist_del(&tmp_fil->fnode);
				adapter->fhash.fnum--;
			}

			spin_unlock(&adapter->mac_learn_lock);

			return;
		}

		spin_unlock(&adapter->mac_learn_lock);

		head = &adapter->rx_fhash.fhead[hindex];

		spin_lock(&adapter->rx_mac_learn_lock);

		tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
		if (tmp_fil)
			qlcnic_delete_rx_list_mac(adapter, tmp_fil, &src_addr,
						  vlan_id);

		spin_unlock(&adapter->rx_mac_learn_lock);
	}
}

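/* Post a QLCNIC_MAC_EVENT request on the Tx ring asking firmware to
 * install an Rx MAC filter (VLAN-qualified when vlan_id is nonzero).
 * The request occupies one command descriptor; smp_mb() makes the new
 * producer index visible before the caller rings the doorbell.
 */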
void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
			       u16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
{
	struct cmd_desc_type0 *hwdesc;
	struct qlcnic_nic_req *req;
	struct qlcnic_mac_req *mac_req;
	struct qlcnic_vlan_req *vlan_req;
	u32 producer;
	u64 word;

	producer = tx_ring->producer;
	hwdesc = &tx_ring->desc_head[tx_ring->producer];

	req = (struct qlcnic_nic_req *)hwdesc;
	memset(req, 0, sizeof(struct qlcnic_nic_req));
	req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);

	word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
	req->req_hdr = cpu_to_le64(word);

	mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
	mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
	memcpy(mac_req->mac_addr, uaddr, ETH_ALEN);

	vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
	vlan_req->vlan_id = cpu_to_le16(vlan_id);

	tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
	smp_mb();
}

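/* Tx-path MAC learning: remember the source address of each transmitted
 * frame in a driver hash table and program a matching firmware filter.
 * Entries older than QLCNIC_READD_AGE seconds are re-programmed; the
 * table is capped at fhash.fmax entries.
 */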
static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
			       struct cmd_desc_type0 *first_desc,
			       struct sk_buff *skb,
			       struct qlcnic_host_tx_ring *tx_ring)
{
	struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data);
	struct ethhdr *phdr = (struct ethhdr *)(skb->data);
	u16 protocol = ntohs(skb->protocol);
	struct qlcnic_filter *fil, *tmp_fil;
	struct hlist_head *head;
	struct hlist_node *n;
	u64 src_addr = 0;
	u16 vlan_id = 0;
	u8 hindex, hval;

	if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
		return;

	if (adapter->flags & QLCNIC_VLAN_FILTERING) {
		if (protocol == ETH_P_8021Q) {
			vh = (struct vlan_ethhdr *)skb->data;
			vlan_id = ntohs(vh->h_vlan_TCI);
		} else if (skb_vlan_tag_present(skb)) {
			vlan_id = skb_vlan_tag_get(skb);
		}
	}

	memcpy(&src_addr, phdr->h_source, ETH_ALEN);
	hval = qlcnic_mac_hash(src_addr, vlan_id);
	hindex = hval & (adapter->fhash.fbucket_size - 1);
	head = &(adapter->fhash.fhead[hindex]);

	hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
		if (ether_addr_equal(tmp_fil->faddr, (u8 *)&src_addr) &&
		    tmp_fil->vlan_id == vlan_id) {
			if (time_is_before_jiffies(QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
				qlcnic_change_filter(adapter, &src_addr,
						     vlan_id, tx_ring);
			tmp_fil->ftime = jiffies;
			return;
		}
	}

	if (unlikely(adapter->fhash.fnum >= adapter->fhash.fmax)) {
		adapter->stats.mac_filter_limit_overrun++;
		return;
	}

	fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
	if (!fil)
		return;

	qlcnic_change_filter(adapter, &src_addr, vlan_id, tx_ring);
	fil->ftime = jiffies;
	fil->vlan_id = vlan_id;
	memcpy(fil->faddr, &src_addr, ETH_ALEN);
	spin_lock(&adapter->mac_learn_lock);
	hlist_add_head(&(fil->fnode), head);
	adapter->fhash.fnum++;
	spin_unlock(&adapter->mac_learn_lock);
}

#define QLCNIC_ENCAP_VXLAN_PKT		BIT_0
#define QLCNIC_ENCAP_OUTER_L3_IP6	BIT_1
#define QLCNIC_ENCAP_INNER_L3_IP6	BIT_2
#define QLCNIC_ENCAP_INNER_L4_UDP	BIT_3
#define QLCNIC_ENCAP_DO_L3_CSUM		BIT_4
#define QLCNIC_ENCAP_DO_L4_CSUM		BIT_5

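/* Build the Tx descriptor for a VXLAN-encapsulated frame. For GSO the
 * inner and outer headers are copied into the descriptor ring and the
 * QLCNIC_TX_ENCAP_LSO opcode is used; for checksum offload only the
 * encapsulation flags are set. Outer L3/L4 offsets and the outer IP
 * header length are packed into encap_descr.
 */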
static int qlcnic_tx_encap_pkt(struct qlcnic_adapter *adapter,
			       struct cmd_desc_type0 *first_desc,
			       struct sk_buff *skb,
			       struct qlcnic_host_tx_ring *tx_ring)
{
	u8 opcode = 0, inner_hdr_len = 0, outer_hdr_len = 0, total_hdr_len = 0;
	int copied, copy_len, descr_size;
	u32 producer = tx_ring->producer;
	struct cmd_desc_type0 *hwdesc;
	u16 flags = 0, encap_descr = 0;

	opcode = QLCNIC_TX_ETHER_PKT;
	encap_descr = QLCNIC_ENCAP_VXLAN_PKT;

	if (skb_is_gso(skb)) {
		inner_hdr_len = skb_inner_transport_header(skb) +
				inner_tcp_hdrlen(skb) -
				skb_inner_mac_header(skb);

		/* VXLAN header size = 8 */
		outer_hdr_len = skb_transport_offset(skb) + 8 +
				sizeof(struct udphdr);
		first_desc->outer_hdr_length = outer_hdr_len;
		total_hdr_len = inner_hdr_len + outer_hdr_len;
		encap_descr |= QLCNIC_ENCAP_DO_L3_CSUM |
			       QLCNIC_ENCAP_DO_L4_CSUM;
		first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		first_desc->hdr_length = inner_hdr_len;

		/* Copy inner and outer headers in Tx descriptor(s)
		 * If total_hdr_len > cmd_desc_type0, use multiple
		 * descriptors
		 */
		copied = 0;
		descr_size = (int)sizeof(struct cmd_desc_type0);
		while (copied < total_hdr_len) {
			copy_len = min(descr_size, (total_hdr_len - copied));
			hwdesc = &tx_ring->desc_head[producer];
			tx_ring->cmd_buf_arr[producer].skb = NULL;
			skb_copy_from_linear_data_offset(skb, copied,
							 (char *)hwdesc,
							 copy_len);
			copied += copy_len;
			producer = get_next_index(producer, tx_ring->num_desc);
		}

		tx_ring->producer = producer;

		/* Make sure updated tx_ring->producer is visible
		 * for qlcnic_tx_avail()
		 */
		smp_mb();
		adapter->stats.encap_lso_frames++;

		opcode = QLCNIC_TX_ENCAP_LSO;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (inner_ip_hdr(skb)->version == 6) {
			if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
				encap_descr |= QLCNIC_ENCAP_INNER_L4_UDP;
		} else {
			if (inner_ip_hdr(skb)->protocol == IPPROTO_UDP)
				encap_descr |= QLCNIC_ENCAP_INNER_L4_UDP;
		}

		adapter->stats.encap_tx_csummed++;
		opcode = QLCNIC_TX_ENCAP_PKT;
	}

	/* Prepare first 16 bits of byte offset 16 of Tx descriptor */
	if (ip_hdr(skb)->version == 6)
		encap_descr |= QLCNIC_ENCAP_OUTER_L3_IP6;

	/* outer IP header's size in 32bit words */
	encap_descr |= (skb_network_header_len(skb) >> 2) << 6;

	/* outer IP header offset */
	encap_descr |= skb_network_offset(skb) << 10;
	first_desc->encap_descr = cpu_to_le16(encap_descr);

	first_desc->tcp_hdr_offset = skb_inner_transport_header(skb) -
				     skb->data;
	first_desc->ip_hdr_offset = skb_inner_network_offset(skb);

	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);

	return 0;
}

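/* Fill the non-encapsulated Tx descriptor: VLAN tagging (inline, OOB,
 * or forced PVID), LSO with the MAC/IP/TCP headers copied into the
 * ring, or plain TCP/UDP checksum offload, selecting the opcode
 * accordingly.
 */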
static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
			 struct cmd_desc_type0 *first_desc, struct sk_buff *skb,
			 struct qlcnic_host_tx_ring *tx_ring)
{
	u8 l4proto, opcode = 0, hdr_len = 0, tag_vlan = 0;
	u16 flags = 0, vlan_tci = 0;
	int copied, offset, copy_len, size;
	struct cmd_desc_type0 *hwdesc;
	struct vlan_ethhdr *vh;
	u16 protocol = ntohs(skb->protocol);
	u32 producer = tx_ring->producer;

	if (protocol == ETH_P_8021Q) {
		vh = (struct vlan_ethhdr *)skb->data;
		flags = QLCNIC_FLAGS_VLAN_TAGGED;
		vlan_tci = ntohs(vh->h_vlan_TCI);
		protocol = ntohs(vh->h_vlan_encapsulated_proto);
		tag_vlan = 1;
	} else if (skb_vlan_tag_present(skb)) {
		flags = QLCNIC_FLAGS_VLAN_OOB;
		vlan_tci = skb_vlan_tag_get(skb);
		tag_vlan = 1;
	}
	if (unlikely(adapter->tx_pvid)) {
		if (tag_vlan && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
			return -EIO;
		if (tag_vlan && (adapter->flags & QLCNIC_TAGGING_ENABLED))
			goto set_flags;

		flags = QLCNIC_FLAGS_VLAN_OOB;
		vlan_tci = adapter->tx_pvid;
	}
set_flags:
	qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);

	if (*(skb->data) & BIT_0) {
		flags |= BIT_0;
		memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
	}
	opcode = QLCNIC_TX_ETHER_PKT;
	if (skb_is_gso(skb)) {
		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		first_desc->hdr_length = hdr_len;
		opcode = (protocol == ETH_P_IPV6) ? QLCNIC_TX_TCP_LSO6 :
						    QLCNIC_TX_TCP_LSO;

		/* For LSO, we need to copy the MAC/IP/TCP headers into
		 * the descriptor ring
		 */
		copied = 0;
		offset = 2;

		if (flags & QLCNIC_FLAGS_VLAN_OOB) {
			first_desc->hdr_length += VLAN_HLEN;
			first_desc->tcp_hdr_offset = VLAN_HLEN;
			first_desc->ip_hdr_offset = VLAN_HLEN;

			/* Only in case of TSO on vlan device */
			flags |= QLCNIC_FLAGS_VLAN_TAGGED;

			/* Create a TSO vlan header template for firmware */
			hwdesc = &tx_ring->desc_head[producer];
			tx_ring->cmd_buf_arr[producer].skb = NULL;

			copy_len = min((int)sizeof(struct cmd_desc_type0) -
				       offset, hdr_len + VLAN_HLEN);

			vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
			skb_copy_from_linear_data(skb, vh, 12);
			vh->h_vlan_proto = htons(ETH_P_8021Q);
			vh->h_vlan_TCI = htons(vlan_tci);

			skb_copy_from_linear_data_offset(skb, 12,
							 (char *)vh + 16,
							 copy_len - 16);
			copied = copy_len - VLAN_HLEN;
			offset = 0;
			producer = get_next_index(producer, tx_ring->num_desc);
		}

		while (copied < hdr_len) {
			size = (int)sizeof(struct cmd_desc_type0) - offset;
			copy_len = min(size, (hdr_len - copied));
			hwdesc = &tx_ring->desc_head[producer];
			tx_ring->cmd_buf_arr[producer].skb = NULL;
			skb_copy_from_linear_data_offset(skb, copied,
							 (char *)hwdesc +
							 offset, copy_len);
			copied += copy_len;
			offset = 0;
			producer = get_next_index(producer, tx_ring->num_desc);
		}

		tx_ring->producer = producer;
		smp_mb();
		adapter->stats.lso_frames++;

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (protocol == ETH_P_IP) {
			l4proto = ip_hdr(skb)->protocol;

			if (l4proto == IPPROTO_TCP)
				opcode = QLCNIC_TX_TCP_PKT;
			else if (l4proto == IPPROTO_UDP)
				opcode = QLCNIC_TX_UDP_PKT;
		} else if (protocol == ETH_P_IPV6) {
			l4proto = ipv6_hdr(skb)->nexthdr;

			if (l4proto == IPPROTO_TCP)
				opcode = QLCNIC_TX_TCPV6_PKT;
			else if (l4proto == IPPROTO_UDP)
				opcode = QLCNIC_TX_UDPV6_PKT;
		}
	}
	first_desc->tcp_hdr_offset += skb_transport_offset(skb);
	first_desc->ip_hdr_offset += skb_network_offset(skb);
	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);

	return 0;
}

static int qlcnic_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
			     struct qlcnic_cmd_buffer *pbuf)
{
	struct qlcnic_skb_frag *nf;
	skb_frag_t *frag;
	int i, nr_frags;
	dma_addr_t map;

	nr_frags = skb_shinfo(skb)->nr_frags;
	nf = &pbuf->frag_array[0];

	map = dma_map_single(&pdev->dev, skb->data, skb_headlen(skb),
			     DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, map))
		goto out_err;

	nf->dma = map;
	nf->length = skb_headlen(skb);

	for (i = 0; i < nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		nf = &pbuf->frag_array[i+1];
		map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
				       DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, map))
			goto unwind;

		nf->dma = map;
		nf->length = skb_frag_size(frag);
	}

	return 0;

unwind:
	while (--i >= 0) {
		nf = &pbuf->frag_array[i+1];
		dma_unmap_page(&pdev->dev, nf->dma, nf->length, DMA_TO_DEVICE);
	}

	nf = &pbuf->frag_array[0];
	dma_unmap_single(&pdev->dev, nf->dma, skb_headlen(skb), DMA_TO_DEVICE);

out_err:
	return -ENOMEM;
}

static void qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
				 struct qlcnic_cmd_buffer *pbuf)
{
	struct qlcnic_skb_frag *nf = &pbuf->frag_array[0];
	int i, nr_frags = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < nr_frags; i++) {
		nf = &pbuf->frag_array[i+1];
		dma_unmap_page(&pdev->dev, nf->dma, nf->length, DMA_TO_DEVICE);
	}

	nf = &pbuf->frag_array[0];
	dma_unmap_single(&pdev->dev, nf->dma, skb_headlen(skb), DMA_TO_DEVICE);
	pbuf->skb = NULL;
}

static inline void qlcnic_clear_cmddesc(u64 *desc)
{
	desc[0] = 0ULL;
	desc[2] = 0ULL;
	desc[7] = 0ULL;
}

netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct qlcnic_host_tx_ring *tx_ring;
	struct qlcnic_cmd_buffer *pbuf;
	struct qlcnic_skb_frag *buffrag;
	struct cmd_desc_type0 *hwdesc, *first_desc;
	struct pci_dev *pdev;
	struct ethhdr *phdr;
	int i, k, frag_count, delta = 0;
	u32 producer, num_txd;
	u16 protocol;
	bool l4_is_udp = false;

	if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
		netif_tx_stop_all_queues(netdev);
		return NETDEV_TX_BUSY;
	}

	if (adapter->flags & QLCNIC_MACSPOOF) {
		phdr = (struct ethhdr *)skb->data;
		if (!ether_addr_equal(phdr->h_source, adapter->mac_addr))
			goto drop_packet;
	}

	tx_ring = &adapter->tx_ring[skb_get_queue_mapping(skb)];
	num_txd = tx_ring->num_desc;

	frag_count = skb_shinfo(skb)->nr_frags + 1;

	/* 14 frags supported for normal packet and
	 * 32 frags supported for TSO packet
	 */
	if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
		for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
			delta += skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (!__pskb_pull_tail(skb, delta))
			goto drop_packet;

		frag_count = 1 + skb_shinfo(skb)->nr_frags;
	}

	if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
		netif_tx_stop_queue(tx_ring->txq);
		if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
			netif_tx_start_queue(tx_ring->txq);
		} else {
			tx_ring->tx_stats.xmit_off++;
			return NETDEV_TX_BUSY;
		}
	}

	producer = tx_ring->producer;
	pbuf = &tx_ring->cmd_buf_arr[producer];
	pdev = adapter->pdev;
	first_desc = &tx_ring->desc_head[producer];
	hwdesc = &tx_ring->desc_head[producer];
	qlcnic_clear_cmddesc((u64 *)hwdesc);

	if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
		adapter->stats.tx_dma_map_error++;
		goto drop_packet;
	}

	pbuf->skb = skb;
	pbuf->frag_count = frag_count;

	qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
	qlcnic_set_tx_port(first_desc, adapter->portnum);

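	/* Each command descriptor holds up to four buffer address/length
	 * pairs (addr_buffer1..4), so advance to a fresh descriptor on
	 * every fourth fragment.
	 */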
	for (i = 0; i < frag_count; i++) {
		k = i % 4;

		if ((k == 0) && (i > 0)) {
			/* move to next desc. */
			producer = get_next_index(producer, num_txd);
			hwdesc = &tx_ring->desc_head[producer];
			qlcnic_clear_cmddesc((u64 *)hwdesc);
			tx_ring->cmd_buf_arr[producer].skb = NULL;
		}

		buffrag = &pbuf->frag_array[i];
		hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
		switch (k) {
		case 0:
			hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
			break;
		case 1:
			hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
			break;
		case 2:
			hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
			break;
		case 3:
			hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
			break;
		}
	}

	tx_ring->producer = get_next_index(producer, num_txd);
	smp_mb();

	protocol = ntohs(skb->protocol);
	if (protocol == ETH_P_IP)
		l4_is_udp = ip_hdr(skb)->protocol == IPPROTO_UDP;
	else if (protocol == ETH_P_IPV6)
		l4_is_udp = ipv6_hdr(skb)->nexthdr == IPPROTO_UDP;

	/* Check if it is a VXLAN packet */
	if (!skb->encapsulation || !l4_is_udp ||
	    !qlcnic_encap_tx_offload(adapter)) {
		if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb,
					   tx_ring)))
			goto unwind_buff;
	} else {
		if (unlikely(qlcnic_tx_encap_pkt(adapter, first_desc,
						 skb, tx_ring)))
			goto unwind_buff;
	}

	if (adapter->drv_mac_learn)
		qlcnic_send_filter(adapter, first_desc, skb, tx_ring);

	tx_ring->tx_stats.tx_bytes += skb->len;
	tx_ring->tx_stats.xmit_called++;

	/* Ensure writes are complete before HW fetches Tx descriptors */
	wmb();
	qlcnic_update_cmd_producer(tx_ring);

	return NETDEV_TX_OK;

unwind_buff:
	qlcnic_unmap_buffers(pdev, skb, pbuf);
drop_packet:
	adapter->stats.txdropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->ahw->linkup && !linkup) {
		netdev_info(netdev, "NIC Link is down\n");
		adapter->ahw->linkup = 0;
		netif_carrier_off(netdev);
	} else if (!adapter->ahw->linkup && linkup) {
		adapter->ahw->linkup = 1;

		/* Do not advertise Link up to the stack if device
		 * is in loopback mode
		 */
		if (qlcnic_83xx_check(adapter) && adapter->ahw->lb_mode) {
			netdev_info(netdev, "NIC Link is up for loopback test\n");
			return;
		}

		netdev_info(netdev, "NIC Link is up\n");
		netif_carrier_on(netdev);
	}
}

static int qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
			       struct qlcnic_host_rds_ring *rds_ring,
			       struct qlcnic_rx_buffer *buffer)
{
	struct sk_buff *skb;
	dma_addr_t dma;
	struct pci_dev *pdev = adapter->pdev;

	skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size);
	if (!skb) {
		adapter->stats.skb_alloc_failure++;
		return -ENOMEM;
	}

	skb_reserve(skb, NET_IP_ALIGN);
	dma = dma_map_single(&pdev->dev, skb->data, rds_ring->dma_size,
			     DMA_FROM_DEVICE);

	if (dma_mapping_error(&pdev->dev, dma)) {
		adapter->stats.rx_dma_map_error++;
		dev_kfree_skb_any(skb);
		return -ENOMEM;
	}

	buffer->skb = skb;
	buffer->dma = dma;

	return 0;
}

static void qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
					struct qlcnic_host_rds_ring *rds_ring,
					u8 ring_id)
{
	struct rcv_desc *pdesc;
	struct qlcnic_rx_buffer *buffer;
	int count = 0;
	uint32_t producer, handle;
	struct list_head *head;

	if (!spin_trylock(&rds_ring->lock))
		return;

	producer = rds_ring->producer;
	head = &rds_ring->free_list;
	while (!list_empty(head)) {
		buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);

		if (!buffer->skb) {
			if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
				break;
		}
		count++;
		list_del(&buffer->list);

		/* make a rcv descriptor */
		pdesc = &rds_ring->desc_head[producer];
		handle = qlcnic_get_ref_handle(adapter,
					       buffer->ref_handle, ring_id);
		pdesc->reference_handle = cpu_to_le16(handle);
		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
		producer = get_next_index(producer, rds_ring->num_desc);
	}
	if (count) {
		rds_ring->producer = producer;
		writel((producer - 1) & (rds_ring->num_desc - 1),
		       rds_ring->crb_rcv_producer);
	}
	spin_unlock(&rds_ring->lock);
}

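/* Reclaim completed Tx buffers between the driver's sw_consumer and the
 * firmware-updated hw_consumer: unmap DMA, free the skbs, and wake the
 * Tx queue once more than TX_STOP_THRESH descriptors are free again.
 * Returns nonzero when the ring has been fully drained (or when the
 * clean lock is already held by another context).
 */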
static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
				   struct qlcnic_host_tx_ring *tx_ring,
				   int budget)
{
	u32 sw_consumer, hw_consumer;
	int i, done, count = 0;
	struct qlcnic_cmd_buffer *buffer;
	struct pci_dev *pdev = adapter->pdev;
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_skb_frag *frag;

	if (!spin_trylock(&tx_ring->tx_clean_lock))
		return 1;

	sw_consumer = tx_ring->sw_consumer;
	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));

	while (sw_consumer != hw_consumer) {
		buffer = &tx_ring->cmd_buf_arr[sw_consumer];
		if (buffer->skb) {
			frag = &buffer->frag_array[0];
			dma_unmap_single(&pdev->dev, frag->dma, frag->length,
					 DMA_TO_DEVICE);
			frag->dma = 0ULL;
			for (i = 1; i < buffer->frag_count; i++) {
				frag++;
				dma_unmap_page(&pdev->dev, frag->dma,
					       frag->length, DMA_TO_DEVICE);
				frag->dma = 0ULL;
			}
			tx_ring->tx_stats.xmit_finished++;
			dev_kfree_skb_any(buffer->skb);
			buffer->skb = NULL;
		}

		sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
		if (++count >= budget)
			break;
	}

	tx_ring->sw_consumer = sw_consumer;

	if (count && netif_running(netdev)) {
		smp_mb();
		if (netif_tx_queue_stopped(tx_ring->txq) &&
		    netif_carrier_ok(netdev)) {
			if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
				netif_tx_wake_queue(tx_ring->txq);
				tx_ring->tx_stats.xmit_on++;
			}
		}
		adapter->tx_timeo_cnt = 0;
	}
	/*
	 * If everything is freed up to consumer then check if the ring is full
	 * If the ring is full then check if more needs to be freed and
	 * schedule the call back again.
	 *
	 * This happens when there are 2 CPUs. One could be freeing and the
	 * other filling it. If the ring is full when we get out of here and
	 * the card has already interrupted the host then the host can miss the
	 * interrupt.
	 *
	 * There is still a possible race condition and the host could miss an
	 * interrupt. The card has to take care of this.
	 */
	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
	done = (sw_consumer == hw_consumer);

	spin_unlock(&tx_ring->tx_clean_lock);

	return done;
}

static int qlcnic_poll(struct napi_struct *napi, int budget)
{
	int tx_complete, work_done;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_adapter *adapter;
	struct qlcnic_host_tx_ring *tx_ring;

	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
	adapter = sds_ring->adapter;
	tx_ring = sds_ring->tx_ring;

	tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring,
					      budget);
	work_done = qlcnic_process_rcv_ring(sds_ring, budget);

	/* Check if we need a repoll */
	if (!tx_complete)
		work_done = budget;

	if (work_done < budget) {
		napi_complete_done(&sds_ring->napi, work_done);
		if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
			qlcnic_enable_sds_intr(adapter, sds_ring);
			qlcnic_enable_tx_intr(adapter, tx_ring);
		}
	}

	return work_done;
}

static int qlcnic_tx_poll(struct napi_struct *napi, int budget)
{
	struct qlcnic_host_tx_ring *tx_ring;
	struct qlcnic_adapter *adapter;
	int work_done;

	tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
	adapter = tx_ring->adapter;

	work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
	if (work_done) {
		napi_complete(&tx_ring->napi);
		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
			qlcnic_enable_tx_intr(adapter, tx_ring);
	} else {
		/* As qlcnic_process_cmd_ring() returned 0, we need a repoll */
		work_done = budget;
	}

	return work_done;
}

static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
{
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_adapter *adapter;
	int work_done;

	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
	adapter = sds_ring->adapter;

	work_done = qlcnic_process_rcv_ring(sds_ring, budget);

	if (work_done < budget) {
		napi_complete_done(&sds_ring->napi, work_done);
		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
			qlcnic_enable_sds_intr(adapter, sds_ring);
	}

	return work_done;
}

static void qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
				    struct qlcnic_fw_msg *msg)
{
	u32 cable_OUI;
	u16 cable_len, link_speed;
	u8  link_status, module, duplex, autoneg, lb_status = 0;
	struct net_device *netdev = adapter->netdev;

	adapter->ahw->has_link_events = 1;

	cable_OUI = msg->body[1] & 0xffffffff;
	cable_len = (msg->body[1] >> 32) & 0xffff;
	link_speed = (msg->body[1] >> 48) & 0xffff;

	link_status = msg->body[2] & 0xff;
	duplex = (msg->body[2] >> 16) & 0xff;
	autoneg = (msg->body[2] >> 24) & 0xff;
	lb_status = (msg->body[2] >> 32) & 0x3;

	module = (msg->body[2] >> 8) & 0xff;
	if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE)
		dev_info(&netdev->dev,
			 "unsupported cable: OUI 0x%x, length %d\n",
			 cable_OUI, cable_len);
	else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN)
		dev_info(&netdev->dev, "unsupported cable length %d\n",
			 cable_len);

	if (!link_status && (lb_status == QLCNIC_ILB_MODE ||
	    lb_status == QLCNIC_ELB_MODE))
		adapter->ahw->loopback_state |= QLCNIC_LINKEVENT;

	qlcnic_advert_link_change(adapter, link_status);

	if (duplex == LINKEVENT_FULL_DUPLEX)
		adapter->ahw->link_duplex = DUPLEX_FULL;
	else
		adapter->ahw->link_duplex = DUPLEX_HALF;

	adapter->ahw->module_type = module;
	adapter->ahw->link_autoneg = autoneg;

	if (link_status) {
		adapter->ahw->link_speed = link_speed;
	} else {
		adapter->ahw->link_speed = SPEED_UNKNOWN;
		adapter->ahw->link_duplex = DUPLEX_UNKNOWN;
	}
}

static void qlcnic_handle_fw_message(int desc_cnt, int index,
				     struct qlcnic_host_sds_ring *sds_ring)
{
	struct qlcnic_fw_msg msg;
	struct status_desc *desc;
	struct qlcnic_adapter *adapter;
	struct device *dev;
	int i = 0, opcode, ret;

	while (desc_cnt > 0 && i < 8) {
		desc = &sds_ring->desc_head[index];
		msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
		msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);

		index = get_next_index(index, sds_ring->num_desc);
		desc_cnt--;
	}

	adapter = sds_ring->adapter;
	dev = &adapter->pdev->dev;
	opcode = qlcnic_get_nic_msg_opcode(msg.body[0]);

	switch (opcode) {
	case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
		qlcnic_handle_linkevent(adapter, &msg);
		break;
	case QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK:
		ret = (u32)(msg.body[1]);
		switch (ret) {
		case 0:
			adapter->ahw->loopback_state |= QLCNIC_LB_RESPONSE;
			break;
		case 1:
			dev_info(dev, "loopback already in progress\n");
			adapter->ahw->diag_cnt = -EINPROGRESS;
			break;
		case 2:
			dev_info(dev, "loopback cable is not connected\n");
			adapter->ahw->diag_cnt = -ENODEV;
			break;
		default:
			dev_info(dev,
				 "loopback configure request failed, err %x\n",
				 ret);
			adapter->ahw->diag_cnt = -EIO;
			break;
		}
		break;
	case QLCNIC_C2H_OPCODE_GET_DCB_AEN:
		qlcnic_dcb_aen_handler(adapter->dcb, (void *)&msg);
		break;
	default:
		break;
	}
}

static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
					    struct qlcnic_host_rds_ring *ring,
					    u16 index, u16 cksum)
{
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;

	buffer = &ring->rx_buf_arr[index];
	if (unlikely(buffer->skb == NULL)) {
		WARN_ON(1);
		return NULL;
	}

	dma_unmap_single(&adapter->pdev->dev, buffer->dma, ring->dma_size,
			 DMA_FROM_DEVICE);

	skb = buffer->skb;
	if (likely((adapter->netdev->features & NETIF_F_RXCSUM) &&
		   (cksum == STATUS_CKSUM_OK || cksum == STATUS_CKSUM_LOOP))) {
		adapter->stats.csummed++;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		skb_checksum_none_assert(skb);
	}

	buffer->skb = NULL;

	return skb;
}

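/* If the frame carries an inline 802.1Q header, extract the tag and
 * strip the header by shifting the MAC addresses forward. With a PVID
 * configured, a tag matching rx_pvid is replaced by the no-VLAN marker
 * (0xffff); any other tag is rejected unless tagging is enabled.
 */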
static inline int qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter,
					  struct sk_buff *skb, u16 *vlan_tag)
{
	struct ethhdr *eth_hdr;

	if (!__vlan_get_tag(skb, vlan_tag)) {
		eth_hdr = (struct ethhdr *)skb->data;
		memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
		skb_pull(skb, VLAN_HLEN);
	}
	if (!adapter->rx_pvid)
		return 0;

	if (*vlan_tag == adapter->rx_pvid) {
		/* Outer vlan tag. Packet should follow non-vlan path */
		*vlan_tag = 0xffff;
		return 0;
	}
	if (adapter->flags & QLCNIC_TAGGING_ENABLED)
		return 0;

	return -EINVAL;
}

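/* Standard (non-LRO) receive: unmap the buffer for the referenced
 * handle, size the skb from the status word, optionally learn loopback
 * MACs, strip VLAN tags, and hand the skb to GRO.
 */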
static struct qlcnic_rx_buffer *
qlcnic_process_rcv(struct qlcnic_adapter *adapter,
		   struct qlcnic_host_sds_ring *sds_ring, int ring,
		   u64 sts_data0)
{
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	int index, length, cksum, pkt_offset, is_lb_pkt;
	u16 vid = 0xffff, t_vid;

	if (unlikely(ring >= adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_get_sts_refhandle(sts_data0);
	if (unlikely(index >= rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];
	length = qlcnic_get_sts_totallength(sts_data0);
	cksum  = qlcnic_get_sts_status(sts_data0);
	pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
	if (!skb)
		return buffer;

	if (adapter->rx_mac_learn) {
		t_vid = 0;
		is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
	}

	if (length > rds_ring->skb_size)
		skb_put(skb, rds_ring->skb_size);
	else
		skb_put(skb, length);

	if (pkt_offset)
		skb_pull(skb, pkt_offset);

	if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
		adapter->stats.rxdropped++;
		dev_kfree_skb(skb);
		return buffer;
	}

	skb->protocol = eth_type_trans(skb, netdev);

	if (vid != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);

	napi_gro_receive(&sds_ring->napi, skb);

	adapter->stats.rx_pkts++;
	adapter->stats.rxbytes += length;

	return buffer;
}

#define QLC_TCP_HDR_SIZE            20
#define QLC_TCP_TS_OPTION_SIZE      12
#define QLC_TCP_TS_HDR_SIZE         (QLC_TCP_HDR_SIZE + QLC_TCP_TS_OPTION_SIZE)

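/* Hand a firmware-aggregated LRO frame to the stack: rebuild the IP
 * total/payload length (and IPv4 checksum), restore the TCP sequence
 * number and PSH flag from the status words, and set gso_size when the
 * firmware reports the MSS.
 */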
static struct qlcnic_rx_buffer *
qlcnic_process_lro(struct qlcnic_adapter *adapter,
		   int ring, u64 sts_data0, u64 sts_data1)
{
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	struct iphdr *iph;
	struct ipv6hdr *ipv6h;
	struct tcphdr *th;
	bool push, timestamp;
	int index, l2_hdr_offset, l4_hdr_offset, is_lb_pkt;
	u16 lro_length, length, data_offset, t_vid, vid = 0xffff;
	u32 seq_number;

	if (unlikely(ring >= adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_get_lro_sts_refhandle(sts_data0);
	if (unlikely(index >= rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];

	timestamp = qlcnic_get_lro_sts_timestamp(sts_data0);
	lro_length = qlcnic_get_lro_sts_length(sts_data0);
	l2_hdr_offset = qlcnic_get_lro_sts_l2_hdr_offset(sts_data0);
	l4_hdr_offset = qlcnic_get_lro_sts_l4_hdr_offset(sts_data0);
	push = qlcnic_get_lro_sts_push_flag(sts_data0);
	seq_number = qlcnic_get_lro_sts_seq_number(sts_data1);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
	if (!skb)
		return buffer;

	if (adapter->rx_mac_learn) {
		t_vid = 0;
		is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
	}

	if (timestamp)
		data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE;
	else
		data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE;

	skb_put(skb, lro_length + data_offset);
	skb_pull(skb, l2_hdr_offset);

	if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
		adapter->stats.rxdropped++;
		dev_kfree_skb(skb);
		return buffer;
	}

	skb->protocol = eth_type_trans(skb, netdev);

	if (ntohs(skb->protocol) == ETH_P_IPV6) {
		ipv6h = (struct ipv6hdr *)skb->data;
		th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr));
		length = (th->doff << 2) + lro_length;
		ipv6h->payload_len = htons(length);
	} else {
		iph = (struct iphdr *)skb->data;
		th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
		length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
		csum_replace2(&iph->check, iph->tot_len, htons(length));
		iph->tot_len = htons(length);
	}

	th->psh = push;
	th->seq = htonl(seq_number);
	length = skb->len;

	if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) {
		skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1);
		if (skb->protocol == htons(ETH_P_IPV6))
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
		else
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	}

	if (vid != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	netif_receive_skb(skb);

	adapter->stats.lro_pkts++;
	adapter->stats.lrobytes += length;

	return buffer;
}

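/* Main Rx poll loop: consume status descriptors owned by the host,
 * dispatch by opcode (plain Rx, LRO, firmware message), return the
 * descriptors to firmware ownership, then refill and repost the RDS
 * rings and update the consumer index.
 */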
static int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
{
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_adapter *adapter = sds_ring->adapter;
	struct list_head *cur;
	struct status_desc *desc;
	struct qlcnic_rx_buffer *rxbuf;
	int opcode, desc_cnt, count = 0;
	u64 sts_data0, sts_data1;
	u8 ring;
	u32 consumer = sds_ring->consumer;

	while (count < max) {
		desc = &sds_ring->desc_head[consumer];
		sts_data0 = le64_to_cpu(desc->status_desc_data[0]);

		if (!(sts_data0 & STATUS_OWNER_HOST))
			break;

		desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
		opcode = qlcnic_get_sts_opcode(sts_data0);
		switch (opcode) {
		case QLCNIC_RXPKT_DESC:
		case QLCNIC_OLD_RXPKT_DESC:
		case QLCNIC_SYN_OFFLOAD:
			ring = qlcnic_get_sts_type(sts_data0);
			rxbuf = qlcnic_process_rcv(adapter, sds_ring, ring,
						   sts_data0);
			break;
		case QLCNIC_LRO_DESC:
			ring = qlcnic_get_lro_sts_type(sts_data0);
			sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
			rxbuf = qlcnic_process_lro(adapter, ring, sts_data0,
						   sts_data1);
			break;
		case QLCNIC_RESPONSE_DESC:
			qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
			goto skip;
		default:
			goto skip;
		}
		WARN_ON(desc_cnt > 1);

		if (likely(rxbuf))
			list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
		else
			adapter->stats.null_rxbuf++;
skip:
		for (; desc_cnt > 0; desc_cnt--) {
			desc = &sds_ring->desc_head[consumer];
			desc->status_desc_data[0] = QLCNIC_DESC_OWNER_FW;
			consumer = get_next_index(consumer, sds_ring->num_desc);
		}
		count++;
	}

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &adapter->recv_ctx->rds_rings[ring];
		if (!list_empty(&sds_ring->free_list[ring])) {
			list_for_each(cur, &sds_ring->free_list[ring]) {
				rxbuf = list_entry(cur, struct qlcnic_rx_buffer,
						   list);
				qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
			}
			spin_lock(&rds_ring->lock);
			list_splice_tail_init(&sds_ring->free_list[ring],
					      &rds_ring->free_list);
			spin_unlock(&rds_ring->lock);
		}

		qlcnic_post_rx_buffers_nodb(adapter, rds_ring, ring);
	}

	if (count) {
		sds_ring->consumer = consumer;
		writel(consumer, sds_ring->crb_sts_consumer);
	}

	return count;
}

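/* Replenish an RDS ring from its free list: allocate and map fresh
 * skbs where needed, build receive descriptors, and publish the new
 * producer index to the hardware.
 */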
   1438void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
   1439			    struct qlcnic_host_rds_ring *rds_ring, u8 ring_id)
   1440{
   1441	struct rcv_desc *pdesc;
   1442	struct qlcnic_rx_buffer *buffer;
   1443	int count = 0;
   1444	u32 producer, handle;
   1445	struct list_head *head;
   1446
   1447	producer = rds_ring->producer;
   1448	head = &rds_ring->free_list;
   1449
   1450	while (!list_empty(head)) {
   1451
   1452		buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
   1453
   1454		if (!buffer->skb) {
   1455			if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
   1456				break;
   1457		}
   1458
   1459		count++;
   1460		list_del(&buffer->list);
   1461
   1462		/* make a rcv descriptor  */
   1463		pdesc = &rds_ring->desc_head[producer];
   1464		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
   1465		handle = qlcnic_get_ref_handle(adapter, buffer->ref_handle,
   1466					       ring_id);
   1467		pdesc->reference_handle = cpu_to_le16(handle);
   1468		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
   1469		producer = get_next_index(producer, rds_ring->num_desc);
   1470	}
   1471
   1472	if (count) {
   1473		rds_ring->producer = producer;
   1474		writel((producer-1) & (rds_ring->num_desc-1),
   1475		       rds_ring->crb_rcv_producer);
   1476	}
   1477}
   1478
   1479static void dump_skb(struct sk_buff *skb, struct qlcnic_adapter *adapter)
   1480{
   1481	if (adapter->ahw->msg_enable & NETIF_MSG_DRV) {
   1482		char prefix[30];
   1483
   1484		scnprintf(prefix, sizeof(prefix), "%s: %s: ",
   1485			  dev_name(&adapter->pdev->dev), __func__);
   1486
   1487		print_hex_dump_debug(prefix, DUMP_PREFIX_NONE, 16, 1,
   1488				     skb->data, skb->len, true);
   1489	}
   1490}
   1491
   1492static void qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter, int ring,
   1493				    u64 sts_data0)
   1494{
   1495	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
   1496	struct sk_buff *skb;
   1497	struct qlcnic_host_rds_ring *rds_ring;
   1498	int index, length, cksum, pkt_offset;
   1499
   1500	if (unlikely(ring >= adapter->max_rds_rings))
   1501		return;
   1502
   1503	rds_ring = &recv_ctx->rds_rings[ring];
   1504
   1505	index = qlcnic_get_sts_refhandle(sts_data0);
   1506	length = qlcnic_get_sts_totallength(sts_data0);
   1507	if (unlikely(index >= rds_ring->num_desc))
   1508		return;
   1509
   1510	cksum  = qlcnic_get_sts_status(sts_data0);
   1511	pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
   1512
   1513	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
   1514	if (!skb)
   1515		return;
   1516
   1517	if (length > rds_ring->skb_size)
   1518		skb_put(skb, rds_ring->skb_size);
   1519	else
   1520		skb_put(skb, length);
   1521
   1522	if (pkt_offset)
   1523		skb_pull(skb, pkt_offset);
   1524
   1525	if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
   1526		adapter->ahw->diag_cnt++;
   1527	else
   1528		dump_skb(skb, adapter);
   1529
   1530	dev_kfree_skb_any(skb);
   1531	adapter->stats.rx_pkts++;
   1532	adapter->stats.rxbytes += length;
   1533
   1534	return;
   1535}
   1536
   1537void qlcnic_82xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
   1538{
   1539	struct qlcnic_adapter *adapter = sds_ring->adapter;
   1540	struct status_desc *desc;
   1541	u64 sts_data0;
   1542	int ring, opcode, desc_cnt;
   1543
   1544	u32 consumer = sds_ring->consumer;
   1545
   1546	desc = &sds_ring->desc_head[consumer];
   1547	sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
   1548
   1549	if (!(sts_data0 & STATUS_OWNER_HOST))
   1550		return;
   1551
   1552	desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
   1553	opcode = qlcnic_get_sts_opcode(sts_data0);
   1554	switch (opcode) {
   1555	case QLCNIC_RESPONSE_DESC:
   1556		qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
   1557		break;
   1558	default:
   1559		ring = qlcnic_get_sts_type(sts_data0);
   1560		qlcnic_process_rcv_diag(adapter, ring, sts_data0);
   1561		break;
   1562	}
   1563
   1564	for (; desc_cnt > 0; desc_cnt--) {
   1565		desc = &sds_ring->desc_head[consumer];
   1566		desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
   1567		consumer = get_next_index(consumer, sds_ring->num_desc);
   1568	}
   1569
   1570	sds_ring->consumer = consumer;
   1571	writel(consumer, sds_ring->crb_sts_consumer);
   1572}
   1573
   1574int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
   1575			 struct net_device *netdev)
   1576{
   1577	int ring;
   1578	struct qlcnic_host_sds_ring *sds_ring;
   1579	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
   1580	struct qlcnic_host_tx_ring *tx_ring;
   1581
   1582	if (qlcnic_alloc_sds_rings(recv_ctx, adapter->drv_sds_rings))
   1583		return -ENOMEM;
   1584
   1585	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
   1586		sds_ring = &recv_ctx->sds_rings[ring];
   1587		if (qlcnic_check_multi_tx(adapter) &&
   1588		    !adapter->ahw->diag_test) {
   1589			netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll,
   1590				       NAPI_POLL_WEIGHT);
   1591		} else {
   1592			if (ring == (adapter->drv_sds_rings - 1))
   1593				netif_napi_add(netdev, &sds_ring->napi,
   1594					       qlcnic_poll,
   1595					       NAPI_POLL_WEIGHT);
   1596			else
   1597				netif_napi_add(netdev, &sds_ring->napi,
   1598					       qlcnic_rx_poll,
   1599					       NAPI_POLL_WEIGHT);
   1600		}
   1601	}
   1602
   1603	if (qlcnic_alloc_tx_rings(adapter, netdev)) {
   1604		qlcnic_free_sds_rings(recv_ctx);
   1605		return -ENOMEM;
   1606	}
   1607
   1608	if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
   1609		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
   1610			tx_ring = &adapter->tx_ring[ring];
   1611			netif_napi_add_tx(netdev, &tx_ring->napi,
   1612					  qlcnic_tx_poll);
   1613		}
   1614	}
   1615
   1616	return 0;
   1617}
   1618
   1619void qlcnic_82xx_napi_del(struct qlcnic_adapter *adapter)
   1620{
   1621	int ring;
   1622	struct qlcnic_host_sds_ring *sds_ring;
   1623	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
   1624	struct qlcnic_host_tx_ring *tx_ring;
   1625
   1626	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
   1627		sds_ring = &recv_ctx->sds_rings[ring];
   1628		netif_napi_del(&sds_ring->napi);
   1629	}
   1630
   1631	qlcnic_free_sds_rings(adapter->recv_ctx);
   1632
   1633	if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
   1634		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
   1635			tx_ring = &adapter->tx_ring[ring];
   1636			netif_napi_del(&tx_ring->napi);
   1637		}
   1638	}
   1639
   1640	qlcnic_free_tx_rings(adapter);
   1641}
   1642
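/* Start polling on an adapter that is fully up. Dedicated Tx NAPI
 * contexts are enabled only in the multi-Tx, MSI-X, non-diag case,
 * mirroring the registration logic in qlcnic_82xx_napi_add().
 */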
   1643void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter)
   1644{
   1645	int ring;
   1646	struct qlcnic_host_sds_ring *sds_ring;
   1647	struct qlcnic_host_tx_ring *tx_ring;
   1648	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
   1649
   1650	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
   1651		return;
   1652
   1653	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
   1654		sds_ring = &recv_ctx->sds_rings[ring];
   1655		napi_enable(&sds_ring->napi);
   1656		qlcnic_enable_sds_intr(adapter, sds_ring);
   1657	}
   1658
   1659	if (qlcnic_check_multi_tx(adapter) &&
   1660	    (adapter->flags & QLCNIC_MSIX_ENABLED) &&
   1661	    !adapter->ahw->diag_test) {
   1662		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
   1663			tx_ring = &adapter->tx_ring[ring];
   1664			napi_enable(&tx_ring->napi);
   1665			qlcnic_enable_tx_intr(adapter, tx_ring);
   1666		}
   1667	}
   1668}
   1669
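/* Quiesce in the reverse order of qlcnic_82xx_napi_enable(): mask the
 * interrupt first, wait for an in-flight poll to finish, then disable
 * the NAPI context.
 */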
   1670void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
   1671{
   1672	int ring;
   1673	struct qlcnic_host_sds_ring *sds_ring;
   1674	struct qlcnic_host_tx_ring *tx_ring;
   1675	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
   1676
   1677	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
   1678		return;
   1679
   1680	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
   1681		sds_ring = &recv_ctx->sds_rings[ring];
   1682		qlcnic_disable_sds_intr(adapter, sds_ring);
   1683		napi_synchronize(&sds_ring->napi);
   1684		napi_disable(&sds_ring->napi);
   1685	}
   1686
   1687	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
   1688	    !adapter->ahw->diag_test &&
   1689	    qlcnic_check_multi_tx(adapter)) {
   1690		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
   1691			tx_ring = &adapter->tx_ring[ring];
   1692			qlcnic_disable_tx_intr(adapter, tx_ring);
   1693			napi_synchronize(&tx_ring->napi);
   1694			napi_disable(&tx_ring->napi);
   1695		}
   1696	}
   1697}
   1698
   1699#define QLC_83XX_NORMAL_LB_PKT	(1ULL << 36)
   1700#define QLC_83XX_LRO_LB_PKT	(1ULL << 46)
   1701
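/* The 83xx firmware flags frames looped back from our own Tx path in
 * the second status word: bit 36 for regular and bit 46 for LRO
 * completions. The result feeds the MAC-learning filter below.
 */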
   1702static inline int qlcnic_83xx_is_lb_pkt(u64 sts_data, int lro_pkt)
   1703{
   1704	if (lro_pkt)
   1705		return (sts_data & QLC_83XX_LRO_LB_PKT) ? 1 : 0;
   1706	else
   1707		return (sts_data & QLC_83XX_NORMAL_LB_PKT) ? 1 : 0;
   1708}
   1709
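/* The low 7 bits of the second status word carry the encapsulation
 * header length. A non-zero value on a frame already marked
 * CHECKSUM_UNNECESSARY is treated as the inner (tunnelled) checksum
 * having been verified too, hence csum_level = 1 below.
 */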
   1710#define QLCNIC_ENCAP_LENGTH_MASK	0x7f
   1711
   1712static inline u8 qlcnic_encap_length(u64 sts_data)
   1713{
   1714	return sts_data & QLCNIC_ENCAP_LENGTH_MASK;
   1715}
   1716
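/* Fast-path Rx completion for 83xx: look up the RDS buffer named by
 * the status words, build an skb, record any VLAN tag, optionally
 * learn loopback filters, and hand the frame to GRO. Returns the
 * buffer so the caller can queue it for re-posting, or NULL when the
 * ring or index is out of range and the event must be skipped.
 */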
   1717static struct qlcnic_rx_buffer *
   1718qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
   1719			struct qlcnic_host_sds_ring *sds_ring,
   1720			u8 ring, u64 sts_data[])
   1721{
   1722	struct net_device *netdev = adapter->netdev;
   1723	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
   1724	struct qlcnic_rx_buffer *buffer;
   1725	struct sk_buff *skb;
   1726	struct qlcnic_host_rds_ring *rds_ring;
   1727	int index, length, cksum, is_lb_pkt;
   1728	u16 vid = 0xffff;
   1729	int err;
   1730
   1731	if (unlikely(ring >= adapter->max_rds_rings))
   1732		return NULL;
   1733
   1734	rds_ring = &recv_ctx->rds_rings[ring];
   1735
   1736	index = qlcnic_83xx_hndl(sts_data[0]);
   1737	if (unlikely(index >= rds_ring->num_desc))
   1738		return NULL;
   1739
   1740	buffer = &rds_ring->rx_buf_arr[index];
   1741	length = qlcnic_83xx_pktln(sts_data[0]);
   1742	cksum  = qlcnic_83xx_csum_status(sts_data[1]);
   1743	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
   1744	if (!skb)
   1745		return buffer;
   1746
   1747	if (length > rds_ring->skb_size)
   1748		skb_put(skb, rds_ring->skb_size);
   1749	else
   1750		skb_put(skb, length);
   1751
   1752	err = qlcnic_check_rx_tagging(adapter, skb, &vid);
   1753
   1754	if (adapter->rx_mac_learn) {
   1755		is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 0);
   1756		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, vid);
   1757	}
   1758
   1759	if (unlikely(err)) {
   1760		adapter->stats.rxdropped++;
   1761		dev_kfree_skb(skb);
   1762		return buffer;
   1763	}
   1764
   1765	skb->protocol = eth_type_trans(skb, netdev);
   1766
   1767	if (qlcnic_encap_length(sts_data[1]) &&
   1768	    skb->ip_summed == CHECKSUM_UNNECESSARY) {
   1769		skb->csum_level = 1;
   1770		adapter->stats.encap_rx_csummed++;
   1771	}
   1772
   1773	if (vid != 0xffff)
   1774		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
   1775
   1776	napi_gro_receive(&sds_ring->napi, skb);
   1777
   1778	adapter->stats.rx_pkts++;
   1779	adapter->stats.rxbytes += length;
   1780
   1781	return buffer;
   1782}
   1783
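/* LRO completion: the firmware has coalesced several TCP segments
 * into one buffer. Rebuild a coherent header by patching the IPv4
 * total length (plus header checksum) or the IPv6 payload length,
 * restore the PSH bit, and, when the firmware reports an MSS, set
 * gso_size/gso_type so the stack can resegment the aggregate.
 */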
   1784static struct qlcnic_rx_buffer *
   1785qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter,
   1786			u8 ring, u64 sts_data[])
   1787{
   1788	struct net_device *netdev = adapter->netdev;
   1789	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
   1790	struct qlcnic_rx_buffer *buffer;
   1791	struct sk_buff *skb;
   1792	struct qlcnic_host_rds_ring *rds_ring;
   1793	struct iphdr *iph;
   1794	struct ipv6hdr *ipv6h;
   1795	struct tcphdr *th;
   1796	bool push;
   1797	int l2_hdr_offset, l4_hdr_offset;
   1798	int index, is_lb_pkt;
   1799	u16 lro_length, length, data_offset, gso_size;
   1800	u16 vid = 0xffff;
   1801	int err;
   1802
   1803	if (unlikely(ring >= adapter->max_rds_rings))
   1804		return NULL;
   1805
   1806	rds_ring = &recv_ctx->rds_rings[ring];
   1807
   1808	index = qlcnic_83xx_hndl(sts_data[0]);
   1809	if (unlikely(index >= rds_ring->num_desc))
   1810		return NULL;
   1811
   1812	buffer = &rds_ring->rx_buf_arr[index];
   1813
   1814	lro_length = qlcnic_83xx_lro_pktln(sts_data[0]);
   1815	l2_hdr_offset = qlcnic_83xx_l2_hdr_off(sts_data[1]);
   1816	l4_hdr_offset = qlcnic_83xx_l4_hdr_off(sts_data[1]);
   1817	push = qlcnic_83xx_is_psh_bit(sts_data[1]);
   1818
   1819	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
   1820	if (!skb)
   1821		return buffer;
   1822
   1823	if (qlcnic_83xx_is_tstamp(sts_data[1]))
   1824		data_offset = l4_hdr_offset + QLCNIC_TCP_TS_HDR_SIZE;
   1825	else
   1826		data_offset = l4_hdr_offset + QLCNIC_TCP_HDR_SIZE;
   1827
   1828	skb_put(skb, lro_length + data_offset);
   1829	skb_pull(skb, l2_hdr_offset);
   1830
   1831	err = qlcnic_check_rx_tagging(adapter, skb, &vid);
   1832
   1833	if (adapter->rx_mac_learn) {
   1834		is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 1);
   1835		qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, vid);
   1836	}
   1837
   1838	if (unlikely(err)) {
   1839		adapter->stats.rxdropped++;
   1840		dev_kfree_skb(skb);
   1841		return buffer;
   1842	}
   1843
   1844	skb->protocol = eth_type_trans(skb, netdev);
    1845	if (skb->protocol == htons(ETH_P_IPV6)) {
   1846		ipv6h = (struct ipv6hdr *)skb->data;
   1847		th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr));
   1848
   1849		length = (th->doff << 2) + lro_length;
   1850		ipv6h->payload_len = htons(length);
   1851	} else {
   1852		iph = (struct iphdr *)skb->data;
   1853		th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
   1854		length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
   1855		csum_replace2(&iph->check, iph->tot_len, htons(length));
   1856		iph->tot_len = htons(length);
   1857	}
   1858
   1859	th->psh = push;
   1860	length = skb->len;
   1861
   1862	if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) {
   1863		gso_size = qlcnic_83xx_get_lro_sts_mss(sts_data[0]);
   1864		skb_shinfo(skb)->gso_size = gso_size;
   1865		if (skb->protocol == htons(ETH_P_IPV6))
   1866			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
   1867		else
   1868			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
   1869	}
   1870
   1871	if (vid != 0xffff)
   1872		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
   1873
   1874	netif_receive_skb(skb);
   1875
   1876	adapter->stats.lro_pkts++;
   1877	adapter->stats.lrobytes += length;
   1878	return buffer;
   1879}
   1880
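/* Main 83xx Rx work loop, bounded by the NAPI budget. A zero opcode
 * in the second status word marks the first descriptor still owned by
 * the firmware, which terminates the loop. Consumed buffers are
 * collected per RDS ring, refitted with fresh skbs and re-posted
 * before the new consumer index is written back to the hardware.
 */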
   1881static int qlcnic_83xx_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring,
   1882					int max)
   1883{
   1884	struct qlcnic_host_rds_ring *rds_ring;
   1885	struct qlcnic_adapter *adapter = sds_ring->adapter;
   1886	struct list_head *cur;
   1887	struct status_desc *desc;
   1888	struct qlcnic_rx_buffer *rxbuf = NULL;
   1889	u8 ring;
   1890	u64 sts_data[2];
   1891	int count = 0, opcode;
   1892	u32 consumer = sds_ring->consumer;
   1893
   1894	while (count < max) {
   1895		desc = &sds_ring->desc_head[consumer];
   1896		sts_data[1] = le64_to_cpu(desc->status_desc_data[1]);
   1897		opcode = qlcnic_83xx_opcode(sts_data[1]);
   1898		if (!opcode)
   1899			break;
   1900		sts_data[0] = le64_to_cpu(desc->status_desc_data[0]);
   1901		ring = QLCNIC_FETCH_RING_ID(sts_data[0]);
   1902
   1903		switch (opcode) {
   1904		case QLC_83XX_REG_DESC:
   1905			rxbuf = qlcnic_83xx_process_rcv(adapter, sds_ring,
   1906							ring, sts_data);
   1907			break;
   1908		case QLC_83XX_LRO_DESC:
   1909			rxbuf = qlcnic_83xx_process_lro(adapter, ring,
   1910							sts_data);
   1911			break;
   1912		default:
   1913			dev_info(&adapter->pdev->dev,
   1914				 "Unknown opcode: 0x%x\n", opcode);
   1915			goto skip;
   1916		}
   1917
   1918		if (likely(rxbuf))
   1919			list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
   1920		else
   1921			adapter->stats.null_rxbuf++;
   1922skip:
   1923		desc = &sds_ring->desc_head[consumer];
   1924		/* Reset the descriptor */
   1925		desc->status_desc_data[1] = 0;
   1926		consumer = get_next_index(consumer, sds_ring->num_desc);
   1927		count++;
   1928	}
   1929	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
   1930		rds_ring = &adapter->recv_ctx->rds_rings[ring];
   1931		if (!list_empty(&sds_ring->free_list[ring])) {
   1932			list_for_each(cur, &sds_ring->free_list[ring]) {
   1933				rxbuf = list_entry(cur, struct qlcnic_rx_buffer,
   1934						   list);
   1935				qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
   1936			}
   1937			spin_lock(&rds_ring->lock);
   1938			list_splice_tail_init(&sds_ring->free_list[ring],
   1939					      &rds_ring->free_list);
   1940			spin_unlock(&rds_ring->lock);
   1941		}
   1942		qlcnic_post_rx_buffers_nodb(adapter, rds_ring, ring);
   1943	}
   1944	if (count) {
   1945		sds_ring->consumer = consumer;
   1946		writel(consumer, sds_ring->crb_sts_consumer);
   1947	}
   1948	return count;
   1949}
   1950
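/* Combined Tx+Rx poller used when MSI-X is enabled but the Tx
 * interrupt is shared with Rx (the SR-IOV VF case). If the single Tx
 * ring was not fully reaped within the budget, claim the whole budget
 * so NAPI reschedules us instead of re-enabling the interrupt.
 */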
   1951static int qlcnic_83xx_msix_sriov_vf_poll(struct napi_struct *napi, int budget)
   1952{
   1953	int tx_complete;
   1954	int work_done;
   1955	struct qlcnic_host_sds_ring *sds_ring;
   1956	struct qlcnic_adapter *adapter;
   1957	struct qlcnic_host_tx_ring *tx_ring;
   1958
   1959	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
   1960	adapter = sds_ring->adapter;
   1961	/* tx ring count = 1 */
   1962	tx_ring = adapter->tx_ring;
   1963
   1964	tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
   1965	work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
   1966
   1967	/* Check if we need a repoll */
   1968	if (!tx_complete)
   1969		work_done = budget;
   1970
   1971	if (work_done < budget) {
   1972		napi_complete_done(&sds_ring->napi, work_done);
   1973		qlcnic_enable_sds_intr(adapter, sds_ring);
   1974	}
   1975
   1976	return work_done;
   1977}
   1978
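/* Same combined Tx+Rx logic as the VF poller above, used for legacy
 * INTx/MSI where one vector covers all rings.
 */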
   1979static int qlcnic_83xx_poll(struct napi_struct *napi, int budget)
   1980{
   1981	int tx_complete;
   1982	int work_done;
   1983	struct qlcnic_host_sds_ring *sds_ring;
   1984	struct qlcnic_adapter *adapter;
   1985	struct qlcnic_host_tx_ring *tx_ring;
   1986
   1987	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
   1988	adapter = sds_ring->adapter;
   1989	/* tx ring count = 1 */
   1990	tx_ring = adapter->tx_ring;
   1991
   1992	tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
   1993	work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
   1994
   1995	/* Check if we need a repoll */
   1996	if (!tx_complete)
   1997		work_done = budget;
   1998
   1999	if (work_done < budget) {
   2000		napi_complete_done(&sds_ring->napi, work_done);
   2001		qlcnic_enable_sds_intr(adapter, sds_ring);
   2002	}
   2003
   2004	return work_done;
   2005}
   2006
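/* Tx-only poller for dedicated Tx MSI-X vectors. Note that
 * qlcnic_process_cmd_ring() returns a completion flag, not a packet
 * count: non-zero means the ring was fully reaped and polling can
 * stop; otherwise report the full budget to force a repoll.
 */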
   2007static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget)
   2008{
   2009	int work_done;
   2010	struct qlcnic_host_tx_ring *tx_ring;
   2011	struct qlcnic_adapter *adapter;
   2012
   2013	tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
   2014	adapter = tx_ring->adapter;
   2015	work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
   2016	if (work_done) {
   2017		napi_complete(&tx_ring->napi);
    2018		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
   2019			qlcnic_enable_tx_intr(adapter, tx_ring);
   2020	} else {
   2021		/* need a repoll */
   2022		work_done = budget;
   2023	}
   2024
   2025	return work_done;
   2026}
   2027
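/* Rx-only poller for the non-shared MSI-X layout; Tx completions are
 * reaped separately by qlcnic_83xx_msix_tx_poll().
 */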
   2028static int qlcnic_83xx_rx_poll(struct napi_struct *napi, int budget)
   2029{
   2030	int work_done;
   2031	struct qlcnic_host_sds_ring *sds_ring;
   2032	struct qlcnic_adapter *adapter;
   2033
   2034	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
   2035	adapter = sds_ring->adapter;
   2036	work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
   2037	if (work_done < budget) {
   2038		napi_complete_done(&sds_ring->napi, work_done);
   2039		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
   2040			qlcnic_enable_sds_intr(adapter, sds_ring);
   2041	}
   2042
   2043	return work_done;
   2044}
   2045
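/* Start polling on a fully-up 83xx adapter. SDS interrupts are
 * unmasked per ring only under MSI-X; dedicated Tx NAPI contexts
 * exist only when the Tx interrupt is not shared with Rx.
 */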
   2046void qlcnic_83xx_napi_enable(struct qlcnic_adapter *adapter)
   2047{
   2048	int ring;
   2049	struct qlcnic_host_sds_ring *sds_ring;
   2050	struct qlcnic_host_tx_ring *tx_ring;
   2051	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
   2052
   2053	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
   2054		return;
   2055
   2056	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
   2057		sds_ring = &recv_ctx->sds_rings[ring];
   2058		napi_enable(&sds_ring->napi);
   2059		if (adapter->flags & QLCNIC_MSIX_ENABLED)
   2060			qlcnic_enable_sds_intr(adapter, sds_ring);
   2061	}
   2062
   2063	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
   2064	    !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
   2065		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
   2066			tx_ring = &adapter->tx_ring[ring];
   2067			napi_enable(&tx_ring->napi);
   2068			qlcnic_enable_tx_intr(adapter, tx_ring);
   2069		}
   2070	}
   2071}
   2072
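/* Reverse of qlcnic_83xx_napi_enable(): mask each interrupt, wait for
 * in-flight polls, then disable the NAPI contexts.
 */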
   2073void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter)
   2074{
   2075	int ring;
   2076	struct qlcnic_host_sds_ring *sds_ring;
   2077	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
   2078	struct qlcnic_host_tx_ring *tx_ring;
   2079
   2080	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
   2081		return;
   2082
   2083	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
   2084		sds_ring = &recv_ctx->sds_rings[ring];
   2085		if (adapter->flags & QLCNIC_MSIX_ENABLED)
   2086			qlcnic_disable_sds_intr(adapter, sds_ring);
   2087		napi_synchronize(&sds_ring->napi);
   2088		napi_disable(&sds_ring->napi);
   2089	}
   2090
   2091	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
   2092	    !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
   2093		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
   2094			tx_ring = &adapter->tx_ring[ring];
   2095			qlcnic_disable_tx_intr(adapter, tx_ring);
   2096			napi_synchronize(&tx_ring->napi);
   2097			napi_disable(&tx_ring->napi);
   2098		}
   2099	}
   2100}
   2101
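/* Register NAPI pollers to match the interrupt layout: Rx-only
 * pollers plus per-ring Tx pollers for exclusive MSI-X vectors, the
 * combined VF poller when the Tx interrupt is shared, and the
 * combined legacy poller otherwise.
 */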
   2102int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
   2103			 struct net_device *netdev)
   2104{
   2105	int ring;
   2106	struct qlcnic_host_sds_ring *sds_ring;
   2107	struct qlcnic_host_tx_ring *tx_ring;
   2108	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
   2109
   2110	if (qlcnic_alloc_sds_rings(recv_ctx, adapter->drv_sds_rings))
   2111		return -ENOMEM;
   2112
   2113	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
   2114		sds_ring = &recv_ctx->sds_rings[ring];
   2115		if (adapter->flags & QLCNIC_MSIX_ENABLED) {
   2116			if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
   2117				netif_napi_add(netdev, &sds_ring->napi,
   2118					       qlcnic_83xx_rx_poll,
   2119					       NAPI_POLL_WEIGHT);
   2120			else
   2121				netif_napi_add(netdev, &sds_ring->napi,
   2122					       qlcnic_83xx_msix_sriov_vf_poll,
   2123					       NAPI_POLL_WEIGHT);
   2125		} else {
   2126			netif_napi_add(netdev, &sds_ring->napi,
   2127				       qlcnic_83xx_poll,
   2128				       NAPI_POLL_WEIGHT);
   2129		}
   2130	}
   2131
   2132	if (qlcnic_alloc_tx_rings(adapter, netdev)) {
   2133		qlcnic_free_sds_rings(recv_ctx);
   2134		return -ENOMEM;
   2135	}
   2136
   2137	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
   2138	    !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
   2139		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
   2140			tx_ring = &adapter->tx_ring[ring];
   2141			netif_napi_add_tx(netdev, &tx_ring->napi,
   2142					  qlcnic_83xx_msix_tx_poll);
   2143		}
   2144	}
   2145
   2146	return 0;
   2147}
   2148
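/* Teardown counterpart of qlcnic_83xx_napi_add(). */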
   2149void qlcnic_83xx_napi_del(struct qlcnic_adapter *adapter)
   2150{
   2151	int ring;
   2152	struct qlcnic_host_sds_ring *sds_ring;
   2153	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
   2154	struct qlcnic_host_tx_ring *tx_ring;
   2155
   2156	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
   2157		sds_ring = &recv_ctx->sds_rings[ring];
   2158		netif_napi_del(&sds_ring->napi);
   2159	}
   2160
   2161	qlcnic_free_sds_rings(adapter->recv_ctx);
   2162
   2163	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
   2164	    !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
   2165		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
   2166			tx_ring = &adapter->tx_ring[ring];
   2167			netif_napi_del(&tx_ring->napi);
   2168		}
   2169	}
   2170
   2171	qlcnic_free_tx_rings(adapter);
   2172}
   2173
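/* Diag-mode Rx for 83xx loopback tests: pull the frame, bump diag_cnt
 * when qlcnic_check_loopback_buff() recognizes the expected pattern
 * for our MAC address, and dump it otherwise. The skb is never passed
 * up the stack.
 */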
   2174static void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter *adapter,
   2175					 int ring, u64 sts_data[])
   2176{
   2177	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
   2178	struct sk_buff *skb;
   2179	struct qlcnic_host_rds_ring *rds_ring;
   2180	int index, length;
   2181
   2182	if (unlikely(ring >= adapter->max_rds_rings))
   2183		return;
   2184
   2185	rds_ring = &recv_ctx->rds_rings[ring];
   2186	index = qlcnic_83xx_hndl(sts_data[0]);
   2187	if (unlikely(index >= rds_ring->num_desc))
   2188		return;
   2189
   2190	length = qlcnic_83xx_pktln(sts_data[0]);
   2191
   2192	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
   2193	if (!skb)
   2194		return;
   2195
   2196	if (length > rds_ring->skb_size)
   2197		skb_put(skb, rds_ring->skb_size);
   2198	else
   2199		skb_put(skb, length);
   2200
   2201	if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
   2202		adapter->ahw->diag_cnt++;
   2203	else
   2204		dump_skb(skb, adapter);
   2205
   2206	dev_kfree_skb_any(skb);
   2208}
   2209
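/* Single-shot diag variant of the 83xx ring handler: process at most
 * one pending status descriptor, return it to the firmware and update
 * the consumer index.
 */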
   2210void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
   2211{
   2212	struct qlcnic_adapter *adapter = sds_ring->adapter;
   2213	struct status_desc *desc;
   2214	u64 sts_data[2];
   2215	int ring, opcode;
   2216	u32 consumer = sds_ring->consumer;
   2217
   2218	desc = &sds_ring->desc_head[consumer];
   2219	sts_data[0] = le64_to_cpu(desc->status_desc_data[0]);
   2220	sts_data[1] = le64_to_cpu(desc->status_desc_data[1]);
   2221	opcode = qlcnic_83xx_opcode(sts_data[1]);
   2222	if (!opcode)
   2223		return;
   2224
   2225	ring = QLCNIC_FETCH_RING_ID(sts_data[0]);
   2226	qlcnic_83xx_process_rcv_diag(adapter, ring, sts_data);
   2227	desc = &sds_ring->desc_head[consumer];
   2228	desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
   2229	consumer = get_next_index(consumer, sds_ring->num_desc);
   2230	sds_ring->consumer = consumer;
   2231	writel(consumer, sds_ring->crb_sts_consumer);
   2232}