cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

bnad.c (94176B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * Linux network driver for QLogic BR-series Converged Network Adapter.
      4 */
      5/*
      6 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
      7 * Copyright (c) 2014-2015 QLogic Corporation
      8 * All rights reserved
      9 * www.qlogic.com
     10 */
     11#include <linux/bitops.h>
     12#include <linux/netdevice.h>
     13#include <linux/skbuff.h>
     14#include <linux/etherdevice.h>
     15#include <linux/in.h>
     16#include <linux/ethtool.h>
     17#include <linux/if_vlan.h>
     18#include <linux/if_ether.h>
     19#include <linux/ip.h>
     20#include <linux/prefetch.h>
     21#include <linux/module.h>
     22
     23#include "bnad.h"
     24#include "bna.h"
     25#include "cna.h"
     26
     27static DEFINE_MUTEX(bnad_fwimg_mutex);
     28
     29/*
     30 * Module params
     31 */
     32static uint bnad_msix_disable;
     33module_param(bnad_msix_disable, uint, 0444);
     34MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");
     35
     36static uint bnad_ioc_auto_recover = 1;
     37module_param(bnad_ioc_auto_recover, uint, 0444);
     38MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
     39
     40static uint bna_debugfs_enable = 1;
     41module_param(bna_debugfs_enable, uint, 0644);
     42MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
     43		 " Range[false:0|true:1]");
     44
     45/*
     46 * Global variables
     47 */
     48static u32 bnad_rxqs_per_cq = 2;
     49static atomic_t bna_id;
     50static const u8 bnad_bcast_addr[] __aligned(2) =
     51	{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
     52
     53/*
     54 * Local MACROS
     55 */
     56#define BNAD_GET_MBOX_IRQ(_bnad)				\
     57	(((_bnad)->cfg_flags & BNAD_CF_MSIX) ?			\
     58	 ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
     59	 ((_bnad)->pcidev->irq))
     60
     61#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _size)	\
     62do {								\
     63	(_res_info)->res_type = BNA_RES_T_MEM;			\
     64	(_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;	\
     65	(_res_info)->res_u.mem_info.num = (_num);		\
     66	(_res_info)->res_u.mem_info.len = (_size);		\
     67} while (0)
     68
     69/*
     70 * Reinitialize completions in CQ, once Rx is taken down
     71 */
     72static void
     73bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb)
     74{
     75	struct bna_cq_entry *cmpl;
     76	int i;
     77
     78	for (i = 0; i < ccb->q_depth; i++) {
     79		cmpl = &((struct bna_cq_entry *)ccb->sw_q)[i];
     80		cmpl->valid = 0;
     81	}
     82}
     83
     84/* Tx Datapath functions */
     85
     86
     87/* Caller should ensure that the entry at unmap_q[index] is valid */
     88static u32
     89bnad_tx_buff_unmap(struct bnad *bnad,
     90			      struct bnad_tx_unmap *unmap_q,
     91			      u32 q_depth, u32 index)
     92{
     93	struct bnad_tx_unmap *unmap;
     94	struct sk_buff *skb;
     95	int vector, nvecs;
     96
     97	unmap = &unmap_q[index];
     98	nvecs = unmap->nvecs;
     99
    100	skb = unmap->skb;
    101	unmap->skb = NULL;
    102	unmap->nvecs = 0;
    103	dma_unmap_single(&bnad->pcidev->dev,
    104		dma_unmap_addr(&unmap->vectors[0], dma_addr),
    105		skb_headlen(skb), DMA_TO_DEVICE);
    106	dma_unmap_addr_set(&unmap->vectors[0], dma_addr, 0);
    107	nvecs--;
    108
    109	vector = 0;
    110	while (nvecs) {
    111		vector++;
    112		if (vector == BFI_TX_MAX_VECTORS_PER_WI) {
    113			vector = 0;
    114			BNA_QE_INDX_INC(index, q_depth);
    115			unmap = &unmap_q[index];
    116		}
    117
    118		dma_unmap_page(&bnad->pcidev->dev,
    119			dma_unmap_addr(&unmap->vectors[vector], dma_addr),
    120			dma_unmap_len(&unmap->vectors[vector], dma_len),
    121			DMA_TO_DEVICE);
    122		dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0);
    123		nvecs--;
    124	}
    125
    126	BNA_QE_INDX_INC(index, q_depth);
    127
    128	return index;
    129}
    130
    131/*
    132 * Frees all pending Tx Bufs
    133 * At this point no activity is expected on the Q,
    134 * so DMA unmap & freeing is fine.
    135 */
    136static void
    137bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
    138{
    139	struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
    140	struct sk_buff *skb;
    141	int i;
    142
    143	for (i = 0; i < tcb->q_depth; i++) {
    144		skb = unmap_q[i].skb;
    145		if (!skb)
    146			continue;
    147		bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i);
    148
    149		dev_kfree_skb_any(skb);
    150	}
    151}
    152
    153/*
    154 * bnad_txcmpl_process : Frees the Tx bufs on Tx completion
    155 * Can be called in a) Interrupt context
    156 *		    b) Sending context
    157 */
    158static u32
    159bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb)
    160{
    161	u32 sent_packets = 0, sent_bytes = 0;
    162	u32 wis, unmap_wis, hw_cons, cons, q_depth;
    163	struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
    164	struct bnad_tx_unmap *unmap;
    165	struct sk_buff *skb;
    166
    167	/* Just return if TX is stopped */
    168	if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
    169		return 0;
    170
    171	hw_cons = *(tcb->hw_consumer_index);
    172	rmb();
    173	cons = tcb->consumer_index;
    174	q_depth = tcb->q_depth;
    175
    176	wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth);
    177	BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));
    178
    179	while (wis) {
    180		unmap = &unmap_q[cons];
    181
    182		skb = unmap->skb;
    183
    184		sent_packets++;
    185		sent_bytes += skb->len;
    186
    187		unmap_wis = BNA_TXQ_WI_NEEDED(unmap->nvecs);
    188		wis -= unmap_wis;
    189
    190		cons = bnad_tx_buff_unmap(bnad, unmap_q, q_depth, cons);
    191		dev_kfree_skb_any(skb);
    192	}
    193
    194	/* Update consumer pointers. */
    195	tcb->consumer_index = hw_cons;
    196
    197	tcb->txq->tx_packets += sent_packets;
    198	tcb->txq->tx_bytes += sent_bytes;
    199
    200	return sent_packets;
    201}
    202
    203static u32
    204bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)
    205{
    206	struct net_device *netdev = bnad->netdev;
    207	u32 sent = 0;
    208
    209	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
    210		return 0;
    211
    212	sent = bnad_txcmpl_process(bnad, tcb);
    213	if (sent) {
    214		if (netif_queue_stopped(netdev) &&
    215		    netif_carrier_ok(netdev) &&
    216		    BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
    217				    BNAD_NETIF_WAKE_THRESHOLD) {
    218			if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
    219				netif_wake_queue(netdev);
    220				BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
    221			}
    222		}
    223	}
    224
    225	if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
    226		bna_ib_ack(tcb->i_dbell, sent);
    227
    228	smp_mb__before_atomic();
    229	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
    230
    231	return sent;
    232}
    233
    234/* MSIX Tx Completion Handler */
    235static irqreturn_t
    236bnad_msix_tx(int irq, void *data)
    237{
    238	struct bna_tcb *tcb = (struct bna_tcb *)data;
    239	struct bnad *bnad = tcb->bnad;
    240
    241	bnad_tx_complete(bnad, tcb);
    242
    243	return IRQ_HANDLED;
    244}
    245
    246static inline void
    247bnad_rxq_alloc_uninit(struct bnad *bnad, struct bna_rcb *rcb)
    248{
    249	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
    250
    251	unmap_q->reuse_pi = -1;
    252	unmap_q->alloc_order = -1;
    253	unmap_q->map_size = 0;
    254	unmap_q->type = BNAD_RXBUF_NONE;
    255}
    256
    257/* Default is page-based allocation. Multi-buffer support - TBD */
    258static int
    259bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
    260{
    261	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
    262	int order;
    263
    264	bnad_rxq_alloc_uninit(bnad, rcb);
    265
    266	order = get_order(rcb->rxq->buffer_size);
    267
    268	unmap_q->type = BNAD_RXBUF_PAGE;
    269
    270	if (bna_is_small_rxq(rcb->id)) {
    271		unmap_q->alloc_order = 0;
    272		unmap_q->map_size = rcb->rxq->buffer_size;
    273	} else {
    274		if (rcb->rxq->multi_buffer) {
    275			unmap_q->alloc_order = 0;
    276			unmap_q->map_size = rcb->rxq->buffer_size;
    277			unmap_q->type = BNAD_RXBUF_MULTI_BUFF;
    278		} else {
    279			unmap_q->alloc_order = order;
    280			unmap_q->map_size =
    281				(rcb->rxq->buffer_size > 2048) ?
    282				PAGE_SIZE << order : 2048;
    283		}
    284	}
    285
    286	BUG_ON((PAGE_SIZE << order) % unmap_q->map_size);
    287
    288	return 0;
    289}
    290
    291static inline void
    292bnad_rxq_cleanup_page(struct bnad *bnad, struct bnad_rx_unmap *unmap)
    293{
    294	if (!unmap->page)
    295		return;
    296
    297	dma_unmap_page(&bnad->pcidev->dev,
    298			dma_unmap_addr(&unmap->vector, dma_addr),
    299			unmap->vector.len, DMA_FROM_DEVICE);
    300	put_page(unmap->page);
    301	unmap->page = NULL;
    302	dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
    303	unmap->vector.len = 0;
    304}
    305
    306static inline void
    307bnad_rxq_cleanup_skb(struct bnad *bnad, struct bnad_rx_unmap *unmap)
    308{
    309	if (!unmap->skb)
    310		return;
    311
    312	dma_unmap_single(&bnad->pcidev->dev,
    313			dma_unmap_addr(&unmap->vector, dma_addr),
    314			unmap->vector.len, DMA_FROM_DEVICE);
    315	dev_kfree_skb_any(unmap->skb);
    316	unmap->skb = NULL;
    317	dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
    318	unmap->vector.len = 0;
    319}
    320
    321static void
    322bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
    323{
    324	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
    325	int i;
    326
    327	for (i = 0; i < rcb->q_depth; i++) {
    328		struct bnad_rx_unmap *unmap = &unmap_q->unmap[i];
    329
    330		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
    331			bnad_rxq_cleanup_skb(bnad, unmap);
    332		else
    333			bnad_rxq_cleanup_page(bnad, unmap);
    334	}
    335	bnad_rxq_alloc_uninit(bnad, rcb);
    336}
    337
    338static u32
    339bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
    340{
    341	u32 alloced, prod, q_depth;
    342	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
    343	struct bnad_rx_unmap *unmap, *prev;
    344	struct bna_rxq_entry *rxent;
    345	struct page *page;
    346	u32 page_offset, alloc_size;
    347	dma_addr_t dma_addr;
    348
    349	prod = rcb->producer_index;
    350	q_depth = rcb->q_depth;
    351
    352	alloc_size = PAGE_SIZE << unmap_q->alloc_order;
    353	alloced = 0;
    354
    355	while (nalloc--) {
    356		unmap = &unmap_q->unmap[prod];
    357
    358		if (unmap_q->reuse_pi < 0) {
    359			page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
    360					unmap_q->alloc_order);
    361			page_offset = 0;
    362		} else {
    363			prev = &unmap_q->unmap[unmap_q->reuse_pi];
    364			page = prev->page;
    365			page_offset = prev->page_offset + unmap_q->map_size;
    366			get_page(page);
    367		}
    368
    369		if (unlikely(!page)) {
    370			BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
    371			rcb->rxq->rxbuf_alloc_failed++;
    372			goto finishing;
    373		}
    374
    375		dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset,
    376					unmap_q->map_size, DMA_FROM_DEVICE);
    377		if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
    378			put_page(page);
    379			BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
    380			rcb->rxq->rxbuf_map_failed++;
    381			goto finishing;
    382		}
    383
    384		unmap->page = page;
    385		unmap->page_offset = page_offset;
    386		dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
    387		unmap->vector.len = unmap_q->map_size;
    388		page_offset += unmap_q->map_size;
    389
    390		if (page_offset < alloc_size)
    391			unmap_q->reuse_pi = prod;
    392		else
    393			unmap_q->reuse_pi = -1;
    394
    395		rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
    396		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
    397		BNA_QE_INDX_INC(prod, q_depth);
    398		alloced++;
    399	}
    400
    401finishing:
    402	if (likely(alloced)) {
    403		rcb->producer_index = prod;
    404		smp_mb();
    405		if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
    406			bna_rxq_prod_indx_doorbell(rcb);
    407	}
    408
    409	return alloced;
    410}
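/*
 * Editor's note: illustrative worked example, not part of the driver.
 * Assume 4 KiB pages and rxq->buffer_size == 2048 on the large (non-"small")
 * RxQ with multi-buffer disabled.  bnad_rxq_alloc_init() then computes
 * order = get_order(2048) == 0, so alloc_order == 0 and map_size == 2048,
 * and the BUG_ON() sanity check holds (4096 % 2048 == 0).  In
 * bnad_rxq_refill_page() each alloc_pages() call therefore yields
 * PAGE_SIZE / map_size == 2 receive buffers: after the first half of the
 * page is mapped, page_offset (2048) is still below alloc_size (4096), so
 * reuse_pi remembers that producer slot and the second half of the same
 * page is handed out (via get_page()) before a fresh page is allocated.
 */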
    411
    412static u32
    413bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
    414{
    415	u32 alloced, prod, q_depth, buff_sz;
    416	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
    417	struct bnad_rx_unmap *unmap;
    418	struct bna_rxq_entry *rxent;
    419	struct sk_buff *skb;
    420	dma_addr_t dma_addr;
    421
    422	buff_sz = rcb->rxq->buffer_size;
    423	prod = rcb->producer_index;
    424	q_depth = rcb->q_depth;
    425
    426	alloced = 0;
    427	while (nalloc--) {
    428		unmap = &unmap_q->unmap[prod];
    429
    430		skb = netdev_alloc_skb_ip_align(bnad->netdev, buff_sz);
    431
    432		if (unlikely(!skb)) {
    433			BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
    434			rcb->rxq->rxbuf_alloc_failed++;
    435			goto finishing;
    436		}
    437
    438		dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
    439					  buff_sz, DMA_FROM_DEVICE);
    440		if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
    441			dev_kfree_skb_any(skb);
    442			BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
    443			rcb->rxq->rxbuf_map_failed++;
    444			goto finishing;
    445		}
    446
    447		unmap->skb = skb;
    448		dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
    449		unmap->vector.len = buff_sz;
    450
    451		rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
    452		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
    453		BNA_QE_INDX_INC(prod, q_depth);
    454		alloced++;
    455	}
    456
    457finishing:
    458	if (likely(alloced)) {
    459		rcb->producer_index = prod;
    460		smp_mb();
    461		if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
    462			bna_rxq_prod_indx_doorbell(rcb);
    463	}
    464
    465	return alloced;
    466}
    467
    468static inline void
    469bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
    470{
    471	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
    472	u32 to_alloc;
    473
    474	to_alloc = BNA_QE_FREE_CNT(rcb, rcb->q_depth);
    475	if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT))
    476		return;
    477
    478	if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
    479		bnad_rxq_refill_skb(bnad, rcb, to_alloc);
    480	else
    481		bnad_rxq_refill_page(bnad, rcb, to_alloc);
    482}
    483
    484#define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
    485					BNA_CQ_EF_IPV6 | \
    486					BNA_CQ_EF_TCP | BNA_CQ_EF_UDP | \
    487					BNA_CQ_EF_L4_CKSUM_OK)
    488
    489#define flags_tcp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
    490				BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
    491#define flags_tcp6 (BNA_CQ_EF_IPV6 | \
    492				BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
    493#define flags_udp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
    494				BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
    495#define flags_udp6 (BNA_CQ_EF_IPV6 | \
    496				BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
    497
    498static void
    499bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb,
    500		    u32 sop_ci, u32 nvecs)
    501{
    502	struct bnad_rx_unmap_q *unmap_q;
    503	struct bnad_rx_unmap *unmap;
    504	u32 ci, vec;
    505
    506	unmap_q = rcb->unmap_q;
    507	for (vec = 0, ci = sop_ci; vec < nvecs; vec++) {
    508		unmap = &unmap_q->unmap[ci];
    509		BNA_QE_INDX_INC(ci, rcb->q_depth);
    510
    511		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
    512			bnad_rxq_cleanup_skb(bnad, unmap);
    513		else
    514			bnad_rxq_cleanup_page(bnad, unmap);
    515	}
    516}
    517
    518static void
    519bnad_cq_setup_skb_frags(struct bna_ccb *ccb, struct sk_buff *skb, u32 nvecs)
    520{
    521	struct bna_rcb *rcb;
    522	struct bnad *bnad;
    523	struct bnad_rx_unmap_q *unmap_q;
    524	struct bna_cq_entry *cq, *cmpl;
    525	u32 ci, pi, totlen = 0;
    526
    527	cq = ccb->sw_q;
    528	pi = ccb->producer_index;
    529	cmpl = &cq[pi];
    530
    531	rcb = bna_is_small_rxq(cmpl->rxq_id) ? ccb->rcb[1] : ccb->rcb[0];
    532	unmap_q = rcb->unmap_q;
    533	bnad = rcb->bnad;
    534	ci = rcb->consumer_index;
    535
    536	/* prefetch header */
    537	prefetch(page_address(unmap_q->unmap[ci].page) +
    538		 unmap_q->unmap[ci].page_offset);
    539
    540	while (nvecs--) {
    541		struct bnad_rx_unmap *unmap;
    542		u32 len;
    543
    544		unmap = &unmap_q->unmap[ci];
    545		BNA_QE_INDX_INC(ci, rcb->q_depth);
    546
    547		dma_unmap_page(&bnad->pcidev->dev,
    548			       dma_unmap_addr(&unmap->vector, dma_addr),
    549			       unmap->vector.len, DMA_FROM_DEVICE);
    550
    551		len = ntohs(cmpl->length);
    552		skb->truesize += unmap->vector.len;
    553		totlen += len;
    554
    555		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
    556				   unmap->page, unmap->page_offset, len);
    557
    558		unmap->page = NULL;
    559		unmap->vector.len = 0;
    560
    561		BNA_QE_INDX_INC(pi, ccb->q_depth);
    562		cmpl = &cq[pi];
    563	}
    564
    565	skb->len += totlen;
    566	skb->data_len += totlen;
    567}
    568
    569static inline void
    570bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb,
    571		  struct bnad_rx_unmap *unmap, u32 len)
    572{
    573	prefetch(skb->data);
    574
    575	dma_unmap_single(&bnad->pcidev->dev,
    576			dma_unmap_addr(&unmap->vector, dma_addr),
    577			unmap->vector.len, DMA_FROM_DEVICE);
    578
    579	skb_put(skb, len);
    580	skb->protocol = eth_type_trans(skb, bnad->netdev);
    581
    582	unmap->skb = NULL;
    583	unmap->vector.len = 0;
    584}
    585
    586static u32
    587bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
    588{
    589	struct bna_cq_entry *cq, *cmpl, *next_cmpl;
    590	struct bna_rcb *rcb = NULL;
    591	struct bnad_rx_unmap_q *unmap_q;
    592	struct bnad_rx_unmap *unmap = NULL;
    593	struct sk_buff *skb = NULL;
    594	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
    595	struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl;
    596	u32 packets = 0, len = 0, totlen = 0;
    597	u32 pi, vec, sop_ci = 0, nvecs = 0;
    598	u32 flags, masked_flags;
    599
    600	prefetch(bnad->netdev);
    601
    602	cq = ccb->sw_q;
    603
    604	while (packets < budget) {
    605		cmpl = &cq[ccb->producer_index];
    606		if (!cmpl->valid)
    607			break;
    608		/* The 'valid' field is set by the adapter, only after writing
    609		 * the other fields of completion entry. Hence, do not load
    610		 * other fields of completion entry *before* the 'valid' is
    611		 * loaded. Adding the rmb() here prevents the compiler and/or
    612		 * CPU from reordering the reads which would potentially result
    613		 * in reading stale values in completion entry.
    614		 */
    615		rmb();
    616
    617		BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
    618
    619		if (bna_is_small_rxq(cmpl->rxq_id))
    620			rcb = ccb->rcb[1];
    621		else
    622			rcb = ccb->rcb[0];
    623
    624		unmap_q = rcb->unmap_q;
    625
    626		/* start of packet ci */
    627		sop_ci = rcb->consumer_index;
    628
    629		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) {
    630			unmap = &unmap_q->unmap[sop_ci];
    631			skb = unmap->skb;
    632		} else {
    633			skb = napi_get_frags(&rx_ctrl->napi);
    634			if (unlikely(!skb))
    635				break;
    636		}
    637		prefetch(skb);
    638
    639		flags = ntohl(cmpl->flags);
    640		len = ntohs(cmpl->length);
    641		totlen = len;
    642		nvecs = 1;
    643
    644		/* Check all the completions for this frame.
    645		 * busy-wait doesn't help much, break here.
    646		 */
    647		if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q->type) &&
    648		    (flags & BNA_CQ_EF_EOP) == 0) {
    649			pi = ccb->producer_index;
    650			do {
    651				BNA_QE_INDX_INC(pi, ccb->q_depth);
    652				next_cmpl = &cq[pi];
    653
    654				if (!next_cmpl->valid)
    655					break;
    656				/* The 'valid' field is set by the adapter, only
    657				 * after writing the other fields of completion
    658				 * entry. Hence, do not load other fields of
    659				 * completion entry *before* the 'valid' is
    660				 * loaded. Adding the rmb() here prevents the
    661				 * compiler and/or CPU from reordering the reads
    662				 * which would potentially result in reading
    663				 * stale values in completion entry.
    664				 */
    665				rmb();
    666
    667				len = ntohs(next_cmpl->length);
    668				flags = ntohl(next_cmpl->flags);
    669
    670				nvecs++;
    671				totlen += len;
    672			} while ((flags & BNA_CQ_EF_EOP) == 0);
    673
    674			if (!next_cmpl->valid)
    675				break;
    676		}
    677		packets++;
    678
    679		/* TODO: BNA_CQ_EF_LOCAL ? */
    680		if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
    681						BNA_CQ_EF_FCS_ERROR |
    682						BNA_CQ_EF_TOO_LONG))) {
    683			bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs);
    684			rcb->rxq->rx_packets_with_error++;
    685
    686			goto next;
    687		}
    688
    689		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
    690			bnad_cq_setup_skb(bnad, skb, unmap, len);
    691		else
    692			bnad_cq_setup_skb_frags(ccb, skb, nvecs);
    693
    694		rcb->rxq->rx_packets++;
    695		rcb->rxq->rx_bytes += totlen;
    696		ccb->bytes_per_intr += totlen;
    697
    698		masked_flags = flags & flags_cksum_prot_mask;
    699
    700		if (likely
    701		    ((bnad->netdev->features & NETIF_F_RXCSUM) &&
    702		     ((masked_flags == flags_tcp4) ||
    703		      (masked_flags == flags_udp4) ||
    704		      (masked_flags == flags_tcp6) ||
    705		      (masked_flags == flags_udp6))))
    706			skb->ip_summed = CHECKSUM_UNNECESSARY;
    707		else
    708			skb_checksum_none_assert(skb);
    709
    710		if ((flags & BNA_CQ_EF_VLAN) &&
    711		    (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
    712			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag));
    713
    714		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
    715			netif_receive_skb(skb);
    716		else
    717			napi_gro_frags(&rx_ctrl->napi);
    718
    719next:
    720		BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth);
    721		for (vec = 0; vec < nvecs; vec++) {
    722			cmpl = &cq[ccb->producer_index];
    723			cmpl->valid = 0;
    724			BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
    725		}
    726	}
    727
    728	napi_gro_flush(&rx_ctrl->napi, false);
    729	if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
    730		bna_ib_ack_disable_irq(ccb->i_dbell, packets);
    731
    732	bnad_rxq_post(bnad, ccb->rcb[0]);
    733	if (ccb->rcb[1])
    734		bnad_rxq_post(bnad, ccb->rcb[1]);
    735
    736	return packets;
    737}
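/*
 * Editor's note: illustrative sketch, not part of the driver.  It restates
 * the completion-consume pattern used by bnad_cq_process() above: the
 * adapter writes the other fields of a CQ entry before setting 'valid', so
 * the CPU must test 'valid' first and order the remaining loads after it
 * with rmb().  The helper name is hypothetical.
 */
static inline bool bnad_example_cq_entry_ready(struct bna_cq_entry *cmpl,
					       u32 *len, u32 *flags)
{
	if (!cmpl->valid)
		return false;	/* nothing new posted by the adapter yet */
	rmb();			/* read length/flags only after 'valid' */
	*len = ntohs(cmpl->length);
	*flags = ntohl(cmpl->flags);
	return true;
}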
    738
    739static void
    740bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
    741{
    742	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
    743	struct napi_struct *napi = &rx_ctrl->napi;
    744
    745	if (likely(napi_schedule_prep(napi))) {
    746		__napi_schedule(napi);
    747		rx_ctrl->rx_schedule++;
    748	}
    749}
    750
    751/* MSIX Rx Path Handler */
    752static irqreturn_t
    753bnad_msix_rx(int irq, void *data)
    754{
    755	struct bna_ccb *ccb = (struct bna_ccb *)data;
    756
    757	if (ccb) {
    758		((struct bnad_rx_ctrl *)ccb->ctrl)->rx_intr_ctr++;
    759		bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
    760	}
    761
    762	return IRQ_HANDLED;
    763}
    764
    765/* Interrupt handlers */
    766
    767/* Mbox Interrupt Handlers */
    768static irqreturn_t
    769bnad_msix_mbox_handler(int irq, void *data)
    770{
    771	u32 intr_status;
    772	unsigned long flags;
    773	struct bnad *bnad = (struct bnad *)data;
    774
    775	spin_lock_irqsave(&bnad->bna_lock, flags);
    776	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
    777		spin_unlock_irqrestore(&bnad->bna_lock, flags);
    778		return IRQ_HANDLED;
    779	}
    780
    781	bna_intr_status_get(&bnad->bna, intr_status);
    782
    783	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
    784		bna_mbox_handler(&bnad->bna, intr_status);
    785
    786	spin_unlock_irqrestore(&bnad->bna_lock, flags);
    787
    788	return IRQ_HANDLED;
    789}
    790
    791static irqreturn_t
    792bnad_isr(int irq, void *data)
    793{
    794	int i, j;
    795	u32 intr_status;
    796	unsigned long flags;
    797	struct bnad *bnad = (struct bnad *)data;
    798	struct bnad_rx_info *rx_info;
    799	struct bnad_rx_ctrl *rx_ctrl;
    800	struct bna_tcb *tcb = NULL;
    801
    802	spin_lock_irqsave(&bnad->bna_lock, flags);
    803	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
    804		spin_unlock_irqrestore(&bnad->bna_lock, flags);
    805		return IRQ_NONE;
    806	}
    807
    808	bna_intr_status_get(&bnad->bna, intr_status);
    809
    810	if (unlikely(!intr_status)) {
    811		spin_unlock_irqrestore(&bnad->bna_lock, flags);
    812		return IRQ_NONE;
    813	}
    814
    815	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
    816		bna_mbox_handler(&bnad->bna, intr_status);
    817
    818	spin_unlock_irqrestore(&bnad->bna_lock, flags);
    819
    820	if (!BNA_IS_INTX_DATA_INTR(intr_status))
    821		return IRQ_HANDLED;
    822
    823	/* Process data interrupts */
    824	/* Tx processing */
    825	for (i = 0; i < bnad->num_tx; i++) {
    826		for (j = 0; j < bnad->num_txq_per_tx; j++) {
    827			tcb = bnad->tx_info[i].tcb[j];
    828			if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
    829				bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]);
    830		}
    831	}
    832	/* Rx processing */
    833	for (i = 0; i < bnad->num_rx; i++) {
    834		rx_info = &bnad->rx_info[i];
    835		if (!rx_info->rx)
    836			continue;
    837		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
    838			rx_ctrl = &rx_info->rx_ctrl[j];
    839			if (rx_ctrl->ccb)
    840				bnad_netif_rx_schedule_poll(bnad,
    841							    rx_ctrl->ccb);
    842		}
    843	}
    844	return IRQ_HANDLED;
    845}
    846
    847/*
    848 * Called in interrupt / callback context
    849 * with bna_lock held, so cfg_flags access is OK
    850 */
    851static void
    852bnad_enable_mbox_irq(struct bnad *bnad)
    853{
    854	clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
    855
    856	BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
    857}
    858
    859/*
    860 * Called with bnad->bna_lock held b'cos of
    861 * bnad->cfg_flags access.
    862 */
    863static void
    864bnad_disable_mbox_irq(struct bnad *bnad)
    865{
    866	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
    867
    868	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
    869}
    870
    871static void
    872bnad_set_netdev_perm_addr(struct bnad *bnad)
    873{
    874	struct net_device *netdev = bnad->netdev;
    875
    876	ether_addr_copy(netdev->perm_addr, bnad->perm_addr);
    877	if (is_zero_ether_addr(netdev->dev_addr))
    878		eth_hw_addr_set(netdev, bnad->perm_addr);
    879}
    880
    881/* Control Path Handlers */
    882
    883/* Callbacks */
    884void
    885bnad_cb_mbox_intr_enable(struct bnad *bnad)
    886{
    887	bnad_enable_mbox_irq(bnad);
    888}
    889
    890void
    891bnad_cb_mbox_intr_disable(struct bnad *bnad)
    892{
    893	bnad_disable_mbox_irq(bnad);
    894}
    895
    896void
    897bnad_cb_ioceth_ready(struct bnad *bnad)
    898{
    899	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
    900	complete(&bnad->bnad_completions.ioc_comp);
    901}
    902
    903void
    904bnad_cb_ioceth_failed(struct bnad *bnad)
    905{
    906	bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
    907	complete(&bnad->bnad_completions.ioc_comp);
    908}
    909
    910void
    911bnad_cb_ioceth_disabled(struct bnad *bnad)
    912{
    913	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
    914	complete(&bnad->bnad_completions.ioc_comp);
    915}
    916
    917static void
    918bnad_cb_enet_disabled(void *arg)
    919{
    920	struct bnad *bnad = (struct bnad *)arg;
    921
    922	netif_carrier_off(bnad->netdev);
    923	complete(&bnad->bnad_completions.enet_comp);
    924}
    925
    926void
    927bnad_cb_ethport_link_status(struct bnad *bnad,
    928			enum bna_link_status link_status)
    929{
    930	bool link_up = false;
    931
    932	link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
    933
    934	if (link_status == BNA_CEE_UP) {
    935		if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
    936			BNAD_UPDATE_CTR(bnad, cee_toggle);
    937		set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
    938	} else {
    939		if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
    940			BNAD_UPDATE_CTR(bnad, cee_toggle);
    941		clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
    942	}
    943
    944	if (link_up) {
    945		if (!netif_carrier_ok(bnad->netdev)) {
    946			uint tx_id, tcb_id;
    947			netdev_info(bnad->netdev, "link up\n");
    948			netif_carrier_on(bnad->netdev);
    949			BNAD_UPDATE_CTR(bnad, link_toggle);
    950			for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
    951				for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
    952				      tcb_id++) {
    953					struct bna_tcb *tcb =
    954					bnad->tx_info[tx_id].tcb[tcb_id];
    955					u32 txq_id;
    956					if (!tcb)
    957						continue;
    958
    959					txq_id = tcb->id;
    960
    961					if (test_bit(BNAD_TXQ_TX_STARTED,
    962						     &tcb->flags)) {
    963						/*
    964						 * Force an immediate
    965						 * Transmit Schedule */
    966						netif_wake_subqueue(
    967								bnad->netdev,
    968								txq_id);
    969						BNAD_UPDATE_CTR(bnad,
    970							netif_queue_wakeup);
    971					} else {
    972						netif_stop_subqueue(
    973								bnad->netdev,
    974								txq_id);
    975						BNAD_UPDATE_CTR(bnad,
    976							netif_queue_stop);
    977					}
    978				}
    979			}
    980		}
    981	} else {
    982		if (netif_carrier_ok(bnad->netdev)) {
    983			netdev_info(bnad->netdev, "link down\n");
    984			netif_carrier_off(bnad->netdev);
    985			BNAD_UPDATE_CTR(bnad, link_toggle);
    986		}
    987	}
    988}
    989
    990static void
    991bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
    992{
    993	struct bnad *bnad = (struct bnad *)arg;
    994
    995	complete(&bnad->bnad_completions.tx_comp);
    996}
    997
    998static void
    999bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
   1000{
   1001	struct bnad_tx_info *tx_info =
   1002			(struct bnad_tx_info *)tcb->txq->tx->priv;
   1003
   1004	tcb->priv = tcb;
   1005	tx_info->tcb[tcb->id] = tcb;
   1006}
   1007
   1008static void
   1009bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
   1010{
   1011	struct bnad_tx_info *tx_info =
   1012			(struct bnad_tx_info *)tcb->txq->tx->priv;
   1013
   1014	tx_info->tcb[tcb->id] = NULL;
   1015	tcb->priv = NULL;
   1016}
   1017
   1018static void
   1019bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
   1020{
   1021	struct bnad_rx_info *rx_info =
   1022			(struct bnad_rx_info *)ccb->cq->rx->priv;
   1023
   1024	rx_info->rx_ctrl[ccb->id].ccb = ccb;
   1025	ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
   1026}
   1027
   1028static void
   1029bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
   1030{
   1031	struct bnad_rx_info *rx_info =
   1032			(struct bnad_rx_info *)ccb->cq->rx->priv;
   1033
   1034	rx_info->rx_ctrl[ccb->id].ccb = NULL;
   1035}
   1036
   1037static void
   1038bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
   1039{
   1040	struct bnad_tx_info *tx_info =
   1041			(struct bnad_tx_info *)tx->priv;
   1042	struct bna_tcb *tcb;
   1043	u32 txq_id;
   1044	int i;
   1045
   1046	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
   1047		tcb = tx_info->tcb[i];
   1048		if (!tcb)
   1049			continue;
   1050		txq_id = tcb->id;
   1051		clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
   1052		netif_stop_subqueue(bnad->netdev, txq_id);
   1053	}
   1054}
   1055
   1056static void
   1057bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
   1058{
   1059	struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
   1060	struct bna_tcb *tcb;
   1061	u32 txq_id;
   1062	int i;
   1063
   1064	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
   1065		tcb = tx_info->tcb[i];
   1066		if (!tcb)
   1067			continue;
   1068		txq_id = tcb->id;
   1069
   1070		BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags));
   1071		set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
   1072		BUG_ON(*(tcb->hw_consumer_index) != 0);
   1073
   1074		if (netif_carrier_ok(bnad->netdev)) {
   1075			netif_wake_subqueue(bnad->netdev, txq_id);
   1076			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
   1077		}
   1078	}
   1079
   1080	/*
   1081	 * Workaround for first ioceth enable failure & we
   1082	 * get a 0 MAC address. We try to get the MAC address
   1083	 * again here.
   1084	 */
   1085	if (is_zero_ether_addr(bnad->perm_addr)) {
   1086		bna_enet_perm_mac_get(&bnad->bna.enet, bnad->perm_addr);
   1087		bnad_set_netdev_perm_addr(bnad);
   1088	}
   1089}
   1090
   1091/*
   1092 * Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm.
   1093 */
   1094static void
   1095bnad_tx_cleanup(struct delayed_work *work)
   1096{
   1097	struct bnad_tx_info *tx_info =
   1098		container_of(work, struct bnad_tx_info, tx_cleanup_work);
   1099	struct bnad *bnad = NULL;
   1100	struct bna_tcb *tcb;
   1101	unsigned long flags;
   1102	u32 i, pending = 0;
   1103
   1104	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
   1105		tcb = tx_info->tcb[i];
   1106		if (!tcb)
   1107			continue;
   1108
   1109		bnad = tcb->bnad;
   1110
   1111		if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
   1112			pending++;
   1113			continue;
   1114		}
   1115
   1116		bnad_txq_cleanup(bnad, tcb);
   1117
   1118		smp_mb__before_atomic();
   1119		clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
   1120	}
   1121
   1122	if (pending) {
   1123		queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work,
   1124			msecs_to_jiffies(1));
   1125		return;
   1126	}
   1127
   1128	spin_lock_irqsave(&bnad->bna_lock, flags);
   1129	bna_tx_cleanup_complete(tx_info->tx);
   1130	spin_unlock_irqrestore(&bnad->bna_lock, flags);
   1131}
   1132
   1133static void
   1134bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
   1135{
   1136	struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
   1137	struct bna_tcb *tcb;
   1138	int i;
   1139
   1140	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
   1141		tcb = tx_info->tcb[i];
   1142		if (!tcb)
   1143			continue;
   1144	}
   1145
   1146	queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0);
   1147}
   1148
   1149static void
   1150bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
   1151{
   1152	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
   1153	struct bna_ccb *ccb;
   1154	struct bnad_rx_ctrl *rx_ctrl;
   1155	int i;
   1156
   1157	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
   1158		rx_ctrl = &rx_info->rx_ctrl[i];
   1159		ccb = rx_ctrl->ccb;
   1160		if (!ccb)
   1161			continue;
   1162
   1163		clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags);
   1164
   1165		if (ccb->rcb[1])
   1166			clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags);
   1167	}
   1168}
   1169
   1170/*
   1171 * Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm.
   1172 */
   1173static void
   1174bnad_rx_cleanup(void *work)
   1175{
   1176	struct bnad_rx_info *rx_info =
   1177		container_of(work, struct bnad_rx_info, rx_cleanup_work);
   1178	struct bnad_rx_ctrl *rx_ctrl;
   1179	struct bnad *bnad = NULL;
   1180	unsigned long flags;
   1181	u32 i;
   1182
   1183	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
   1184		rx_ctrl = &rx_info->rx_ctrl[i];
   1185
   1186		if (!rx_ctrl->ccb)
   1187			continue;
   1188
   1189		bnad = rx_ctrl->ccb->bnad;
   1190
   1191		/*
   1192		 * Wait till the poll handler has exited
   1193		 * and nothing can be scheduled anymore
   1194		 */
   1195		napi_disable(&rx_ctrl->napi);
   1196
   1197		bnad_cq_cleanup(bnad, rx_ctrl->ccb);
   1198		bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]);
   1199		if (rx_ctrl->ccb->rcb[1])
   1200			bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]);
   1201	}
   1202
   1203	spin_lock_irqsave(&bnad->bna_lock, flags);
   1204	bna_rx_cleanup_complete(rx_info->rx);
   1205	spin_unlock_irqrestore(&bnad->bna_lock, flags);
   1206}
   1207
   1208static void
   1209bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
   1210{
   1211	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
   1212	struct bna_ccb *ccb;
   1213	struct bnad_rx_ctrl *rx_ctrl;
   1214	int i;
   1215
   1216	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
   1217		rx_ctrl = &rx_info->rx_ctrl[i];
   1218		ccb = rx_ctrl->ccb;
   1219		if (!ccb)
   1220			continue;
   1221
   1222		clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
   1223
   1224		if (ccb->rcb[1])
   1225			clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
   1226	}
   1227
   1228	queue_work(bnad->work_q, &rx_info->rx_cleanup_work);
   1229}
   1230
   1231static void
   1232bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
   1233{
   1234	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
   1235	struct bna_ccb *ccb;
   1236	struct bna_rcb *rcb;
   1237	struct bnad_rx_ctrl *rx_ctrl;
   1238	int i, j;
   1239
   1240	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
   1241		rx_ctrl = &rx_info->rx_ctrl[i];
   1242		ccb = rx_ctrl->ccb;
   1243		if (!ccb)
   1244			continue;
   1245
   1246		napi_enable(&rx_ctrl->napi);
   1247
   1248		for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
   1249			rcb = ccb->rcb[j];
   1250			if (!rcb)
   1251				continue;
   1252
   1253			bnad_rxq_alloc_init(bnad, rcb);
   1254			set_bit(BNAD_RXQ_STARTED, &rcb->flags);
   1255			set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
   1256			bnad_rxq_post(bnad, rcb);
   1257		}
   1258	}
   1259}
   1260
   1261static void
   1262bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
   1263{
   1264	struct bnad *bnad = (struct bnad *)arg;
   1265
   1266	complete(&bnad->bnad_completions.rx_comp);
   1267}
   1268
   1269static void
   1270bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
   1271{
   1272	bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
   1273	complete(&bnad->bnad_completions.mcast_comp);
   1274}
   1275
   1276void
   1277bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
   1278		       struct bna_stats *stats)
   1279{
   1280	if (status == BNA_CB_SUCCESS)
   1281		BNAD_UPDATE_CTR(bnad, hw_stats_updates);
   1282
   1283	if (!netif_running(bnad->netdev) ||
   1284		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
   1285		return;
   1286
   1287	mod_timer(&bnad->stats_timer,
   1288		  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
   1289}
   1290
   1291static void
   1292bnad_cb_enet_mtu_set(struct bnad *bnad)
   1293{
   1294	bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
   1295	complete(&bnad->bnad_completions.mtu_comp);
   1296}
   1297
   1298void
   1299bnad_cb_completion(void *arg, enum bfa_status status)
   1300{
   1301	struct bnad_iocmd_comp *iocmd_comp =
   1302			(struct bnad_iocmd_comp *)arg;
   1303
   1304	iocmd_comp->comp_status = (u32) status;
   1305	complete(&iocmd_comp->comp);
   1306}
   1307
   1308/* Resource allocation, free functions */
   1309
   1310static void
   1311bnad_mem_free(struct bnad *bnad,
   1312	      struct bna_mem_info *mem_info)
   1313{
   1314	int i;
   1315	dma_addr_t dma_pa;
   1316
   1317	if (mem_info->mdl == NULL)
   1318		return;
   1319
   1320	for (i = 0; i < mem_info->num; i++) {
   1321		if (mem_info->mdl[i].kva != NULL) {
   1322			if (mem_info->mem_type == BNA_MEM_T_DMA) {
   1323				BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
   1324						dma_pa);
   1325				dma_free_coherent(&bnad->pcidev->dev,
   1326						  mem_info->mdl[i].len,
   1327						  mem_info->mdl[i].kva, dma_pa);
   1328			} else
   1329				kfree(mem_info->mdl[i].kva);
   1330		}
   1331	}
   1332	kfree(mem_info->mdl);
   1333	mem_info->mdl = NULL;
   1334}
   1335
   1336static int
   1337bnad_mem_alloc(struct bnad *bnad,
   1338	       struct bna_mem_info *mem_info)
   1339{
   1340	int i;
   1341	dma_addr_t dma_pa;
   1342
   1343	if ((mem_info->num == 0) || (mem_info->len == 0)) {
   1344		mem_info->mdl = NULL;
   1345		return 0;
   1346	}
   1347
   1348	mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
   1349				GFP_KERNEL);
   1350	if (mem_info->mdl == NULL)
   1351		return -ENOMEM;
   1352
   1353	if (mem_info->mem_type == BNA_MEM_T_DMA) {
   1354		for (i = 0; i < mem_info->num; i++) {
   1355			mem_info->mdl[i].len = mem_info->len;
   1356			mem_info->mdl[i].kva =
   1357				dma_alloc_coherent(&bnad->pcidev->dev,
   1358						   mem_info->len, &dma_pa,
   1359						   GFP_KERNEL);
   1360			if (mem_info->mdl[i].kva == NULL)
   1361				goto err_return;
   1362
   1363			BNA_SET_DMA_ADDR(dma_pa,
   1364					 &(mem_info->mdl[i].dma));
   1365		}
   1366	} else {
   1367		for (i = 0; i < mem_info->num; i++) {
   1368			mem_info->mdl[i].len = mem_info->len;
   1369			mem_info->mdl[i].kva = kzalloc(mem_info->len,
   1370							GFP_KERNEL);
   1371			if (mem_info->mdl[i].kva == NULL)
   1372				goto err_return;
   1373		}
   1374	}
   1375
   1376	return 0;
   1377
   1378err_return:
   1379	bnad_mem_free(bnad, mem_info);
   1380	return -ENOMEM;
   1381}
   1382
   1383/* Free IRQ for Mailbox */
   1384static void
   1385bnad_mbox_irq_free(struct bnad *bnad)
   1386{
   1387	int irq;
   1388	unsigned long flags;
   1389
   1390	spin_lock_irqsave(&bnad->bna_lock, flags);
   1391	bnad_disable_mbox_irq(bnad);
   1392	spin_unlock_irqrestore(&bnad->bna_lock, flags);
   1393
   1394	irq = BNAD_GET_MBOX_IRQ(bnad);
   1395	free_irq(irq, bnad);
   1396}
   1397
   1398/*
   1399 * Allocates IRQ for Mailbox, but keep it disabled
   1400 * This will be enabled once we get the mbox enable callback
   1401 * from bna
   1402 */
   1403static int
   1404bnad_mbox_irq_alloc(struct bnad *bnad)
   1405{
   1406	int		err = 0;
   1407	unsigned long	irq_flags, flags;
   1408	u32	irq;
   1409	irq_handler_t	irq_handler;
   1410
   1411	spin_lock_irqsave(&bnad->bna_lock, flags);
   1412	if (bnad->cfg_flags & BNAD_CF_MSIX) {
   1413		irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
   1414		irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
   1415		irq_flags = 0;
   1416	} else {
   1417		irq_handler = (irq_handler_t)bnad_isr;
   1418		irq = bnad->pcidev->irq;
   1419		irq_flags = IRQF_SHARED;
   1420	}
   1421
   1422	spin_unlock_irqrestore(&bnad->bna_lock, flags);
   1423	sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
   1424
   1425	/*
   1426	 * Set the Mbox IRQ disable flag, so that the IRQ handler
   1427	 * called from request_irq() for SHARED IRQs do not execute
   1428	 */
   1429	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
   1430
   1431	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
   1432
   1433	err = request_irq(irq, irq_handler, irq_flags,
   1434			  bnad->mbox_irq_name, bnad);
   1435
   1436	return err;
   1437}
   1438
   1439static void
   1440bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
   1441{
   1442	kfree(intr_info->idl);
   1443	intr_info->idl = NULL;
   1444}
   1445
   1446/* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
   1447static int
   1448bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
   1449		    u32 txrx_id, struct bna_intr_info *intr_info)
   1450{
   1451	int i, vector_start = 0;
   1452	u32 cfg_flags;
   1453	unsigned long flags;
   1454
   1455	spin_lock_irqsave(&bnad->bna_lock, flags);
   1456	cfg_flags = bnad->cfg_flags;
   1457	spin_unlock_irqrestore(&bnad->bna_lock, flags);
   1458
   1459	if (cfg_flags & BNAD_CF_MSIX) {
   1460		intr_info->intr_type = BNA_INTR_T_MSIX;
   1461		intr_info->idl = kcalloc(intr_info->num,
   1462					sizeof(struct bna_intr_descr),
   1463					GFP_KERNEL);
   1464		if (!intr_info->idl)
   1465			return -ENOMEM;
   1466
   1467		switch (src) {
   1468		case BNAD_INTR_TX:
   1469			vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
   1470			break;
   1471
   1472		case BNAD_INTR_RX:
   1473			vector_start = BNAD_MAILBOX_MSIX_VECTORS +
   1474					(bnad->num_tx * bnad->num_txq_per_tx) +
   1475					txrx_id;
   1476			break;
   1477
   1478		default:
   1479			BUG();
   1480		}
   1481
   1482		for (i = 0; i < intr_info->num; i++)
   1483			intr_info->idl[i].vector = vector_start + i;
   1484	} else {
   1485		intr_info->intr_type = BNA_INTR_T_INTX;
   1486		intr_info->num = 1;
   1487		intr_info->idl = kcalloc(intr_info->num,
   1488					sizeof(struct bna_intr_descr),
   1489					GFP_KERNEL);
   1490		if (!intr_info->idl)
   1491			return -ENOMEM;
   1492
   1493		switch (src) {
   1494		case BNAD_INTR_TX:
   1495			intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
   1496			break;
   1497
   1498		case BNAD_INTR_RX:
   1499			intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
   1500			break;
   1501		}
   1502	}
   1503	return 0;
   1504}
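/*
 * Editor's note: illustrative worked example, not part of the driver.
 * Assuming BNAD_MAILBOX_MSIX_VECTORS == 1, num_tx == 1, num_txq_per_tx == 4
 * and num_rxp_per_rx == 4, the MSI-X vector layout produced above is:
 *   msix_table[0]       - mailbox (BNAD_MAILBOX_MSIX_INDEX)
 *   msix_table[1 .. 4]  - TxQ completions (vector_start = 1 + txrx_id)
 *   msix_table[5 .. 8]  - Rx CQs (vector_start = 1 + 1 * 4 + txrx_id)
 * In INTx mode a single shared vector is used instead, and idl[0] only
 * carries the Tx/Rx IB bitmask.
 */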
   1505
   1506/* NOTE: Should be called for MSIX only
   1507 * Unregisters Tx MSIX vector(s) from the kernel
   1508 */
   1509static void
   1510bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
   1511			int num_txqs)
   1512{
   1513	int i;
   1514	int vector_num;
   1515
   1516	for (i = 0; i < num_txqs; i++) {
   1517		if (tx_info->tcb[i] == NULL)
   1518			continue;
   1519
   1520		vector_num = tx_info->tcb[i]->intr_vector;
   1521		free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
   1522	}
   1523}
   1524
   1525/* NOTE: Should be called for MSIX only
   1526 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
   1527 */
   1528static int
   1529bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
   1530			u32 tx_id, int num_txqs)
   1531{
   1532	int i;
   1533	int err;
   1534	int vector_num;
   1535
   1536	for (i = 0; i < num_txqs; i++) {
   1537		vector_num = tx_info->tcb[i]->intr_vector;
   1538		sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
   1539				tx_id + tx_info->tcb[i]->id);
   1540		err = request_irq(bnad->msix_table[vector_num].vector,
   1541				  (irq_handler_t)bnad_msix_tx, 0,
   1542				  tx_info->tcb[i]->name,
   1543				  tx_info->tcb[i]);
   1544		if (err)
   1545			goto err_return;
   1546	}
   1547
   1548	return 0;
   1549
   1550err_return:
   1551	if (i > 0)
   1552		bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
   1553	return -1;
   1554}
   1555
   1556/* NOTE: Should be called for MSIX only
   1557 * Unregisters Rx MSIX vector(s) from the kernel
   1558 */
   1559static void
   1560bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
   1561			int num_rxps)
   1562{
   1563	int i;
   1564	int vector_num;
   1565
   1566	for (i = 0; i < num_rxps; i++) {
   1567		if (rx_info->rx_ctrl[i].ccb == NULL)
   1568			continue;
   1569
   1570		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
   1571		free_irq(bnad->msix_table[vector_num].vector,
   1572			 rx_info->rx_ctrl[i].ccb);
   1573	}
   1574}
   1575
   1576/* NOTE: Should be called for MSIX only
    1577 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
   1578 */
   1579static int
   1580bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
   1581			u32 rx_id, int num_rxps)
   1582{
   1583	int i;
   1584	int err;
   1585	int vector_num;
   1586
   1587	for (i = 0; i < num_rxps; i++) {
   1588		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
   1589		sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
   1590			bnad->netdev->name,
   1591			rx_id + rx_info->rx_ctrl[i].ccb->id);
   1592		err = request_irq(bnad->msix_table[vector_num].vector,
   1593				  (irq_handler_t)bnad_msix_rx, 0,
   1594				  rx_info->rx_ctrl[i].ccb->name,
   1595				  rx_info->rx_ctrl[i].ccb);
   1596		if (err)
   1597			goto err_return;
   1598	}
   1599
   1600	return 0;
   1601
   1602err_return:
   1603	if (i > 0)
   1604		bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
   1605	return -1;
   1606}
   1607
   1608/* Free Tx object Resources */
   1609static void
   1610bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
   1611{
   1612	int i;
   1613
   1614	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
   1615		if (res_info[i].res_type == BNA_RES_T_MEM)
   1616			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
   1617		else if (res_info[i].res_type == BNA_RES_T_INTR)
   1618			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
   1619	}
   1620}
   1621
   1622/* Allocates memory and interrupt resources for Tx object */
   1623static int
   1624bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
   1625		  u32 tx_id)
   1626{
   1627	int i, err = 0;
   1628
   1629	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
   1630		if (res_info[i].res_type == BNA_RES_T_MEM)
   1631			err = bnad_mem_alloc(bnad,
   1632					&res_info[i].res_u.mem_info);
   1633		else if (res_info[i].res_type == BNA_RES_T_INTR)
   1634			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
   1635					&res_info[i].res_u.intr_info);
   1636		if (err)
   1637			goto err_return;
   1638	}
   1639	return 0;
   1640
   1641err_return:
   1642	bnad_tx_res_free(bnad, res_info);
   1643	return err;
   1644}
   1645
   1646/* Free Rx object Resources */
   1647static void
   1648bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
   1649{
   1650	int i;
   1651
   1652	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
   1653		if (res_info[i].res_type == BNA_RES_T_MEM)
   1654			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
   1655		else if (res_info[i].res_type == BNA_RES_T_INTR)
   1656			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
   1657	}
   1658}
   1659
   1660/* Allocates memory and interrupt resources for Rx object */
   1661static int
   1662bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
   1663		  uint rx_id)
   1664{
   1665	int i, err = 0;
   1666
   1667	/* All memory needs to be allocated before setup_ccbs */
   1668	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
   1669		if (res_info[i].res_type == BNA_RES_T_MEM)
   1670			err = bnad_mem_alloc(bnad,
   1671					&res_info[i].res_u.mem_info);
   1672		else if (res_info[i].res_type == BNA_RES_T_INTR)
   1673			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
   1674					&res_info[i].res_u.intr_info);
   1675		if (err)
   1676			goto err_return;
   1677	}
   1678	return 0;
   1679
   1680err_return:
   1681	bnad_rx_res_free(bnad, res_info);
   1682	return err;
   1683}
   1684
   1685/* Timer callbacks */
   1686/* a) IOC timer */
   1687static void
   1688bnad_ioc_timeout(struct timer_list *t)
   1689{
   1690	struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.ioc_timer);
   1691	unsigned long flags;
   1692
   1693	spin_lock_irqsave(&bnad->bna_lock, flags);
   1694	bfa_nw_ioc_timeout(&bnad->bna.ioceth.ioc);
   1695	spin_unlock_irqrestore(&bnad->bna_lock, flags);
   1696}
   1697
   1698static void
   1699bnad_ioc_hb_check(struct timer_list *t)
   1700{
   1701	struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.hb_timer);
   1702	unsigned long flags;
   1703
   1704	spin_lock_irqsave(&bnad->bna_lock, flags);
   1705	bfa_nw_ioc_hb_check(&bnad->bna.ioceth.ioc);
   1706	spin_unlock_irqrestore(&bnad->bna_lock, flags);
   1707}
   1708
   1709static void
   1710bnad_iocpf_timeout(struct timer_list *t)
   1711{
   1712	struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.iocpf_timer);
   1713	unsigned long flags;
   1714
   1715	spin_lock_irqsave(&bnad->bna_lock, flags);
   1716	bfa_nw_iocpf_timeout(&bnad->bna.ioceth.ioc);
   1717	spin_unlock_irqrestore(&bnad->bna_lock, flags);
   1718}
   1719
   1720static void
   1721bnad_iocpf_sem_timeout(struct timer_list *t)
   1722{
   1723	struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.sem_timer);
   1724	unsigned long flags;
   1725
   1726	spin_lock_irqsave(&bnad->bna_lock, flags);
   1727	bfa_nw_iocpf_sem_timeout(&bnad->bna.ioceth.ioc);
   1728	spin_unlock_irqrestore(&bnad->bna_lock, flags);
   1729}
   1730
   1731/*
   1732 * All timer routines use bnad->bna_lock to protect against
   1733 * the following race, which may occur in case of no locking:
   1734 *	Time	CPU m	CPU n
   1735 *	0       1 = test_bit
   1736 *	1			clear_bit
   1737 *	2			del_timer_sync
   1738 *	3	mod_timer
   1739 */
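/*
 * Editor's note: illustrative sketch, not part of the driver.  It spells
 * out the pattern the race outlined above motivates, which
 * bnad_dim_timeout()/bnad_stats_timer_stop() below follow: the RUNNING bit
 * is cleared under bna_lock, the timeout handler re-arms the timer only
 * while that bit is still set, and del_timer_sync() is called outside the
 * lock to flush a handler that may already be running.  The helper name
 * and flag parameter are hypothetical.
 */
static inline void bnad_example_timer_stop(struct bnad *bnad,
					   struct timer_list *timer,
					   int running_bit)
{
	unsigned long flags;
	int to_del = 0;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (test_and_clear_bit(running_bit, &bnad->run_flags))
		to_del = 1;		/* this caller owns the teardown */
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (to_del)
		del_timer_sync(timer);	/* no re-arm possible: bit is clear */
}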
   1740
   1741/* b) Dynamic Interrupt Moderation Timer */
   1742static void
   1743bnad_dim_timeout(struct timer_list *t)
   1744{
   1745	struct bnad *bnad = from_timer(bnad, t, dim_timer);
   1746	struct bnad_rx_info *rx_info;
   1747	struct bnad_rx_ctrl *rx_ctrl;
   1748	int i, j;
   1749	unsigned long flags;
   1750
   1751	if (!netif_carrier_ok(bnad->netdev))
   1752		return;
   1753
   1754	spin_lock_irqsave(&bnad->bna_lock, flags);
   1755	for (i = 0; i < bnad->num_rx; i++) {
   1756		rx_info = &bnad->rx_info[i];
   1757		if (!rx_info->rx)
   1758			continue;
   1759		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
   1760			rx_ctrl = &rx_info->rx_ctrl[j];
   1761			if (!rx_ctrl->ccb)
   1762				continue;
   1763			bna_rx_dim_update(rx_ctrl->ccb);
   1764		}
   1765	}
   1766
   1767	/* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
   1768	if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
   1769		mod_timer(&bnad->dim_timer,
   1770			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
   1771	spin_unlock_irqrestore(&bnad->bna_lock, flags);
   1772}
   1773
   1774/* c)  Statistics Timer */
   1775static void
   1776bnad_stats_timeout(struct timer_list *t)
   1777{
   1778	struct bnad *bnad = from_timer(bnad, t, stats_timer);
   1779	unsigned long flags;
   1780
   1781	if (!netif_running(bnad->netdev) ||
   1782		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
   1783		return;
   1784
   1785	spin_lock_irqsave(&bnad->bna_lock, flags);
   1786	bna_hw_stats_get(&bnad->bna);
   1787	spin_unlock_irqrestore(&bnad->bna_lock, flags);
   1788}
   1789
   1790/*
   1791 * Set up timer for DIM
   1792 * Called with bnad->bna_lock held
   1793 */
   1794void
   1795bnad_dim_timer_start(struct bnad *bnad)
   1796{
   1797	if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
   1798	    !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
   1799		timer_setup(&bnad->dim_timer, bnad_dim_timeout, 0);
   1800		set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
   1801		mod_timer(&bnad->dim_timer,
   1802			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
   1803	}
   1804}
   1805
   1806/*
   1807 * Set up timer for statistics
   1808 * Called with mutex_lock(&bnad->conf_mutex) held
   1809 */
   1810static void
   1811bnad_stats_timer_start(struct bnad *bnad)
   1812{
   1813	unsigned long flags;
   1814
   1815	spin_lock_irqsave(&bnad->bna_lock, flags);
   1816	if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
   1817		timer_setup(&bnad->stats_timer, bnad_stats_timeout, 0);
   1818		mod_timer(&bnad->stats_timer,
   1819			  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
   1820	}
   1821	spin_unlock_irqrestore(&bnad->bna_lock, flags);
   1822}
   1823
   1824/*
   1825 * Stops the stats timer
   1826 * Called with mutex_lock(&bnad->conf_mutex) held
   1827 */
   1828static void
   1829bnad_stats_timer_stop(struct bnad *bnad)
   1830{
   1831	int to_del = 0;
   1832	unsigned long flags;
   1833
   1834	spin_lock_irqsave(&bnad->bna_lock, flags);
   1835	if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
   1836		to_del = 1;
   1837	spin_unlock_irqrestore(&bnad->bna_lock, flags);
   1838	if (to_del)
   1839		del_timer_sync(&bnad->stats_timer);
   1840}
   1841
   1842/* Utilities */
   1843
   1844static void
   1845bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
   1846{
   1847	int i = 1; /* Index 0 has broadcast address */
   1848	struct netdev_hw_addr *mc_addr;
   1849
   1850	netdev_for_each_mc_addr(mc_addr, netdev) {
   1851		ether_addr_copy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0]);
   1852		i++;
   1853	}
   1854}
   1855
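       /*
        * NAPI poll handler for one Rx path: process up to @budget
        * completions.  If the budget is exhausted, stay in polling mode;
        * otherwise complete NAPI and re-enable the Rx interrupt for this CCB.
        */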
   1856static int
   1857bnad_napi_poll_rx(struct napi_struct *napi, int budget)
   1858{
   1859	struct bnad_rx_ctrl *rx_ctrl =
   1860		container_of(napi, struct bnad_rx_ctrl, napi);
   1861	struct bnad *bnad = rx_ctrl->bnad;
   1862	int rcvd = 0;
   1863
   1864	rx_ctrl->rx_poll_ctr++;
   1865
   1866	if (!netif_carrier_ok(bnad->netdev))
   1867		goto poll_exit;
   1868
   1869	rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget);
   1870	if (rcvd >= budget)
   1871		return rcvd;
   1872
   1873poll_exit:
   1874	napi_complete_done(napi, rcvd);
   1875
   1876	rx_ctrl->rx_complete++;
   1877
   1878	if (rx_ctrl->ccb)
   1879		bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);
   1880
   1881	return rcvd;
   1882}
   1883
   1884static void
   1885bnad_napi_add(struct bnad *bnad, u32 rx_id)
   1886{
   1887	struct bnad_rx_ctrl *rx_ctrl;
   1888	int i;
   1889
   1890	/* Initialize NAPI; it is enabled later when the Rx path is brought up */
   1891	for (i = 0; i <	bnad->num_rxp_per_rx; i++) {
   1892		rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
   1893		netif_napi_add(bnad->netdev, &rx_ctrl->napi,
   1894			       bnad_napi_poll_rx, NAPI_POLL_WEIGHT);
   1895	}
   1896}
   1897
   1898static void
   1899bnad_napi_delete(struct bnad *bnad, u32 rx_id)
   1900{
   1901	int i;
   1902
   1903	/* First disable and then clean up */
   1904	for (i = 0; i < bnad->num_rxp_per_rx; i++)
   1905		netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
   1906}
   1907
   1908/* Should be called with conf_lock held */
   1909void
   1910bnad_destroy_tx(struct bnad *bnad, u32 tx_id)
   1911{
   1912	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
   1913	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
   1914	unsigned long flags;
   1915
   1916	if (!tx_info->tx)
   1917		return;
   1918
   1919	init_completion(&bnad->bnad_completions.tx_comp);
   1920	spin_lock_irqsave(&bnad->bna_lock, flags);
   1921	bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
   1922	spin_unlock_irqrestore(&bnad->bna_lock, flags);
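       	/* bnad_cb_tx_disabled() completes tx_comp when the Tx disable finishes */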
   1923	wait_for_completion(&bnad->bnad_completions.tx_comp);
   1924
   1925	if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
   1926		bnad_tx_msix_unregister(bnad, tx_info,
   1927			bnad->num_txq_per_tx);
   1928
   1929	spin_lock_irqsave(&bnad->bna_lock, flags);
   1930	bna_tx_destroy(tx_info->tx);
   1931	spin_unlock_irqrestore(&bnad->bna_lock, flags);
   1932
   1933	tx_info->tx = NULL;
   1934	tx_info->tx_id = 0;
   1935
   1936	bnad_tx_res_free(bnad, res_info);
   1937}
   1938
   1939/* Should be called with conf_lock held */
   1940int
   1941bnad_setup_tx(struct bnad *bnad, u32 tx_id)
   1942{
   1943	int err;
   1944	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
   1945	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
   1946	struct bna_intr_info *intr_info =
   1947			&res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
   1948	struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
   1949	static const struct bna_tx_event_cbfn tx_cbfn = {
   1950		.tcb_setup_cbfn = bnad_cb_tcb_setup,
   1951		.tcb_destroy_cbfn = bnad_cb_tcb_destroy,
   1952		.tx_stall_cbfn = bnad_cb_tx_stall,
   1953		.tx_resume_cbfn = bnad_cb_tx_resume,
   1954		.tx_cleanup_cbfn = bnad_cb_tx_cleanup,
   1955	};
   1956
   1957	struct bna_tx *tx;
   1958	unsigned long flags;
   1959
   1960	tx_info->tx_id = tx_id;
   1961
   1962	/* Initialize the Tx object configuration */
   1963	tx_config->num_txq = bnad->num_txq_per_tx;
   1964	tx_config->txq_depth = bnad->txq_depth;
   1965	tx_config->tx_type = BNA_TX_T_REGULAR;
   1966	tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;
   1967
   1968	/* Get BNA's resource requirement for one tx object */
   1969	spin_lock_irqsave(&bnad->bna_lock, flags);
   1970	bna_tx_res_req(bnad->num_txq_per_tx,
   1971		bnad->txq_depth, res_info);
   1972	spin_unlock_irqrestore(&bnad->bna_lock, flags);
   1973
   1974	/* Fill Unmap Q memory requirements */
   1975	BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
   1976			bnad->num_txq_per_tx, (sizeof(struct bnad_tx_unmap) *
   1977			bnad->txq_depth));
   1978
   1979	/* Allocate resources */
   1980	err = bnad_tx_res_alloc(bnad, res_info, tx_id);
   1981	if (err)
   1982		return err;
   1983
   1984	/* Ask BNA to create one Tx object, supplying required resources */
   1985	spin_lock_irqsave(&bnad->bna_lock, flags);
   1986	tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
   1987			tx_info);
   1988	spin_unlock_irqrestore(&bnad->bna_lock, flags);
   1989	if (!tx) {
   1990		err = -ENOMEM;
   1991		goto err_return;
   1992	}
   1993	tx_info->tx = tx;
   1994
   1995	INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
   1996			(work_func_t)bnad_tx_cleanup);
   1997
   1998	/* Register ISR for the Tx object */
   1999	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
   2000		err = bnad_tx_msix_register(bnad, tx_info,
   2001			tx_id, bnad->num_txq_per_tx);
   2002		if (err)
   2003			goto cleanup_tx;
   2004	}
   2005
   2006	spin_lock_irqsave(&bnad->bna_lock, flags);
   2007	bna_tx_enable(tx);
   2008	spin_unlock_irqrestore(&bnad->bna_lock, flags);
   2009
   2010	return 0;
   2011
   2012cleanup_tx:
   2013	spin_lock_irqsave(&bnad->bna_lock, flags);
   2014	bna_tx_destroy(tx_info->tx);
   2015	spin_unlock_irqrestore(&bnad->bna_lock, flags);
   2016	tx_info->tx = NULL;
   2017	tx_info->tx_id = 0;
   2018err_return:
   2019	bnad_tx_res_free(bnad, res_info);
   2020	return err;
   2021}
   2022
   2023/* Setup the rx config for bna_rx_create */
   2024/* bnad decides the configuration */
   2025static void
   2026bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
   2027{
   2028	memset(rx_config, 0, sizeof(*rx_config));
   2029	rx_config->rx_type = BNA_RX_T_REGULAR;
   2030	rx_config->num_paths = bnad->num_rxp_per_rx;
   2031	rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
   2032
   2033	if (bnad->num_rxp_per_rx > 1) {
   2034		rx_config->rss_status = BNA_STATUS_T_ENABLED;
   2035		rx_config->rss_config.hash_type =
   2036				(BFI_ENET_RSS_IPV6 |
   2037				 BFI_ENET_RSS_IPV6_TCP |
   2038				 BFI_ENET_RSS_IPV4 |
   2039				 BFI_ENET_RSS_IPV4_TCP);
   2040		rx_config->rss_config.hash_mask =
   2041				bnad->num_rxp_per_rx - 1;
   2042		netdev_rss_key_fill(rx_config->rss_config.toeplitz_hash_key,
   2043			sizeof(rx_config->rss_config.toeplitz_hash_key));
   2044	} else {
   2045		rx_config->rss_status = BNA_STATUS_T_DISABLED;
   2046		memset(&rx_config->rss_config, 0,
   2047		       sizeof(rx_config->rss_config));
   2048	}
   2049
   2050	rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu);
   2051	rx_config->q0_multi_buf = BNA_STATUS_T_DISABLED;
   2052
   2053	/* BNA_RXP_SINGLE - one data-buffer queue
   2054	 * BNA_RXP_SLR - one small-buffer and one large-buffer queue
   2055	 * BNA_RXP_HDS - one header-buffer and one data-buffer queue
   2056	 */
   2057	/* TODO: configurable param for queue type */
   2058	rx_config->rxp_type = BNA_RXP_SLR;
   2059
   2060	if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
   2061	    rx_config->frame_size > 4096) {
   2062		/* though size_routing_enable is set in SLR,
   2063		 * small packets may get routed to same rxq.
   2064		 * set buf_size to 2048 instead of PAGE_SIZE.
   2065		 */
   2066		rx_config->q0_buf_size = 2048;
   2067		/* this should be a multiple of 2 */
   2068		rx_config->q0_num_vecs = 4;
   2069		rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs;
   2070		rx_config->q0_multi_buf = BNA_STATUS_T_ENABLED;
   2071	} else {
   2072		rx_config->q0_buf_size = rx_config->frame_size;
   2073		rx_config->q0_num_vecs = 1;
   2074		rx_config->q0_depth = bnad->rxq_depth;
   2075	}
   2076
   2077	/* initialize for q1 for BNA_RXP_SLR/BNA_RXP_HDS */
   2078	if (rx_config->rxp_type == BNA_RXP_SLR) {
   2079		rx_config->q1_depth = bnad->rxq_depth;
   2080		rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE;
   2081	}
   2082
   2083	rx_config->vlan_strip_status =
   2084		(bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) ?
   2085		BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
   2086}
   2087
   2088static void
   2089bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
   2090{
   2091	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
   2092	int i;
   2093
   2094	for (i = 0; i < bnad->num_rxp_per_rx; i++)
   2095		rx_info->rx_ctrl[i].bnad = bnad;
   2096}
   2097
   2098/* Called with mutex_lock(&bnad->conf_mutex) held */
   2099static u32
   2100bnad_reinit_rx(struct bnad *bnad)
   2101{
   2102	struct net_device *netdev = bnad->netdev;
   2103	u32 err = 0, current_err = 0;
   2104	u32 rx_id = 0, count = 0;
   2105	unsigned long flags;
   2106
   2107	/* destroy and create new rx objects */
   2108	for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
   2109		if (!bnad->rx_info[rx_id].rx)
   2110			continue;
   2111		bnad_destroy_rx(bnad, rx_id);
   2112	}
   2113
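       	/* reprogram the enet MTU while no Rx objects exist */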
   2114	spin_lock_irqsave(&bnad->bna_lock, flags);
   2115	bna_enet_mtu_set(&bnad->bna.enet,
   2116			 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
   2117	spin_unlock_irqrestore(&bnad->bna_lock, flags);
   2118
   2119	for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
   2120		count++;
   2121		current_err = bnad_setup_rx(bnad, rx_id);
   2122		if (current_err && !err) {
   2123			err = current_err;
   2124			netdev_err(netdev, "RXQ:%u setup failed\n", rx_id);
   2125		}
   2126	}
   2127
   2128	/* restore rx configuration */
   2129	if (bnad->rx_info[0].rx && !err) {
   2130		bnad_restore_vlans(bnad, 0);
   2131		bnad_enable_default_bcast(bnad);
   2132		spin_lock_irqsave(&bnad->bna_lock, flags);
   2133		bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
   2134		spin_unlock_irqrestore(&bnad->bna_lock, flags);
   2135		bnad_set_rx_mode(netdev);
   2136	}
   2137
   2138	return count;
   2139}
   2140
   2141/* Called with bnad_conf_lock() held */
   2142void
   2143bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
   2144{
   2145	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
   2146	struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
   2147	struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
   2148	unsigned long flags;
   2149	int to_del = 0;
   2150
   2151	if (!rx_info->rx)
   2152		return;
   2153
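       	/* the DIM timer is started only for the default Rx (rx_id 0), so stop it here */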
   2154	if (0 == rx_id) {
   2155		spin_lock_irqsave(&bnad->bna_lock, flags);
   2156		if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
   2157		    test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
   2158			clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
   2159			to_del = 1;
   2160		}
   2161		spin_unlock_irqrestore(&bnad->bna_lock, flags);
   2162		if (to_del)
   2163			del_timer_sync(&bnad->dim_timer);
   2164	}
   2165
   2166	init_completion(&bnad->bnad_completions.rx_comp);
   2167	spin_lock_irqsave(&bnad->bna_lock, flags);
   2168	bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
   2169	spin_unlock_irqrestore(&bnad->bna_lock, flags);
   2170	wait_for_completion(&bnad->bnad_completions.rx_comp);
   2171
   2172	if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
   2173		bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
   2174
   2175	bnad_napi_delete(bnad, rx_id);
   2176
   2177	spin_lock_irqsave(&bnad->bna_lock, flags);
   2178	bna_rx_destroy(rx_info->rx);
   2179
   2180	rx_info->rx = NULL;
   2181	rx_info->rx_id = 0;
   2182	spin_unlock_irqrestore(&bnad->bna_lock, flags);
   2183
   2184	bnad_rx_res_free(bnad, res_info);
   2185}
   2186
   2187/* Called with mutex_lock(&bnad->conf_mutex) held */
   2188int
   2189bnad_setup_rx(struct bnad *bnad, u32 rx_id)
   2190{
   2191	int err;
   2192	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
   2193	struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
   2194	struct bna_intr_info *intr_info =
   2195			&res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
   2196	struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
   2197	static const struct bna_rx_event_cbfn rx_cbfn = {
   2198		.rcb_setup_cbfn = NULL,
   2199		.rcb_destroy_cbfn = NULL,
   2200		.ccb_setup_cbfn = bnad_cb_ccb_setup,
   2201		.ccb_destroy_cbfn = bnad_cb_ccb_destroy,
   2202		.rx_stall_cbfn = bnad_cb_rx_stall,
   2203		.rx_cleanup_cbfn = bnad_cb_rx_cleanup,
   2204		.rx_post_cbfn = bnad_cb_rx_post,
   2205	};
   2206	struct bna_rx *rx;
   2207	unsigned long flags;
   2208
   2209	rx_info->rx_id = rx_id;
   2210
   2211	/* Initialize the Rx object configuration */
   2212	bnad_init_rx_config(bnad, rx_config);
   2213
   2214	/* Get BNA's resource requirement for one Rx object */
   2215	spin_lock_irqsave(&bnad->bna_lock, flags);
   2216	bna_rx_res_req(rx_config, res_info);
   2217	spin_unlock_irqrestore(&bnad->bna_lock, flags);
   2218
   2219	/* Fill Unmap Q memory requirements */
   2220	BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPDQ],
   2221				 rx_config->num_paths,
   2222			(rx_config->q0_depth *
   2223			 sizeof(struct bnad_rx_unmap)) +
   2224			 sizeof(struct bnad_rx_unmap_q));
   2225
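       	/* q1 (the small/header buffer queue) is needed only for SLR and HDS rxp types */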
   2226	if (rx_config->rxp_type != BNA_RXP_SINGLE) {
   2227		BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPHQ],
   2228					 rx_config->num_paths,
   2229				(rx_config->q1_depth *
   2230				 sizeof(struct bnad_rx_unmap) +
   2231				 sizeof(struct bnad_rx_unmap_q)));
   2232	}
   2233	/* Allocate resource */
   2234	err = bnad_rx_res_alloc(bnad, res_info, rx_id);
   2235	if (err)
   2236		return err;
   2237
   2238	bnad_rx_ctrl_init(bnad, rx_id);
   2239
   2240	/* Ask BNA to create one Rx object, supplying required resources */
   2241	spin_lock_irqsave(&bnad->bna_lock, flags);
   2242	rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
   2243			rx_info);
   2244	if (!rx) {
   2245		err = -ENOMEM;
   2246		spin_unlock_irqrestore(&bnad->bna_lock, flags);
   2247		goto err_return;
   2248	}
   2249	rx_info->rx = rx;
   2250	spin_unlock_irqrestore(&bnad->bna_lock, flags);
   2251
   2252	INIT_WORK(&rx_info->rx_cleanup_work,
   2253			(work_func_t)(bnad_rx_cleanup));
   2254
   2255	/*
   2256	 * Init NAPI: its state starts as NAPI_STATE_SCHED, so the IRQ
   2257	 * handler cannot schedule NAPI before it is enabled.
   2258	 */
   2259	bnad_napi_add(bnad, rx_id);
   2260
   2261	/* Register ISR for the Rx object */
   2262	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
   2263		err = bnad_rx_msix_register(bnad, rx_info, rx_id,
   2264						rx_config->num_paths);
   2265		if (err)
   2266			goto err_return;
   2267	}
   2268
   2269	spin_lock_irqsave(&bnad->bna_lock, flags);
   2270	if (0 == rx_id) {
   2271		/* Set up Dynamic Interrupt Moderation Vector */
   2272		if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
   2273			bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
   2274
   2275		/* Enable VLAN filtering only on the default Rx */
   2276		bna_rx_vlanfilter_enable(rx);
   2277
   2278		/* Start the DIM timer */
   2279		bnad_dim_timer_start(bnad);
   2280	}
   2281
   2282	bna_rx_enable(rx);
   2283	spin_unlock_irqrestore(&bnad->bna_lock, flags);
   2284
   2285	return 0;
   2286
   2287err_return:
   2288	bnad_destroy_rx(bnad, rx_id);
   2289	return err;
   2290}
   2291
   2292/* Called with conf_lock & bnad->bna_lock held */
   2293void
   2294bnad_tx_coalescing_timeo_set(struct bnad *bnad)
   2295{
   2296	struct bnad_tx_info *tx_info;
   2297
   2298	tx_info = &bnad->tx_info[0];
   2299	if (!tx_info->tx)
   2300		return;
   2301
   2302	bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
   2303}
   2304
   2305/* Called with conf_lock & bnad->bna_lock held */
   2306void
   2307bnad_rx_coalescing_timeo_set(struct bnad *bnad)
   2308{
   2309	struct bnad_rx_info *rx_info;
   2310	int	i;
   2311
   2312	for (i = 0; i < bnad->num_rx; i++) {
   2313		rx_info = &bnad->rx_info[i];
   2314		if (!rx_info->rx)
   2315			continue;
   2316		bna_rx_coalescing_timeo_set(rx_info->rx,
   2317				bnad->rx_coalescing_timeo);
   2318	}
   2319}
   2320
   2321/*
   2322 * Called with bnad->bna_lock held
   2323 */
   2324int
   2325bnad_mac_addr_set_locked(struct bnad *bnad, const u8 *mac_addr)
   2326{
   2327	int ret;
   2328
   2329	if (!is_valid_ether_addr(mac_addr))
   2330		return -EADDRNOTAVAIL;
   2331
   2332	/* If datapath is down, pretend everything went through */
   2333	if (!bnad->rx_info[0].rx)
   2334		return 0;
   2335
   2336	ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr);
   2337	if (ret != BNA_CB_SUCCESS)
   2338		return -EADDRNOTAVAIL;
   2339
   2340	return 0;
   2341}
   2342
   2343/* Should be called with conf_lock held */
   2344int
   2345bnad_enable_default_bcast(struct bnad *bnad)
   2346{
   2347	struct bnad_rx_info *rx_info = &bnad->rx_info[0];
   2348	int ret;
   2349	unsigned long flags;
   2350
   2351	init_completion(&bnad->bnad_completions.mcast_comp);
   2352
   2353	spin_lock_irqsave(&bnad->bna_lock, flags);
   2354	ret = bna_rx_mcast_add(rx_info->rx, bnad_bcast_addr,
   2355			       bnad_cb_rx_mcast_add);
   2356	spin_unlock_irqrestore(&bnad->bna_lock, flags);
   2357
   2358	if (ret == BNA_CB_SUCCESS)
   2359		wait_for_completion(&bnad->bnad_completions.mcast_comp);
   2360	else
   2361		return -ENODEV;
   2362
   2363	if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
   2364		return -ENODEV;
   2365
   2366	return 0;
   2367}
   2368
   2369/* Called with mutex_lock(&bnad->conf_mutex) held */
   2370void
   2371bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
   2372{
   2373	u16 vid;
   2374	unsigned long flags;
   2375
   2376	for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
   2377		spin_lock_irqsave(&bnad->bna_lock, flags);
   2378		bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
   2379		spin_unlock_irqrestore(&bnad->bna_lock, flags);
   2380	}
   2381}
   2382
   2383/* Statistics utilities */
   2384void
   2385bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
   2386{
   2387	int i, j;
   2388
   2389	for (i = 0; i < bnad->num_rx; i++) {
   2390		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
   2391			if (bnad->rx_info[i].rx_ctrl[j].ccb) {
   2392				stats->rx_packets += bnad->rx_info[i].
   2393				rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
   2394				stats->rx_bytes += bnad->rx_info[i].
   2395					rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
   2396				if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
   2397					bnad->rx_info[i].rx_ctrl[j].ccb->
   2398					rcb[1]->rxq) {
   2399					stats->rx_packets +=
   2400						bnad->rx_info[i].rx_ctrl[j].
   2401						ccb->rcb[1]->rxq->rx_packets;
   2402					stats->rx_bytes +=
   2403						bnad->rx_info[i].rx_ctrl[j].
   2404						ccb->rcb[1]->rxq->rx_bytes;
   2405				}
   2406			}
   2407		}
   2408	}
   2409	for (i = 0; i < bnad->num_tx; i++) {
   2410		for (j = 0; j < bnad->num_txq_per_tx; j++) {
   2411			if (bnad->tx_info[i].tcb[j]) {
   2412				stats->tx_packets +=
   2413				bnad->tx_info[i].tcb[j]->txq->tx_packets;
   2414				stats->tx_bytes +=
   2415					bnad->tx_info[i].tcb[j]->txq->tx_bytes;
   2416			}
   2417		}
   2418	}
   2419}
   2420
   2421/*
   2422 * Must be called with the bna_lock held.
   2423 */
   2424void
   2425bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
   2426{
   2427	struct bfi_enet_stats_mac *mac_stats;
   2428	u32 bmap;
   2429	int i;
   2430
   2431	mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
   2432	stats->rx_errors =
   2433		mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
   2434		mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
   2435		mac_stats->rx_undersize;
   2436	stats->tx_errors = mac_stats->tx_fcs_error +
   2437					mac_stats->tx_undersize;
   2438	stats->rx_dropped = mac_stats->rx_drop;
   2439	stats->tx_dropped = mac_stats->tx_drop;
   2440	stats->multicast = mac_stats->rx_multicast;
   2441	stats->collisions = mac_stats->tx_total_collision;
   2442
   2443	stats->rx_length_errors = mac_stats->rx_frame_length_error;
   2444
   2445	/* receive ring buffer overflow: no matching MAC counter, so rx_over_errors is left unset */
   2446
   2447	stats->rx_crc_errors = mac_stats->rx_fcs_error;
   2448	stats->rx_frame_errors = mac_stats->rx_alignment_error;
   2449	/* receiver FIFO overrun: use frame drops from the first active RxF */
   2450	bmap = bna_rx_rid_mask(&bnad->bna);
   2451	for (i = 0; bmap; i++) {
   2452		if (bmap & 1) {
   2453			stats->rx_fifo_errors +=
   2454				bnad->stats.bna_stats->
   2455					hw_stats.rxf_stats[i].frame_drops;
   2456			break;
   2457		}
   2458		bmap >>= 1;
   2459	}
   2460}
   2461
   2462static void
   2463bnad_mbox_irq_sync(struct bnad *bnad)
   2464{
   2465	u32 irq;
   2466	unsigned long flags;
   2467
   2468	spin_lock_irqsave(&bnad->bna_lock, flags);
   2469	if (bnad->cfg_flags & BNAD_CF_MSIX)
   2470		irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
   2471	else
   2472		irq = bnad->pcidev->irq;
   2473	spin_unlock_irqrestore(&bnad->bna_lock, flags);
   2474
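       	/* wait for any in-flight mailbox interrupt handler to finish */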
   2475	synchronize_irq(irq);
   2476}
   2477
   2478/* Utility used by bnad_start_xmit, for doing TSO */
   2479static int
   2480bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
   2481{
   2482	int err;
   2483
   2484	err = skb_cow_head(skb, 0);
   2485	if (err < 0) {
   2486		BNAD_UPDATE_CTR(bnad, tso_err);
   2487		return err;
   2488	}
   2489
   2490	/*
   2491	 * For TSO, the TCP checksum field is seeded with pseudo-header sum
   2492	 * excluding the length field.
   2493	 */
   2494	if (vlan_get_protocol(skb) == htons(ETH_P_IP)) {
   2495		struct iphdr *iph = ip_hdr(skb);
   2496
   2497		/* tot_len and check are recomputed for each TSO segment, so clear them */
   2498		iph->tot_len = 0;
   2499		iph->check = 0;
   2500
   2501		tcp_hdr(skb)->check =
   2502			~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
   2503					   IPPROTO_TCP, 0);
   2504		BNAD_UPDATE_CTR(bnad, tso4);
   2505	} else {
   2506		tcp_v6_gso_csum_prep(skb);
   2507		BNAD_UPDATE_CTR(bnad, tso6);
   2508	}
   2509
   2510	return 0;
   2511}
   2512
   2513/*
   2514 * Initialize Q numbers depending on Rx Paths
   2515 * Called with bnad->bna_lock held, because of cfg_flags
   2516 * access.
   2517 */
   2518static void
   2519bnad_q_num_init(struct bnad *bnad)
   2520{
   2521	int rxps;
   2522
   2523	rxps = min((uint)num_online_cpus(),
   2524			(uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));
   2525
   2526	if (!(bnad->cfg_flags & BNAD_CF_MSIX))
   2527		rxps = 1;	/* INTx */
   2528
   2529	bnad->num_rx = 1;
   2530	bnad->num_tx = 1;
   2531	bnad->num_rxp_per_rx = rxps;
   2532	bnad->num_txq_per_tx = BNAD_TXQ_NUM;
   2533}
   2534
   2535/*
   2536 * Adjust the queue numbers for a given number of MSI-X vectors.
   2537 * RSS is preferred over Tx priority queues; in that case only one
   2538 * TxQ is used.
   2539 * Called with bnad->bna_lock held because of cfg_flags access.
   2540 */
   2541static void
   2542bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
   2543{
   2544	bnad->num_txq_per_tx = 1;
   2545	if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx)  +
   2546	     bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
   2547	    (bnad->cfg_flags & BNAD_CF_MSIX)) {
   2548		bnad->num_rxp_per_rx = msix_vectors -
   2549			(bnad->num_tx * bnad->num_txq_per_tx) -
   2550			BNAD_MAILBOX_MSIX_VECTORS;
   2551	} else
   2552		bnad->num_rxp_per_rx = 1;
   2553}
   2554
   2555/* Enable / disable ioceth */
   2556static int
   2557bnad_ioceth_disable(struct bnad *bnad)
   2558{
   2559	unsigned long flags;
   2560	int err = 0;
   2561
   2562	spin_lock_irqsave(&bnad->bna_lock, flags);
   2563	init_completion(&bnad->bnad_completions.ioc_comp);
   2564	bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
   2565	spin_unlock_irqrestore(&bnad->bna_lock, flags);
   2566
   2567	wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
   2568		msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
   2569
   2570	err = bnad->bnad_completions.ioc_comp_status;
   2571	return err;
   2572}
   2573
   2574static int
   2575bnad_ioceth_enable(struct bnad *bnad)
   2576{
   2577	int err = 0;
   2578	unsigned long flags;
   2579
   2580	spin_lock_irqsave(&bnad->bna_lock, flags);
   2581	init_completion(&bnad->bnad_completions.ioc_comp);
   2582	bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
   2583	bna_ioceth_enable(&bnad->bna.ioceth);
   2584	spin_unlock_irqrestore(&bnad->bna_lock, flags);
   2585
   2586	wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
   2587		msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
   2588
   2589	err = bnad->bnad_completions.ioc_comp_status;
   2590
   2591	return err;
   2592}
   2593
   2594/* Free BNA resources */
   2595static void
   2596bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
   2597		u32 res_val_max)
   2598{
   2599	int i;
   2600
   2601	for (i = 0; i < res_val_max; i++)
   2602		bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
   2603}
   2604
   2605/* Allocates memory and interrupt resources for BNA */
   2606static int
   2607bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
   2608		u32 res_val_max)
   2609{
   2610	int i, err;
   2611
   2612	for (i = 0; i < res_val_max; i++) {
   2613		err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
   2614		if (err)
   2615			goto err_return;
   2616	}
   2617	return 0;
   2618
   2619err_return:
   2620	bnad_res_free(bnad, res_info, res_val_max);
   2621	return err;
   2622}
   2623
   2624/* Interrupt enable / disable */
   2625static void
   2626bnad_enable_msix(struct bnad *bnad)
   2627{
   2628	int i, ret;
   2629	unsigned long flags;
   2630
   2631	spin_lock_irqsave(&bnad->bna_lock, flags);
   2632	if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
   2633		spin_unlock_irqrestore(&bnad->bna_lock, flags);
   2634		return;
   2635	}
   2636	spin_unlock_irqrestore(&bnad->bna_lock, flags);
   2637
   2638	if (bnad->msix_table)
   2639		return;
   2640
   2641	bnad->msix_table =
   2642		kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
   2643
   2644	if (!bnad->msix_table)
   2645		goto intx_mode;
   2646
   2647	for (i = 0; i < bnad->msix_num; i++)
   2648		bnad->msix_table[i].entry = i;
   2649
   2650	ret = pci_enable_msix_range(bnad->pcidev, bnad->msix_table,
   2651				    1, bnad->msix_num);
   2652	if (ret < 0) {
   2653		goto intx_mode;
   2654	} else if (ret < bnad->msix_num) {
   2655		dev_warn(&bnad->pcidev->dev,
   2656			 "%d MSI-X vectors allocated < %d requested\n",
   2657			 ret, bnad->msix_num);
   2658
   2659		spin_lock_irqsave(&bnad->bna_lock, flags);
   2660		/* ret = #of vectors that we got */
   2661		bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
   2662			(ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
   2663		spin_unlock_irqrestore(&bnad->bna_lock, flags);
   2664
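       		/*
       		 * Recompute the vector count for the reduced queue set and
       		 * fall back to INTx if even that exceeds what was granted.
       		 */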
   2665		bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
   2666			 BNAD_MAILBOX_MSIX_VECTORS;
   2667
   2668		if (bnad->msix_num > ret) {
   2669			pci_disable_msix(bnad->pcidev);
   2670			goto intx_mode;
   2671		}
   2672	}
   2673
   2674	pci_intx(bnad->pcidev, 0);
   2675
   2676	return;
   2677
   2678intx_mode:
   2679	dev_warn(&bnad->pcidev->dev,
   2680		 "MSI-X enable failed - operating in INTx mode\n");
   2681
   2682	kfree(bnad->msix_table);
   2683	bnad->msix_table = NULL;
   2684	bnad->msix_num = 0;
   2685	spin_lock_irqsave(&bnad->bna_lock, flags);
   2686	bnad->cfg_flags &= ~BNAD_CF_MSIX;
   2687	bnad_q_num_init(bnad);
   2688	spin_unlock_irqrestore(&bnad->bna_lock, flags);
   2689}
   2690
   2691static void
   2692bnad_disable_msix(struct bnad *bnad)
   2693{
   2694	u32 cfg_flags;
   2695	unsigned long flags;
   2696
   2697	spin_lock_irqsave(&bnad->bna_lock, flags);
   2698	cfg_flags = bnad->cfg_flags;
   2699	if (bnad->cfg_flags & BNAD_CF_MSIX)
   2700		bnad->cfg_flags &= ~BNAD_CF_MSIX;
   2701	spin_unlock_irqrestore(&bnad->bna_lock, flags);
   2702
   2703	if (cfg_flags & BNAD_CF_MSIX) {
   2704		pci_disable_msix(bnad->pcidev);
   2705		kfree(bnad->msix_table);
   2706		bnad->msix_table = NULL;
   2707	}
   2708}
   2709
   2710/* Netdev entry points */
   2711static int
   2712bnad_open(struct net_device *netdev)
   2713{
   2714	int err;
   2715	struct bnad *bnad = netdev_priv(netdev);
   2716	struct bna_pause_config pause_config;
   2717	unsigned long flags;
   2718
   2719	mutex_lock(&bnad->conf_mutex);
   2720
   2721	/* Tx */
   2722	err = bnad_setup_tx(bnad, 0);
   2723	if (err)
   2724		goto err_return;
   2725
   2726	/* Rx */
   2727	err = bnad_setup_rx(bnad, 0);
   2728	if (err)
   2729		goto cleanup_tx;
   2730
   2731	/* Port */
   2732	pause_config.tx_pause = 0;
   2733	pause_config.rx_pause = 0;
   2734
   2735	spin_lock_irqsave(&bnad->bna_lock, flags);
   2736	bna_enet_mtu_set(&bnad->bna.enet,
   2737			 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
   2738	bna_enet_pause_config(&bnad->bna.enet, &pause_config);
   2739	bna_enet_enable(&bnad->bna.enet);
   2740	spin_unlock_irqrestore(&bnad->bna_lock, flags);
   2741
   2742	/* Enable broadcast */
   2743	bnad_enable_default_bcast(bnad);
   2744
   2745	/* Restore VLANs, if any */
   2746	bnad_restore_vlans(bnad, 0);
   2747
   2748	/* Set the UCAST address */
   2749	spin_lock_irqsave(&bnad->bna_lock, flags);
   2750	bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
   2751	spin_unlock_irqrestore(&bnad->bna_lock, flags);
   2752
   2753	/* Start the stats timer */
   2754	bnad_stats_timer_start(bnad);
   2755
   2756	mutex_unlock(&bnad->conf_mutex);
   2757
   2758	return 0;
   2759
   2760cleanup_tx:
   2761	bnad_destroy_tx(bnad, 0);
   2762
   2763err_return:
   2764	mutex_unlock(&bnad->conf_mutex);
   2765	return err;
   2766}
   2767
   2768static int
   2769bnad_stop(struct net_device *netdev)
   2770{
   2771	struct bnad *bnad = netdev_priv(netdev);
   2772	unsigned long flags;
   2773
   2774	mutex_lock(&bnad->conf_mutex);
   2775
   2776	/* Stop the stats timer */
   2777	bnad_stats_timer_stop(bnad);
   2778
   2779	init_completion(&bnad->bnad_completions.enet_comp);
   2780
   2781	spin_lock_irqsave(&bnad->bna_lock, flags);
   2782	bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
   2783			bnad_cb_enet_disabled);
   2784	spin_unlock_irqrestore(&bnad->bna_lock, flags);
   2785
   2786	wait_for_completion(&bnad->bnad_completions.enet_comp);
   2787
   2788	bnad_destroy_tx(bnad, 0);
   2789	bnad_destroy_rx(bnad, 0);
   2790
   2791	/* Synchronize mailbox IRQ */
   2792	bnad_mbox_irq_sync(bnad);
   2793
   2794	mutex_unlock(&bnad->conf_mutex);
   2795
   2796	return 0;
   2797}
   2798
   2799/* TX */
   2800/* Returns 0 for success */
   2801static int
   2802bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
   2803		    struct sk_buff *skb, struct bna_txq_entry *txqent)
   2804{
   2805	u16 flags = 0;
   2806	u32 gso_size;
   2807	u16 vlan_tag = 0;
   2808
   2809	if (skb_vlan_tag_present(skb)) {
   2810		vlan_tag = (u16)skb_vlan_tag_get(skb);
   2811		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
   2812	}
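       	/* with CEE running, replace the VLAN priority bits with the
       	 * TxQ priority while keeping the VID bits
       	 */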
   2813	if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
   2814		vlan_tag = ((tcb->priority & 0x7) << VLAN_PRIO_SHIFT)
   2815				| (vlan_tag & 0x1fff);
   2816		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
   2817	}
   2818	txqent->hdr.wi.vlan_tag = htons(vlan_tag);
   2819
   2820	if (skb_is_gso(skb)) {
   2821		gso_size = skb_shinfo(skb)->gso_size;
   2822		if (unlikely(gso_size > bnad->netdev->mtu)) {
   2823			BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
   2824			return -EINVAL;
   2825		}
   2826		if (unlikely((gso_size + skb_transport_offset(skb) +
   2827			      tcp_hdrlen(skb)) >= skb->len)) {
   2828			txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
   2829			txqent->hdr.wi.lso_mss = 0;
   2830			BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
   2831		} else {
   2832			txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND_LSO);
   2833			txqent->hdr.wi.lso_mss = htons(gso_size);
   2834		}
   2835
   2836		if (bnad_tso_prepare(bnad, skb)) {
   2837			BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
   2838			return -EINVAL;
   2839		}
   2840
   2841		flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
   2842		txqent->hdr.wi.l4_hdr_size_n_offset =
   2843			htons(BNA_TXQ_WI_L4_HDR_N_OFFSET(
   2844			tcp_hdrlen(skb) >> 2, skb_transport_offset(skb)));
   2845	} else  {
   2846		txqent->hdr.wi.opcode =	htons(BNA_TXQ_WI_SEND);
   2847		txqent->hdr.wi.lso_mss = 0;
   2848
   2849		if (unlikely(skb->len > (bnad->netdev->mtu + VLAN_ETH_HLEN))) {
   2850			BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
   2851			return -EINVAL;
   2852		}
   2853
   2854		if (skb->ip_summed == CHECKSUM_PARTIAL) {
   2855			__be16 net_proto = vlan_get_protocol(skb);
   2856			u8 proto = 0;
   2857
   2858			if (net_proto == htons(ETH_P_IP))
   2859				proto = ip_hdr(skb)->protocol;
   2860#ifdef NETIF_F_IPV6_CSUM
   2861			else if (net_proto == htons(ETH_P_IPV6)) {
   2862				/* nexthdr may not be TCP if extension headers are present */
   2863				proto = ipv6_hdr(skb)->nexthdr;
   2864			}
   2865#endif
   2866			if (proto == IPPROTO_TCP) {
   2867				flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
   2868				txqent->hdr.wi.l4_hdr_size_n_offset =
   2869					htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
   2870					      (0, skb_transport_offset(skb)));
   2871
   2872				BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
   2873
   2874				if (unlikely(skb_headlen(skb) <
   2875					    skb_transport_offset(skb) +
   2876				    tcp_hdrlen(skb))) {
   2877					BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
   2878					return -EINVAL;
   2879				}
   2880			} else if (proto == IPPROTO_UDP) {
   2881				flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
   2882				txqent->hdr.wi.l4_hdr_size_n_offset =
   2883					htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
   2884					      (0, skb_transport_offset(skb)));
   2885
   2886				BNAD_UPDATE_CTR(bnad, udpcsum_offload);
   2887				if (unlikely(skb_headlen(skb) <
   2888					    skb_transport_offset(skb) +
   2889				    sizeof(struct udphdr))) {
   2890					BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
   2891					return -EINVAL;
   2892				}
   2893			} else {
   2894
   2895				BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
   2896				return -EINVAL;
   2897			}
   2898		} else
   2899			txqent->hdr.wi.l4_hdr_size_n_offset = 0;
   2900	}
   2901
   2902	txqent->hdr.wi.flags = htons(flags);
   2903	txqent->hdr.wi.frame_length = htonl(skb->len);
   2904
   2905	return 0;
   2906}
   2907
   2908/*
   2909 * bnad_start_xmit : Netdev entry point for Transmit
   2910 *		     Called under lock held by net_device
   2911 */
   2912static netdev_tx_t
   2913bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
   2914{
   2915	struct bnad *bnad = netdev_priv(netdev);
   2916	u32 txq_id = 0;
   2917	struct bna_tcb *tcb = NULL;
   2918	struct bnad_tx_unmap *unmap_q, *unmap, *head_unmap;
   2919	u32		prod, q_depth, vect_id;
   2920	u32		wis, vectors, len;
   2921	int		i;
   2922	dma_addr_t		dma_addr;
   2923	struct bna_txq_entry *txqent;
   2924
   2925	len = skb_headlen(skb);
   2926
   2927	/* Sanity checks for the skb */
   2928
   2929	if (unlikely(skb->len <= ETH_HLEN)) {
   2930		dev_kfree_skb_any(skb);
   2931		BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
   2932		return NETDEV_TX_OK;
   2933	}
   2934	if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) {
   2935		dev_kfree_skb_any(skb);
   2936		BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
   2937		return NETDEV_TX_OK;
   2938	}
   2939	if (unlikely(len == 0)) {
   2940		dev_kfree_skb_any(skb);
   2941		BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
   2942		return NETDEV_TX_OK;
   2943	}
   2944
   2945	tcb = bnad->tx_info[0].tcb[txq_id];
   2946
   2947	/*
   2948	 * Takes care of the Tx that is scheduled between clearing the flag
   2949	 * and the netif_tx_stop_all_queues() call.
   2950	 */
   2951	if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
   2952		dev_kfree_skb_any(skb);
   2953		BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
   2954		return NETDEV_TX_OK;
   2955	}
   2956
   2957	q_depth = tcb->q_depth;
   2958	prod = tcb->producer_index;
   2959	unmap_q = tcb->unmap_q;
   2960
   2961	vectors = 1 + skb_shinfo(skb)->nr_frags;
   2962	wis = BNA_TXQ_WI_NEEDED(vectors);	/* 4 vectors per work item */
   2963
   2964	if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
   2965		dev_kfree_skb_any(skb);
   2966		BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
   2967		return NETDEV_TX_OK;
   2968	}
   2969
   2970	/* Check for available TxQ resources */
   2971	if (unlikely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
   2972		if ((*tcb->hw_consumer_index != tcb->consumer_index) &&
   2973		    !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
   2974			u32 sent;
   2975			sent = bnad_txcmpl_process(bnad, tcb);
   2976			if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
   2977				bna_ib_ack(tcb->i_dbell, sent);
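       			/* order the completion processing above before clearing BNAD_TXQ_FREE_SENT */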
   2978			smp_mb__before_atomic();
   2979			clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
   2980		} else {
   2981			netif_stop_queue(netdev);
   2982			BNAD_UPDATE_CTR(bnad, netif_queue_stop);
   2983		}
   2984
   2985		smp_mb();
   2986		/*
   2987		 * Check again to handle the race between netif_stop_queue()
   2988		 * here and netif_wake_queue() in the interrupt handler, which
   2989		 * does not run under the netif tx lock.
   2990		 */
   2991		if (likely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
   2992			BNAD_UPDATE_CTR(bnad, netif_queue_stop);
   2993			return NETDEV_TX_BUSY;
   2994		} else {
   2995			netif_wake_queue(netdev);
   2996			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
   2997		}
   2998	}
   2999
   3000	txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
   3001	head_unmap = &unmap_q[prod];
   3002
   3003	/* Program the opcode, flags, frame_len, num_vectors in WI */
   3004	if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) {
   3005		dev_kfree_skb_any(skb);
   3006		return NETDEV_TX_OK;
   3007	}
   3008	txqent->hdr.wi.reserved = 0;
   3009	txqent->hdr.wi.num_vectors = vectors;
   3010
   3011	head_unmap->skb = skb;
   3012	head_unmap->nvecs = 0;
   3013
   3014	/* Program the vectors */
   3015	unmap = head_unmap;
   3016	dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
   3017				  len, DMA_TO_DEVICE);
   3018	if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
   3019		dev_kfree_skb_any(skb);
   3020		BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
   3021		return NETDEV_TX_OK;
   3022	}
   3023	BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
   3024	txqent->vector[0].length = htons(len);
   3025	dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr);
   3026	head_unmap->nvecs++;
   3027
   3028	for (i = 0, vect_id = 0; i < vectors - 1; i++) {
   3029		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
   3030		u32		size = skb_frag_size(frag);
   3031
   3032		if (unlikely(size == 0)) {
   3033			/* Undo the changes starting at tcb->producer_index */
   3034			bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
   3035				tcb->producer_index);
   3036			dev_kfree_skb_any(skb);
   3037			BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
   3038			return NETDEV_TX_OK;
   3039		}
   3040
   3041		len += size;
   3042
   3043		vect_id++;
   3044		if (vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
   3045			vect_id = 0;
   3046			BNA_QE_INDX_INC(prod, q_depth);
   3047			txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
   3048			txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
   3049			unmap = &unmap_q[prod];
   3050		}
   3051
   3052		dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
   3053					    0, size, DMA_TO_DEVICE);
   3054		if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
   3055			/* Undo the changes starting at tcb->producer_index */
   3056			bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
   3057					   tcb->producer_index);
   3058			dev_kfree_skb_any(skb);
   3059			BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
   3060			return NETDEV_TX_OK;
   3061		}
   3062
   3063		dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size);
   3064		BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
   3065		txqent->vector[vect_id].length = htons(size);
   3066		dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr,
   3067				   dma_addr);
   3068		head_unmap->nvecs++;
   3069	}
   3070
   3071	if (unlikely(len != skb->len)) {
   3072		/* Undo the changes starting at tcb->producer_index */
   3073		bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index);
   3074		dev_kfree_skb_any(skb);
   3075		BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
   3076		return NETDEV_TX_OK;
   3077	}
   3078
   3079	BNA_QE_INDX_INC(prod, q_depth);
   3080	tcb->producer_index = prod;
   3081
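       	/* make the queued work items visible to the device before the doorbell write */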
   3082	wmb();
   3083
   3084	if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
   3085		return NETDEV_TX_OK;
   3086
   3087	skb_tx_timestamp(skb);
   3088
   3089	bna_txq_prod_indx_doorbell(tcb);
   3090
   3091	return NETDEV_TX_OK;
   3092}
   3093
   3094/*
   3095 * Uses bna_lock to synchronize reading of the stats structures, which
   3096 * are written by BNA under the same lock.
   3097 */
   3098static void
   3099bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
   3100{
   3101	struct bnad *bnad = netdev_priv(netdev);
   3102	unsigned long flags;
   3103
   3104	spin_lock_irqsave(&bnad->bna_lock, flags);
   3105
   3106	bnad_netdev_qstats_fill(bnad, stats);
   3107	bnad_netdev_hwstats_fill(bnad, stats);
   3108
   3109	spin_unlock_irqrestore(&bnad->bna_lock, flags);
   3110}
   3111
   3112static void
   3113bnad_set_rx_ucast_fltr(struct bnad *bnad)
   3114{
   3115	struct net_device *netdev = bnad->netdev;
   3116	int uc_count = netdev_uc_count(netdev);
   3117	enum bna_cb_status ret;
   3118	u8 *mac_list;
   3119	struct netdev_hw_addr *ha;
   3120	int entry;
   3121
   3122	if (netdev_uc_empty(bnad->netdev)) {
   3123		bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
   3124		return;
   3125	}
   3126
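       	/* too many unicast addresses for the UCAM: fall back to default-function mode */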
   3127	if (uc_count > bna_attr(&bnad->bna)->num_ucmac)
   3128		goto mode_default;
   3129
   3130	mac_list = kcalloc(ETH_ALEN, uc_count, GFP_ATOMIC);
   3131	if (mac_list == NULL)
   3132		goto mode_default;
   3133
   3134	entry = 0;
   3135	netdev_for_each_uc_addr(ha, netdev) {
   3136		ether_addr_copy(&mac_list[entry * ETH_ALEN], &ha->addr[0]);
   3137		entry++;
   3138	}
   3139
   3140	ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry, mac_list);
   3141	kfree(mac_list);
   3142
   3143	if (ret != BNA_CB_SUCCESS)
   3144		goto mode_default;
   3145
   3146	return;
   3147
   3148	/* ucast packets not in UCAM are routed to default function */
   3149mode_default:
   3150	bnad->cfg_flags |= BNAD_CF_DEFAULT;
   3151	bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
   3152}
   3153
   3154static void
   3155bnad_set_rx_mcast_fltr(struct bnad *bnad)
   3156{
   3157	struct net_device *netdev = bnad->netdev;
   3158	int mc_count = netdev_mc_count(netdev);
   3159	enum bna_cb_status ret;
   3160	u8 *mac_list;
   3161
   3162	if (netdev->flags & IFF_ALLMULTI)
   3163		goto mode_allmulti;
   3164
   3165	if (netdev_mc_empty(netdev))
   3166		return;
   3167
   3168	if (mc_count > bna_attr(&bnad->bna)->num_mcmac)
   3169		goto mode_allmulti;
   3170
   3171	mac_list = kcalloc(mc_count + 1, ETH_ALEN, GFP_ATOMIC);
   3172
   3173	if (mac_list == NULL)
   3174		goto mode_allmulti;
   3175
   3176	ether_addr_copy(&mac_list[0], &bnad_bcast_addr[0]);
   3177
   3178	/* copy rest of the MCAST addresses */
   3179	bnad_netdev_mc_list_get(netdev, mac_list);
   3180	ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1, mac_list);
   3181	kfree(mac_list);
   3182
   3183	if (ret != BNA_CB_SUCCESS)
   3184		goto mode_allmulti;
   3185
   3186	return;
   3187
   3188mode_allmulti:
   3189	bnad->cfg_flags |= BNAD_CF_ALLMULTI;
   3190	bna_rx_mcast_delall(bnad->rx_info[0].rx);
   3191}
   3192
   3193void
   3194bnad_set_rx_mode(struct net_device *netdev)
   3195{
   3196	struct bnad *bnad = netdev_priv(netdev);
   3197	enum bna_rxmode new_mode, mode_mask;
   3198	unsigned long flags;
   3199
   3200	spin_lock_irqsave(&bnad->bna_lock, flags);
   3201
   3202	if (bnad->rx_info[0].rx == NULL) {
   3203		spin_unlock_irqrestore(&bnad->bna_lock, flags);
   3204		return;
   3205	}
   3206
   3207	/* clear bnad flags to update it with new settings */
   3208	bnad->cfg_flags &= ~(BNAD_CF_PROMISC | BNAD_CF_DEFAULT |
   3209			BNAD_CF_ALLMULTI);
   3210
   3211	new_mode = 0;
   3212	if (netdev->flags & IFF_PROMISC) {
   3213		new_mode |= BNAD_RXMODE_PROMISC_DEFAULT;
   3214		bnad->cfg_flags |= BNAD_CF_PROMISC;
   3215	} else {
   3216		bnad_set_rx_mcast_fltr(bnad);
   3217
   3218		if (bnad->cfg_flags & BNAD_CF_ALLMULTI)
   3219			new_mode |= BNA_RXMODE_ALLMULTI;
   3220
   3221		bnad_set_rx_ucast_fltr(bnad);
   3222
   3223		if (bnad->cfg_flags & BNAD_CF_DEFAULT)
   3224			new_mode |= BNA_RXMODE_DEFAULT;
   3225	}
   3226
   3227	mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT |
   3228			BNA_RXMODE_ALLMULTI;
   3229	bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask);
   3230
   3231	spin_unlock_irqrestore(&bnad->bna_lock, flags);
   3232}
   3233
   3234/*
   3235 * bna_lock is used to sync writes to netdev->dev_addr
   3236 * conf_lock cannot be used since this call may be made
   3237 * in a non-blocking context.
   3238 */
   3239static int
   3240bnad_set_mac_address(struct net_device *netdev, void *addr)
   3241{
   3242	int err;
   3243	struct bnad *bnad = netdev_priv(netdev);
   3244	struct sockaddr *sa = (struct sockaddr *)addr;
   3245	unsigned long flags;
   3246
   3247	spin_lock_irqsave(&bnad->bna_lock, flags);
   3248
   3249	err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
   3250	if (!err)
   3251		eth_hw_addr_set(netdev, sa->sa_data);
   3252
   3253	spin_unlock_irqrestore(&bnad->bna_lock, flags);
   3254
   3255	return err;
   3256}
   3257
   3258static int
   3259bnad_mtu_set(struct bnad *bnad, int frame_size)
   3260{
   3261	unsigned long flags;
   3262
   3263	init_completion(&bnad->bnad_completions.mtu_comp);
   3264
   3265	spin_lock_irqsave(&bnad->bna_lock, flags);
   3266	bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set);
   3267	spin_unlock_irqrestore(&bnad->bna_lock, flags);
   3268
   3269	wait_for_completion(&bnad->bnad_completions.mtu_comp);
   3270
   3271	return bnad->bnad_completions.mtu_comp_status;
   3272}
   3273
   3274static int
   3275bnad_change_mtu(struct net_device *netdev, int new_mtu)
   3276{
   3277	int err, mtu;
   3278	struct bnad *bnad = netdev_priv(netdev);
   3279	u32 frame, new_frame;
   3280
   3281	mutex_lock(&bnad->conf_mutex);
   3282
   3283	mtu = netdev->mtu;
   3284	netdev->mtu = new_mtu;
   3285
   3286	frame = BNAD_FRAME_SIZE(mtu);
   3287	new_frame = BNAD_FRAME_SIZE(new_mtu);
   3288
   3289	/* check if multi-buffer needs to be enabled */
   3290	if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
   3291	    netif_running(bnad->netdev)) {
   3292		/* only when transition is over 4K */
   3293		if ((frame <= 4096 && new_frame > 4096) ||
   3294		    (frame > 4096 && new_frame <= 4096))
   3295			bnad_reinit_rx(bnad);
   3296	}
   3297
   3298	err = bnad_mtu_set(bnad, new_frame);
   3299	if (err)
   3300		err = -EBUSY;
   3301
   3302	mutex_unlock(&bnad->conf_mutex);
   3303	return err;
   3304}
   3305
   3306static int
   3307bnad_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
   3308{
   3309	struct bnad *bnad = netdev_priv(netdev);
   3310	unsigned long flags;
   3311
   3312	if (!bnad->rx_info[0].rx)
   3313		return 0;
   3314
   3315	mutex_lock(&bnad->conf_mutex);
   3316
   3317	spin_lock_irqsave(&bnad->bna_lock, flags);
   3318	bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
   3319	set_bit(vid, bnad->active_vlans);
   3320	spin_unlock_irqrestore(&bnad->bna_lock, flags);
   3321
   3322	mutex_unlock(&bnad->conf_mutex);
   3323
   3324	return 0;
   3325}
   3326
   3327static int
   3328bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
   3329{
   3330	struct bnad *bnad = netdev_priv(netdev);
   3331	unsigned long flags;
   3332
   3333	if (!bnad->rx_info[0].rx)
   3334		return 0;
   3335
   3336	mutex_lock(&bnad->conf_mutex);
   3337
   3338	spin_lock_irqsave(&bnad->bna_lock, flags);
   3339	clear_bit(vid, bnad->active_vlans);
   3340	bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
   3341	spin_unlock_irqrestore(&bnad->bna_lock, flags);
   3342
   3343	mutex_unlock(&bnad->conf_mutex);
   3344
   3345	return 0;
   3346}
   3347
   3348static int bnad_set_features(struct net_device *dev, netdev_features_t features)
   3349{
   3350	struct bnad *bnad = netdev_priv(dev);
   3351	netdev_features_t changed = features ^ dev->features;
   3352
   3353	if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(dev)) {
   3354		unsigned long flags;
   3355
   3356		spin_lock_irqsave(&bnad->bna_lock, flags);
   3357
   3358		if (features & NETIF_F_HW_VLAN_CTAG_RX)
   3359			bna_rx_vlan_strip_enable(bnad->rx_info[0].rx);
   3360		else
   3361			bna_rx_vlan_strip_disable(bnad->rx_info[0].rx);
   3362
   3363		spin_unlock_irqrestore(&bnad->bna_lock, flags);
   3364	}
   3365
   3366	return 0;
   3367}
   3368
   3369#ifdef CONFIG_NET_POLL_CONTROLLER
   3370static void
   3371bnad_netpoll(struct net_device *netdev)
   3372{
   3373	struct bnad *bnad = netdev_priv(netdev);
   3374	struct bnad_rx_info *rx_info;
   3375	struct bnad_rx_ctrl *rx_ctrl;
   3376	u32 curr_mask;
   3377	int i, j;
   3378
   3379	if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
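       		/* bna_intx_disable() is expected to fill curr_mask with the current interrupt mask (it is a macro), so no prior init is needed */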
   3380		bna_intx_disable(&bnad->bna, curr_mask);
   3381		bnad_isr(bnad->pcidev->irq, netdev);
   3382		bna_intx_enable(&bnad->bna, curr_mask);
   3383	} else {
   3384		/*
   3385		 * Tx processing may happen in sending context, so no need
   3386		 * to explicitly process completions here
   3387		 */
   3388
   3389		/* Rx processing */
   3390		for (i = 0; i < bnad->num_rx; i++) {
   3391			rx_info = &bnad->rx_info[i];
   3392			if (!rx_info->rx)
   3393				continue;
   3394			for (j = 0; j < bnad->num_rxp_per_rx; j++) {
   3395				rx_ctrl = &rx_info->rx_ctrl[j];
   3396				if (rx_ctrl->ccb)
   3397					bnad_netif_rx_schedule_poll(bnad,
   3398							    rx_ctrl->ccb);
   3399			}
   3400		}
   3401	}
   3402}
   3403#endif
   3404
   3405static const struct net_device_ops bnad_netdev_ops = {
   3406	.ndo_open		= bnad_open,
   3407	.ndo_stop		= bnad_stop,
   3408	.ndo_start_xmit		= bnad_start_xmit,
   3409	.ndo_get_stats64	= bnad_get_stats64,
   3410	.ndo_set_rx_mode	= bnad_set_rx_mode,
   3411	.ndo_validate_addr      = eth_validate_addr,
   3412	.ndo_set_mac_address    = bnad_set_mac_address,
   3413	.ndo_change_mtu		= bnad_change_mtu,
   3414	.ndo_vlan_rx_add_vid    = bnad_vlan_rx_add_vid,
   3415	.ndo_vlan_rx_kill_vid   = bnad_vlan_rx_kill_vid,
   3416	.ndo_set_features	= bnad_set_features,
   3417#ifdef CONFIG_NET_POLL_CONTROLLER
   3418	.ndo_poll_controller    = bnad_netpoll
   3419#endif
   3420};
   3421
   3422static void
   3423bnad_netdev_init(struct bnad *bnad)
   3424{
   3425	struct net_device *netdev = bnad->netdev;
   3426
   3427	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
   3428		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
   3429		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX |
   3430		NETIF_F_HW_VLAN_CTAG_RX;
   3431
   3432	netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
   3433		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
   3434		NETIF_F_TSO | NETIF_F_TSO6;
   3435
   3436	netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER |
   3437			    NETIF_F_HIGHDMA;
   3438
   3439	netdev->mem_start = bnad->mmio_start;
   3440	netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
   3441
   3442	/* MTU range: 46 - 9000 */
   3443	netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
   3444	netdev->max_mtu = BNAD_JUMBO_MTU;
   3445
   3446	netdev->netdev_ops = &bnad_netdev_ops;
   3447	bnad_set_ethtool_ops(netdev);
   3448}
   3449
   3450/*
   3451 * 1. Initialize the bnad structure
   3452 * 2. Setup netdev pointer in pci_dev
   3453 * 3. Initialize no. of TxQ & CQs & MSIX vectors
   3454 * 4. Initialize work queue.
   3455 */
   3456static int
   3457bnad_init(struct bnad *bnad,
   3458	  struct pci_dev *pdev, struct net_device *netdev)
   3459{
   3460	unsigned long flags;
   3461
   3462	SET_NETDEV_DEV(netdev, &pdev->dev);
   3463	pci_set_drvdata(pdev, netdev);
   3464
   3465	bnad->netdev = netdev;
   3466	bnad->pcidev = pdev;
   3467	bnad->mmio_start = pci_resource_start(pdev, 0);
   3468	bnad->mmio_len = pci_resource_len(pdev, 0);
   3469	bnad->bar0 = ioremap(bnad->mmio_start, bnad->mmio_len);
   3470	if (!bnad->bar0) {
   3471		dev_err(&pdev->dev, "ioremap for bar0 failed\n");
   3472		return -ENOMEM;
   3473	}
   3474	dev_info(&pdev->dev, "bar0 mapped to %p, len %llu\n", bnad->bar0,
   3475		 (unsigned long long) bnad->mmio_len);
   3476
   3477	spin_lock_irqsave(&bnad->bna_lock, flags);
   3478	if (!bnad_msix_disable)
   3479		bnad->cfg_flags = BNAD_CF_MSIX;
   3480
   3481	bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
   3482
   3483	bnad_q_num_init(bnad);
   3484	spin_unlock_irqrestore(&bnad->bna_lock, flags);
   3485
   3486	bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
   3487		(bnad->num_rx * bnad->num_rxp_per_rx) +
   3488			 BNAD_MAILBOX_MSIX_VECTORS;
   3489
   3490	bnad->txq_depth = BNAD_TXQ_DEPTH;
   3491	bnad->rxq_depth = BNAD_RXQ_DEPTH;
   3492
   3493	bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
   3494	bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
   3495
   3496	sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
   3497	bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
   3498	if (!bnad->work_q) {
   3499		iounmap(bnad->bar0);
   3500		return -ENOMEM;
   3501	}
   3502
   3503	return 0;
   3504}
   3505
   3506/*
   3507 * Must be called after bnad_pci_uninit()
   3508 * so that iounmap() and pci_set_drvdata(NULL)
   3509 * happens only after PCI uninitialization.
   3510 */
   3511static void
   3512bnad_uninit(struct bnad *bnad)
   3513{
   3514	if (bnad->work_q) {
   3515		destroy_workqueue(bnad->work_q);
   3516		bnad->work_q = NULL;
   3517	}
   3518
   3519	if (bnad->bar0)
   3520		iounmap(bnad->bar0);
   3521}
   3522
   3523/*
   3524 * Initialize locks
   3525	a) Per-ioceth mutex used for serializing configuration
   3526	   changes from the OS interface
   3527	b) spin lock used to protect bna state machine
   3528 */
   3529static void
   3530bnad_lock_init(struct bnad *bnad)
   3531{
   3532	spin_lock_init(&bnad->bna_lock);
   3533	mutex_init(&bnad->conf_mutex);
   3534}
   3535
   3536static void
   3537bnad_lock_uninit(struct bnad *bnad)
   3538{
   3539	mutex_destroy(&bnad->conf_mutex);
   3540}
   3541
   3542/* PCI Initialization */
   3543static int
   3544bnad_pci_init(struct bnad *bnad, struct pci_dev *pdev)
   3545{
   3546	int err;
   3547
   3548	err = pci_enable_device(pdev);
   3549	if (err)
   3550		return err;
   3551	err = pci_request_regions(pdev, BNAD_NAME);
   3552	if (err)
   3553		goto disable_device;
   3554	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
   3555	if (err)
   3556		goto release_regions;
   3557	pci_set_master(pdev);
   3558	return 0;
   3559
   3560release_regions:
   3561	pci_release_regions(pdev);
   3562disable_device:
   3563	pci_disable_device(pdev);
   3564
   3565	return err;
   3566}
   3567
   3568static void
   3569bnad_pci_uninit(struct pci_dev *pdev)
   3570{
   3571	pci_release_regions(pdev);
   3572	pci_disable_device(pdev);
   3573}
   3574
   3575static int
   3576bnad_pci_probe(struct pci_dev *pdev,
   3577		const struct pci_device_id *pcidev_id)
   3578{
   3579	int	err;
   3580	struct bnad *bnad;
   3581	struct bna *bna;
   3582	struct net_device *netdev;
   3583	struct bfa_pcidev pcidev_info;
   3584	unsigned long flags;
   3585
   3586	mutex_lock(&bnad_fwimg_mutex);
   3587	if (!cna_get_firmware_buf(pdev)) {
   3588		mutex_unlock(&bnad_fwimg_mutex);
   3589		dev_err(&pdev->dev, "failed to load firmware image!\n");
   3590		return -ENODEV;
   3591	}
   3592	mutex_unlock(&bnad_fwimg_mutex);
   3593
   3594	/*
   3595	 * Allocates sizeof(struct net_device) + sizeof(struct bnad);
   3596	 * bnad = netdev_priv(netdev)
   3597	 */
   3598	netdev = alloc_etherdev(sizeof(struct bnad));
   3599	if (!netdev) {
   3600		err = -ENOMEM;
   3601		return err;
   3602	}
   3603	bnad = netdev_priv(netdev);
   3604	bnad_lock_init(bnad);
   3605	bnad->id = atomic_inc_return(&bna_id) - 1;
   3606
   3607	mutex_lock(&bnad->conf_mutex);
   3608	/* PCI initialization */
   3609	err = bnad_pci_init(bnad, pdev);
   3610	if (err)
   3611		goto unlock_mutex;
   3612
   3613	/*
   3614	 * Initialize bnad structure
   3615	 * Setup relation between pci_dev & netdev
   3616	 */
   3617	err = bnad_init(bnad, pdev, netdev);
   3618	if (err)
   3619		goto pci_uninit;
   3620
   3621	/* Initialize netdev structure, set up ethtool ops */
   3622	bnad_netdev_init(bnad);
   3623
   3624	/* Set link to down state */
   3625	netif_carrier_off(netdev);
   3626
   3627	/* Set up the debugfs node for this bnad */
   3628	if (bna_debugfs_enable)
   3629		bnad_debugfs_init(bnad);
   3630
   3631	/* Get resource requirements from bna */
   3632	spin_lock_irqsave(&bnad->bna_lock, flags);
   3633	bna_res_req(&bnad->res_info[0]);
   3634	spin_unlock_irqrestore(&bnad->bna_lock, flags);
   3635
   3636	/* Allocate resources from bna */
   3637	err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
   3638	if (err)
   3639		goto drv_uninit;
   3640
   3641	bna = &bnad->bna;
   3642
   3643	/* Set up pcidev_info for bna_init() */
   3644	pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
   3645	pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
   3646	pcidev_info.device_id = bnad->pcidev->device;
   3647	pcidev_info.pci_bar_kva = bnad->bar0;
   3648
   3649	spin_lock_irqsave(&bnad->bna_lock, flags);
   3650	bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
   3651	spin_unlock_irqrestore(&bnad->bna_lock, flags);
   3652
   3653	bnad->stats.bna_stats = &bna->stats;
   3654
   3655	bnad_enable_msix(bnad);
   3656	err = bnad_mbox_irq_alloc(bnad);
   3657	if (err)
   3658		goto res_free;
   3659
   3660	/* Set up timers */
   3661	timer_setup(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout, 0);
   3662	timer_setup(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check, 0);
   3663	timer_setup(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout, 0);
   3664	timer_setup(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
   3665		    0);
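       	/*
       	 * These timers drive the IOC/IOCPF state machines and the firmware
       	 * heartbeat check; their handlers are defined earlier in this driver.
       	 */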
   3666
   3667	/*
   3668	 * Start the chip.
   3669	 * If enabling the IOC fails, this is a catastrophic error; log it
   3670	 * and complete the probe without registering the net device.
   3671	 */
   3672	err = bnad_ioceth_enable(bnad);
   3673	if (err) {
   3674		dev_err(&pdev->dev, "initialization failed err=%d\n", err);
   3675		goto probe_success;
   3676	}
   3677
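       	/*
       	 * Negotiate queue counts: if the default Tx/Rx requests are not
       	 * accepted, scale them down to the attributes reported by the IOC
       	 * and retry; give up only if even the reduced request is rejected.
       	 */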
   3678	spin_lock_irqsave(&bnad->bna_lock, flags);
   3679	if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
   3680		bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
   3681		bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
   3682			bna_attr(bna)->num_rxp - 1);
   3683		if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
   3684			bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
   3685			err = -EIO;
   3686	}
   3687	spin_unlock_irqrestore(&bnad->bna_lock, flags);
   3688	if (err)
   3689		goto disable_ioceth;
   3690
   3691	spin_lock_irqsave(&bnad->bna_lock, flags);
   3692	bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
   3693	spin_unlock_irqrestore(&bnad->bna_lock, flags);
   3694
   3695	err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
   3696	if (err) {
   3697		err = -EIO;
   3698		goto disable_ioceth;
   3699	}
   3700
   3701	spin_lock_irqsave(&bnad->bna_lock, flags);
   3702	bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
   3703	spin_unlock_irqrestore(&bnad->bna_lock, flags);
   3704
   3705	/* Get the burnt-in MAC address */
   3706	spin_lock_irqsave(&bnad->bna_lock, flags);
   3707	bna_enet_perm_mac_get(&bna->enet, bnad->perm_addr);
   3708	bnad_set_netdev_perm_addr(bnad);
   3709	spin_unlock_irqrestore(&bnad->bna_lock, flags);
   3710
   3711	mutex_unlock(&bnad->conf_mutex);
   3712
   3713	/* Finally, register with the net_device layer */
   3714	err = register_netdev(netdev);
   3715	if (err) {
   3716		dev_err(&pdev->dev, "registering net device failed\n");
   3717		goto probe_uninit;
   3718	}
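       	/*
       	 * Record successful registration so that bnad_pci_remove() knows
       	 * whether unregister_netdev() is required.
       	 */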
   3719	set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
   3720
   3721	return 0;
   3722
   3723probe_success:
   3724	mutex_unlock(&bnad->conf_mutex);
   3725	return 0;
   3726
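       	/* Error unwind: undo the setup steps above in reverse order. */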
   3727probe_uninit:
   3728	mutex_lock(&bnad->conf_mutex);
   3729	bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
   3730disable_ioceth:
   3731	bnad_ioceth_disable(bnad);
   3732	del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
   3733	del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
   3734	del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
   3735	spin_lock_irqsave(&bnad->bna_lock, flags);
   3736	bna_uninit(bna);
   3737	spin_unlock_irqrestore(&bnad->bna_lock, flags);
   3738	bnad_mbox_irq_free(bnad);
   3739	bnad_disable_msix(bnad);
   3740res_free:
   3741	bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
   3742drv_uninit:
   3743	/* Remove the debugfs node for this bnad */
   3744	kfree(bnad->regdata);
   3745	bnad_debugfs_uninit(bnad);
   3746	bnad_uninit(bnad);
   3747pci_uninit:
   3748	bnad_pci_uninit(pdev);
   3749unlock_mutex:
   3750	mutex_unlock(&bnad->conf_mutex);
   3751	bnad_lock_uninit(bnad);
   3752	free_netdev(netdev);
   3753	return err;
   3754}
   3755
   3756static void
   3757bnad_pci_remove(struct pci_dev *pdev)
   3758{
   3759	struct net_device *netdev = pci_get_drvdata(pdev);
   3760	struct bnad *bnad;
   3761	struct bna *bna;
   3762	unsigned long flags;
   3763
   3764	if (!netdev)
   3765		return;
   3766
   3767	bnad = netdev_priv(netdev);
   3768	bna = &bnad->bna;
   3769
   3770	if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
   3771		unregister_netdev(netdev);
   3772
   3773	mutex_lock(&bnad->conf_mutex);
   3774	bnad_ioceth_disable(bnad);
   3775	del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
   3776	del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
   3777	del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
   3778	spin_lock_irqsave(&bnad->bna_lock, flags);
   3779	bna_uninit(bna);
   3780	spin_unlock_irqrestore(&bnad->bna_lock, flags);
   3781
   3782	bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
   3783	bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
   3784	bnad_mbox_irq_free(bnad);
   3785	bnad_disable_msix(bnad);
   3786	bnad_pci_uninit(pdev);
   3787	mutex_unlock(&bnad->conf_mutex);
   3788	bnad_lock_uninit(bnad);
   3789	/* Remove the debugfs node for this bnad */
   3790	kfree(bnad->regdata);
   3791	bnad_debugfs_uninit(bnad);
   3792	bnad_uninit(bnad);
   3793	free_netdev(netdev);
   3794}
   3795
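       /*
        * PCI IDs handled by this driver: both the Brocade CT and CT2 parts
        * are supported, and the class/class_mask fields restrict matching to
        * functions that advertise the Ethernet network class code.
        */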
   3796static const struct pci_device_id bnad_pci_id_table[] = {
   3797	{
   3798		PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
   3799			PCI_DEVICE_ID_BROCADE_CT),
   3800		.class = PCI_CLASS_NETWORK_ETHERNET << 8,
   3801		.class_mask =  0xffff00
   3802	},
   3803	{
   3804		PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
   3805			BFA_PCI_DEVICE_ID_CT2),
   3806		.class = PCI_CLASS_NETWORK_ETHERNET << 8,
   3807		.class_mask =  0xffff00
   3808	},
   3809	{0,  },
   3810};
   3811
   3812MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
   3813
   3814static struct pci_driver bnad_pci_driver = {
   3815	.name = BNAD_NAME,
   3816	.id_table = bnad_pci_id_table,
   3817	.probe = bnad_pci_probe,
   3818	.remove = bnad_pci_remove,
   3819};
   3820
   3821static int __init
   3822bnad_module_init(void)
   3823{
   3824	int err;
   3825
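       	/*
       	 * Apply the ioc_auto_recover module parameter to the common IOC
       	 * layer before any devices are probed.
       	 */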
   3826	bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
   3827
   3828	err = pci_register_driver(&bnad_pci_driver);
   3829	if (err < 0) {
   3830		pr_err("bna: PCI driver registration failed err=%d\n", err);
   3831		return err;
   3832	}
   3833
   3834	return 0;
   3835}
   3836
   3837static void __exit
   3838bnad_module_exit(void)
   3839{
   3840	pci_unregister_driver(&bnad_pci_driver);
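       	/* Release the firmware image loaded via cna_get_firmware_buf() at probe time. */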
   3841	release_firmware(bfi_fw);
   3842}
   3843
   3844module_init(bnad_module_init);
   3845module_exit(bnad_module_exit);
   3846
   3847MODULE_AUTHOR("Brocade");
   3848MODULE_LICENSE("GPL");
   3849MODULE_DESCRIPTION("QLogic BR-series 10G PCIe Ethernet driver");
   3850MODULE_FIRMWARE(CNA_FW_FILE_CT);
   3851MODULE_FIRMWARE(CNA_FW_FILE_CT2);