cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

main.c (48698B)


      1/*
      2 * Copyright (c) 2013, 2021 Johannes Berg <johannes@sipsolutions.net>
      3 *
      4 *  This file is free software: you may copy, redistribute and/or modify it
      5 *  under the terms of the GNU General Public License as published by the
      6 *  Free Software Foundation, either version 2 of the License, or (at your
      7 *  option) any later version.
      8 *
      9 *  This file is distributed in the hope that it will be useful, but
     10 *  WITHOUT ANY WARRANTY; without even the implied warranty of
     11 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     12 *  General Public License for more details.
     13 *
     14 *  You should have received a copy of the GNU General Public License
     15 *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
     16 *
     17 * This file incorporates work covered by the following copyright and
     18 * permission notice:
     19 *
     20 * Copyright (c) 2012 Qualcomm Atheros, Inc.
     21 *
     22 * Permission to use, copy, modify, and/or distribute this software for any
     23 * purpose with or without fee is hereby granted, provided that the above
     24 * copyright notice and this permission notice appear in all copies.
     25 *
     26 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
     27 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
     28 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
     29 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
     30 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
     31 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
     32 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
     33 */
     34
     35#include <linux/module.h>
     36#include <linux/pci.h>
     37#include <linux/interrupt.h>
     38#include <linux/ip.h>
     39#include <linux/ipv6.h>
     40#include <linux/if_vlan.h>
     41#include <linux/mdio.h>
     42#include <linux/aer.h>
     43#include <linux/bitops.h>
     44#include <linux/netdevice.h>
     45#include <linux/etherdevice.h>
     46#include <net/ip6_checksum.h>
     47#include <linux/crc32.h>
     48#include "alx.h"
     49#include "hw.h"
     50#include "reg.h"
     51
     52static const char alx_drv_name[] = "alx";
     53
     54static void alx_free_txbuf(struct alx_tx_queue *txq, int entry)
     55{
     56	struct alx_buffer *txb = &txq->bufs[entry];
     57
     58	if (dma_unmap_len(txb, size)) {
     59		dma_unmap_single(txq->dev,
     60				 dma_unmap_addr(txb, dma),
     61				 dma_unmap_len(txb, size),
     62				 DMA_TO_DEVICE);
     63		dma_unmap_len_set(txb, size, 0);
     64	}
     65
     66	if (txb->skb) {
     67		dev_kfree_skb_any(txb->skb);
     68		txb->skb = NULL;
     69	}
     70}
     71
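/* Allocate and DMA-map new rx buffers for the free RFD slots and, if any
 * were added, publish the new producer index to the hardware; returns the
 * number of buffers added.
 */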
     72static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
     73{
     74	struct alx_rx_queue *rxq = alx->qnapi[0]->rxq;
     75	struct sk_buff *skb;
     76	struct alx_buffer *cur_buf;
     77	dma_addr_t dma;
     78	u16 cur, next, count = 0;
     79
     80	next = cur = rxq->write_idx;
     81	if (++next == alx->rx_ringsz)
     82		next = 0;
     83	cur_buf = &rxq->bufs[cur];
     84
     85	while (!cur_buf->skb && next != rxq->read_idx) {
     86		struct alx_rfd *rfd = &rxq->rfd[cur];
     87
     88		/*
      89		 * When the DMA RX address is set to something like
      90		 * 0x....fc0, it is very likely to cause a DMA
      91		 * RFD overflow.
      92		 *
      93		 * To work around it, we allocate the rx skb with 64
      94		 * bytes of extra space, and offset the address whenever
      95		 * 0x....fc0 is detected.
     96		 */
     97		skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size + 64, gfp);
     98		if (!skb)
     99			break;
    100
    101		if (((unsigned long)skb->data & 0xfff) == 0xfc0)
    102			skb_reserve(skb, 64);
    103
    104		dma = dma_map_single(&alx->hw.pdev->dev,
    105				     skb->data, alx->rxbuf_size,
    106				     DMA_FROM_DEVICE);
    107		if (dma_mapping_error(&alx->hw.pdev->dev, dma)) {
    108			dev_kfree_skb(skb);
    109			break;
    110		}
    111
    112		/* Unfortunately, RX descriptor buffers must be 4-byte
    113		 * aligned, so we can't use IP alignment.
    114		 */
    115		if (WARN_ON(dma & 3)) {
    116			dev_kfree_skb(skb);
    117			break;
    118		}
    119
    120		cur_buf->skb = skb;
    121		dma_unmap_len_set(cur_buf, size, alx->rxbuf_size);
    122		dma_unmap_addr_set(cur_buf, dma, dma);
    123		rfd->addr = cpu_to_le64(dma);
    124
    125		cur = next;
    126		if (++next == alx->rx_ringsz)
    127			next = 0;
    128		cur_buf = &rxq->bufs[cur];
    129		count++;
    130	}
    131
    132	if (count) {
    133		/* flush all updates before updating hardware */
    134		wmb();
    135		rxq->write_idx = cur;
    136		alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur);
    137	}
    138
    139	return count;
    140}
    141
    142static struct alx_tx_queue *alx_tx_queue_mapping(struct alx_priv *alx,
    143						 struct sk_buff *skb)
    144{
    145	unsigned int r_idx = skb->queue_mapping;
    146
    147	if (r_idx >= alx->num_txq)
    148		r_idx = r_idx % alx->num_txq;
    149
    150	return alx->qnapi[r_idx]->txq;
    151}
    152
    153static struct netdev_queue *alx_get_tx_queue(const struct alx_tx_queue *txq)
    154{
    155	return netdev_get_tx_queue(txq->netdev, txq->queue_idx);
    156}
    157
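/* Number of free tx descriptors (TPDs) in the ring; one slot is always
 * kept empty so that a full ring can be distinguished from an empty one.
 */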
    158static inline int alx_tpd_avail(struct alx_tx_queue *txq)
    159{
    160	if (txq->write_idx >= txq->read_idx)
    161		return txq->count + txq->read_idx - txq->write_idx - 1;
    162	return txq->read_idx - txq->write_idx - 1;
    163}
    164
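/* Reclaim tx buffers up to the hardware consumer index (bounded by the
 * tx work budget), wake the queue if enough descriptors became free, and
 * return true once the software index has caught up with the hardware.
 */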
    165static bool alx_clean_tx_irq(struct alx_tx_queue *txq)
    166{
    167	struct alx_priv *alx;
    168	struct netdev_queue *tx_queue;
    169	u16 hw_read_idx, sw_read_idx;
    170	unsigned int total_bytes = 0, total_packets = 0;
    171	int budget = ALX_DEFAULT_TX_WORK;
    172
    173	alx = netdev_priv(txq->netdev);
    174	tx_queue = alx_get_tx_queue(txq);
    175
    176	sw_read_idx = txq->read_idx;
    177	hw_read_idx = alx_read_mem16(&alx->hw, txq->c_reg);
    178
    179	if (sw_read_idx != hw_read_idx) {
    180		while (sw_read_idx != hw_read_idx && budget > 0) {
    181			struct sk_buff *skb;
    182
    183			skb = txq->bufs[sw_read_idx].skb;
    184			if (skb) {
    185				total_bytes += skb->len;
    186				total_packets++;
    187				budget--;
    188			}
    189
    190			alx_free_txbuf(txq, sw_read_idx);
    191
    192			if (++sw_read_idx == txq->count)
    193				sw_read_idx = 0;
    194		}
    195		txq->read_idx = sw_read_idx;
    196
    197		netdev_tx_completed_queue(tx_queue, total_packets, total_bytes);
    198	}
    199
    200	if (netif_tx_queue_stopped(tx_queue) && netif_carrier_ok(alx->dev) &&
    201	    alx_tpd_avail(txq) > txq->count / 4)
    202		netif_tx_wake_queue(tx_queue);
    203
    204	return sw_read_idx == hw_read_idx;
    205}
    206
    207static void alx_schedule_link_check(struct alx_priv *alx)
    208{
    209	schedule_work(&alx->link_check_wk);
    210}
    211
    212static void alx_schedule_reset(struct alx_priv *alx)
    213{
    214	schedule_work(&alx->reset_wk);
    215}
    216
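/* Receive up to @budget packets: walk the RRD ring, unmap each buffer,
 * hand good frames to the stack via GRO, and refill the RFD ring as
 * buffers are consumed; returns the number of packets processed.
 */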
    217static int alx_clean_rx_irq(struct alx_rx_queue *rxq, int budget)
    218{
    219	struct alx_priv *alx;
    220	struct alx_rrd *rrd;
    221	struct alx_buffer *rxb;
    222	struct sk_buff *skb;
    223	u16 length, rfd_cleaned = 0;
    224	int work = 0;
    225
    226	alx = netdev_priv(rxq->netdev);
    227
    228	while (work < budget) {
    229		rrd = &rxq->rrd[rxq->rrd_read_idx];
    230		if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT)))
    231			break;
    232		rrd->word3 &= ~cpu_to_le32(1 << RRD_UPDATED_SHIFT);
    233
    234		if (ALX_GET_FIELD(le32_to_cpu(rrd->word0),
    235				  RRD_SI) != rxq->read_idx ||
    236		    ALX_GET_FIELD(le32_to_cpu(rrd->word0),
    237				  RRD_NOR) != 1) {
    238			alx_schedule_reset(alx);
    239			return work;
    240		}
    241
    242		rxb = &rxq->bufs[rxq->read_idx];
    243		dma_unmap_single(rxq->dev,
    244				 dma_unmap_addr(rxb, dma),
    245				 dma_unmap_len(rxb, size),
    246				 DMA_FROM_DEVICE);
    247		dma_unmap_len_set(rxb, size, 0);
    248		skb = rxb->skb;
    249		rxb->skb = NULL;
    250
    251		if (rrd->word3 & cpu_to_le32(1 << RRD_ERR_RES_SHIFT) ||
    252		    rrd->word3 & cpu_to_le32(1 << RRD_ERR_LEN_SHIFT)) {
    253			rrd->word3 = 0;
    254			dev_kfree_skb_any(skb);
    255			goto next_pkt;
    256		}
    257
    258		length = ALX_GET_FIELD(le32_to_cpu(rrd->word3),
    259				       RRD_PKTLEN) - ETH_FCS_LEN;
    260		skb_put(skb, length);
    261		skb->protocol = eth_type_trans(skb, rxq->netdev);
    262
    263		skb_checksum_none_assert(skb);
    264		if (alx->dev->features & NETIF_F_RXCSUM &&
    265		    !(rrd->word3 & (cpu_to_le32(1 << RRD_ERR_L4_SHIFT) |
    266				    cpu_to_le32(1 << RRD_ERR_IPV4_SHIFT)))) {
    267			switch (ALX_GET_FIELD(le32_to_cpu(rrd->word2),
    268					      RRD_PID)) {
    269			case RRD_PID_IPV6UDP:
    270			case RRD_PID_IPV4UDP:
    271			case RRD_PID_IPV4TCP:
    272			case RRD_PID_IPV6TCP:
    273				skb->ip_summed = CHECKSUM_UNNECESSARY;
    274				break;
    275			}
    276		}
    277
    278		napi_gro_receive(&rxq->np->napi, skb);
    279		work++;
    280
    281next_pkt:
    282		if (++rxq->read_idx == rxq->count)
    283			rxq->read_idx = 0;
    284		if (++rxq->rrd_read_idx == rxq->count)
    285			rxq->rrd_read_idx = 0;
    286
    287		if (++rfd_cleaned > ALX_RX_ALLOC_THRESH)
    288			rfd_cleaned -= alx_refill_rx_ring(alx, GFP_ATOMIC);
    289	}
    290
    291	if (rfd_cleaned)
    292		alx_refill_rx_ring(alx, GFP_ATOMIC);
    293
    294	return work;
    295}
    296
    297static int alx_poll(struct napi_struct *napi, int budget)
    298{
    299	struct alx_napi *np = container_of(napi, struct alx_napi, napi);
    300	struct alx_priv *alx = np->alx;
    301	struct alx_hw *hw = &alx->hw;
    302	unsigned long flags;
    303	bool tx_complete = true;
    304	int work = 0;
    305
    306	if (np->txq)
    307		tx_complete = alx_clean_tx_irq(np->txq);
    308	if (np->rxq)
    309		work = alx_clean_rx_irq(np->rxq, budget);
    310
    311	if (!tx_complete || work == budget)
    312		return budget;
    313
    314	napi_complete_done(&np->napi, work);
    315
    316	/* enable interrupt */
    317	if (alx->hw.pdev->msix_enabled) {
    318		alx_mask_msix(hw, np->vec_idx, false);
    319	} else {
    320		spin_lock_irqsave(&alx->irq_lock, flags);
    321		alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;
    322		alx_write_mem32(hw, ALX_IMR, alx->int_mask);
    323		spin_unlock_irqrestore(&alx->irq_lock, flags);
    324	}
    325
    326	alx_post_write(hw);
    327
    328	return work;
    329}
    330
    331static bool alx_intr_handle_misc(struct alx_priv *alx, u32 intr)
    332{
    333	struct alx_hw *hw = &alx->hw;
    334
    335	if (intr & ALX_ISR_FATAL) {
    336		netif_warn(alx, hw, alx->dev,
    337			   "fatal interrupt 0x%x, resetting\n", intr);
    338		alx_schedule_reset(alx);
    339		return true;
    340	}
    341
    342	if (intr & ALX_ISR_ALERT)
    343		netdev_warn(alx->dev, "alert interrupt: 0x%x\n", intr);
    344
    345	if (intr & ALX_ISR_PHY) {
     346		/* suppress the PHY interrupt: its source is internal
     347		 * to the PHY, and the main interrupt status can only
     348		 * be cleared after the PHY internal status is cleared.
    349		 */
    350		alx->int_mask &= ~ALX_ISR_PHY;
    351		alx_write_mem32(hw, ALX_IMR, alx->int_mask);
    352		alx_schedule_link_check(alx);
    353	}
    354
    355	return false;
    356}
    357
    358static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
    359{
    360	struct alx_hw *hw = &alx->hw;
    361
    362	spin_lock(&alx->irq_lock);
    363
    364	/* ACK interrupt */
    365	alx_write_mem32(hw, ALX_ISR, intr | ALX_ISR_DIS);
    366	intr &= alx->int_mask;
    367
    368	if (alx_intr_handle_misc(alx, intr))
    369		goto out;
    370
    371	if (intr & (ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0)) {
    372		napi_schedule(&alx->qnapi[0]->napi);
     373		/* mask rx/tx interrupts; re-enable them when NAPI completes */
    374		alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
    375		alx_write_mem32(hw, ALX_IMR, alx->int_mask);
    376	}
    377
    378	alx_write_mem32(hw, ALX_ISR, 0);
    379
    380 out:
    381	spin_unlock(&alx->irq_lock);
    382	return IRQ_HANDLED;
    383}
    384
    385static irqreturn_t alx_intr_msix_ring(int irq, void *data)
    386{
    387	struct alx_napi *np = data;
    388	struct alx_hw *hw = &np->alx->hw;
    389
    390	/* mask interrupt to ACK chip */
    391	alx_mask_msix(hw, np->vec_idx, true);
    392	/* clear interrupt status */
    393	alx_write_mem32(hw, ALX_ISR, np->vec_mask);
    394
    395	napi_schedule(&np->napi);
    396
    397	return IRQ_HANDLED;
    398}
    399
    400static irqreturn_t alx_intr_msix_misc(int irq, void *data)
    401{
    402	struct alx_priv *alx = data;
    403	struct alx_hw *hw = &alx->hw;
    404	u32 intr;
    405
    406	/* mask interrupt to ACK chip */
    407	alx_mask_msix(hw, 0, true);
    408
    409	/* read interrupt status */
    410	intr = alx_read_mem32(hw, ALX_ISR);
    411	intr &= (alx->int_mask & ~ALX_ISR_ALL_QUEUES);
    412
    413	if (alx_intr_handle_misc(alx, intr))
    414		return IRQ_HANDLED;
    415
    416	/* clear interrupt status */
    417	alx_write_mem32(hw, ALX_ISR, intr);
    418
    419	/* enable interrupt again */
    420	alx_mask_msix(hw, 0, false);
    421
    422	return IRQ_HANDLED;
    423}
    424
    425static irqreturn_t alx_intr_msi(int irq, void *data)
    426{
    427	struct alx_priv *alx = data;
    428
    429	return alx_intr_handle(alx, alx_read_mem32(&alx->hw, ALX_ISR));
    430}
    431
    432static irqreturn_t alx_intr_legacy(int irq, void *data)
    433{
    434	struct alx_priv *alx = data;
    435	struct alx_hw *hw = &alx->hw;
    436	u32 intr;
    437
    438	intr = alx_read_mem32(hw, ALX_ISR);
    439
    440	if (intr & ALX_ISR_DIS || !(intr & alx->int_mask))
    441		return IRQ_NONE;
    442
    443	return alx_intr_handle(alx, intr);
    444}
    445
    446static const u16 txring_header_reg[] = {ALX_TPD_PRI0_ADDR_LO,
    447					ALX_TPD_PRI1_ADDR_LO,
    448					ALX_TPD_PRI2_ADDR_LO,
    449					ALX_TPD_PRI3_ADDR_LO};
    450
    451static void alx_init_ring_ptrs(struct alx_priv *alx)
    452{
    453	struct alx_hw *hw = &alx->hw;
    454	u32 addr_hi = ((u64)alx->descmem.dma) >> 32;
    455	struct alx_napi *np;
    456	int i;
    457
    458	for (i = 0; i < alx->num_napi; i++) {
    459		np = alx->qnapi[i];
    460		if (np->txq) {
    461			np->txq->read_idx = 0;
    462			np->txq->write_idx = 0;
    463			alx_write_mem32(hw,
    464					txring_header_reg[np->txq->queue_idx],
    465					np->txq->tpd_dma);
    466		}
    467
    468		if (np->rxq) {
    469			np->rxq->read_idx = 0;
    470			np->rxq->write_idx = 0;
    471			np->rxq->rrd_read_idx = 0;
    472			alx_write_mem32(hw, ALX_RRD_ADDR_LO, np->rxq->rrd_dma);
    473			alx_write_mem32(hw, ALX_RFD_ADDR_LO, np->rxq->rfd_dma);
    474		}
    475	}
    476
    477	alx_write_mem32(hw, ALX_TX_BASE_ADDR_HI, addr_hi);
    478	alx_write_mem32(hw, ALX_TPD_RING_SZ, alx->tx_ringsz);
    479
    480	alx_write_mem32(hw, ALX_RX_BASE_ADDR_HI, addr_hi);
    481	alx_write_mem32(hw, ALX_RRD_RING_SZ, alx->rx_ringsz);
    482	alx_write_mem32(hw, ALX_RFD_RING_SZ, alx->rx_ringsz);
    483	alx_write_mem32(hw, ALX_RFD_BUF_SZ, alx->rxbuf_size);
    484
    485	/* load these pointers into the chip */
    486	alx_write_mem32(hw, ALX_SRAM9, ALX_SRAM_LOAD_PTR);
    487}
    488
    489static void alx_free_txring_buf(struct alx_tx_queue *txq)
    490{
    491	int i;
    492
    493	if (!txq->bufs)
    494		return;
    495
    496	for (i = 0; i < txq->count; i++)
    497		alx_free_txbuf(txq, i);
    498
    499	memset(txq->bufs, 0, txq->count * sizeof(struct alx_buffer));
    500	memset(txq->tpd, 0, txq->count * sizeof(struct alx_txd));
    501	txq->write_idx = 0;
    502	txq->read_idx = 0;
    503
    504	netdev_tx_reset_queue(alx_get_tx_queue(txq));
    505}
    506
    507static void alx_free_rxring_buf(struct alx_rx_queue *rxq)
    508{
    509	struct alx_buffer *cur_buf;
    510	u16 i;
    511
    512	if (!rxq->bufs)
    513		return;
    514
    515	for (i = 0; i < rxq->count; i++) {
    516		cur_buf = rxq->bufs + i;
    517		if (cur_buf->skb) {
    518			dma_unmap_single(rxq->dev,
    519					 dma_unmap_addr(cur_buf, dma),
    520					 dma_unmap_len(cur_buf, size),
    521					 DMA_FROM_DEVICE);
    522			dev_kfree_skb(cur_buf->skb);
    523			cur_buf->skb = NULL;
    524			dma_unmap_len_set(cur_buf, size, 0);
    525			dma_unmap_addr_set(cur_buf, dma, 0);
    526		}
    527	}
    528
    529	rxq->write_idx = 0;
    530	rxq->read_idx = 0;
    531	rxq->rrd_read_idx = 0;
    532}
    533
    534static void alx_free_buffers(struct alx_priv *alx)
    535{
    536	int i;
    537
    538	for (i = 0; i < alx->num_txq; i++)
    539		if (alx->qnapi[i] && alx->qnapi[i]->txq)
    540			alx_free_txring_buf(alx->qnapi[i]->txq);
    541
    542	if (alx->qnapi[0] && alx->qnapi[0]->rxq)
    543		alx_free_rxring_buf(alx->qnapi[0]->rxq);
    544}
    545
    546static int alx_reinit_rings(struct alx_priv *alx)
    547{
    548	alx_free_buffers(alx);
    549
    550	alx_init_ring_ptrs(alx);
    551
    552	if (!alx_refill_rx_ring(alx, GFP_KERNEL))
    553		return -ENOMEM;
    554
    555	return 0;
    556}
    557
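/* Set the hash-table bit for a multicast address: bit 31 of the CRC
 * selects one of the two 32-bit hash registers, bits 30:26 select the
 * bit within it.
 */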
    558static void alx_add_mc_addr(struct alx_hw *hw, const u8 *addr, u32 *mc_hash)
    559{
    560	u32 crc32, bit, reg;
    561
    562	crc32 = ether_crc(ETH_ALEN, addr);
    563	reg = (crc32 >> 31) & 0x1;
    564	bit = (crc32 >> 26) & 0x1F;
    565
    566	mc_hash[reg] |= BIT(bit);
    567}
    568
    569static void __alx_set_rx_mode(struct net_device *netdev)
    570{
    571	struct alx_priv *alx = netdev_priv(netdev);
    572	struct alx_hw *hw = &alx->hw;
    573	struct netdev_hw_addr *ha;
    574	u32 mc_hash[2] = {};
    575
    576	if (!(netdev->flags & IFF_ALLMULTI)) {
    577		netdev_for_each_mc_addr(ha, netdev)
    578			alx_add_mc_addr(hw, ha->addr, mc_hash);
    579
    580		alx_write_mem32(hw, ALX_HASH_TBL0, mc_hash[0]);
    581		alx_write_mem32(hw, ALX_HASH_TBL1, mc_hash[1]);
    582	}
    583
    584	hw->rx_ctrl &= ~(ALX_MAC_CTRL_MULTIALL_EN | ALX_MAC_CTRL_PROMISC_EN);
    585	if (netdev->flags & IFF_PROMISC)
    586		hw->rx_ctrl |= ALX_MAC_CTRL_PROMISC_EN;
    587	if (netdev->flags & IFF_ALLMULTI)
    588		hw->rx_ctrl |= ALX_MAC_CTRL_MULTIALL_EN;
    589
    590	alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
    591}
    592
    593static void alx_set_rx_mode(struct net_device *netdev)
    594{
    595	__alx_set_rx_mode(netdev);
    596}
    597
    598static int alx_set_mac_address(struct net_device *netdev, void *data)
    599{
    600	struct alx_priv *alx = netdev_priv(netdev);
    601	struct alx_hw *hw = &alx->hw;
    602	struct sockaddr *addr = data;
    603
    604	if (!is_valid_ether_addr(addr->sa_data))
    605		return -EADDRNOTAVAIL;
    606
    607	if (netdev->addr_assign_type & NET_ADDR_RANDOM)
    608		netdev->addr_assign_type ^= NET_ADDR_RANDOM;
    609
    610	eth_hw_addr_set(netdev, addr->sa_data);
    611	memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
    612	alx_set_macaddr(hw, hw->mac_addr);
    613
    614	return 0;
    615}
    616
    617static int alx_alloc_tx_ring(struct alx_priv *alx, struct alx_tx_queue *txq,
    618			     int offset)
    619{
    620	txq->bufs = kcalloc(txq->count, sizeof(struct alx_buffer), GFP_KERNEL);
    621	if (!txq->bufs)
    622		return -ENOMEM;
    623
    624	txq->tpd = alx->descmem.virt + offset;
    625	txq->tpd_dma = alx->descmem.dma + offset;
    626	offset += sizeof(struct alx_txd) * txq->count;
    627
    628	return offset;
    629}
    630
    631static int alx_alloc_rx_ring(struct alx_priv *alx, struct alx_rx_queue *rxq,
    632			     int offset)
    633{
    634	rxq->bufs = kcalloc(rxq->count, sizeof(struct alx_buffer), GFP_KERNEL);
    635	if (!rxq->bufs)
    636		return -ENOMEM;
    637
    638	rxq->rrd = alx->descmem.virt + offset;
    639	rxq->rrd_dma = alx->descmem.dma + offset;
    640	offset += sizeof(struct alx_rrd) * rxq->count;
    641
    642	rxq->rfd = alx->descmem.virt + offset;
    643	rxq->rfd_dma = alx->descmem.dma + offset;
    644	offset += sizeof(struct alx_rfd) * rxq->count;
    645
    646	return offset;
    647}
    648
    649static int alx_alloc_rings(struct alx_priv *alx)
    650{
    651	int i, offset = 0;
    652
    653	/* physical tx/rx ring descriptors
    654	 *
    655	 * Allocate them as a single chunk because they must not cross a
    656	 * 4G boundary (hardware has a single register for high 32 bits
    657	 * of addresses only)
    658	 */
    659	alx->descmem.size = sizeof(struct alx_txd) * alx->tx_ringsz *
    660			    alx->num_txq +
    661			    sizeof(struct alx_rrd) * alx->rx_ringsz +
    662			    sizeof(struct alx_rfd) * alx->rx_ringsz;
    663	alx->descmem.virt = dma_alloc_coherent(&alx->hw.pdev->dev,
    664					       alx->descmem.size,
    665					       &alx->descmem.dma, GFP_KERNEL);
    666	if (!alx->descmem.virt)
    667		return -ENOMEM;
    668
    669	/* alignment requirements */
    670	BUILD_BUG_ON(sizeof(struct alx_txd) % 8);
    671	BUILD_BUG_ON(sizeof(struct alx_rrd) % 8);
    672
    673	for (i = 0; i < alx->num_txq; i++) {
    674		offset = alx_alloc_tx_ring(alx, alx->qnapi[i]->txq, offset);
    675		if (offset < 0) {
    676			netdev_err(alx->dev, "Allocation of tx buffer failed!\n");
    677			return -ENOMEM;
    678		}
    679	}
    680
    681	offset = alx_alloc_rx_ring(alx, alx->qnapi[0]->rxq, offset);
    682	if (offset < 0) {
    683		netdev_err(alx->dev, "Allocation of rx buffer failed!\n");
    684		return -ENOMEM;
    685	}
    686
    687	return 0;
    688}
    689
    690static void alx_free_rings(struct alx_priv *alx)
    691{
    692	int i;
    693
    694	alx_free_buffers(alx);
    695
    696	for (i = 0; i < alx->num_txq; i++)
    697		if (alx->qnapi[i] && alx->qnapi[i]->txq)
    698			kfree(alx->qnapi[i]->txq->bufs);
    699
    700	if (alx->qnapi[0] && alx->qnapi[0]->rxq)
    701		kfree(alx->qnapi[0]->rxq->bufs);
    702
    703	if (alx->descmem.virt)
    704		dma_free_coherent(&alx->hw.pdev->dev,
    705				  alx->descmem.size,
    706				  alx->descmem.virt,
    707				  alx->descmem.dma);
    708}
    709
    710static void alx_free_napis(struct alx_priv *alx)
    711{
    712	struct alx_napi *np;
    713	int i;
    714
    715	for (i = 0; i < alx->num_napi; i++) {
    716		np = alx->qnapi[i];
    717		if (!np)
    718			continue;
    719
    720		netif_napi_del(&np->napi);
    721		kfree(np->txq);
    722		kfree(np->rxq);
    723		kfree(np);
    724		alx->qnapi[i] = NULL;
    725	}
    726}
    727
    728static const u16 tx_pidx_reg[] = {ALX_TPD_PRI0_PIDX, ALX_TPD_PRI1_PIDX,
    729				  ALX_TPD_PRI2_PIDX, ALX_TPD_PRI3_PIDX};
    730static const u16 tx_cidx_reg[] = {ALX_TPD_PRI0_CIDX, ALX_TPD_PRI1_CIDX,
    731				  ALX_TPD_PRI2_CIDX, ALX_TPD_PRI3_CIDX};
    732static const u32 tx_vect_mask[] = {ALX_ISR_TX_Q0, ALX_ISR_TX_Q1,
    733				   ALX_ISR_TX_Q2, ALX_ISR_TX_Q3};
    734static const u32 rx_vect_mask[] = {ALX_ISR_RX_Q0, ALX_ISR_RX_Q1,
    735				   ALX_ISR_RX_Q2, ALX_ISR_RX_Q3,
    736				   ALX_ISR_RX_Q4, ALX_ISR_RX_Q5,
    737				   ALX_ISR_RX_Q6, ALX_ISR_RX_Q7};
    738
    739static int alx_alloc_napis(struct alx_priv *alx)
    740{
    741	struct alx_napi *np;
    742	struct alx_rx_queue *rxq;
    743	struct alx_tx_queue *txq;
    744	int i;
    745
    746	alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
    747
    748	/* allocate alx_napi structures */
    749	for (i = 0; i < alx->num_napi; i++) {
    750		np = kzalloc(sizeof(struct alx_napi), GFP_KERNEL);
    751		if (!np)
    752			goto err_out;
    753
    754		np->alx = alx;
    755		netif_napi_add(alx->dev, &np->napi, alx_poll, 64);
    756		alx->qnapi[i] = np;
    757	}
    758
    759	/* allocate tx queues */
    760	for (i = 0; i < alx->num_txq; i++) {
    761		np = alx->qnapi[i];
    762		txq = kzalloc(sizeof(*txq), GFP_KERNEL);
    763		if (!txq)
    764			goto err_out;
    765
    766		np->txq = txq;
    767		txq->p_reg = tx_pidx_reg[i];
    768		txq->c_reg = tx_cidx_reg[i];
    769		txq->queue_idx = i;
    770		txq->count = alx->tx_ringsz;
    771		txq->netdev = alx->dev;
    772		txq->dev = &alx->hw.pdev->dev;
    773		np->vec_mask |= tx_vect_mask[i];
    774		alx->int_mask |= tx_vect_mask[i];
    775	}
    776
    777	/* allocate rx queues */
    778	np = alx->qnapi[0];
    779	rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
    780	if (!rxq)
    781		goto err_out;
    782
    783	np->rxq = rxq;
    784	rxq->np = alx->qnapi[0];
    785	rxq->queue_idx = 0;
    786	rxq->count = alx->rx_ringsz;
    787	rxq->netdev = alx->dev;
    788	rxq->dev = &alx->hw.pdev->dev;
    789	np->vec_mask |= rx_vect_mask[0];
    790	alx->int_mask |= rx_vect_mask[0];
    791
    792	return 0;
    793
    794err_out:
    795	netdev_err(alx->dev, "error allocating internal structures\n");
    796	alx_free_napis(alx);
    797	return -ENOMEM;
    798}
    799
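/* Per-tx-queue (MSI map table index, field shift) pairs used by
 * alx_config_vector_mapping() below.
 */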
    800static const int txq_vec_mapping_shift[] = {
    801	0, ALX_MSI_MAP_TBL1_TXQ0_SHIFT,
    802	0, ALX_MSI_MAP_TBL1_TXQ1_SHIFT,
    803	1, ALX_MSI_MAP_TBL2_TXQ2_SHIFT,
    804	1, ALX_MSI_MAP_TBL2_TXQ3_SHIFT,
    805};
    806
    807static void alx_config_vector_mapping(struct alx_priv *alx)
    808{
    809	struct alx_hw *hw = &alx->hw;
    810	u32 tbl[2] = {0, 0};
    811	int i, vector, idx, shift;
    812
    813	if (alx->hw.pdev->msix_enabled) {
    814		/* tx mappings */
    815		for (i = 0, vector = 1; i < alx->num_txq; i++, vector++) {
    816			idx = txq_vec_mapping_shift[i * 2];
    817			shift = txq_vec_mapping_shift[i * 2 + 1];
    818			tbl[idx] |= vector << shift;
    819		}
    820
    821		/* rx mapping */
    822		tbl[0] |= 1 << ALX_MSI_MAP_TBL1_RXQ0_SHIFT;
    823	}
    824
    825	alx_write_mem32(hw, ALX_MSI_MAP_TBL1, tbl[0]);
    826	alx_write_mem32(hw, ALX_MSI_MAP_TBL2, tbl[1]);
    827	alx_write_mem32(hw, ALX_MSI_ID_MAP, 0);
    828}
    829
    830static int alx_enable_msix(struct alx_priv *alx)
    831{
    832	int err, num_vec, num_txq, num_rxq;
    833
    834	num_txq = min_t(int, num_online_cpus(), ALX_MAX_TX_QUEUES);
    835	num_rxq = 1;
    836	num_vec = max_t(int, num_txq, num_rxq) + 1;
    837
    838	err = pci_alloc_irq_vectors(alx->hw.pdev, num_vec, num_vec,
    839			PCI_IRQ_MSIX);
    840	if (err < 0) {
    841		netdev_warn(alx->dev, "Enabling MSI-X interrupts failed!\n");
    842		return err;
    843	}
    844
    845	alx->num_vec = num_vec;
    846	alx->num_napi = num_vec - 1;
    847	alx->num_txq = num_txq;
    848	alx->num_rxq = num_rxq;
    849
    850	return err;
    851}
    852
    853static int alx_request_msix(struct alx_priv *alx)
    854{
    855	struct net_device *netdev = alx->dev;
    856	int i, err, vector = 0, free_vector = 0;
    857
    858	err = request_irq(pci_irq_vector(alx->hw.pdev, 0), alx_intr_msix_misc,
    859			  0, netdev->name, alx);
    860	if (err)
    861		goto out_err;
    862
    863	for (i = 0; i < alx->num_napi; i++) {
    864		struct alx_napi *np = alx->qnapi[i];
    865
    866		vector++;
    867
    868		if (np->txq && np->rxq)
    869			sprintf(np->irq_lbl, "%s-TxRx-%u", netdev->name,
    870				np->txq->queue_idx);
    871		else if (np->txq)
    872			sprintf(np->irq_lbl, "%s-tx-%u", netdev->name,
    873				np->txq->queue_idx);
    874		else if (np->rxq)
    875			sprintf(np->irq_lbl, "%s-rx-%u", netdev->name,
    876				np->rxq->queue_idx);
    877		else
    878			sprintf(np->irq_lbl, "%s-unused", netdev->name);
    879
    880		np->vec_idx = vector;
    881		err = request_irq(pci_irq_vector(alx->hw.pdev, vector),
    882				  alx_intr_msix_ring, 0, np->irq_lbl, np);
    883		if (err)
    884			goto out_free;
    885	}
    886	return 0;
    887
    888out_free:
    889	free_irq(pci_irq_vector(alx->hw.pdev, free_vector++), alx);
    890
    891	vector--;
    892	for (i = 0; i < vector; i++)
     893		free_irq(pci_irq_vector(alx->hw.pdev, free_vector++),
    894			 alx->qnapi[i]);
    895
    896out_err:
    897	return err;
    898}
    899
    900static int alx_init_intr(struct alx_priv *alx)
    901{
    902	int ret;
    903
    904	ret = pci_alloc_irq_vectors(alx->hw.pdev, 1, 1,
    905			PCI_IRQ_MSI | PCI_IRQ_LEGACY);
    906	if (ret < 0)
    907		return ret;
    908
    909	alx->num_vec = 1;
    910	alx->num_napi = 1;
    911	alx->num_txq = 1;
    912	alx->num_rxq = 1;
    913	return 0;
    914}
    915
    916static void alx_irq_enable(struct alx_priv *alx)
    917{
    918	struct alx_hw *hw = &alx->hw;
    919	int i;
    920
    921	/* level-1 interrupt switch */
    922	alx_write_mem32(hw, ALX_ISR, 0);
    923	alx_write_mem32(hw, ALX_IMR, alx->int_mask);
    924	alx_post_write(hw);
    925
    926	if (alx->hw.pdev->msix_enabled) {
    927		/* enable all msix irqs */
    928		for (i = 0; i < alx->num_vec; i++)
    929			alx_mask_msix(hw, i, false);
    930	}
    931}
    932
    933static void alx_irq_disable(struct alx_priv *alx)
    934{
    935	struct alx_hw *hw = &alx->hw;
    936	int i;
    937
    938	alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS);
    939	alx_write_mem32(hw, ALX_IMR, 0);
    940	alx_post_write(hw);
    941
    942	if (alx->hw.pdev->msix_enabled) {
    943		for (i = 0; i < alx->num_vec; i++) {
    944			alx_mask_msix(hw, i, true);
    945			synchronize_irq(pci_irq_vector(alx->hw.pdev, i));
    946		}
    947	} else {
    948		synchronize_irq(pci_irq_vector(alx->hw.pdev, 0));
    949	}
    950}
    951
    952static int alx_realloc_resources(struct alx_priv *alx)
    953{
    954	int err;
    955
    956	alx_free_rings(alx);
    957	alx_free_napis(alx);
    958	pci_free_irq_vectors(alx->hw.pdev);
    959
    960	err = alx_init_intr(alx);
    961	if (err)
    962		return err;
    963
    964	err = alx_alloc_napis(alx);
    965	if (err)
    966		return err;
    967
    968	err = alx_alloc_rings(alx);
    969	if (err)
    970		return err;
    971
    972	return 0;
    973}
    974
    975static int alx_request_irq(struct alx_priv *alx)
    976{
    977	struct pci_dev *pdev = alx->hw.pdev;
    978	struct alx_hw *hw = &alx->hw;
    979	int err;
    980	u32 msi_ctrl;
    981
    982	msi_ctrl = (hw->imt >> 1) << ALX_MSI_RETRANS_TM_SHIFT;
    983
    984	if (alx->hw.pdev->msix_enabled) {
    985		alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, msi_ctrl);
    986		err = alx_request_msix(alx);
    987		if (!err)
    988			goto out;
    989
    990		/* msix request failed, realloc resources */
    991		err = alx_realloc_resources(alx);
    992		if (err)
    993			goto out;
    994	}
    995
    996	if (alx->hw.pdev->msi_enabled) {
    997		alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER,
    998				msi_ctrl | ALX_MSI_MASK_SEL_LINE);
    999		err = request_irq(pci_irq_vector(pdev, 0), alx_intr_msi, 0,
   1000				  alx->dev->name, alx);
   1001		if (!err)
   1002			goto out;
   1003
   1004		/* fall back to legacy interrupt */
   1005		pci_free_irq_vectors(alx->hw.pdev);
   1006	}
   1007
   1008	alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, 0);
   1009	err = request_irq(pci_irq_vector(pdev, 0), alx_intr_legacy, IRQF_SHARED,
   1010			  alx->dev->name, alx);
   1011out:
   1012	if (!err)
   1013		alx_config_vector_mapping(alx);
   1014	else
   1015		netdev_err(alx->dev, "IRQ registration failed!\n");
   1016	return err;
   1017}
   1018
   1019static void alx_free_irq(struct alx_priv *alx)
   1020{
   1021	struct pci_dev *pdev = alx->hw.pdev;
   1022	int i;
   1023
   1024	free_irq(pci_irq_vector(pdev, 0), alx);
   1025	if (alx->hw.pdev->msix_enabled) {
   1026		for (i = 0; i < alx->num_napi; i++)
   1027			free_irq(pci_irq_vector(pdev, i + 1), alx->qnapi[i]);
   1028	}
   1029
   1030	pci_free_irq_vectors(pdev);
   1031}
   1032
   1033static int alx_identify_hw(struct alx_priv *alx)
   1034{
   1035	struct alx_hw *hw = &alx->hw;
   1036	int rev = alx_hw_revision(hw);
   1037
   1038	if (rev > ALX_REV_C0)
   1039		return -EINVAL;
   1040
   1041	hw->max_dma_chnl = rev >= ALX_REV_B0 ? 4 : 2;
   1042
   1043	return 0;
   1044}
   1045
   1046static int alx_init_sw(struct alx_priv *alx)
   1047{
   1048	struct pci_dev *pdev = alx->hw.pdev;
   1049	struct alx_hw *hw = &alx->hw;
   1050	int err;
   1051
   1052	err = alx_identify_hw(alx);
   1053	if (err) {
   1054		dev_err(&pdev->dev, "unrecognized chip, aborting\n");
   1055		return err;
   1056	}
   1057
   1058	alx->hw.lnk_patch =
   1059		pdev->device == ALX_DEV_ID_AR8161 &&
   1060		pdev->subsystem_vendor == PCI_VENDOR_ID_ATTANSIC &&
   1061		pdev->subsystem_device == 0x0091 &&
   1062		pdev->revision == 0;
   1063
   1064	hw->smb_timer = 400;
   1065	hw->mtu = alx->dev->mtu;
   1066	alx->rxbuf_size = ALX_MAX_FRAME_LEN(hw->mtu);
   1067	/* MTU range: 34 - 9256 */
   1068	alx->dev->min_mtu = 34;
   1069	alx->dev->max_mtu = ALX_MAX_FRAME_LEN(ALX_MAX_FRAME_SIZE);
   1070	alx->tx_ringsz = 256;
   1071	alx->rx_ringsz = 512;
   1072	hw->imt = 200;
   1073	alx->int_mask = ALX_ISR_MISC;
   1074	hw->dma_chnl = hw->max_dma_chnl;
   1075	hw->ith_tpd = alx->tx_ringsz / 3;
   1076	hw->link_speed = SPEED_UNKNOWN;
   1077	hw->duplex = DUPLEX_UNKNOWN;
   1078	hw->adv_cfg = ADVERTISED_Autoneg |
   1079		      ADVERTISED_10baseT_Half |
   1080		      ADVERTISED_10baseT_Full |
   1081		      ADVERTISED_100baseT_Full |
   1082		      ADVERTISED_100baseT_Half |
   1083		      ADVERTISED_1000baseT_Full;
   1084	hw->flowctrl = ALX_FC_ANEG | ALX_FC_RX | ALX_FC_TX;
   1085
   1086	hw->rx_ctrl = ALX_MAC_CTRL_WOLSPED_SWEN |
   1087		      ALX_MAC_CTRL_MHASH_ALG_HI5B |
   1088		      ALX_MAC_CTRL_BRD_EN |
   1089		      ALX_MAC_CTRL_PCRCE |
   1090		      ALX_MAC_CTRL_CRCE |
   1091		      ALX_MAC_CTRL_RXFC_EN |
   1092		      ALX_MAC_CTRL_TXFC_EN |
   1093		      7 << ALX_MAC_CTRL_PRMBLEN_SHIFT;
   1094	mutex_init(&alx->mtx);
   1095
   1096	return 0;
   1097}
   1098
   1099
   1100static netdev_features_t alx_fix_features(struct net_device *netdev,
   1101					  netdev_features_t features)
   1102{
   1103	if (netdev->mtu > ALX_MAX_TSO_PKT_SIZE)
   1104		features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
   1105
   1106	return features;
   1107}
   1108
   1109static void alx_netif_stop(struct alx_priv *alx)
   1110{
   1111	int i;
   1112
   1113	netif_trans_update(alx->dev);
   1114	if (netif_carrier_ok(alx->dev)) {
   1115		netif_carrier_off(alx->dev);
   1116		netif_tx_disable(alx->dev);
   1117		for (i = 0; i < alx->num_napi; i++)
   1118			napi_disable(&alx->qnapi[i]->napi);
   1119	}
   1120}
   1121
   1122static void alx_halt(struct alx_priv *alx)
   1123{
   1124	struct alx_hw *hw = &alx->hw;
   1125
   1126	lockdep_assert_held(&alx->mtx);
   1127
   1128	alx_netif_stop(alx);
   1129	hw->link_speed = SPEED_UNKNOWN;
   1130	hw->duplex = DUPLEX_UNKNOWN;
   1131
   1132	alx_reset_mac(hw);
   1133
   1134	/* disable l0s/l1 */
   1135	alx_enable_aspm(hw, false, false);
   1136	alx_irq_disable(alx);
   1137	alx_free_buffers(alx);
   1138}
   1139
   1140static void alx_configure(struct alx_priv *alx)
   1141{
   1142	struct alx_hw *hw = &alx->hw;
   1143
   1144	alx_configure_basic(hw);
   1145	alx_disable_rss(hw);
   1146	__alx_set_rx_mode(alx->dev);
   1147
   1148	alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
   1149}
   1150
   1151static void alx_activate(struct alx_priv *alx)
   1152{
   1153	lockdep_assert_held(&alx->mtx);
   1154
   1155	/* hardware setting lost, restore it */
   1156	alx_reinit_rings(alx);
   1157	alx_configure(alx);
   1158
   1159	/* clear old interrupts */
   1160	alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);
   1161
   1162	alx_irq_enable(alx);
   1163
   1164	alx_schedule_link_check(alx);
   1165}
   1166
   1167static void alx_reinit(struct alx_priv *alx)
   1168{
   1169	lockdep_assert_held(&alx->mtx);
   1170
   1171	alx_halt(alx);
   1172	alx_activate(alx);
   1173}
   1174
   1175static int alx_change_mtu(struct net_device *netdev, int mtu)
   1176{
   1177	struct alx_priv *alx = netdev_priv(netdev);
   1178	int max_frame = ALX_MAX_FRAME_LEN(mtu);
   1179
   1180	netdev->mtu = mtu;
   1181	alx->hw.mtu = mtu;
   1182	alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE);
   1183	netdev_update_features(netdev);
   1184	if (netif_running(netdev)) {
   1185		mutex_lock(&alx->mtx);
   1186		alx_reinit(alx);
   1187		mutex_unlock(&alx->mtx);
   1188	}
   1189	return 0;
   1190}
   1191
   1192static void alx_netif_start(struct alx_priv *alx)
   1193{
   1194	int i;
   1195
   1196	netif_tx_wake_all_queues(alx->dev);
   1197	for (i = 0; i < alx->num_napi; i++)
   1198		napi_enable(&alx->qnapi[i]->napi);
   1199	netif_carrier_on(alx->dev);
   1200}
   1201
   1202static int __alx_open(struct alx_priv *alx, bool resume)
   1203{
   1204	int err;
   1205
   1206	err = alx_enable_msix(alx);
   1207	if (err < 0) {
   1208		err = alx_init_intr(alx);
   1209		if (err)
   1210			return err;
   1211	}
   1212
   1213	if (!resume)
   1214		netif_carrier_off(alx->dev);
   1215
   1216	err = alx_alloc_napis(alx);
   1217	if (err)
   1218		goto out_disable_adv_intr;
   1219
   1220	err = alx_alloc_rings(alx);
   1221	if (err)
   1222		goto out_free_rings;
   1223
   1224	alx_configure(alx);
   1225
   1226	err = alx_request_irq(alx);
   1227	if (err)
   1228		goto out_free_rings;
   1229
   1230	/* must be called after alx_request_irq because the chip stops working
    1231	 * if we copy the DMA addresses in alx_init_ring_ptrs twice
    1232	 * after a failed MSI-X request
   1233	 */
   1234	alx_reinit_rings(alx);
   1235
   1236	netif_set_real_num_tx_queues(alx->dev, alx->num_txq);
   1237	netif_set_real_num_rx_queues(alx->dev, alx->num_rxq);
   1238
   1239	/* clear old interrupts */
   1240	alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);
   1241
   1242	alx_irq_enable(alx);
   1243
   1244	if (!resume)
   1245		netif_tx_start_all_queues(alx->dev);
   1246
   1247	alx_schedule_link_check(alx);
   1248	return 0;
   1249
   1250out_free_rings:
   1251	alx_free_rings(alx);
   1252	alx_free_napis(alx);
   1253out_disable_adv_intr:
   1254	pci_free_irq_vectors(alx->hw.pdev);
   1255	return err;
   1256}
   1257
   1258static void __alx_stop(struct alx_priv *alx)
   1259{
   1260	lockdep_assert_held(&alx->mtx);
   1261
   1262	alx_free_irq(alx);
   1263
   1264	cancel_work_sync(&alx->link_check_wk);
   1265	cancel_work_sync(&alx->reset_wk);
   1266
   1267	alx_halt(alx);
   1268	alx_free_rings(alx);
   1269	alx_free_napis(alx);
   1270}
   1271
   1272static const char *alx_speed_desc(struct alx_hw *hw)
   1273{
   1274	switch (alx_speed_to_ethadv(hw->link_speed, hw->duplex)) {
   1275	case ADVERTISED_1000baseT_Full:
   1276		return "1 Gbps Full";
   1277	case ADVERTISED_100baseT_Full:
   1278		return "100 Mbps Full";
   1279	case ADVERTISED_100baseT_Half:
   1280		return "100 Mbps Half";
   1281	case ADVERTISED_10baseT_Full:
   1282		return "10 Mbps Full";
   1283	case ADVERTISED_10baseT_Half:
   1284		return "10 Mbps Half";
   1285	default:
   1286		return "Unknown speed";
   1287	}
   1288}
   1289
   1290static void alx_check_link(struct alx_priv *alx)
   1291{
   1292	struct alx_hw *hw = &alx->hw;
   1293	unsigned long flags;
   1294	int old_speed;
   1295	int err;
   1296
   1297	lockdep_assert_held(&alx->mtx);
   1298
   1299	/* clear PHY internal interrupt status, otherwise the main
   1300	 * interrupt status will be asserted forever
   1301	 */
   1302	alx_clear_phy_intr(hw);
   1303
   1304	old_speed = hw->link_speed;
   1305	err = alx_read_phy_link(hw);
   1306	if (err < 0)
   1307		goto reset;
   1308
   1309	spin_lock_irqsave(&alx->irq_lock, flags);
   1310	alx->int_mask |= ALX_ISR_PHY;
   1311	alx_write_mem32(hw, ALX_IMR, alx->int_mask);
   1312	spin_unlock_irqrestore(&alx->irq_lock, flags);
   1313
   1314	if (old_speed == hw->link_speed)
   1315		return;
   1316
   1317	if (hw->link_speed != SPEED_UNKNOWN) {
   1318		netif_info(alx, link, alx->dev,
   1319			   "NIC Up: %s\n", alx_speed_desc(hw));
   1320		alx_post_phy_link(hw);
   1321		alx_enable_aspm(hw, true, true);
   1322		alx_start_mac(hw);
   1323
   1324		if (old_speed == SPEED_UNKNOWN)
   1325			alx_netif_start(alx);
   1326	} else {
   1327		/* link is now down */
   1328		alx_netif_stop(alx);
   1329		netif_info(alx, link, alx->dev, "Link Down\n");
   1330		err = alx_reset_mac(hw);
   1331		if (err)
   1332			goto reset;
   1333		alx_irq_disable(alx);
   1334
   1335		/* MAC reset causes all HW settings to be lost, restore all */
   1336		err = alx_reinit_rings(alx);
   1337		if (err)
   1338			goto reset;
   1339		alx_configure(alx);
   1340		alx_enable_aspm(hw, false, true);
   1341		alx_post_phy_link(hw);
   1342		alx_irq_enable(alx);
   1343	}
   1344
   1345	return;
   1346
   1347reset:
   1348	alx_schedule_reset(alx);
   1349}
   1350
   1351static int alx_open(struct net_device *netdev)
   1352{
   1353	struct alx_priv *alx = netdev_priv(netdev);
   1354	int ret;
   1355
   1356	mutex_lock(&alx->mtx);
   1357	ret = __alx_open(alx, false);
   1358	mutex_unlock(&alx->mtx);
   1359
   1360	return ret;
   1361}
   1362
   1363static int alx_stop(struct net_device *netdev)
   1364{
   1365	struct alx_priv *alx = netdev_priv(netdev);
   1366
   1367	mutex_lock(&alx->mtx);
   1368	__alx_stop(alx);
   1369	mutex_unlock(&alx->mtx);
   1370
   1371	return 0;
   1372}
   1373
   1374static void alx_link_check(struct work_struct *work)
   1375{
   1376	struct alx_priv *alx;
   1377
   1378	alx = container_of(work, struct alx_priv, link_check_wk);
   1379
   1380	mutex_lock(&alx->mtx);
   1381	alx_check_link(alx);
   1382	mutex_unlock(&alx->mtx);
   1383}
   1384
   1385static void alx_reset(struct work_struct *work)
   1386{
   1387	struct alx_priv *alx = container_of(work, struct alx_priv, reset_wk);
   1388
   1389	mutex_lock(&alx->mtx);
   1390	alx_reinit(alx);
   1391	mutex_unlock(&alx->mtx);
   1392}
   1393
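/* Number of tx descriptors needed for this skb: one for the linear head,
 * one per page fragment, plus one extra for LSOv2 (IPv6 TSO).
 */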
   1394static int alx_tpd_req(struct sk_buff *skb)
   1395{
   1396	int num;
   1397
   1398	num = skb_shinfo(skb)->nr_frags + 1;
   1399	/* we need one extra descriptor for LSOv2 */
   1400	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
   1401		num++;
   1402
   1403	return num;
   1404}
   1405
   1406static int alx_tx_csum(struct sk_buff *skb, struct alx_txd *first)
   1407{
   1408	u8 cso, css;
   1409
   1410	if (skb->ip_summed != CHECKSUM_PARTIAL)
   1411		return 0;
   1412
   1413	cso = skb_checksum_start_offset(skb);
   1414	if (cso & 1)
   1415		return -EINVAL;
   1416
   1417	css = cso + skb->csum_offset;
   1418	first->word1 |= cpu_to_le32((cso >> 1) << TPD_CXSUMSTART_SHIFT);
   1419	first->word1 |= cpu_to_le32((css >> 1) << TPD_CXSUMOFFSET_SHIFT);
   1420	first->word1 |= cpu_to_le32(1 << TPD_CXSUM_EN_SHIFT);
   1421
   1422	return 0;
   1423}
   1424
   1425static int alx_tso(struct sk_buff *skb, struct alx_txd *first)
   1426{
   1427	int err;
   1428
   1429	if (skb->ip_summed != CHECKSUM_PARTIAL)
   1430		return 0;
   1431
   1432	if (!skb_is_gso(skb))
   1433		return 0;
   1434
   1435	err = skb_cow_head(skb, 0);
   1436	if (err < 0)
   1437		return err;
   1438
   1439	if (skb->protocol == htons(ETH_P_IP)) {
   1440		struct iphdr *iph = ip_hdr(skb);
   1441
   1442		iph->check = 0;
   1443		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
   1444							 0, IPPROTO_TCP, 0);
   1445		first->word1 |= 1 << TPD_IPV4_SHIFT;
   1446	} else if (skb_is_gso_v6(skb)) {
   1447		tcp_v6_gso_csum_prep(skb);
   1448		/* LSOv2: the first TPD only provides the packet length */
   1449		first->adrl.l.pkt_len = skb->len;
   1450		first->word1 |= 1 << TPD_LSO_V2_SHIFT;
   1451	}
   1452
   1453	first->word1 |= 1 << TPD_LSO_EN_SHIFT;
   1454	first->word1 |= (skb_transport_offset(skb) &
   1455			 TPD_L4HDROFFSET_MASK) << TPD_L4HDROFFSET_SHIFT;
   1456	first->word1 |= (skb_shinfo(skb)->gso_size &
   1457			 TPD_MSS_MASK) << TPD_MSS_SHIFT;
   1458	return 1;
   1459}
   1460
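/* DMA-map the skb head and all fragments and fill the tx descriptors;
 * on a mapping failure, unwind every descriptor written so far.
 */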
   1461static int alx_map_tx_skb(struct alx_tx_queue *txq, struct sk_buff *skb)
   1462{
   1463	struct alx_txd *tpd, *first_tpd;
   1464	dma_addr_t dma;
   1465	int maplen, f, first_idx = txq->write_idx;
   1466
   1467	first_tpd = &txq->tpd[txq->write_idx];
   1468	tpd = first_tpd;
   1469
   1470	if (tpd->word1 & (1 << TPD_LSO_V2_SHIFT)) {
   1471		if (++txq->write_idx == txq->count)
   1472			txq->write_idx = 0;
   1473
   1474		tpd = &txq->tpd[txq->write_idx];
   1475		tpd->len = first_tpd->len;
   1476		tpd->vlan_tag = first_tpd->vlan_tag;
   1477		tpd->word1 = first_tpd->word1;
   1478	}
   1479
   1480	maplen = skb_headlen(skb);
   1481	dma = dma_map_single(txq->dev, skb->data, maplen,
   1482			     DMA_TO_DEVICE);
   1483	if (dma_mapping_error(txq->dev, dma))
   1484		goto err_dma;
   1485
   1486	dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
   1487	dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);
   1488
   1489	tpd->adrl.addr = cpu_to_le64(dma);
   1490	tpd->len = cpu_to_le16(maplen);
   1491
   1492	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
   1493		skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
   1494
   1495		if (++txq->write_idx == txq->count)
   1496			txq->write_idx = 0;
   1497		tpd = &txq->tpd[txq->write_idx];
   1498
   1499		tpd->word1 = first_tpd->word1;
   1500
   1501		maplen = skb_frag_size(frag);
   1502		dma = skb_frag_dma_map(txq->dev, frag, 0,
   1503				       maplen, DMA_TO_DEVICE);
   1504		if (dma_mapping_error(txq->dev, dma))
   1505			goto err_dma;
   1506		dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
   1507		dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);
   1508
   1509		tpd->adrl.addr = cpu_to_le64(dma);
   1510		tpd->len = cpu_to_le16(maplen);
   1511	}
   1512
   1513	/* last TPD, set EOP flag and store skb */
   1514	tpd->word1 |= cpu_to_le32(1 << TPD_EOP_SHIFT);
   1515	txq->bufs[txq->write_idx].skb = skb;
   1516
   1517	if (++txq->write_idx == txq->count)
   1518		txq->write_idx = 0;
   1519
   1520	return 0;
   1521
   1522err_dma:
   1523	f = first_idx;
   1524	while (f != txq->write_idx) {
   1525		alx_free_txbuf(txq, f);
   1526		if (++f == txq->count)
   1527			f = 0;
   1528	}
   1529	return -ENOMEM;
   1530}
   1531
   1532static netdev_tx_t alx_start_xmit_ring(struct sk_buff *skb,
   1533				       struct alx_tx_queue *txq)
   1534{
   1535	struct alx_priv *alx;
   1536	struct alx_txd *first;
   1537	int tso;
   1538
   1539	alx = netdev_priv(txq->netdev);
   1540
   1541	if (alx_tpd_avail(txq) < alx_tpd_req(skb)) {
   1542		netif_tx_stop_queue(alx_get_tx_queue(txq));
   1543		goto drop;
   1544	}
   1545
   1546	first = &txq->tpd[txq->write_idx];
   1547	memset(first, 0, sizeof(*first));
   1548
   1549	tso = alx_tso(skb, first);
   1550	if (tso < 0)
   1551		goto drop;
   1552	else if (!tso && alx_tx_csum(skb, first))
   1553		goto drop;
   1554
   1555	if (alx_map_tx_skb(txq, skb) < 0)
   1556		goto drop;
   1557
   1558	netdev_tx_sent_queue(alx_get_tx_queue(txq), skb->len);
   1559
   1560	/* flush updates before updating hardware */
   1561	wmb();
   1562	alx_write_mem16(&alx->hw, txq->p_reg, txq->write_idx);
   1563
   1564	if (alx_tpd_avail(txq) < txq->count / 8)
   1565		netif_tx_stop_queue(alx_get_tx_queue(txq));
   1566
   1567	return NETDEV_TX_OK;
   1568
   1569drop:
   1570	dev_kfree_skb_any(skb);
   1571	return NETDEV_TX_OK;
   1572}
   1573
   1574static netdev_tx_t alx_start_xmit(struct sk_buff *skb,
   1575				  struct net_device *netdev)
   1576{
   1577	struct alx_priv *alx = netdev_priv(netdev);
   1578	return alx_start_xmit_ring(skb, alx_tx_queue_mapping(alx, skb));
   1579}
   1580
   1581static void alx_tx_timeout(struct net_device *dev, unsigned int txqueue)
   1582{
   1583	struct alx_priv *alx = netdev_priv(dev);
   1584
   1585	alx_schedule_reset(alx);
   1586}
   1587
   1588static int alx_mdio_read(struct net_device *netdev,
   1589			 int prtad, int devad, u16 addr)
   1590{
   1591	struct alx_priv *alx = netdev_priv(netdev);
   1592	struct alx_hw *hw = &alx->hw;
   1593	u16 val;
   1594	int err;
   1595
   1596	if (prtad != hw->mdio.prtad)
   1597		return -EINVAL;
   1598
   1599	if (devad == MDIO_DEVAD_NONE)
   1600		err = alx_read_phy_reg(hw, addr, &val);
   1601	else
   1602		err = alx_read_phy_ext(hw, devad, addr, &val);
   1603
   1604	if (err)
   1605		return err;
   1606	return val;
   1607}
   1608
   1609static int alx_mdio_write(struct net_device *netdev,
   1610			  int prtad, int devad, u16 addr, u16 val)
   1611{
   1612	struct alx_priv *alx = netdev_priv(netdev);
   1613	struct alx_hw *hw = &alx->hw;
   1614
   1615	if (prtad != hw->mdio.prtad)
   1616		return -EINVAL;
   1617
   1618	if (devad == MDIO_DEVAD_NONE)
   1619		return alx_write_phy_reg(hw, addr, val);
   1620
   1621	return alx_write_phy_ext(hw, devad, addr, val);
   1622}
   1623
   1624static int alx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
   1625{
   1626	struct alx_priv *alx = netdev_priv(netdev);
   1627
   1628	if (!netif_running(netdev))
   1629		return -EAGAIN;
   1630
   1631	return mdio_mii_ioctl(&alx->hw.mdio, if_mii(ifr), cmd);
   1632}
   1633
   1634#ifdef CONFIG_NET_POLL_CONTROLLER
   1635static void alx_poll_controller(struct net_device *netdev)
   1636{
   1637	struct alx_priv *alx = netdev_priv(netdev);
   1638	int i;
   1639
   1640	if (alx->hw.pdev->msix_enabled) {
   1641		alx_intr_msix_misc(0, alx);
   1642		for (i = 0; i < alx->num_txq; i++)
   1643			alx_intr_msix_ring(0, alx->qnapi[i]);
   1644	} else if (alx->hw.pdev->msi_enabled)
   1645		alx_intr_msi(0, alx);
   1646	else
   1647		alx_intr_legacy(0, alx);
   1648}
   1649#endif
   1650
   1651static void alx_get_stats64(struct net_device *dev,
   1652			    struct rtnl_link_stats64 *net_stats)
   1653{
   1654	struct alx_priv *alx = netdev_priv(dev);
   1655	struct alx_hw_stats *hw_stats = &alx->hw.stats;
   1656
   1657	spin_lock(&alx->stats_lock);
   1658
   1659	alx_update_hw_stats(&alx->hw);
   1660
   1661	net_stats->tx_bytes   = hw_stats->tx_byte_cnt;
   1662	net_stats->rx_bytes   = hw_stats->rx_byte_cnt;
   1663	net_stats->multicast  = hw_stats->rx_mcast;
   1664	net_stats->collisions = hw_stats->tx_single_col +
   1665				hw_stats->tx_multi_col +
   1666				hw_stats->tx_late_col +
   1667				hw_stats->tx_abort_col;
   1668
   1669	net_stats->rx_errors  = hw_stats->rx_frag +
   1670				hw_stats->rx_fcs_err +
   1671				hw_stats->rx_len_err +
   1672				hw_stats->rx_ov_sz +
   1673				hw_stats->rx_ov_rrd +
   1674				hw_stats->rx_align_err +
   1675				hw_stats->rx_ov_rxf;
   1676
   1677	net_stats->rx_fifo_errors   = hw_stats->rx_ov_rxf;
   1678	net_stats->rx_length_errors = hw_stats->rx_len_err;
   1679	net_stats->rx_crc_errors    = hw_stats->rx_fcs_err;
   1680	net_stats->rx_frame_errors  = hw_stats->rx_align_err;
   1681	net_stats->rx_dropped       = hw_stats->rx_ov_rrd;
   1682
   1683	net_stats->tx_errors = hw_stats->tx_late_col +
   1684			       hw_stats->tx_abort_col +
   1685			       hw_stats->tx_underrun +
   1686			       hw_stats->tx_trunc;
   1687
   1688	net_stats->tx_aborted_errors = hw_stats->tx_abort_col;
   1689	net_stats->tx_fifo_errors    = hw_stats->tx_underrun;
   1690	net_stats->tx_window_errors  = hw_stats->tx_late_col;
   1691
   1692	net_stats->tx_packets = hw_stats->tx_ok + net_stats->tx_errors;
   1693	net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors;
   1694
   1695	spin_unlock(&alx->stats_lock);
   1696}
   1697
   1698static const struct net_device_ops alx_netdev_ops = {
   1699	.ndo_open               = alx_open,
   1700	.ndo_stop               = alx_stop,
   1701	.ndo_start_xmit         = alx_start_xmit,
   1702	.ndo_get_stats64        = alx_get_stats64,
   1703	.ndo_set_rx_mode        = alx_set_rx_mode,
   1704	.ndo_validate_addr      = eth_validate_addr,
   1705	.ndo_set_mac_address    = alx_set_mac_address,
   1706	.ndo_change_mtu         = alx_change_mtu,
   1707	.ndo_eth_ioctl           = alx_ioctl,
   1708	.ndo_tx_timeout         = alx_tx_timeout,
   1709	.ndo_fix_features	= alx_fix_features,
   1710#ifdef CONFIG_NET_POLL_CONTROLLER
   1711	.ndo_poll_controller    = alx_poll_controller,
   1712#endif
   1713};
   1714
   1715static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
   1716{
   1717	struct net_device *netdev;
   1718	struct alx_priv *alx;
   1719	struct alx_hw *hw;
   1720	bool phy_configured;
   1721	int err;
   1722
   1723	err = pci_enable_device_mem(pdev);
   1724	if (err)
   1725		return err;
   1726
   1727	/* The alx chip can DMA to 64-bit addresses, but it uses a single
   1728	 * shared register for the high 32 bits, so only a single, aligned,
   1729	 * 4 GB physical address range can be used for descriptors.
   1730	 */
   1731	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
   1732		dev_dbg(&pdev->dev, "DMA to 64-BIT addresses\n");
   1733	} else {
   1734		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
   1735		if (err) {
   1736			dev_err(&pdev->dev, "No usable DMA config, aborting\n");
   1737			goto out_pci_disable;
   1738		}
   1739	}
   1740
   1741	err = pci_request_mem_regions(pdev, alx_drv_name);
   1742	if (err) {
   1743		dev_err(&pdev->dev,
   1744			"pci_request_mem_regions failed\n");
   1745		goto out_pci_disable;
   1746	}
   1747
   1748	pci_enable_pcie_error_reporting(pdev);
   1749	pci_set_master(pdev);
   1750
   1751	if (!pdev->pm_cap) {
   1752		dev_err(&pdev->dev,
   1753			"Can't find power management capability, aborting\n");
   1754		err = -EIO;
   1755		goto out_pci_release;
   1756	}
   1757
   1758	netdev = alloc_etherdev_mqs(sizeof(*alx),
   1759				    ALX_MAX_TX_QUEUES, 1);
   1760	if (!netdev) {
   1761		err = -ENOMEM;
   1762		goto out_pci_release;
   1763	}
   1764
   1765	SET_NETDEV_DEV(netdev, &pdev->dev);
   1766	alx = netdev_priv(netdev);
   1767	spin_lock_init(&alx->hw.mdio_lock);
   1768	spin_lock_init(&alx->irq_lock);
   1769	spin_lock_init(&alx->stats_lock);
   1770	alx->dev = netdev;
   1771	alx->hw.pdev = pdev;
   1772	alx->msg_enable = NETIF_MSG_LINK | NETIF_MSG_HW | NETIF_MSG_IFUP |
   1773			  NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR | NETIF_MSG_WOL;
   1774	hw = &alx->hw;
   1775	pci_set_drvdata(pdev, alx);
   1776
   1777	hw->hw_addr = pci_ioremap_bar(pdev, 0);
   1778	if (!hw->hw_addr) {
   1779		dev_err(&pdev->dev, "cannot map device registers\n");
   1780		err = -EIO;
   1781		goto out_free_netdev;
   1782	}
   1783
   1784	netdev->netdev_ops = &alx_netdev_ops;
   1785	netdev->ethtool_ops = &alx_ethtool_ops;
   1786	netdev->irq = pci_irq_vector(pdev, 0);
   1787	netdev->watchdog_timeo = ALX_WATCHDOG_TIME;
   1788
   1789	if (ent->driver_data & ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG)
   1790		pdev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
   1791
   1792	err = alx_init_sw(alx);
   1793	if (err) {
   1794		dev_err(&pdev->dev, "net device private data init failed\n");
   1795		goto out_unmap;
   1796	}
   1797
   1798	mutex_lock(&alx->mtx);
   1799
   1800	alx_reset_pcie(hw);
   1801
   1802	phy_configured = alx_phy_configured(hw);
   1803
   1804	if (!phy_configured)
   1805		alx_reset_phy(hw);
   1806
   1807	err = alx_reset_mac(hw);
   1808	if (err) {
   1809		dev_err(&pdev->dev, "MAC Reset failed, error = %d\n", err);
   1810		goto out_unlock;
   1811	}
   1812
   1813	/* setup link to put it in a known good starting state */
   1814	if (!phy_configured) {
   1815		err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl);
   1816		if (err) {
   1817			dev_err(&pdev->dev,
   1818				"failed to configure PHY speed/duplex (err=%d)\n",
   1819				err);
   1820			goto out_unlock;
   1821		}
   1822	}
   1823
   1824	netdev->hw_features = NETIF_F_SG |
   1825			      NETIF_F_HW_CSUM |
   1826			      NETIF_F_RXCSUM |
   1827			      NETIF_F_TSO |
   1828			      NETIF_F_TSO6;
   1829
   1830	if (alx_get_perm_macaddr(hw, hw->perm_addr)) {
   1831		dev_warn(&pdev->dev,
   1832			 "Invalid permanent address programmed, using random one\n");
   1833		eth_hw_addr_random(netdev);
   1834		memcpy(hw->perm_addr, netdev->dev_addr, netdev->addr_len);
   1835	}
   1836
   1837	memcpy(hw->mac_addr, hw->perm_addr, ETH_ALEN);
   1838	eth_hw_addr_set(netdev, hw->mac_addr);
   1839	memcpy(netdev->perm_addr, hw->perm_addr, ETH_ALEN);
   1840
   1841	hw->mdio.prtad = 0;
   1842	hw->mdio.mmds = 0;
   1843	hw->mdio.dev = netdev;
   1844	hw->mdio.mode_support = MDIO_SUPPORTS_C45 |
   1845				MDIO_SUPPORTS_C22 |
   1846				MDIO_EMULATE_C22;
   1847	hw->mdio.mdio_read = alx_mdio_read;
   1848	hw->mdio.mdio_write = alx_mdio_write;
   1849
   1850	if (!alx_get_phy_info(hw)) {
   1851		dev_err(&pdev->dev, "failed to identify PHY\n");
   1852		err = -EIO;
   1853		goto out_unlock;
   1854	}
   1855
   1856	mutex_unlock(&alx->mtx);
   1857
   1858	INIT_WORK(&alx->link_check_wk, alx_link_check);
   1859	INIT_WORK(&alx->reset_wk, alx_reset);
   1860	netif_carrier_off(netdev);
   1861
   1862	err = register_netdev(netdev);
   1863	if (err) {
   1864		dev_err(&pdev->dev, "register netdevice failed\n");
   1865		goto out_unmap;
   1866	}
   1867
   1868	netdev_info(netdev,
   1869		    "Qualcomm Atheros AR816x/AR817x Ethernet [%pM]\n",
   1870		    netdev->dev_addr);
   1871
   1872	return 0;
   1873
   1874out_unlock:
   1875	mutex_unlock(&alx->mtx);
   1876out_unmap:
   1877	iounmap(hw->hw_addr);
   1878out_free_netdev:
   1879	free_netdev(netdev);
   1880out_pci_release:
   1881	pci_release_mem_regions(pdev);
   1882	pci_disable_pcie_error_reporting(pdev);
   1883out_pci_disable:
   1884	pci_disable_device(pdev);
   1885	return err;
   1886}
   1887
   1888static void alx_remove(struct pci_dev *pdev)
   1889{
   1890	struct alx_priv *alx = pci_get_drvdata(pdev);
   1891	struct alx_hw *hw = &alx->hw;
   1892
   1893	/* restore permanent mac address */
   1894	alx_set_macaddr(hw, hw->perm_addr);
   1895
   1896	unregister_netdev(alx->dev);
   1897	iounmap(hw->hw_addr);
   1898	pci_release_mem_regions(pdev);
   1899
   1900	pci_disable_pcie_error_reporting(pdev);
   1901	pci_disable_device(pdev);
   1902
   1903	mutex_destroy(&alx->mtx);
   1904
   1905	free_netdev(alx->dev);
   1906}
   1907
   1908#ifdef CONFIG_PM_SLEEP
   1909static int alx_suspend(struct device *dev)
   1910{
   1911	struct alx_priv *alx = dev_get_drvdata(dev);
   1912
   1913	if (!netif_running(alx->dev))
   1914		return 0;
   1915	netif_device_detach(alx->dev);
   1916
   1917	mutex_lock(&alx->mtx);
   1918	__alx_stop(alx);
   1919	mutex_unlock(&alx->mtx);
   1920
   1921	return 0;
   1922}
   1923
   1924static int alx_resume(struct device *dev)
   1925{
   1926	struct alx_priv *alx = dev_get_drvdata(dev);
   1927	struct alx_hw *hw = &alx->hw;
   1928	int err;
   1929
   1930	mutex_lock(&alx->mtx);
   1931	alx_reset_phy(hw);
   1932
   1933	if (!netif_running(alx->dev)) {
   1934		err = 0;
   1935		goto unlock;
   1936	}
   1937
   1938	err = __alx_open(alx, true);
   1939	if (err)
   1940		goto unlock;
   1941
   1942	netif_device_attach(alx->dev);
   1943
   1944unlock:
   1945	mutex_unlock(&alx->mtx);
   1946	return err;
   1947}
   1948
   1949static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
   1950#define ALX_PM_OPS      (&alx_pm_ops)
   1951#else
   1952#define ALX_PM_OPS      NULL
   1953#endif
   1954
   1955
   1956static pci_ers_result_t alx_pci_error_detected(struct pci_dev *pdev,
   1957					       pci_channel_state_t state)
   1958{
   1959	struct alx_priv *alx = pci_get_drvdata(pdev);
   1960	struct net_device *netdev = alx->dev;
   1961	pci_ers_result_t rc = PCI_ERS_RESULT_NEED_RESET;
   1962
   1963	dev_info(&pdev->dev, "pci error detected\n");
   1964
   1965	mutex_lock(&alx->mtx);
   1966
   1967	if (netif_running(netdev)) {
   1968		netif_device_detach(netdev);
   1969		alx_halt(alx);
   1970	}
   1971
   1972	if (state == pci_channel_io_perm_failure)
   1973		rc = PCI_ERS_RESULT_DISCONNECT;
   1974	else
   1975		pci_disable_device(pdev);
   1976
   1977	mutex_unlock(&alx->mtx);
   1978
   1979	return rc;
   1980}
   1981
   1982static pci_ers_result_t alx_pci_error_slot_reset(struct pci_dev *pdev)
   1983{
   1984	struct alx_priv *alx = pci_get_drvdata(pdev);
   1985	struct alx_hw *hw = &alx->hw;
   1986	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
   1987
   1988	dev_info(&pdev->dev, "pci error slot reset\n");
   1989
   1990	mutex_lock(&alx->mtx);
   1991
   1992	if (pci_enable_device(pdev)) {
   1993		dev_err(&pdev->dev, "Failed to re-enable PCI device after reset\n");
   1994		goto out;
   1995	}
   1996
   1997	pci_set_master(pdev);
   1998
   1999	alx_reset_pcie(hw);
   2000	if (!alx_reset_mac(hw))
   2001		rc = PCI_ERS_RESULT_RECOVERED;
   2002out:
   2003	mutex_unlock(&alx->mtx);
   2004
   2005	return rc;
   2006}
   2007
   2008static void alx_pci_error_resume(struct pci_dev *pdev)
   2009{
   2010	struct alx_priv *alx = pci_get_drvdata(pdev);
   2011	struct net_device *netdev = alx->dev;
   2012
   2013	dev_info(&pdev->dev, "pci error resume\n");
   2014
   2015	mutex_lock(&alx->mtx);
   2016
   2017	if (netif_running(netdev)) {
   2018		alx_activate(alx);
   2019		netif_device_attach(netdev);
   2020	}
   2021
   2022	mutex_unlock(&alx->mtx);
   2023}
   2024
   2025static const struct pci_error_handlers alx_err_handlers = {
   2026	.error_detected = alx_pci_error_detected,
   2027	.slot_reset     = alx_pci_error_slot_reset,
   2028	.resume         = alx_pci_error_resume,
   2029};
   2030
   2031static const struct pci_device_id alx_pci_tbl[] = {
   2032	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8161),
   2033	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
   2034	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2200),
   2035	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
   2036	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2400),
   2037	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
   2038	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2500),
   2039	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
   2040	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162),
   2041	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
   2042	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) },
   2043	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8172) },
   2044	{}
   2045};
   2046
   2047static struct pci_driver alx_driver = {
   2048	.name        = alx_drv_name,
   2049	.id_table    = alx_pci_tbl,
   2050	.probe       = alx_probe,
   2051	.remove      = alx_remove,
   2052	.err_handler = &alx_err_handlers,
   2053	.driver.pm   = ALX_PM_OPS,
   2054};
   2055
   2056module_pci_driver(alx_driver);
   2057MODULE_DEVICE_TABLE(pci, alx_pci_tbl);
   2058MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
   2059MODULE_AUTHOR("Qualcomm Corporation");
   2060MODULE_DESCRIPTION(
   2061	"Qualcomm Atheros(R) AR816x/AR817x PCI-E Ethernet Network Driver");
   2062MODULE_LICENSE("GPL");